panfrost: add support for incremental rendering in CSF
When the tiler heap is exhausted while running an IDVS job, the
out-of-memory exception handler is executed. When that happens, we
start fragment shading for all the polygons that have been generated
so far, then recycle the tiler heap memory so the tiling job can
continue.
The framebuffer descriptor used for the fragment job depends on the
current pass:
- FIRST_PASS: Used the first time the OOM handler is called. We
  want to preload/clear just as configured for the draw, but we
  don't want to discard the result.
- MIDDLE_PASS: Used on subsequent invocations of the OOM handler.
  We want to preload the content rendered by the previous passes
  and still don't want to discard the result.
- LAST_PASS: Used for the fragment job run once we are back in
  normal operation. We want to preload the content from the
  rendering passes that occurred in the OOM handler, and to handle
  the result exactly as initially configured for the draw.
If incremental rendering is not triggered by an OOM exception, the
fragment job uses the default framebuffer descriptor. (A condensed
sketch of this pass selection follows the diff stats below.)
Signed-off-by: Louis-Francis Ratté-Boulianne <lfrb@collabora.com>
Signed-off-by: antonino <antonino.maniscalco@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Lars-Ivar Hesselberg Simonsen <lars-ivar.simonsen@arm.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31174>
parent f1e3e68241
commit 774b70974a
2 changed files with 304 additions and 4 deletions
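Before the diff, here is a condensed host-side C model of the pass
selection described in the commit message. The driver encodes this
logic as command-stream instructions operating on a counter stored in
a GPU-visible context (see csf_oom_handler_init and
csf_emit_fragment_job below); the helper names in this sketch are
illustrative, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Mirrors pan_rendering_pass from the commit; DEFAULT_FBD stands in
 * for the batch's regular framebuffer descriptor. */
enum pass { FIRST, MIDDLE, LAST, DEFAULT_FBD };

/* What the OOM exception handler does: pick the FBD variant based on
 * how many times it has already run for this batch, then bump the
 * counter. */
static enum pass
handler_pass(uint32_t *counter)
{
   enum pass p = (*counter > 0) ? MIDDLE : FIRST;
   (*counter)++;
   return p;
}

/* What the final fragment job does once back in normal operation. */
static enum pass
final_pass(uint32_t counter)
{
   return (counter > 0) ? LAST : DEFAULT_FBD;
}

int
main(void)
{
   uint32_t counter = 0;

   /* Suppose the tiler heap ran out twice while tiling one batch: */
   enum pass p1 = handler_pass(&counter); /* FIRST: clear as configured */
   enum pass p2 = handler_pass(&counter); /* MIDDLE: preload previous */
   enum pass p3 = final_pass(counter);    /* LAST: honor original stores */

   printf("%d %d %d\n", p1, p2, p3); /* prints: 0 1 2 */
   return 0;
}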
(first changed file)

@@ -56,6 +56,139 @@ csf_alloc_cs_buffer(void *cookie)
    };
 }
 
+/*
+ * Register is reserved to pass the batch tiler OOM context
+ */
+#define TILER_OOM_CTX_REG 76
+
+static enum cs_reg_perm
+csf_reg_perm_cb(struct cs_builder *b, unsigned reg)
+{
+   if (reg == TILER_OOM_CTX_REG)
+      return CS_REG_RD;
+   return CS_REG_RW;
+}
+
+static void
+csf_update_tiler_oom_ctx(struct cs_builder *b, uint64_t addr)
+{
+   reg_perm_cb_t orig_cb;
+
+   if (likely(!b->conf.reg_perm)) {
+      cs_move64_to(b, cs_reg64(b, TILER_OOM_CTX_REG), addr);
+      return;
+   }
+
+   orig_cb = b->conf.reg_perm;
+   b->conf.reg_perm = NULL;
+   cs_move64_to(b, cs_reg64(b, TILER_OOM_CTX_REG), addr);
+   b->conf.reg_perm = orig_cb;
+}
+
+#define FIELD_OFFSET(_name) offsetof(struct pan_csf_tiler_oom_ctx, _name)
+
+#define FBD_OFFSET(_pass)                                                      \
+   (FIELD_OFFSET(fbds) +                                                       \
+    (PAN_INCREMENTAL_RENDERING_##_pass##_PASS * sizeof(struct panfrost_ptr)) + \
+    offsetof(struct panfrost_ptr, gpu))
+
+static void
+csf_oom_handler_init(struct panfrost_context *ctx)
+{
+   struct panfrost_device *dev = pan_device(ctx->base.screen);
+   struct panfrost_bo *cs_bo =
+      panfrost_bo_create(dev, 4096, 0, "Temporary CS buffer");
+   assert(cs_bo);
+
+   struct panfrost_bo *reg_save_bo =
+      panfrost_bo_create(dev, 4096, 0, "reg save bo");
+   assert(reg_save_bo);
+
+   struct cs_buffer queue = {
+      .cpu = cs_bo->ptr.cpu,
+      .gpu = cs_bo->ptr.gpu,
+      .capacity = panfrost_bo_size(cs_bo) / sizeof(uint64_t),
+   };
+   struct cs_builder b;
+   struct cs_exception_handler handler;
+   const struct cs_builder_conf conf = {
+      .nr_registers = 96,
+      .nr_kernel_registers = 4,
+      .reg_perm = (dev->debug & PAN_DBG_CS) ? csf_reg_perm_cb : NULL,
+   };
+   cs_builder_init(&b, &conf, queue);
+   cs_exception_handler_start(&b, &handler, reg_save_bo->ptr.gpu, 0);
+
+   struct cs_index tiler_oom_ctx = cs_reg64(&b, TILER_OOM_CTX_REG);
+   struct cs_index counter = cs_reg32(&b, 47);
+   struct cs_index zero = cs_reg64(&b, 48);
+   struct cs_index flush_id = cs_reg32(&b, 48);
+   struct cs_index tiler_ctx = cs_reg64(&b, 50);
+   struct cs_index completed_top = cs_reg64(&b, 52);
+   struct cs_index completed_bottom = cs_reg64(&b, 54);
+   struct cs_index completed_chunks = cs_reg_tuple(&b, 52, 4);
+
+   /* Use different framebuffer descriptor depending on whether incremental
+    * rendering has already been triggered */
+   cs_load32_to(&b, counter, tiler_oom_ctx, FIELD_OFFSET(counter));
+   cs_wait_slot(&b, 0, false);
+   cs_if(&b, MALI_CS_CONDITION_GREATER, counter) {
+      cs_load64_to(&b, cs_reg64(&b, 40), tiler_oom_ctx, FBD_OFFSET(MIDDLE));
+   }
+   cs_else(&b) {
+      cs_load64_to(&b, cs_reg64(&b, 40), tiler_oom_ctx, FBD_OFFSET(FIRST));
+   }
+
+   cs_load32_to(&b, cs_reg32(&b, 42), tiler_oom_ctx, FIELD_OFFSET(bbox_min));
+   cs_load32_to(&b, cs_reg32(&b, 43), tiler_oom_ctx, FIELD_OFFSET(bbox_max));
+   cs_move64_to(&b, cs_reg64(&b, 44), 0);
+   cs_move32_to(&b, cs_reg32(&b, 46), 0);
+   cs_wait_slot(&b, 0, false);
+
+   /* Run the fragment job and wait */
+   cs_set_scoreboard_entry(&b, 3, 0);
+   cs_run_fragment(&b, false, MALI_TILE_RENDER_ORDER_Z_ORDER, false);
+   cs_wait_slot(&b, 3, false);
+
+   /* Increment counter */
+   cs_add32(&b, counter, counter, 1);
+   cs_store32(&b, counter, tiler_oom_ctx, FIELD_OFFSET(counter));
+
+   /* Load completed chunks */
+   cs_load64_to(&b, tiler_ctx, tiler_oom_ctx, FIELD_OFFSET(tiler_desc));
+   cs_wait_slot(&b, 0, false);
+   cs_load_to(&b, completed_chunks, tiler_ctx, BITFIELD_MASK(4), 10 * 4);
+   cs_wait_slot(&b, 0, false);
+
+   cs_finish_fragment(&b, false, completed_top, completed_bottom, cs_now());
+
+   /* Zero out polygon list, completed_top and completed_bottom */
+   cs_move64_to(&b, zero, 0);
+   cs_store64(&b, zero, tiler_ctx, 0);
+   cs_store64(&b, zero, tiler_ctx, 10 * 4);
+   cs_store64(&b, zero, tiler_ctx, 12 * 4);
+
+   /* We need to flush the texture caches so future preloads see the new
+    * content. */
+   cs_flush_caches(&b, MALI_CS_FLUSH_MODE_NONE, MALI_CS_FLUSH_MODE_NONE, true,
+                   flush_id, cs_defer(0, 0));
+
+   cs_wait_slot(&b, 0, false);
+
+   cs_set_scoreboard_entry(&b, 2, 0);
+
+   cs_exception_handler_end(&b, &handler);
+
+   assert(cs_is_valid(&b));
+   cs_finish(&b);
+   ctx->csf.tiler_oom_handler.cs_bo = cs_bo;
+   ctx->csf.tiler_oom_handler.length = b.root_chunk.size * 8;
+   ctx->csf.tiler_oom_handler.save_bo = reg_save_bo;
+}
+
+#undef FBD_OFFSET
+#undef FIELD_OFFSET
+
 void
 GENX(csf_cleanup_batch)(struct panfrost_batch *batch)
 {
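A note on the FIELD_OFFSET/FBD_OFFSET macros above: the handler can
load the per-pass FBD pointer straight from GPU memory because the
offsets computed on the CPU with offsetof() match the packed layout of
pan_csf_tiler_oom_ctx (added in the header hunk further down). Below is
a minimal self-contained sketch of that arithmetic; the stand-in types
are assumptions for illustration (panfrost_ptr is modeled as
{cpu, gpu}), and the resulting value of 28 assumes a 64-bit host.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t mali_ptr; /* GPU virtual address */

struct panfrost_ptr {
   void *cpu;    /* CPU mapping */
   mali_ptr gpu; /* GPU address */
};

enum { FIRST_PASS, MIDDLE_PASS, LAST_PASS, PASS_COUNT };

struct tiler_oom_ctx {
   uint32_t counter;                     /* offset 0 */
   struct panfrost_ptr fbds[PASS_COUNT]; /* offset 4 (packed) */
   uint32_t bbox_min;
   uint32_t bbox_max;
   mali_ptr tiler_desc;
} __attribute__((packed));

int
main(void)
{
   /* FBD_OFFSET(MIDDLE) = offsetof(fbds) + 1 * sizeof(panfrost_ptr)
    *                    + offsetof(panfrost_ptr, gpu)
    *                    = 4 + 16 + 8 = 28 */
   size_t off = offsetof(struct tiler_oom_ctx, fbds) +
                MIDDLE_PASS * sizeof(struct panfrost_ptr) +
                offsetof(struct panfrost_ptr, gpu);
   printf("FBD_OFFSET(MIDDLE) = %zu\n", off); /* 28 on a 64-bit host */
   return 0;
}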
@@ -65,6 +198,14 @@ GENX(csf_cleanup_batch)(struct panfrost_batch *batch)
    panfrost_pool_cleanup(&batch->csf.cs_chunk_pool);
 }
 
+static inline struct panfrost_ptr
+alloc_fbd(struct panfrost_batch *batch)
+{
+   return pan_pool_alloc_desc_aggregate(
+      &batch->pool.base, PAN_DESC(FRAMEBUFFER), PAN_DESC(ZS_CRC_EXTENSION),
+      PAN_DESC_ARRAY(MAX2(batch->key.nr_cbufs, 1), RENDER_TARGET));
+}
+
 void
 GENX(csf_init_batch)(struct panfrost_batch *batch)
 {
@@ -89,6 +230,7 @@ GENX(csf_init_batch)(struct panfrost_batch *batch)
       .alloc_buffer = csf_alloc_cs_buffer,
       .cookie = batch,
       .ls_tracker = batch->csf.cs.ls_tracker,
+      .reg_perm = (dev->debug & PAN_DBG_CS) ? csf_reg_perm_cb : NULL,
    };
 
    /* Setup the queue builder */
@@ -101,9 +243,8 @@ GENX(csf_init_batch)(struct panfrost_batch *batch)
    struct cs_builder *b = batch->csf.cs.builder;
    cs_set_scoreboard_entry(b, 2, 0);
 
-   batch->framebuffer = pan_pool_alloc_desc_aggregate(
-      &batch->pool.base, PAN_DESC(FRAMEBUFFER), PAN_DESC(ZS_CRC_EXTENSION),
-      PAN_DESC_ARRAY(MAX2(batch->key.nr_cbufs, 1), RENDER_TARGET));
+   batch->framebuffer = alloc_fbd(batch);
 
    batch->tls = pan_pool_alloc_desc(&batch->pool.base, LOCAL_STORAGE);
 }
@@ -378,6 +519,13 @@ csf_submit_wait_and_dump(struct panfrost_batch *batch,
    drmSyncobjTimelineWait(panfrost_device_fd(dev), &vm_sync_handle,
                           &vm_sync_signal_point, 1, INT64_MAX, 0, NULL);
    assert(ret >= 0);
+
+   struct pan_csf_tiler_oom_ctx *tiler_oom_ctx =
+      batch->csf.tiler_oom_ctx.cpu;
+   if (tiler_oom_ctx != NULL && tiler_oom_ctx->counter > 0) {
+      perf_debug(ctx, "Incremental rendering was triggered %i time(s)",
+                 tiler_oom_ctx->counter);
+   }
 }
 
 /* Jobs won't be complete if blackhole rendering, that's ok */
@@ -474,12 +622,86 @@ GENX(csf_preload_fb)(struct panfrost_batch *batch, struct pan_fb_info *fb)
    (&dev->fb_preload_cache, &batch->pool.base, fb, batch->tls.gpu, NULL);
 }
 
+#define GET_FBD(_ctx, _pass)                                                   \
+   (_ctx)->fbds[PAN_INCREMENTAL_RENDERING_##_pass##_PASS]
+#define EMIT_FBD(_ctx, _pass, _fb, _tls, _tiler_ctx)                           \
+   GET_FBD(_ctx, _pass).gpu |=                                                 \
+      GENX(pan_emit_fbd)(_fb, 0, _tls, _tiler_ctx, GET_FBD(_ctx, _pass).cpu)
+
 void
 GENX(csf_emit_fbds)(struct panfrost_batch *batch, struct pan_fb_info *fb,
                     struct pan_tls_info *tls)
 {
    struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
 
    /* Default framebuffer descriptor */
    batch->framebuffer.gpu |=
       GENX(pan_emit_fbd)(fb, 0, tls, &batch->tiler_ctx, batch->framebuffer.cpu);
 
+   if (batch->draw_count == 0)
+      return;
+
+   struct pan_csf_tiler_oom_ctx *tiler_oom_ctx = batch->csf.tiler_oom_ctx.cpu;
+   struct pan_fb_info alt_fb;
+   bool changed = false;
+
+   /* First incremental rendering pass: don't discard result */
+   memcpy(&alt_fb, fb, sizeof(alt_fb));
+   for (unsigned i = 0; i < fb->rt_count; i++)
+      alt_fb.rts[i].discard = false;
+   alt_fb.zs.discard.z = false;
+   alt_fb.zs.discard.s = false;
+
+   EMIT_FBD(tiler_oom_ctx, FIRST, &alt_fb, tls, &batch->tiler_ctx);
+
+   /* Subsequent incremental rendering passes: preload old content and don't
+    * discard result */
+   for (unsigned i = 0; i < fb->rt_count; i++) {
+      if (fb->rts[i].view && !fb->rts[i].preload) {
+         alt_fb.rts[i].preload = true;
+         changed = true;
+      }
+
+      if (alt_fb.rts[i].clear) {
+         alt_fb.rts[i].clear = false;
+         changed = true;
+      }
+   }
+   if (fb->zs.view.zs && !fb->zs.preload.z && !fb->zs.preload.s) {
+      alt_fb.zs.preload.z = true;
+      alt_fb.zs.preload.s = true;
+      changed = true;
+   } else if (fb->zs.view.s && !fb->zs.preload.s) {
+      alt_fb.zs.preload.s = true;
+      changed = true;
+   }
+
+   if (alt_fb.zs.clear.z || alt_fb.zs.clear.s) {
+      alt_fb.zs.clear.z = false;
+      alt_fb.zs.clear.s = false;
+      changed = true;
+   }
+
+   if (changed) {
+      alt_fb.bifrost.pre_post.dcds.gpu = 0;
+      GENX(pan_preload_fb)
+      (&dev->fb_preload_cache, &batch->pool.base, &alt_fb, batch->tls.gpu, NULL);
+   }
+
+   EMIT_FBD(tiler_oom_ctx, MIDDLE, &alt_fb, tls, &batch->tiler_ctx);
+
+   /* Last incremental rendering pass: preload previous content and deal with
+    * results as specified by user */
+   for (unsigned i = 0; i < fb->rt_count; i++)
+      alt_fb.rts[i].discard = fb->rts[i].discard;
+   alt_fb.zs.discard.z = fb->zs.discard.z;
+   alt_fb.zs.discard.s = fb->zs.discard.s;
+
+   EMIT_FBD(tiler_oom_ctx, LAST, &alt_fb, tls, &batch->tiler_ctx);
+}
+
 void
@@ -487,6 +709,7 @@ GENX(csf_emit_fragment_job)(struct panfrost_batch *batch,
                             const struct pan_fb_info *pfb)
 {
    struct cs_builder *b = batch->csf.cs.builder;
+   struct pan_csf_tiler_oom_ctx *oom_ctx = batch->csf.tiler_oom_ctx.cpu;
 
    if (batch->draw_count > 0) {
       /* Finish tiling and wait for IDVS and tiling */
@@ -500,6 +723,19 @@ GENX(csf_emit_fragment_job)(struct panfrost_batch *batch,
    cs_move32_to(b, cs_reg32(b, 42), (batch->miny << 16) | batch->minx);
    cs_move32_to(b, cs_reg32(b, 43),
                 ((batch->maxy - 1) << 16) | (batch->maxx - 1));
+   cs_move64_to(b, cs_reg64(b, 44), 0);
+   cs_move32_to(b, cs_reg32(b, 46), 0);
 
+   /* Use different framebuffer descriptor if incremental rendering was
+    * triggered while tiling */
+   if (batch->draw_count > 0) {
+      struct cs_index counter = cs_reg32(b, 78);
+      cs_load32_to(b, counter, cs_reg64(b, TILER_OOM_CTX_REG), 0);
+      cs_wait_slot(b, 0, false);
+      cs_if(b, MALI_CS_CONDITION_GREATER, counter) {
+         cs_move64_to(b, cs_reg64(b, 40), GET_FBD(oom_ctx, LAST).gpu);
+      }
+   }
+
    /* Run the fragment job and wait */
    cs_run_fragment(b, false, MALI_TILE_RENDER_ORDER_Z_ORDER, false);
@@ -735,6 +971,26 @@ csf_get_tiler_desc(struct panfrost_batch *batch)
    return batch->tiler_ctx.valhall.desc;
 }
 
+static void
+emit_tiler_oom_context(struct cs_builder *b, struct panfrost_batch *batch)
+{
+   struct pan_csf_tiler_oom_ctx *ctx;
+
+   batch->csf.tiler_oom_ctx =
+      pan_pool_alloc_aligned(&batch->pool.base, sizeof(*ctx), 8);
+   ctx = batch->csf.tiler_oom_ctx.cpu;
+
+   ctx->tiler_desc = csf_get_tiler_desc(batch);
+   ctx->counter = 0;
+   ctx->bbox_min = (batch->miny << 16) | batch->minx;
+   ctx->bbox_max = ((batch->maxy - 1) << 16) | (batch->maxx - 1);
+
+   for (unsigned i = 0; i < PAN_INCREMENTAL_RENDERING_PASS_COUNT; ++i)
+      ctx->fbds[i] = alloc_fbd(batch);
+
+   csf_update_tiler_oom_ctx(b, batch->csf.tiler_oom_ctx.gpu);
+}
+
 static uint32_t
 csf_emit_draw_state(struct panfrost_batch *batch,
                     const struct pipe_draw_info *info, unsigned drawid_offset)
@@ -752,8 +1008,10 @@ csf_emit_draw_state(struct panfrost_batch *batch,
 
    struct cs_builder *b = batch->csf.cs.builder;
 
-   if (batch->draw_count == 0)
+   if (batch->draw_count == 0) {
+      emit_tiler_oom_context(b, batch);
       cs_vt_start(batch->csf.cs.builder, cs_now());
+   }
 
    csf_emit_shader_regs(batch, PIPE_SHADER_VERTEX,
                         panfrost_get_position_shader(batch, info));
@@ -1134,6 +1392,8 @@ GENX(csf_init_context)(struct panfrost_context *ctx)
    if (cs_bo == NULL)
       goto err_tiler_heap_cs_bo;
 
+   csf_oom_handler_init(ctx);
+
    struct cs_buffer init_buffer = {
       .cpu = cs_bo->ptr.cpu,
      .gpu = cs_bo->ptr.gpu,
@@ -1149,6 +1409,13 @@ GENX(csf_init_context)(struct panfrost_context *ctx)
    cs_move64_to(&b, heap, thc.tiler_heap_ctx_gpu_va);
    cs_heap_set(&b, heap);
 
+   struct cs_index addr_reg = cs_reg64(&b, 86);
+   struct cs_index length_reg = cs_reg32(&b, 88);
+   cs_move64_to(&b, addr_reg, ctx->csf.tiler_oom_handler.cs_bo->ptr.gpu);
+   cs_move32_to(&b, length_reg, ctx->csf.tiler_oom_handler.length);
+   cs_set_exception_handler(&b, MALI_CS_EXCEPTION_TYPE_TILER_OOM,
+                            addr_reg, length_reg);
+
    struct drm_panthor_queue_submit qsubmit;
    struct drm_panthor_group_submit gsubmit;
    struct drm_panthor_sync_op sync = {
@@ -1231,6 +1498,8 @@ GENX(csf_cleanup_context)(struct panfrost_context *ctx)
 
    panfrost_bo_unreference(ctx->csf.tmp_geom_bo);
    panfrost_bo_unreference(ctx->csf.heap.desc_bo);
+   panfrost_bo_unreference(ctx->csf.tiler_oom_handler.cs_bo);
+   panfrost_bo_unreference(ctx->csf.tiler_oom_handler.save_bo);
 
    ctx->csf.is_init = false;
 }
(second changed file)

@@ -28,11 +28,34 @@
 #include "compiler/shader_enums.h"
 
 #include "pan_bo.h"
+#include "pan_desc.h"
 #include "pan_mempool.h"
 
 struct cs_builder;
 struct cs_load_store_tracker;
 
+enum pan_rendering_pass {
+   PAN_INCREMENTAL_RENDERING_FIRST_PASS,
+   PAN_INCREMENTAL_RENDERING_MIDDLE_PASS,
+   PAN_INCREMENTAL_RENDERING_LAST_PASS,
+   PAN_INCREMENTAL_RENDERING_PASS_COUNT
+};
+
+struct pan_csf_tiler_oom_ctx {
+   /* Number of times the OOM exception handler was called */
+   uint32_t counter;
+
+   /* Alternative framebuffer descriptors for incremental rendering */
+   struct panfrost_ptr fbds[PAN_INCREMENTAL_RENDERING_PASS_COUNT];
+
+   /* Bounding Box (Register 42 and 43) */
+   uint32_t bbox_min;
+   uint32_t bbox_max;
+
+   /* Tiler descriptor address */
+   mali_ptr tiler_desc;
+} PACKED;
+
 struct panfrost_csf_batch {
    /* CS related fields. */
    struct {
@@ -49,6 +72,8 @@ struct panfrost_csf_batch {
 
    /* Pool used to allocate CS chunks. */
    struct panfrost_pool cs_chunk_pool;
+
+   struct panfrost_ptr tiler_oom_ctx;
 };
 
 struct panfrost_csf_context {
@@ -62,6 +87,12 @@ struct panfrost_csf_context {
 
    /* Temporary geometry buffer. Used as a FIFO by the tiler. */
    struct panfrost_bo *tmp_geom_bo;
+
+   struct {
+      struct panfrost_bo *cs_bo;
+      struct panfrost_bo *save_bo;
+      uint32_t length;
+   } tiler_oom_handler;
 };
 
 #if defined(PAN_ARCH) && PAN_ARCH >= 10