mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 09:28:07 +02:00
pan/gen_pack: Add pan_size() and pan_alignment() macros
Replace all references to MALI_xxx_{LENGTH,ALIGN} with
pan_{size,alignment}(xxx) calls so we can provide generic aliases for
midgard/bifrost-specific descriptors.
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12551>
This commit is contained in:
parent
d7ff3973e9
commit
423f67c1bd
14 changed files with 84 additions and 81 deletions
|
|
@ -239,11 +239,11 @@ panfrost_emit_blend(struct panfrost_batch *batch, void *rts, mali_ptr *blend_sha
|
|||
|
||||
/* Always have at least one render target for depth-only passes */
|
||||
for (unsigned i = 0; i < MAX2(rt_count, 1); ++i) {
|
||||
struct mali_blend_packed *packed = rts + (i * MALI_BLEND_LENGTH);
|
||||
struct mali_blend_packed *packed = rts + (i * pan_size(BLEND));
|
||||
|
||||
/* Disable blending for unbacked render targets */
|
||||
if (rt_count == 0 || !batch->key.cbufs[i] || so->info[i].no_colour) {
|
||||
pan_pack(rts + i * MALI_BLEND_LENGTH, BLEND, cfg) {
|
||||
pan_pack(rts + i * pan_size(BLEND), BLEND, cfg) {
|
||||
cfg.enable = false;
|
||||
#if PAN_ARCH >= 6
|
||||
cfg.bifrost.internal.mode = MALI_BIFROST_BLEND_MODE_OFF;
|
||||
|
|
@ -278,7 +278,7 @@ panfrost_emit_blend(struct panfrost_batch *batch, void *rts, mali_ptr *blend_sha
|
|||
|
||||
if (!blend_shaders[i]) {
|
||||
/* Word 1: Blend Equation */
|
||||
STATIC_ASSERT(MALI_BLEND_EQUATION_LENGTH == 4);
|
||||
STATIC_ASSERT(pan_size(BLEND_EQUATION) == 4);
|
||||
packed->opaque[PAN_ARCH >= 6 ? 1 : 2] = so->equation[i];
|
||||
}
|
||||
|
||||
|
|
@ -499,7 +499,7 @@ panfrost_emit_frag_shader(struct panfrost_context *ctx,
|
|||
#if PAN_ARCH == 4
|
||||
if (ctx->pipe_framebuffer.nr_cbufs > 0 && !blend_shaders[0]) {
|
||||
/* Word 14: SFBD Blend Equation */
|
||||
STATIC_ASSERT(MALI_BLEND_EQUATION_LENGTH == 4);
|
||||
STATIC_ASSERT(pan_size(BLEND_EQUATION) == 4);
|
||||
rsd.opaque[14] = ctx->blend->equation[0];
|
||||
}
|
||||
#endif
|
||||
|
|
@ -569,7 +569,7 @@ panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
|
|||
panfrost_emit_frag_shader(ctx, (struct mali_renderer_state_packed *) xfer.cpu, blend_shaders);
|
||||
|
||||
#if PAN_ARCH >= 5
|
||||
panfrost_emit_blend(batch, xfer.cpu + MALI_RENDERER_STATE_LENGTH, blend_shaders);
|
||||
panfrost_emit_blend(batch, xfer.cpu + pan_size(RENDERER_STATE), blend_shaders);
|
||||
#else
|
||||
batch->draws |= PIPE_CLEAR_COLOR0;
|
||||
batch->resolve |= PIPE_CLEAR_COLOR0;
|
||||
|
|
@ -1276,7 +1276,7 @@ panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so,
|
|||
};
|
||||
|
||||
unsigned size =
|
||||
(PAN_ARCH <= 5 ? MALI_MIDGARD_TEXTURE_LENGTH : 0) +
|
||||
(PAN_ARCH <= 5 ? pan_size(MIDGARD_TEXTURE) : 0) +
|
||||
panfrost_estimate_texture_payload_size(device, &iview);
|
||||
|
||||
struct panfrost_ptr payload = pan_pool_alloc_aligned(&ctx->descs.base, size, 64);
|
||||
|
|
@ -1285,8 +1285,8 @@ panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so,
|
|||
void *tex = (PAN_ARCH >= 6) ? &so->bifrost_descriptor : payload.cpu;
|
||||
|
||||
if (PAN_ARCH <= 5) {
|
||||
payload.cpu += MALI_MIDGARD_TEXTURE_LENGTH;
|
||||
payload.gpu += MALI_MIDGARD_TEXTURE_LENGTH;
|
||||
payload.cpu += pan_size(MIDGARD_TEXTURE);
|
||||
payload.gpu += pan_size(MIDGARD_TEXTURE);
|
||||
}
|
||||
|
||||
panfrost_new_texture(device, &iview, tex, &payload);
|
||||
|
|
@ -1361,8 +1361,8 @@ panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
|
|||
if (!ctx->sampler_count[stage])
|
||||
return 0;
|
||||
|
||||
assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
|
||||
assert(MALI_BIFROST_SAMPLER_ALIGN == MALI_MIDGARD_SAMPLER_ALIGN);
|
||||
assert(pan_size(BIFROST_SAMPLER) == pan_size(MIDGARD_SAMPLER));
|
||||
assert(pan_alignment(BIFROST_SAMPLER) == pan_alignment(MIDGARD_SAMPLER));
|
||||
|
||||
struct panfrost_ptr T =
|
||||
pan_pool_alloc_desc_array(&batch->pool.base,
|
||||
|
|
@ -1524,7 +1524,7 @@ panfrost_emit_image_attribs(struct panfrost_batch *batch,
|
|||
|
||||
/* We need an empty attrib buf to stop the prefetching on Bifrost */
|
||||
#if PAN_ARCH >= 6
|
||||
pan_pack(bufs.cpu + ((buf_count - 1) * MALI_ATTRIBUTE_BUFFER_LENGTH),
|
||||
pan_pack(bufs.cpu + ((buf_count - 1) * pan_size(ATTRIBUTE_BUFFER)),
|
||||
ATTRIBUTE_BUFFER, cfg);
|
||||
#endif
|
||||
|
||||
|
|
@ -2083,7 +2083,7 @@ panfrost_emit_varying_descs(
|
|||
struct mali_attribute_packed *descs = T.cpu;
|
||||
out->producer = producer_count ? T.gpu : 0;
|
||||
out->consumer = consumer_count ? T.gpu +
|
||||
(MALI_ATTRIBUTE_LENGTH * producer_count) : 0;
|
||||
(pan_size(ATTRIBUTE) * producer_count) : 0;
|
||||
|
||||
/* Lay out the varyings. Must use producer to lay out, in order to
|
||||
* respect transform feedback precisions. */
|
||||
|
|
@ -2481,7 +2481,7 @@ panfrost_draw_emit_vertex(struct panfrost_batch *batch,
|
|||
{
|
||||
void *section =
|
||||
pan_section_ptr(job, COMPUTE_JOB, INVOCATION);
|
||||
memcpy(section, invocation_template, MALI_INVOCATION_LENGTH);
|
||||
memcpy(section, invocation_template, pan_size(INVOCATION));
|
||||
|
||||
pan_section_pack(job, COMPUTE_JOB, PARAMETERS, cfg) {
|
||||
cfg.job_task_split = 5;
|
||||
|
|
@ -2642,7 +2642,7 @@ panfrost_draw_emit_tiler(struct panfrost_batch *batch,
|
|||
struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
|
||||
|
||||
void *section = pan_section_ptr(job, TILER_JOB, INVOCATION);
|
||||
memcpy(section, invocation_template, MALI_INVOCATION_LENGTH);
|
||||
memcpy(section, invocation_template, pan_size(INVOCATION));
|
||||
|
||||
section = pan_section_ptr(job, TILER_JOB, PRIMITIVE);
|
||||
pan_pack(section, PRIMITIVE, cfg) {
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@
|
|||
const uint8_t *cl = 0; \
|
||||
{ \
|
||||
struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(addr); \
|
||||
cl = pandecode_fetch_gpu_mem(mapped_mem, addr, MALI_ ## T ## _LENGTH); \
|
||||
cl = pandecode_fetch_gpu_mem(mapped_mem, addr, pan_size(T)); \
|
||||
}
|
||||
|
||||
#define DUMP_ADDR(T, addr, ...) {\
|
||||
|
|
@ -243,7 +243,7 @@ pandecode_render_target(uint64_t gpu_va, unsigned job_no, bool is_bifrost, unsig
|
|||
pandecode_indent++;
|
||||
|
||||
for (int i = 0; i < (fb->render_target_count); i++) {
|
||||
mali_ptr rt_va = gpu_va + i * MALI_RENDER_TARGET_LENGTH;
|
||||
mali_ptr rt_va = gpu_va + i * pan_size(RENDER_TARGET);
|
||||
struct pandecode_mapped_memory *mem =
|
||||
pandecode_find_mapped_gpu_mem_containing(rt_va);
|
||||
const struct mali_render_target_packed *PANDECODE_PTR_VAR(rtp, mem, (mali_ptr) rt_va);
|
||||
|
|
@ -290,7 +290,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_bifros
|
|||
pandecode_sample_locations(fb, job_no);
|
||||
|
||||
pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, bparams);
|
||||
unsigned dcd_size = MALI_DRAW_LENGTH + MALI_DRAW_PADDING_LENGTH;
|
||||
unsigned dcd_size = pan_size(DRAW) + pan_size(DRAW_PADDING);
|
||||
struct pandecode_mapped_memory *dcdmem =
|
||||
pandecode_find_mapped_gpu_mem_containing(bparams.frame_shader_dcds);
|
||||
|
||||
|
|
@ -342,7 +342,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_bifros
|
|||
pandecode_indent--;
|
||||
pandecode_log("\n");
|
||||
|
||||
gpu_va += MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH;
|
||||
gpu_va += pan_size(MULTI_TARGET_FRAMEBUFFER);
|
||||
|
||||
info.has_extra = params.has_zs_crc_extension;
|
||||
|
||||
|
|
@ -353,7 +353,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_bifros
|
|||
DUMP_CL(ZS_CRC_EXTENSION, zs_crc, "ZS CRC Extension:\n");
|
||||
pandecode_log("\n");
|
||||
|
||||
gpu_va += MALI_ZS_CRC_EXTENSION_LENGTH;
|
||||
gpu_va += pan_size(ZS_CRC_EXTENSION);
|
||||
}
|
||||
|
||||
if (is_fragment)
|
||||
|
|
@ -378,13 +378,13 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem,
|
|||
MAP_ADDR(ATTRIBUTE_BUFFER, addr, cl);
|
||||
|
||||
for (int i = 0; i < count; ++i) {
|
||||
pan_unpack(cl + i * MALI_ATTRIBUTE_BUFFER_LENGTH, ATTRIBUTE_BUFFER, temp);
|
||||
pan_unpack(cl + i * pan_size(ATTRIBUTE_BUFFER), ATTRIBUTE_BUFFER, temp);
|
||||
DUMP_UNPACKED(ATTRIBUTE_BUFFER, temp, "%s:\n", prefix);
|
||||
|
||||
switch (temp.type) {
|
||||
case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR_WRITE_REDUCTION:
|
||||
case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR: {
|
||||
pan_unpack(cl + (i + 1) * MALI_ATTRIBUTE_BUFFER_LENGTH,
|
||||
pan_unpack(cl + (i + 1) * pan_size(ATTRIBUTE_BUFFER),
|
||||
ATTRIBUTE_BUFFER_CONTINUATION_NPOT, temp2);
|
||||
pan_print(pandecode_dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_NPOT,
|
||||
temp2, (pandecode_indent + 1) * 2);
|
||||
|
|
@ -393,7 +393,7 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem,
|
|||
}
|
||||
case MALI_ATTRIBUTE_TYPE_3D_LINEAR:
|
||||
case MALI_ATTRIBUTE_TYPE_3D_INTERLEAVED: {
|
||||
pan_unpack(cl + (i + 1) * MALI_ATTRIBUTE_BUFFER_CONTINUATION_3D_LENGTH,
|
||||
pan_unpack(cl + (i + 1) * pan_size(ATTRIBUTE_BUFFER_CONTINUATION_3D),
|
||||
ATTRIBUTE_BUFFER_CONTINUATION_3D, temp2);
|
||||
pan_print(pandecode_dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_3D,
|
||||
temp2, (pandecode_indent + 1) * 2);
|
||||
|
|
@ -412,7 +412,7 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem,
|
|||
static mali_ptr
|
||||
pandecode_bifrost_blend(void *descs, int job_no, int rt_no, mali_ptr frag_shader)
|
||||
{
|
||||
pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
|
||||
pan_unpack(descs + (rt_no * pan_size(BLEND)), BLEND, b);
|
||||
DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
|
||||
if (b.bifrost.internal.mode != MALI_BIFROST_BLEND_MODE_SHADER)
|
||||
return 0;
|
||||
|
|
@ -423,7 +423,7 @@ pandecode_bifrost_blend(void *descs, int job_no, int rt_no, mali_ptr frag_shader
|
|||
static mali_ptr
|
||||
pandecode_midgard_blend_mrt(void *descs, int job_no, int rt_no)
|
||||
{
|
||||
pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
|
||||
pan_unpack(descs + (rt_no * pan_size(BLEND)), BLEND, b);
|
||||
DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
|
||||
return b.midgard.blend_shader ? (b.midgard.shader_pc & ~0xf) : 0;
|
||||
}
|
||||
|
|
@ -433,7 +433,7 @@ pandecode_attribute_meta(int count, mali_ptr attribute, bool varying)
|
|||
{
|
||||
unsigned max = 0;
|
||||
|
||||
for (int i = 0; i < count; ++i, attribute += MALI_ATTRIBUTE_LENGTH) {
|
||||
for (int i = 0; i < count; ++i, attribute += pan_size(ATTRIBUTE)) {
|
||||
MAP_ADDR(ATTRIBUTE, attribute, cl);
|
||||
pan_unpack(cl, ATTRIBUTE, a);
|
||||
DUMP_UNPACKED(ATTRIBUTE, a, "%s:\n", varying ? "Varying" : "Attribute");
|
||||
|
|
@ -669,7 +669,7 @@ pandecode_texture(mali_ptr u,
|
|||
unsigned job_no, unsigned tex)
|
||||
{
|
||||
struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(u);
|
||||
const uint8_t *cl = pandecode_fetch_gpu_mem(mapped_mem, u, MALI_MIDGARD_TEXTURE_LENGTH);
|
||||
const uint8_t *cl = pandecode_fetch_gpu_mem(mapped_mem, u, pan_size(MIDGARD_TEXTURE));
|
||||
|
||||
pan_unpack(cl, MIDGARD_TEXTURE, temp);
|
||||
DUMP_UNPACKED(MIDGARD_TEXTURE, temp, "Texture:\n")
|
||||
|
|
@ -677,7 +677,7 @@ pandecode_texture(mali_ptr u,
|
|||
pandecode_indent++;
|
||||
unsigned nr_samples = temp.dimension == MALI_TEXTURE_DIMENSION_3D ?
|
||||
1 : temp.sample_count;
|
||||
pandecode_texture_payload(u + MALI_MIDGARD_TEXTURE_LENGTH,
|
||||
pandecode_texture_payload(u + pan_size(MIDGARD_TEXTURE),
|
||||
temp.dimension, temp.texel_ordering, temp.manual_stride,
|
||||
temp.levels, nr_samples, temp.array_size, mapped_mem);
|
||||
pandecode_indent--;
|
||||
|
|
@ -738,12 +738,12 @@ pandecode_textures(mali_ptr textures, unsigned texture_count, int job_no, bool i
|
|||
|
||||
if (is_bifrost) {
|
||||
const void *cl = pandecode_fetch_gpu_mem(mmem,
|
||||
textures, MALI_BIFROST_TEXTURE_LENGTH *
|
||||
textures, pan_size(BIFROST_TEXTURE) *
|
||||
texture_count);
|
||||
|
||||
for (unsigned tex = 0; tex < texture_count; ++tex) {
|
||||
pandecode_bifrost_texture(cl +
|
||||
MALI_BIFROST_TEXTURE_LENGTH * tex,
|
||||
pan_size(BIFROST_TEXTURE) * tex,
|
||||
job_no, tex);
|
||||
}
|
||||
} else {
|
||||
|
|
@ -776,9 +776,9 @@ pandecode_samplers(mali_ptr samplers, unsigned sampler_count, int job_no, bool i
|
|||
|
||||
for (int i = 0; i < sampler_count; ++i) {
|
||||
if (is_bifrost) {
|
||||
DUMP_ADDR(BIFROST_SAMPLER, samplers + (MALI_BIFROST_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
|
||||
DUMP_ADDR(BIFROST_SAMPLER, samplers + (pan_size(BIFROST_SAMPLER) * i), "Sampler %d:\n", i);
|
||||
} else {
|
||||
DUMP_ADDR(MIDGARD_SAMPLER, samplers + (MALI_MIDGARD_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
|
||||
DUMP_ADDR(MIDGARD_SAMPLER, samplers + (pan_size(MIDGARD_SAMPLER) * i), "Sampler %d:\n", i);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -813,7 +813,7 @@ pandecode_dcd(const struct MALI_DRAW *p,
|
|||
|
||||
if (p->state) {
|
||||
struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->state);
|
||||
uint32_t *cl = pandecode_fetch_gpu_mem(smem, p->state, MALI_RENDERER_STATE_LENGTH);
|
||||
uint32_t *cl = pandecode_fetch_gpu_mem(smem, p->state, pan_size(RENDERER_STATE));
|
||||
|
||||
pan_unpack(cl, RENDERER_STATE, state);
|
||||
|
||||
|
|
@ -853,7 +853,7 @@ pandecode_dcd(const struct MALI_DRAW *p,
|
|||
|
||||
if ((job_type == MALI_JOB_TYPE_TILER || job_type == MALI_JOB_TYPE_FRAGMENT) &&
|
||||
(is_bifrost || p->fbd & MALI_FBD_TAG_IS_MFBD)) {
|
||||
void* blend_base = ((void *) cl) + MALI_RENDERER_STATE_LENGTH;
|
||||
void* blend_base = ((void *) cl) + pan_size(RENDERER_STATE);
|
||||
|
||||
for (unsigned i = 0; i < fbd_info.rt_count; i++) {
|
||||
mali_ptr shader = 0;
|
||||
|
|
|
|||
|
|
@ -153,6 +153,9 @@ __gen_unpack_padded(const uint8_t *restrict cl, uint32_t start, uint32_t end)
|
|||
#define pan_print(fp, T, var, indent) \\
|
||||
PREFIX2(T, print)(fp, &(var), indent)
|
||||
|
||||
#define pan_size(T) PREFIX2(T, LENGTH)
|
||||
#define pan_alignment(T) PREFIX2(T, ALIGN)
|
||||
|
||||
#define pan_section_offset(A, S) \\
|
||||
PREFIX4(A, SECTION, S, OFFSET)
|
||||
|
||||
|
|
|
|||
|
|
@ -346,7 +346,7 @@ pan_blitter_emit_rsd(const struct panfrost_device *dev,
|
|||
return;
|
||||
|
||||
for (unsigned i = 0; i < MAX2(rt_count, 1); ++i) {
|
||||
void *dest = out + MALI_RENDERER_STATE_LENGTH + MALI_BLEND_LENGTH * i;
|
||||
void *dest = out + pan_size(RENDERER_STATE) + pan_size(BLEND) * i;
|
||||
const struct pan_image_view *rt_view = rts ? rts[i] : NULL;
|
||||
mali_ptr blend_shader = blend_shaders ? blend_shaders[i] : 0;
|
||||
|
||||
|
|
@ -883,7 +883,7 @@ pan_blitter_emit_varying(struct pan_pool *pool,
|
|||
}
|
||||
|
||||
if (padding_buffer) {
|
||||
pan_pack(varying_buffer.cpu + MALI_ATTRIBUTE_BUFFER_LENGTH,
|
||||
pan_pack(varying_buffer.cpu + pan_size(ATTRIBUTE_BUFFER),
|
||||
ATTRIBUTE_BUFFER, cfg);
|
||||
}
|
||||
|
||||
|
|
@ -939,12 +939,12 @@ pan_blitter_emit_bifrost_textures(struct pan_pool *pool,
|
|||
pan_pool_alloc_desc_array(pool, tex_count, BIFROST_TEXTURE);
|
||||
|
||||
for (unsigned i = 0; i < tex_count; i++) {
|
||||
void *texture = textures.cpu + (MALI_BIFROST_TEXTURE_LENGTH * i);
|
||||
void *texture = textures.cpu + (pan_size(BIFROST_TEXTURE) * i);
|
||||
size_t payload_size =
|
||||
panfrost_estimate_texture_payload_size(pool->dev, views[i]);
|
||||
struct panfrost_ptr surfaces =
|
||||
pan_pool_alloc_aligned(pool, payload_size,
|
||||
MALI_SURFACE_WITH_STRIDE_ALIGN);
|
||||
pan_alignment(SURFACE_WITH_STRIDE));
|
||||
|
||||
panfrost_new_texture(pool->dev, views[i], texture, &surfaces);
|
||||
}
|
||||
|
|
@ -960,13 +960,13 @@ pan_blitter_emit_midgard_textures(struct pan_pool *pool,
|
|||
mali_ptr textures[8] = { 0 };
|
||||
|
||||
for (unsigned i = 0; i < tex_count; i++) {
|
||||
size_t sz = MALI_MIDGARD_TEXTURE_LENGTH +
|
||||
size_t sz = pan_size(MIDGARD_TEXTURE) +
|
||||
panfrost_estimate_texture_payload_size(pool->dev, views[i]);
|
||||
struct panfrost_ptr texture =
|
||||
pan_pool_alloc_aligned(pool, sz, MALI_MIDGARD_TEXTURE_ALIGN);
|
||||
pan_pool_alloc_aligned(pool, sz, pan_alignment(MIDGARD_TEXTURE));
|
||||
struct panfrost_ptr surfaces = {
|
||||
.cpu = texture.cpu + MALI_MIDGARD_TEXTURE_LENGTH,
|
||||
.gpu = texture.gpu + MALI_MIDGARD_TEXTURE_LENGTH,
|
||||
.cpu = texture.cpu + pan_size(MIDGARD_TEXTURE),
|
||||
.gpu = texture.gpu + pan_size(MIDGARD_TEXTURE),
|
||||
};
|
||||
|
||||
panfrost_new_texture(pool->dev, views[i], texture.cpu, &surfaces);
|
||||
|
|
@ -1247,7 +1247,7 @@ pan_preload_emit_bifrost_pre_frame_dcd(struct pan_pool *desc_pool,
|
|||
pan_preload_fb_bifrost_alloc_pre_post_dcds(desc_pool, fb);
|
||||
assert(fb->bifrost.pre_post.dcds.cpu);
|
||||
void *dcd = fb->bifrost.pre_post.dcds.cpu +
|
||||
(dcd_idx * (MALI_DRAW_LENGTH + MALI_DRAW_PADDING_LENGTH));
|
||||
(dcd_idx * (pan_size(DRAW) + pan_size(DRAW_PADDING)));
|
||||
|
||||
int crc_rt = pan_select_crc_rt(dev, fb);
|
||||
|
||||
|
|
|
|||
|
|
@ -674,7 +674,7 @@ pan_emit_mfbd(const struct panfrost_device *dev,
|
|||
{
|
||||
unsigned tags = MALI_FBD_TAG_IS_MFBD;
|
||||
void *fbd = out;
|
||||
void *rtd = out + MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH;
|
||||
void *rtd = out + pan_size(MULTI_TARGET_FRAMEBUFFER);
|
||||
|
||||
if (pan_is_bifrost(dev)) {
|
||||
pan_emit_bifrost_mfbd_params(dev, fb, fbd);
|
||||
|
|
@ -738,8 +738,8 @@ pan_emit_mfbd(const struct panfrost_device *dev,
|
|||
|
||||
if (has_zs_crc_ext) {
|
||||
pan_emit_zs_crc_ext(dev, fb, crc_rt,
|
||||
out + MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH);
|
||||
rtd += MALI_ZS_CRC_EXTENSION_LENGTH;
|
||||
out + pan_size(MULTI_TARGET_FRAMEBUFFER));
|
||||
rtd += pan_size(ZS_CRC_EXTENSION);
|
||||
tags |= MALI_FBD_TAG_HAS_ZS_RT;
|
||||
}
|
||||
|
||||
|
|
@ -747,7 +747,7 @@ pan_emit_mfbd(const struct panfrost_device *dev,
|
|||
unsigned cbuf_offset = 0;
|
||||
for (unsigned i = 0; i < rt_count; i++) {
|
||||
pan_emit_rt(dev, fb, i, cbuf_offset, rtd);
|
||||
rtd += MALI_RENDER_TARGET_LENGTH;
|
||||
rtd += pan_size(RENDER_TARGET);
|
||||
if (!fb->rts[i].view)
|
||||
continue;
|
||||
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ static mali_ptr
|
|||
get_tls(const struct panfrost_device *dev)
|
||||
{
|
||||
return dev->indirect_dispatch.descs->ptr.gpu +
|
||||
MALI_RENDERER_STATE_LENGTH;
|
||||
pan_size(RENDERER_STATE);
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
|
|
@ -244,8 +244,8 @@ pan_indirect_dispatch_init(struct panfrost_device *dev)
|
|||
dev->indirect_dispatch.push = shader_info.push;
|
||||
dev->indirect_dispatch.descs =
|
||||
panfrost_bo_create(dev,
|
||||
MALI_RENDERER_STATE_LENGTH +
|
||||
MALI_LOCAL_STORAGE_LENGTH,
|
||||
pan_size(RENDERER_STATE) +
|
||||
pan_size(LOCAL_STORAGE),
|
||||
0, "Indirect dispatch descriptors");
|
||||
|
||||
mali_ptr address = dev->indirect_dispatch.bin->ptr.gpu;
|
||||
|
|
@ -258,7 +258,7 @@ pan_indirect_dispatch_init(struct panfrost_device *dev)
|
|||
}
|
||||
|
||||
void *tsd = dev->indirect_dispatch.descs->ptr.cpu +
|
||||
MALI_RENDERER_STATE_LENGTH;
|
||||
pan_size(RENDERER_STATE);
|
||||
pan_pack(tsd, LOCAL_STORAGE, ls) {
|
||||
ls.wls_instances = MALI_LOCAL_STORAGE_NO_WORKGROUP_MEM;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -598,11 +598,11 @@ update_vertex_attribs(struct indirect_draw_shader_builder *builder)
|
|||
nir_ssa_def *attrib_buf_ptr =
|
||||
get_address(b, builder->attribs.attrib_bufs,
|
||||
nir_imul_imm(b, attrib_idx,
|
||||
2 * MALI_ATTRIBUTE_BUFFER_LENGTH));
|
||||
2 * pan_size(ATTRIBUTE_BUFFER)));
|
||||
nir_ssa_def *attrib_ptr =
|
||||
get_address(b, builder->attribs.attribs,
|
||||
nir_imul_imm(b, attrib_idx,
|
||||
MALI_ATTRIBUTE_LENGTH));
|
||||
pan_size(ATTRIBUTE)));
|
||||
|
||||
nir_ssa_def *r_e, *d;
|
||||
|
||||
|
|
@ -723,19 +723,19 @@ update_varyings(struct indirect_draw_shader_builder *builder)
|
|||
nir_ssa_def *buf_ptr =
|
||||
get_address_imm(b, builder->varyings.varying_bufs,
|
||||
PAN_VARY_GENERAL *
|
||||
MALI_ATTRIBUTE_BUFFER_LENGTH);
|
||||
pan_size(ATTRIBUTE_BUFFER));
|
||||
update_varying_buf(builder, buf_ptr, vertex_count);
|
||||
|
||||
buf_ptr = get_address_imm(b, builder->varyings.varying_bufs,
|
||||
PAN_VARY_POSITION *
|
||||
MALI_ATTRIBUTE_BUFFER_LENGTH);
|
||||
pan_size(ATTRIBUTE_BUFFER));
|
||||
builder->varyings.pos_ptr =
|
||||
update_varying_buf(builder, buf_ptr, vertex_count);
|
||||
|
||||
if (builder->flags & PAN_INDIRECT_DRAW_HAS_PSIZ) {
|
||||
buf_ptr = get_address_imm(b, builder->varyings.varying_bufs,
|
||||
PAN_VARY_PSIZ *
|
||||
MALI_ATTRIBUTE_BUFFER_LENGTH);
|
||||
pan_size(ATTRIBUTE_BUFFER));
|
||||
builder->varyings.psiz_ptr =
|
||||
update_varying_buf(builder, buf_ptr, vertex_count);
|
||||
}
|
||||
|
|
@ -1091,7 +1091,7 @@ create_indirect_draw_shader(struct panfrost_device *dev,
|
|||
struct pan_indirect_draw_shader *draw_shader =
|
||||
&dev->indirect_draw_shaders.shaders[shader_id];
|
||||
void *state = dev->indirect_draw_shaders.states->ptr.cpu +
|
||||
(shader_id * MALI_RENDERER_STATE_LENGTH);
|
||||
(shader_id * pan_size(RENDERER_STATE));
|
||||
|
||||
pthread_mutex_lock(&dev->indirect_draw_shaders.lock);
|
||||
if (!draw_shader->rsd) {
|
||||
|
|
@ -1111,7 +1111,7 @@ create_indirect_draw_shader(struct panfrost_device *dev,
|
|||
|
||||
draw_shader->push = shader_info.push;
|
||||
draw_shader->rsd = dev->indirect_draw_shaders.states->ptr.gpu +
|
||||
(shader_id * MALI_RENDERER_STATE_LENGTH);
|
||||
(shader_id * pan_size(RENDERER_STATE));
|
||||
}
|
||||
pthread_mutex_unlock(&dev->indirect_draw_shaders.lock);
|
||||
|
||||
|
|
@ -1139,7 +1139,7 @@ static mali_ptr
|
|||
get_tls(const struct panfrost_device *dev)
|
||||
{
|
||||
return dev->indirect_draw_shaders.states->ptr.gpu +
|
||||
(PAN_INDIRECT_DRAW_NUM_SHADERS * MALI_RENDERER_STATE_LENGTH);
|
||||
(PAN_INDIRECT_DRAW_NUM_SHADERS * pan_size(RENDERER_STATE));
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
|
|
@ -1189,15 +1189,15 @@ panfrost_indirect_draw_alloc_deps(struct panfrost_device *dev)
|
|||
goto out;
|
||||
|
||||
unsigned state_bo_size = (PAN_INDIRECT_DRAW_NUM_SHADERS *
|
||||
MALI_RENDERER_STATE_LENGTH) +
|
||||
MALI_LOCAL_STORAGE_LENGTH;
|
||||
pan_size(RENDERER_STATE)) +
|
||||
pan_size(LOCAL_STORAGE);
|
||||
|
||||
dev->indirect_draw_shaders.states =
|
||||
panfrost_bo_create(dev, state_bo_size, 0, "Indirect draw states");
|
||||
|
||||
/* Prepare the thread storage descriptor now since it's invariant. */
|
||||
void *tsd = dev->indirect_draw_shaders.states->ptr.cpu +
|
||||
(PAN_INDIRECT_DRAW_NUM_SHADERS * MALI_RENDERER_STATE_LENGTH);
|
||||
(PAN_INDIRECT_DRAW_NUM_SHADERS * pan_size(RENDERER_STATE));
|
||||
pan_pack(tsd, LOCAL_STORAGE, ls) {
|
||||
ls.wls_instances = MALI_LOCAL_STORAGE_NO_WORKGROUP_MEM;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -93,8 +93,8 @@ struct pan_desc_alloc_info {
|
|||
|
||||
#define PAN_DESC_ARRAY(count, name) \
|
||||
{ \
|
||||
.size = MALI_ ## name ## _LENGTH, \
|
||||
.align = MALI_ ## name ## _ALIGN, \
|
||||
.size = pan_size(name), \
|
||||
.align = pan_alignment(name), \
|
||||
.nelems = count, \
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -391,7 +391,7 @@ panfrost_emit_texture_payload(const struct panfrost_device *dev,
|
|||
pan_pack(payload, SURFACE, cfg) {
|
||||
cfg.pointer = pointer;
|
||||
}
|
||||
payload += MALI_SURFACE_LENGTH;
|
||||
payload += pan_size(SURFACE);
|
||||
} else {
|
||||
pan_pack(payload, SURFACE_WITH_STRIDE, cfg) {
|
||||
cfg.pointer = pointer;
|
||||
|
|
@ -399,7 +399,7 @@ panfrost_emit_texture_payload(const struct panfrost_device *dev,
|
|||
&cfg.row_stride,
|
||||
&cfg.surface_stride);
|
||||
}
|
||||
payload += MALI_SURFACE_WITH_STRIDE_LENGTH;
|
||||
payload += pan_size(SURFACE_WITH_STRIDE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1035,7 +1035,7 @@ panvk_draw_prepare_fs_rsd(struct panvk_cmd_buffer *cmdbuf,
|
|||
pan_merge(rsd_dyn, pipeline->fs.rsd_template, RENDERER_STATE);
|
||||
memcpy(rsd.cpu, &rsd_dyn, sizeof(rsd_dyn));
|
||||
|
||||
void *bd = rsd.cpu + MALI_RENDERER_STATE_LENGTH;
|
||||
void *bd = rsd.cpu + pan_size(RENDERER_STATE);
|
||||
for (unsigned i = 0; i < pipeline->blend.state.rt_count; i++) {
|
||||
if (pipeline->blend.constant[i].index != ~0) {
|
||||
struct mali_blend_packed bd_dyn;
|
||||
|
|
@ -1046,7 +1046,7 @@ panvk_draw_prepare_fs_rsd(struct panvk_cmd_buffer *cmdbuf,
|
|||
pan_merge(bd_dyn, pipeline->blend.bd_template[i], BLEND);
|
||||
memcpy(bd, &bd_dyn, sizeof(bd_dyn));
|
||||
}
|
||||
bd += MALI_BLEND_LENGTH;
|
||||
bd += pan_size(BLEND);
|
||||
}
|
||||
|
||||
cmdbuf->state.fs_rsd = rsd.gpu;
|
||||
|
|
|
|||
|
|
@ -282,7 +282,7 @@ panvk_emit_vertex_job(const struct panvk_device *dev,
|
|||
const struct panfrost_device *pdev = &dev->physical_device->pdev;
|
||||
void *section = pan_section_ptr(job, COMPUTE_JOB, INVOCATION);
|
||||
|
||||
memcpy(section, &draw->invocation, MALI_INVOCATION_LENGTH);
|
||||
memcpy(section, &draw->invocation, pan_size(INVOCATION));
|
||||
|
||||
pan_section_pack(job, COMPUTE_JOB, PARAMETERS, cfg) {
|
||||
cfg.job_task_split = 5;
|
||||
|
|
@ -321,7 +321,7 @@ panvk_emit_tiler_job(const struct panvk_device *dev,
|
|||
pan_section_ptr(job, BIFROST_TILER_JOB, INVOCATION) :
|
||||
pan_section_ptr(job, MIDGARD_TILER_JOB, INVOCATION);
|
||||
|
||||
memcpy(section, &draw->invocation, MALI_INVOCATION_LENGTH);
|
||||
memcpy(section, &draw->invocation, pan_size(INVOCATION));
|
||||
|
||||
section = pan_is_bifrost(pdev) ?
|
||||
pan_section_ptr(job, BIFROST_TILER_JOB, PRIMITIVE) :
|
||||
|
|
@ -770,7 +770,7 @@ panvk_emit_bifrost_tiler_context(const struct panvk_device *dev,
|
|||
{
|
||||
const struct panfrost_device *pdev = &dev->physical_device->pdev;
|
||||
|
||||
pan_pack(descs->cpu + MALI_BIFROST_TILER_LENGTH, BIFROST_TILER_HEAP, cfg) {
|
||||
pan_pack(descs->cpu + pan_size(BIFROST_TILER), BIFROST_TILER_HEAP, cfg) {
|
||||
cfg.size = pdev->tiler_heap->size;
|
||||
cfg.base = pdev->tiler_heap->ptr.gpu;
|
||||
cfg.bottom = pdev->tiler_heap->ptr.gpu;
|
||||
|
|
@ -781,7 +781,7 @@ panvk_emit_bifrost_tiler_context(const struct panvk_device *dev,
|
|||
cfg.hierarchy_mask = 0x28;
|
||||
cfg.fb_width = width;
|
||||
cfg.fb_height = height;
|
||||
cfg.heap = descs->gpu + MALI_BIFROST_TILER_LENGTH;
|
||||
cfg.heap = descs->gpu + pan_size(BIFROST_TILER);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -372,8 +372,8 @@ panvk_CreateImageView(VkDevice _device,
|
|||
|
||||
unsigned surf_descs_offset = 0;
|
||||
if (!pan_is_bifrost(pdev)) {
|
||||
bo_size += MALI_MIDGARD_TEXTURE_LENGTH;
|
||||
surf_descs_offset = MALI_MIDGARD_TEXTURE_LENGTH;
|
||||
bo_size += pan_size(MIDGARD_TEXTURE);
|
||||
surf_descs_offset = pan_size(MIDGARD_TEXTURE);
|
||||
}
|
||||
|
||||
view->bo = panfrost_bo_create(pdev, bo_size, 0, "Texture descriptor");
|
||||
|
|
|
|||
|
|
@ -258,7 +258,7 @@ panvk_meta_clear_attachments_emit_rsd(struct panfrost_device *pdev,
|
|||
}
|
||||
}
|
||||
|
||||
pan_pack(rsd_ptr.cpu + MALI_RENDERER_STATE_LENGTH, BLEND, cfg) {
|
||||
pan_pack(rsd_ptr.cpu + pan_size(RENDERER_STATE), BLEND, cfg) {
|
||||
cfg.round_to_fb_precision = true;
|
||||
cfg.load_destination = false;
|
||||
if (pan_is_bifrost(pdev)) {
|
||||
|
|
|
|||
|
|
@ -203,18 +203,18 @@ panvk_pipeline_builder_alloc_static_state_bo(struct panvk_pipeline_builder *buil
|
|||
if (pipeline->fs.dynamic_rsd && i == MESA_SHADER_FRAGMENT)
|
||||
continue;
|
||||
|
||||
bo_size = ALIGN_POT(bo_size, MALI_RENDERER_STATE_ALIGN);
|
||||
bo_size = ALIGN_POT(bo_size, pan_alignment(RENDERER_STATE));
|
||||
builder->stages[i].rsd_offset = bo_size;
|
||||
bo_size += MALI_RENDERER_STATE_LENGTH;
|
||||
bo_size += pan_size(RENDERER_STATE);
|
||||
if (i == MESA_SHADER_FRAGMENT)
|
||||
bo_size += MALI_BLEND_LENGTH * pipeline->blend.state.rt_count;
|
||||
bo_size += pan_size(BLEND) * pipeline->blend.state.rt_count;
|
||||
}
|
||||
|
||||
if (panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_VIEWPORT) &&
|
||||
panvk_pipeline_static_state(pipeline, VK_DYNAMIC_STATE_SCISSOR)) {
|
||||
bo_size = ALIGN_POT(bo_size, MALI_VIEWPORT_ALIGN);
|
||||
bo_size = ALIGN_POT(bo_size, pan_alignment(VIEWPORT));
|
||||
builder->vpd_offset = bo_size;
|
||||
bo_size += MALI_VIEWPORT_LENGTH;
|
||||
bo_size += pan_size(VIEWPORT);
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
|
||||
|
|
@ -325,12 +325,12 @@ panvk_pipeline_builder_init_shaders(struct panvk_pipeline_builder *builder,
|
|||
if (i != MESA_SHADER_FRAGMENT) {
|
||||
panvk_emit_non_fs_rsd(builder->device, &shader->info, shader_ptr, rsd);
|
||||
} else if (!pipeline->fs.dynamic_rsd) {
|
||||
void *bd = rsd + MALI_RENDERER_STATE_LENGTH;
|
||||
void *bd = rsd + pan_size(RENDERER_STATE);
|
||||
|
||||
panvk_emit_base_fs_rsd(builder->device, pipeline, rsd);
|
||||
for (unsigned rt = 0; rt < MAX2(pipeline->blend.state.rt_count, 1); rt++) {
|
||||
panvk_emit_blend(builder->device, pipeline, rt, bd);
|
||||
bd += MALI_BLEND_LENGTH;
|
||||
bd += pan_size(BLEND);
|
||||
}
|
||||
} else {
|
||||
gpu_rsd = 0;
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue