mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 00:58:05 +02:00
panfrost: Kill the mali_ptr typedef
mali_ptr is no shorter than uint64_t, and we already have a few places where we use uint64_t to store GPU virtual addresses in src/panfrost, so let's just kill this typedef and use uint64_t types everywhere. That's one step towards panfrost-job.h removal. Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com> Reviewed-by: Mary Guillemard <mary.guillemard@collabora.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32899>
This commit is contained in:
parent
701600fb11
commit
63fc0a2de9
56 changed files with 330 additions and 335 deletions
|
|
@ -56,14 +56,14 @@ struct pan_afbc_block_info {
|
|||
};
|
||||
|
||||
struct panfrost_afbc_size_info {
|
||||
mali_ptr src;
|
||||
mali_ptr metadata;
|
||||
uint64_t src;
|
||||
uint64_t metadata;
|
||||
} PACKED;
|
||||
|
||||
struct panfrost_afbc_pack_info {
|
||||
mali_ptr src;
|
||||
mali_ptr dst;
|
||||
mali_ptr metadata;
|
||||
uint64_t src;
|
||||
uint64_t dst;
|
||||
uint64_t metadata;
|
||||
uint32_t header_size;
|
||||
uint32_t src_stride;
|
||||
uint32_t dst_stride;
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ struct panfrost_blend_state {
|
|||
unsigned enabled_mask : PIPE_MAX_COLOR_BUFS;
|
||||
};
|
||||
|
||||
mali_ptr panfrost_get_blend(struct panfrost_batch *batch, unsigned rt,
|
||||
uint64_t panfrost_get_blend(struct panfrost_batch *batch, unsigned rt,
|
||||
struct panfrost_bo **bo, unsigned *shader_offset);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@
|
|||
|
||||
#include <time.h>
|
||||
#include "util/list.h"
|
||||
#include "panfrost-job.h"
|
||||
|
||||
#include "pan_pool.h"
|
||||
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ struct panfrost_sampler_view {
|
|||
struct pipe_sampler_view base;
|
||||
struct panfrost_pool_ref state;
|
||||
struct mali_texture_packed bifrost_descriptor;
|
||||
mali_ptr texture_bo;
|
||||
uint64_t texture_bo;
|
||||
uint64_t texture_size;
|
||||
uint64_t modifier;
|
||||
|
||||
|
|
@ -269,7 +269,7 @@ panfrost_create_sampler_state(struct pipe_context *pctx,
|
|||
*/
|
||||
static void
|
||||
panfrost_get_blend_shaders(struct panfrost_batch *batch,
|
||||
mali_ptr *blend_shaders)
|
||||
uint64_t *blend_shaders)
|
||||
{
|
||||
unsigned shader_offset = 0;
|
||||
struct panfrost_bo *shader_bo = NULL;
|
||||
|
|
@ -303,7 +303,7 @@ pack_blend_constant(enum pipe_format format, float cons)
|
|||
|
||||
static void
|
||||
panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
|
||||
mali_ptr *blend_shaders)
|
||||
uint64_t *blend_shaders)
|
||||
{
|
||||
unsigned rt_count = batch->key.nr_cbufs;
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -424,7 +424,7 @@ panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
|
|||
}
|
||||
#endif
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_compute_shader_meta(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type stage)
|
||||
{
|
||||
|
|
@ -480,7 +480,7 @@ pan_merge_empty_fs(struct mali_renderer_state_packed *rsd)
|
|||
}
|
||||
|
||||
static void
|
||||
panfrost_prepare_fs_state(struct panfrost_context *ctx, mali_ptr *blend_shaders,
|
||||
panfrost_prepare_fs_state(struct panfrost_context *ctx, uint64_t *blend_shaders,
|
||||
struct mali_renderer_state_packed *rsd)
|
||||
{
|
||||
struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
|
||||
|
|
@ -615,7 +615,7 @@ panfrost_prepare_fs_state(struct panfrost_context *ctx, mali_ptr *blend_shaders,
|
|||
static void
|
||||
panfrost_emit_frag_shader(struct panfrost_context *ctx,
|
||||
struct mali_renderer_state_packed *fragmeta,
|
||||
mali_ptr *blend_shaders)
|
||||
uint64_t *blend_shaders)
|
||||
{
|
||||
const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
|
||||
const struct panfrost_rasterizer *rast = ctx->rasterizer;
|
||||
|
|
@ -664,7 +664,7 @@ panfrost_emit_frag_shader(struct panfrost_context *ctx,
|
|||
memcpy(fragmeta, &rsd, sizeof(rsd));
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -688,7 +688,7 @@ panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
|
|||
if (!xfer.cpu)
|
||||
return 0;
|
||||
|
||||
mali_ptr blend_shaders[PIPE_MAX_COLOR_BUFS] = {0};
|
||||
uint64_t blend_shaders[PIPE_MAX_COLOR_BUFS] = {0};
|
||||
panfrost_get_blend_shaders(batch, blend_shaders);
|
||||
|
||||
panfrost_emit_frag_shader(ctx, (struct mali_renderer_state_packed *)xfer.cpu,
|
||||
|
|
@ -703,7 +703,7 @@ panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
|
|||
}
|
||||
#endif
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_viewport(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -788,7 +788,7 @@ panfrost_emit_viewport(struct panfrost_batch *batch)
|
|||
* interactions are dynamic state. Pack only the dynamic state here and OR
|
||||
* together.
|
||||
*/
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_depth_stencil(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -833,7 +833,7 @@ panfrost_emit_depth_stencil(struct panfrost_batch *batch)
|
|||
* Emit Valhall blend descriptor at draw-time. The descriptor itself is shared
|
||||
* with Bifrost, but the container data structure is simplified.
|
||||
*/
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_blend_valhall(struct panfrost_batch *batch)
|
||||
{
|
||||
unsigned rt_count = MAX2(batch->key.nr_cbufs, 1);
|
||||
|
|
@ -844,7 +844,7 @@ panfrost_emit_blend_valhall(struct panfrost_batch *batch)
|
|||
if (!T.cpu)
|
||||
return 0;
|
||||
|
||||
mali_ptr blend_shaders[PIPE_MAX_COLOR_BUFS] = {0};
|
||||
uint64_t blend_shaders[PIPE_MAX_COLOR_BUFS] = {0};
|
||||
panfrost_get_blend_shaders(batch, blend_shaders);
|
||||
|
||||
panfrost_emit_blend(batch, T.cpu, blend_shaders);
|
||||
|
|
@ -863,7 +863,7 @@ panfrost_emit_blend_valhall(struct panfrost_batch *batch)
|
|||
/**
|
||||
* Emit Valhall buffer descriptors for bound vertex buffers at draw-time.
|
||||
*/
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_vertex_buffers(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -896,7 +896,7 @@ panfrost_emit_vertex_buffers(struct panfrost_batch *batch)
|
|||
return T.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_vertex_data(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -910,7 +910,7 @@ panfrost_emit_vertex_data(struct panfrost_batch *batch)
|
|||
static void panfrost_update_sampler_view(struct panfrost_sampler_view *view,
|
||||
struct pipe_context *pctx);
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_images(struct panfrost_batch *batch, enum pipe_shader_type stage)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -950,7 +950,7 @@ panfrost_emit_images(struct panfrost_batch *batch, enum pipe_shader_type stage)
|
|||
}
|
||||
#endif
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type st,
|
||||
struct panfrost_constant_buffer *buf,
|
||||
|
|
@ -1212,7 +1212,7 @@ panfrost_xfb_offset(unsigned stride, struct pipe_stream_output_target *target)
|
|||
|
||||
static void
|
||||
panfrost_upload_sysvals(struct panfrost_batch *batch, void *ptr_cpu,
|
||||
mali_ptr ptr_gpu, struct panfrost_compiled_shader *ss,
|
||||
uint64_t ptr_gpu, struct panfrost_compiled_shader *ss,
|
||||
enum pipe_shader_type st)
|
||||
{
|
||||
struct sysval_uniform *uniforms = ptr_cpu;
|
||||
|
|
@ -1344,7 +1344,7 @@ panfrost_map_constant_buffer_cpu(struct panfrost_context *ctx,
|
|||
* structure, sized in terms of entries.
|
||||
*/
|
||||
static void
|
||||
panfrost_emit_ubo(void *base, unsigned index, mali_ptr address, size_t size)
|
||||
panfrost_emit_ubo(void *base, unsigned index, uint64_t address, size_t size)
|
||||
{
|
||||
#if PAN_ARCH >= 9
|
||||
struct mali_buffer_packed *out = base;
|
||||
|
|
@ -1368,7 +1368,7 @@ panfrost_emit_ubo(void *base, unsigned index, mali_ptr address, size_t size)
|
|||
}
|
||||
|
||||
#if PAN_ARCH >= 9
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_ssbos(struct panfrost_batch *batch, enum pipe_shader_type st)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -1402,10 +1402,10 @@ panfrost_emit_ssbos(struct panfrost_batch *batch, enum pipe_shader_type st)
|
|||
}
|
||||
#endif
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_const_buf(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type stage, unsigned *buffer_count,
|
||||
mali_ptr *push_constants, unsigned *pushed_words)
|
||||
uint64_t *push_constants, unsigned *pushed_words)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];
|
||||
|
|
@ -1460,7 +1460,7 @@ panfrost_emit_const_buf(struct panfrost_batch *batch,
|
|||
|
||||
u_foreach_bit(ubo, ss->info.ubo_mask & buf->enabled_mask) {
|
||||
size_t usz = buf->cb[ubo].buffer_size;
|
||||
mali_ptr address = 0;
|
||||
uint64_t address = 0;
|
||||
|
||||
if (usz > 0) {
|
||||
address = panfrost_map_constant_buffer_gpu(batch, stage, buf, ubo);
|
||||
|
|
@ -1493,7 +1493,7 @@ panfrost_emit_const_buf(struct panfrost_batch *batch,
|
|||
unsigned sysval_comp = (src.offset % 16) / 4;
|
||||
unsigned sysval_type =
|
||||
PAN_SYSVAL_TYPE(ss->sysvals.sysvals[sysval_idx]);
|
||||
mali_ptr ptr = push_transfer.gpu + (4 * i);
|
||||
uint64_t ptr = push_transfer.gpu + (4 * i);
|
||||
|
||||
if (sysval_type == PAN_SYSVAL_NUM_WORK_GROUPS &&
|
||||
sysval_comp < ARRAY_SIZE(batch->num_wg_sysval))
|
||||
|
|
@ -1540,7 +1540,7 @@ panfrost_choose_wls_instance_count(const struct pipe_grid_info *grid)
|
|||
}
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_shared_memory(struct panfrost_batch *batch,
|
||||
const struct pipe_grid_info *grid)
|
||||
{
|
||||
|
|
@ -1576,12 +1576,12 @@ panfrost_emit_shared_memory(struct panfrost_batch *batch,
|
|||
}
|
||||
|
||||
#if PAN_ARCH <= 5
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_get_tex_desc(struct panfrost_batch *batch, enum pipe_shader_type st,
|
||||
struct panfrost_sampler_view *view)
|
||||
{
|
||||
if (!view)
|
||||
return (mali_ptr)0;
|
||||
return (uint64_t)0;
|
||||
|
||||
struct pipe_sampler_view *pview = &view->base;
|
||||
struct panfrost_resource *rsrc = pan_resource(pview->texture);
|
||||
|
|
@ -1754,7 +1754,7 @@ panfrost_emit_null_texture(struct mali_texture_packed *out)
|
|||
}
|
||||
#endif
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type stage)
|
||||
{
|
||||
|
|
@ -1823,7 +1823,7 @@ panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
|
|||
#endif
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_upload_wa_sampler(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_ptr T = pan_pool_alloc_desc(&batch->pool.base, SAMPLER);
|
||||
|
|
@ -1832,7 +1832,7 @@ panfrost_upload_wa_sampler(struct panfrost_batch *batch)
|
|||
return T.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type stage)
|
||||
{
|
||||
|
|
@ -1982,8 +1982,8 @@ emit_image_bufs(struct panfrost_batch *batch, enum pipe_shader_type shader,
|
|||
}
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
panfrost_emit_image_attribs(struct panfrost_batch *batch, mali_ptr *buffers,
|
||||
static uint64_t
|
||||
panfrost_emit_image_attribs(struct panfrost_batch *batch, uint64_t *buffers,
|
||||
enum pipe_shader_type type)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -2018,8 +2018,8 @@ panfrost_emit_image_attribs(struct panfrost_batch *batch, mali_ptr *buffers,
|
|||
return attribs.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
panfrost_emit_vertex_data(struct panfrost_batch *batch, mali_ptr *buffers)
|
||||
static uint64_t
|
||||
panfrost_emit_vertex_data(struct panfrost_batch *batch, uint64_t *buffers)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
struct panfrost_vertex_state *so = ctx->vertex;
|
||||
|
|
@ -2088,8 +2088,8 @@ panfrost_emit_vertex_data(struct panfrost_batch *batch, mali_ptr *buffers)
|
|||
panfrost_batch_read_rsrc(batch, rsrc, PIPE_SHADER_VERTEX);
|
||||
|
||||
/* Mask off lower bits, see offset fixup below */
|
||||
mali_ptr raw_addr = rsrc->image.data.base + buf->buffer_offset;
|
||||
mali_ptr addr = raw_addr & ~63;
|
||||
uint64_t raw_addr = rsrc->image.data.base + buf->buffer_offset;
|
||||
uint64_t addr = raw_addr & ~63;
|
||||
|
||||
/* Since we advanced the base pointer, we shrink the buffer
|
||||
* size, but add the offset we subtracted */
|
||||
|
|
@ -2237,13 +2237,13 @@ panfrost_emit_vertex_data(struct panfrost_batch *batch, mali_ptr *buffers)
|
|||
return T.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
panfrost_emit_varyings(struct panfrost_batch *batch,
|
||||
struct mali_attribute_buffer_packed *slot,
|
||||
unsigned stride, unsigned count)
|
||||
{
|
||||
unsigned size = stride * count;
|
||||
mali_ptr ptr =
|
||||
uint64_t ptr =
|
||||
pan_pool_alloc_aligned(&batch->invisible_pool.base, size, 64).gpu;
|
||||
|
||||
pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
|
||||
|
|
@ -2913,9 +2913,9 @@ panfrost_launch_xfb(struct panfrost_batch *batch,
|
|||
|
||||
vs_uncompiled->xfb->stream_output = vs->stream_output;
|
||||
|
||||
mali_ptr saved_rsd = batch->rsd[PIPE_SHADER_VERTEX];
|
||||
mali_ptr saved_ubo = batch->uniform_buffers[PIPE_SHADER_VERTEX];
|
||||
mali_ptr saved_push = batch->push_uniforms[PIPE_SHADER_VERTEX];
|
||||
uint64_t saved_rsd = batch->rsd[PIPE_SHADER_VERTEX];
|
||||
uint64_t saved_ubo = batch->uniform_buffers[PIPE_SHADER_VERTEX];
|
||||
uint64_t saved_push = batch->push_uniforms[PIPE_SHADER_VERTEX];
|
||||
unsigned saved_nr_push_uniforms =
|
||||
batch->nr_push_uniforms[PIPE_SHADER_VERTEX];
|
||||
|
||||
|
|
@ -3318,7 +3318,7 @@ panfrost_launch_grid_on_batch(struct pipe_context *pipe,
|
|||
* Save the global one, and restore it when we're done emitting
|
||||
* the job.
|
||||
*/
|
||||
mali_ptr saved_tls = batch->tls.gpu;
|
||||
uint64_t saved_tls = batch->tls.gpu;
|
||||
batch->tls.gpu = panfrost_emit_shared_memory(batch, info);
|
||||
|
||||
/* if indirect, mark the indirect buffer as being read */
|
||||
|
|
@ -3957,7 +3957,7 @@ context_cleanup(struct panfrost_context *ctx)
|
|||
* the polygon list. It's perfectly fast to use allocate/free BO directly,
|
||||
* since we'll hit the BO cache and this is one-per-batch anyway. */
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
batch_get_polygon_list(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
|
||||
|
|
@ -4009,7 +4009,7 @@ static void
|
|||
init_polygon_list(struct panfrost_batch *batch)
|
||||
{
|
||||
#if PAN_ARCH <= 5
|
||||
mali_ptr polygon_list = batch_get_polygon_list(batch);
|
||||
uint64_t polygon_list = batch_get_polygon_list(batch);
|
||||
pan_jc_initialize_tiler(&batch->pool.base, &batch->jm.jobs.vtc_jc,
|
||||
polygon_list);
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -153,7 +153,7 @@ panfrost_overdraw_alpha(const struct panfrost_context *ctx, bool zero)
|
|||
|
||||
static inline void
|
||||
panfrost_emit_primitive_size(struct panfrost_context *ctx, bool points,
|
||||
mali_ptr size_array, void *prim_size)
|
||||
uint64_t size_array, void *prim_size)
|
||||
{
|
||||
struct panfrost_rasterizer *rast = ctx->rasterizer;
|
||||
|
||||
|
|
@ -235,12 +235,12 @@ panfrost_fs_required(struct panfrost_compiled_shader *fs,
|
|||
}
|
||||
|
||||
#if PAN_ARCH >= 9
|
||||
static inline mali_ptr
|
||||
static inline uint64_t
|
||||
panfrost_get_position_shader(struct panfrost_batch *batch,
|
||||
const struct pipe_draw_info *info)
|
||||
{
|
||||
/* IDVS/points vertex shader */
|
||||
mali_ptr vs_ptr = batch->rsd[PIPE_SHADER_VERTEX];
|
||||
uint64_t vs_ptr = batch->rsd[PIPE_SHADER_VERTEX];
|
||||
|
||||
/* IDVS/triangle vertex shader */
|
||||
if (vs_ptr && info->mode != MESA_PRIM_POINTS)
|
||||
|
|
@ -249,7 +249,7 @@ panfrost_get_position_shader(struct panfrost_batch *batch,
|
|||
return vs_ptr;
|
||||
}
|
||||
|
||||
static inline mali_ptr
|
||||
static inline uint64_t
|
||||
panfrost_get_varying_shader(struct panfrost_batch *batch)
|
||||
{
|
||||
return batch->rsd[PIPE_SHADER_VERTEX] + (2 * pan_size(SHADER_PROGRAM));
|
||||
|
|
@ -268,7 +268,7 @@ panfrost_vertex_attribute_stride(struct panfrost_compiled_shader *vs,
|
|||
return slots * 16;
|
||||
}
|
||||
|
||||
static inline mali_ptr
|
||||
static inline uint64_t
|
||||
panfrost_emit_resources(struct panfrost_batch *batch,
|
||||
enum pipe_shader_type stage)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ panfrost_set_blend_color(struct pipe_context *pipe,
|
|||
|
||||
/* Create a final blend given the context */
|
||||
|
||||
mali_ptr
|
||||
uint64_t
|
||||
panfrost_get_blend(struct panfrost_batch *batch, unsigned rti,
|
||||
struct panfrost_bo **bo, unsigned *shader_offset)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -256,7 +256,7 @@ struct pan_linkage {
|
|||
struct panfrost_bo *bo;
|
||||
|
||||
/* Uploaded attribute descriptors */
|
||||
mali_ptr producer, consumer;
|
||||
uint64_t producer, consumer;
|
||||
|
||||
/* Varyings buffers required */
|
||||
uint32_t present;
|
||||
|
|
@ -518,12 +518,12 @@ void panfrost_update_shader_variant(struct panfrost_context *ctx,
|
|||
|
||||
void panfrost_analyze_sysvals(struct panfrost_compiled_shader *ss);
|
||||
|
||||
mali_ptr
|
||||
uint64_t
|
||||
panfrost_get_index_buffer(struct panfrost_batch *batch,
|
||||
const struct pipe_draw_info *info,
|
||||
const struct pipe_draw_start_count_bias *draw);
|
||||
|
||||
mali_ptr
|
||||
uint64_t
|
||||
panfrost_get_index_buffer_bounded(struct panfrost_batch *batch,
|
||||
const struct pipe_draw_info *info,
|
||||
const struct pipe_draw_start_count_bias *draw,
|
||||
|
|
@ -531,7 +531,7 @@ panfrost_get_index_buffer_bounded(struct panfrost_batch *batch,
|
|||
|
||||
/* Instancing */
|
||||
|
||||
mali_ptr panfrost_vertex_buffer_address(struct panfrost_context *ctx,
|
||||
uint64_t panfrost_vertex_buffer_address(struct panfrost_context *ctx,
|
||||
unsigned i);
|
||||
|
||||
void panfrost_shader_context_init(struct pipe_context *pctx);
|
||||
|
|
|
|||
|
|
@ -649,7 +649,7 @@ out_free_syncops:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
csf_get_tiler_desc(struct panfrost_batch *batch)
|
||||
{
|
||||
if (batch->tiler_ctx.valhall.desc)
|
||||
|
|
@ -856,9 +856,9 @@ GENX(csf_emit_fragment_job)(struct panfrost_batch *batch,
|
|||
|
||||
static void
|
||||
csf_emit_shader_regs(struct panfrost_batch *batch, enum pipe_shader_type stage,
|
||||
mali_ptr shader)
|
||||
uint64_t shader)
|
||||
{
|
||||
mali_ptr resources = panfrost_emit_resources(batch, stage);
|
||||
uint64_t resources = panfrost_emit_resources(batch, stage);
|
||||
|
||||
assert(stage == PIPE_SHADER_VERTEX || stage == PIPE_SHADER_FRAGMENT ||
|
||||
stage == PIPE_SHADER_COMPUTE);
|
||||
|
|
|
|||
|
|
@ -53,10 +53,10 @@ struct pan_csf_tiler_oom_ctx {
|
|||
uint32_t bbox_max;
|
||||
|
||||
/* Tiler descriptor address */
|
||||
mali_ptr tiler_desc;
|
||||
uint64_t tiler_desc;
|
||||
|
||||
/* Address of the region reserved for saving registers. */
|
||||
mali_ptr dump_addr;
|
||||
uint64_t dump_addr;
|
||||
} PACKED;
|
||||
|
||||
struct panfrost_csf_batch {
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ struct pan_preload_shader_key {
|
|||
struct pan_preload_shader_data {
|
||||
struct pan_preload_shader_key key;
|
||||
struct pan_shader_info info;
|
||||
mali_ptr address;
|
||||
uint64_t address;
|
||||
unsigned blend_ret_offsets[8];
|
||||
nir_alu_type blend_types[8];
|
||||
};
|
||||
|
|
@ -109,7 +109,7 @@ struct pan_preload_blend_shader_key {
|
|||
|
||||
struct pan_preload_blend_shader_data {
|
||||
struct pan_preload_blend_shader_key key;
|
||||
mali_ptr address;
|
||||
uint64_t address;
|
||||
};
|
||||
|
||||
struct pan_preload_rsd_key {
|
||||
|
|
@ -124,7 +124,7 @@ struct pan_preload_rsd_key {
|
|||
|
||||
struct pan_preload_rsd_data {
|
||||
struct pan_preload_rsd_key key;
|
||||
mali_ptr address;
|
||||
uint64_t address;
|
||||
};
|
||||
|
||||
#if PAN_ARCH >= 5
|
||||
|
|
@ -132,7 +132,7 @@ static void
|
|||
pan_preload_emit_blend(unsigned rt,
|
||||
const struct pan_image_view *iview,
|
||||
const struct pan_preload_shader_data *preload_shader,
|
||||
mali_ptr blend_shader, void *out)
|
||||
uint64_t blend_shader, void *out)
|
||||
{
|
||||
assert(blend_shader == 0 || PAN_ARCH <= 5);
|
||||
|
||||
|
|
@ -212,12 +212,12 @@ pan_preload_is_ms(struct pan_preload_views *views)
|
|||
static void
|
||||
pan_preload_emit_blends(const struct pan_preload_shader_data *preload_shader,
|
||||
struct pan_preload_views *views,
|
||||
mali_ptr *blend_shaders, void *out)
|
||||
uint64_t *blend_shaders, void *out)
|
||||
{
|
||||
for (unsigned i = 0; i < MAX2(views->rt_count, 1); ++i) {
|
||||
void *dest = out + pan_size(BLEND) * i;
|
||||
const struct pan_image_view *rt_view = views->rts[i];
|
||||
mali_ptr blend_shader = blend_shaders ? blend_shaders[i] : 0;
|
||||
uint64_t blend_shader = blend_shaders ? blend_shaders[i] : 0;
|
||||
|
||||
pan_preload_emit_blend(i, rt_view, preload_shader, blend_shader, dest);
|
||||
}
|
||||
|
|
@ -227,7 +227,7 @@ pan_preload_emit_blends(const struct pan_preload_shader_data *preload_shader,
|
|||
#if PAN_ARCH <= 7
|
||||
static void
|
||||
pan_preload_emit_rsd(const struct pan_preload_shader_data *preload_shader,
|
||||
struct pan_preload_views *views, mali_ptr *blend_shaders,
|
||||
struct pan_preload_views *views, uint64_t *blend_shaders,
|
||||
void *out)
|
||||
{
|
||||
UNUSED bool zs = (views->z || views->s);
|
||||
|
|
@ -273,7 +273,7 @@ pan_preload_emit_rsd(const struct pan_preload_shader_data *preload_shader,
|
|||
cfg.properties.allow_forward_pixel_to_be_killed = !zs;
|
||||
#else
|
||||
|
||||
mali_ptr blend_shader =
|
||||
uint64_t blend_shader =
|
||||
blend_shaders
|
||||
? panfrost_last_nonnull(blend_shaders, MAX2(views->rt_count, 1))
|
||||
: 0;
|
||||
|
|
@ -323,7 +323,7 @@ pan_preload_get_blend_shaders(struct pan_fb_preload_cache *cache,
|
|||
unsigned rt_count,
|
||||
const struct pan_image_view **rts,
|
||||
const struct pan_preload_shader_data *preload_shader,
|
||||
mali_ptr *blend_shaders)
|
||||
uint64_t *blend_shaders)
|
||||
{
|
||||
if (!rt_count)
|
||||
return;
|
||||
|
|
@ -676,7 +676,7 @@ pan_preload_get_key(struct pan_preload_views *views)
|
|||
}
|
||||
|
||||
#if PAN_ARCH <= 7
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_get_rsd(struct pan_fb_preload_cache *cache,
|
||||
struct pan_preload_views *views)
|
||||
{
|
||||
|
|
@ -736,7 +736,7 @@ pan_preload_get_rsd(struct pan_fb_preload_cache *cache,
|
|||
if (!rsd_ptr.cpu)
|
||||
return 0;
|
||||
|
||||
mali_ptr blend_shaders[8] = {0};
|
||||
uint64_t blend_shaders[8] = {0};
|
||||
|
||||
const struct pan_preload_shader_data *preload_shader =
|
||||
pan_preload_get_shader(cache, &preload_key);
|
||||
|
|
@ -819,7 +819,7 @@ pan_preload_needed(const struct pan_fb_info *fb, bool zs)
|
|||
return false;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_emit_varying(struct pan_pool *pool)
|
||||
{
|
||||
struct panfrost_ptr varying = pan_pool_alloc_desc(pool, ATTRIBUTE);
|
||||
|
|
@ -844,8 +844,8 @@ pan_preload_emit_varying(struct pan_pool *pool)
|
|||
return varying.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
pan_preload_emit_varying_buffer(struct pan_pool *pool, mali_ptr coordinates)
|
||||
static uint64_t
|
||||
pan_preload_emit_varying_buffer(struct pan_pool *pool, uint64_t coordinates)
|
||||
{
|
||||
#if PAN_ARCH >= 9
|
||||
struct panfrost_ptr varying_buffer = pan_pool_alloc_desc(pool, BUFFER);
|
||||
|
|
@ -883,7 +883,7 @@ pan_preload_emit_varying_buffer(struct pan_pool *pool, mali_ptr coordinates)
|
|||
return varying_buffer.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_emit_sampler(struct pan_pool *pool, bool nearest_filter)
|
||||
{
|
||||
struct panfrost_ptr sampler = pan_pool_alloc_desc(pool, SAMPLER);
|
||||
|
|
@ -901,7 +901,7 @@ pan_preload_emit_sampler(struct pan_pool *pool, bool nearest_filter)
|
|||
return sampler.gpu;
|
||||
}
|
||||
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_emit_textures(struct pan_pool *pool, const struct pan_fb_info *fb,
|
||||
bool zs, unsigned *tex_count_out)
|
||||
{
|
||||
|
|
@ -996,7 +996,7 @@ pan_preload_emit_textures(struct pan_pool *pool, const struct pan_fb_info *fb,
|
|||
|
||||
return textures.gpu;
|
||||
#else
|
||||
mali_ptr textures[8] = {0};
|
||||
uint64_t textures[8] = {0};
|
||||
|
||||
for (unsigned i = 0; i < tex_count; i++) {
|
||||
size_t sz = pan_size(TEXTURE) +
|
||||
|
|
@ -1012,14 +1012,14 @@ pan_preload_emit_textures(struct pan_pool *pool, const struct pan_fb_info *fb,
|
|||
textures[i] = texture.gpu;
|
||||
}
|
||||
|
||||
return pan_pool_upload_aligned(pool, textures, tex_count * sizeof(mali_ptr),
|
||||
sizeof(mali_ptr));
|
||||
return pan_pool_upload_aligned(pool, textures, tex_count * sizeof(uint64_t),
|
||||
sizeof(uint64_t));
|
||||
#endif
|
||||
}
|
||||
|
||||
#if PAN_ARCH >= 8
|
||||
/* TODO: cache */
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_emit_zs(struct pan_pool *pool, bool z, bool s)
|
||||
{
|
||||
struct panfrost_ptr zsd = pan_pool_alloc_desc(pool, DEPTH_STENCIL);
|
||||
|
|
@ -1057,7 +1057,7 @@ pan_preload_emit_zs(struct pan_pool *pool, bool z, bool s)
|
|||
return zsd.gpu;
|
||||
}
|
||||
#else
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
pan_preload_emit_viewport(struct pan_pool *pool, uint16_t minx, uint16_t miny,
|
||||
uint16_t maxx, uint16_t maxy)
|
||||
{
|
||||
|
|
@ -1080,14 +1080,14 @@ pan_preload_emit_viewport(struct pan_pool *pool, uint16_t minx, uint16_t miny,
|
|||
static void
|
||||
pan_preload_emit_dcd(struct pan_fb_preload_cache *cache,
|
||||
struct pan_pool *pool, struct pan_fb_info *fb, bool zs,
|
||||
mali_ptr coordinates, mali_ptr tsd, void *out,
|
||||
uint64_t coordinates, uint64_t tsd, void *out,
|
||||
bool always_write)
|
||||
{
|
||||
unsigned tex_count = 0;
|
||||
mali_ptr textures = pan_preload_emit_textures(pool, fb, zs, &tex_count);
|
||||
mali_ptr samplers = pan_preload_emit_sampler(pool, true);
|
||||
mali_ptr varyings = pan_preload_emit_varying(pool);
|
||||
mali_ptr varying_buffers =
|
||||
uint64_t textures = pan_preload_emit_textures(pool, fb, zs, &tex_count);
|
||||
uint64_t samplers = pan_preload_emit_sampler(pool, true);
|
||||
uint64_t varyings = pan_preload_emit_varying(pool);
|
||||
uint64_t varying_buffers =
|
||||
pan_preload_emit_varying_buffer(pool, coordinates);
|
||||
|
||||
/* Tiles updated by preload shaders are still considered clean (separate
|
||||
|
|
@ -1228,8 +1228,8 @@ pan_preload_fb_alloc_pre_post_dcds(struct pan_pool *desc_pool,
|
|||
static void
|
||||
pan_preload_emit_pre_frame_dcd(struct pan_fb_preload_cache *cache,
|
||||
struct pan_pool *desc_pool,
|
||||
struct pan_fb_info *fb, bool zs, mali_ptr coords,
|
||||
mali_ptr tsd)
|
||||
struct pan_fb_info *fb, bool zs, uint64_t coords,
|
||||
uint64_t tsd)
|
||||
{
|
||||
unsigned dcd_idx = zs ? 1 : 0;
|
||||
pan_preload_fb_alloc_pre_post_dcds(desc_pool, fb);
|
||||
|
|
@ -1298,8 +1298,8 @@ pan_preload_emit_pre_frame_dcd(struct pan_fb_preload_cache *cache,
|
|||
#else
|
||||
static struct panfrost_ptr
|
||||
pan_preload_emit_tiler_job(struct pan_fb_preload_cache *cache, struct pan_pool *desc_pool,
|
||||
struct pan_fb_info *fb, bool zs, mali_ptr coords,
|
||||
mali_ptr tsd)
|
||||
struct pan_fb_info *fb, bool zs, uint64_t coords,
|
||||
uint64_t tsd)
|
||||
{
|
||||
struct panfrost_ptr job = pan_pool_alloc_desc(desc_pool, TILER_JOB);
|
||||
|
||||
|
|
@ -1328,8 +1328,8 @@ pan_preload_emit_tiler_job(struct pan_fb_preload_cache *cache, struct pan_pool *
|
|||
|
||||
static struct panfrost_ptr
|
||||
pan_preload_fb_part(struct pan_fb_preload_cache *cache, struct pan_pool *pool,
|
||||
struct pan_fb_info *fb, bool zs, mali_ptr coords,
|
||||
mali_ptr tsd)
|
||||
struct pan_fb_info *fb, bool zs, uint64_t coords,
|
||||
uint64_t tsd)
|
||||
{
|
||||
struct panfrost_ptr job = {0};
|
||||
|
||||
|
|
@ -1343,12 +1343,12 @@ pan_preload_fb_part(struct pan_fb_preload_cache *cache, struct pan_pool *pool,
|
|||
|
||||
unsigned
|
||||
GENX(pan_preload_fb)(struct pan_fb_preload_cache *cache, struct pan_pool *pool,
|
||||
struct pan_fb_info *fb, mali_ptr tsd,
|
||||
struct pan_fb_info *fb, uint64_t tsd,
|
||||
struct panfrost_ptr *jobs)
|
||||
{
|
||||
bool preload_zs = pan_preload_needed(fb, true);
|
||||
bool preload_rts = pan_preload_needed(fb, false);
|
||||
mali_ptr coords;
|
||||
uint64_t coords;
|
||||
|
||||
if (!preload_zs && !preload_rts)
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ void GENX(pan_fb_preload_cache_cleanup)(struct pan_fb_preload_cache *cache);
|
|||
|
||||
unsigned GENX(pan_preload_fb)(struct pan_fb_preload_cache *cache,
|
||||
struct pan_pool *desc_pool,
|
||||
struct pan_fb_info *fb, mali_ptr tsd,
|
||||
struct pan_fb_info *fb, uint64_t tsd,
|
||||
struct panfrost_ptr *jobs);
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -88,7 +88,7 @@ panfrost_analyze_sysvals(struct panfrost_compiled_shader *ss)
|
|||
* good for the duration of the draw (transient), could last longer. Bounds are
|
||||
* not calculated.
|
||||
*/
|
||||
mali_ptr
|
||||
uint64_t
|
||||
panfrost_get_index_buffer(struct panfrost_batch *batch,
|
||||
const struct pipe_draw_info *info,
|
||||
const struct pipe_draw_start_count_bias *draw)
|
||||
|
|
@ -117,7 +117,7 @@ panfrost_get_index_buffer(struct panfrost_batch *batch,
|
|||
* these operations together because there are natural optimizations which
|
||||
* require them to be together. */
|
||||
|
||||
mali_ptr
|
||||
uint64_t
|
||||
panfrost_get_index_buffer_bounded(struct panfrost_batch *batch,
|
||||
const struct pipe_draw_info *info,
|
||||
const struct pipe_draw_start_count_bias *draw,
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ GENX(jm_init_batch)(struct panfrost_batch *batch)
|
|||
}
|
||||
|
||||
static int
|
||||
jm_submit_jc(struct panfrost_batch *batch, mali_ptr first_job_desc,
|
||||
jm_submit_jc(struct panfrost_batch *batch, uint64_t first_job_desc,
|
||||
uint32_t reqs, uint32_t out_sync)
|
||||
{
|
||||
struct panfrost_context *ctx = batch->ctx;
|
||||
|
|
@ -290,7 +290,7 @@ GENX(jm_emit_fragment_job)(struct panfrost_batch *batch,
|
|||
static void
|
||||
jm_emit_shader_env(struct panfrost_batch *batch,
|
||||
struct MALI_SHADER_ENVIRONMENT *cfg,
|
||||
enum pipe_shader_type stage, mali_ptr shader_ptr)
|
||||
enum pipe_shader_type stage, uint64_t shader_ptr)
|
||||
{
|
||||
cfg->resources = panfrost_emit_resources(batch, stage);
|
||||
cfg->thread_storage = batch->tls.gpu;
|
||||
|
|
@ -399,11 +399,11 @@ GENX(jm_launch_grid)(struct panfrost_batch *batch,
|
|||
}
|
||||
|
||||
#if PAN_ARCH >= 6
|
||||
static mali_ptr
|
||||
static uint64_t
|
||||
jm_emit_tiler_desc(struct panfrost_batch *batch)
|
||||
{
|
||||
struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
|
||||
mali_ptr tiler_desc = PAN_ARCH >= 9 ? batch->tiler_ctx.bifrost.desc
|
||||
uint64_t tiler_desc = PAN_ARCH >= 9 ? batch->tiler_ctx.bifrost.desc
|
||||
: batch->tiler_ctx.valhall.desc;
|
||||
|
||||
if (tiler_desc)
|
||||
|
|
@ -418,7 +418,7 @@ jm_emit_tiler_desc(struct panfrost_batch *batch)
|
|||
heap.top = dev->tiler_heap->ptr.gpu + panfrost_bo_size(dev->tiler_heap);
|
||||
}
|
||||
|
||||
mali_ptr heap = t.gpu;
|
||||
uint64_t heap = t.gpu;
|
||||
unsigned max_levels = dev->tiler_features.max_levels;
|
||||
assert(max_levels >= 2);
|
||||
|
||||
|
|
@ -890,7 +890,7 @@ GENX(jm_launch_xfb)(struct panfrost_batch *batch,
|
|||
PAN_ARCH <= 5, false);
|
||||
|
||||
/* No varyings on XFB compute jobs. */
|
||||
mali_ptr saved_vs_varyings = batch->varyings.vs;
|
||||
uint64_t saved_vs_varyings = batch->varyings.vs;
|
||||
|
||||
batch->varyings.vs = 0;
|
||||
jm_emit_vertex_job(batch, info, &invocation, t.cpu);
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ struct panfrost_jm_batch {
|
|||
struct pan_jc vtc_jc;
|
||||
|
||||
/* Fragment job, only one per batch. */
|
||||
mali_ptr frag;
|
||||
uint64_t frag;
|
||||
} jobs;
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -117,35 +117,35 @@ struct panfrost_batch {
|
|||
struct panfrost_bo *polygon_list_bo;
|
||||
|
||||
/* Keep the num_work_groups sysval around for indirect dispatch */
|
||||
mali_ptr num_wg_sysval[3];
|
||||
uint64_t num_wg_sysval[3];
|
||||
|
||||
/* Cached descriptors */
|
||||
mali_ptr viewport;
|
||||
mali_ptr rsd[PIPE_SHADER_TYPES];
|
||||
mali_ptr textures[PIPE_SHADER_TYPES];
|
||||
mali_ptr samplers[PIPE_SHADER_TYPES];
|
||||
mali_ptr attribs[PIPE_SHADER_TYPES];
|
||||
mali_ptr attrib_bufs[PIPE_SHADER_TYPES];
|
||||
mali_ptr uniform_buffers[PIPE_SHADER_TYPES];
|
||||
mali_ptr push_uniforms[PIPE_SHADER_TYPES];
|
||||
mali_ptr depth_stencil;
|
||||
mali_ptr blend;
|
||||
uint64_t viewport;
|
||||
uint64_t rsd[PIPE_SHADER_TYPES];
|
||||
uint64_t textures[PIPE_SHADER_TYPES];
|
||||
uint64_t samplers[PIPE_SHADER_TYPES];
|
||||
uint64_t attribs[PIPE_SHADER_TYPES];
|
||||
uint64_t attrib_bufs[PIPE_SHADER_TYPES];
|
||||
uint64_t uniform_buffers[PIPE_SHADER_TYPES];
|
||||
uint64_t push_uniforms[PIPE_SHADER_TYPES];
|
||||
uint64_t depth_stencil;
|
||||
uint64_t blend;
|
||||
|
||||
unsigned nr_push_uniforms[PIPE_SHADER_TYPES];
|
||||
unsigned nr_uniform_buffers[PIPE_SHADER_TYPES];
|
||||
|
||||
/* Varying related pointers */
|
||||
struct {
|
||||
mali_ptr bufs;
|
||||
uint64_t bufs;
|
||||
unsigned nr_bufs;
|
||||
mali_ptr vs;
|
||||
mali_ptr fs;
|
||||
mali_ptr pos;
|
||||
mali_ptr psiz;
|
||||
uint64_t vs;
|
||||
uint64_t fs;
|
||||
uint64_t pos;
|
||||
uint64_t psiz;
|
||||
} varyings;
|
||||
|
||||
/* Index array */
|
||||
mali_ptr indices;
|
||||
uint64_t indices;
|
||||
|
||||
/* Valhall: struct mali_scissor_packed */
|
||||
unsigned scissor[2];
|
||||
|
|
@ -154,10 +154,10 @@ struct panfrost_batch {
|
|||
/* Used on Valhall only. Midgard includes attributes in-band with
|
||||
* attributes, wildly enough.
|
||||
*/
|
||||
mali_ptr images[PIPE_SHADER_TYPES];
|
||||
uint64_t images[PIPE_SHADER_TYPES];
|
||||
|
||||
/* SSBOs. */
|
||||
mali_ptr ssbos[PIPE_SHADER_TYPES];
|
||||
uint64_t ssbos[PIPE_SHADER_TYPES];
|
||||
|
||||
/* On Valhall, these are properties of the batch. On Bifrost, they are
|
||||
* per draw.
|
||||
|
|
|
|||
|
|
@ -71,14 +71,14 @@ struct panfrost_pool_ref {
|
|||
struct panfrost_bo *bo;
|
||||
|
||||
/* Mapped GPU VA */
|
||||
mali_ptr gpu;
|
||||
uint64_t gpu;
|
||||
};
|
||||
|
||||
/* Take a reference to an allocation pool. Call directly after allocating from
|
||||
* an unowned pool for correct operation. */
|
||||
|
||||
static inline struct panfrost_pool_ref
|
||||
panfrost_pool_take_ref(struct panfrost_pool *pool, mali_ptr ptr)
|
||||
panfrost_pool_take_ref(struct panfrost_pool *pool, uint64_t ptr)
|
||||
{
|
||||
if (!pool->owned)
|
||||
panfrost_bo_reference(pool->transient_bo);
|
||||
|
|
|
|||
|
|
@ -36,7 +36,6 @@ typedef uint8_t u8;
|
|||
typedef uint16_t u16;
|
||||
typedef uint32_t u32;
|
||||
typedef uint64_t u64;
|
||||
typedef uint64_t mali_ptr;
|
||||
|
||||
#define MALI_FORMAT_COMPRESSED (0 << 5)
|
||||
#define MALI_EXTRACT_TYPE(fmt) ((fmt)&0xe0)
|
||||
|
|
|
|||
|
|
@ -80,9 +80,9 @@ pandecode_render_target(struct pandecode_context *ctx, uint64_t gpu_va,
|
|||
ctx->indent++;
|
||||
|
||||
for (int i = 0; i < (fb->render_target_count); i++) {
|
||||
mali_ptr rt_va = gpu_va + i * pan_size(RENDER_TARGET);
|
||||
uint64_t rt_va = gpu_va + i * pan_size(RENDER_TARGET);
|
||||
const struct mali_render_target_packed *PANDECODE_PTR_VAR(
|
||||
ctx, rtp, (mali_ptr)rt_va);
|
||||
ctx, rtp, (uint64_t)rt_va);
|
||||
DUMP_CL(ctx, RENDER_TARGET, rtp, "Color Render Target %d:\n", i);
|
||||
}
|
||||
|
||||
|
|
@ -112,7 +112,7 @@ struct pandecode_fbd
|
|||
GENX(pandecode_fbd)(struct pandecode_context *ctx, uint64_t gpu_va,
|
||||
bool is_fragment, unsigned gpu_id)
|
||||
{
|
||||
const void *PANDECODE_PTR_VAR(ctx, fb, (mali_ptr)gpu_va);
|
||||
const void *PANDECODE_PTR_VAR(ctx, fb, (uint64_t)gpu_va);
|
||||
pan_section_unpack(fb, FRAMEBUFFER, PARAMETERS, params);
|
||||
DUMP_UNPACKED(ctx, FRAMEBUFFER_PARAMETERS, params, "Parameters:\n");
|
||||
|
||||
|
|
@ -176,7 +176,7 @@ GENX(pandecode_fbd)(struct pandecode_context *ctx, uint64_t gpu_va,
|
|||
|
||||
if (params.has_zs_crc_extension) {
|
||||
const struct mali_zs_crc_extension_packed *PANDECODE_PTR_VAR(
|
||||
ctx, zs_crc, (mali_ptr)gpu_va);
|
||||
ctx, zs_crc, (uint64_t)gpu_va);
|
||||
DUMP_CL(ctx, ZS_CRC_EXTENSION, zs_crc, "ZS CRC Extension:\n");
|
||||
pandecode_log(ctx, "\n");
|
||||
|
||||
|
|
@ -204,9 +204,9 @@ GENX(pandecode_fbd)(struct pandecode_context *ctx, uint64_t gpu_va,
|
|||
}
|
||||
|
||||
#if PAN_ARCH >= 5
|
||||
mali_ptr
|
||||
uint64_t
|
||||
GENX(pandecode_blend)(struct pandecode_context *ctx, void *descs, int rt_no,
|
||||
mali_ptr frag_shader)
|
||||
uint64_t frag_shader)
|
||||
{
|
||||
pan_unpack(descs + (rt_no * pan_size(BLEND)), BLEND, b);
|
||||
DUMP_UNPACKED(ctx, BLEND, b, "Blend RT %d:\n", rt_no);
|
||||
|
|
@ -242,7 +242,7 @@ panfrost_is_yuv_format(uint32_t packed)
|
|||
}
|
||||
|
||||
static void
|
||||
pandecode_texture_payload(struct pandecode_context *ctx, mali_ptr payload,
|
||||
pandecode_texture_payload(struct pandecode_context *ctx, uint64_t payload,
|
||||
const struct MALI_TEXTURE *tex)
|
||||
{
|
||||
unsigned nr_samples =
|
||||
|
|
@ -314,7 +314,7 @@ pandecode_texture_payload(struct pandecode_context *ctx, mali_ptr payload,
|
|||
|
||||
#if PAN_ARCH <= 5
|
||||
void
|
||||
GENX(pandecode_texture)(struct pandecode_context *ctx, mali_ptr u, unsigned tex)
|
||||
GENX(pandecode_texture)(struct pandecode_context *ctx, uint64_t u, unsigned tex)
|
||||
{
|
||||
const uint8_t *cl = pandecode_fetch_gpu_mem(ctx, u, pan_size(TEXTURE));
|
||||
|
||||
|
|
@ -354,7 +354,7 @@ GENX(pandecode_texture)(struct pandecode_context *ctx, const void *cl,
|
|||
|
||||
#if PAN_ARCH >= 6
|
||||
void
|
||||
GENX(pandecode_tiler)(struct pandecode_context *ctx, mali_ptr gpu_va,
|
||||
GENX(pandecode_tiler)(struct pandecode_context *ctx, uint64_t gpu_va,
|
||||
unsigned gpu_id)
|
||||
{
|
||||
pan_unpack(PANDECODE_PTR(ctx, gpu_va, void), TILER_CONTEXT, t);
|
||||
|
|
@ -371,7 +371,7 @@ GENX(pandecode_tiler)(struct pandecode_context *ctx, mali_ptr gpu_va,
|
|||
|
||||
#if PAN_ARCH >= 9
|
||||
void
|
||||
GENX(pandecode_fau)(struct pandecode_context *ctx, mali_ptr addr,
|
||||
GENX(pandecode_fau)(struct pandecode_context *ctx, uint64_t addr,
|
||||
unsigned count, const char *name)
|
||||
{
|
||||
if (count == 0)
|
||||
|
|
@ -388,8 +388,8 @@ GENX(pandecode_fau)(struct pandecode_context *ctx, mali_ptr addr,
|
|||
fprintf(ctx->dump_stream, "\n");
|
||||
}
|
||||
|
||||
mali_ptr
|
||||
GENX(pandecode_shader)(struct pandecode_context *ctx, mali_ptr addr,
|
||||
uint64_t
|
||||
GENX(pandecode_shader)(struct pandecode_context *ctx, uint64_t addr,
|
||||
const char *label, unsigned gpu_id)
|
||||
{
|
||||
MAP_ADDR(ctx, SHADER_PROGRAM, addr, cl);
|
||||
|
|
@ -404,7 +404,7 @@ GENX(pandecode_shader)(struct pandecode_context *ctx, mali_ptr addr,
|
|||
}
|
||||
|
||||
static void
|
||||
pandecode_resources(struct pandecode_context *ctx, mali_ptr addr, unsigned size)
|
||||
pandecode_resources(struct pandecode_context *ctx, uint64_t addr, unsigned size)
|
||||
{
|
||||
const uint8_t *cl = pandecode_fetch_gpu_mem(ctx, addr, size);
|
||||
assert((size % 0x20) == 0);
|
||||
|
|
@ -434,7 +434,7 @@ pandecode_resources(struct pandecode_context *ctx, mali_ptr addr, unsigned size)
|
|||
}
|
||||
|
||||
void
|
||||
GENX(pandecode_resource_tables)(struct pandecode_context *ctx, mali_ptr addr,
|
||||
GENX(pandecode_resource_tables)(struct pandecode_context *ctx, uint64_t addr,
|
||||
const char *label)
|
||||
{
|
||||
unsigned count = addr & 0x3F;
|
||||
|
|
@ -459,7 +459,7 @@ GENX(pandecode_resource_tables)(struct pandecode_context *ctx, mali_ptr addr,
|
|||
}
|
||||
|
||||
void
|
||||
GENX(pandecode_depth_stencil)(struct pandecode_context *ctx, mali_ptr addr)
|
||||
GENX(pandecode_depth_stencil)(struct pandecode_context *ctx, uint64_t addr)
|
||||
{
|
||||
MAP_ADDR(ctx, DEPTH_STENCIL, addr, cl);
|
||||
pan_unpack(cl, DEPTH_STENCIL, desc);
|
||||
|
|
@ -485,14 +485,14 @@ GENX(pandecode_shader_environment)(struct pandecode_context *ctx,
|
|||
}
|
||||
|
||||
void
|
||||
GENX(pandecode_blend_descs)(struct pandecode_context *ctx, mali_ptr blend,
|
||||
unsigned count, mali_ptr frag_shader,
|
||||
GENX(pandecode_blend_descs)(struct pandecode_context *ctx, uint64_t blend,
|
||||
unsigned count, uint64_t frag_shader,
|
||||
unsigned gpu_id)
|
||||
{
|
||||
for (unsigned i = 0; i < count; ++i) {
|
||||
struct mali_blend_packed *PANDECODE_PTR_VAR(ctx, blend_descs, blend);
|
||||
|
||||
mali_ptr blend_shader =
|
||||
uint64_t blend_shader =
|
||||
GENX(pandecode_blend)(ctx, blend_descs, i, frag_shader);
|
||||
if (blend_shader) {
|
||||
fprintf(ctx->dump_stream, "Blend shader %u @%" PRIx64 "", i,
|
||||
|
|
@ -506,7 +506,7 @@ void
|
|||
GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
|
||||
unsigned unused, unsigned gpu_id)
|
||||
{
|
||||
mali_ptr frag_shader = 0;
|
||||
uint64_t frag_shader = 0;
|
||||
|
||||
GENX(pandecode_depth_stencil)(ctx, p->depth_stencil);
|
||||
GENX(pandecode_blend_descs)
|
||||
|
|
|
|||
|
|
@ -104,37 +104,37 @@ __pandecode_fetch_gpu_mem(struct pandecode_context *ctx, uint64_t gpu_va,
|
|||
name = __pandecode_fetch_gpu_mem(ctx, gpu_va, sizeof(*name), __LINE__, \
|
||||
__FILE__)
|
||||
|
||||
void pandecode_validate_buffer(struct pandecode_context *ctx, mali_ptr addr,
|
||||
void pandecode_validate_buffer(struct pandecode_context *ctx, uint64_t addr,
|
||||
size_t sz);
|
||||
|
||||
/* Forward declare for all supported gens to permit thunking */
|
||||
void pandecode_jc_v4(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
void pandecode_jc_v4(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id);
|
||||
void pandecode_jc_v5(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
void pandecode_jc_v5(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id);
|
||||
void pandecode_jc_v6(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
void pandecode_jc_v6(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id);
|
||||
void pandecode_jc_v7(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
void pandecode_jc_v7(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id);
|
||||
void pandecode_jc_v9(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
void pandecode_jc_v9(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id);
|
||||
|
||||
void pandecode_abort_on_fault_v4(struct pandecode_context *ctx,
|
||||
mali_ptr jc_gpu_va);
|
||||
uint64_t jc_gpu_va);
|
||||
void pandecode_abort_on_fault_v5(struct pandecode_context *ctx,
|
||||
mali_ptr jc_gpu_va);
|
||||
uint64_t jc_gpu_va);
|
||||
void pandecode_abort_on_fault_v6(struct pandecode_context *ctx,
|
||||
mali_ptr jc_gpu_va);
|
||||
uint64_t jc_gpu_va);
|
||||
void pandecode_abort_on_fault_v7(struct pandecode_context *ctx,
|
||||
mali_ptr jc_gpu_va);
|
||||
uint64_t jc_gpu_va);
|
||||
void pandecode_abort_on_fault_v9(struct pandecode_context *ctx,
|
||||
mali_ptr jc_gpu_va);
|
||||
uint64_t jc_gpu_va);
|
||||
|
||||
void pandecode_interpret_cs_v10(struct pandecode_context *ctx, mali_ptr queue,
|
||||
void pandecode_interpret_cs_v10(struct pandecode_context *ctx, uint64_t queue,
|
||||
uint32_t size, unsigned gpu_id, uint32_t *regs);
|
||||
void pandecode_cs_binary_v10(struct pandecode_context *ctx, mali_ptr bin,
|
||||
void pandecode_cs_binary_v10(struct pandecode_context *ctx, uint64_t bin,
|
||||
uint32_t bin_size, unsigned gpu_id);
|
||||
void pandecode_cs_trace_v10(struct pandecode_context *ctx, mali_ptr trace,
|
||||
void pandecode_cs_trace_v10(struct pandecode_context *ctx, uint64_t trace,
|
||||
uint32_t trace_size, unsigned gpu_id);
|
||||
|
||||
/* Logging infrastructure */
|
||||
|
|
@ -210,7 +210,7 @@ pandecode_log_cont(struct pandecode_context *ctx, const char *format, ...)
|
|||
}
|
||||
|
||||
void pandecode_shader_disassemble(struct pandecode_context *ctx,
|
||||
mali_ptr shader_ptr, unsigned gpu_id);
|
||||
uint64_t shader_ptr, unsigned gpu_id);
|
||||
|
||||
#ifdef PAN_ARCH
|
||||
|
||||
|
|
@ -235,7 +235,7 @@ void GENX(pandecode_dcd)(struct pandecode_context *ctx,
|
|||
#endif
|
||||
|
||||
#if PAN_ARCH <= 5
|
||||
void GENX(pandecode_texture)(struct pandecode_context *ctx, mali_ptr u,
|
||||
void GENX(pandecode_texture)(struct pandecode_context *ctx, uint64_t u,
|
||||
unsigned tex);
|
||||
#else
|
||||
void GENX(pandecode_texture)(struct pandecode_context *ctx, const void *cl,
|
||||
|
|
@ -243,12 +243,12 @@ void GENX(pandecode_texture)(struct pandecode_context *ctx, const void *cl,
|
|||
#endif
|
||||
|
||||
#if PAN_ARCH >= 5
|
||||
mali_ptr GENX(pandecode_blend)(struct pandecode_context *ctx, void *descs,
|
||||
int rt_no, mali_ptr frag_shader);
|
||||
uint64_t GENX(pandecode_blend)(struct pandecode_context *ctx, void *descs,
|
||||
int rt_no, uint64_t frag_shader);
|
||||
#endif
|
||||
|
||||
#if PAN_ARCH >= 6
|
||||
void GENX(pandecode_tiler)(struct pandecode_context *ctx, mali_ptr gpu_va,
|
||||
void GENX(pandecode_tiler)(struct pandecode_context *ctx, uint64_t gpu_va,
|
||||
unsigned gpu_id);
|
||||
#endif
|
||||
|
||||
|
|
@ -258,20 +258,20 @@ void GENX(pandecode_shader_environment)(struct pandecode_context *ctx,
|
|||
unsigned gpu_id);
|
||||
|
||||
void GENX(pandecode_resource_tables)(struct pandecode_context *ctx,
|
||||
mali_ptr addr, const char *label);
|
||||
uint64_t addr, const char *label);
|
||||
|
||||
void GENX(pandecode_fau)(struct pandecode_context *ctx, mali_ptr addr,
|
||||
void GENX(pandecode_fau)(struct pandecode_context *ctx, uint64_t addr,
|
||||
unsigned count, const char *name);
|
||||
|
||||
mali_ptr GENX(pandecode_shader)(struct pandecode_context *ctx, mali_ptr addr,
|
||||
uint64_t GENX(pandecode_shader)(struct pandecode_context *ctx, uint64_t addr,
|
||||
const char *label, unsigned gpu_id);
|
||||
|
||||
void GENX(pandecode_blend_descs)(struct pandecode_context *ctx, mali_ptr blend,
|
||||
unsigned count, mali_ptr frag_shader,
|
||||
void GENX(pandecode_blend_descs)(struct pandecode_context *ctx, uint64_t blend,
|
||||
unsigned count, uint64_t frag_shader,
|
||||
unsigned gpu_id);
|
||||
|
||||
void GENX(pandecode_depth_stencil)(struct pandecode_context *ctx,
|
||||
mali_ptr addr);
|
||||
uint64_t addr);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -107,7 +107,7 @@ pandecode_find_mapped_gpu_mem_containing(struct pandecode_context *ctx,
|
|||
* detect GPU-side memory bugs by validating pointers.
|
||||
*/
|
||||
void
|
||||
pandecode_validate_buffer(struct pandecode_context *ctx, mali_ptr addr,
|
||||
pandecode_validate_buffer(struct pandecode_context *ctx, uint64_t addr,
|
||||
size_t sz)
|
||||
{
|
||||
if (!addr) {
|
||||
|
|
@ -360,7 +360,7 @@ pandecode_dump_mappings(struct pandecode_context *ctx)
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_abort_on_fault(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
||||
pandecode_abort_on_fault(struct pandecode_context *ctx, uint64_t jc_gpu_va,
|
||||
unsigned gpu_id)
|
||||
{
|
||||
simple_mtx_lock(&ctx->lock);
|
||||
|
|
@ -389,7 +389,7 @@ pandecode_abort_on_fault(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_jc(struct pandecode_context *ctx, mali_ptr jc_gpu_va, unsigned gpu_id)
|
||||
pandecode_jc(struct pandecode_context *ctx, uint64_t jc_gpu_va, unsigned gpu_id)
|
||||
{
|
||||
simple_mtx_lock(&ctx->lock);
|
||||
|
||||
|
|
@ -417,7 +417,7 @@ pandecode_jc(struct pandecode_context *ctx, mali_ptr jc_gpu_va, unsigned gpu_id)
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_interpret_cs(struct pandecode_context *ctx, mali_ptr queue_gpu_va,
|
||||
pandecode_interpret_cs(struct pandecode_context *ctx, uint64_t queue_gpu_va,
|
||||
uint32_t size, unsigned gpu_id, uint32_t *regs)
|
||||
{
|
||||
simple_mtx_lock(&ctx->lock);
|
||||
|
|
@ -434,7 +434,7 @@ pandecode_interpret_cs(struct pandecode_context *ctx, mali_ptr queue_gpu_va,
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_cs_binary(struct pandecode_context *ctx, mali_ptr bin_gpu_va,
|
||||
pandecode_cs_binary(struct pandecode_context *ctx, uint64_t bin_gpu_va,
|
||||
uint32_t size, unsigned gpu_id)
|
||||
{
|
||||
simple_mtx_lock(&ctx->lock);
|
||||
|
|
@ -451,7 +451,7 @@ pandecode_cs_binary(struct pandecode_context *ctx, mali_ptr bin_gpu_va,
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_cs_trace(struct pandecode_context *ctx, mali_ptr trace_gpu_va,
|
||||
pandecode_cs_trace(struct pandecode_context *ctx, uint64_t trace_gpu_va,
|
||||
uint32_t size, unsigned gpu_id)
|
||||
{
|
||||
simple_mtx_lock(&ctx->lock);
|
||||
|
|
@ -468,7 +468,7 @@ pandecode_cs_trace(struct pandecode_context *ctx, mali_ptr trace_gpu_va,
|
|||
}
|
||||
|
||||
void
|
||||
pandecode_shader_disassemble(struct pandecode_context *ctx, mali_ptr shader_ptr,
|
||||
pandecode_shader_disassemble(struct pandecode_context *ctx, uint64_t shader_ptr,
|
||||
unsigned gpu_id)
|
||||
{
|
||||
uint8_t *PANDECODE_PTR_VAR(ctx, code, shader_ptr);
|
||||
|
|
|
|||
|
|
@ -463,7 +463,7 @@ pandecode_run_compute(struct pandecode_context *ctx, FILE *fp,
|
|||
|
||||
GENX(pandecode_resource_tables)(ctx, cs_get_u64(qctx, reg_srt), "Resources");
|
||||
|
||||
mali_ptr fau = cs_get_u64(qctx, reg_fau);
|
||||
uint64_t fau = cs_get_u64(qctx, reg_fau);
|
||||
|
||||
if (fau)
|
||||
GENX(pandecode_fau)(ctx, fau & BITFIELD64_MASK(48), fau >> 56, "FAU");
|
||||
|
|
@ -503,7 +503,7 @@ pandecode_run_compute_indirect(struct pandecode_context *ctx, FILE *fp,
|
|||
|
||||
GENX(pandecode_resource_tables)(ctx, cs_get_u64(qctx, reg_srt), "Resources");
|
||||
|
||||
mali_ptr fau = cs_get_u64(qctx, reg_fau);
|
||||
uint64_t fau = cs_get_u64(qctx, reg_fau);
|
||||
|
||||
if (fau)
|
||||
GENX(pandecode_fau)(ctx, fau & BITFIELD64_MASK(48), fau >> 56, "FAU");
|
||||
|
|
@ -545,10 +545,10 @@ pandecode_run_tiling(struct pandecode_context *ctx, FILE *fp,
|
|||
unsigned reg_spd = 16 + I->spd_select * 2;
|
||||
unsigned reg_tsd = 24 + I->tsd_select;
|
||||
|
||||
mali_ptr srt = cs_get_u64(qctx, reg_srt);
|
||||
mali_ptr fau = cs_get_u64(qctx, reg_fau);
|
||||
mali_ptr spd = cs_get_u64(qctx, reg_spd);
|
||||
mali_ptr tsd = cs_get_u64(qctx, reg_tsd);
|
||||
uint64_t srt = cs_get_u64(qctx, reg_srt);
|
||||
uint64_t fau = cs_get_u64(qctx, reg_fau);
|
||||
uint64_t spd = cs_get_u64(qctx, reg_spd);
|
||||
uint64_t tsd = cs_get_u64(qctx, reg_tsd);
|
||||
|
||||
if (srt)
|
||||
GENX(pandecode_resource_tables)(ctx, srt, "Fragment resources");
|
||||
|
|
@ -590,7 +590,7 @@ pandecode_run_tiling(struct pandecode_context *ctx, FILE *fp,
|
|||
pandecode_log(ctx, "Vertex position array: %" PRIx64 "\n",
|
||||
cs_get_u64(qctx, 48));
|
||||
|
||||
mali_ptr blend = cs_get_u64(qctx, 50);
|
||||
uint64_t blend = cs_get_u64(qctx, 50);
|
||||
GENX(pandecode_blend_descs)(ctx, blend & ~7, blend & 7, 0, qctx->gpu_id);
|
||||
|
||||
DUMP_ADDR(ctx, DEPTH_STENCIL, cs_get_u64(qctx, 52), "Depth/stencil");
|
||||
|
|
@ -645,9 +645,9 @@ pandecode_run_idvs(struct pandecode_context *ctx, FILE *fp,
|
|||
if (frag_srt)
|
||||
GENX(pandecode_resource_tables)(ctx, frag_srt, "Fragment resources");
|
||||
|
||||
mali_ptr position_fau = cs_get_u64(qctx, reg_position_fau);
|
||||
mali_ptr vary_fau = cs_get_u64(qctx, reg_vary_fau);
|
||||
mali_ptr fragment_fau = cs_get_u64(qctx, reg_frag_fau);
|
||||
uint64_t position_fau = cs_get_u64(qctx, reg_position_fau);
|
||||
uint64_t vary_fau = cs_get_u64(qctx, reg_vary_fau);
|
||||
uint64_t fragment_fau = cs_get_u64(qctx, reg_frag_fau);
|
||||
|
||||
if (position_fau) {
|
||||
uint64_t lo = position_fau & BITFIELD64_MASK(48);
|
||||
|
|
@ -720,7 +720,7 @@ pandecode_run_idvs(struct pandecode_context *ctx, FILE *fp,
|
|||
if (tiler_flags.secondary_shader)
|
||||
pandecode_log(ctx, "Varying allocation: %u\n", cs_get_u32(qctx, 48));
|
||||
|
||||
mali_ptr blend = cs_get_u64(qctx, 50);
|
||||
uint64_t blend = cs_get_u64(qctx, 50);
|
||||
GENX(pandecode_blend_descs)(ctx, blend & ~7, blend & 7, 0, qctx->gpu_id);
|
||||
|
||||
DUMP_ADDR(ctx, DEPTH_STENCIL, cs_get_u64(qctx, 52), "Depth/stencil");
|
||||
|
|
@ -921,7 +921,7 @@ interpret_cs_instr(struct pandecode_context *ctx, struct queue_ctx *qctx)
|
|||
|
||||
case MALI_CS_OPCODE_LOAD_MULTIPLE: {
|
||||
pan_unpack(bytes, CS_LOAD_MULTIPLE, I);
|
||||
mali_ptr addr =
|
||||
uint64_t addr =
|
||||
((uint64_t)qctx->regs[I.address + 1] << 32) | qctx->regs[I.address];
|
||||
addr += I.offset;
|
||||
|
||||
|
|
@@ -1046,7 +1046,7 @@ no_interpret:
}

void
GENX(pandecode_interpret_cs)(struct pandecode_context *ctx, mali_ptr queue,
GENX(pandecode_interpret_cs)(struct pandecode_context *ctx, uint64_t queue,
uint32_t size, unsigned gpu_id, uint32_t *regs)
{
pandecode_dump_file_open(ctx);

@@ -1341,7 +1341,7 @@ collect_indirect_branch_targets(struct cs_code_cfg *cfg,

static struct cs_code_cfg *
get_cs_cfg(struct pandecode_context *ctx, struct hash_table_u64 *symbols,
mali_ptr bin, uint32_t bin_size)
uint64_t bin, uint32_t bin_size)
{
uint32_t instr_count = bin_size / sizeof(uint64_t);
struct cs_code_cfg *cfg = _mesa_hash_table_u64_search(symbols, bin);

@@ -1449,7 +1449,7 @@ get_cs_cfg(struct pandecode_context *ctx, struct hash_table_u64 *symbols,
}

static void
print_cs_binary(struct pandecode_context *ctx, mali_ptr bin,
print_cs_binary(struct pandecode_context *ctx, uint64_t bin,
struct cs_code_cfg *cfg, const char *name)
{
pandecode_log(ctx, "%s@%" PRIx64 "{\n", name, bin);

@@ -1520,7 +1520,7 @@ print_cs_binary(struct pandecode_context *ctx, mali_ptr bin,
}

void
GENX(pandecode_cs_binary)(struct pandecode_context *ctx, mali_ptr bin,
GENX(pandecode_cs_binary)(struct pandecode_context *ctx, uint64_t bin,
uint32_t bin_size, unsigned gpu_id)
{
if (!bin_size)

@@ -1546,7 +1546,7 @@ GENX(pandecode_cs_binary)(struct pandecode_context *ctx, mali_ptr bin,
}

void
GENX(pandecode_cs_trace)(struct pandecode_context *ctx, mali_ptr trace,
GENX(pandecode_cs_trace)(struct pandecode_context *ctx, uint64_t trace,
uint32_t trace_size, unsigned gpu_id)
{
pandecode_dump_file_open(ctx);

@@ -61,7 +61,7 @@ pandecode_primitive(struct pandecode_context *ctx, const void *p)

#if PAN_ARCH <= 7
static void
pandecode_attributes(struct pandecode_context *ctx, mali_ptr addr, int count,
pandecode_attributes(struct pandecode_context *ctx, uint64_t addr, int count,
bool varying, enum mali_job_type job_type)
{
char *prefix = varying ? "Varying" : "Attribute";

@@ -106,7 +106,7 @@ pandecode_attributes(struct pandecode_context *ctx, mali_ptr addr, int count,

static unsigned
pandecode_attribute_meta(struct pandecode_context *ctx, int count,
mali_ptr attribute, bool varying)
uint64_t attribute, bool varying)
{
unsigned max = 0;

@@ -170,7 +170,7 @@ pandecode_invocation(struct pandecode_context *ctx, const void *i)
}

static void
pandecode_textures(struct pandecode_context *ctx, mali_ptr textures,
pandecode_textures(struct pandecode_context *ctx, uint64_t textures,
unsigned texture_count)
{
if (!textures)

@@ -186,10 +186,10 @@ pandecode_textures(struct pandecode_context *ctx, mali_ptr textures,
for (unsigned tex = 0; tex < texture_count; ++tex)
GENX(pandecode_texture)(ctx, cl + pan_size(TEXTURE) * tex, tex);
#else
mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures);
uint64_t *PANDECODE_PTR_VAR(ctx, u, textures);

for (int tex = 0; tex < texture_count; ++tex) {
mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(mali_ptr));
uint64_t *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(uint64_t));
char *a = pointer_as_memory_reference(ctx, *u);
pandecode_log(ctx, "%s,\n", a);
free(a);

@@ -197,7 +197,7 @@ pandecode_textures(struct pandecode_context *ctx, mali_ptr textures,

/* Now, finally, descend down into the texture descriptor */
for (unsigned tex = 0; tex < texture_count; ++tex) {
mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(mali_ptr));
uint64_t *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(uint64_t));
GENX(pandecode_texture)(ctx, *u, tex);
}
#endif

@@ -206,7 +206,7 @@ pandecode_textures(struct pandecode_context *ctx, mali_ptr textures,
}

static void
pandecode_samplers(struct pandecode_context *ctx, mali_ptr samplers,
pandecode_samplers(struct pandecode_context *ctx, uint64_t samplers,
unsigned sampler_count)
{
pandecode_log(ctx, "Samplers %" PRIx64 ":\n", samplers);

@@ -221,13 +221,13 @@ pandecode_samplers(struct pandecode_context *ctx, mali_ptr samplers,
}

static void
pandecode_uniform_buffers(struct pandecode_context *ctx, mali_ptr pubufs,
pandecode_uniform_buffers(struct pandecode_context *ctx, uint64_t pubufs,
int ubufs_count)
{
uint64_t *PANDECODE_PTR_VAR(ctx, ubufs, pubufs);

for (int i = 0; i < ubufs_count; i++) {
mali_ptr addr = (ubufs[i] >> 10) << 2;
uint64_t addr = (ubufs[i] >> 10) << 2;
unsigned size = addr ? (((ubufs[i] & ((1 << 10) - 1)) + 1) * 16) : 0;

pandecode_validate_buffer(ctx, addr, size);
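The pandecode_uniform_buffers hunk above decodes packed 64-bit uniform-buffer words. A minimal sketch of the pack/unpack arithmetic that decode implies; the helper names are hypothetical, and only the bit layout (low 10 bits hold size/16 - 1, the remaining bits hold the address shifted right by 2) is taken from the code above:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helpers mirroring the decode above. */
static inline uint64_t
pack_ubo_word(uint64_t addr, unsigned size_bytes)
{
   /* addr must be 4-byte aligned, size_bytes a non-zero multiple of 16. */
   return ((addr >> 2) << 10) | ((size_bytes / 16) - 1);
}

static inline uint64_t
ubo_word_address(uint64_t word)
{
   return (word >> 10) << 2;
}

static inline unsigned
ubo_word_size(uint64_t word)
{
   return ((word & ((1 << 10) - 1)) + 1) * 16;
}

int
main(void)
{
   uint64_t w = pack_ubo_word(0x1000, 256);
   assert(ubo_word_address(w) == 0x1000);
   assert(ubo_word_size(w) == 256);
   return 0;
}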
@@ -241,7 +241,7 @@ pandecode_uniform_buffers(struct pandecode_context *ctx, mali_ptr pubufs,
}

static void
pandecode_uniforms(struct pandecode_context *ctx, mali_ptr uniforms,
pandecode_uniforms(struct pandecode_context *ctx, uint64_t uniforms,
unsigned uniform_count)
{
pandecode_validate_buffer(ctx, uniforms, uniform_count * 16);

@@ -319,7 +319,7 @@ GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
#endif

#if PAN_ARCH == 4
mali_ptr shader = state.blend_shader & ~0xF;
uint64_t shader = state.blend_shader & ~0xF;
if (state.multisample_misc.blend_shader && shader)
pandecode_shader_disassemble(ctx, shader, gpu_id);
#endif

@@ -336,7 +336,7 @@ GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
void *blend_base = ((void *)cl) + pan_size(RENDERER_STATE);

for (unsigned i = 0; i < fbd_info.rt_count; i++) {
mali_ptr shader =
uint64_t shader =
GENX(pandecode_blend)(ctx, blend_base, i, state.shader.shader);
if (shader & ~0xF)
pandecode_shader_disassemble(ctx, shader, gpu_id);

@@ -400,7 +400,7 @@ GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
static void
pandecode_vertex_compute_geometry_job(struct pandecode_context *ctx,
const struct MALI_JOB_HEADER *h,
mali_ptr job, unsigned gpu_id)
uint64_t job, unsigned gpu_id)
{
struct mali_compute_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
pan_section_unpack(p, COMPUTE_JOB, DRAW, draw);

@@ -417,7 +417,7 @@ pandecode_vertex_compute_geometry_job(struct pandecode_context *ctx,
#endif

static void
pandecode_write_value_job(struct pandecode_context *ctx, mali_ptr job)
pandecode_write_value_job(struct pandecode_context *ctx, uint64_t job)
{
struct mali_write_value_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
pan_section_unpack(p, WRITE_VALUE_JOB, PAYLOAD, u);

@@ -426,7 +426,7 @@ pandecode_write_value_job(struct pandecode_context *ctx, mali_ptr job)
}

static void
pandecode_cache_flush_job(struct pandecode_context *ctx, mali_ptr job)
pandecode_cache_flush_job(struct pandecode_context *ctx, uint64_t job)
{
struct mali_cache_flush_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
pan_section_unpack(p, CACHE_FLUSH_JOB, PAYLOAD, u);

@@ -436,7 +436,7 @@ pandecode_cache_flush_job(struct pandecode_context *ctx, mali_ptr job)

static void
pandecode_tiler_job(struct pandecode_context *ctx,
const struct MALI_JOB_HEADER *h, mali_ptr job,
const struct MALI_JOB_HEADER *h, uint64_t job,
unsigned gpu_id)
{
struct mali_tiler_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

@@ -473,7 +473,7 @@ pandecode_tiler_job(struct pandecode_context *ctx,
}

static void
pandecode_fragment_job(struct pandecode_context *ctx, mali_ptr job,
pandecode_fragment_job(struct pandecode_context *ctx, uint64_t job,
unsigned gpu_id)
{
struct mali_fragment_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

@@ -510,7 +510,7 @@ pandecode_fragment_job(struct pandecode_context *ctx, mali_ptr job,
#if PAN_ARCH == 6 || PAN_ARCH == 7
static void
pandecode_indexed_vertex_job(struct pandecode_context *ctx,
const struct MALI_JOB_HEADER *h, mali_ptr job,
const struct MALI_JOB_HEADER *h, uint64_t job,
unsigned gpu_id)
{
struct mali_indexed_vertex_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

@@ -544,7 +544,7 @@ pandecode_indexed_vertex_job(struct pandecode_context *ctx,

#if PAN_ARCH == 9
static void
pandecode_malloc_vertex_job(struct pandecode_context *ctx, mali_ptr job,
pandecode_malloc_vertex_job(struct pandecode_context *ctx, uint64_t job,
unsigned gpu_id)
{
struct mali_malloc_vertex_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

@@ -577,7 +577,7 @@ pandecode_malloc_vertex_job(struct pandecode_context *ctx, mali_ptr job,
}

static void
pandecode_compute_job(struct pandecode_context *ctx, mali_ptr job,
pandecode_compute_job(struct pandecode_context *ctx, uint64_t job,
unsigned gpu_id)
{
struct mali_compute_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

@@ -593,7 +593,7 @@ pandecode_compute_job(struct pandecode_context *ctx, mali_ptr job,
* GPU using the job manager.
*/
void
GENX(pandecode_jc)(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
GENX(pandecode_jc)(struct pandecode_context *ctx, uint64_t jc_gpu_va,
unsigned gpu_id)
{
pandecode_dump_file_open(ctx);

@@ -601,7 +601,7 @@ GENX(pandecode_jc)(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
struct set *va_set = _mesa_pointer_set_create(NULL);
struct set_entry *entry = NULL;

mali_ptr next_job = 0;
uint64_t next_job = 0;

do {
struct mali_job_header_packed *hdr =

@@ -674,9 +674,9 @@ GENX(pandecode_jc)(struct pandecode_context *ctx, mali_ptr jc_gpu_va,

void
GENX(pandecode_abort_on_fault)(struct pandecode_context *ctx,
mali_ptr jc_gpu_va)
uint64_t jc_gpu_va)
{
mali_ptr next_job = 0;
uint64_t next_job = 0;

do {
pan_unpack(PANDECODE_PTR(ctx, jc_gpu_va, struct mali_job_header_packed),
@@ -1069,7 +1069,7 @@ GENX(pan_emit_fbd)(const struct pan_fb_info *fb, unsigned layer_idx,

#if PAN_ARCH <= 9
void
GENX(pan_emit_fragment_job_payload)(const struct pan_fb_info *fb, mali_ptr fbd,
GENX(pan_emit_fragment_job_payload)(const struct pan_fb_info *fb, uint64_t fbd,
void *out)
{
pan_section_pack(out, FRAGMENT_JOB, PAYLOAD, payload) {

@@ -71,7 +71,7 @@ struct pan_fb_zs_attachment {
struct pan_tiler_context {
union {
struct {
mali_ptr desc;
uint64_t desc;
/* A tiler descriptor can only handle a limited amount of layers.
* If the number of layers is bigger than this, several tiler
* descriptors will be issued, each with a different layer_offset.

@@ -79,7 +79,7 @@ struct pan_tiler_context {
uint8_t layer_offset;
} valhall;
struct {
mali_ptr desc;
uint64_t desc;
} bifrost;
struct {
/* Sum of vertex counts (for non-indexed draws), index counts, or ~0 if

@@ -88,9 +88,9 @@ struct pan_tiler_context {
uint32_t vertex_count;
bool disable;
bool no_hierarchical_tiling;
mali_ptr polygon_list;
uint64_t polygon_list;
struct {
mali_ptr start;
uint64_t start;
unsigned size;
} heap;
} midgard;

@@ -99,13 +99,13 @@ struct pan_tiler_context {

struct pan_tls_info {
struct {
mali_ptr ptr;
uint64_t ptr;
unsigned size;
} tls;

struct {
unsigned instances;
mali_ptr ptr;
uint64_t ptr;
unsigned size;
} wls;
};

@@ -131,7 +131,7 @@ struct pan_fb_info {

struct {
unsigned stride;
mali_ptr base;
uint64_t base;
} tile_map;

union {

@@ -144,7 +144,7 @@ struct pan_fb_info {
unsigned cbuf_allocation;

/* Sample position array. */
mali_ptr sample_positions;
uint64_t sample_positions;

/* Only used on Valhall */
bool sprite_coord_origin;

@@ -198,7 +198,7 @@ unsigned GENX(pan_emit_fbd)(const struct pan_fb_info *fb, unsigned layer_idx,

#if PAN_ARCH <= 9
void GENX(pan_emit_fragment_job_payload)(const struct pan_fb_info *fb,
mali_ptr fbd, void *out);
uint64_t fbd, void *out);
#endif

#endif /* ifdef PAN_ARCH */

@@ -229,7 +229,7 @@ panfrost_get_z_internal_format(enum pipe_format fmt)
#if PAN_ARCH >= 9
static inline void
panfrost_make_resource_table(struct panfrost_ptr base, unsigned index,
mali_ptr address, unsigned resource_count)
uint64_t address, unsigned resource_count)
{
if (resource_count == 0)
return;

@@ -38,10 +38,10 @@ struct pan_indirect_dispatch_meta {
unsigned gpu_id;

/* Renderer state descriptor. */
mali_ptr rsd;
uint64_t rsd;

/* Thread storage descriptor. */
mali_ptr tsd;
uint64_t tsd;

/* Shader binary pool. */
struct pan_pool *bin_pool;

@@ -54,9 +54,9 @@ struct pan_indirect_dispatch_meta {
};

struct pan_indirect_dispatch_info {
mali_ptr job;
mali_ptr indirect_dim;
mali_ptr num_wg_sysval[3];
uint64_t job;
uint64_t indirect_dim;
uint64_t num_wg_sysval[3];
} PACKED;

static inline void

@@ -33,7 +33,7 @@
/* Job chain */
struct pan_jc {
/* The first job in the chain */
mali_ptr first_job;
uint64_t first_job;

/* The number of jobs in the chain, essentially */
unsigned job_index;

@@ -236,7 +236,7 @@ pan_jc_add_job(struct pan_jc *jc, enum mali_job_type type, bool barrier,
static inline struct panfrost_ptr
pan_jc_initialize_tiler(struct pan_pool *pool,
struct pan_jc *jc,
mali_ptr polygon_list)
uint64_t polygon_list)
{
struct panfrost_ptr transfer = {0};

@@ -694,7 +694,7 @@ pan_iview_get_surface(const struct pan_image_view *iview, unsigned level,

bool is_3d = image->layout.dim == MALI_TEXTURE_DIMENSION_3D;
const struct pan_image_slice_layout *slice = &image->layout.slices[level];
mali_ptr base = image->data.base + image->data.offset;
uint64_t base = image->data.base + image->data.offset;

if (drm_is_afbc(image->layout.modifier)) {
assert(!sample);

@@ -35,7 +35,7 @@ struct panfrost_ptr {
void *cpu;

/* GPU address */
mali_ptr gpu;
uint64_t gpu;
};

/* Represents grow-only memory. */

@@ -65,7 +65,7 @@ struct panfrost_ptr pan_pool_alloc_aligned(struct pan_pool *pool, size_t sz,
return alloc_func(pool, sz, alignment); \
}

static inline mali_ptr
static inline uint64_t
pan_pool_upload_aligned(struct pan_pool *pool, const void *data, size_t sz,
unsigned alignment)
{

@@ -77,7 +77,7 @@ pan_pool_upload_aligned(struct pan_pool *pool, const void *data, size_t sz,
return transfer.gpu;
}

static inline mali_ptr
static inline uint64_t
pan_pool_upload(struct pan_pool *pool, const void *data, size_t sz)
{
return pan_pool_upload_aligned(pool, data, sz, sz);
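Both pool upload helpers above now return the GPU virtual address as a plain uint64_t; as far as I can tell mali_ptr was only a typedef for uint64_t, so callers keep compiling unchanged. A minimal, hypothetical usage sketch (pool creation omitted, pan_pool.h assumed to be included):

#include <stdint.h>

/* Hypothetical caller: stage a small blob in a pan_pool and keep the 64-bit
 * GPU virtual address of the uploaded copy. */
static uint64_t
upload_example_blob(struct pan_pool *pool)
{
   const uint32_t blob[4] = {1, 2, 3, 4};

   /* The returned address can be written straight into another descriptor. */
   return pan_pool_upload_aligned(pool, blob, sizeof(blob), 64);
}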
@@ -241,7 +241,7 @@ pan_shader_prepare_bifrost_rsd(const struct pan_shader_info *info,

static inline void
pan_shader_prepare_rsd(const struct pan_shader_info *shader_info,
mali_ptr shader_ptr, struct MALI_RENDERER_STATE *rsd)
uint64_t shader_ptr, struct MALI_RENDERER_STATE *rsd)
{
#if PAN_ARCH <= 5
shader_ptr |= shader_info->midgard.first_tag;

@@ -192,9 +192,9 @@ panfrost_get_surface_strides(const struct pan_image_layout *layout, unsigned l,
}
}

static mali_ptr
static uint64_t
panfrost_get_surface_pointer(const struct pan_image_layout *layout,
enum mali_texture_dimension dim, mali_ptr base,
enum mali_texture_dimension dim, uint64_t base,
unsigned l, unsigned i, unsigned s)
{
unsigned offset;

@@ -211,7 +211,7 @@ panfrost_get_surface_pointer(const struct pan_image_layout *layout,
}

struct pan_image_section_info {
mali_ptr pointer;
uint64_t pointer;
int32_t row_stride;
int32_t surface_stride;
};

@@ -223,7 +223,7 @@ get_image_section_info(const struct pan_image_view *iview,
{
const struct util_format_description *desc =
util_format_description(iview->format);
mali_ptr base = plane->data.base + plane->data.offset;
uint64_t base = plane->data.base + plane->data.offset;
struct pan_image_section_info info = {0};

if (iview->buf.size) {

@@ -416,7 +416,7 @@ panfrost_emit_plane(const struct pan_image_view *iview,
const struct pan_image_layout *layout = &plane->layout;
int32_t row_stride = sections[plane_index].row_stride;
int32_t surface_stride = sections[plane_index].surface_stride;
mali_ptr pointer = sections[plane_index].pointer;
uint64_t pointer = sections[plane_index].pointer;

assert(row_stride >= 0 && surface_stride >= 0 && "negative stride");

@@ -116,7 +116,7 @@ struct pan_image_layout {
};

struct pan_image_mem {
mali_ptr base;
uint64_t base;
unsigned offset;
};

@@ -451,10 +451,10 @@ unsigned panfrost_from_legacy_stride(unsigned legacy_stride,

struct pan_surface {
union {
mali_ptr data;
uint64_t data;
struct {
mali_ptr header;
mali_ptr body;
uint64_t header;
uint64_t body;
} afbc;
};
};

@@ -62,13 +62,13 @@ void pandecode_jc(struct pandecode_context *ctx, uint64_t jc_gpu_va,
unsigned gpu_id);

void pandecode_interpret_cs(struct pandecode_context *ctx,
mali_ptr queue_gpu_va, uint32_t size,
uint64_t queue_gpu_va, uint32_t size,
unsigned gpu_id, uint32_t *regs);

void pandecode_cs_binary(struct pandecode_context *ctx, mali_ptr binary_gpu_va,
void pandecode_cs_binary(struct pandecode_context *ctx, uint64_t binary_gpu_va,
uint32_t size, unsigned gpu_id);

void pandecode_cs_trace(struct pandecode_context *ctx, mali_ptr trace_gpu_va,
void pandecode_cs_trace(struct pandecode_context *ctx, uint64_t trace_gpu_va,
uint32_t size, unsigned gpu_id);

void pandecode_abort_on_fault(struct pandecode_context *ctx, uint64_t jc_gpu_va,

@@ -21,11 +21,11 @@
#include "panvk_shader.h"

struct pan_nir_desc_copy_info {
mali_ptr sets[MAX_SETS];
mali_ptr tables[PANVK_BIFROST_DESC_TABLE_COUNT];
mali_ptr img_attrib_table;
uint64_t sets[MAX_SETS];
uint64_t tables[PANVK_BIFROST_DESC_TABLE_COUNT];
uint64_t img_attrib_table;
struct {
mali_ptr table;
uint64_t table;
uint32_t limits[PANVK_BIFROST_DESC_TABLE_COUNT];
uint32_t attrib_buf_idx_offset;
} desc_copy;

@@ -265,7 +265,7 @@ single_desc_copy(nir_builder *b, nir_def *desc_copy_idx)
nir_pop_if(b, NULL);
}

static mali_ptr
static uint64_t
panvk_meta_desc_copy_rsd(struct panvk_device *dev)
{
struct panvk_physical_device *phys_dev =

@@ -344,7 +344,7 @@ panvk_per_arch(meta_get_copy_desc_job)(
if (!shader)
return VK_SUCCESS;

mali_ptr copy_table = panvk_priv_mem_dev_addr(shader->desc_info.others.map);
uint64_t copy_table = panvk_priv_mem_dev_addr(shader->desc_info.others.map);
if (!copy_table)
return VK_SUCCESS;

@@ -381,7 +381,7 @@ panvk_per_arch(meta_get_copy_desc_job)(
copy_info.tables[i] = shader_desc_state->tables[i];
}

mali_ptr desc_copy_rsd = panvk_meta_desc_copy_rsd(dev);
uint64_t desc_copy_rsd = panvk_meta_desc_copy_rsd(dev);
if (!desc_copy_rsd)
return VK_ERROR_OUT_OF_DEVICE_MEMORY;

@@ -83,7 +83,7 @@ get_fbd_size(bool has_zs_ext, uint32_t rt_count)
offsetof(struct panvk_cs_subqueue_context, tiler_oom_ctx._name)
#define TILER_OOM_CTX_FBDPTR_OFFSET(_pass) \
(TILER_OOM_CTX_FIELD_OFFSET(fbds) + \
(PANVK_IR_##_pass##_PASS * sizeof(mali_ptr)))
(PANVK_IR_##_pass##_PASS * sizeof(uint64_t)))

struct panvk_cs_subqueue_context {
uint64_t syncobjs;

@@ -96,10 +96,10 @@ struct panvk_cs_subqueue_context {
} render;
struct {
uint32_t counter;
mali_ptr fbds[PANVK_IR_PASS_COUNT];
uint64_t fbds[PANVK_IR_PASS_COUNT];
uint32_t td_count;
uint32_t layer_count;
mali_ptr reg_dump_addr;
uint64_t reg_dump_addr;
} tiler_oom_ctx;
struct {
uint64_t syncobjs;
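The TILER_OOM_CTX_FBDPTR_OFFSET hunk above only swaps the stride type: the per-pass FBD slots are 64-bit GPU addresses either way, so sizeof(mali_ptr) and sizeof(uint64_t) yield identical offsets. A stripped-down model of the offsetof-plus-stride pattern, using a deliberately simplified and hypothetical struct:

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_IR_PASS_COUNT 2

/* Simplified stand-in for the subqueue context; the real struct has more
 * fields around the tiler_oom_ctx block. */
struct example_tiler_oom_ctx {
   uint32_t counter;
   uint64_t fbds[EXAMPLE_IR_PASS_COUNT];
};

/* Byte offset of the FBD pointer for one incremental-rendering pass. */
#define EXAMPLE_FBDPTR_OFFSET(pass) \
   (offsetof(struct example_tiler_oom_ctx, fbds) + (pass) * sizeof(uint64_t))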
@@ -30,7 +30,7 @@ struct panvk_tiler_heap {
struct panvk_priv_mem desc;
struct {
uint32_t handle;
mali_ptr dev_addr;
uint64_t dev_addr;
} context;
};

@@ -260,7 +260,7 @@ cmd_dispatch(struct panvk_cmd_buffer *cmdbuf, struct panvk_dispatch_info *info)
cs_move64_to(b, cs_sr_reg64(b, 0), cs_desc_state->res_table);

if (compute_state_dirty(cmdbuf, PUSH_UNIFORMS)) {
mali_ptr fau_ptr = cmdbuf->state.compute.push_uniforms |
uint64_t fau_ptr = cmdbuf->state.compute.push_uniforms |
((uint64_t)shader->fau.total_count << 56);
cs_move64_to(b, cs_sr_reg64(b, 8), fau_ptr);
}
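In the cmd_dispatch hunk above the FAU count is packed into the top byte of the 64-bit FAU pointer before it is moved into the command-stream register. A minimal sketch of that packing; the helper names are hypothetical, the bit positions are taken from the expression above:

#include <assert.h>
#include <stdint.h>

/* Hypothetical pack/unpack helpers for a FAU pointer: address in the low
 * 56 bits, FAU count in bits [63:56]. */
static inline uint64_t
pack_fau_ptr(uint64_t fau_va, uint8_t fau_count)
{
   return fau_va | ((uint64_t)fau_count << 56);
}

static inline uint64_t
fau_ptr_address(uint64_t fau_ptr)
{
   return fau_ptr & ((1ull << 56) - 1);
}

static inline uint8_t
fau_ptr_count(uint64_t fau_ptr)
{
   return (uint8_t)(fau_ptr >> 56);
}

int
main(void)
{
   uint64_t packed = pack_fau_ptr(0x10000, 4);
   assert(fau_ptr_address(packed) == 0x10000);
   assert(fau_ptr_count(packed) == 4);
   return 0;
}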
@@ -1219,7 +1219,7 @@ prepare_push_uniforms(struct panvk_cmd_buffer *cmdbuf)
}

if (fs_user_dirty(cmdbuf) || gfx_state_dirty(cmdbuf, FS_PUSH_UNIFORMS)) {
mali_ptr fau_ptr = 0;
uint64_t fau_ptr = 0;

if (fs) {
result = panvk_per_arch(cmd_prepare_push_uniforms)(cmdbuf, fs);

@@ -2202,7 +2202,7 @@ issue_fragment_jobs(struct panvk_cmd_buffer *cmdbuf)
struct cs_index addr_reg = cs_scratch_reg64(b, 0);
struct cs_index length_reg = cs_scratch_reg32(b, 2);
uint32_t handler_idx = calc_tiler_oom_handler_idx(cmdbuf);
mali_ptr handler_addr = dev->tiler_oom.handlers_bo->addr.dev +
uint64_t handler_addr = dev->tiler_oom.handlers_bo->addr.dev +
handler_idx * dev->tiler_oom.handler_stride;
cs_move64_to(b, addr_reg, handler_addr);
cs_move32_to(b, length_reg, dev->tiler_oom.handler_stride);

@@ -66,7 +66,7 @@ panvk_cmd_begin_occlusion_query(struct panvk_cmd_buffer *cmd,
struct panvk_query_pool *pool, uint32_t query,
VkQueryControlFlags flags)
{
mali_ptr report_addr = panvk_query_report_dev_addr(pool, query);
uint64_t report_addr = panvk_query_report_dev_addr(pool, query);

cmd->state.gfx.occlusion_query.ptr = report_addr;
cmd->state.gfx.occlusion_query.mode = flags & VK_QUERY_CONTROL_PRECISE_BIT

@@ -133,7 +133,7 @@ static void
panvk_copy_occlusion_query_results(struct panvk_cmd_buffer *cmd,
struct panvk_query_pool *pool,
uint32_t first_query, uint32_t query_count,
mali_ptr dst_buffer_addr,
uint64_t dst_buffer_addr,
VkDeviceSize stride,
VkQueryResultFlags flags)
{

@@ -294,7 +294,7 @@ panvk_per_arch(CmdCopyQueryPoolResults)(
VK_FROM_HANDLE(panvk_query_pool, pool, queryPool);
VK_FROM_HANDLE(panvk_buffer, dst_buffer, dstBuffer);

mali_ptr dst_buffer_addr = panvk_buffer_gpu_ptr(dst_buffer, dstOffset);
uint64_t dst_buffer_addr = panvk_buffer_gpu_ptr(dst_buffer, dstOffset);

switch (pool->vk.query_type) {
case VK_QUERY_TYPE_OCCLUSION: {

@@ -1154,7 +1154,7 @@ panvk_queue_submit_process_debug(const struct panvk_queue_submit *submit)
ctx->render.desc_ringbuf.pos <= queue->render_desc_ringbuf.size ||
!"OOB access on the desc tracebuf, pass a bigger PANVK_DESC_TRACEBUF_SIZE");

mali_ptr trace = queue->subqueues[i].tracebuf.addr.dev;
uint64_t trace = queue->subqueues[i].tracebuf.addr.dev;

pandecode_user_msg(decode_ctx, "\nCS traces on subqueue %d\n\n", i);
pandecode_cs_trace(decode_ctx, trace, trace_size, props->gpu_prod_id);

@@ -11,7 +11,7 @@
#include "panvk_priv_bo.h"

static void
cmd_write_timestamp(struct cs_builder *b, mali_ptr addr)
cmd_write_timestamp(struct cs_builder *b, uint64_t addr)
{
const struct cs_index addr_reg = cs_scratch_reg64(b, 0);
/* abuse DEFERRED_SYNC */

@@ -23,7 +23,7 @@ cmd_write_timestamp(struct cs_builder *b, mali_ptr addr)
}

static void
cmd_copy_data(struct cs_builder *b, mali_ptr dst_addr, mali_ptr src_addr,
cmd_copy_data(struct cs_builder *b, uint64_t dst_addr, uint64_t src_addr,
uint32_t size)
{
assert((dst_addr | src_addr | size) % sizeof(uint32_t) == 0);

@@ -80,7 +80,7 @@ panvk_utrace_record_ts(struct u_trace *ut, void *cs, void *timestamps,
{
struct cs_builder *b = get_builder(cs, ut);
const struct panvk_priv_bo *bo = timestamps;
const mali_ptr addr = bo->addr.dev + offset_B;
const uint64_t addr = bo->addr.dev + offset_B;

cmd_write_timestamp(b, addr);
}

@@ -109,8 +109,8 @@ panvk_per_arch(utrace_copy_buffer)(struct u_trace_context *utctx,
struct cs_builder *b = cmdstream;
const struct panvk_priv_bo *src_bo = ts_from;
const struct panvk_priv_bo *dst_bo = ts_to;
const mali_ptr src_addr = src_bo->addr.dev + from_offset;
const mali_ptr dst_addr = dst_bo->addr.dev + to_offset;
const uint64_t src_addr = src_bo->addr.dev + from_offset;
const uint64_t dst_addr = dst_bo->addr.dev + to_offset;

cmd_copy_data(b, dst_addr, src_addr, size_B);
}

@@ -51,7 +51,7 @@
#include "vk_format.h"

static VkResult
panvk_cmd_prepare_fragment_job(struct panvk_cmd_buffer *cmdbuf, mali_ptr fbd)
panvk_cmd_prepare_fragment_job(struct panvk_cmd_buffer *cmdbuf, uint64_t fbd)
{
const struct pan_fb_info *fbinfo = &cmdbuf->state.gfx.render.fb.info;
struct panvk_batch *batch = cmdbuf->cur_batch;

@@ -150,7 +150,7 @@ panvk_per_arch(cmd_close_batch)(struct panvk_cmd_buffer *cmdbuf)
for (uint32_t i = 0; i < batch->fb.layer_count; i++) {
VkResult result;

mali_ptr fbd = batch->fb.desc.gpu + (batch->fb.desc_stride * i);
uint64_t fbd = batch->fb.desc.gpu + (batch->fb.desc_stride * i);

result = panvk_per_arch(cmd_prepare_tiler_context)(cmdbuf, i);
if (result != VK_SUCCESS)

@@ -228,7 +228,7 @@ panvk_per_arch(cmd_prepare_tiler_context)(struct panvk_cmd_buffer *cmdbuf,
struct panvk_physical_device *phys_dev =
to_panvk_physical_device(cmdbuf->vk.base.device->physical);
struct panvk_batch *batch = cmdbuf->cur_batch;
mali_ptr tiler_desc;
uint64_t tiler_desc;

if (batch->tiler.ctx_descs.gpu) {
tiler_desc =

@@ -61,7 +61,7 @@ panvk_per_arch(CmdDispatchBase)(VkCommandBuffer commandBuffer,
&cmdbuf->state.compute.cs.desc;

panvk_per_arch(cmd_alloc_tls_desc)(cmdbuf, false);
mali_ptr tsd = batch->tls.gpu;
uint64_t tsd = batch->tls.gpu;

result = panvk_per_arch(cmd_prepare_push_descs)(
cmdbuf, desc_state, shader->desc_info.used_set_mask);

@@ -42,25 +42,25 @@ struct panvk_draw_data {
unsigned padded_vertex_count;
struct mali_invocation_packed invocation;
struct {
mali_ptr varyings;
mali_ptr attributes;
mali_ptr attribute_bufs;
uint64_t varyings;
uint64_t attributes;
uint64_t attribute_bufs;
} vs;
struct {
mali_ptr rsd;
mali_ptr varyings;
uint64_t rsd;
uint64_t varyings;
} fs;
mali_ptr varying_bufs;
mali_ptr position;
mali_ptr indices;
uint64_t varying_bufs;
uint64_t position;
uint64_t indices;
union {
mali_ptr psiz;
uint64_t psiz;
float line_width;
};
mali_ptr tls;
mali_ptr fb;
uint64_t tls;
uint64_t fb;
const struct pan_tiler_context *tiler_ctx;
mali_ptr viewport;
uint64_t viewport;
struct {
struct panfrost_ptr vertex_copy_desc;
struct panfrost_ptr frag_copy_desc;

@@ -236,7 +236,7 @@ panvk_draw_prepare_fs_rsd(struct panvk_cmd_buffer *cmdbuf,
struct mali_blend_packed *bds = ptr.cpu + pan_size(RENDERER_STATE);
struct panvk_blend_info *binfo = &cmdbuf->state.gfx.cb.info;

mali_ptr fs_code = panvk_shader_get_dev_addr(fs);
uint64_t fs_code = panvk_shader_get_dev_addr(fs);

if (fs_info != NULL) {
panvk_per_arch(blend_emit_descs)(cmdbuf, bds);

@@ -416,11 +416,11 @@ panvk_draw_prepare_varyings(struct panvk_cmd_buffer *cmdbuf,
ia->primitive_topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
unsigned vertex_count =
draw->padded_vertex_count * draw->info.instance.count;
mali_ptr psiz_buf = 0;
uint64_t psiz_buf = 0;

for (unsigned i = 0; i < PANVK_VARY_BUF_MAX; i++) {
unsigned buf_size = vertex_count * link->buf_strides[i];
mali_ptr buf_addr =
uint64_t buf_addr =
buf_size ? panvk_cmd_alloc_dev_mem(cmdbuf, varying, buf_size, 64).gpu
: 0;
if (buf_size && !buf_addr)

@@ -462,7 +462,7 @@ panvk_draw_emit_attrib_buf(const struct panvk_draw_data *draw,
const struct vk_vertex_binding_state *buf_info,
const struct panvk_attrib_buf *buf, void *desc)
{
mali_ptr addr = buf->address & ~63ULL;
uint64_t addr = buf->address & ~63ULL;
unsigned size = buf->size + (buf->address & 63);
unsigned divisor = draw->padded_vertex_count * buf_info->divisor;
bool per_instance = buf_info->input_rate == VK_VERTEX_INPUT_RATE_INSTANCE;
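panvk_draw_emit_attrib_buf above rounds the vertex-buffer base down to 64 bytes and grows the size by the slack so the original range stays covered. A self-contained sketch of that fixup (the struct fields mirror the panvk_attrib_buf definition elsewhere in this diff; the helper function itself is hypothetical):

#include <assert.h>
#include <stdint.h>

struct example_attrib_buf {
   uint64_t address;
   unsigned size;
};

/* Hypothetical helper reproducing the alignment fixup above. */
static void
align_attrib_buf(const struct example_attrib_buf *buf,
                 uint64_t *out_addr, unsigned *out_size)
{
   /* Round the base down to 64 bytes and add the slack back to the size. */
   *out_addr = buf->address & ~63ULL;
   *out_size = buf->size + (unsigned)(buf->address & 63);
}

int
main(void)
{
   struct example_attrib_buf buf = {.address = 0x1010, .size = 0x100};
   uint64_t addr;
   unsigned size;

   align_attrib_buf(&buf, &addr, &size);
   assert(addr == 0x1000);
   assert(size == 0x110);
   return 0;
}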
@@ -297,7 +297,7 @@ panvk_per_arch(CmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer,

bool had_batch;
struct panvk_batch *batch = open_batch(cmd, &had_batch);
mali_ptr report_addr = panvk_query_report_dev_addr(pool, query);
uint64_t report_addr = panvk_query_report_dev_addr(pool, query);

switch (pool->vk.query_type) {
case VK_QUERY_TYPE_OCCLUSION: {

@@ -29,15 +29,15 @@ struct panvk_cmd_buffer;

struct panvk_shader_desc_state {
#if PAN_ARCH <= 7
mali_ptr tables[PANVK_BIFROST_DESC_TABLE_COUNT];
mali_ptr img_attrib_table;
mali_ptr dyn_ssbos;
uint64_t tables[PANVK_BIFROST_DESC_TABLE_COUNT];
uint64_t img_attrib_table;
uint64_t dyn_ssbos;
#else
struct {
mali_ptr dev_addr;
uint64_t dev_addr;
uint32_t size;
} driver_set;
mali_ptr res_table;
uint64_t res_table;
#endif
};

@@ -21,7 +21,7 @@ struct panvk_cmd_compute_state {
struct panvk_descriptor_state desc_state;
const struct panvk_shader *shader;
struct panvk_compute_sysvals sysvals;
mali_ptr push_uniforms;
uint64_t push_uniforms;
struct {
struct panvk_shader_desc_state desc;
} cs;

@@ -67,7 +67,7 @@ struct panvk_dispatch_info {
} direct;

struct {
mali_ptr buffer_dev_addr;
uint64_t buffer_dev_addr;
} indirect;
};

@@ -28,7 +28,7 @@
struct panvk_cmd_buffer;

struct panvk_attrib_buf {
mali_ptr address;
uint64_t address;
unsigned size;
};

@@ -71,7 +71,7 @@ struct panvk_rendering_state {

#if PAN_ARCH >= 10
struct panfrost_ptr fbds;
mali_ptr tiler;
uint64_t tiler;

/* When a secondary command buffer has to flush draws, it disturbs the
* inherited context, and the primary command buffer needs to know. */

@@ -110,20 +110,20 @@ struct panvk_cmd_graphics_state {
struct {
const struct panvk_shader *shader;
struct panvk_shader_desc_state desc;
mali_ptr push_uniforms;
uint64_t push_uniforms;
bool required;
#if PAN_ARCH <= 7
mali_ptr rsd;
uint64_t rsd;
#endif
} fs;

struct {
const struct panvk_shader *shader;
struct panvk_shader_desc_state desc;
mali_ptr push_uniforms;
uint64_t push_uniforms;
#if PAN_ARCH <= 7
mali_ptr attribs;
mali_ptr attrib_bufs;
uint64_t attribs;
uint64_t attrib_bufs;
#endif
} vs;

@@ -146,11 +146,11 @@ struct panvk_cmd_graphics_state {
struct panvk_rendering_state render;

#if PAN_ARCH <= 7
mali_ptr vpd;
uint64_t vpd;
#endif

#if PAN_ARCH >= 10
mali_ptr tsd;
uint64_t tsd;
#endif

BITSET_DECLARE(dirty, PANVK_CMD_GRAPHICS_DIRTY_STATE_COUNT);

@@ -336,7 +336,7 @@ struct panvk_draw_info {
} instance;

struct {
mali_ptr buffer_dev_addr;
uint64_t buffer_dev_addr;
uint32_t draw_count;
uint32_t stride;
} indirect;

@@ -25,7 +25,7 @@ struct panvk_cmd_meta_compute_save_ctx {
const struct panvk_descriptor_set *set0;
struct {
struct panvk_opaque_desc desc_storage[MAX_PUSH_DESCS];
mali_ptr descs_dev_addr;
uint64_t descs_dev_addr;
uint32_t desc_count;
} push_set0;
struct panvk_push_constant_state push_constants;

@@ -44,7 +44,7 @@ struct panvk_cmd_meta_graphics_save_ctx {
const struct panvk_descriptor_set *set0;
struct {
struct panvk_opaque_desc desc_storage[MAX_PUSH_DESCS];
mali_ptr descs_dev_addr;
uint64_t descs_dev_addr;
uint32_t desc_count;
} push_set0;
struct panvk_push_constant_state push_constants;

@@ -11,10 +11,9 @@
#endif

#include "genxml/gen_macros.h"
#include "panfrost-job.h"

struct panvk_occlusion_query_state {
mali_ptr ptr;
uint64_t ptr;
enum mali_occlusion_mode mode;
};

@@ -496,7 +496,7 @@ panvk_GetDeviceImageSparseMemoryRequirements(VkDevice device,

static void
panvk_image_plane_bind(struct pan_image *plane, struct pan_kmod_bo *bo,
mali_ptr base, uint64_t offset)
uint64_t base, uint64_t offset)
{
plane->data.base = base;
plane->data.offset = offset;

@@ -11,8 +11,6 @@
#include "util/list.h"
#include "util/u_atomic.h"

#include "panfrost-job.h"

struct panvk_kmod_bo;

/* Used for internal object allocation. */

@@ -22,7 +20,7 @@ struct panvk_priv_bo {
struct panvk_device *dev;
struct pan_kmod_bo *bo;
struct {
mali_ptr dev;
uint64_t dev;
void *host;
} addr;
};

@@ -284,7 +284,7 @@ struct panvk_shader {
const char *asm_str;
};

static inline mali_ptr
static inline uint64_t
panvk_shader_get_dev_addr(const struct panvk_shader *shader)
{
return shader != NULL ? panvk_priv_mem_dev_addr(shader->code_mem) : 0;

@@ -49,7 +49,7 @@ static VkResult
get_blend_shader(struct panvk_device *dev,
const struct pan_blend_state *state,
nir_alu_type src0_type, nir_alu_type src1_type,
unsigned rt, mali_ptr *shader_addr)
unsigned rt, uint64_t *shader_addr)
{
struct panvk_physical_device *pdev =
to_panvk_physical_device(dev->vk.physical);

@@ -124,9 +124,9 @@ out:
}

static void
emit_blend_desc(const struct pan_shader_info *fs_info, mali_ptr fs_code,
emit_blend_desc(const struct pan_shader_info *fs_info, uint64_t fs_code,
const struct pan_blend_state *state, unsigned rt_idx,
mali_ptr blend_shader, uint16_t constant,
uint64_t blend_shader, uint16_t constant,
struct mali_blend_packed *bd)
{
const struct pan_blend_rt_state *rt = &state->rts[rt_idx];

@@ -305,7 +305,7 @@ panvk_per_arch(blend_emit_descs)(struct panvk_cmd_buffer *cmdbuf,
const struct vk_color_blend_state *cb = &dyns->cb;
const struct panvk_shader *fs = cmdbuf->state.gfx.fs.shader;
const struct pan_shader_info *fs_info = fs ? &fs->info : NULL;
mali_ptr fs_code = panvk_shader_get_dev_addr(fs);
uint64_t fs_code = panvk_shader_get_dev_addr(fs);
const struct panvk_rendering_state *render = &cmdbuf->state.gfx.render;
const VkFormat *color_attachment_formats = render->color_attachments.fmts;
const uint8_t *color_attachment_samples = render->color_attachments.samples;

@@ -323,7 +323,7 @@ panvk_per_arch(blend_emit_descs)(struct panvk_cmd_buffer *cmdbuf,
cb->blend_constants[3],
},
};
mali_ptr blend_shaders[8] = {};
uint64_t blend_shaders[8] = {};
/* All bits set to one encodes unused fixed-function blend constant. */
unsigned ff_blend_constant = ~0;

@@ -39,7 +39,7 @@ panvk_per_arch(CreateBufferView)(VkDevice _device,

enum pipe_format pfmt = vk_format_to_pipe_format(view->vk.format);

mali_ptr address = panvk_buffer_gpu_ptr(buffer, pCreateInfo->offset);
uint64_t address = panvk_buffer_gpu_ptr(buffer, pCreateInfo->offset);
VkBufferUsageFlags tex_usage_mask = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;

#if PAN_ARCH >= 9

@@ -11,7 +11,7 @@ VkResult
panvk_per_arch(cmd_prepare_push_uniforms)(struct panvk_cmd_buffer *cmdbuf,
const struct panvk_shader *shader)
{
mali_ptr *push_ptr;
uint64_t *push_ptr;

switch (shader->vk.stage) {
case MESA_SHADER_COMPUTE: