diff --git a/docs/drivers/powervr.rst b/docs/drivers/powervr.rst index c94f97da3cf..51fd642cfde 100644 --- a/docs/drivers/powervr.rst +++ b/docs/drivers/powervr.rst @@ -66,6 +66,37 @@ Hardware documentation can be found at: https://docs.imgtec.com/ Note: GPUs prior to Series6 do not have the hardware capabilities required to support Vulkan and therefore cannot be supported by this driver. +Multi-Architecture support +-------------------------- + +In order to support multiple distinct hardware generations without too much +spaghetti-code, the PowerVR driver compiles a few files multiple times (once per +hardware architecture), and uses a system of macros and aliases to be able +to refer to the different versions. + +The files that get compiled multiple times are those named +:file:`pvr_arch_*.c`. These files contain definitions of functions prefixed +with ``pvr_arch_`` (instead of the normal ``pvr_``-prefix). The ``arch``-bit +of that function is a placeholder, which gets replaced at compile-time, thanks +to a system of defines in the corresponding header file, supported by a set +of macros defined in :file:`pvr_macros.h`. + +The intention is that these functions are mostly called from architecture +specific entrypoints, that are handled by the common Vulkan dispatch-table +code. This means that an architecture specific function can easily call either +architecture specific or architecture agnostic code. + +The tricky bit comes when architecture agnostic code calls architecture specific +code. In that case, we have the ``PVR_ARCH_DISPATCH`` and +``PVR_ARCH_DISPATCH_RET`` macros. These are a bit error-prone to use, because +they need to see definitions for all architecture versions of each entrypoint, +which isn't something we have available. To work around this, we define a +``PER_ARCH_FUNCS(arch)`` macro in each source-file that needs to use these +dispatch macros, and make sure to instantiate it once per architecture. 
+ +To avoid confusion, please do not add functions that are prefixed with +``pvr_arch_`` if they are not part of the system described here. + Chat ---- diff --git a/src/imagination/vulkan/pvr_arch_border.c b/src/imagination/vulkan/pvr_arch_border.c index 6416d1bf24a..b45e5094019 100644 --- a/src/imagination/vulkan/pvr_arch_border.c +++ b/src/imagination/vulkan/pvr_arch_border.c @@ -415,7 +415,7 @@ static inline void pvr_border_color_swizzle_to_tex_format( *color = swizzled_color; } -VkResult PVR_PER_ARCH(border_color_table_init)(struct pvr_device *const device) +VkResult pvr_arch_border_color_table_init(struct pvr_device *const device) { struct pvr_border_color_table *table = device->border_color_table = vk_zalloc(&device->vk.alloc, @@ -470,7 +470,7 @@ err_out: return result; } -void PVR_PER_ARCH(border_color_table_finish)(struct pvr_device *const device) +void pvr_arch_border_color_table_finish(struct pvr_device *const device) { #if MESA_DEBUG BITSET_SET_RANGE_INSIDE_WORD(device->border_color_table->unused_entries, @@ -496,7 +496,7 @@ static inline void pvr_border_color_table_set_custom_entry( struct pvr_border_color_table_entry *const entry = &entries[index]; const enum pipe_format format = vk_format_to_pipe_format(vk_format); - uint32_t tex_format = pvr_get_tex_format(vk_format); + uint32_t tex_format = pvr_arch_get_tex_format(vk_format); assert(tex_format != ROGUE_TEXSTATE_FORMAT_INVALID); @@ -527,7 +527,7 @@ static inline void pvr_border_color_table_set_custom_entry( * format relative to the depth-only or stencil-only compoment * associated with this Vulkan format. 
*/ - tex_format = pvr_get_tex_format_aspect(vk_format, aspect_mask); + tex_format = pvr_arch_get_tex_format_aspect(vk_format, aspect_mask); assert(tex_format != ROGUE_TEXSTATE_FORMAT_INVALID); } @@ -593,7 +593,7 @@ err_out: "Failed to allocate border color table entry"); } -VkResult PVR_PER_ARCH(border_color_table_get_or_create_entry)( +VkResult pvr_arch_border_color_table_get_or_create_entry( struct pvr_device *const device, const struct pvr_sampler *const sampler, struct pvr_border_color_table *const table, @@ -612,7 +612,7 @@ VkResult PVR_PER_ARCH(border_color_table_get_or_create_entry)( index_out); } -void PVR_PER_ARCH(border_color_table_release_entry)( +void pvr_arch_border_color_table_release_entry( struct pvr_border_color_table *const table, const uint32_t index) { diff --git a/src/imagination/vulkan/pvr_arch_cmd_buffer.c b/src/imagination/vulkan/pvr_arch_cmd_buffer.c index fcba5acf972..0377ebbe0d8 100644 --- a/src/imagination/vulkan/pvr_arch_cmd_buffer.c +++ b/src/imagination/vulkan/pvr_arch_cmd_buffer.c @@ -112,7 +112,7 @@ static void pvr_cmd_buffer_clear_values_free(struct pvr_cmd_buffer *cmd_buffer); static void pvr_cmd_buffer_attachments_free(struct pvr_cmd_buffer *cmd_buffer); -struct pvr_renderpass_hwsetup_render *PVR_PER_ARCH(pass_info_get_hw_render)( +struct pvr_renderpass_hwsetup_render *pvr_arch_pass_info_get_hw_render( const struct pvr_render_pass_info *render_pass_info, uint32_t idx) { @@ -424,11 +424,11 @@ pvr_cmd_buffer_emit_ppp_state(const struct pvr_cmd_buffer *const cmd_buffer, return csb->status; } -VkResult PVR_PER_ARCH(cmd_buffer_upload_general)( - struct pvr_cmd_buffer *const cmd_buffer, - const void *const data, - const size_t size, - struct pvr_suballoc_bo **const pvr_bo_out) +VkResult +pvr_arch_cmd_buffer_upload_general(struct pvr_cmd_buffer *const cmd_buffer, + const void *const data, + const size_t size, + struct pvr_suballoc_bo **const pvr_bo_out) { struct pvr_device *const device = cmd_buffer->device; const uint32_t 
cache_line_size = @@ -480,15 +480,15 @@ pvr_cmd_buffer_upload_usc(struct pvr_cmd_buffer *const cmd_buffer, } VkResult -PVR_PER_ARCH(cmd_buffer_upload_pds)(struct pvr_cmd_buffer *const cmd_buffer, - const uint32_t *data, - uint32_t data_size_dwords, - uint32_t data_alignment, - const uint32_t *code, - uint32_t code_size_dwords, - uint32_t code_alignment, - uint64_t min_alignment, - struct pvr_pds_upload *const pds_upload_out) +pvr_arch_cmd_buffer_upload_pds(struct pvr_cmd_buffer *const cmd_buffer, + const uint32_t *data, + uint32_t data_size_dwords, + uint32_t data_alignment, + const uint32_t *code, + uint32_t code_size_dwords, + uint32_t code_alignment, + uint64_t min_alignment, + struct pvr_pds_upload *const pds_upload_out) { struct pvr_device *const device = cmd_buffer->device; VkResult result; @@ -517,15 +517,15 @@ pvr_cmd_buffer_upload_pds_data(struct pvr_cmd_buffer *const cmd_buffer, uint32_t data_alignment, struct pvr_pds_upload *const pds_upload_out) { - return pvr_cmd_buffer_upload_pds(cmd_buffer, - data, - data_size_dwords, - data_alignment, - NULL, - 0, - 0, - data_alignment, - pds_upload_out); + return pvr_arch_cmd_buffer_upload_pds(cmd_buffer, + data, + data_size_dwords, + data_alignment, + NULL, + 0, + 0, + data_alignment, + pds_upload_out); } /* pbe_cs_words must be an array of length emit_count with @@ -655,7 +655,7 @@ static VkResult pvr_sub_cmd_gfx_build_terminate_ctrl_stream( if (result != VK_SUCCESS) goto err_csb_finish; - result = pvr_csb_emit_terminate(&csb); + result = pvr_arch_csb_emit_terminate(&csb); if (result != VK_SUCCESS) goto err_csb_finish; @@ -705,7 +705,7 @@ static VkResult pvr_setup_texture_state_words( memcpy(&info.swizzle, swizzle, sizeof(info.swizzle)); /* TODO: Can we use image_view->texture_state instead of generating here? 
*/ - result = pvr_pack_tex_state(device, &info, &descriptor->image); + result = pvr_arch_pack_tex_state(device, &info, &descriptor->image); if (result != VK_SUCCESS) return result; @@ -870,10 +870,11 @@ pvr_load_op_constants_create_and_upload(struct pvr_cmd_buffer *cmd_buffer, assert(!(buffer_size % sizeof(uint32_t))); assert(buffer_size / sizeof(uint32_t) == load_op->shareds_count); - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.general_heap, - buffer_size, - &clear_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.general_heap, + buffer_size, + &clear_bo); if (result != VK_SUCCESS) return result; @@ -1130,15 +1131,15 @@ static void pvr_setup_pbe_state( swizzle = pvr_get_format_swizzle(iview->vk.format); memcpy(surface_params.swizzle, swizzle, sizeof(surface_params.swizzle)); - pvr_pbe_get_src_format_and_gamma(iview->vk.format, - PVR_PBE_GAMMA_NONE, - with_packed_usc_channel, - &surface_params.source_format, - &surface_params.gamma); + pvr_arch_pbe_get_src_format_and_gamma(iview->vk.format, + PVR_PBE_GAMMA_NONE, + with_packed_usc_channel, + &surface_params.source_format, + &surface_params.gamma); surface_params.is_normalized = pvr_vk_format_is_fully_normalized(iview->vk.format); - surface_params.pbe_packmode = pvr_get_pbe_packmode(iview->vk.format); + surface_params.pbe_packmode = pvr_arch_get_pbe_packmode(iview->vk.format); surface_params.nr_components = vk_format_get_nr_components(iview->vk.format); /* FIXME: Should we have an inline function to return the address of a mip @@ -1215,11 +1216,11 @@ static void pvr_setup_pbe_state( render_params.mrt_index = mrt_index; - pvr_pbe_pack_state(dev_info, - &surface_params, - &render_params, - pbe_cs_words, - pbe_reg_words); + pvr_arch_pbe_pack_state(dev_info, + &surface_params, + &render_params, + pbe_cs_words, + pbe_reg_words); } static struct pvr_render_target * @@ -1367,10 +1368,11 @@ pvr_sub_cmd_gfx_align_ds_subtiles(struct pvr_cmd_buffer *const 
cmd_buffer, buffer_size = buffer_layer_size * ds->iview->vk.layer_count; - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.general_heap, - buffer_size, - &buffer); + result = + pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.general_heap, + buffer_size, + &buffer); if (result != VK_SUCCESS) return result; @@ -1399,8 +1401,8 @@ pvr_sub_cmd_gfx_align_ds_subtiles(struct pvr_cmd_buffer *const cmd_buffer, cmd_buffer->state.current_sub_cmd = NULL; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_TRANSFER); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_TRANSFER); if (result != VK_SUCCESS) return result; @@ -1417,7 +1419,7 @@ pvr_sub_cmd_gfx_align_ds_subtiles(struct pvr_cmd_buffer *const cmd_buffer, new_sub_cmd->transfer.serialize_with_frag = true; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return result; @@ -1435,8 +1437,8 @@ pvr_sub_cmd_gfx_align_ds_subtiles(struct pvr_cmd_buffer *const cmd_buffer, if (ds->store.d || ds->store.s) { cmd_buffer->state.current_sub_cmd = NULL; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_TRANSFER); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_TRANSFER); if (result != VK_SUCCESS) return result; @@ -1452,7 +1454,7 @@ pvr_sub_cmd_gfx_align_ds_subtiles(struct pvr_cmd_buffer *const cmd_buffer, cmd_buffer->state.current_sub_cmd->transfer.serialize_with_frag = true; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return result; @@ -1596,7 +1598,8 @@ static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info, struct pvr_render_pass_info *render_pass_info = &cmd_buffer->state.render_pass_info; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(render_pass_info, 
sub_cmd->hw_render_idx); + pvr_arch_pass_info_get_hw_render(render_pass_info, + sub_cmd->hw_render_idx); struct pvr_render_job *job = &sub_cmd->job; struct pvr_render_state *rstate = render_pass_info->rstate; struct pvr_spm_bgobj_state *spm_bgobj_state = @@ -2238,10 +2241,9 @@ pvr_compute_generate_idfwdf(struct pvr_cmd_buffer *cmd_buffer, /* TODO: This can be pre-packed and uploaded directly. Would that provide any * speed up? */ -void PVR_PER_ARCH(compute_generate_fence)( - struct pvr_cmd_buffer *cmd_buffer, - struct pvr_sub_cmd_compute *const sub_cmd, - bool deallocate_shareds) +void pvr_arch_compute_generate_fence(struct pvr_cmd_buffer *cmd_buffer, + struct pvr_sub_cmd_compute *const sub_cmd, + bool deallocate_shareds) { const struct pvr_pds_upload *program = &cmd_buffer->device->pds_compute_fence_program; @@ -2286,7 +2288,7 @@ pvr_cmd_buffer_process_deferred_clears(struct pvr_cmd_buffer *cmd_buffer) VkResult result; list_del(&transfer_cmd->link); - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) return result; @@ -2341,7 +2343,7 @@ pvr_csb_gfx_build_view_index_ctrl_stream(struct pvr_device *const device, pvr_csb_clear_relocation_mark(&csb); - pvr_csb_emit_link(&csb, addr, false); + pvr_arch_csb_emit_link(&csb, addr, false); } result = pvr_csb_bake(&csb, &bo_list); @@ -2366,7 +2368,7 @@ err_csb_finish: return result; } -VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) +VkResult pvr_arch_cmd_buffer_end_sub_cmd(struct pvr_cmd_buffer *cmd_buffer) { struct pvr_cmd_buffer_state *state = &cmd_buffer->state; struct pvr_sub_cmd *sub_cmd = state->current_sub_cmd; @@ -2409,10 +2411,10 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) } else { const void *data = util_dynarray_begin(&state->query_indices); - result = pvr_cmd_buffer_upload_general(cmd_buffer, - data, - query_indices_size, - 
&query_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + data, + query_indices_size, + &query_bo); if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); @@ -2427,7 +2429,7 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) } if (secondary_cont) { - result = pvr_csb_emit_return(&gfx_sub_cmd->control_stream); + result = pvr_arch_csb_emit_return(&gfx_sub_cmd->control_stream); if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); @@ -2451,7 +2453,7 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); - result = pvr_csb_emit_terminate(&gfx_sub_cmd->control_stream); + result = pvr_arch_csb_emit_terminate(&gfx_sub_cmd->control_stream); if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); @@ -2486,9 +2488,9 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) case PVR_SUB_CMD_TYPE_COMPUTE: { struct pvr_sub_cmd_compute *const compute_sub_cmd = &sub_cmd->compute; - pvr_compute_generate_fence(cmd_buffer, compute_sub_cmd, true); + pvr_arch_compute_generate_fence(cmd_buffer, compute_sub_cmd, true); - result = pvr_csb_emit_terminate(&compute_sub_cmd->control_stream); + result = pvr_arch_csb_emit_terminate(&compute_sub_cmd->control_stream); if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); @@ -2552,7 +2554,8 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) * fragment shader to complete. 
*/ - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return result; @@ -2564,15 +2567,14 @@ VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer) }, }; - return pvr_add_query_program(cmd_buffer, &query_info); + return pvr_arch_add_query_program(cmd_buffer, &query_info); } return VK_SUCCESS; } -void PVR_PER_ARCH(reset_graphics_dirty_state)( - struct pvr_cmd_buffer *const cmd_buffer, - bool start_geom) +void pvr_arch_reset_graphics_dirty_state(struct pvr_cmd_buffer *const cmd_buffer, + bool start_geom) { struct vk_dynamic_graphics_state *const dynamic_state = &cmd_buffer->vk.dynamic_graphics_state; @@ -2651,14 +2653,13 @@ pvr_render_pass_info_get_view_mask(const struct pvr_render_pass_info *rp_info) { const uint32_t hw_render_idx = rp_info->current_hw_subpass; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(rp_info, hw_render_idx); + pvr_arch_pass_info_get_hw_render(rp_info, hw_render_idx); return hw_render->view_mask; } -VkResult -PVR_PER_ARCH(cmd_buffer_start_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer, - enum pvr_sub_cmd_type type) +VkResult pvr_arch_cmd_buffer_start_sub_cmd(struct pvr_cmd_buffer *cmd_buffer, + enum pvr_sub_cmd_type type) { struct pvr_cmd_buffer_state *state = &cmd_buffer->state; struct pvr_device *device = cmd_buffer->device; @@ -2679,7 +2680,7 @@ PVR_PER_ARCH(cmd_buffer_start_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer, } /* End the current sub command. 
*/ - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return result; } @@ -2731,7 +2732,7 @@ PVR_PER_ARCH(cmd_buffer_start_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer, if (state->vis_test_enabled) sub_cmd->gfx.query_pool = state->query_pool; - pvr_reset_graphics_dirty_state(cmd_buffer, true); + pvr_arch_reset_graphics_dirty_state(cmd_buffer, true); if (pvr_cmd_uses_deferred_cs_cmds(cmd_buffer)) { pvr_csb_init(device, @@ -2772,10 +2773,10 @@ PVR_PER_ARCH(cmd_buffer_start_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer, } VkResult -PVR_PER_ARCH(cmd_buffer_alloc_mem)(struct pvr_cmd_buffer *cmd_buffer, - struct pvr_winsys_heap *heap, - uint64_t size, - struct pvr_suballoc_bo **const pvr_bo_out) +pvr_arch_cmd_buffer_alloc_mem(struct pvr_cmd_buffer *cmd_buffer, + struct pvr_winsys_heap *heap, + uint64_t size, + struct pvr_suballoc_bo **const pvr_bo_out) { const uint32_t cache_line_size = pvr_get_slc_cache_line_size(&cmd_buffer->device->pdevice->dev_info); @@ -3204,12 +3205,12 @@ static inline VkResult pvr_render_targets_datasets_create( if (render_target->valid_mask & BITFIELD_BIT(view_idx)) continue; - result = pvr_render_target_dataset_create(device, - rstate->width, - rstate->height, - hw_render->sample_count, - layers, - &rt_dataset); + result = pvr_arch_render_target_dataset_create(device, + rstate->width, + rstate->height, + hw_render->sample_count, + layers, + &rt_dataset); if (result != VK_SUCCESS) { pvr_render_targets_datasets_destroy(render_target); pthread_mutex_unlock(&render_target->mutex); @@ -3260,8 +3261,8 @@ pvr_render_targets_init_for_render(struct pvr_device *device, } const struct pvr_renderpass_hwsetup_subpass * -PVR_PER_ARCH(get_hw_subpass)(const struct pvr_render_pass *pass, - const uint32_t subpass) +pvr_arch_get_hw_subpass(const struct pvr_render_pass *pass, + const uint32_t subpass) { const struct pvr_renderpass_hw_map *map = &pass->hw_setup->subpass_map[subpass]; @@ 
-3286,7 +3287,7 @@ pvr_perform_start_of_render_attachment_clear(struct pvr_cmd_buffer *cmd_buffer, const uint32_t hw_render_idx = hw_setup ? hw_setup->subpass_map[info->subpass_idx].render : 0; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(info, hw_render_idx); + pvr_arch_pass_info_get_hw_render(info, hw_render_idx); VkImageAspectFlags image_aspect; const struct pvr_image *image; struct pvr_image_view *iview; @@ -3394,7 +3395,7 @@ pvr_perform_start_of_render_clears(struct pvr_cmd_buffer *cmd_buffer, hw_render = &hw_setup->renders[hw_setup->subpass_map[info->subpass_idx].render]; } else { - hw_render = pvr_pass_info_get_hw_render(info, 0); + hw_render = pvr_arch_pass_info_get_hw_render(info, 0); } /* Mask of attachment clears using index lists instead of background object @@ -3439,8 +3440,8 @@ static void pvr_stash_depth_format(struct pvr_cmd_buffer_state *state, struct pvr_sub_cmd_gfx *const sub_cmd) { const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(&state->render_pass_info, - sub_cmd->hw_render_idx); + pvr_arch_pass_info_get_hw_render(&state->render_pass_info, + sub_cmd->hw_render_idx); if (hw_render->ds_attach_idx != VK_ATTACHMENT_UNUSED) { struct pvr_image_view **iviews = state->render_pass_info.attachments; @@ -3583,7 +3584,7 @@ static void pvr_emit_clear_words(struct pvr_cmd_buffer *const cmd_buffer, pvr_csb_set_relocation_mark(csb); - stream = pvr_csb_alloc_dwords(csb, vdm_state_size_in_dw); + stream = pvr_arch_csb_alloc_dwords(csb, vdm_state_size_in_dw); if (!stream) { pvr_cmd_buffer_set_error_unwarned(cmd_buffer, csb->status); return; @@ -3678,7 +3679,7 @@ static VkResult pvr_cs_write_load_op_for_view(struct pvr_cmd_buffer *cmd_buffer, pvr_emit_clear_words(cmd_buffer, sub_cmd); - pvr_reset_graphics_dirty_state(cmd_buffer, false); + pvr_arch_reset_graphics_dirty_state(cmd_buffer, false); return VK_SUCCESS; } @@ -3771,8 +3772,8 @@ pvr_resolve_unemitted_resolve_attachments(struct 
pvr_cmd_buffer *cmd_buffer, { struct pvr_cmd_buffer_state *state = &cmd_buffer->state; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(&state->render_pass_info, - info->current_hw_subpass); + pvr_arch_pass_info_get_hw_render(&state->render_pass_info, + info->current_hw_subpass); for (uint32_t i = 0U; i < hw_render->eot_surface_count; i++) { const struct pvr_renderpass_hwsetup_eot_surface *surface = @@ -3920,7 +3921,7 @@ pvr_resolve_unemitted_resolve_attachments(struct pvr_cmd_buffer *cmd_buffer, } } - return pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + return pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } void PVR_PER_ARCH(CmdBeginRenderPass2)( @@ -3976,14 +3977,15 @@ void PVR_PER_ARCH(CmdBeginRenderPass2)( assert(pass->subpasses[0].pipeline_bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS); - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return; /* Run subpass 0 "soft" background object after the actual background * object. 
*/ - hw_subpass = pvr_get_hw_subpass(pass, 0); + hw_subpass = pvr_arch_get_hw_subpass(pass, 0); if (hw_subpass->load_op) { result = pvr_cs_write_load_op(cmd_buffer, &cmd_buffer->state.current_sub_cmd->gfx, @@ -4033,7 +4035,7 @@ pvr_can_pbe_resolve_ds_attachment(const struct pvr_device_info *dev_info, if (pvr_get_ds_component_bits(vk_format, VK_IMAGE_ASPECT_STENCIL_BIT)) return false; - return pvr_format_is_pbe_downscalable(dev_info, vk_format); + return pvr_arch_format_is_pbe_downscalable(dev_info, vk_format); } static inline VkResult @@ -4075,13 +4077,13 @@ static void pvr_dynamic_rendering_output_attachments_cleanup( if (!dr_info) return; - pvr_mrt_load_op_state_cleanup(device, - allocator, - dr_info->hw_render.load_op_state); + pvr_arch_mrt_load_op_state_cleanup(device, + allocator, + dr_info->hw_render.load_op_state); - pvr_destroy_mrt_setup(device, &dr_info->hw_render.eot_setup); - pvr_destroy_mrt_setup(device, &dr_info->hw_render.init_setup); - pvr_destroy_mrt_setup(device, dr_info->mrt_setup); + pvr_arch_destroy_mrt_setup(device, &dr_info->hw_render.eot_setup); + pvr_arch_destroy_mrt_setup(device, &dr_info->hw_render.init_setup); + pvr_arch_destroy_mrt_setup(device, dr_info->mrt_setup); vk_free2(&device->vk.alloc, allocator, dr_info->hw_render.eot_surfaces); vk_free2(&device->vk.alloc, allocator, dr_info->hw_render.color_init); @@ -4275,8 +4277,10 @@ static VkResult pvr_dynamic_rendering_output_attachments_setup( } } - result = - pvr_init_usc_mrt_setup(device, mrt_count, attachment_formats, &mrt_setup); + result = pvr_arch_init_usc_mrt_setup(device, + mrt_count, + attachment_formats, + &mrt_setup); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_free_hw_render_eot_surface; @@ -4291,7 +4295,7 @@ static VkResult pvr_dynamic_rendering_output_attachments_setup( dr_info->hw_render.output_regs_count, dr_info->hw_render.tile_buffers_count); if (result != VK_SUCCESS) { - pvr_destroy_mrt_setup(device, &mrt_setup); + 
pvr_arch_destroy_mrt_setup(device, &mrt_setup); vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_free_hw_render_eot_surface; } @@ -4315,7 +4319,7 @@ static VkResult pvr_dynamic_rendering_output_attachments_setup( dr_info->hw_render.output_regs_count, dr_info->hw_render.tile_buffers_count); if (result != VK_SUCCESS) { - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_finish_mrt_setup; } @@ -4326,7 +4330,7 @@ static VkResult pvr_dynamic_rendering_output_attachments_setup( dr_info->hw_render.output_regs_count, dr_info->hw_render.tile_buffers_count); if (result != VK_SUCCESS) { - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_finish_mrt_init_setup; } @@ -4422,15 +4426,15 @@ static VkResult pvr_dynamic_rendering_output_attachments_setup( dr_info->hw_render.pbe_emits = pbe_emits; - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); return VK_SUCCESS; err_finish_mrt_init_setup: - pvr_destroy_mrt_setup(device, &dr_info->hw_render.init_setup); + pvr_arch_destroy_mrt_setup(device, &dr_info->hw_render.init_setup); err_finish_mrt_setup: - pvr_destroy_mrt_setup(device, dr_info->mrt_setup); + pvr_arch_destroy_mrt_setup(device, dr_info->mrt_setup); err_free_hw_render_eot_surface: vk_free(&device->vk.alloc, dr_info->hw_render.eot_surfaces); @@ -4741,7 +4745,7 @@ static inline uint64_t pvr_render_pass_info_get_scratch_buffer_size( struct pvr_device *device, const struct pvr_render_pass_info *info) { - return pvr_spm_scratch_buffer_calc_required_size( + return pvr_arch_spm_scratch_buffer_calc_required_size( &info->dr_info->hw_render, 1, info->dr_info->hw_render.sample_count, @@ -4835,17 +4839,18 @@ void PVR_PER_ARCH(CmdBeginRendering)(VkCommandBuffer commandBuffer, 
pvr_render_pass_info_get_scratch_buffer_size(device, &state->render_pass_info); - result = pvr_render_state_setup(device, - NULL, - state->render_pass_info.rstate, - 1, - &dr_info->hw_render); + result = pvr_arch_render_state_setup(device, + NULL, + state->render_pass_info.rstate, + 1, + &dr_info->hw_render); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_cleanup_tile_buffers; } - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_cleanup_render_state; @@ -4856,9 +4861,9 @@ void PVR_PER_ARCH(CmdBeginRendering)(VkCommandBuffer commandBuffer, sub_cmd->dr_info = dr_info; assert(sub_cmd->dr_info); - result = pvr_mrt_load_ops_setup(cmd_buffer, - &cmd_buffer->vk.pool->alloc, - &dr_info->hw_render.load_op_state); + result = pvr_arch_mrt_load_ops_setup(cmd_buffer, + &cmd_buffer->vk.pool->alloc, + &dr_info->hw_render.load_op_state); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_cleanup_render_state; @@ -4892,7 +4897,7 @@ void PVR_PER_ARCH(CmdEndRendering)(VkCommandBuffer commandBuffer) return; } - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto exit_teardown_render; @@ -5034,8 +5039,10 @@ static void pvr_cmd_buffer_state_from_dynamic_inheritance( attach_idx++; } - result = - pvr_init_usc_mrt_setup(device, mrt_count, attachment_formats, &mrt_setup); + result = pvr_arch_init_usc_mrt_setup(device, + mrt_count, + attachment_formats, + &mrt_setup); if (result != VK_SUCCESS) { vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_free_attachments; @@ -5047,7 +5054,7 @@ static void pvr_cmd_buffer_state_from_dynamic_inheritance( 
mrt_setup.num_output_regs, mrt_setup.num_tile_buffers); if (result != VK_SUCCESS) { - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); vk_command_buffer_set_error(&cmd_buffer->vk, result); goto err_free_attachments; } @@ -5065,7 +5072,7 @@ static void pvr_cmd_buffer_state_from_dynamic_inheritance( } } - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); if (dr_info->mrt_setup->num_tile_buffers) { result = pvr_device_tile_buffer_ensure_cap( @@ -5091,7 +5098,7 @@ static void pvr_cmd_buffer_state_from_dynamic_inheritance( return; err_destroy_mrt_setup: - pvr_destroy_mrt_setup(device, dr_info->mrt_setup); + pvr_arch_destroy_mrt_setup(device, dr_info->mrt_setup); err_free_attachments: vk_free(&device->vk.alloc, dr_info->attachments); @@ -5158,8 +5165,8 @@ PVR_PER_ARCH(BeginCommandBuffer)(VkCommandBuffer commandBuffer, pBeginInfo->pInheritanceInfo); } - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return result; @@ -5183,13 +5190,14 @@ PVR_PER_ARCH(BeginCommandBuffer)(VkCommandBuffer commandBuffer, } VkResult -PVR_PER_ARCH(cmd_buffer_add_transfer_cmd)(struct pvr_cmd_buffer *cmd_buffer, - struct pvr_transfer_cmd *transfer_cmd) +pvr_arch_cmd_buffer_add_transfer_cmd(struct pvr_cmd_buffer *cmd_buffer, + struct pvr_transfer_cmd *transfer_cmd) { struct pvr_sub_cmd_transfer *sub_cmd; VkResult result; - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_TRANSFER); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_TRANSFER); if (result != VK_SUCCESS) return result; @@ -5214,11 +5222,11 @@ pvr_setup_vertex_buffers(struct pvr_cmd_buffer *cmd_buffer, uint64_t *qword_buffer; VkResult result; - result = - pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.pds_heap, - 
PVR_DW_TO_BYTES(pds_info->data_size_in_dwords), - &pvr_bo); + result = pvr_arch_cmd_buffer_alloc_mem( + cmd_buffer, + cmd_buffer->device->heaps.pds_heap, + PVR_DW_TO_BYTES(pds_info->data_size_in_dwords), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -5482,11 +5490,11 @@ static VkResult pvr_setup_descriptor_mappings( if (!pds_info->data_size_in_dwords) return VK_SUCCESS; - result = - pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.pds_heap, - PVR_DW_TO_BYTES(pds_info->data_size_in_dwords), - &pvr_bo); + result = pvr_arch_cmd_buffer_alloc_mem( + cmd_buffer, + cmd_buffer->device->heaps.pds_heap, + PVR_DW_TO_BYTES(pds_info->data_size_in_dwords), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -5571,7 +5579,7 @@ static VkResult pvr_setup_descriptor_mappings( descriptor_set = desc_state->sets[desc_set]; assert(descriptor_set); - result = pvr_cmd_buffer_upload_general( + result = pvr_arch_cmd_buffer_upload_general( cmd_buffer, descriptor_set->dynamic_buffers, special_buff_entry->size_in_dwords * sizeof(uint32_t), @@ -5618,10 +5626,11 @@ static VkResult pvr_setup_descriptor_mappings( &cmd_buffer->vk.dynamic_graphics_state.cb; struct pvr_suballoc_bo *blend_consts_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - cb->blend_constants, - sizeof(cb->blend_constants), - &blend_consts_bo); + result = + pvr_arch_cmd_buffer_upload_general(cmd_buffer, + cb->blend_constants, + sizeof(cb->blend_constants), + &blend_consts_bo); if (result != VK_SUCCESS) return result; @@ -5653,10 +5662,11 @@ static VkResult pvr_setup_descriptor_mappings( sampler) {} struct pvr_suballoc_bo *point_sampler_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - point_sampler_words, - sizeof(point_sampler_words), - &point_sampler_bo); + result = + pvr_arch_cmd_buffer_upload_general(cmd_buffer, + point_sampler_words, + sizeof(point_sampler_words), + &point_sampler_bo); if (result != VK_SUCCESS) return result; @@ -5688,10 +5698,11 @@ static VkResult 
pvr_setup_descriptor_mappings( sampler) {} struct pvr_suballoc_bo *ia_sampler_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - ia_sampler_words, - sizeof(ia_sampler_words), - &ia_sampler_bo); + result = + pvr_arch_cmd_buffer_upload_general(cmd_buffer, + ia_sampler_words, + sizeof(ia_sampler_words), + &ia_sampler_bo); if (result != VK_SUCCESS) return result; @@ -5725,10 +5736,10 @@ static VkResult pvr_setup_descriptor_mappings( } struct pvr_suballoc_bo *ff_op_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - &ff_op, - sizeof(ff_op), - &ff_op_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + &ff_op, + sizeof(ff_op), + &ff_op_bo); if (result != VK_SUCCESS) return result; @@ -5757,10 +5768,10 @@ static VkResult pvr_setup_descriptor_mappings( fs_meta |= (1 << 25); struct pvr_suballoc_bo *fs_meta_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - &fs_meta, - sizeof(fs_meta), - &fs_meta_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + &fs_meta, + sizeof(fs_meta), + &fs_meta_bo); if (result != VK_SUCCESS) return result; @@ -5794,11 +5805,11 @@ static VkResult pvr_setup_descriptor_mappings( } struct pvr_suballoc_bo *tile_buffer_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - &tile_buffer_addrs, - num_tile_buffers * - sizeof(uint64_t), - &tile_buffer_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + &tile_buffer_addrs, + num_tile_buffers * + sizeof(uint64_t), + &tile_buffer_bo); if (result != VK_SUCCESS) return result; @@ -5817,10 +5828,10 @@ static VkResult pvr_setup_descriptor_mappings( : sizeof(uint32_t); struct pvr_suballoc_bo *spill_buffer_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - NULL, - spill_block_size * 2048, - &spill_buffer_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + NULL, + spill_block_size * 2048, + &spill_buffer_bo); if (result != VK_SUCCESS) return result; @@ -5832,10 +5843,10 @@ static VkResult pvr_setup_descriptor_mappings( }; struct 
pvr_suballoc_bo *spill_info_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - spill_info, - sizeof(spill_info), - &spill_info_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + spill_info, + sizeof(spill_info), + &spill_info_bo); if (result != VK_SUCCESS) return result; @@ -5855,10 +5866,11 @@ static VkResult pvr_setup_descriptor_mappings( * programmatically! */ struct pvr_suballoc_bo *scratch_buffer_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - NULL, - scratch_block_size * 2048, - &scratch_buffer_bo); + result = + pvr_arch_cmd_buffer_upload_general(cmd_buffer, + NULL, + scratch_block_size * 2048, + &scratch_buffer_bo); if (result != VK_SUCCESS) return result; @@ -5870,10 +5882,10 @@ static VkResult pvr_setup_descriptor_mappings( }; struct pvr_suballoc_bo *scratch_info_bo; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - scratch_info, - sizeof(scratch_info), - &scratch_info_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + scratch_info, + sizeof(scratch_info), + &scratch_info_bo); if (result != VK_SUCCESS) return result; @@ -5895,11 +5907,11 @@ static VkResult pvr_setup_descriptor_mappings( }; struct pvr_suballoc_bo *sample_locations_bo; - result = - pvr_cmd_buffer_upload_general(cmd_buffer, - &packed_sample_locations, - sizeof(packed_sample_locations), - &sample_locations_bo); + result = pvr_arch_cmd_buffer_upload_general( + cmd_buffer, + &packed_sample_locations, + sizeof(packed_sample_locations), + &sample_locations_bo); if (result != VK_SUCCESS) return result; @@ -6004,7 +6016,7 @@ static void pvr_compute_update_shared(struct pvr_cmd_buffer *cmd_buffer, pvr_compute_generate_control_stream(csb, sub_cmd, &info); } -void PVR_PER_ARCH(compute_update_shared_private)( +void pvr_arch_compute_update_shared_private( struct pvr_cmd_buffer *cmd_buffer, struct pvr_sub_cmd_compute *const sub_cmd, struct pvr_private_compute_pipeline *pipeline) @@ -6079,7 +6091,7 @@ pvr_compute_flat_pad_workgroup_size(const struct 
pvr_physical_device *pdevice, return workgroup_size; } -void PVR_PER_ARCH(compute_update_kernel_private)( +void pvr_arch_compute_update_kernel_private( struct pvr_cmd_buffer *cmd_buffer, struct pvr_sub_cmd_compute *const sub_cmd, struct pvr_private_compute_pipeline *pipeline, @@ -6274,10 +6286,10 @@ static VkResult pvr_cmd_upload_push_consts(struct pvr_cmd_buffer *cmd_buffer, if (!push_consts->dirty) return VK_SUCCESS; - result = pvr_cmd_buffer_upload_general(cmd_buffer, - push_consts->data, - push_consts->bytes_updated, - &suballoc_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + push_consts->data, + push_consts->bytes_updated, + &suballoc_bo); if (result != VK_SUCCESS) return result; @@ -6300,7 +6312,7 @@ static void pvr_cmd_dispatch( struct pvr_sub_cmd_compute *sub_cmd; VkResult result; - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_COMPUTE); + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_COMPUTE); sub_cmd = &state->current_sub_cmd->compute; sub_cmd->uses_atomic_ops |= cs_data->common.uses.atomics; @@ -7110,7 +7122,7 @@ setup_pds_fragment_program(struct pvr_cmd_buffer *const cmd_buffer, &doutu_src); /* FIXME: Figure out the define for alignment of 16. */ - return pvr_cmd_buffer_upload_pds( + return pvr_arch_cmd_buffer_upload_pds( cmd_buffer, &pds_fragment_program_buffer[0], program->data_size, @@ -7178,7 +7190,7 @@ setup_pds_coeff_program(struct pvr_cmd_buffer *const cmd_buffer, } /* FIXME: Figure out the define for alignment of 16. 
*/ - return pvr_cmd_buffer_upload_pds( + return pvr_arch_cmd_buffer_upload_pds( cmd_buffer, &pds_coeff_program_buffer[0], program->data_size, @@ -7240,7 +7252,7 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer, ROGUE_TA_STATE_PDS_SIZEINFO2_USC_SHAREDSIZE_UNIT_SIZE); const uint32_t max_tiles_in_flight = - pvr_calc_fscommon_size_and_tiles_in_flight( + pvr_arch_calc_fscommon_size_and_tiles_in_flight( &pdevice->dev_info, &pdevice->dev_runtime_info, usc_shared_size * @@ -7695,10 +7707,11 @@ static VkResult pvr_emit_ppp_state(struct pvr_cmd_buffer *const cmd_buffer, ppp_state_words_count = buffer_ptr - ppp_state_words; assert(ppp_state_words_count <= PVR_MAX_PPP_STATE_DWORDS); - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.general_heap, - PVR_DW_TO_BYTES(ppp_state_words_count), - &pvr_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.general_heap, + PVR_DW_TO_BYTES(ppp_state_words_count), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -7730,7 +7743,7 @@ static VkResult pvr_emit_ppp_state(struct pvr_cmd_buffer *const cmd_buffer, pvr_csb_set_relocation_mark(control_stream); - vdm_state = pvr_csb_alloc_dwords(control_stream, num_dwords); + vdm_state = pvr_arch_csb_alloc_dwords(control_stream, num_dwords); if (!vdm_state) { result = pvr_csb_get_status(control_stream); return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); @@ -7828,10 +7841,10 @@ pvr_emit_dirty_ppp_state(struct pvr_cmd_buffer *const cmd_buffer, VkResult result; /* TODO: The emit_header will be dirty only if - * pvr_reset_graphics_dirty_state() was called before this (so when command - * buffer begins recording or when it's reset). Otherwise it will have been - * zeroed out by the previous pvr_emit_ppp_state(). We can probably set a - * flag in there and check it here instead of checking the header. 
+ * pvr_arch_reset_graphics_dirty_state() was called before this (so when + * command buffer begins recording or when it's reset). Otherwise it will + * have been zeroed out by the previous pvr_emit_ppp_state(). We can probably + * set a flag in there and check it here instead of checking the header. * Check if this is true and implement the flag. */ if (!pvr_ppp_state_update_required(cmd_buffer)) @@ -7898,12 +7911,11 @@ pvr_emit_dirty_ppp_state(struct pvr_cmd_buffer *const cmd_buffer, return VK_SUCCESS; } -void PVR_PER_ARCH(calculate_vertex_cam_size)( - const struct pvr_device_info *dev_info, - const uint32_t vs_output_size, - const bool raster_enable, - uint32_t *const cam_size_out, - uint32_t *const vs_max_instances_out) +void pvr_arch_calculate_vertex_cam_size(const struct pvr_device_info *dev_info, + const uint32_t vs_output_size, + const bool raster_enable, + uint32_t *const cam_size_out, + uint32_t *const vs_max_instances_out) { /* First work out the size of a vertex in the UVS and multiply by 4 for * column ordering. @@ -7989,11 +8001,11 @@ static void pvr_emit_dirty_vdm_state(struct pvr_cmd_buffer *const cmd_buffer, /* CAM Calculations and HW state take vertex size aligned to DWORDS. 
*/ assert(vs_data->vs.vtxouts <= max_user_vertex_output_components); - pvr_calculate_vertex_cam_size(dev_info, - vs_data->vs.vtxouts, - true, - &cam_size, - &max_instances); + pvr_arch_calculate_vertex_cam_size(dev_info, + vs_data->vs.vtxouts, + true, + &cam_size, + &max_instances); pvr_csb_set_relocation_mark(csb); @@ -8099,7 +8111,7 @@ static VkResult pvr_validate_draw_state(struct pvr_cmd_buffer *cmd_buffer) bool bstencil_keep; VkResult result; - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); sub_cmd = &state->current_sub_cmd->gfx; sub_cmd->empty_cmd = false; @@ -8345,10 +8357,11 @@ pvr_write_draw_indirect_vdm_stream(struct pvr_cmd_buffer *cmd_buffer, VkResult result; /* TODO: Move this outside the loop and allocate all of them in one go? */ - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.general_heap, - DUMMY_VDM_CONTROL_STREAM_BLOCK_SIZE, - &dummy_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.general_heap, + DUMMY_VDM_CONTROL_STREAM_BLOCK_SIZE, + &dummy_bo); if (result != VK_SUCCESS) return result; @@ -8370,10 +8383,10 @@ pvr_write_draw_indirect_vdm_stream(struct pvr_cmd_buffer *cmd_buffer, pds_size = PVR_DW_TO_BYTES(pds_prog.program.data_size_aligned + pds_prog.program.code_size_aligned); - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.pds_heap, - pds_size, - &pds_bo); + result = pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.pds_heap, + pds_size, + &pds_bo); if (result != VK_SUCCESS) return result; @@ -8763,7 +8776,7 @@ void PVR_PER_ARCH(CmdEndRenderPass2)(VkCommandBuffer commandBuffer, assert(state->render_pass_info.pass); assert(state->render_pass_info.framebuffer); - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return; @@ -8819,10 +8832,10 @@ 
pvr_execute_deferred_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, ispdbsc.scindex = scissor_idx; } - result = pvr_cmd_buffer_upload_general(cmd_buffer, - &ppp_state[0], - sizeof(ppp_state), - &suballoc_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, + &ppp_state[0], + sizeof(ppp_state), + &suballoc_bo); if (result != VK_SUCCESS) return result; @@ -8948,12 +8961,12 @@ pvr_execute_graphics_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, primary_sub_cmd->gfx.query_pool != first_sec_cmd->gfx.query_pool) { state->current_sub_cmd->gfx.barrier_store = true; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return result; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return result; @@ -8986,7 +8999,7 @@ pvr_execute_graphics_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, if (pvr_cmd_uses_deferred_cs_cmds(sec_cmd_buffer)) { /* TODO: In case if secondary buffer is created with * VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, then we patch the - * stream and copy it to primary stream using pvr_csb_copy below. + * stream and copy it to primary stream using pvr_arch_csb_copy below. * This will need locking if the same secondary command buffer is * executed in multiple primary buffers at the same time. 
*/ @@ -8994,8 +9007,8 @@ pvr_execute_graphics_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, if (result != VK_SUCCESS) return result; - result = pvr_csb_copy(&primary_sub_cmd->gfx.control_stream, - &sec_sub_cmd->gfx.control_stream); + result = pvr_arch_csb_copy(&primary_sub_cmd->gfx.control_stream, + &sec_sub_cmd->gfx.control_stream); if (result != VK_SUCCESS) return pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); } else { @@ -9003,7 +9016,7 @@ pvr_execute_graphics_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, if (result != VK_SUCCESS) return result; - pvr_csb_emit_link( + pvr_arch_csb_emit_link( &primary_sub_cmd->gfx.control_stream, pvr_csb_get_start_address(&sec_sub_cmd->gfx.control_stream), true); @@ -9048,12 +9061,12 @@ pvr_execute_graphics_cmd_buffer(struct pvr_cmd_buffer *cmd_buffer, state->current_sub_cmd->gfx.barrier_store = true; state->current_sub_cmd->gfx.empty_cmd = false; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return result; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return result; @@ -9098,7 +9111,7 @@ void PVR_PER_ARCH(CmdExecuteCommands)(VkCommandBuffer commandBuffer, * Can't just copy state from the secondary because the recording state of * the secondary command buffers would have been deleted at this point. 
*/ - pvr_reset_graphics_dirty_state(cmd_buffer, false); + pvr_arch_reset_graphics_dirty_state(cmd_buffer, false); if (state->current_sub_cmd && state->current_sub_cmd->type == PVR_SUB_CMD_TYPE_GRAPHICS) { @@ -9127,7 +9140,7 @@ void PVR_PER_ARCH(CmdExecuteCommands)(VkCommandBuffer commandBuffer, assert(sec_cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY); - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return; @@ -9187,7 +9200,7 @@ static void pvr_insert_transparent_obj(struct pvr_cmd_buffer *const cmd_buffer, pvr_emit_clear_words(cmd_buffer, sub_cmd); /* Reset graphics state. */ - pvr_reset_graphics_dirty_state(cmd_buffer, false); + pvr_arch_reset_graphics_dirty_state(cmd_buffer, false); } static inline struct pvr_render_subpass * @@ -9220,7 +9233,7 @@ void PVR_PER_ARCH(CmdNextSubpass2)(VkCommandBuffer commandBuffer, next_hw_render = &pass->hw_setup->renders[next_map->render]; if (current_map->render != next_map->render) { - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return; @@ -9230,8 +9243,8 @@ void PVR_PER_ARCH(CmdNextSubpass2)(VkCommandBuffer commandBuffer, rp_info->current_hw_subpass = next_map->render; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return; @@ -9362,7 +9375,7 @@ static bool pvr_is_stencil_store_load_needed( hw_render_idx = state->current_sub_cmd->gfx.hw_render_idx; hw_render = - pvr_pass_info_get_hw_render(&state->render_pass_info, hw_render_idx); + pvr_arch_pass_info_get_hw_render(&state->render_pass_info, hw_render_idx); if (hw_render->ds_attach_idx == VK_ATTACHMENT_UNUSED) return false; @@ -9427,8 +9440,9 @@ pvr_cmd_buffer_insert_mid_frag_barrier_event(struct pvr_cmd_buffer *cmd_buffer, /* Submit graphics job to 
store stencil. */ cmd_buffer->state.current_sub_cmd->gfx.barrier_store = true; - pvr_cmd_buffer_end_sub_cmd(cmd_buffer); - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return result; @@ -9439,8 +9453,8 @@ pvr_cmd_buffer_insert_mid_frag_barrier_event(struct pvr_cmd_buffer *cmd_buffer, .wait_at_stage_mask = dst_stage_mask, }, }; - pvr_cmd_buffer_end_sub_cmd(cmd_buffer); - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); cmd_buffer->state.current_sub_cmd->gfx.dr_info = prev_sub_cmd->gfx.dr_info; prev_sub_cmd->gfx.dr_info = NULL; @@ -9466,7 +9480,8 @@ pvr_cmd_buffer_insert_barrier_event(struct pvr_cmd_buffer *cmd_buffer, { VkResult result; - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return result; @@ -9478,7 +9493,7 @@ pvr_cmd_buffer_insert_barrier_event(struct pvr_cmd_buffer *cmd_buffer, }, }; - return pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + return pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } /* This is just enough to handle vkCmdPipelineBarrier(). 
@@ -9585,9 +9600,9 @@ void PVR_PER_ARCH(CmdPipelineBarrier2)(VkCommandBuffer commandBuffer, */ pvr_compute_generate_idfwdf(cmd_buffer, &current_sub_cmd->compute); - pvr_compute_generate_fence(cmd_buffer, - &current_sub_cmd->compute, - false); + pvr_arch_compute_generate_fence(cmd_buffer, + &current_sub_cmd->compute, + false); break; default: @@ -9633,7 +9648,8 @@ void PVR_PER_ARCH(CmdResetEvent2)(VkCommandBuffer commandBuffer, PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer); - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; @@ -9645,7 +9661,7 @@ void PVR_PER_ARCH(CmdResetEvent2)(VkCommandBuffer commandBuffer, }, }; - pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } void PVR_PER_ARCH(CmdSetEvent2)(VkCommandBuffer commandBuffer, @@ -9658,7 +9674,8 @@ void PVR_PER_ARCH(CmdSetEvent2)(VkCommandBuffer commandBuffer, PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer); - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; @@ -9673,7 +9690,7 @@ void PVR_PER_ARCH(CmdSetEvent2)(VkCommandBuffer commandBuffer, }, }; - pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } void PVR_PER_ARCH(CmdWaitEvents2)(VkCommandBuffer commandBuffer, @@ -9699,7 +9716,8 @@ void PVR_PER_ARCH(CmdWaitEvents2)(VkCommandBuffer commandBuffer, return; } - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, events_array); return; } @@ -9732,7 +9750,7 @@ void PVR_PER_ARCH(CmdWaitEvents2)(VkCommandBuffer commandBuffer, }, }; - pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + 
pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } void PVR_PER_ARCH(CmdWriteTimestamp2)(VkCommandBuffer commandBuffer, @@ -9757,7 +9775,7 @@ VkResult PVR_PER_ARCH(EndCommandBuffer)(VkCommandBuffer commandBuffer) */ util_dynarray_fini(&state->query_indices); - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) pvr_cmd_buffer_set_error_unwarned(cmd_buffer, result); diff --git a/src/imagination/vulkan/pvr_arch_cmd_query.c b/src/imagination/vulkan/pvr_arch_cmd_query.c index 3b4dd56a8ea..334ea1014c9 100644 --- a/src/imagination/vulkan/pvr_arch_cmd_query.c +++ b/src/imagination/vulkan/pvr_arch_cmd_query.c @@ -31,7 +31,8 @@ void PVR_PER_ARCH(CmdResetQueryPool)(VkCommandBuffer commandBuffer, /* make the query-reset program wait for previous geom/frag, * to not overwrite them */ - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; @@ -44,14 +45,15 @@ void PVR_PER_ARCH(CmdResetQueryPool)(VkCommandBuffer commandBuffer, }; /* add the query-program itself */ - result = pvr_add_query_program(cmd_buffer, &query_info); + result = pvr_arch_add_query_program(cmd_buffer, &query_info); if (result != VK_SUCCESS) return; /* make future geom/frag wait for the query-reset program to * reset the counters to 0 */ - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; @@ -89,7 +91,8 @@ void PVR_PER_ARCH(CmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, query_info.copy_query_results.stride = stride; query_info.copy_query_results.flags = flags; - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; 
@@ -114,13 +117,14 @@ void PVR_PER_ARCH(CmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, }, }; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return; - pvr_add_query_program(cmd_buffer, &query_info); + pvr_arch_add_query_program(cmd_buffer, &query_info); - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT); if (result != VK_SUCCESS) return; @@ -141,7 +145,7 @@ pvr_cmd_buffer_state_get_view_count(const struct pvr_cmd_buffer_state *state) const struct pvr_sub_cmd_gfx *gfx_sub_cmd = &state->current_sub_cmd->gfx; const uint32_t hw_render_idx = gfx_sub_cmd->hw_render_idx; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(render_pass_info, hw_render_idx); + pvr_arch_pass_info_get_hw_render(render_pass_info, hw_render_idx); const uint32_t view_count = util_bitcount(hw_render->view_mask); assert(state->current_sub_cmd->type == PVR_SUB_CMD_TYPE_GRAPHICS); @@ -177,12 +181,12 @@ void PVR_PER_ARCH(CmdBeginQuery)(VkCommandBuffer commandBuffer, /* Kick render. 
*/ state->current_sub_cmd->gfx.barrier_store = true; - result = pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + result = pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); if (result != VK_SUCCESS) return; - result = - pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS); + result = pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, + PVR_SUB_CMD_TYPE_GRAPHICS); if (result != VK_SUCCESS) return; diff --git a/src/imagination/vulkan/pvr_arch_csb.c b/src/imagination/vulkan/pvr_arch_csb.c index 10502b3de9a..66edec7622b 100644 --- a/src/imagination/vulkan/pvr_arch_csb.c +++ b/src/imagination/vulkan/pvr_arch_csb.c @@ -90,7 +90,7 @@ pvr_csb_emit_link_unmarked(struct pvr_csb *csb, pvr_dev_addr_t addr, bool ret) * current buffer, a few bytes including guard padding size are reserved at the * end, every time a buffer is created. Every time we allocate a new buffer we * fix the current buffer in use to emit the stream link dwords. This makes sure - * that when #pvr_csb_alloc_dwords() is called from #pvr_csb_emit() to add + * that when #pvr_arch_csb_alloc_dwords() is called from #pvr_csb_emit() to add * STREAM_LINK0 and STREAM_LINK1, it succeeds without trying to allocate new * pages. * @@ -188,7 +188,7 @@ static bool pvr_csb_buffer_extend(struct pvr_csb *csb) * \param[in] num_dwords Number of dwords to allocate. * \return Valid host virtual address or NULL otherwise. */ -void *PVR_PER_ARCH(csb_alloc_dwords)(struct pvr_csb *csb, uint32_t num_dwords) +void *pvr_arch_csb_alloc_dwords(struct pvr_csb *csb, uint32_t num_dwords) { const uint32_t required_space = PVR_DW_TO_BYTES(num_dwords); void *p; @@ -241,8 +241,7 @@ void *PVR_PER_ARCH(csb_alloc_dwords)(struct pvr_csb *csb, uint32_t num_dwords) * \param[in,out] csb_dst Destination control Stream Builder object. * \param[in] csb_src Source Control Stream Builder object. 
*/ -VkResult PVR_PER_ARCH(csb_copy)(struct pvr_csb *csb_dst, - struct pvr_csb *csb_src) +VkResult pvr_arch_csb_copy(struct pvr_csb *csb_dst, struct pvr_csb *csb_src) { const uint8_t stream_reserved_space = PVR_DW_TO_BYTES(pvr_cmd_length(VDMCTRL_STREAM_LINK0) + @@ -270,7 +269,7 @@ VkResult PVR_PER_ARCH(csb_copy)(struct pvr_csb *csb_dst, assert(!"CSB source buffer too large to do a full copy"); } - destination = PVR_PER_ARCH(csb_alloc_dwords)(csb_dst, size); + destination = pvr_arch_csb_alloc_dwords(csb_dst, size); if (!destination) { assert(csb_dst->status != VK_SUCCESS); return csb_dst->status; @@ -290,9 +289,7 @@ VkResult PVR_PER_ARCH(csb_copy)(struct pvr_csb *csb_dst, * \param[in] ret Selects whether the sub control stream will return or * terminate. */ -void PVR_PER_ARCH(csb_emit_link)(struct pvr_csb *csb, - pvr_dev_addr_t addr, - bool ret) +void pvr_arch_csb_emit_link(struct pvr_csb *csb, pvr_dev_addr_t addr, bool ret) { pvr_csb_set_relocation_mark(csb); pvr_csb_emit_link_unmarked(csb, addr, ret); @@ -307,7 +304,7 @@ void PVR_PER_ARCH(csb_emit_link)(struct pvr_csb *csb, * \param[in] csb Control Stream Builder object to add VDMCTRL_STREAM_RETURN to. * \return VK_SUCCESS on success, or error code otherwise. */ -VkResult PVR_PER_ARCH(csb_emit_return)(struct pvr_csb *csb) +VkResult pvr_arch_csb_emit_return(struct pvr_csb *csb) { /* STREAM_RETURN is only supported by graphics control stream. */ assert(csb->stream_type == PVR_CMD_STREAM_TYPE_GRAPHICS || @@ -330,7 +327,7 @@ VkResult PVR_PER_ARCH(csb_emit_return)(struct pvr_csb *csb) * \param[in] csb Control Stream Builder object to terminate. * \return VK_SUCCESS on success, or error code otherwise. 
*/ -VkResult PVR_PER_ARCH(csb_emit_terminate)(struct pvr_csb *csb) +VkResult pvr_arch_csb_emit_terminate(struct pvr_csb *csb) { pvr_csb_set_relocation_mark(csb); diff --git a/src/imagination/vulkan/pvr_arch_device.c b/src/imagination/vulkan/pvr_arch_device.c index 40fd8dd8f63..fa0ce5048e2 100644 --- a/src/imagination/vulkan/pvr_arch_device.c +++ b/src/imagination/vulkan/pvr_arch_device.c @@ -86,7 +86,7 @@ static uint32_t pvr_get_simultaneous_num_allocs( return 4; } -uint32_t PVR_PER_ARCH(calc_fscommon_size_and_tiles_in_flight)( +uint32_t pvr_arch_calc_fscommon_size_and_tiles_in_flight( const struct pvr_device_info *dev_info, const struct pvr_device_runtime_info *dev_runtime_info, uint32_t fs_common_size, @@ -148,7 +148,7 @@ uint32_t PVR_PER_ARCH(calc_fscommon_size_and_tiles_in_flight)( return MIN2(num_tile_in_flight, max_tiles_in_flight); } -VkResult PVR_PER_ARCH(pds_compute_shader_create_and_upload)( +VkResult pvr_arch_pds_compute_shader_create_and_upload( struct pvr_device *device, struct pvr_pds_compute_shader_program *program, struct pvr_pds_upload *const pds_upload_out) @@ -224,7 +224,7 @@ static VkResult pvr_device_init_compute_fence_program(struct pvr_device *device) program.fence = true; program.clear_pds_barrier = true; - return pvr_pds_compute_shader_create_and_upload( + return pvr_arch_pds_compute_shader_create_and_upload( device, &program, &device->pds_compute_fence_program); @@ -237,7 +237,7 @@ static VkResult pvr_device_init_compute_empty_program(struct pvr_device *device) pvr_pds_compute_shader_program_init(&program); program.clear_pds_barrier = true; - return pvr_pds_compute_shader_create_and_upload( + return pvr_arch_pds_compute_shader_create_and_upload( device, &program, &device->pds_compute_empty_program); @@ -444,7 +444,7 @@ static VkResult pvr_device_init_compute_idfwdf_state(struct pvr_device *device) .addr = device->idfwdf_state.store_bo->vma->dev_addr, }; - result = pvr_pack_tex_state(device, &tex_info, &image_state); + result = 
pvr_arch_pack_tex_state(device, &tex_info, &image_state); if (result != VK_SUCCESS) goto err_free_shareds_buffer; @@ -809,7 +809,7 @@ VkResult PVR_PER_ARCH(create_device)(struct pvr_physical_device *pdevice, if (result != VK_SUCCESS) goto err_pvr_free_compute_empty; - result = pvr_device_create_compute_query_programs(device); + result = pvr_arch_device_create_compute_query_programs(device); if (result != VK_SUCCESS) goto err_pvr_free_view_index; @@ -821,13 +821,13 @@ VkResult PVR_PER_ARCH(create_device)(struct pvr_physical_device *pdevice, if (result != VK_SUCCESS) goto err_pvr_finish_compute_idfwdf; - result = pvr_device_init_spm_load_state(device); + result = pvr_arch_device_init_spm_load_state(device); if (result != VK_SUCCESS) goto err_pvr_finish_graphics_static_clear_state; pvr_device_init_tile_buffer_state(device); - result = pvr_queues_create(device, pCreateInfo); + result = pvr_arch_queues_create(device, pCreateInfo); if (result != VK_SUCCESS) goto err_pvr_finish_tile_buffer_state; @@ -839,7 +839,7 @@ VkResult PVR_PER_ARCH(create_device)(struct pvr_physical_device *pdevice, if (result != VK_SUCCESS) goto err_pvr_spm_finish_scratch_buffer_store; - result = pvr_border_color_table_init(device); + result = pvr_arch_border_color_table_init(device); if (result != VK_SUCCESS) goto err_pvr_robustness_buffer_finish; @@ -867,11 +867,11 @@ err_pvr_robustness_buffer_finish: err_pvr_spm_finish_scratch_buffer_store: pvr_spm_finish_scratch_buffer_store(device); - pvr_queues_destroy(device); + pvr_arch_queues_destroy(device); err_pvr_finish_tile_buffer_state: pvr_device_finish_tile_buffer_state(device); - pvr_device_finish_spm_load_state(device); + pvr_arch_device_finish_spm_load_state(device); err_pvr_finish_graphics_static_clear_state: pvr_device_finish_graphics_static_clear_state(device); @@ -880,7 +880,7 @@ err_pvr_finish_compute_idfwdf: pvr_device_finish_compute_idfwdf_state(device); err_pvr_destroy_compute_query_programs: - 
pvr_device_destroy_compute_query_programs(device); + pvr_arch_device_destroy_compute_query_programs(device); err_pvr_free_view_index: for (uint32_t u = 0; u < PVR_MAX_MULTIVIEW; ++u) @@ -942,15 +942,15 @@ void PVR_PER_ARCH(destroy_device)(struct pvr_device *device, simple_mtx_unlock(&device->rs_mtx); simple_mtx_destroy(&device->rs_mtx); - pvr_border_color_table_finish(device); + pvr_arch_border_color_table_finish(device); pvr_robustness_buffer_finish(device); pvr_spm_finish_scratch_buffer_store(device); - pvr_queues_destroy(device); + pvr_arch_queues_destroy(device); pvr_device_finish_tile_buffer_state(device); - pvr_device_finish_spm_load_state(device); + pvr_arch_device_finish_spm_load_state(device); pvr_device_finish_graphics_static_clear_state(device); pvr_device_finish_compute_idfwdf_state(device); - pvr_device_destroy_compute_query_programs(device); + pvr_arch_device_destroy_compute_query_programs(device); pvr_bo_suballoc_free(device->pds_compute_empty_program.pvr_bo); for (uint32_t u = 0; u < PVR_MAX_MULTIVIEW; ++u) diff --git a/src/imagination/vulkan/pvr_arch_formats.c b/src/imagination/vulkan/pvr_arch_formats.c index d767a877280..5f6b96cd81d 100644 --- a/src/imagination/vulkan/pvr_arch_formats.c +++ b/src/imagination/vulkan/pvr_arch_formats.c @@ -255,15 +255,14 @@ static const struct pvr_pbe_format pvr_pbe_format_table[] = { #undef FORMAT #undef FORMAT_DEPTH_STENCIL -const struct pvr_format *PVR_PER_ARCH(get_format_table)(unsigned *num_formats) +const struct pvr_format *pvr_arch_get_format_table(unsigned *num_formats) { assert(num_formats != NULL); *num_formats = ARRAY_SIZE(pvr_format_table); return pvr_format_table; } -static inline const struct pvr_format * -PVR_PER_ARCH(get_format)(VkFormat vk_format) +static inline const struct pvr_format *get_format(VkFormat vk_format) { if (vk_format < ARRAY_SIZE(pvr_format_table) && pvr_format_table[vk_format].bind != 0) { @@ -284,9 +283,9 @@ pvr_get_pbe_format(VkFormat vk_format) return 
&pvr_pbe_format_table[vk_format]; } -uint32_t PVR_PER_ARCH(get_tex_format)(VkFormat vk_format) +uint32_t pvr_arch_get_tex_format(VkFormat vk_format) { - const struct pvr_format *pvr_format = PVR_PER_ARCH(get_format)(vk_format); + const struct pvr_format *pvr_format = get_format(vk_format); if (pvr_format) { return pvr_format->tex_format; } @@ -294,10 +293,10 @@ uint32_t PVR_PER_ARCH(get_tex_format)(VkFormat vk_format) return ROGUE_TEXSTATE_FORMAT_INVALID; } -uint32_t PVR_PER_ARCH(get_tex_format_aspect)(VkFormat vk_format, - VkImageAspectFlags aspect_mask) +uint32_t pvr_arch_get_tex_format_aspect(VkFormat vk_format, + VkImageAspectFlags aspect_mask) { - const struct pvr_format *pvr_format = PVR_PER_ARCH(get_format)(vk_format); + const struct pvr_format *pvr_format = get_format(vk_format); if (pvr_format) { if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) return pvr_format->depth_tex_format; @@ -310,7 +309,7 @@ uint32_t PVR_PER_ARCH(get_tex_format_aspect)(VkFormat vk_format, return ROGUE_TEXSTATE_FORMAT_INVALID; } -uint32_t pvr_get_pbe_packmode(VkFormat vk_format) +uint32_t pvr_arch_get_pbe_packmode(VkFormat vk_format) { if (vk_format_is_block_compressed(vk_format)) return ROGUE_PBESTATE_PACKMODE_INVALID; @@ -318,7 +317,7 @@ uint32_t pvr_get_pbe_packmode(VkFormat vk_format) return pvr_get_pbe_format(vk_format)->packmode; } -uint32_t pvr_get_pbe_accum_format(VkFormat vk_format) +uint32_t pvr_arch_get_pbe_accum_format(VkFormat vk_format) { if (vk_format_is_block_compressed(vk_format)) return PVR_PBE_ACCUM_FORMAT_INVALID; @@ -326,9 +325,8 @@ uint32_t pvr_get_pbe_accum_format(VkFormat vk_format) return pvr_get_pbe_format(vk_format)->accum_format; } -bool PVR_PER_ARCH(format_is_pbe_downscalable)( - const struct pvr_device_info *dev_info, - VkFormat vk_format) +bool pvr_arch_format_is_pbe_downscalable(const struct pvr_device_info *dev_info, + VkFormat vk_format) { if (vk_format_is_int(vk_format)) { /* PBE downscale behavior for integer formats does not match Vulkan @@ -338,7 
+336,7 @@ bool PVR_PER_ARCH(format_is_pbe_downscalable)( return false; } - switch (PVR_PER_ARCH(get_pbe_packmode)(vk_format)) { + switch (pvr_arch_get_pbe_packmode(vk_format)) { default: return true; case ROGUE_PBESTATE_PACKMODE_F16: diff --git a/src/imagination/vulkan/pvr_arch_framebuffer.c b/src/imagination/vulkan/pvr_arch_framebuffer.c index 9bc3cb8f4bf..8262afe7436 100644 --- a/src/imagination/vulkan/pvr_arch_framebuffer.c +++ b/src/imagination/vulkan/pvr_arch_framebuffer.c @@ -91,12 +91,12 @@ err_mutex_destroy: return false; } -VkResult PVR_PER_ARCH(render_state_setup)( - struct pvr_device *device, - const VkAllocationCallbacks *pAllocator, - struct pvr_render_state *rstate, - uint32_t render_count, - const struct pvr_renderpass_hwsetup_render *renders) +VkResult +pvr_arch_render_state_setup(struct pvr_device *device, + const VkAllocationCallbacks *pAllocator, + struct pvr_render_state *rstate, + uint32_t render_count, + const struct pvr_renderpass_hwsetup_render *renders) { struct pvr_spm_bgobj_state *spm_bgobj_state_per_render; struct pvr_spm_eot_state *spm_eot_state_per_render; @@ -147,17 +147,17 @@ VkResult PVR_PER_ARCH(render_state_setup)( goto err_release_scratch_buffer; for (uint32_t i = 0; i < render_count; i++) { - result = pvr_spm_init_eot_state(device, - &spm_eot_state_per_render[i], - rstate, - &renders[i]); + result = pvr_arch_spm_init_eot_state(device, + &spm_eot_state_per_render[i], + rstate, + &renders[i]); if (result != VK_SUCCESS) goto err_finish_eot_state; - result = pvr_spm_init_bgobj_state(device, - &spm_bgobj_state_per_render[i], - rstate, - &renders[i]); + result = pvr_arch_spm_init_bgobj_state(device, + &spm_bgobj_state_per_render[i], + rstate, + &renders[i]); if (result != VK_SUCCESS) goto err_finish_bgobj_state; @@ -202,7 +202,7 @@ pvr_render_pass_get_scratch_buffer_size(struct pvr_device *device, const struct pvr_render_pass *pass, const struct pvr_render_state *rstate) { - return pvr_spm_scratch_buffer_calc_required_size( + return 
pvr_arch_spm_scratch_buffer_calc_required_size( pass->hw_setup->renders, pass->hw_setup->render_count, pass->max_sample_count, @@ -267,11 +267,11 @@ PVR_PER_ARCH(CreateFramebuffer)(VkDevice _device, rstate->scratch_buffer_size = pvr_render_pass_get_scratch_buffer_size(device, pass, rstate); - result = pvr_render_state_setup(device, - pAllocator, - rstate, - pass->hw_setup->render_count, - pass->hw_setup->renders); + result = pvr_arch_render_state_setup(device, + pAllocator, + rstate, + pass->hw_setup->render_count, + pass->hw_setup->renders); if (result != VK_SUCCESS) goto err_free_framebuffer; diff --git a/src/imagination/vulkan/pvr_arch_hw_pass.c b/src/imagination/vulkan/pvr_arch_hw_pass.c index 96b3b49918e..6e2b3e5816b 100644 --- a/src/imagination/vulkan/pvr_arch_hw_pass.c +++ b/src/imagination/vulkan/pvr_arch_hw_pass.c @@ -2454,9 +2454,8 @@ pvr_count_uses_in_color_output_list(struct pvr_render_subpass *subpass, *resolve_output_count_out = resolve_count; } -void PVR_PER_ARCH(destroy_renderpass_hwsetup)( - const VkAllocationCallbacks *alloc, - struct pvr_renderpass_hwsetup *hw_setup) +void pvr_arch_destroy_renderpass_hwsetup(const VkAllocationCallbacks *alloc, + struct pvr_renderpass_hwsetup *hw_setup) { for (uint32_t i = 0U; i < hw_setup->render_count; i++) { struct pvr_renderpass_hwsetup_render *hw_render = &hw_setup->renders[i]; @@ -2482,7 +2481,7 @@ void PVR_PER_ARCH(destroy_renderpass_hwsetup)( vk_free(alloc, hw_setup); } -VkResult PVR_PER_ARCH(create_renderpass_hwsetup)( +VkResult pvr_arch_create_renderpass_hwsetup( struct pvr_device *device, const VkAllocationCallbacks *alloc, struct pvr_render_pass *pass, @@ -2552,7 +2551,7 @@ VkResult PVR_PER_ARCH(create_renderpass_hwsetup)( const uint32_t part_bits = 0; if (vk_format_is_color(format) && - pvr_get_pbe_accum_format(attachment->vk_format) == + pvr_arch_get_pbe_accum_format(attachment->vk_format) == PVR_PBE_ACCUM_FORMAT_INVALID) { /* The VkFormat is not supported as a color attachment so `0`. 
* Vulkan doesn't seems to restrict vkCreateRenderPass() to supported @@ -2720,7 +2719,7 @@ end_create_renderpass_hwsetup: pvr_free_render(ctx); if (hw_setup) { - PVR_PER_ARCH(destroy_renderpass_hwsetup)(alloc, hw_setup); + pvr_arch_destroy_renderpass_hwsetup(alloc, hw_setup); hw_setup = NULL; } } diff --git a/src/imagination/vulkan/pvr_arch_image.c b/src/imagination/vulkan/pvr_arch_image.c index 9645989b551..3283ec9b586 100644 --- a/src/imagination/vulkan/pvr_arch_image.c +++ b/src/imagination/vulkan/pvr_arch_image.c @@ -122,9 +122,9 @@ VkResult PVR_PER_ARCH(CreateImageView)(VkDevice _device, util_format_compose_swizzles(format_swizzle, input_swizzle, info.swizzle); - result = pvr_pack_tex_state(device, - &info, - &iview->image_state[info.tex_state_type]); + result = pvr_arch_pack_tex_state(device, + &info, + &iview->image_state[info.tex_state_type]); if (result != VK_SUCCESS) goto err_vk_image_view_destroy; @@ -134,9 +134,10 @@ VkResult PVR_PER_ARCH(CreateImageView)(VkDevice _device, if (info.is_cube && image->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) { info.tex_state_type = PVR_TEXTURE_STATE_STORAGE; - result = pvr_pack_tex_state(device, - &info, - &iview->image_state[info.tex_state_type]); + result = + pvr_arch_pack_tex_state(device, + &info, + &iview->image_state[info.tex_state_type]); if (result != VK_SUCCESS) goto err_vk_image_view_destroy; } @@ -165,9 +166,10 @@ VkResult PVR_PER_ARCH(CreateImageView)(VkDevice _device, info.type = iview->vk.view_type; } - result = pvr_pack_tex_state(device, - &info, - &iview->image_state[info.tex_state_type]); + result = + pvr_arch_pack_tex_state(device, + &info, + &iview->image_state[info.tex_state_type]); if (result != VK_SUCCESS) goto err_vk_image_view_destroy; } @@ -251,7 +253,7 @@ PVR_PER_ARCH(CreateBufferView)(VkDevice _device, format_swizzle = pvr_get_format_swizzle(info.format); memcpy(info.swizzle, format_swizzle, sizeof(info.swizzle)); - result = pvr_pack_tex_state(device, &info, &bview->image_state); + result = 
pvr_arch_pack_tex_state(device, &info, &bview->image_state); if (result != VK_SUCCESS) goto err_vk_buffer_view_destroy; diff --git a/src/imagination/vulkan/pvr_arch_job_common.c b/src/imagination/vulkan/pvr_arch_job_common.c index bd66dfbf16c..f420deb6969 100644 --- a/src/imagination/vulkan/pvr_arch_job_common.c +++ b/src/imagination/vulkan/pvr_arch_job_common.c @@ -36,12 +36,11 @@ #include "vk_format.h" #include "vk_object.h" -void PVR_PER_ARCH(pbe_get_src_format_and_gamma)( - VkFormat vk_format, - enum pvr_pbe_gamma default_gamma, - bool with_packed_usc_channel, - uint32_t *const src_format_out, - enum pvr_pbe_gamma *const gamma_out) +void pvr_arch_pbe_get_src_format_and_gamma(VkFormat vk_format, + enum pvr_pbe_gamma default_gamma, + bool with_packed_usc_channel, + uint32_t *const src_format_out, + enum pvr_pbe_gamma *const gamma_out) { const struct util_format_description *desc = vk_format_description(vk_format); @@ -80,7 +79,7 @@ void PVR_PER_ARCH(pbe_get_src_format_and_gamma)( } } -void PVR_PER_ARCH(pbe_pack_state)( +void pvr_arch_pbe_pack_state( const struct pvr_device_info *dev_info, const struct pvr_pbe_surf_params *surface_params, const struct pvr_pbe_render_params *render_params, @@ -302,7 +301,7 @@ void PVR_PER_ARCH(pbe_pack_state)( * total_tiles_in_flight so that CR_ISP_CTL can be fully packed in * pvr_render_job_ws_fragment_state_init(). 
*/ -void PVR_PER_ARCH(setup_tiles_in_flight)( +void pvr_arch_setup_tiles_in_flight( const struct pvr_device_info *dev_info, const struct pvr_device_runtime_info *dev_runtime_info, uint32_t msaa_mode, diff --git a/src/imagination/vulkan/pvr_arch_job_compute.c b/src/imagination/vulkan/pvr_arch_job_compute.c index fc9379a028d..c976eedd982 100644 --- a/src/imagination/vulkan/pvr_arch_job_compute.c +++ b/src/imagination/vulkan/pvr_arch_job_compute.c @@ -212,10 +212,10 @@ static void pvr_compute_job_ws_submit_info_init( pvr_submit_info_flags_init(dev_info, sub_cmd, &submit_info->flags); } -VkResult PVR_PER_ARCH(compute_job_submit)(struct pvr_compute_ctx *ctx, - struct pvr_sub_cmd_compute *sub_cmd, - struct vk_sync *wait, - struct vk_sync *signal_sync) +VkResult pvr_arch_compute_job_submit(struct pvr_compute_ctx *ctx, + struct pvr_sub_cmd_compute *sub_cmd, + struct vk_sync *wait, + struct vk_sync *signal_sync) { struct pvr_winsys_compute_submit_info submit_info; struct pvr_device *device = ctx->device; diff --git a/src/imagination/vulkan/pvr_arch_job_context.c b/src/imagination/vulkan/pvr_arch_job_context.c index d1a14904fe2..c9e9770ed55 100644 --- a/src/imagination/vulkan/pvr_arch_job_context.c +++ b/src/imagination/vulkan/pvr_arch_job_context.c @@ -853,9 +853,9 @@ static void pvr_render_ctx_ws_create_info_init( pvr_render_ctx_ws_static_state_init(ctx, &create_info->static_state); } -VkResult PVR_PER_ARCH(render_ctx_create)(struct pvr_device *device, - enum pvr_winsys_ctx_priority priority, - struct pvr_render_ctx **const ctx_out) +VkResult pvr_arch_render_ctx_create(struct pvr_device *device, + enum pvr_winsys_ctx_priority priority, + struct pvr_render_ctx **const ctx_out) { const uint64_t vdm_callstack_size = sizeof(uint64_t) * PVR_VDM_CALLSTACK_MAX_DEPTH; @@ -920,7 +920,7 @@ err_vk_free_ctx: return result; } -void PVR_PER_ARCH(render_ctx_destroy)(struct pvr_render_ctx *ctx) +void pvr_arch_render_ctx_destroy(struct pvr_render_ctx *ctx) { struct pvr_device *device = 
ctx->device; @@ -1103,10 +1103,9 @@ static void pvr_compute_ctx_ws_create_info_init( &create_info->static_state); } -VkResult -PVR_PER_ARCH(compute_ctx_create)(struct pvr_device *const device, - enum pvr_winsys_ctx_priority priority, - struct pvr_compute_ctx **const ctx_out) +VkResult pvr_arch_compute_ctx_create(struct pvr_device *const device, + enum pvr_winsys_ctx_priority priority, + struct pvr_compute_ctx **const ctx_out) { struct pvr_winsys_compute_ctx_create_info create_info; struct pvr_compute_ctx *ctx; @@ -1190,7 +1189,7 @@ err_free_ctx: return result; } -void PVR_PER_ARCH(compute_ctx_destroy)(struct pvr_compute_ctx *const ctx) +void pvr_arch_compute_ctx_destroy(struct pvr_compute_ctx *const ctx) { struct pvr_device *device = ctx->device; @@ -1305,10 +1304,9 @@ static void pvr_transfer_ctx_shaders_fini(struct pvr_device *device, pvr_transfer_frag_store_fini(device, &ctx->frag_store); } -VkResult -PVR_PER_ARCH(transfer_ctx_create)(struct pvr_device *const device, - enum pvr_winsys_ctx_priority priority, - struct pvr_transfer_ctx **const ctx_out) +VkResult pvr_arch_transfer_ctx_create(struct pvr_device *const device, + enum pvr_winsys_ctx_priority priority, + struct pvr_transfer_ctx **const ctx_out) { struct pvr_winsys_transfer_ctx_create_info create_info; struct pvr_transfer_ctx *ctx; @@ -1345,7 +1343,7 @@ PVR_PER_ARCH(transfer_ctx_create)(struct pvr_device *const device, if (i == 0U && j == 0U) continue; - result = pvr_pds_unitex_state_program_create_and_upload( + result = pvr_arch_pds_unitex_state_program_create_and_upload( device, NULL, i, @@ -1385,7 +1383,7 @@ err_free_ctx: return result; } -void PVR_PER_ARCH(transfer_ctx_destroy)(struct pvr_transfer_ctx *const ctx) +void pvr_arch_transfer_ctx_destroy(struct pvr_transfer_ctx *const ctx) { struct pvr_device *device = ctx->device; diff --git a/src/imagination/vulkan/pvr_arch_job_render.c b/src/imagination/vulkan/pvr_arch_job_render.c index 24c4cb03ef0..f1d0f20d2cf 100644 --- 
a/src/imagination/vulkan/pvr_arch_job_render.c +++ b/src/imagination/vulkan/pvr_arch_job_render.c @@ -108,11 +108,11 @@ static inline void pvr_get_samples_in_xy(uint32_t samples, } } -void PVR_PER_ARCH(rt_mtile_info_init)(const struct pvr_device_info *dev_info, - struct pvr_rt_mtile_info *info, - uint32_t width, - uint32_t height, - uint32_t samples) +void pvr_arch_rt_mtile_info_init(const struct pvr_device_info *dev_info, + struct pvr_rt_mtile_info *info, + uint32_t width, + uint32_t height, + uint32_t samples) { uint32_t samples_in_x; uint32_t samples_in_y; @@ -611,7 +611,7 @@ static void pvr_rt_dataset_ws_create_info_init( pvr_rt_get_isp_region_size(device, mtile_info); } -VkResult PVR_PER_ARCH(render_target_dataset_create)( +VkResult pvr_arch_render_target_dataset_create( struct pvr_device *device, uint32_t width, uint32_t height, @@ -632,7 +632,7 @@ VkResult PVR_PER_ARCH(render_target_dataset_create)( assert(height <= rogue_get_render_size_max_y(dev_info)); assert(layers > 0 && layers <= PVR_MAX_FRAMEBUFFER_LAYERS); - pvr_rt_mtile_info_init(dev_info, &mtile_info, width, height, samples); + pvr_arch_rt_mtile_info_init(dev_info, &mtile_info, width, height, samples); rt_dataset = vk_zalloc(&device->vk.alloc, sizeof(*rt_dataset), @@ -925,14 +925,14 @@ static void pvr_frag_state_stream_init(struct pvr_render_ctx *ctx, stream_ptr += pvr_cmd_length(KMD_STREAM_HDR); /* FIXME: pass in the number of samples rather than isp_aa_mode? 
*/ - pvr_setup_tiles_in_flight(dev_info, - dev_runtime_info, - isp_aa_mode, - job->pixel_output_width, - false, - job->max_tiles_in_flight, - &isp_ctl, - &pixel_ctl); + pvr_arch_setup_tiles_in_flight(dev_info, + dev_runtime_info, + isp_aa_mode, + job->pixel_output_width, + false, + job->max_tiles_in_flight, + &isp_ctl, + &pixel_ctl); pvr_csb_pack ((uint64_t *)stream_ptr, CR_ISP_SCISSOR_BASE, value) { value.addr = job->scissor_table_addr; @@ -1136,11 +1136,11 @@ static void pvr_frag_state_stream_init(struct pvr_render_ctx *ctx, } stream_ptr += pvr_cmd_length(CR_ISP_AA); - pvr_rt_mtile_info_init(dev_info, - &tiling_info, - rt_dataset->width, - rt_dataset->height, - rt_dataset->samples); + pvr_arch_rt_mtile_info_init(dev_info, + &tiling_info, + rt_dataset->width, + rt_dataset->height, + rt_dataset->samples); pvr_csb_pack (stream_ptr, CR_ISP_CTL, value) { value.sample_pos = true; value.process_empty_tiles = job->process_empty_tiles; @@ -1452,12 +1452,12 @@ static void pvr_render_job_ws_submit_info_init( &submit_info->fragment_pr); } -VkResult PVR_PER_ARCH(render_job_submit)(struct pvr_render_ctx *ctx, - struct pvr_render_job *job, - struct vk_sync *wait_geom, - struct vk_sync *wait_frag, - struct vk_sync *signal_sync_geom, - struct vk_sync *signal_sync_frag) +VkResult pvr_arch_render_job_submit(struct pvr_render_ctx *ctx, + struct pvr_render_job *job, + struct vk_sync *wait_geom, + struct vk_sync *wait_frag, + struct vk_sync *signal_sync_geom, + struct vk_sync *signal_sync_frag) { struct pvr_rt_dataset *rt_dataset = job->view_state.rt_datasets[job->view_state.view_index]; diff --git a/src/imagination/vulkan/pvr_arch_job_transfer.c b/src/imagination/vulkan/pvr_arch_job_transfer.c index d2aa3569c2b..314ed9d1e7c 100644 --- a/src/imagination/vulkan/pvr_arch_job_transfer.c +++ b/src/imagination/vulkan/pvr_arch_job_transfer.c @@ -816,14 +816,14 @@ pvr_pbe_setup_codegen_defaults(const struct pvr_device_info *dev_info, swizzle = pvr_get_format_swizzle(format); 
memcpy(surface_params->swizzle, swizzle, sizeof(surface_params->swizzle)); - pvr_pbe_get_src_format_and_gamma(format, - PVR_PBE_GAMMA_NONE, - false, - &surface_params->source_format, - &surface_params->gamma); + pvr_arch_pbe_get_src_format_and_gamma(format, + PVR_PBE_GAMMA_NONE, + false, + &surface_params->source_format, + &surface_params->gamma); surface_params->is_normalized = pvr_vk_format_is_fully_normalized(format); - surface_params->pbe_packmode = pvr_get_pbe_packmode(format); + surface_params->pbe_packmode = pvr_arch_get_pbe_packmode(format); surface_params->nr_components = vk_format_get_nr_components(format); result = pvr_mem_layout_spec(dst, @@ -1206,16 +1206,16 @@ static VkResult pvr_pbe_setup_emit(const struct pvr_transfer_cmd *transfer_cmd, staging_buffer + program.data_size, dev_info); - result = - pvr_cmd_buffer_upload_pds(transfer_cmd->cmd_buffer, - staging_buffer, - program.data_size, - ROGUE_CR_EVENT_PIXEL_PDS_DATA_ADDR_ALIGNMENT, - staging_buffer + program.data_size, - program.code_size, - ROGUE_CR_EVENT_PIXEL_PDS_CODE_ADDR_ALIGNMENT, - ROGUE_CR_EVENT_PIXEL_PDS_DATA_ADDR_ALIGNMENT, - &pds_upload); + result = pvr_arch_cmd_buffer_upload_pds( + transfer_cmd->cmd_buffer, + staging_buffer, + program.data_size, + ROGUE_CR_EVENT_PIXEL_PDS_DATA_ADDR_ALIGNMENT, + staging_buffer + program.data_size, + program.code_size, + ROGUE_CR_EVENT_PIXEL_PDS_CODE_ADDR_ALIGNMENT, + ROGUE_CR_EVENT_PIXEL_PDS_DATA_ADDR_ALIGNMENT, + &pds_upload); vk_free(&device->vk.alloc, staging_buffer); if (result != VK_SUCCESS) return result; @@ -1300,11 +1300,11 @@ static VkResult pvr_pbe_setup(const struct pvr_transfer_cmd *transfer_cmd, pvr_pbe_setup_swizzle(transfer_cmd, state, &surf_params); - pvr_pbe_pack_state(dev_info, - &surf_params, - &render_params, - pbe_words, - pbe_regs); + pvr_arch_pbe_pack_state(dev_info, + &surf_params, + &render_params, + pbe_words, + pbe_regs); if (PVR_HAS_ERN(dev_info, 42064)) { uint64_t temp_reg; @@ -1438,14 +1438,14 @@ static VkResult 
pvr_isp_tiles(const struct pvr_device *device, reg.y = origin_y; } - pvr_setup_tiles_in_flight(dev_info, - dev_runtime_info, - pvr_cr_isp_aa_mode_type(samples), - state->usc_pixel_width, - state->pair_tiles != PVR_PAIRED_TILES_NONE, - 0, - &isp_tiles_in_flight, - &state->regs.usc_pixel_output_ctrl); + pvr_arch_setup_tiles_in_flight(dev_info, + dev_runtime_info, + pvr_cr_isp_aa_mode_type(samples), + state->usc_pixel_width, + state->pair_tiles != PVR_PAIRED_TILES_NONE, + 0, + &isp_tiles_in_flight, + &state->regs.usc_pixel_output_ctrl); pvr_csb_pack (&state->regs.isp_ctl, CR_ISP_CTL, reg) { reg.process_empty_tiles = true; @@ -1737,7 +1737,7 @@ static inline VkResult pvr_image_state_set_codegen_defaults( else info.type = VK_IMAGE_VIEW_TYPE_1D; - result = pvr_pack_tex_state(device, &info, &image_state); + result = pvr_arch_pack_tex_state(device, &info, &image_state); if (result != VK_SUCCESS) return result; @@ -2241,10 +2241,10 @@ pvr_pds_unitex(const struct pvr_device_info *dev_info, ROGUE_TA_STATE_PDS_SIZEINFO1_PDS_TEXTURESTATESIZE_UNIT_SIZE); result = - pvr_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, - ctx->device->heaps.pds_heap, - PVR_DW_TO_BYTES(state->tex_state_data_size), - &pvr_bo); + pvr_arch_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, + ctx->device->heaps.pds_heap, + PVR_DW_TO_BYTES(state->tex_state_data_size), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -2375,7 +2375,7 @@ static VkResult pvr_pack_clear_color(VkFormat format, { const uint32_t red_width = vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0U); - uint32_t pbe_pack_mode = pvr_get_pbe_packmode(format); + uint32_t pbe_pack_mode = pvr_arch_get_pbe_packmode(format); const bool pbe_norm = pvr_vk_format_is_fully_normalized(format); /* TODO: Use PBE Accum format NOT PBE pack format! 
*/ @@ -2950,10 +2950,11 @@ static VkResult pvr_3d_copy_blit_core(struct pvr_transfer_ctx *ctx, unitex_prog.num_texture_dma_kicks = 1U; unitex_prog.num_uniform_dma_kicks = 0U; - result = pvr_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, - device->heaps.general_heap, - PVR_DW_TO_BYTES(tex_state_dma_size_dw), - &pvr_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, + device->heaps.general_heap, + PVR_DW_TO_BYTES(tex_state_dma_size_dw), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -3132,7 +3133,7 @@ pvr_pds_coeff_task(struct pvr_transfer_ctx *ctx, pvr_pds_set_sizes_coeff_loading(&program); - result = pvr_cmd_buffer_alloc_mem( + result = pvr_arch_cmd_buffer_alloc_mem( transfer_cmd->cmd_buffer, ctx->device->heaps.pds_heap, PVR_DW_TO_BYTES(program.data_size + program.code_size), @@ -4131,10 +4132,10 @@ static VkResult pvr_isp_ctrl_stream(const struct pvr_device_info *dev_info, total_stream_size = region_arrays_size + prim_blk_size; /* Allocate space for IPF control stream. */ - result = pvr_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, - ctx->device->heaps.transfer_frag_heap, - total_stream_size, - &pvr_cs_bo); + result = pvr_arch_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, + ctx->device->heaps.transfer_frag_heap, + total_stream_size, + &pvr_cs_bo); if (result != VK_SUCCESS) return result; @@ -4295,10 +4296,11 @@ static VkResult pvr_isp_ctrl_stream(const struct pvr_device_info *dev_info, unitex_pds_prog.num_uniform_dma_kicks = 0U; /* Allocate memory for DMA. 
*/ - result = pvr_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, - ctx->device->heaps.general_heap, - tex_state_dma_size << 2U, - &pvr_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(transfer_cmd->cmd_buffer, + ctx->device->heaps.general_heap, + tex_state_dma_size << 2U, + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -6012,10 +6014,10 @@ static VkResult pvr_queue_transfer(struct pvr_transfer_ctx *ctx, return VK_SUCCESS; } -VkResult PVR_PER_ARCH(transfer_job_submit)(struct pvr_transfer_ctx *ctx, - struct pvr_sub_cmd_transfer *sub_cmd, - struct vk_sync *wait_sync, - struct vk_sync *signal_sync) +VkResult pvr_arch_transfer_job_submit(struct pvr_transfer_ctx *ctx, + struct pvr_sub_cmd_transfer *sub_cmd, + struct vk_sync *wait_sync, + struct vk_sync *signal_sync) { list_for_each_entry_safe (struct pvr_transfer_cmd, transfer_cmd, diff --git a/src/imagination/vulkan/pvr_arch_mrt.c b/src/imagination/vulkan/pvr_arch_mrt.c index fa07d6d410d..9752e955153 100644 --- a/src/imagination/vulkan/pvr_arch_mrt.c +++ b/src/imagination/vulkan/pvr_arch_mrt.c @@ -79,7 +79,7 @@ static int32_t pvr_mrt_alloc_from_buffer(const struct pvr_device_info *dev_info, return -1; } -void PVR_PER_ARCH(init_mrt_desc)(VkFormat format, struct usc_mrt_desc *desc) +void pvr_arch_init_mrt_desc(VkFormat format, struct usc_mrt_desc *desc) { uint32_t pixel_size_in_chunks; uint32_t pixel_size_in_bits; @@ -89,7 +89,7 @@ void PVR_PER_ARCH(init_mrt_desc)(VkFormat format, struct usc_mrt_desc *desc) */ const uint32_t part_bits = 0; if (vk_format_is_color(format) && - pvr_get_pbe_accum_format(format) == PVR_PBE_ACCUM_FORMAT_INVALID) { + pvr_arch_get_pbe_accum_format(format) == PVR_PBE_ACCUM_FORMAT_INVALID) { /* The VkFormat is not supported as a color attachment so `0`. * vulkan doesn't seem to restrict vkCreateRenderPass() to supported * formats only. 
@@ -179,7 +179,7 @@ static VkResult pvr_alloc_mrt(const struct pvr_device_info *dev_info, MAX2(alloc->output_regs_count, resource->mem.offset_dw + pixel_size); } - pvr_init_mrt_desc(format, &resource->mrt_desc); + pvr_arch_init_mrt_desc(format, &resource->mrt_desc); resource->intermediate_size = resource->mrt_desc.intermediate_size; setup->num_render_targets++; @@ -187,11 +187,11 @@ static VkResult pvr_alloc_mrt(const struct pvr_device_info *dev_info, return VK_SUCCESS; } -VkResult PVR_PER_ARCH(init_usc_mrt_setup)( - struct pvr_device *device, - uint32_t attachment_count, - const VkFormat attachment_formats[attachment_count], - struct usc_mrt_setup *setup) +VkResult +pvr_arch_init_usc_mrt_setup(struct pvr_device *device, + uint32_t attachment_count, + const VkFormat attachment_formats[attachment_count], + struct usc_mrt_setup *setup) { const struct pvr_device_info *dev_info = &device->pdevice->dev_info; struct pvr_mrt_alloc_ctx alloc = { 0 }; @@ -230,8 +230,8 @@ fail: return result; } -void PVR_PER_ARCH(destroy_mrt_setup)(const struct pvr_device *device, - struct usc_mrt_setup *setup) +void pvr_arch_destroy_mrt_setup(const struct pvr_device *device, + struct usc_mrt_setup *setup) { if (!setup) return; @@ -298,7 +298,7 @@ static VkResult pvr_mrt_load_op_init(struct pvr_device *device, load_op->clears_loads_state.mrt_setup = &dr_info->hw_render.init_setup; - result = pvr_load_op_shader_generate(device, alloc, load_op); + result = pvr_arch_load_op_shader_generate(device, alloc, load_op); if (result != VK_SUCCESS) { vk_free2(&device->vk.alloc, alloc, load_op); return result; @@ -328,9 +328,9 @@ static void pvr_load_op_destroy(struct pvr_device *device, vk_free2(&device->vk.alloc, allocator, load_op); } -void PVR_PER_ARCH(mrt_load_op_state_cleanup)(const struct pvr_device *device, - const VkAllocationCallbacks *alloc, - struct pvr_load_op_state *state) +void pvr_arch_mrt_load_op_state_cleanup(const struct pvr_device *device, + const VkAllocationCallbacks *alloc, + 
struct pvr_load_op_state *state) { if (!state) return; @@ -382,7 +382,7 @@ pvr_mrt_load_op_state_create(struct pvr_device *device, return VK_SUCCESS; err_load_op_state_cleanup: - pvr_mrt_load_op_state_cleanup(device, alloc, load_op_state); + pvr_arch_mrt_load_op_state_cleanup(device, alloc, load_op_state); return result; } @@ -441,10 +441,9 @@ pvr_mrt_add_missing_output_register_write(struct usc_mrt_setup *setup, return VK_SUCCESS; } -VkResult -PVR_PER_ARCH(mrt_load_ops_setup)(struct pvr_cmd_buffer *cmd_buffer, - const VkAllocationCallbacks *alloc, - struct pvr_load_op_state **load_op_state) +VkResult pvr_arch_mrt_load_ops_setup(struct pvr_cmd_buffer *cmd_buffer, + const VkAllocationCallbacks *alloc, + struct pvr_load_op_state **load_op_state) { const struct pvr_cmd_buffer_state *state = &cmd_buffer->state; const struct pvr_dynamic_render_info *dr_info = @@ -477,7 +476,7 @@ PVR_PER_ARCH(mrt_load_ops_setup)(struct pvr_cmd_buffer *cmd_buffer, return result; } -VkResult PVR_PER_ARCH(pds_unitex_state_program_create_and_upload)( +VkResult pvr_arch_pds_unitex_state_program_create_and_upload( struct pvr_device *device, const VkAllocationCallbacks *allocator, uint32_t texture_kicks, @@ -593,9 +592,9 @@ static VkResult pvr_pds_fragment_program_create_and_upload( } VkResult -PVR_PER_ARCH(load_op_shader_generate)(struct pvr_device *device, - const VkAllocationCallbacks *allocator, - struct pvr_load_op *load_op) +pvr_arch_load_op_shader_generate(struct pvr_device *device, + const VkAllocationCallbacks *allocator, + struct pvr_load_op *load_op) { const struct pvr_device_info *dev_info = &device->pdevice->dev_info; const uint32_t cache_line_size = pvr_get_slc_cache_line_size(dev_info); @@ -634,7 +633,7 @@ PVR_PER_ARCH(load_op_shader_generate)(struct pvr_device *device, * one buffer to be DMAed. See `pvr_load_op_data_create_and_upload()`, where * we upload the buffer and upload the code section. 
*/ - result = pvr_pds_unitex_state_program_create_and_upload( + result = pvr_arch_pds_unitex_state_program_create_and_upload( device, allocator, 1U, diff --git a/src/imagination/vulkan/pvr_arch_pass.c b/src/imagination/vulkan/pvr_arch_pass.c index da33d5a8c04..43ed9a9671c 100644 --- a/src/imagination/vulkan/pvr_arch_pass.c +++ b/src/imagination/vulkan/pvr_arch_pass.c @@ -208,7 +208,7 @@ pvr_subpass_load_op_init(struct pvr_device *device, load_op->subpass = subpass; load_op->clears_loads_state.mrt_setup = &hw_subpass->setup; - result = pvr_load_op_shader_generate(device, allocator, load_op); + result = pvr_arch_load_op_shader_generate(device, allocator, load_op); if (result != VK_SUCCESS) { vk_free2(&device->vk.alloc, allocator, load_op); return result; @@ -328,7 +328,7 @@ static VkResult pvr_render_load_op_init( load_op->view_indices[0] = view_index; load_op->view_count = 1; - return pvr_load_op_shader_generate(device, allocator, load_op); + return pvr_arch_load_op_shader_generate(device, allocator, load_op); } static void pvr_load_op_fini(struct pvr_load_op *load_op) @@ -848,8 +848,8 @@ PVR_PER_ARCH(CreateRenderPass2)(VkDevice _device, */ attachment->is_pbe_downscalable = PVR_HAS_FEATURE(dev_info, gs_rta_support) && - pvr_format_is_pbe_downscalable(&device->pdevice->dev_info, - attachment->vk_format); + pvr_arch_format_is_pbe_downscalable(&device->pdevice->dev_info, + attachment->vk_format); if (attachment->sample_count > pass->max_sample_count) pass->max_sample_count = attachment->sample_count; @@ -1034,8 +1034,11 @@ PVR_PER_ARCH(CreateRenderPass2)(VkDevice _device, pass->max_tilebuffer_count = PVR_SPM_LOAD_IN_BUFFERS_COUNT(&device->pdevice->dev_info); - result = - pvr_create_renderpass_hwsetup(device, alloc, pass, false, &pass->hw_setup); + result = pvr_arch_create_renderpass_hwsetup(device, + alloc, + pass, + false, + &pass->hw_setup); if (result != VK_SUCCESS) goto err_free_pass; @@ -1050,7 +1053,7 @@ PVR_PER_ARCH(CreateRenderPass2)(VkDevice _device, return 
VK_SUCCESS; err_destroy_renderpass_hwsetup: - pvr_destroy_renderpass_hwsetup(alloc, pass->hw_setup); + pvr_arch_destroy_renderpass_hwsetup(alloc, pass->hw_setup); err_free_pass: vk_object_base_finish(&pass->base); @@ -1075,7 +1078,7 @@ void PVR_PER_ARCH(DestroyRenderPass)(VkDevice _device, allocator, pass, pass->hw_setup->render_count); - PVR_PER_ARCH(destroy_renderpass_hwsetup)(allocator, pass->hw_setup); + pvr_arch_destroy_renderpass_hwsetup(allocator, pass->hw_setup); vk_object_base_finish(&pass->base); vk_free2(&device->vk.alloc, pAllocator, pass); } diff --git a/src/imagination/vulkan/pvr_arch_pipeline.c b/src/imagination/vulkan/pvr_arch_pipeline.c index c44ebe1f9f9..e0b1c33d73d 100644 --- a/src/imagination/vulkan/pvr_arch_pipeline.c +++ b/src/imagination/vulkan/pvr_arch_pipeline.c @@ -2736,10 +2736,10 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device, if (!pCreateInfo->renderPass) { const struct vk_render_pass_state *rp = state->rp; - result = pvr_init_usc_mrt_setup(device, - rp->color_attachment_count, - rp->color_attachment_formats, - &mrt_setup); + result = pvr_arch_init_usc_mrt_setup(device, + rp->color_attachment_count, + rp->color_attachment_formats, + &mrt_setup); if (result != VK_SUCCESS) return result; } @@ -2831,7 +2831,7 @@ pvr_graphics_pipeline_compile(struct pvr_device *const device, } if (!pCreateInfo->renderPass) - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); for (mesa_shader_stage stage = 0; stage < MESA_SHADER_STAGES; ++stage) { pco_shader **pco = &pco_shaders[stage]; @@ -2971,7 +2971,7 @@ err_free_vertex_bo: err_free_build_context: ralloc_free(shader_mem_ctx); if (!pCreateInfo->renderPass) - pvr_destroy_mrt_setup(device, &mrt_setup); + pvr_arch_destroy_mrt_setup(device, &mrt_setup); return result; } diff --git a/src/imagination/vulkan/pvr_arch_query_compute.c b/src/imagination/vulkan/pvr_arch_query_compute.c index d9f6621d8fe..d9915f043b6 100644 --- 
a/src/imagination/vulkan/pvr_arch_query_compute.c +++ b/src/imagination/vulkan/pvr_arch_query_compute.c @@ -172,9 +172,9 @@ static VkResult pvr_create_compute_query_precomp_program( false); result = - pvr_pds_compute_shader_create_and_upload(device, - &pds_primary_prog, - &query_prog->pds_prim_code); + pvr_arch_pds_compute_shader_create_and_upload(device, + &pds_primary_prog, + &query_prog->pds_prim_code); if (result != VK_SUCCESS) goto err_free_usc_bo; @@ -212,10 +212,11 @@ static VkResult pvr_write_compute_query_pds_data_section( uint64_t *qword_buffer; VkResult result; - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, - cmd_buffer->device->heaps.pds_heap, - PVR_DW_TO_BYTES(info->data_size_in_dwords), - &pvr_bo); + result = + pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, + cmd_buffer->device->heaps.pds_heap, + PVR_DW_TO_BYTES(info->data_size_in_dwords), + &pvr_bo); if (result != VK_SUCCESS) return result; @@ -329,12 +330,14 @@ static void pvr_write_private_compute_dispatch( assert(sub_cmd->type == PVR_SUB_CMD_TYPE_QUERY); - pvr_compute_update_shared_private(cmd_buffer, &sub_cmd->compute, pipeline); - pvr_compute_update_kernel_private(cmd_buffer, - &sub_cmd->compute, - pipeline, - workgroup_size); - pvr_compute_generate_fence(cmd_buffer, &sub_cmd->compute, false); + pvr_arch_compute_update_shared_private(cmd_buffer, + &sub_cmd->compute, + pipeline); + pvr_arch_compute_update_kernel_private(cmd_buffer, + &sub_cmd->compute, + pipeline, + workgroup_size); + pvr_arch_compute_generate_fence(cmd_buffer, &sub_cmd->compute, false); } static void @@ -347,7 +350,7 @@ pvr_destroy_compute_query_program(struct pvr_device *device, } VkResult -PVR_PER_ARCH(device_create_compute_query_programs)(struct pvr_device *device) +pvr_arch_device_create_compute_query_programs(struct pvr_device *device) { VkResult result; @@ -389,8 +392,7 @@ err_destroy_availability_query_program: return result; } -void PVR_PER_ARCH(device_destroy_compute_query_programs)( - struct pvr_device *device) +void 
pvr_arch_device_destroy_compute_query_programs(struct pvr_device *device) { pvr_destroy_compute_query_program(device, &device->availability_shader); pvr_destroy_compute_query_program(device, &device->copy_results_shader); @@ -398,9 +400,8 @@ void PVR_PER_ARCH(device_destroy_compute_query_programs)( } /* TODO: Split this function into per program type functions. */ -VkResult -PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, - const struct pvr_query_info *query_info) +VkResult pvr_arch_add_query_program(struct pvr_cmd_buffer *cmd_buffer, + const struct pvr_query_info *query_info) { struct pvr_device *device = cmd_buffer->device; const struct pvr_compute_query_shader *query_prog; @@ -410,7 +411,8 @@ PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, struct pvr_suballoc_bo *pvr_bo; VkResult result; - result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_QUERY); + result = + pvr_arch_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_QUERY); if (result != VK_SUCCESS) return result; @@ -564,7 +566,7 @@ PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, UNREACHABLE("Invalid query type"); } - result = pvr_cmd_buffer_upload_general( + result = pvr_arch_cmd_buffer_upload_general( cmd_buffer, const_buffer, PVR_DW_TO_BYTES(pipeline.const_shared_regs_count), @@ -592,5 +594,5 @@ PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, pvr_write_private_compute_dispatch(cmd_buffer, &pipeline, num_query_indices); - return pvr_cmd_buffer_end_sub_cmd(cmd_buffer); + return pvr_arch_cmd_buffer_end_sub_cmd(cmd_buffer); } diff --git a/src/imagination/vulkan/pvr_arch_queue.c b/src/imagination/vulkan/pvr_arch_queue.c index 7f223b66383..dd1962ff142 100644 --- a/src/imagination/vulkan/pvr_arch_queue.c +++ b/src/imagination/vulkan/pvr_arch_queue.c @@ -89,26 +89,27 @@ static VkResult pvr_queue_init(struct pvr_device *device, goto err_vk_queue_finish; } - result = pvr_transfer_ctx_create(device, - 
PVR_WINSYS_CTX_PRIORITY_MEDIUM, - &transfer_ctx); + result = pvr_arch_transfer_ctx_create(device, + PVR_WINSYS_CTX_PRIORITY_MEDIUM, + &transfer_ctx); if (result != VK_SUCCESS) goto err_vk_queue_finish; - result = pvr_compute_ctx_create(device, - PVR_WINSYS_CTX_PRIORITY_MEDIUM, - &compute_ctx); + result = pvr_arch_compute_ctx_create(device, + PVR_WINSYS_CTX_PRIORITY_MEDIUM, + &compute_ctx); if (result != VK_SUCCESS) goto err_transfer_ctx_destroy; - result = pvr_compute_ctx_create(device, - PVR_WINSYS_CTX_PRIORITY_MEDIUM, - &query_ctx); + result = pvr_arch_compute_ctx_create(device, + PVR_WINSYS_CTX_PRIORITY_MEDIUM, + &query_ctx); if (result != VK_SUCCESS) goto err_compute_ctx_destroy; - result = - pvr_render_ctx_create(device, PVR_WINSYS_CTX_PRIORITY_MEDIUM, &gfx_ctx); + result = pvr_arch_render_ctx_create(device, + PVR_WINSYS_CTX_PRIORITY_MEDIUM, + &gfx_ctx); if (result != VK_SUCCESS) goto err_query_ctx_destroy; @@ -123,13 +124,13 @@ static VkResult pvr_queue_init(struct pvr_device *device, return VK_SUCCESS; err_query_ctx_destroy: - pvr_compute_ctx_destroy(query_ctx); + pvr_arch_compute_ctx_destroy(query_ctx); err_compute_ctx_destroy: - pvr_compute_ctx_destroy(compute_ctx); + pvr_arch_compute_ctx_destroy(compute_ctx); err_transfer_ctx_destroy: - pvr_transfer_ctx_destroy(transfer_ctx); + pvr_arch_transfer_ctx_destroy(transfer_ctx); err_vk_queue_finish: vk_queue_finish(&queue->vk); @@ -137,8 +138,8 @@ err_vk_queue_finish: return result; } -VkResult PVR_PER_ARCH(queues_create)(struct pvr_device *device, - const VkDeviceCreateInfo *pCreateInfo) +VkResult pvr_arch_queues_create(struct pvr_device *device, + const VkDeviceCreateInfo *pCreateInfo) { VkResult result; @@ -170,7 +171,7 @@ VkResult PVR_PER_ARCH(queues_create)(struct pvr_device *device, return VK_SUCCESS; err_queues_finish: - PVR_PER_ARCH(queues_destroy)(device); + pvr_arch_queues_destroy(device); return result; } @@ -186,15 +187,15 @@ static void pvr_queue_finish(struct pvr_queue *queue) 
vk_sync_destroy(&queue->device->vk, queue->last_job_signal_sync[i]); } - pvr_render_ctx_destroy(queue->gfx_ctx); - pvr_compute_ctx_destroy(queue->query_ctx); - pvr_compute_ctx_destroy(queue->compute_ctx); - pvr_transfer_ctx_destroy(queue->transfer_ctx); + pvr_arch_render_ctx_destroy(queue->gfx_ctx); + pvr_arch_compute_ctx_destroy(queue->query_ctx); + pvr_arch_compute_ctx_destroy(queue->compute_ctx); + pvr_arch_transfer_ctx_destroy(queue->transfer_ctx); vk_queue_finish(&queue->vk); } -void PVR_PER_ARCH(queues_destroy)(struct pvr_device *device) +void pvr_arch_queues_destroy(struct pvr_device *device) { for (uint32_t q_idx = 0; q_idx < device->queue_count; q_idx++) pvr_queue_finish(&device->queues[q_idx]); @@ -275,13 +276,13 @@ pvr_process_graphics_cmd_for_view(struct pvr_device *device, job->geometry_terminate = false; job->run_frag = false; - result = - pvr_render_job_submit(queue->gfx_ctx, - &sub_cmd->job, - queue->next_job_wait_sync[PVR_JOB_TYPE_GEOM], - NULL, - NULL, - NULL); + result = pvr_arch_render_job_submit( + queue->gfx_ctx, + &sub_cmd->job, + queue->next_job_wait_sync[PVR_JOB_TYPE_GEOM], + NULL, + NULL, + NULL); job->geometry_terminate = true; job->run_frag = true; @@ -303,12 +304,13 @@ pvr_process_graphics_cmd_for_view(struct pvr_device *device, (view_index * PVR_DW_TO_BYTES(sub_cmd->multiview_ctrl_stream_stride)); } - result = pvr_render_job_submit(queue->gfx_ctx, - &sub_cmd->job, - queue->next_job_wait_sync[PVR_JOB_TYPE_GEOM], - queue->next_job_wait_sync[PVR_JOB_TYPE_FRAG], - geom_signal_sync, - frag_signal_sync); + result = + pvr_arch_render_job_submit(queue->gfx_ctx, + &sub_cmd->job, + queue->next_job_wait_sync[PVR_JOB_TYPE_GEOM], + queue->next_job_wait_sync[PVR_JOB_TYPE_FRAG], + geom_signal_sync, + frag_signal_sync); if (original_ctrl_stream_addr.addr > 0) job->ctrl_stream_addr = original_ctrl_stream_addr; @@ -373,11 +375,11 @@ static VkResult pvr_process_compute_cmd(struct pvr_device *device, if (result != VK_SUCCESS) return result; - result = - 
pvr_compute_job_submit(queue->compute_ctx, - sub_cmd, - queue->next_job_wait_sync[PVR_JOB_TYPE_COMPUTE], - sync); + result = pvr_arch_compute_job_submit( + queue->compute_ctx, + sub_cmd, + queue->next_job_wait_sync[PVR_JOB_TYPE_COMPUTE], + sync); if (result != VK_SUCCESS) { vk_sync_destroy(&device->vk, sync); return result; @@ -403,11 +405,11 @@ static VkResult pvr_process_transfer_cmds(struct pvr_device *device, if (result != VK_SUCCESS) return result; - result = - pvr_transfer_job_submit(queue->transfer_ctx, - sub_cmd, - queue->next_job_wait_sync[PVR_JOB_TYPE_TRANSFER], - sync); + result = pvr_arch_transfer_job_submit( + queue->transfer_ctx, + sub_cmd, + queue->next_job_wait_sync[PVR_JOB_TYPE_TRANSFER], + sync); if (result != VK_SUCCESS) { vk_sync_destroy(&device->vk, sync); return result; @@ -440,10 +442,10 @@ static VkResult pvr_process_query_cmd(struct pvr_device *device, return result; result = - pvr_compute_job_submit(queue->query_ctx, - sub_cmd, - queue->next_job_wait_sync[PVR_JOB_TYPE_QUERY], - sync); + pvr_arch_compute_job_submit(queue->query_ctx, + sub_cmd, + queue->next_job_wait_sync[PVR_JOB_TYPE_QUERY], + sync); if (result != VK_SUCCESS) { vk_sync_destroy(&device->vk, sync); return result; diff --git a/src/imagination/vulkan/pvr_arch_sampler.c b/src/imagination/vulkan/pvr_arch_sampler.c index 0f659a21d7c..12a7fde7fab 100644 --- a/src/imagination/vulkan/pvr_arch_sampler.c +++ b/src/imagination/vulkan/pvr_arch_sampler.c @@ -78,7 +78,7 @@ VkResult PVR_PER_ARCH(CreateSampler)(VkDevice _device, mag_filter = pCreateInfo->magFilter; min_filter = pCreateInfo->minFilter; - result = pvr_border_color_table_get_or_create_entry( + result = pvr_arch_border_color_table_get_or_create_entry( device, sampler, device->border_color_table, @@ -229,8 +229,8 @@ void PVR_PER_ARCH(DestroySampler)(VkDevice _device, if (!sampler) return; - pvr_border_color_table_release_entry(device->border_color_table, - sampler->border_color_table_index); + 
pvr_arch_border_color_table_release_entry(device->border_color_table, + sampler->border_color_table_index); vk_sampler_destroy(&device->vk, pAllocator, &sampler->vk); } diff --git a/src/imagination/vulkan/pvr_arch_spm.c b/src/imagination/vulkan/pvr_arch_spm.c index 267b93737e3..20f2761eee6 100644 --- a/src/imagination/vulkan/pvr_arch_spm.c +++ b/src/imagination/vulkan/pvr_arch_spm.c @@ -36,7 +36,7 @@ struct pvr_spm_scratch_buffer { uint64_t size; }; -uint64_t PVR_PER_ARCH(spm_scratch_buffer_calc_required_size)( +uint64_t pvr_arch_spm_scratch_buffer_calc_required_size( const struct pvr_renderpass_hwsetup_render *renders, uint32_t render_count, uint32_t sample_count, @@ -69,7 +69,7 @@ uint64_t PVR_PER_ARCH(spm_scratch_buffer_calc_required_size)( return buffer_size; } -VkResult pvr_device_init_spm_load_state(struct pvr_device *device) +VkResult pvr_arch_device_init_spm_load_state(struct pvr_device *device) { const struct pvr_device_info *dev_info = &device->pdevice->dev_info; uint32_t pds_texture_aligned_offsets[PVR_NUM_SPM_LOAD_SHADERS]; @@ -244,7 +244,7 @@ VkResult pvr_device_init_spm_load_state(struct pvr_device *device) return VK_SUCCESS; } -void PVR_PER_ARCH(device_finish_spm_load_state)(struct pvr_device *device) +void pvr_arch_device_finish_spm_load_state(struct pvr_device *device) { pvr_bo_suballoc_free(device->spm_load_state.pds_programs); pvr_bo_suballoc_free(device->spm_load_state.usc_programs); @@ -310,11 +310,11 @@ static uint64_t pvr_spm_setup_pbe_state( .source_start = source_start, }; - pvr_pbe_pack_state(dev_info, - &surface_params, - &render_params, - pbe_state_words_out, - pbe_reg_words_out); + pvr_arch_pbe_pack_state(dev_info, + &surface_params, + &render_params, + pbe_state_words_out, + pbe_reg_words_out); return (uint64_t)stride * framebuffer_size->height * sample_count * PVR_DW_TO_BYTES(dword_count); @@ -452,7 +452,7 @@ static VkResult pvr_pds_pixel_event_program_create_and_upload( * This sets up an EOT program to store the render pass'es 
on-chip and * off-chip tile data to the SPM scratch buffer on the EOT event. */ -VkResult PVR_PER_ARCH(spm_init_eot_state)( +VkResult pvr_arch_spm_init_eot_state( struct pvr_device *device, struct pvr_spm_eot_state *spm_eot_state, const struct pvr_render_state *rstate, @@ -666,7 +666,7 @@ pvr_spm_setup_texture_state_words(struct pvr_device *device, format_swizzle = pvr_get_format_swizzle(info.format); memcpy(info.swizzle, format_swizzle, sizeof(info.swizzle)); - result = pvr_pack_tex_state(device, &info, &image_descriptor); + result = pvr_arch_pack_tex_state(device, &info, &image_descriptor); if (result != VK_SUCCESS) return result; @@ -746,7 +746,7 @@ static VkResult pvr_pds_bgnd_program_create_and_upload( return VK_SUCCESS; } -VkResult PVR_PER_ARCH(spm_init_bgobj_state)( +VkResult pvr_arch_spm_init_bgobj_state( struct pvr_device *device, struct pvr_spm_bgobj_state *spm_bgobj_state, const struct pvr_render_state *rstate, diff --git a/src/imagination/vulkan/pvr_arch_tex_state.c b/src/imagination/vulkan/pvr_arch_tex_state.c index b9d500ec3a3..f4544955f63 100644 --- a/src/imagination/vulkan/pvr_arch_tex_state.c +++ b/src/imagination/vulkan/pvr_arch_tex_state.c @@ -151,9 +151,9 @@ static uint32_t setup_pck_info(VkFormat vk_format) return pck_info; } -VkResult PVR_PER_ARCH(pack_tex_state)(struct pvr_device *device, - const struct pvr_texture_state_info *info, - struct pvr_image_descriptor *state) +VkResult pvr_arch_pack_tex_state(struct pvr_device *device, + const struct pvr_texture_state_info *info, + struct pvr_image_descriptor *state) { const struct pvr_device_info *dev_info = &device->pdevice->dev_info; enum pvr_memlayout mem_layout; @@ -226,7 +226,7 @@ VkResult PVR_PER_ARCH(pack_tex_state)(struct pvr_device *device, * to avoid this. 
*/ word0.texformat = - pvr_get_tex_format_aspect(info->format, info->aspect_mask); + pvr_arch_get_tex_format_aspect(info->format, info->aspect_mask); word0.smpcnt = util_logbase2(info->sample_count); word0.swiz0 = pvr_get_hw_swizzle(VK_COMPONENT_SWIZZLE_R, info->swizzle[0]); diff --git a/src/imagination/vulkan/pvr_border.h b/src/imagination/vulkan/pvr_border.h index c46ec550cab..44782a0f3a8 100644 --- a/src/imagination/vulkan/pvr_border.h +++ b/src/imagination/vulkan/pvr_border.h @@ -64,23 +64,25 @@ struct pvr_border_color_table { #ifdef PVR_PER_ARCH VkResult PVR_PER_ARCH(border_color_table_init)(struct pvr_device *const device); -# define pvr_border_color_table_init PVR_PER_ARCH(border_color_table_init) +# define pvr_arch_border_color_table_init \ + PVR_PER_ARCH(border_color_table_init) void PVR_PER_ARCH(border_color_table_finish)(struct pvr_device *device); -# define pvr_border_color_table_finish PVR_PER_ARCH(border_color_table_finish) +# define pvr_arch_border_color_table_finish \ + PVR_PER_ARCH(border_color_table_finish) VkResult PVR_PER_ARCH(border_color_table_get_or_create_entry)( struct pvr_device *device, const struct pvr_sampler *sampler, struct pvr_border_color_table *table, uint32_t *index_out); -# define pvr_border_color_table_get_or_create_entry \ +# define pvr_arch_border_color_table_get_or_create_entry \ PVR_PER_ARCH(border_color_table_get_or_create_entry) void PVR_PER_ARCH(border_color_table_release_entry)( struct pvr_border_color_table *table, uint32_t index); -# define pvr_border_color_table_release_entry \ +# define pvr_arch_border_color_table_release_entry \ PVR_PER_ARCH(border_color_table_release_entry) #endif diff --git a/src/imagination/vulkan/pvr_cmd_buffer.h b/src/imagination/vulkan/pvr_cmd_buffer.h index 3bc39144dbc..ce07a0455b1 100644 --- a/src/imagination/vulkan/pvr_cmd_buffer.h +++ b/src/imagination/vulkan/pvr_cmd_buffer.h @@ -599,7 +599,7 @@ VkResult PVR_PER_ARCH(cmd_buffer_add_transfer_cmd)( struct pvr_cmd_buffer *cmd_buffer, struct 
pvr_transfer_cmd *transfer_cmd); -# define pvr_cmd_buffer_add_transfer_cmd \ +# define pvr_arch_cmd_buffer_add_transfer_cmd \ PVR_PER_ARCH(cmd_buffer_add_transfer_cmd) VkResult PVR_PER_ARCH(cmd_buffer_alloc_mem)( @@ -608,7 +608,7 @@ VkResult PVR_PER_ARCH(cmd_buffer_alloc_mem)( uint64_t size, struct pvr_suballoc_bo **const pvr_bo_out); -# define pvr_cmd_buffer_alloc_mem PVR_PER_ARCH(cmd_buffer_alloc_mem) +# define pvr_arch_cmd_buffer_alloc_mem PVR_PER_ARCH(cmd_buffer_alloc_mem) VkResult PVR_PER_ARCH(cmd_buffer_upload_general)( struct pvr_cmd_buffer *const cmd_buffer, @@ -616,7 +616,8 @@ VkResult PVR_PER_ARCH(cmd_buffer_upload_general)( const size_t size, struct pvr_suballoc_bo **const pvr_bo_out); -# define pvr_cmd_buffer_upload_general PVR_PER_ARCH(cmd_buffer_upload_general) +# define pvr_arch_cmd_buffer_upload_general \ + PVR_PER_ARCH(cmd_buffer_upload_general) VkResult PVR_PER_ARCH(cmd_buffer_upload_pds)( struct pvr_cmd_buffer *const cmd_buffer, @@ -629,32 +630,33 @@ VkResult PVR_PER_ARCH(cmd_buffer_upload_pds)( uint64_t min_alignment, struct pvr_pds_upload *const pds_upload_out); -# define pvr_cmd_buffer_upload_pds PVR_PER_ARCH(cmd_buffer_upload_pds) +# define pvr_arch_cmd_buffer_upload_pds PVR_PER_ARCH(cmd_buffer_upload_pds) VkResult PVR_PER_ARCH(cmd_buffer_start_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer, enum pvr_sub_cmd_type type); -# define pvr_cmd_buffer_start_sub_cmd PVR_PER_ARCH(cmd_buffer_start_sub_cmd) +# define pvr_arch_cmd_buffer_start_sub_cmd \ + PVR_PER_ARCH(cmd_buffer_start_sub_cmd) VkResult PVR_PER_ARCH(cmd_buffer_end_sub_cmd)(struct pvr_cmd_buffer *cmd_buffer); -# define pvr_cmd_buffer_end_sub_cmd PVR_PER_ARCH(cmd_buffer_end_sub_cmd) +# define pvr_arch_cmd_buffer_end_sub_cmd PVR_PER_ARCH(cmd_buffer_end_sub_cmd) void PVR_PER_ARCH(compute_generate_fence)( struct pvr_cmd_buffer *cmd_buffer, struct pvr_sub_cmd_compute *const sub_cmd, bool deallocate_shareds); -# define pvr_compute_generate_fence PVR_PER_ARCH(compute_generate_fence) +# define 
pvr_arch_compute_generate_fence PVR_PER_ARCH(compute_generate_fence) void PVR_PER_ARCH(compute_update_shared_private)( struct pvr_cmd_buffer *cmd_buffer, struct pvr_sub_cmd_compute *const sub_cmd, struct pvr_private_compute_pipeline *pipeline); -# define pvr_compute_update_shared_private \ +# define pvr_arch_compute_update_shared_private \ PVR_PER_ARCH(compute_update_shared_private) void PVR_PER_ARCH(compute_update_kernel_private)( @@ -663,20 +665,20 @@ void PVR_PER_ARCH(compute_update_kernel_private)( struct pvr_private_compute_pipeline *pipeline, const uint32_t global_workgroup_size[static const PVR_WORKGROUP_DIMENSIONS]); -# define pvr_compute_update_kernel_private \ +# define pvr_arch_compute_update_kernel_private \ PVR_PER_ARCH(compute_update_kernel_private) VkResult PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, const struct pvr_query_info *query_info); -# define pvr_add_query_program PVR_PER_ARCH(add_query_program) +# define pvr_arch_add_query_program PVR_PER_ARCH(add_query_program) void PVR_PER_ARCH(reset_graphics_dirty_state)( struct pvr_cmd_buffer *const cmd_buffer, bool start_geom); -# define pvr_reset_graphics_dirty_state \ +# define pvr_arch_reset_graphics_dirty_state \ PVR_PER_ARCH(reset_graphics_dirty_state) void PVR_PER_ARCH(calculate_vertex_cam_size)( @@ -686,21 +688,21 @@ void PVR_PER_ARCH(calculate_vertex_cam_size)( uint32_t *const cam_size_out, uint32_t *const vs_max_instances_out); -# define pvr_cmd_buffer_end_sub_cmd PVR_PER_ARCH(cmd_buffer_end_sub_cmd) +# define pvr_arch_cmd_buffer_end_sub_cmd PVR_PER_ARCH(cmd_buffer_end_sub_cmd) void PVR_PER_ARCH(compute_generate_fence)( struct pvr_cmd_buffer *cmd_buffer, struct pvr_sub_cmd_compute *const sub_cmd, bool deallocate_shareds); -# define pvr_compute_generate_fence PVR_PER_ARCH(compute_generate_fence) +# define pvr_arch_compute_generate_fence PVR_PER_ARCH(compute_generate_fence) void PVR_PER_ARCH(compute_update_shared_private)( struct pvr_cmd_buffer *cmd_buffer, struct 
pvr_sub_cmd_compute *const sub_cmd, struct pvr_private_compute_pipeline *pipeline); -# define pvr_compute_update_shared_private \ +# define pvr_arch_compute_update_shared_private \ PVR_PER_ARCH(compute_update_shared_private) void PVR_PER_ARCH(compute_update_kernel_private)( @@ -709,20 +711,20 @@ void PVR_PER_ARCH(compute_update_kernel_private)( struct pvr_private_compute_pipeline *pipeline, const uint32_t global_workgroup_size[static const PVR_WORKGROUP_DIMENSIONS]); -# define pvr_compute_update_kernel_private \ +# define pvr_arch_compute_update_kernel_private \ PVR_PER_ARCH(compute_update_kernel_private) VkResult PVR_PER_ARCH(add_query_program)(struct pvr_cmd_buffer *cmd_buffer, const struct pvr_query_info *query_info); -# define pvr_add_query_program PVR_PER_ARCH(add_query_program) +# define pvr_arch_add_query_program PVR_PER_ARCH(add_query_program) void PVR_PER_ARCH(reset_graphics_dirty_state)( struct pvr_cmd_buffer *const cmd_buffer, bool start_geom); -# define pvr_reset_graphics_dirty_state \ +# define pvr_arch_reset_graphics_dirty_state \ PVR_PER_ARCH(reset_graphics_dirty_state) void PVR_PER_ARCH(calculate_vertex_cam_size)( @@ -732,19 +734,21 @@ void PVR_PER_ARCH(calculate_vertex_cam_size)( uint32_t *const cam_size_out, uint32_t *const vs_max_instances_out); -# define pvr_calculate_vertex_cam_size PVR_PER_ARCH(calculate_vertex_cam_size) +# define pvr_arch_calculate_vertex_cam_size \ + PVR_PER_ARCH(calculate_vertex_cam_size) const struct pvr_renderpass_hwsetup_subpass * PVR_PER_ARCH(get_hw_subpass)(const struct pvr_render_pass *pass, const uint32_t subpass); -# define pvr_get_hw_subpass PVR_PER_ARCH(get_hw_subpass) +# define pvr_arch_get_hw_subpass PVR_PER_ARCH(get_hw_subpass) struct pvr_renderpass_hwsetup_render *PVR_PER_ARCH(pass_info_get_hw_render)( const struct pvr_render_pass_info *render_pass_info, uint32_t idx); -# define pvr_pass_info_get_hw_render PVR_PER_ARCH(pass_info_get_hw_render) +# define pvr_arch_pass_info_get_hw_render \ + 
PVR_PER_ARCH(pass_info_get_hw_render) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_csb.h b/src/imagination/vulkan/pvr_csb.h index 6066c3c5a89..07a49dfd6ff 100644 --- a/src/imagination/vulkan/pvr_csb.h +++ b/src/imagination/vulkan/pvr_csb.h @@ -255,26 +255,26 @@ VkResult pvr_csb_bake(struct pvr_csb *csb, struct list_head *bo_list_out); void *PVR_PER_ARCH(csb_alloc_dwords)(struct pvr_csb *csb, uint32_t num_dwords); -# define pvr_csb_alloc_dwords PVR_PER_ARCH(csb_alloc_dwords) +# define pvr_arch_csb_alloc_dwords PVR_PER_ARCH(csb_alloc_dwords) VkResult PVR_PER_ARCH(csb_copy)(struct pvr_csb *csb_dst, struct pvr_csb *csb_src); -# define pvr_csb_copy PVR_PER_ARCH(csb_copy) +# define pvr_arch_csb_copy PVR_PER_ARCH(csb_copy) void PVR_PER_ARCH(csb_emit_link)(struct pvr_csb *csb, pvr_dev_addr_t addr, bool ret); -# define pvr_csb_emit_link PVR_PER_ARCH(csb_emit_link) +# define pvr_arch_csb_emit_link PVR_PER_ARCH(csb_emit_link) VkResult PVR_PER_ARCH(csb_emit_return)(struct pvr_csb *csb); -# define pvr_csb_emit_return PVR_PER_ARCH(csb_emit_return) +# define pvr_arch_csb_emit_return PVR_PER_ARCH(csb_emit_return) VkResult PVR_PER_ARCH(csb_emit_terminate)(struct pvr_csb *csb); -# define pvr_csb_emit_terminate PVR_PER_ARCH(csb_emit_terminate) +# define pvr_arch_csb_emit_terminate PVR_PER_ARCH(csb_emit_terminate) #endif /* PVR_PER_ARCH */ @@ -301,14 +301,14 @@ void pvr_csb_dump(const struct pvr_csb *csb, * used by the caller to modify the command or state * information before it's packed. 
 */ -#define pvr_csb_emit(csb, cmd, name) \ - for (struct ROGUE_##cmd \ - name = { pvr_cmd_header(cmd) }, \ - *_dst = pvr_csb_alloc_dwords(csb, pvr_cmd_length(cmd)); \ - __builtin_expect(_dst != NULL, 1); \ - ({ \ - pvr_cmd_pack(cmd)(_dst, &name); \ - _dst = NULL; \ +#define pvr_csb_emit(csb, cmd, name) \ + for (struct ROGUE_##cmd \ + name = { pvr_cmd_header(cmd) }, \ + *_dst = pvr_arch_csb_alloc_dwords(csb, pvr_cmd_length(cmd)); \ + __builtin_expect(_dst != NULL, 1); \ + ({ \ + pvr_cmd_pack(cmd)(_dst, &name); \ + _dst = NULL; \ })) /** diff --git a/src/imagination/vulkan/pvr_device.h b/src/imagination/vulkan/pvr_device.h index 0487a6c66f1..e3dfca52813 100644 --- a/src/imagination/vulkan/pvr_device.h +++ b/src/imagination/vulkan/pvr_device.h @@ -229,7 +229,7 @@ uint32_t PVR_PER_ARCH(calc_fscommon_size_and_tiles_in_flight)( uint32_t fs_common_size, uint32_t min_tiles_in_flight); -# define pvr_calc_fscommon_size_and_tiles_in_flight \ +# define pvr_arch_calc_fscommon_size_and_tiles_in_flight \ PVR_PER_ARCH(calc_fscommon_size_and_tiles_in_flight) VkResult PVR_PER_ARCH(pds_compute_shader_create_and_upload)( @@ -237,7 +237,7 @@ VkResult PVR_PER_ARCH(pds_compute_shader_create_and_upload)( struct pvr_device *device, struct pvr_pds_compute_shader_program *program, struct pvr_pds_upload *const pds_upload_out); -# define pvr_pds_compute_shader_create_and_upload \ +# define pvr_arch_pds_compute_shader_create_and_upload \ PVR_PER_ARCH(pds_compute_shader_create_and_upload) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_formats.c b/src/imagination/vulkan/pvr_formats.c index 074a148f0b0..60decc5c079 100644 --- a/src/imagination/vulkan/pvr_formats.c +++ b/src/imagination/vulkan/pvr_formats.c @@ -687,7 +687,7 @@ VkResult pvr_GetPhysicalDeviceImageFormatProperties2( break; case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO: /* Nothing to do here, it's handled in - * PVR_PER_ARCH(get_image_format_properties) + * pvr_get_image_format_properties() */ break; default: diff --git 
a/src/imagination/vulkan/pvr_formats.h b/src/imagination/vulkan/pvr_formats.h index 1b4160a8f26..af9de572eb5 100644 --- a/src/imagination/vulkan/pvr_formats.h +++ b/src/imagination/vulkan/pvr_formats.h @@ -306,25 +306,25 @@ pvr_vk_format_get_common_color_channel_count(VkFormat src_format, #ifdef PVR_PER_ARCH const struct pvr_format *PVR_PER_ARCH(get_format_table)(unsigned *num_formats); -# define pvr_get_format_table PVR_PER_ARCH(get_format_table) +# define pvr_arch_get_format_table PVR_PER_ARCH(get_format_table) uint32_t PVR_PER_ARCH(get_tex_format)(VkFormat vk_format); -# define pvr_get_tex_format PVR_PER_ARCH(get_tex_format) +# define pvr_arch_get_tex_format PVR_PER_ARCH(get_tex_format) uint32_t PVR_PER_ARCH(get_tex_format_aspect)(VkFormat vk_format, VkImageAspectFlags aspect_mask); -# define pvr_get_tex_format_aspect PVR_PER_ARCH(get_tex_format_aspect) +# define pvr_arch_get_tex_format_aspect PVR_PER_ARCH(get_tex_format_aspect) uint32_t PVR_PER_ARCH(get_pbe_packmode)(VkFormat vk_format); -# define pvr_get_pbe_packmode PVR_PER_ARCH(get_pbe_packmode) +# define pvr_arch_get_pbe_packmode PVR_PER_ARCH(get_pbe_packmode) uint32_t PVR_PER_ARCH(get_pbe_accum_format)(VkFormat vk_format); -# define pvr_get_pbe_accum_format PVR_PER_ARCH(get_pbe_accum_format) +# define pvr_arch_get_pbe_accum_format PVR_PER_ARCH(get_pbe_accum_format) bool PVR_PER_ARCH(format_is_pbe_downscalable)( const struct pvr_device_info *dev_info, VkFormat vk_format); -# define pvr_format_is_pbe_downscalable \ +# define pvr_arch_format_is_pbe_downscalable \ PVR_PER_ARCH(format_is_pbe_downscalable) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_framebuffer.h b/src/imagination/vulkan/pvr_framebuffer.h index f0a76d9bf17..4d61021cb2b 100644 --- a/src/imagination/vulkan/pvr_framebuffer.h +++ b/src/imagination/vulkan/pvr_framebuffer.h @@ -77,7 +77,7 @@ VkResult PVR_PER_ARCH(render_state_setup)( uint32_t render_count, const struct pvr_renderpass_hwsetup_render *renders); -# define 
pvr_render_state_setup PVR_PER_ARCH(render_state_setup) +# define pvr_arch_render_state_setup PVR_PER_ARCH(render_state_setup) #endif diff --git a/src/imagination/vulkan/pvr_hw_pass.h b/src/imagination/vulkan/pvr_hw_pass.h index 8ff9e1df791..69496eb5d16 100644 --- a/src/imagination/vulkan/pvr_hw_pass.h +++ b/src/imagination/vulkan/pvr_hw_pass.h @@ -229,13 +229,14 @@ VkResult PVR_PER_ARCH(create_renderpass_hwsetup)( bool disable_merge, struct pvr_renderpass_hwsetup **const hw_setup_out); -# define pvr_create_renderpass_hwsetup PVR_PER_ARCH(create_renderpass_hwsetup) +# define pvr_arch_create_renderpass_hwsetup \ + PVR_PER_ARCH(create_renderpass_hwsetup) void PVR_PER_ARCH(destroy_renderpass_hwsetup)( const VkAllocationCallbacks *alloc, struct pvr_renderpass_hwsetup *hw_setup); -# define pvr_destroy_renderpass_hwsetup \ +# define pvr_arch_destroy_renderpass_hwsetup \ PVR_PER_ARCH(destroy_renderpass_hwsetup) #endif diff --git a/src/imagination/vulkan/pvr_job_common.h b/src/imagination/vulkan/pvr_job_common.h index d1698eda100..977aa3d3129 100644 --- a/src/imagination/vulkan/pvr_job_common.h +++ b/src/imagination/vulkan/pvr_job_common.h @@ -121,7 +121,7 @@ void PVR_PER_ARCH(pbe_pack_state)( uint32_t pbe_cs_words[static const ROGUE_NUM_PBESTATE_STATE_WORDS], uint64_t pbe_reg_words[static const ROGUE_NUM_PBESTATE_REG_WORDS]); -# define pvr_pbe_pack_state PVR_PER_ARCH(pbe_pack_state) +# define pvr_arch_pbe_pack_state PVR_PER_ARCH(pbe_pack_state) /* Helper to calculate pvr_pbe_surf_params::gamma and * pvr_pbe_surf_params::source_format. 
@@ -133,7 +133,7 @@ void PVR_PER_ARCH(pbe_get_src_format_and_gamma)( uint32_t *const src_format_out, enum pvr_pbe_gamma *const gamma_out); -# define pvr_pbe_get_src_format_and_gamma \ +# define pvr_arch_pbe_get_src_format_and_gamma \ PVR_PER_ARCH(pbe_get_src_format_and_gamma) void PVR_PER_ARCH(setup_tiles_in_flight)( @@ -146,7 +146,7 @@ void PVR_PER_ARCH(setup_tiles_in_flight)( uint32_t *const isp_ctl_out, uint32_t *const pixel_ctl_out); -# define pvr_setup_tiles_in_flight PVR_PER_ARCH(setup_tiles_in_flight) +# define pvr_arch_setup_tiles_in_flight PVR_PER_ARCH(setup_tiles_in_flight) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_job_compute.h b/src/imagination/vulkan/pvr_job_compute.h index bfea71ebf08..afd9a6ec41e 100644 --- a/src/imagination/vulkan/pvr_job_compute.h +++ b/src/imagination/vulkan/pvr_job_compute.h @@ -36,6 +36,6 @@ VkResult PVR_PER_ARCH(compute_job_submit)(struct pvr_compute_ctx *ctx, struct vk_sync *wait, struct vk_sync *signal_sync); -#define pvr_compute_job_submit PVR_PER_ARCH(compute_job_submit) +#define pvr_arch_compute_job_submit PVR_PER_ARCH(compute_job_submit) #endif /* PVR_JOB_COMPUTE_H */ diff --git a/src/imagination/vulkan/pvr_job_context.h b/src/imagination/vulkan/pvr_job_context.h index 0f64f628cdc..b04138c9837 100644 --- a/src/imagination/vulkan/pvr_job_context.h +++ b/src/imagination/vulkan/pvr_job_context.h @@ -163,32 +163,32 @@ VkResult PVR_PER_ARCH(render_ctx_create)(struct pvr_device *device, enum pvr_winsys_ctx_priority priority, struct pvr_render_ctx **const ctx_out); -#define pvr_render_ctx_create PVR_PER_ARCH(render_ctx_create) +#define pvr_arch_render_ctx_create PVR_PER_ARCH(render_ctx_create) void PVR_PER_ARCH(render_ctx_destroy)(struct pvr_render_ctx *ctx); -#define pvr_render_ctx_destroy PVR_PER_ARCH(render_ctx_destroy) +#define pvr_arch_render_ctx_destroy PVR_PER_ARCH(render_ctx_destroy) VkResult PVR_PER_ARCH(compute_ctx_create)(struct pvr_device *const device, enum pvr_winsys_ctx_priority priority, 
struct pvr_compute_ctx **const ctx_out); -#define pvr_compute_ctx_create PVR_PER_ARCH(compute_ctx_create) +#define pvr_arch_compute_ctx_create PVR_PER_ARCH(compute_ctx_create) void PVR_PER_ARCH(compute_ctx_destroy)(struct pvr_compute_ctx *ctx); -#define pvr_compute_ctx_destroy PVR_PER_ARCH(compute_ctx_destroy) +#define pvr_arch_compute_ctx_destroy PVR_PER_ARCH(compute_ctx_destroy) VkResult PVR_PER_ARCH(transfer_ctx_create)(struct pvr_device *const device, enum pvr_winsys_ctx_priority priority, struct pvr_transfer_ctx **const ctx_out); -#define pvr_transfer_ctx_create PVR_PER_ARCH(transfer_ctx_create) +#define pvr_arch_transfer_ctx_create PVR_PER_ARCH(transfer_ctx_create) void PVR_PER_ARCH(transfer_ctx_destroy)(struct pvr_transfer_ctx *const ctx); -#define pvr_transfer_ctx_destroy PVR_PER_ARCH(transfer_ctx_destroy) +#define pvr_arch_transfer_ctx_destroy PVR_PER_ARCH(transfer_ctx_destroy) #endif /* PVR_JOB_CONTEXT_H */ diff --git a/src/imagination/vulkan/pvr_job_render.h b/src/imagination/vulkan/pvr_job_render.h index 939018114c9..4f52b3d929d 100644 --- a/src/imagination/vulkan/pvr_job_render.h +++ b/src/imagination/vulkan/pvr_job_render.h @@ -173,7 +173,7 @@ void PVR_PER_ARCH(rt_mtile_info_init)(const struct pvr_device_info *dev_info, uint32_t height, uint32_t samples); -# define pvr_rt_mtile_info_init PVR_PER_ARCH(rt_mtile_info_init) +# define pvr_arch_rt_mtile_info_init PVR_PER_ARCH(rt_mtile_info_init) VkResult PVR_PER_ARCH(render_target_dataset_create)( struct pvr_device *device, @@ -183,7 +183,7 @@ VkResult PVR_PER_ARCH(render_target_dataset_create)( uint32_t layers, struct pvr_rt_dataset **const rt_dataset_out); -# define pvr_render_target_dataset_create \ +# define pvr_arch_render_target_dataset_create \ PVR_PER_ARCH(render_target_dataset_create) VkResult PVR_PER_ARCH(render_job_submit)(struct pvr_render_ctx *ctx, @@ -193,7 +193,7 @@ VkResult PVR_PER_ARCH(render_job_submit)(struct pvr_render_ctx *ctx, struct vk_sync *signal_sync_geom, struct vk_sync 
*signal_sync_frag); -# define pvr_render_job_submit PVR_PER_ARCH(render_job_submit) +# define pvr_arch_render_job_submit PVR_PER_ARCH(render_job_submit) #endif diff --git a/src/imagination/vulkan/pvr_job_transfer.h b/src/imagination/vulkan/pvr_job_transfer.h index 6002f00826b..5fa011d48e1 100644 --- a/src/imagination/vulkan/pvr_job_transfer.h +++ b/src/imagination/vulkan/pvr_job_transfer.h @@ -48,6 +48,6 @@ VkResult PVR_PER_ARCH(transfer_job_submit)(struct pvr_transfer_ctx *ctx, struct vk_sync *wait, struct vk_sync *signal_sync); -#define pvr_transfer_job_submit PVR_PER_ARCH(transfer_job_submit) +#define pvr_arch_transfer_job_submit PVR_PER_ARCH(transfer_job_submit) #endif /* PVR_JOB_TRANSFER_H */ diff --git a/src/imagination/vulkan/pvr_mrt.h b/src/imagination/vulkan/pvr_mrt.h index 657b7911310..e899b83bd47 100644 --- a/src/imagination/vulkan/pvr_mrt.h +++ b/src/imagination/vulkan/pvr_mrt.h @@ -177,16 +177,16 @@ VkResult PVR_PER_ARCH(init_usc_mrt_setup)( const VkFormat attachment_formats[attachment_count], struct usc_mrt_setup *setup); -# define pvr_init_usc_mrt_setup PVR_PER_ARCH(init_usc_mrt_setup) +# define pvr_arch_init_usc_mrt_setup PVR_PER_ARCH(init_usc_mrt_setup) void PVR_PER_ARCH(destroy_mrt_setup)(const struct pvr_device *device, struct usc_mrt_setup *setup); -# define pvr_destroy_mrt_setup PVR_PER_ARCH(destroy_mrt_setup) +# define pvr_arch_destroy_mrt_setup PVR_PER_ARCH(destroy_mrt_setup) void PVR_PER_ARCH(init_mrt_desc)(VkFormat format, struct usc_mrt_desc *desc); -# define pvr_init_mrt_desc PVR_PER_ARCH(init_mrt_desc) +# define pvr_arch_init_mrt_desc PVR_PER_ARCH(init_mrt_desc) VkResult PVR_PER_ARCH(pds_unitex_state_program_create_and_upload)( struct pvr_device *device, @@ -195,7 +195,7 @@ VkResult PVR_PER_ARCH(pds_unitex_state_program_create_and_upload)( uint32_t uniform_kicks, struct pvr_pds_upload *const pds_upload_out); -# define pvr_pds_unitex_state_program_create_and_upload \ +# define pvr_arch_pds_unitex_state_program_create_and_upload \ 
PVR_PER_ARCH(pds_unitex_state_program_create_and_upload) VkResult @@ -203,19 +203,21 @@ VkResult const VkAllocationCallbacks *allocator, struct pvr_load_op *load_op); -# define pvr_load_op_shader_generate PVR_PER_ARCH(load_op_shader_generate) +# define pvr_arch_load_op_shader_generate \ + PVR_PER_ARCH(load_op_shader_generate) VkResult PVR_PER_ARCH(mrt_load_ops_setup)(struct pvr_cmd_buffer *cmd_buffer, const VkAllocationCallbacks *alloc, struct pvr_load_op_state **state); -# define pvr_mrt_load_ops_setup PVR_PER_ARCH(mrt_load_ops_setup) +# define pvr_arch_mrt_load_ops_setup PVR_PER_ARCH(mrt_load_ops_setup) void PVR_PER_ARCH(mrt_load_op_state_cleanup)(const struct pvr_device *device, const VkAllocationCallbacks *alloc, struct pvr_load_op_state *state); -# define pvr_mrt_load_op_state_cleanup PVR_PER_ARCH(mrt_load_op_state_cleanup) +# define pvr_arch_mrt_load_op_state_cleanup \ + PVR_PER_ARCH(mrt_load_op_state_cleanup) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_query.h b/src/imagination/vulkan/pvr_query.h index b5cefc9345b..1b34299e0a9 100644 --- a/src/imagination/vulkan/pvr_query.h +++ b/src/imagination/vulkan/pvr_query.h @@ -78,13 +78,13 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(pvr_query_pool, VkResult PVR_PER_ARCH(device_create_compute_query_programs)( struct pvr_device *device); -# define pvr_device_create_compute_query_programs \ +# define pvr_arch_device_create_compute_query_programs \ PVR_PER_ARCH(device_create_compute_query_programs) void PVR_PER_ARCH(device_destroy_compute_query_programs)( struct pvr_device *device); -# define pvr_device_destroy_compute_query_programs \ +# define pvr_arch_device_destroy_compute_query_programs \ PVR_PER_ARCH(device_destroy_compute_query_programs) #endif diff --git a/src/imagination/vulkan/pvr_queue.h b/src/imagination/vulkan/pvr_queue.h index 30b59140411..ee6f7635841 100644 --- a/src/imagination/vulkan/pvr_queue.h +++ b/src/imagination/vulkan/pvr_queue.h @@ -45,10 +45,10 @@ VK_DEFINE_HANDLE_CASTS(pvr_queue, 
vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE) #ifdef PVR_PER_ARCH VkResult PVR_PER_ARCH(queues_create)(struct pvr_device *device, const VkDeviceCreateInfo *pCreateInfo); -# define pvr_queues_create PVR_PER_ARCH(queues_create) +# define pvr_arch_queues_create PVR_PER_ARCH(queues_create) void PVR_PER_ARCH(queues_destroy)(struct pvr_device *device); -# define pvr_queues_destroy PVR_PER_ARCH(queues_destroy) +# define pvr_arch_queues_destroy PVR_PER_ARCH(queues_destroy) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_spm.h b/src/imagination/vulkan/pvr_spm.h index f78512fb622..d9fccdb6169 100644 --- a/src/imagination/vulkan/pvr_spm.h +++ b/src/imagination/vulkan/pvr_spm.h @@ -116,18 +116,18 @@ uint64_t PVR_PER_ARCH(spm_scratch_buffer_calc_required_size)( uint32_t framebuffer_width, uint32_t framebuffer_height); -# define pvr_spm_scratch_buffer_calc_required_size \ +# define pvr_arch_spm_scratch_buffer_calc_required_size \ PVR_PER_ARCH(spm_scratch_buffer_calc_required_size) /* The SPM load programs are needed for the SPM background object load op. 
*/ VkResult PVR_PER_ARCH(device_init_spm_load_state)(struct pvr_device *device); -# define pvr_device_init_spm_load_state \ +# define pvr_arch_device_init_spm_load_state \ PVR_PER_ARCH(device_init_spm_load_state) void PVR_PER_ARCH(device_finish_spm_load_state)(struct pvr_device *device); -# define pvr_device_finish_spm_load_state \ +# define pvr_arch_device_finish_spm_load_state \ PVR_PER_ARCH(device_finish_spm_load_state) VkResult PVR_PER_ARCH(spm_init_eot_state)( @@ -136,7 +136,7 @@ VkResult PVR_PER_ARCH(spm_init_eot_state)( const struct pvr_render_state *rstate, const struct pvr_renderpass_hwsetup_render *hw_render); -# define pvr_spm_init_eot_state PVR_PER_ARCH(spm_init_eot_state) +# define pvr_arch_spm_init_eot_state PVR_PER_ARCH(spm_init_eot_state) VkResult PVR_PER_ARCH(spm_init_bgobj_state)( struct pvr_device *device, @@ -144,7 +144,7 @@ VkResult PVR_PER_ARCH(spm_init_bgobj_state)( const struct pvr_render_state *rstate, const struct pvr_renderpass_hwsetup_render *hw_render); -# define pvr_spm_init_bgobj_state PVR_PER_ARCH(spm_init_bgobj_state) +# define pvr_arch_spm_init_bgobj_state PVR_PER_ARCH(spm_init_bgobj_state) #endif /* PVR_PER_ARCH */ diff --git a/src/imagination/vulkan/pvr_tex_state.h b/src/imagination/vulkan/pvr_tex_state.h index 64b749e0b8d..852a2a038d7 100644 --- a/src/imagination/vulkan/pvr_tex_state.h +++ b/src/imagination/vulkan/pvr_tex_state.h @@ -115,7 +115,7 @@ VkResult PVR_PER_ARCH(pack_tex_state)(struct pvr_device *device, const struct pvr_texture_state_info *info, struct pvr_image_descriptor *state); -# define pvr_pack_tex_state PVR_PER_ARCH(pack_tex_state) +# define pvr_arch_pack_tex_state PVR_PER_ARCH(pack_tex_state) #endif diff --git a/src/imagination/vulkan/rogue/pvr_blit.c b/src/imagination/vulkan/rogue/pvr_blit.c index f02cc44da3b..e44925e9ff1 100644 --- a/src/imagination/vulkan/rogue/pvr_blit.c +++ b/src/imagination/vulkan/rogue/pvr_blit.c @@ -413,7 +413,7 @@ void pvr_rogue_CmdBlitImage2(VkCommandBuffer commandBuffer, 
transfer_cmd->dst = dst_surface; transfer_cmd->scissor = dst_rect; - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return; @@ -630,7 +630,7 @@ pvr_copy_or_resolve_image_region(struct pvr_cmd_buffer *cmd_buffer, transfer_cmd->sources[0].mapping_count++; transfer_cmd->source_count = 1; - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return result; @@ -895,7 +895,7 @@ pvr_copy_buffer_to_image_region_format(struct pvr_cmd_buffer *const cmd_buffer, transfer_cmd->sources[0].mappings[0].dst_rect = transfer_cmd->scissor; transfer_cmd->sources[0].mapping_count++; - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return result; @@ -1081,7 +1081,7 @@ pvr_copy_image_to_buffer_region_format(struct pvr_cmd_buffer *const cmd_buffer, transfer_cmd->dst = dst_surface; transfer_cmd->scissor = dst_rect; - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return result; @@ -1233,7 +1233,7 @@ static VkResult pvr_clear_image_range(struct pvr_cmd_buffer *cmd_buffer, format, psRange->aspectMask); - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return result; @@ -1407,7 +1407,7 @@ static VkResult 
pvr_cmd_copy_buffer_region(struct pvr_cmd_buffer *cmd_buffer, transfer_cmd->sources[0].mapping_count++; } - result = pvr_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); + result = pvr_arch_cmd_buffer_add_transfer_cmd(cmd_buffer, transfer_cmd); if (result != VK_SUCCESS) { vk_free(&cmd_buffer->vk.pool->alloc, transfer_cmd); return result; @@ -1432,7 +1432,7 @@ void pvr_rogue_CmdUpdateBuffer(VkCommandBuffer commandBuffer, PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer); - result = pvr_cmd_buffer_upload_general(cmd_buffer, pData, dataSize, &pvr_bo); + result = pvr_arch_cmd_buffer_upload_general(cmd_buffer, pData, dataSize, &pvr_bo); if (result != VK_SUCCESS) return; @@ -1604,11 +1604,11 @@ static VkResult pvr_clear_color_attachment_static_create_consts_buffer( VkResult result; /* TODO: This doesn't need to be aligned to slc size. Alignment to 4 is fine. - * Change pvr_cmd_buffer_alloc_mem() to take in an alignment? + * Change pvr_arch_cmd_buffer_alloc_mem() to take in an alignment? */ /* TODO: only allocate what's needed, not always * _PVR_CLEAR_ATTACH_DATA_COUNT? */ - result = pvr_cmd_buffer_alloc_mem(cmd_buffer, + result = pvr_arch_cmd_buffer_alloc_mem(cmd_buffer, device->heaps.general_heap, _PVR_CLEAR_ATTACH_DATA_COUNT, &const_shareds_buffer); @@ -1715,9 +1715,9 @@ static VkResult pvr_clear_color_attachment_static( &dev_clear_state->pds_clear_attachment_program_info[program_idx]; /* TODO: This doesn't need to be aligned to slc size. Alignment to 4 is fine. - * Change pvr_cmd_buffer_alloc_mem() to take in an alignment? + * Change pvr_arch_cmd_buffer_alloc_mem() to take in an alignment? 
*/ - result = pvr_cmd_buffer_alloc_mem( + result = pvr_arch_cmd_buffer_alloc_mem( cmd_buffer, device->heaps.pds_heap, clear_attachment_program->texture_program_data_size, @@ -1835,7 +1835,7 @@ static VkResult pvr_add_deferred_rta_clear(struct pvr_cmd_buffer *cmd_buffer, struct pvr_render_pass_info *pass_info = &cmd_buffer->state.render_pass_info; struct pvr_sub_cmd_gfx *sub_cmd = &cmd_buffer->state.current_sub_cmd->gfx; const struct pvr_renderpass_hwsetup_render *hw_render = - pvr_pass_info_get_hw_render(pass_info, sub_cmd->hw_render_idx); + pvr_arch_pass_info_get_hw_render(pass_info, sub_cmd->hw_render_idx); const struct pvr_image_view *image_view; const struct pvr_image *image; uint32_t base_layer; @@ -1882,7 +1882,7 @@ static VkResult pvr_add_deferred_rta_clear(struct pvr_cmd_buffer *cmd_buffer, image_view = pass_info->attachments[index]; } else { const struct pvr_renderpass_hwsetup_subpass *hw_pass = - pvr_get_hw_subpass(pass_info->pass, pass_info->subpass_idx); + pvr_arch_get_hw_subpass(pass_info->pass, pass_info->subpass_idx); const struct pvr_render_subpass *sub_pass = &pass_info->pass->subpasses[hw_pass->index]; const uint32_t attachment_idx = @@ -1958,7 +1958,7 @@ static void pvr_clear_attachments(struct pvr_cmd_buffer *cmd_buffer, */ if (pass) { - hw_pass = pvr_get_hw_subpass(pass, pass_info->subpass_idx); + hw_pass = pvr_arch_get_hw_subpass(pass, pass_info->subpass_idx); multiview_enabled = pass->multiview_enabled; } else { multiview_enabled = pass_info->dr_info->hw_render.multiview_enabled; @@ -1967,7 +1967,7 @@ static void pvr_clear_attachments(struct pvr_cmd_buffer *cmd_buffer, assert(cmd_buffer->state.current_sub_cmd->type == PVR_SUB_CMD_TYPE_GRAPHICS); - pvr_reset_graphics_dirty_state(cmd_buffer, false); + pvr_arch_reset_graphics_dirty_state(cmd_buffer, false); /* We'll be emitting to the control stream. 
*/ sub_cmd->empty_cmd = false; @@ -2003,7 +2003,7 @@ static void pvr_clear_attachments(struct pvr_cmd_buffer *cmd_buffer, assert(cmd_buffer->state.current_sub_cmd->is_dynamic_render || pass->hw_setup->render_count > 0); hw_render = - pvr_pass_info_get_hw_render(&cmd_buffer->state.render_pass_info, 0); + pvr_arch_pass_info_get_hw_render(&cmd_buffer->state.render_pass_info, 0); /* TODO: verify that the hw_render if is_render_init is true is * exclusive to a non dynamic rendering path. @@ -2258,7 +2258,7 @@ static void pvr_clear_attachments(struct pvr_cmd_buffer *cmd_buffer, pvr_csb_set_relocation_mark(&sub_cmd->control_stream); vdm_cs_buffer = - pvr_csb_alloc_dwords(&sub_cmd->control_stream, vdm_cs_size_in_dw); + pvr_arch_csb_alloc_dwords(&sub_cmd->control_stream, vdm_cs_size_in_dw); if (!vdm_cs_buffer) { pvr_cmd_buffer_set_error_unwarned(cmd_buffer, sub_cmd->control_stream.status); diff --git a/src/imagination/vulkan/rogue/pvr_clear.c b/src/imagination/vulkan/rogue/pvr_clear.c index 5441f90905e..ae3e2c0bf4c 100644 --- a/src/imagination/vulkan/rogue/pvr_clear.c +++ b/src/imagination/vulkan/rogue/pvr_clear.c @@ -748,7 +748,7 @@ VkResult pvr_pds_clear_vertex_shader_program_create_and_upload_data( PDS_GENERATE_DATA_SEGMENT, dev_info); - result = pvr_cmd_buffer_upload_pds(cmd_buffer, + result = pvr_arch_cmd_buffer_upload_pds(cmd_buffer, staging_buffer, program->data_size, 4, @@ -822,7 +822,7 @@ VkResult pvr_pds_clear_rta_vertex_shader_program_create_and_upload_code( PDS_GENERATE_CODE_SEGMENT, dev_info); - result = pvr_cmd_buffer_upload_pds(cmd_buffer, + result = pvr_arch_cmd_buffer_upload_pds(cmd_buffer, NULL, 0, 0, @@ -880,7 +880,7 @@ void pvr_pack_clear_vdm_state(const struct pvr_device_info *const dev_info, */ } - pvr_calculate_vertex_cam_size(dev_info, + pvr_arch_calculate_vertex_cam_size(dev_info, vs_output_size, true, &cam_size, diff --git a/src/imagination/vulkan/winsys/pvrsrvkm/pvr_arch_srv_job_render.c 
b/src/imagination/vulkan/winsys/pvrsrvkm/pvr_arch_srv_job_render.c index 35fd8fa0050..22d61385cfe 100644 --- a/src/imagination/vulkan/winsys/pvrsrvkm/pvr_arch_srv_job_render.c +++ b/src/imagination/vulkan/winsys/pvrsrvkm/pvr_arch_srv_job_render.c @@ -350,11 +350,11 @@ VkResult PVR_PER_ARCH(srv_render_target_dataset_create)( /* If not 2 the arrays used in the bridge call will require updating. */ STATIC_ASSERT(ROGUE_FWIF_NUM_RTDATAS == 2); - pvr_rt_mtile_info_init(dev_info, - &mtile_info, - create_info->width, - create_info->height, - create_info->samples); + pvr_arch_rt_mtile_info_init(dev_info, + &mtile_info, + create_info->width, + create_info->height, + create_info->samples); isp_mtile_size = pvr_rogue_get_cr_isp_mtile_size_val(dev_info, &mtile_info,