pvr: Use pvr_sub_cmd_event union members directly
This has the benefit of replacing some explicit asserts with static
type checking.

Signed-off-by: Matt Coster <matt.coster@imgtec.com>
Reviewed-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23313>

parent f63f4fac0a
commit 40ce383554

4 changed files with 106 additions and 115 deletions
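What the change buys, in miniature: handlers that previously took the whole
tagged union (struct pvr_sub_cmd_event) and asserted on its type tag at
runtime now take a pointer to the specific union member they consume, so a
mismatched call is rejected by the compiler instead. The sketch below is a
self-contained illustration of that pattern, not the driver code itself; the
names (event_cmd, barrier_cmd, wait_cmd, process_barrier_*) are hypothetical
stand-ins, and the anonymous union requires C11, matching the layout used in
the hunks that follow.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature of a tagged event sub-command: the union members
 * are named struct types so handlers can accept them directly. */
struct event_cmd {
   enum { EVENT_TYPE_BARRIER, EVENT_TYPE_WAIT } type;
   union {
      struct barrier_cmd {
         uint32_t wait_for_stage_mask;
         uint32_t wait_at_stage_mask;
      } barrier;
      struct wait_cmd {
         uint32_t count;
      } wait;
   };
};

/* Old shape: takes the whole union, so the tag can only be checked at
 * runtime; a call site that passes a wait-type command still compiles. */
static void process_barrier_old(const struct event_cmd *cmd)
{
   assert(cmd->type == EVENT_TYPE_BARRIER);
   printf("barrier: wait for 0x%" PRIx32 ", wait at 0x%" PRIx32 "\n",
          cmd->barrier.wait_for_stage_mask,
          cmd->barrier.wait_at_stage_mask);
}

/* New shape: the parameter type states exactly what is consumed; passing
 * &cmd->wait here is a compile-time error, so the runtime assert on the
 * tag disappears. */
static void process_barrier_new(const struct barrier_cmd *barrier)
{
   printf("barrier: wait for 0x%" PRIx32 ", wait at 0x%" PRIx32 "\n",
          barrier->wait_for_stage_mask,
          barrier->wait_at_stage_mask);
}

int main(void)
{
   const struct event_cmd cmd = {
      .type = EVENT_TYPE_BARRIER,
      .barrier = { .wait_for_stage_mask = 0x1, .wait_at_stage_mask = 0x2 },
   };

   process_barrier_old(&cmd);         /* tag enforced at runtime */
   process_barrier_new(&cmd.barrier); /* tag enforced by the type system */
   return 0;
}

In the hunks below, pvr_process_event_cmd_barrier() and
pvr_process_event_cmd_wait() take struct pvr_sub_cmd_event_barrier and
struct pvr_sub_cmd_event_wait in exactly this way, and the new
pvr_process_event_cmd_set()/_reset() wrappers do the same for
struct pvr_sub_cmd_event_set_reset, which is what lets the asserts on
sub_cmd->type be dropped.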
@@ -1939,7 +1939,6 @@ VkResult pvr_cmd_buffer_end_sub_cmd(struct pvr_cmd_buffer *cmd_buffer)
    }
 
    if (query_pool) {
-      struct pvr_sub_cmd_event *sub_cmd;
       struct pvr_query_info query_info;
 
       assert(query_bo);
@@ -1965,9 +1964,7 @@ VkResult pvr_cmd_buffer_end_sub_cmd(struct pvr_cmd_buffer *cmd_buffer)
       if (result != VK_SUCCESS)
          return result;
 
-      sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-      *sub_cmd = (struct pvr_sub_cmd_event) {
+      cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
          .type = PVR_EVENT_TYPE_BARRIER,
          .barrier = {
             .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
@@ -7351,7 +7348,6 @@ pvr_cmd_buffer_insert_mid_frag_barrier_event(struct pvr_cmd_buffer *cmd_buffer,
                                              uint32_t src_stage_mask,
                                              uint32_t dst_stage_mask)
 {
-   struct pvr_sub_cmd_event *sub_cmd;
    VkResult result;
 
    assert(cmd_buffer->state.current_sub_cmd->type == PVR_SUB_CMD_TYPE_GRAPHICS);
@@ -7366,12 +7362,14 @@ pvr_cmd_buffer_insert_mid_frag_barrier_event(struct pvr_cmd_buffer *cmd_buffer,
    if (result != VK_SUCCESS)
       return result;
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-   sub_cmd->type = PVR_EVENT_TYPE_BARRIER;
-   sub_cmd->barrier.in_render_pass = true;
-   sub_cmd->barrier.wait_for_stage_mask = src_stage_mask;
-   sub_cmd->barrier.wait_at_stage_mask = dst_stage_mask;
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
+      .type = PVR_EVENT_TYPE_BARRIER,
+      .barrier = {
+         .in_render_pass = true,
+         .wait_for_stage_mask = src_stage_mask,
+         .wait_at_stage_mask = dst_stage_mask,
+      },
+   };
 
    pvr_cmd_buffer_end_sub_cmd(cmd_buffer);
    pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_GRAPHICS);
@@ -7390,18 +7388,19 @@ pvr_cmd_buffer_insert_barrier_event(struct pvr_cmd_buffer *cmd_buffer,
                                     uint32_t src_stage_mask,
                                     uint32_t dst_stage_mask)
 {
-   struct pvr_sub_cmd_event *sub_cmd;
    VkResult result;
 
    result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_EVENT);
    if (result != VK_SUCCESS)
       return result;
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-   sub_cmd->type = PVR_EVENT_TYPE_BARRIER;
-   sub_cmd->barrier.wait_for_stage_mask = src_stage_mask;
-   sub_cmd->barrier.wait_at_stage_mask = dst_stage_mask;
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
+      .type = PVR_EVENT_TYPE_BARRIER,
+      .barrier = {
+         .wait_for_stage_mask = src_stage_mask,
+         .wait_at_stage_mask = dst_stage_mask,
+      },
+   };
 
    return pvr_cmd_buffer_end_sub_cmd(cmd_buffer);
 }
@@ -7558,7 +7557,6 @@ void pvr_CmdResetEvent2(VkCommandBuffer commandBuffer,
 {
    PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
    PVR_FROM_HANDLE(pvr_event, event, _event);
-   struct pvr_sub_cmd_event *sub_cmd;
    VkResult result;
 
    PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer);
@@ -7567,11 +7565,13 @@ void pvr_CmdResetEvent2(VkCommandBuffer commandBuffer,
    if (result != VK_SUCCESS)
       return;
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-   sub_cmd->type = PVR_EVENT_TYPE_RESET;
-   sub_cmd->reset.event = event;
-   sub_cmd->reset.wait_for_stage_mask = pvr_stage_mask_src(stageMask);
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
+      .type = PVR_EVENT_TYPE_RESET,
+      .set_reset = {
+         .event = event,
+         .wait_for_stage_mask = pvr_stage_mask_src(stageMask),
+      },
+   };
 
    pvr_cmd_buffer_end_sub_cmd(cmd_buffer);
 }
@@ -7583,7 +7583,6 @@ void pvr_CmdSetEvent2(VkCommandBuffer commandBuffer,
    PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
    PVR_FROM_HANDLE(pvr_event, event, _event);
    VkPipelineStageFlags2 stage_mask = 0;
-   struct pvr_sub_cmd_event *sub_cmd;
    VkResult result;
 
    PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer);
@@ -7601,11 +7600,13 @@ void pvr_CmdSetEvent2(VkCommandBuffer commandBuffer,
    for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++)
       stage_mask |= pDependencyInfo->pImageMemoryBarriers[i].srcStageMask;
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-   sub_cmd->type = PVR_EVENT_TYPE_SET;
-   sub_cmd->set.event = event;
-   sub_cmd->set.wait_for_stage_mask = pvr_stage_mask_dst(stage_mask);
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
+      .type = PVR_EVENT_TYPE_SET,
+      .set_reset = {
+         .event = event,
+         .wait_for_stage_mask = pvr_stage_mask_dst(stage_mask),
+      },
+   };
 
    pvr_cmd_buffer_end_sub_cmd(cmd_buffer);
 }
@@ -7616,7 +7617,6 @@ void pvr_CmdWaitEvents2(VkCommandBuffer commandBuffer,
                         const VkDependencyInfo *pDependencyInfos)
 {
    PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
-   struct pvr_sub_cmd_event *sub_cmd;
    struct pvr_event **events_array;
    uint32_t *stage_masks;
    VkResult result;
@@ -7658,12 +7658,14 @@ void pvr_CmdWaitEvents2(VkCommandBuffer commandBuffer,
       stage_masks[i] = pvr_stage_mask_dst(mask);
    }
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-
-   sub_cmd->type = PVR_EVENT_TYPE_WAIT;
-   sub_cmd->wait.count = eventCount;
-   sub_cmd->wait.events = events_array;
-   sub_cmd->wait.wait_at_stage_masks = stage_masks;
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
+      .type = PVR_EVENT_TYPE_WAIT,
+      .wait = {
+         .count = eventCount,
+         .events = events_array,
+         .wait_at_stage_masks = stage_masks,
+      },
+   };
 
    pvr_cmd_buffer_end_sub_cmd(cmd_buffer);
 }
@@ -526,19 +526,13 @@ struct pvr_sub_cmd_event {
    enum pvr_event_type type;
 
    union {
-      struct {
+      struct pvr_sub_cmd_event_set_reset {
          struct pvr_event *event;
-         /* Stages to wait for until the event is set. */
+         /* Stages to wait for until the event is set or reset. */
          uint32_t wait_for_stage_mask;
-      } set;
+      } set_reset;
 
-      struct {
-         struct pvr_event *event;
-         /* Stages to wait for until the event is reset. */
-         uint32_t wait_for_stage_mask;
-      } reset;
-
-      struct {
+      struct pvr_sub_cmd_event_wait {
          uint32_t count;
          /* Events to wait for before resuming. */
         struct pvr_event **events;
@@ -546,7 +540,7 @@ struct pvr_sub_cmd_event {
         uint32_t *wait_at_stage_masks;
      } wait;
 
-      struct {
+      struct pvr_sub_cmd_event_barrier {
         bool in_render_pass;
 
         /* Stages to wait for. */
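For orientation, the union after this change reads roughly as follows. This
is reconstructed from the two hunks above; fields and comments that fall
outside the hunks are elided or assumed, so treat it as a sketch rather than
a verbatim copy of the header.

struct pvr_sub_cmd_event {
   enum pvr_event_type type;

   union {
      struct pvr_sub_cmd_event_set_reset {
         struct pvr_event *event;
         /* Stages to wait for until the event is set or reset. */
         uint32_t wait_for_stage_mask;
      } set_reset;

      struct pvr_sub_cmd_event_wait {
         uint32_t count;
         /* Events to wait for before resuming. */
         struct pvr_event **events;
         uint32_t *wait_at_stage_masks;
      } wait;

      struct pvr_sub_cmd_event_barrier {
         bool in_render_pass;

         /* Stages to wait for. */
         uint32_t wait_for_stage_mask;
         /* Stage to wait at (assumed position; both fields are used by the
          * queue-processing code below). */
         uint32_t wait_at_stage_mask;
      } barrier;
   };
};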
@@ -290,7 +290,6 @@ void pvr_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
 {
    PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
    struct pvr_query_info query_info;
-   struct pvr_sub_cmd_event *sub_cmd;
    VkResult result;
 
    PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer);
@@ -322,8 +321,7 @@ void pvr_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
     * transfer job with the compute job.
     */
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-   *sub_cmd = (struct pvr_sub_cmd_event) {
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
       .type = PVR_EVENT_TYPE_BARRIER,
       .barrier = {
          .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
@@ -341,8 +339,7 @@ void pvr_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
    if (result != VK_SUCCESS)
       return;
 
-   sub_cmd = &cmd_buffer->state.current_sub_cmd->event;
-   *sub_cmd = (struct pvr_sub_cmd_event) {
+   cmd_buffer->state.current_sub_cmd->event = (struct pvr_sub_cmd_event){
      .type = PVR_EVENT_TYPE_BARRIER,
      .barrier = {
         .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
@@ -412,18 +412,17 @@ pvr_process_occlusion_query_cmd(struct pvr_device *device,
    return result;
 }
 
-static VkResult pvr_process_event_cmd_barrier(struct pvr_device *device,
-                                              struct pvr_queue *queue,
-                                              struct pvr_sub_cmd_event *sub_cmd)
+static VkResult
+pvr_process_event_cmd_barrier(struct pvr_device *device,
+                              struct pvr_queue *queue,
+                              struct pvr_sub_cmd_event_barrier *sub_cmd)
 {
-   const uint32_t src_mask = sub_cmd->barrier.wait_for_stage_mask;
-   const uint32_t dst_mask = sub_cmd->barrier.wait_at_stage_mask;
+   const uint32_t src_mask = sub_cmd->wait_for_stage_mask;
+   const uint32_t dst_mask = sub_cmd->wait_at_stage_mask;
    struct vk_sync_wait wait_syncs[PVR_JOB_TYPE_MAX + 1];
    uint32_t src_wait_count = 0;
    VkResult result;
 
-   assert(sub_cmd->type == PVR_EVENT_TYPE_BARRIER);
-
    assert(!(src_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
    assert(!(dst_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
 
@@ -489,7 +488,8 @@ static VkResult pvr_process_event_cmd_barrier(struct pvr_device *device,
 static VkResult
 pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
                                    struct pvr_queue *queue,
-                                   struct pvr_sub_cmd_event *sub_cmd)
+                                   struct pvr_sub_cmd_event_set_reset *sub_cmd,
+                                   const enum pvr_event_state new_event_state)
 {
    /* Not PVR_JOB_TYPE_MAX since that also includes
     * PVR_JOB_TYPE_OCCLUSION_QUERY so no stage in the src mask.
@@ -498,21 +498,12 @@ pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
    struct vk_sync_signal signal;
    struct vk_sync *signal_sync;
 
-   uint32_t wait_for_stage_mask;
    uint32_t wait_count = 0;
    VkResult result;
 
-   assert(sub_cmd->type == PVR_EVENT_TYPE_SET ||
-          sub_cmd->type == PVR_EVENT_TYPE_RESET);
+   assert(!(sub_cmd->wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
 
-   if (sub_cmd->type == PVR_EVENT_TYPE_SET)
-      wait_for_stage_mask = sub_cmd->set.wait_for_stage_mask;
-   else
-      wait_for_stage_mask = sub_cmd->reset.wait_for_stage_mask;
-
-   assert(!(wait_for_stage_mask & ~PVR_PIPELINE_STAGE_ALL_BITS));
-
-   u_foreach_bit (stage, wait_for_stage_mask) {
+   u_foreach_bit (stage, sub_cmd->wait_for_stage_mask) {
       if (!queue->last_job_signal_sync[stage])
         continue;
 
@@ -544,23 +535,37 @@ pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
       return result;
    }
 
-   if (sub_cmd->type == PVR_EVENT_TYPE_SET) {
-      if (sub_cmd->set.event->sync)
-         vk_sync_destroy(&device->vk, sub_cmd->set.event->sync);
+   if (sub_cmd->event->sync)
+      vk_sync_destroy(&device->vk, sub_cmd->event->sync);
 
-      sub_cmd->set.event->sync = signal_sync;
-      sub_cmd->set.event->state = PVR_EVENT_STATE_SET_BY_DEVICE;
-   } else {
-      if (sub_cmd->reset.event->sync)
-         vk_sync_destroy(&device->vk, sub_cmd->reset.event->sync);
-
-      sub_cmd->reset.event->sync = signal_sync;
-      sub_cmd->reset.event->state = PVR_EVENT_STATE_RESET_BY_DEVICE;
-   }
+   sub_cmd->event->sync = signal_sync;
+   sub_cmd->event->state = new_event_state;
 
    return VK_SUCCESS;
 }
 
+static inline VkResult
+pvr_process_event_cmd_set(struct pvr_device *device,
+                          struct pvr_queue *queue,
+                          struct pvr_sub_cmd_event_set_reset *sub_cmd)
+{
+   return pvr_process_event_cmd_set_or_reset(device,
+                                             queue,
+                                             sub_cmd,
+                                             PVR_EVENT_STATE_SET_BY_DEVICE);
+}
+
+static inline VkResult
+pvr_process_event_cmd_reset(struct pvr_device *device,
+                            struct pvr_queue *queue,
+                            struct pvr_sub_cmd_event_set_reset *sub_cmd)
+{
+   return pvr_process_event_cmd_set_or_reset(device,
+                                             queue,
+                                             sub_cmd,
+                                             PVR_EVENT_STATE_RESET_BY_DEVICE);
+}
+
 /**
  * \brief Process an event sub command of wait type.
  *
@@ -577,29 +582,30 @@ pvr_process_event_cmd_set_or_reset(struct pvr_device *device,
  * \parma[in,out] per_cmd_buffer_syncobjs Completion syncobjs for the command
  *    buffer being processed.
  */
-static VkResult pvr_process_event_cmd_wait(struct pvr_device *device,
-                                           struct pvr_queue *queue,
-                                           struct pvr_sub_cmd_event *sub_cmd)
+static VkResult
+pvr_process_event_cmd_wait(struct pvr_device *device,
+                           struct pvr_queue *queue,
+                           struct pvr_sub_cmd_event_wait *sub_cmd)
 {
    uint32_t dst_mask = 0;
    VkResult result;
 
-   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->wait.count + 1);
+   STACK_ARRAY(struct vk_sync_wait, waits, sub_cmd->count + 1);
    if (!waits)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   for (uint32_t i = 0; i < sub_cmd->wait.count; i++)
-      dst_mask |= sub_cmd->wait.wait_at_stage_masks[i];
+   for (uint32_t i = 0; i < sub_cmd->count; i++)
+      dst_mask |= sub_cmd->wait_at_stage_masks[i];
 
    u_foreach_bit (stage, dst_mask) {
       struct vk_sync_signal signal;
       struct vk_sync *signal_sync;
       uint32_t wait_count = 0;
 
-      for (uint32_t i = 0; i < sub_cmd->wait.count; i++) {
-         if (sub_cmd->wait.wait_at_stage_masks[i] & stage) {
+      for (uint32_t i = 0; i < sub_cmd->count; i++) {
+         if (sub_cmd->wait_at_stage_masks[i] & stage) {
            waits[wait_count++] = (struct vk_sync_wait){
-               .sync = sub_cmd->wait.events[i]->sync,
+               .sync = sub_cmd->events[i]->sync,
               .stage_mask = ~(VkPipelineStageFlags2)0,
               .wait_value = 0,
            };
@@ -617,7 +623,7 @@ static VkResult pvr_process_event_cmd_wait(struct pvr_device *device,
         };
      }
 
-      assert(wait_count <= (sub_cmd->wait.count + 1));
+      assert(wait_count <= (sub_cmd->count + 1));
 
      result = vk_sync_create(&device->vk,
                              &device->pdevice->ws->syncobj_type,
@@ -664,12 +670,13 @@ static VkResult pvr_process_event_cmd(struct pvr_device *device,
 {
    switch (sub_cmd->type) {
    case PVR_EVENT_TYPE_SET:
+      return pvr_process_event_cmd_set(device, queue, &sub_cmd->set_reset);
    case PVR_EVENT_TYPE_RESET:
-      return pvr_process_event_cmd_set_or_reset(device, queue, sub_cmd);
+      return pvr_process_event_cmd_reset(device, queue, &sub_cmd->set_reset);
    case PVR_EVENT_TYPE_WAIT:
-      return pvr_process_event_cmd_wait(device, queue, sub_cmd);
+      return pvr_process_event_cmd_wait(device, queue, &sub_cmd->wait);
    case PVR_EVENT_TYPE_BARRIER:
-      return pvr_process_event_cmd_barrier(device, queue, sub_cmd);
+      return pvr_process_event_cmd_barrier(device, queue, &sub_cmd->barrier);
    default:
       unreachable("Invalid event sub-command type.");
    };
@@ -688,12 +695,9 @@ static VkResult pvr_process_cmd_buffer(struct pvr_device *device,
    switch (sub_cmd->type) {
    case PVR_SUB_CMD_TYPE_GRAPHICS: {
       if (sub_cmd->gfx.has_occlusion_query) {
-         struct pvr_sub_cmd_event frag_to_transfer_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-            },
+         struct pvr_sub_cmd_event_barrier occlusion_to_frag_barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_OCCLUSION_QUERY_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
          };
 
          /* If the fragment job utilizes occlusion queries, for data
@@ -703,7 +707,7 @@ static VkResult pvr_process_cmd_buffer(struct pvr_device *device,
 
          result = pvr_process_event_cmd_barrier(device,
                                                 queue,
-                                                &frag_to_transfer_barrier);
+                                                &occlusion_to_frag_barrier);
          if (result != VK_SUCCESS)
            break;
      }
@@ -721,12 +725,9 @@ static VkResult pvr_process_cmd_buffer(struct pvr_device *device,
       const bool serialize_with_frag = sub_cmd->transfer.serialize_with_frag;
 
       if (serialize_with_frag) {
-         struct pvr_sub_cmd_event frag_to_transfer_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
-            },
+         struct pvr_sub_cmd_event_barrier frag_to_transfer_barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
          };
 
          result = pvr_process_event_cmd_barrier(device,
@@ -739,12 +740,9 @@ static VkResult pvr_process_cmd_buffer(struct pvr_device *device,
       result = pvr_process_transfer_cmds(device, queue, &sub_cmd->transfer);
 
       if (serialize_with_frag) {
-         struct pvr_sub_cmd_event transfer_to_frag_barrier = {
-            .type = PVR_EVENT_TYPE_BARRIER,
-            .barrier = {
-               .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
-               .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
-            },
+         struct pvr_sub_cmd_event_barrier transfer_to_frag_barrier = {
+            .wait_for_stage_mask = PVR_PIPELINE_STAGE_TRANSFER_BIT,
+            .wait_at_stage_mask = PVR_PIPELINE_STAGE_FRAG_BIT,
          };
 
          if (result != VK_SUCCESS)