diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index d56cc7a9290..e53a3c0ccaf 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -104,6 +104,7 @@ const struct anv_dynamic_state default_dynamic_state = {
    .raster_discard = 0,
    .depth_bias_enable = 0,
    .primitive_restart_enable = 0,
+   .logic_op = 0,
 };
 
 /**
@@ -195,6 +196,7 @@ anv_dynamic_state_copy(struct anv_dynamic_state *dest,
    ANV_CMP_COPY(raster_discard, ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE);
    ANV_CMP_COPY(depth_bias_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS_ENABLE);
    ANV_CMP_COPY(primitive_restart_enable, ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE);
+   ANV_CMP_COPY(logic_op, ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP);
 
    if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) {
       dest->sample_locations.samples = src->sample_locations.samples;
@@ -544,6 +546,17 @@ void anv_CmdSetPrimitiveRestartEnableEXT(
    cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE;
 }
 
+void anv_CmdSetLogicOpEXT(
+    VkCommandBuffer commandBuffer,
+    VkLogicOp logicOp)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.gfx.dynamic.logic_op = logicOp;
+
+   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP;
+}
+
 void anv_CmdSetViewport(
     VkCommandBuffer commandBuffer,
     uint32_t firstViewport,
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 1dd3682b585..e0c147cf153 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -2182,7 +2182,8 @@ copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
       (ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS |
        ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE |
        ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE |
-       ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE);
+       ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE |
+       ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP);
 }
 
 static void
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 47bbd491d27..660fcf040ec 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -2679,6 +2679,7 @@ struct anv_dynamic_state {
    bool raster_discard;
    bool depth_bias_enable;
    bool primitive_restart_enable;
+   VkLogicOp logic_op;
 
    bool dyn_vbo_stride;
    bool dyn_vbo_size;
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index f3bda16ff01..f47abdd9459 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -1217,7 +1217,8 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
       GENX(BLEND_STATE_ENTRY_length) * surface_count;
    uint32_t *blend_state_start, *state_pos;
 
-   if (dynamic_states & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE) {
+   if (dynamic_states & (ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE |
+                         ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP)) {
       const struct intel_device_info *devinfo = &pipeline->base.device->info;
       blend_state_start = devinfo->ver >= 8 ?
          pipeline->gfx8.blend_state : pipeline->gfx7.blend_state;
@@ -1263,7 +1264,9 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
       .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
 #endif
       .LogicOpEnable = info->logicOpEnable,
-      .LogicOpFunction = genX(vk_to_intel_logic_op)[info->logicOp],
+      .LogicOpFunction = dynamic_states & ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP ?
+                         0 : genX(vk_to_intel_logic_op)[info->logicOp],
+
       /* Vulkan specification 1.2.168, VkLogicOp:
        *
        *    "Logical operations are controlled by the logicOpEnable and
@@ -1370,7 +1373,8 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
    blend.AlphaTestEnable = false;
    blend.IndependentAlphaBlendEnable =
       blend_state.IndependentAlphaBlendEnable;
-   if (dynamic_states & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE) {
+   if (dynamic_states & (ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE |
+                         ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP)) {
       GENX(3DSTATE_PS_BLEND_pack)(NULL, pipeline->gfx8.ps_blend, &blend);
    } else {
       anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_PS_BLEND), _blend)
@@ -1382,7 +1386,8 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
 
    GENX(BLEND_STATE_pack)(NULL, blend_state_start, &blend_state);
 
-   if (!(dynamic_states & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE)) {
+   if (!(dynamic_states & (ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE |
+                           ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP))) {
       anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
          bsp.BlendStatePointer = pipeline->blend_state.offset;
 #if GFX_VER >= 8
diff --git a/src/intel/vulkan/gfx7_cmd_buffer.c b/src/intel/vulkan/gfx7_cmd_buffer.c
index ae3d7354680..c7d1b96cb6a 100644
--- a/src/intel/vulkan/gfx7_cmd_buffer.c
+++ b/src/intel/vulkan/gfx7_cmd_buffer.c
@@ -356,21 +356,27 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
                                 cmd_buffer->state.gfx.dynamic.sample_locations.locations);
    }
 
-   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE) {
+   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE ||
+       cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP) {
       const uint8_t color_writes = cmd_buffer->state.gfx.dynamic.color_writes;
       /* 3DSTATE_WM in the hope we can avoid spawning fragment shaders
        * threads.
        */
-      uint32_t dwords[GENX(3DSTATE_WM_length)];
-      struct GENX(3DSTATE_WM) wm = {
-         GENX(3DSTATE_WM_header),
+      bool dirty_color_blend =
+         cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE;
 
-         .ThreadDispatchEnable = pipeline->force_fragment_thread_dispatch ||
-                                 color_writes,
-      };
-      GENX(3DSTATE_WM_pack)(NULL, dwords, &wm);
+      if (dirty_color_blend) {
+         uint32_t dwords[GENX(3DSTATE_WM_length)];
+         struct GENX(3DSTATE_WM) wm = {
+            GENX(3DSTATE_WM_header),
 
-      anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx7.wm);
+            .ThreadDispatchEnable = pipeline->force_fragment_thread_dispatch ||
+                                    color_writes,
+         };
+         GENX(3DSTATE_WM_pack)(NULL, dwords, &wm);
+
+         anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx7.wm);
+      }
 
       /* Blend states of each RT */
       uint32_t surface_count = 0;
@@ -388,14 +394,20 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
       /* Skip this part */
       dws += GENX(BLEND_STATE_length);
 
+      bool dirty_logic_op =
+         cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP;
+
       for (uint32_t i = 0; i < surface_count; i++) {
          struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
-         bool write_disabled = (color_writes & (1u << binding->index)) == 0;
+         bool write_disabled =
+            dirty_color_blend && (color_writes & (1u << binding->index)) == 0;
          struct GENX(BLEND_STATE_ENTRY) entry = {
             .WriteDisableAlpha = write_disabled,
             .WriteDisableRed = write_disabled,
             .WriteDisableGreen = write_disabled,
             .WriteDisableBlue = write_disabled,
+            .LogicOpFunction =
+               dirty_logic_op ? genX(vk_to_intel_logic_op)[d->logic_op] : 0,
          };
          GENX(BLEND_STATE_ENTRY_pack)(NULL, dws, &entry);
          dws += GENX(BLEND_STATE_ENTRY_length);
diff --git a/src/intel/vulkan/gfx8_cmd_buffer.c b/src/intel/vulkan/gfx8_cmd_buffer.c
index e70b4173915..b8baca4725d 100644
--- a/src/intel/vulkan/gfx8_cmd_buffer.c
+++ b/src/intel/vulkan/gfx8_cmd_buffer.c
@@ -659,32 +659,38 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
                                 cmd_buffer->state.gfx.dynamic.sample_locations.locations);
    }
 
-   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE) {
+   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE ||
+       cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP) {
      const uint8_t color_writes = cmd_buffer->state.gfx.dynamic.color_writes;
      /* 3DSTATE_WM in the hope we can avoid spawning fragment shaders
       * threads.
       */
-      uint32_t dwords[MAX2(GENX(3DSTATE_WM_length),
-                           GENX(3DSTATE_PS_BLEND_length))];
-      struct GENX(3DSTATE_WM) wm = {
-         GENX(3DSTATE_WM_header),
+      bool dirty_color_blend =
+         cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE;
 
-         .ForceThreadDispatchEnable = (pipeline->force_fragment_thread_dispatch ||
-                                       !color_writes) ? ForceON : 0,
-      };
-      GENX(3DSTATE_WM_pack)(NULL, dwords, &wm);
+      if (dirty_color_blend) {
+         uint32_t dwords[MAX2(GENX(3DSTATE_WM_length),
+                              GENX(3DSTATE_PS_BLEND_length))];
+         struct GENX(3DSTATE_WM) wm = {
+            GENX(3DSTATE_WM_header),
 
-      anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx8.wm);
+            .ForceThreadDispatchEnable = (pipeline->force_fragment_thread_dispatch ||
+                                          !color_writes) ? ForceON : 0,
+         };
+         GENX(3DSTATE_WM_pack)(NULL, dwords, &wm);
 
-      /* 3DSTATE_PS_BLEND to be consistent with the rest of the
-       * BLEND_STATE_ENTRY.
-       */
-      struct GENX(3DSTATE_PS_BLEND) ps_blend = {
-         GENX(3DSTATE_PS_BLEND_header),
-         .HasWriteableRT = color_writes != 0,
-      };
-      GENX(3DSTATE_PS_BLEND_pack)(NULL, dwords, &ps_blend);
-      anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx8.ps_blend);
+         anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx8.wm);
+
+         /* 3DSTATE_PS_BLEND to be consistent with the rest of the
+          * BLEND_STATE_ENTRY.
+          */
+         struct GENX(3DSTATE_PS_BLEND) ps_blend = {
+            GENX(3DSTATE_PS_BLEND_header),
+            .HasWriteableRT = color_writes != 0,
+         };
+         GENX(3DSTATE_PS_BLEND_pack)(NULL, dwords, &ps_blend);
+         anv_batch_emit_merge(&cmd_buffer->batch, dwords, pipeline->gfx8.ps_blend);
+      }
 
       /* Blend states of each RT */
       uint32_t surface_count = 0;
@@ -702,14 +708,20 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
       /* Skip this part */
       dws += GENX(BLEND_STATE_length);
 
+      bool dirty_logic_op =
+         cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP;
+
       for (uint32_t i = 0; i < surface_count; i++) {
         struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
-         bool write_disabled = (color_writes & (1u << binding->index)) == 0;
+         bool write_disabled =
+            dirty_color_blend && (color_writes & (1u << binding->index)) == 0;
         struct GENX(BLEND_STATE_ENTRY) entry = {
            .WriteDisableAlpha = write_disabled,
            .WriteDisableRed = write_disabled,
            .WriteDisableGreen = write_disabled,
            .WriteDisableBlue = write_disabled,
+            .LogicOpFunction =
+               dirty_logic_op ? genX(vk_to_intel_logic_op)[d->logic_op] : 0,
         };
         GENX(BLEND_STATE_ENTRY_pack)(NULL, dws, &entry);
         dws += GENX(BLEND_STATE_ENTRY_length);
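
For reference, a minimal application-side sketch (not part of the patch) of how this path is reached. It assumes the device was created with VK_EXT_extended_dynamic_state2 and the extendedDynamicState2LogicOp feature enabled, and that `device`, `pipeline_info` (whose color-blend state has logicOpEnable = VK_TRUE) and `cmd` exist elsewhere; the helper name is made up. Listing VK_DYNAMIC_STATE_LOGIC_OP_EXT makes the pipeline take the logic op from vkCmdSetLogicOpEXT, which lands in the anv_CmdSetLogicOpEXT entry point added above: it records gfx.dynamic.logic_op and sets ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP so the next draw re-emits the blend state.

/* Illustrative usage sketch, not part of the patch.  Error handling and
 * pipeline destruction are omitted for brevity.
 */
#include <vulkan/vulkan.h>

static void
record_with_dynamic_logic_op(VkDevice device,
                             VkGraphicsPipelineCreateInfo *pipeline_info,
                             VkCommandBuffer cmd)
{
   /* Only the logic op itself is dynamic; logicOpEnable stays baked into
    * the pipeline, matching the driver change above.
    */
   static const VkDynamicState dynamic_states[] = {
      VK_DYNAMIC_STATE_LOGIC_OP_EXT,
   };
   const VkPipelineDynamicStateCreateInfo dynamic_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
      .dynamicStateCount = 1,
      .pDynamicStates = dynamic_states,
   };
   pipeline_info->pDynamicState = &dynamic_info;

   VkPipeline pipeline;
   vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, pipeline_info,
                             NULL, &pipeline);

   /* Extension entry points are fetched through vkGetDeviceProcAddr. */
   PFN_vkCmdSetLogicOpEXT set_logic_op = (PFN_vkCmdSetLogicOpEXT)
      vkGetDeviceProcAddr(device, "vkCmdSetLogicOpEXT");

   vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);

   /* Reaches anv_CmdSetLogicOpEXT: stores the op in gfx.dynamic.logic_op
    * and marks ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP, so the draw below picks up
    * the XOR logic op when BLEND_STATE is re-emitted.
    */
   set_logic_op(cmd, VK_LOGIC_OP_XOR);
   vkCmdDraw(cmd, 3, 1, 0, 0);
}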