hasvk/cmd_buffer: Rip out SKL+ support

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19852>
Jason Ekstrand, 2022-09-02 22:15:48 -05:00 (committed by Marge Bot)
parent 0626b68c88
commit 91090e39af
4 changed files with 27 additions and 1183 deletions


@@ -64,9 +64,6 @@ void genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer);
void genX(cmd_buffer_emit_gfx7_depth_flush)(struct anv_cmd_buffer *cmd_buffer);
void genX(cmd_buffer_emit_gfx12_depth_wa)(struct anv_cmd_buffer *cmd_buffer,
const struct isl_surf *surf);
void genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
int vb_index,
struct anv_address vb_address,
@@ -75,10 +72,6 @@ void genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(struct anv_cmd_buffer *
uint32_t access_type,
uint64_t vb_used);
void genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
unsigned width, unsigned height,
unsigned scale);
void genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer);
void genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer);


@@ -282,10 +282,6 @@ blorp_exec_on_render(struct blorp_batch *batch,
struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
assert(cmd_buffer->queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT);
const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, params->x1 - params->x0,
params->y1 - params->y0, scale);
#if GFX_VER >= 11
/* The PIPE_CONTROL command description says:
*
@@ -301,10 +297,6 @@ blorp_exec_on_render(struct blorp_batch *batch,
"before blorp BTI change");
#endif
if (params->depth.enabled &&
!(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
genX(cmd_buffer_emit_gfx12_depth_wa)(cmd_buffer, &params->depth.surf);
genX(flush_pipeline_select_3d)(cmd_buffer);
/* Apply any outstanding flushes in case pipeline select haven't. */

File diff suppressed because it is too large.


@@ -53,29 +53,8 @@ genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer, bool enable)
pc.DepthCacheFlushEnable = true;
pc.CommandStreamerStallEnable = true;
pc.RenderTargetCacheFlushEnable = true;
#if GFX_VER >= 12
pc.TileCacheFlushEnable = true;
/* Wa_1409600907: "PIPE_CONTROL with Depth Stall Enable bit must
* be set with any PIPE_CONTROL with Depth Flush Enable bit set.
*/
pc.DepthStallEnable = true;
#endif
}
#if GFX_VER == 9
uint32_t cache_mode;
anv_pack_struct(&cache_mode, GENX(CACHE_MODE_0),
.STCPMAOptimizationEnable = enable,
.STCPMAOptimizationEnableMask = true);
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
lri.RegisterOffset = GENX(CACHE_MODE_0_num);
lri.DataDWord = cache_mode;
}
#elif GFX_VER == 8
uint32_t cache_mode;
anv_pack_struct(&cache_mode, GENX(CACHE_MODE_1),
.NPPMAFixEnable = enable,
@@ -87,8 +66,6 @@ genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer, bool enable)
lri.DataDWord = cache_mode;
}
#endif /* GFX_VER == 8 */
/* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
* Flush bits is often necessary. We do it regardless because it's easier.
* The render cache flush is also necessary if stencil writes are enabled.
@@ -100,9 +77,6 @@ genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer, bool enable)
pc.DepthStallEnable = true;
pc.DepthCacheFlushEnable = true;
pc.RenderTargetCacheFlushEnable = true;
#if GFX_VER >= 12
pc.TileCacheFlushEnable = true;
#endif
}
}
@@ -196,122 +170,6 @@ want_depth_pma_fix(struct anv_cmd_buffer *cmd_buffer,
wm_prog_data->computed_depth_mode != PSCDEPTH_OFF;
}
UNUSED static bool
want_stencil_pma_fix(struct anv_cmd_buffer *cmd_buffer,
const struct vk_depth_stencil_state *ds)
{
if (GFX_VER > 9)
return false;
assert(GFX_VER == 9);
/* From the Skylake PRM Vol. 2c CACHE_MODE_1::STC PMA Optimization Enable:
*
* Clearing this bit will force the STC cache to wait for pending
* retirement of pixels at the HZ-read stage and do the STC-test for
* Non-promoted, R-computed and Computed depth modes instead of
* postponing the STC-test to RCPFE.
*
* STC_TEST_EN = 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE &&
* 3DSTATE_WM_DEPTH_STENCIL::StencilTestEnable
*
* STC_WRITE_EN = 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE &&
* (3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
* 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE)
*
* COMP_STC_EN = STC_TEST_EN &&
* 3DSTATE_PS_EXTRA::PixelShaderComputesStencil
*
* SW parses the pipeline states to generate the following logical
* signal indicating if PMA FIX can be enabled.
*
* STC_PMA_OPT =
* 3DSTATE_WM::ForceThreadDispatch != 1 &&
* !(3DSTATE_RASTER::ForceSampleCount != NUMRASTSAMPLES_0) &&
* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
* 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
* !(3DSTATE_WM::EDSC_Mode == 2) &&
* 3DSTATE_PS_EXTRA::PixelShaderValid &&
* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
* 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
* 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
* 3DSTATE_WM_HZ_OP::StencilBufferClear) &&
* (COMP_STC_EN || STC_WRITE_EN) &&
* ((3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
* 3DSTATE_WM::ForceKillPix == ON ||
* 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
* 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
* 3DSTATE_PS_BLEND::AlphaTestEnable ||
* 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) ||
* (3DSTATE_PS_EXTRA::Pixel Shader Computed Depth mode != PSCDEPTH_OFF))
*/
/* These are always true:
* 3DSTATE_WM::ForceThreadDispatch != 1 &&
* !(3DSTATE_RASTER::ForceSampleCount != NUMRASTSAMPLES_0)
*/
/* We only enable the PMA fix if we know for certain that HiZ is enabled.
* If we don't know whether HiZ is enabled or not, we disable the PMA fix
* and there is no harm.
*
* (3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL) &&
* 3DSTATE_DEPTH_BUFFER::HIZ Enable
*/
if (!cmd_buffer->state.hiz_enabled)
return false;
/* We can't possibly know if HiZ is enabled without the depth attachment */
ASSERTED const struct anv_image_view *d_iview =
cmd_buffer->state.gfx.depth_att.iview;
assert(d_iview && d_iview->image->planes[0].aux_usage == ISL_AUX_USAGE_HIZ);
/* 3DSTATE_PS_EXTRA::PixelShaderValid */
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT))
return false;
/* !(3DSTATE_WM::EDSC_Mode == 2) */
const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
if (wm_prog_data->early_fragment_tests)
return false;
/* We never use anv_pipeline for HiZ ops so this is trivially true:
* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
* 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
* 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
* 3DSTATE_WM_HZ_OP::StencilBufferClear)
*/
/* 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE &&
* 3DSTATE_WM_DEPTH_STENCIL::StencilTestEnable
*/
const bool stc_test_en = ds->stencil.test_enable;
/* 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE &&
* (3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
* 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE)
*/
const bool stc_write_en = ds->stencil.write_enable;
/* STC_TEST_EN && 3DSTATE_PS_EXTRA::PixelShaderComputesStencil */
const bool comp_stc_en = stc_test_en && wm_prog_data->computed_stencil;
/* COMP_STC_EN || STC_WRITE_EN */
if (!(comp_stc_en || stc_write_en))
return false;
/* (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
* 3DSTATE_WM::ForceKillPix == ON ||
* 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
* 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
* 3DSTATE_PS_BLEND::AlphaTestEnable ||
* 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) ||
* (3DSTATE_PS_EXTRA::Pixel Shader Computed Depth mode != PSCDEPTH_OFF)
*/
return pipeline->kill_pixel ||
wm_prog_data->computed_depth_mode != PSCDEPTH_OFF;
}
void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
@@ -319,27 +177,17 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
const struct vk_dynamic_graphics_state *dyn =
&cmd_buffer->vk.dynamic_graphics_state;
#if GFX_VER >= 11
if (cmd_buffer->device->vk.enabled_extensions.KHR_fragment_shading_rate &&
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_FSR))
genX(emit_shading_rate)(&cmd_buffer->batch, pipeline, &dyn->fsr);
#endif /* GFX_VER >= 11 */
if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_WIDTH)) {
uint32_t sf_dw[GENX(3DSTATE_SF_length)];
struct GENX(3DSTATE_SF) sf = {
GENX(3DSTATE_SF_header),
};
#if GFX_VER == 8
if (cmd_buffer->device->info->platform == INTEL_PLATFORM_CHV) {
sf.CHVLineWidth = dyn->rs.line.width;
} else {
sf.LineWidth = dyn->rs.line.width;
}
#else
sf.LineWidth = dyn->rs.line.width,
#endif
GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gfx8.sf);
}
@@ -394,7 +242,6 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
* across different state packets for gfx8 and gfx9. We handle that by
* using a big old #if switch here.
*/
#if GFX_VER == 8
if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS)) {
struct anv_state cc_state =
@@ -462,87 +309,6 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
const bool pma = want_depth_pma_fix(cmd_buffer, &opt_ds);
genX(cmd_buffer_enable_pma_fix)(cmd_buffer, pma);
}
#else
if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS)) {
struct anv_state cc_state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
GENX(COLOR_CALC_STATE_length) * 4,
64);
struct GENX(COLOR_CALC_STATE) cc = {
.BlendConstantColorRed = dyn->cb.blend_constants[0],
.BlendConstantColorGreen = dyn->cb.blend_constants[1],
.BlendConstantColorBlue = dyn->cb.blend_constants[2],
.BlendConstantColorAlpha = dyn->cb.blend_constants[3],
};
GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
ccp.ColorCalcStatePointer = cc_state.offset;
ccp.ColorCalcStatePointerValid = true;
}
}
if ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
ANV_CMD_DIRTY_RENDER_TARGETS)) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_OP) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE)) {
VkImageAspectFlags ds_aspects = 0;
if (cmd_buffer->state.gfx.depth_att.vk_format != VK_FORMAT_UNDEFINED)
ds_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
if (cmd_buffer->state.gfx.stencil_att.vk_format != VK_FORMAT_UNDEFINED)
ds_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
struct vk_depth_stencil_state opt_ds = dyn->ds;
vk_optimize_depth_stencil_state(&opt_ds, ds_aspects, true);
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_WM_DEPTH_STENCIL), ds) {
ds.DoubleSidedStencilEnable = true;
ds.StencilTestMask = opt_ds.stencil.front.compare_mask & 0xff;
ds.StencilWriteMask = opt_ds.stencil.front.write_mask & 0xff;
ds.BackfaceStencilTestMask = opt_ds.stencil.back.compare_mask & 0xff;
ds.BackfaceStencilWriteMask = opt_ds.stencil.back.write_mask & 0xff;
ds.StencilReferenceValue = opt_ds.stencil.front.reference & 0xff;
ds.BackfaceStencilReferenceValue = opt_ds.stencil.back.reference & 0xff;
ds.DepthTestEnable = opt_ds.depth.test_enable;
ds.DepthBufferWriteEnable = opt_ds.depth.write_enable;
ds.DepthTestFunction = genX(vk_to_intel_compare_op)[opt_ds.depth.compare_op];
ds.StencilTestEnable = opt_ds.stencil.test_enable;
ds.StencilBufferWriteEnable = opt_ds.stencil.write_enable;
ds.StencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.fail];
ds.StencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.pass];
ds.StencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.depth_fail];
ds.StencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.front.op.compare];
ds.BackfaceStencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.fail];
ds.BackfaceStencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.pass];
ds.BackfaceStencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.depth_fail];
ds.BackfaceStencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.back.op.compare];
}
const bool pma = want_stencil_pma_fix(cmd_buffer, &opt_ds);
genX(cmd_buffer_enable_pma_fix)(cmd_buffer, pma);
}
#endif
#if GFX_VER >= 12
if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BOUNDS), db) {
db.DepthBoundsTestEnable = dyn->ds.depth.bounds_test.enable;
db.DepthBoundsTestMinValue = dyn->ds.depth.bounds_test.min;
db.DepthBoundsTestMaxValue = dyn->ds.depth.bounds_test.max;
}
}
#endif
if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_STIPPLE)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_LINE_STIPPLE), ls) {
@@ -557,9 +323,6 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
ANV_CMD_DIRTY_INDEX_BUFFER)) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
#if GFX_VERx10 >= 125
vf.GeometryDistributionEnable = true;
#endif
vf.IndexedDrawCutIndexEnable = dyn->ia.primitive_restart_enable;
vf.CutIndex = cmd_buffer->state.gfx.restart_index;
}
@@ -573,46 +336,12 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
ib.MOCS = anv_mocs(cmd_buffer->device,
buffer->address.bo,
ISL_SURF_USAGE_INDEX_BUFFER_BIT);
#if GFX_VER >= 12
ib.L3BypassDisable = true;
#endif
ib.BufferStartingAddress = anv_address_add(buffer->address, offset);
ib.BufferSize = vk_buffer_range(&buffer->vk, offset,
VK_WHOLE_SIZE);
}
}
#if GFX_VERx10 >= 125
if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VFG), vfg) {
/* If 3DSTATE_TE: TE Enable == 1 then RR_STRICT else RR_FREE*/
vfg.DistributionMode =
anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL) ? RR_STRICT :
RR_FREE;
vfg.DistributionGranularity = BatchLevelGranularity;
/* Wa_14014890652 */
if (intel_device_info_is_dg2(cmd_buffer->device->info))
vfg.GranularityThresholdDisable = 1;
vfg.ListCutIndexEnable = dyn->ia.primitive_restart_enable;
/* 192 vertices for TRILIST_ADJ */
vfg.ListNBatchSizeScale = 0;
/* Batch size of 384 vertices */
vfg.List3BatchSizeScale = 2;
/* Batch size of 128 vertices */
vfg.List2BatchSizeScale = 1;
/* Batch size of 128 vertices */
vfg.List1BatchSizeScale = 2;
/* Batch size of 256 vertices for STRIP topologies */
vfg.StripBatchSizeScale = 3;
/* 192 control points for PATCHLIST_3 */
vfg.PatchBatchSizeScale = 1;
/* 192 control points for PATCHLIST_3 */
vfg.PatchBatchSizeMultiplier = 31;
}
}
#endif
if (pipeline->base.device->vk.enabled_extensions.EXT_sample_locations &&
BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS))
genX(emit_sample_pattern)(&cmd_buffer->batch, dyn->ms.sample_locations);