Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
i965: Port brw_cs_state tracked state to genxml.
Emit the respective commands using genxml code.

Signed-off-by: Rafael Antognolli <rafael.antognolli@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
parent d9b4a81672
commit 71bfb44005
3 changed files with 145 additions and 164 deletions
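For context on what "port to genxml" means here: the removed code below assembles each command's DWords by hand with SET_FIELD()/OUT_BATCH(), while the added code describes the packet as a struct of named fields and lets the generated pack helpers and brw_batch_emit() produce the DWords. A toy, self-contained sketch of that pattern follows; the struct, opcode, and field layout are invented for illustration and are not the real GENX() code.

/* Toy illustration of the "describe the packet as a struct, then pack it"
 * pattern that genxml generates. Field names and bit positions are invented
 * for this example; they do not match real hardware or Mesa's generated code.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_media_curbe_load {
   uint32_t curbe_total_data_length;   /* bytes, 64-byte aligned */
   uint32_t curbe_data_start_address;  /* offset into dynamic state */
};

static void
toy_media_curbe_load_pack(uint32_t dw[4], const struct toy_media_curbe_load *p)
{
   dw[0] = (0x7001u << 16) | (4 - 2);  /* made-up opcode | DWord length */
   dw[1] = 0;
   dw[2] = p->curbe_total_data_length;
   dw[3] = p->curbe_data_start_address;
}

int main(void)
{
   const struct toy_media_curbe_load curbe = {
      .curbe_total_data_length = 256,
      .curbe_data_start_address = 0x1000,
   };

   uint32_t dw[4];
   toy_media_curbe_load_pack(dw, &curbe);
   for (int i = 0; i < 4; i++)
      printf("DW%d = 0x%08x\n", i, dw[i]);
   return 0;
}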
@@ -99,7 +99,6 @@ extern const struct brw_tracked_state brw_psp_urb_cbs;
extern const struct brw_tracked_state brw_indices;
extern const struct brw_tracked_state brw_index_buffer;
extern const struct brw_tracked_state brw_cs_state;
extern const struct brw_tracked_state gen7_cs_push_constants;
extern const struct brw_tracked_state gen6_binding_table_pointers;
extern const struct brw_tracked_state gen6_gs_binding_table;
@@ -33,168 +33,6 @@
#include "compiler/glsl/ir_uniform.h"
#include "main/shaderapi.h"

static void
brw_upload_cs_state(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(brw, 8 * 4, 64, &offset);
   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
            prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);

   uint32_t dwords = brw->gen < 8 ? 8 : 9;
   BEGIN_BATCH(dwords);
   OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));

   if (prog_data->total_scratch) {
      if (brw->gen >= 8) {
         /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
          * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
          */
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(stage_state->per_thread_scratch) - 11);
      } else if (brw->is_haswell) {
         /* Haswell's Per Thread Scratch Space is in the range [0, 10]
          * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
          */
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(stage_state->per_thread_scratch) - 12);
      } else {
         /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
          * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
          */
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   stage_state->per_thread_scratch / 1024 - 1);
      }
   } else {
      OUT_BATCH(0);
      if (brw->gen >= 8)
         OUT_BATCH(0);
   }

   const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
   const uint32_t vfe_gpgpu_mode =
      brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
   const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
   OUT_BATCH(SET_FIELD(devinfo->max_cs_threads * subslices - 1,
                       MEDIA_VFE_STATE_MAX_THREADS) |
             SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
             SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
             SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
             vfe_gpgpu_mode);

   OUT_BATCH(0);
   const uint32_t vfe_urb_allocation = brw->gen >= 8 ? 2 : 0;

   /* We are uploading duplicated copies of push constant uniforms for each
    * thread. Although the local id data needs to vary per thread, it won't
    * change for other uniform data. Unfortunately this duplication is
    * required for gen7. As of Haswell, this duplication can be avoided, but
    * this older mechanism with duplicated data continues to work.
    *
    * FINISHME: As of Haswell, we could make use of the
    * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length" field
    * to only store one copy of uniform data.
    *
    * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
    * which is described in the GPGPU_WALKER command and in the Broadwell PRM
    * Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
    * Operations => GPGPU Mode => Indirect Payload Storage.
    *
    * Note: The constant data is built in brw_upload_cs_push_constants below.
    */
   const uint32_t vfe_curbe_allocation =
      ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
            cs_prog_data->push.cross_thread.regs, 2);
   OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC) |
             SET_FIELD(vfe_curbe_allocation, MEDIA_VFE_STATE_CURBE_ALLOC));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   if (cs_prog_data->push.total.size > 0) {
      BEGIN_BATCH(4);
      OUT_BATCH(MEDIA_CURBE_LOAD << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(ALIGN(cs_prog_data->push.total.size, 64));
      OUT_BATCH(stage_state->push_const_offset);
      ADVANCE_BATCH();
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);

   memset(desc, 0, 8 * 4);

   int dw = 0;
   desc[dw++] = brw->cs.base.prog_offset;
   if (brw->gen >= 8)
      desc[dw++] = 0; /* Kernel Start Pointer High */
   desc[dw++] = 0;
   desc[dw++] = stage_state->sampler_offset |
      ((stage_state->sampler_count + 3) / 4);
   desc[dw++] = stage_state->bind_bo_offset;
   desc[dw++] = SET_FIELD(cs_prog_data->push.per_thread.regs,
                          MEDIA_CURBE_READ_LENGTH);
   const uint32_t media_threads =
      brw->gen >= 8 ?
      SET_FIELD(cs_prog_data->threads, GEN8_MEDIA_GPGPU_THREAD_COUNT) :
      SET_FIELD(cs_prog_data->threads, MEDIA_GPGPU_THREAD_COUNT);
   assert(cs_prog_data->threads <= devinfo->max_cs_threads);

   const uint32_t slm_size =
      encode_slm_size(devinfo->gen, prog_data->total_shared);

   desc[dw++] =
      SET_FIELD(cs_prog_data->uses_barrier, MEDIA_BARRIER_ENABLE) |
      SET_FIELD(slm_size, MEDIA_SHARED_LOCAL_MEMORY_SIZE) |
      media_threads;

   desc[dw++] =
      SET_FIELD(cs_prog_data->push.cross_thread.regs, CROSS_THREAD_READ_LENGTH);

   BEGIN_BATCH(4);
   OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(8 * 4);
   OUT_BATCH(offset);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_cs_state = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_PUSH_CONSTANT_ALLOCATION |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_SURFACES,
   },
   .emit = brw_upload_cs_state
};


/**
 * Creates a region containing the push constants for the CS on gen7+.
 *
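The per-thread scratch encoding described in the comments above (and carried over unchanged into the genxml version below) maps a scratch size to a small field value. A standalone sketch of that arithmetic follows; the helper function and platform enum are illustrative only, Mesa computes the same values inline with ffs() on per_thread_scratch.

/* Standalone sketch of the per-thread scratch space encoding described in
 * the comments above. Names here are illustrative, not Mesa's.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

enum platform { GEN7, HASWELL, GEN8_PLUS };

static uint32_t
encode_scratch_space(enum platform p, uint32_t per_thread_scratch)
{
   switch (p) {
   case GEN8_PLUS:
      /* 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M (power-of-two sizes) */
      return ffs(per_thread_scratch) - 11;
   case HASWELL:
      /* 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M (power-of-two sizes) */
      return ffs(per_thread_scratch) - 12;
   default:
      /* 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB (multiples of 1kB) */
      return per_thread_scratch / 1024 - 1;
   }
}

int main(void)
{
   printf("gen8+,  2MB: %u\n", encode_scratch_space(GEN8_PLUS, 2 * 1024 * 1024)); /* 11 */
   printf("hsw,    2kB: %u\n", encode_scratch_space(HASWELL, 2 * 1024));          /* 0  */
   printf("gen7,  12kB: %u\n", encode_scratch_space(GEN7, 12 * 1024));            /* 11 */
   return 0;
}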
@@ -3565,6 +3565,150 @@ static const struct brw_tracked_state genX(tcs_push_constants) = {
   },
   .emit = genX(upload_tcs_push_constants),
};

#endif

/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 7
static void
genX(upload_cs_state)(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(
      brw, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t), 64,
      &offset);

   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
            prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);

   brw_batch_emit(brw, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         uint32_t bo_offset;

         if (GEN_GEN >= 8) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            bo_offset = ffs(stage_state->per_thread_scratch) - 11;
         } else if (GEN_IS_HASWELL) {
            /* Haswell's Per Thread Scratch Space is in the range [0, 10]
             * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
             */
            bo_offset = ffs(stage_state->per_thread_scratch) - 12;
         } else {
            /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
             * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
             */
            bo_offset = stage_state->per_thread_scratch / 1024 - 1;
         }
         vfe.ScratchSpaceBasePointer =
            render_bo(stage_state->scratch_bo, bo_offset);
      }

      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      vfe.MaximumNumberofThreads = devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN >= 8 ? 2 : 0;
      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;
#if GEN_GEN < 9
      vfe.BypassGatewayControl = BypassingOpenGatewayCloseGatewayprotocol;
#endif
#if GEN_GEN == 7
      vfe.GPGPUMode = 1;
#endif

      /* We are uploading duplicated copies of push constant uniforms for each
       * thread. Although the local id data needs to vary per thread, it won't
       * change for other uniform data. Unfortunately this duplication is
       * required for gen7. As of Haswell, this duplication can be avoided,
       * but this older mechanism with duplicated data continues to work.
       *
       * FINISHME: As of Haswell, we could make use of the
       * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length"
       * field to only store one copy of uniform data.
       *
       * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
       * which is described in the GPGPU_WALKER command and in the Broadwell
       * PRM Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
       * Operations => GPGPU Mode => Indirect Payload Storage.
       *
       * Note: The constant data is built in brw_upload_cs_push_constants
       * below.
       */
      vfe.URBEntryAllocationSize = GEN_GEN >= 8 ? 2 : 0;

      const uint32_t vfe_curbe_allocation =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
   }

   if (cs_prog_data->push.total.size > 0) {
      brw_batch_emit(brw, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength =
            ALIGN(cs_prog_data->push.total.size, 64);
         curbe.CURBEDataStartAddress = stage_state->push_const_offset;
      }
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);
   const struct GENX(INTERFACE_DESCRIPTOR_DATA) idd = {
      .KernelStartPointer = brw->cs.base.prog_offset,
      .SamplerStatePointer = stage_state->sampler_offset,
      .SamplerCount = DIV_ROUND_UP(stage_state->sampler_count, 4) >> 2,
      .BindingTablePointer = stage_state->bind_bo_offset,
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
      .SharedLocalMemorySize = encode_slm_size(devinfo->gen,
                                               prog_data->total_shared),
      .BarrierEnable = cs_prog_data->uses_barrier,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif
   };

   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(brw, desc, &idd);

   brw_batch_emit(brw, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress = offset;
   }
}

static const struct brw_tracked_state genX(cs_state) = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_PUSH_CONSTANT_ALLOCATION |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_SURFACES,
   },
   .emit = genX(upload_cs_state)
};

#endif

/* ---------------------------------------------------------------------- */
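As the comment in the new code explains, the CURBE holds one duplicated per-thread constant block per thread plus a single cross-thread block, rounded up to an even number of registers. A small standalone example of that vfe_curbe_allocation arithmetic, using made-up register counts:

/* Worked example of the vfe_curbe_allocation computation above.
 * The register counts below are made-up illustrative values.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))

int main(void)
{
   const uint32_t per_thread_regs   = 3;  /* push constant registers per thread */
   const uint32_t threads           = 8;  /* threads in the GPGPU thread group  */
   const uint32_t cross_thread_regs = 2;  /* shared (cross-thread) registers    */

   /* One duplicated per-thread copy for every thread, plus one cross-thread
    * block, aligned to an even number of registers.
    */
   const uint32_t curbe_allocation =
      ALIGN(per_thread_regs * threads + cross_thread_regs, 2);

   printf("CURBE allocation = %u registers\n", curbe_allocation); /* 26 */
   return 0;
}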
@@ -4234,7 +4378,7 @@ genX(init_atoms)(struct brw_context *brw)
      &brw_cs_texture_surfaces,
      &brw_cs_work_groups_surface,
      &brw_cs_samplers,
      &brw_cs_state,
      &genX(cs_state),
   };

   STATIC_ASSERT(ARRAY_SIZE(compute_atoms) <= ARRAY_SIZE(brw->compute_atoms));