/*
 * Copyright © 2011 Intel Corporation
 * SPDX-License-Identifier: MIT
 */

#include "brw_shader.h"
#include "brw_generator.h"
#include "brw_eu.h"
#include "brw_nir.h"
#include "brw_private.h"
#include "dev/intel_debug.h"

static void
brw_assign_vs_urb_setup(brw_shader &s)
{
   struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(s.prog_data);

   assert(s.stage == MESA_SHADER_VERTEX);

   /* Each attribute is 4 regs. */
   s.first_non_payload_grf += 8 * vs_prog_data->base.urb_read_length;

   assert(vs_prog_data->base.urb_read_length <= 15);

   /* Rewrite all ATTR file references to the hw grf that they land in. */
   foreach_block_and_inst(block, brw_inst, inst, s.cfg) {
      s.convert_attr_sources_to_hw_regs(inst);
   }
}

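/* Computes a packed layout for the vertex inputs when VF component packing
 * is enabled: for every attribute actually read by the shader, only the
 * components it uses are kept, load_input intrinsics are rewritten to point
 * at their packed slot/component, and the per-element component masks are
 * recorded in prog_data->vf_component_packing[].  Returns the total number
 * of packed 32-bit components, which the caller uses to size the URB read.
 */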
static unsigned
brw_nir_pack_vs_input(nir_shader *nir, struct brw_vs_prog_data *prog_data)
{
   struct vf_attribute {
      unsigned reg_offset;
      uint8_t component_mask;
      bool is_64bit:1;
      bool is_used:1;
   } attributes[MAX_HW_VERT_ATTRIB] = {};

   /* IO lowering is going to break dmat inputs into a location each, so we
    * need to reproduce the 64bit nature of the variable into each slot.
    */
   nir_foreach_shader_in_variable(var, nir) {
      const bool is_64bit = glsl_type_is_64bit(var->type);
      const uint32_t slots = glsl_count_vec4_slots(var->type, true, false);
      for (uint32_t i = 0; i < slots; i++)
         attributes[var->data.location + i].is_64bit = is_64bit;
   }

   /* First mark all used inputs */
   nir_foreach_function_impl(impl, nir) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_input)
               continue;

            assert(intrin->def.bit_size == 32);

            const struct nir_io_semantics io =
               nir_intrinsic_io_semantics(intrin);

            attributes[io.location].is_used = true;

            /* SKL PRMs, Vol 2a: Command Reference: Instructions,
             * 3DSTATE_VF_COMPONENT_PACKING:
             *
             * "Software shall enable all components (XYZW) for any and all
             * VERTEX_ELEMENTs associated with a 256-bit SURFACE_FORMAT.
             * It is INVALID to disable any components in these cases."
             *
             * Enable XYZW for any > 128-bit format.
             */
            if (nir->info.dual_slot_inputs & BITFIELD64_BIT(io.location)) {
               attributes[io.location].component_mask |= 0xff;
            } else {
               const uint8_t mask =
                  nir_component_mask(intrin->num_components) <<
                  nir_intrinsic_component(intrin);

               attributes[io.location].component_mask |= mask;
            }
         }
      }
   }

   /* SKL PRMs, Vol 2a: Command Reference: Instructions,
    * 3DSTATE_VF_COMPONENT_PACKING:
    *
    * "At least one component of one "valid" Vertex Element must be
    * enabled."
    */
   if (nir->info.inputs_read == 0) {
      if (prog_data->no_vf_slot_compaction) {
         attributes[VERT_ATTRIB_GENERIC0].is_used = true;
         attributes[VERT_ATTRIB_GENERIC0].component_mask = 0x1;
      } else if (!BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW) &&
                 !BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) &&
                 !BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) &&
                 !BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) &&
                 !BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID) &&
                 !BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID)) {
         attributes[VERT_ATTRIB_GENERIC0].is_used = true;
         attributes[VERT_ATTRIB_GENERIC0].component_mask = 0x1;
      }
   }

   /* Compute the register offsets */
   unsigned reg_offset = 0;
   unsigned vertex_element = 0;
   for (unsigned a = 0; a < ARRAY_SIZE(attributes); a++) {
      if (!attributes[a].is_used)
         continue;

      /* SKL PRMs, Vol 2a: Command Reference: Instructions,
       * 3DSTATE_VF_COMPONENT_PACKING:
       *
       * "No enable bits are provided for Vertex Elements [32-33],
       * and therefore no packing is performed on these elements (if
       * Valid, all 4 components are stored)."
       */
      if (vertex_element >= 32 ||
          (prog_data->no_vf_slot_compaction && a >= VERT_ATTRIB_GENERIC(32)))
         attributes[a].component_mask = 0xf;

      attributes[a].reg_offset = reg_offset;

      reg_offset += util_bitcount(attributes[a].component_mask);
      vertex_element++;
   }

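   /* Worked example of the packing above (and the remapping below): if
    * attribute 0 reads only .xy (mask 0x3) and attribute 1 reads only .z
    * (mask 0x4), their reg_offsets are 0 and 2 and reg_offset ends up as 3.
    * Attribute 1's .z read is then remapped to packed base 0, component 2.
    */
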
   /* Remap inputs */
   nir_foreach_function_impl(impl, nir) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_input)
               continue;

            struct nir_io_semantics io = nir_intrinsic_io_semantics(intrin);

            unsigned slot = attributes[io.location].reg_offset / 4;
            unsigned slot_component =
               attributes[io.location].reg_offset % 4 +
               util_bitcount(attributes[io.location].component_mask &
                             BITFIELD_MASK(io.high_dvec2 * 4 +
                                           nir_intrinsic_component(intrin)));

            slot += slot_component / 4;
            slot_component %= 4;

            nir_intrinsic_set_base(intrin, slot);
            nir_intrinsic_set_component(intrin, slot_component);
         }
      }
   }

   /* Generate the packing array. We start from the first application
    * attribute: VERT_ATTRIB_GENERIC0.
    */
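   /* Each vertex element gets a 4-bit component mask; eight of these masks
    * are packed per 32-bit entry of prog_data->vf_component_packing[] (see
    * the index/shift arithmetic below).
    */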
   unsigned vf_element_count = 0;
   for (unsigned a = VERT_ATTRIB_GENERIC0; a < ARRAY_SIZE(attributes) && vf_element_count < 32; a++) {
      /* Consider all attributes used when no slot compaction is active */
      if (!attributes[a].is_used && !prog_data->no_vf_slot_compaction)
         continue;

      uint32_t mask;
      /* The masks stored in attributes[a].component_mask are in terms of
       * 32-bit components, but depending on the format the HW will interpret
       * prog_data->vf_component_packing[] bits as either a 32-bit or 64-bit
       * component. So we need to only consider every other bit.
       */
      if (attributes[a].is_64bit) {
         mask = 0;
         u_foreach_bit(b, attributes[a].component_mask)
            mask |= BITFIELD_BIT(b / 2);
      } else {
         mask = attributes[a].component_mask;
      }
      /* We should have at most 4 bits enabled */
      assert((mask & ~0xfu) == 0);

      prog_data->vf_component_packing[vf_element_count / 8] |=
         mask << (4 * (vf_element_count % 8));
      vf_element_count++;
   }

   return reg_offset;
}

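/* Drives the backend compilation of a vertex shader: builds the IR from NIR,
 * emits the URB writes, optimizes, assigns payload/URB setup and registers,
 * and applies the hardware workarounds. Returns false if compilation failed.
 */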
static bool
run_vs(brw_shader &s)
{
   assert(s.stage == MESA_SHADER_VERTEX);

   s.payload_ = new brw_vs_thread_payload(s);

   brw_from_nir(&s);

   if (s.failed)
      return false;

   s.emit_urb_writes();

   brw_calculate_cfg(s);

   brw_optimize(s);

   s.assign_curb_setup();
   brw_assign_vs_urb_setup(s);

   brw_lower_3src_null_dest(s);
   brw_workaround_memory_fence_before_eot(s);
   brw_workaround_emit_dummy_mov_instruction(s);

   brw_allocate_registers(s, true /* allow_spilling */);

   brw_workaround_source_arf_before_eot(s);

   return !s.failed;
}

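/* Entry point for vertex shader compilation: lowers the NIR according to the
 * key, sets up the VS prog_data (URB layout, system value usage, optional VF
 * component packing), runs the backend, and generates the final code.
 * Returns the assembly, or NULL with params->base.error_str set on failure.
 */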
extern "C" const unsigned *
brw_compile_vs(const struct brw_compiler *compiler,
               struct brw_compile_vs_params *params)
{
   struct nir_shader *nir = params->base.nir;
   const struct brw_vs_prog_key *key = params->key;
   struct brw_vs_prog_data *prog_data = params->prog_data;
   const bool debug_enabled =
      brw_should_print_shader(nir, params->base.debug_flag ?
                                   params->base.debug_flag : DEBUG_VS);
   const unsigned dispatch_width = brw_geometry_stage_dispatch_width(compiler->devinfo);

   /* We only expect slot compaction to be disabled when using device
    * generated commands, to provide an independent 3DSTATE_VERTEX_ELEMENTS
    * programming. This should always be enabled together with VF component
    * packing to minimize the size of the payload.
    */
   assert(!key->no_vf_slot_compaction || key->vf_component_packing);

   brw_prog_data_init(&prog_data->base.base, &params->base);

   brw_nir_apply_key(nir, compiler, &key->base, dispatch_width);

   prog_data->inputs_read = nir->info.inputs_read;
   prog_data->double_inputs_read = nir->info.vs.double_inputs;
   prog_data->no_vf_slot_compaction = key->no_vf_slot_compaction;

   brw_nir_lower_vs_inputs(nir);
   brw_nir_lower_vue_outputs(nir);

   memset(prog_data->vf_component_packing, 0,
          sizeof(prog_data->vf_component_packing));
   unsigned nr_packed_regs = 0;
   if (key->vf_component_packing)
      nr_packed_regs = brw_nir_pack_vs_input(nir, prog_data);

   brw_postprocess_nir(nir, compiler, debug_enabled,
                       key->base.robust_flags);

   prog_data->base.clip_distance_mask =
      ((1 << nir->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << nir->info.cull_distance_array_size) - 1) <<
      nir->info.clip_distance_array_size;

   unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);
   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute. So, add an extra slot.
    */
   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID)) {
      nr_attribute_slots++;
   }

   /* gl_DrawID and IsIndexedDraw share their very own vec4 */
   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW)) {
      nr_attribute_slots++;
   }

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW))
      prog_data->uses_is_indexed_draw = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX))
      prog_data->uses_firstvertex = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE))
      prog_data->uses_baseinstance = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
      prog_data->uses_vertexid = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID))
      prog_data->uses_instanceid = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID))
      prog_data->uses_drawid = true;

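   /* urb_read_length is expressed in 256-bit units (8 dwords, i.e. two vec4
    * slots per unit), which is why the packed dword count is divided by 8
    * and the slot count by 2 below.
    */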
   unsigned nr_attribute_regs;
   if (key->vf_component_packing) {
      prog_data->base.urb_read_length = DIV_ROUND_UP(nr_packed_regs, 8);
      nr_attribute_regs = nr_packed_regs;
   } else {
      prog_data->base.urb_read_length = DIV_ROUND_UP(nr_attribute_slots, 2);
      nr_attribute_regs = 4 * nr_attribute_slots;
   }

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(DIV_ROUND_UP(nr_attribute_regs, 4),
           (unsigned)prog_data->base.vue_map.num_slots);

   prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_VERTEX);
   }

   prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_SIMD8;

   brw_shader v(compiler, &params->base, &key->base,
                &prog_data->base.base, nir, dispatch_width,
                params->base.stats != NULL, debug_enabled);
   if (!run_vs(v)) {
      params->base.error_str =
         ralloc_strdup(params->base.mem_ctx, v.fail_msg);
      return NULL;
   }

   assert(v.payload().num_regs % reg_unit(compiler->devinfo) == 0);
   prog_data->base.base.dispatch_grf_start_reg =
      v.payload().num_regs / reg_unit(compiler->devinfo);
   prog_data->base.base.grf_used = v.grf_used;

   brw_generator g(compiler, &params->base,
                   &prog_data->base.base,
                   MESA_SHADER_VERTEX);
   if (unlikely(debug_enabled)) {
      const char *debug_name =
         ralloc_asprintf(params->base.mem_ctx, "%s vertex shader %s",
                         nir->info.label ? nir->info.label :
                                           "unnamed",
                         nir->info.name);

      g.enable_debug(debug_name);
   }
   g.generate_code(v.cfg, dispatch_width, v.shader_stats,
                   v.performance_analysis.require(), params->base.stats);
   g.add_const_data(nir->constant_data, nir->constant_data_size);

   return g.get_assembly();
}