broadcom: follow version naming convention

We usually name the functions that depend on hardware version as
v3d<version>_foo.

Keep the same convention in the QPU code and in lower_image_load_store,
which makes it easier to search for versioned functions.
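
As an illustration, the convention reads like this (hypothetical helper
name, not one from the tree):

   #include <stdbool.h>

   struct v3d_compile;   /* opaque here; illustrative only */

   /* Old style: hardware version appended as a suffix. */
   static bool v3d_lower_thing_v42(struct v3d_compile *c);

   /* New style: version folded into the v3d<version>_ prefix, so a
    * search for "v3d42_" lists every V3D 4.2 variant at once. */
   static bool v3d42_lower_thing(struct v3d_compile *c);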

Acked-by: Iago Toral Quiroga <itoral@igalia.com>
Reviewed-by: Jose Maria Casanova Crespo <jmcasanova@igalia.com>
Signed-off-by: Juan A. Suarez Romero <jasuarez@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30000>
Author: Juan A. Suarez Romero
Date: 2024-07-02 13:31:35 +02:00
Committed by: Marge Bot
Parent: a10957adb6
Commit: 4581bf595b

2 changed files with 44 additions and 44 deletions

--- a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
+++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c
@@ -274,7 +274,7 @@ pack_xbit(nir_builder *b, nir_def *color,
 }
 
 static bool
-v3d_nir_lower_image_store_v42(nir_builder *b, nir_intrinsic_instr *instr)
+v3d42_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
 {
    enum pipe_format format = nir_intrinsic_format(instr);
    assert(format != PIPE_FORMAT_NONE);
@@ -351,7 +351,7 @@ v3d_nir_lower_image_store_v42(nir_builder *b, nir_intrinsic_instr *instr)
 }
 
 static bool
-v3d_nir_lower_image_store_v71(nir_builder *b, nir_intrinsic_instr *instr)
+v3d71_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
 {
    enum pipe_format format = nir_intrinsic_format(instr);
    assert(format != PIPE_FORMAT_NONE);
@@ -437,9 +437,9 @@ v3d_nir_lower_image_load_store_cb(nir_builder *b,
       return v3d_nir_lower_image_load(b, intr);
    case nir_intrinsic_image_store:
       if (c->devinfo->ver >= 71)
-         return v3d_nir_lower_image_store_v71(b, intr);
+         return v3d71_nir_lower_image_store(b, intr);
       else
-         return v3d_nir_lower_image_store_v42(b, intr);
+         return v3d42_nir_lower_image_store(b, intr);
       break;
    default:
       return false;
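
The callback hunk above keeps the dispatch-by-version pattern intact;
only the helper names change. A minimal standalone sketch of that
pattern (illustrative types and helpers, not the actual Mesa code):

   #include <stdbool.h>

   struct v3d_device_info { int ver; };   /* stand-in for the real devinfo */

   static bool v3d42_lower_store(void) { return true; }   /* V3D 4.2 path */
   static bool v3d71_lower_store(void) { return true; }   /* V3D 7.1 path */

   /* Same branch shape as v3d_nir_lower_image_load_store_cb(): pick
    * the v3d<version>_ helper from the hardware version at lowering
    * time. */
   static bool
   lower_store(const struct v3d_device_info *devinfo)
   {
      if (devinfo->ver >= 71)
         return v3d71_lower_store();
      else
         return v3d42_lower_store();
   }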

--- a/src/broadcom/qpu/qpu_pack.c
+++ b/src/broadcom/qpu/qpu_pack.c
@@ -128,7 +128,7 @@
 #define SMIMM_C .small_imm_c = true
 #define SMIMM_D .small_imm_d = true
 
-static const struct v3d_qpu_sig v42_sig_map[] = {
+static const struct v3d_qpu_sig v3d42_sig_map[] = {
    /* MISC phys R5 */
    [0] = { },
    [1] = { THRSW, },
@@ -161,7 +161,7 @@ static const struct v3d_qpu_sig v42_sig_map[] = {
 };
 
-static const struct v3d_qpu_sig v71_sig_map[] = {
+static const struct v3d_qpu_sig v3d71_sig_map[] = {
    /* MISC phys RF0 */
    [0] = { },
    [1] = { THRSW, },
@@ -201,17 +201,17 @@ v3d_qpu_sig_unpack(const struct v3d_device_info *devinfo,
                    uint32_t packed_sig,
                    struct v3d_qpu_sig *sig)
 {
-   if (packed_sig >= ARRAY_SIZE(v42_sig_map))
+   if (packed_sig >= ARRAY_SIZE(v3d42_sig_map))
       return false;
 
    if (devinfo->ver >= 71)
-      *sig = v71_sig_map[packed_sig];
+      *sig = v3d71_sig_map[packed_sig];
    else
-      *sig = v42_sig_map[packed_sig];
+      *sig = v3d42_sig_map[packed_sig];
 
    /* Signals with zeroed unpacked contents after element 0 are reserved. */
    return (packed_sig == 0 ||
-           memcmp(sig, &v42_sig_map[0], sizeof(*sig)) != 0);
+           memcmp(sig, &v3d42_sig_map[0], sizeof(*sig)) != 0);
 }
 
 bool
@@ -222,11 +222,11 @@ v3d_qpu_sig_pack(const struct v3d_device_info *devinfo,
    static const struct v3d_qpu_sig *map;
 
    if (devinfo->ver >= 71)
-      map = v71_sig_map;
+      map = v3d71_sig_map;
    else
-      map = v42_sig_map;
+      map = v3d42_sig_map;
 
-   for (int i = 0; i < ARRAY_SIZE(v42_sig_map); i++) {
+   for (int i = 0; i < ARRAY_SIZE(v3d42_sig_map); i++) {
       if (memcmp(&map[i], sig, sizeof(*sig)) == 0) {
          *packed_sig = i;
          return true;
@@ -454,7 +454,7 @@ struct opcode_desc {
    uint8_t last_ver;
 };
 
-static const struct opcode_desc add_ops_v42[] = {
+static const struct opcode_desc v3d42_add_ops[] = {
    /* FADD is FADDNF depending on the order of the mux_a/mux_b. */
    { 0, 47, .mux.b_mask = ANYMUX, .mux.a_mask = ANYMUX, V3D_QPU_A_FADD },
    { 0, 47, .mux.b_mask = ANYMUX, .mux.a_mask = ANYMUX, V3D_QPU_A_FADDNF },
@@ -558,7 +558,7 @@ static const struct opcode_desc add_ops_v42[] = {
    { 252, 252, .mux.b_mask = OP_RANGE(4, 6), .mux.a_mask = ANYMUX, V3D_QPU_A_UTOF },
 };
 
-static const struct opcode_desc mul_ops_v42[] = {
+static const struct opcode_desc v3d42_mul_ops[] = {
    { 1, 1, .mux.b_mask = ANYMUX, .mux.a_mask = ANYMUX, V3D_QPU_M_ADD },
    { 2, 2, .mux.b_mask = ANYMUX, .mux.a_mask = ANYMUX, V3D_QPU_M_SUB },
    { 3, 3, .mux.b_mask = ANYMUX, .mux.a_mask = ANYMUX, V3D_QPU_M_UMUL24 },
@@ -575,14 +575,14 @@ static const struct opcode_desc mul_ops_v42[] = {
 
 /* Note that it would have been possible to define all the add/mul opcodes in
  * just one table, using the first_ver/last_ver. But taking into account that
- * for v71 there were a lot of changes, it was more tidy this way. Also right
- * now we are doing a linear search on those tables, so this maintains the
- * tables smaller.
+ * for v3d71 there were a lot of changes, it was more tidy this way. Also
+ * right now we are doing a linear search on those tables, so this maintains
+ * the tables smaller.
  *
  * Just in case we merge the tables, we define the first_ver as 71 for those
- * opcodes that changed on v71
+ * opcodes that changed on v3d71
  */
-static const struct opcode_desc add_ops_v71[] = {
+static const struct opcode_desc v3d71_add_ops[] = {
    /* FADD is FADDNF depending on the order of the raddr_a/raddr_b. */
    { 0, 47, .raddr_mask = ANYOPMASK, V3D_QPU_A_FADD },
    { 0, 47, .raddr_mask = ANYOPMASK, V3D_QPU_A_FADDNF },
@@ -744,7 +744,7 @@ static const struct opcode_desc add_ops_v71[] = {
    { 254, 254, .raddr_mask = ANYOPMASK, V3D_QPU_A_SHUFFLE, 71 },
 };
 
-static const struct opcode_desc mul_ops_v71[] = {
+static const struct opcode_desc v3d71_mul_ops[] = {
    /* For V3D 7.1, second mask field would be ignored */
    { 1, 1, .raddr_mask = ANYOPMASK, V3D_QPU_M_ADD, 71 },
    { 2, 2, .raddr_mask = ANYOPMASK, V3D_QPU_M_SUB, 71 },
@@ -1012,8 +1012,8 @@ v3d42_qpu_add_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
       map_op = (map_op - 253 + 245);
 
    const struct opcode_desc *desc =
-      lookup_opcode_from_packed(devinfo, add_ops_v42,
-                                ARRAY_SIZE(add_ops_v42),
+      lookup_opcode_from_packed(devinfo, v3d42_add_ops,
+                                ARRAY_SIZE(v3d42_add_ops),
                                 map_op, mux_a, mux_b, 0);
 
    if (!desc)
@@ -1161,8 +1161,8 @@ v3d71_qpu_add_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
    const struct opcode_desc *desc =
       lookup_opcode_from_packed(devinfo,
-                                add_ops_v71,
-                                ARRAY_SIZE(add_ops_v71),
+                                v3d71_add_ops,
+                                ARRAY_SIZE(v3d71_add_ops),
                                 map_op, 0, 0,
                                 raddr_b);
 
    if (!desc)
@@ -1260,7 +1260,7 @@ v3d71_qpu_add_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
    case V3D_QPU_A_VFMIN:
    case V3D_QPU_A_VFMAX:
-      unreachable("pending v71 update");
+      unreachable("pending v3d71 update");
       if (!v3d_qpu_float16_unpack_unpack(op & 0x7,
                                          &instr->alu.add.a.unpack)) {
          return false;
       }
@@ -1346,8 +1346,8 @@ v3d42_qpu_mul_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
 {
    const struct opcode_desc *desc =
       lookup_opcode_from_packed(devinfo,
-                                mul_ops_v42,
-                                ARRAY_SIZE(mul_ops_v42),
+                                v3d42_mul_ops,
+                                ARRAY_SIZE(v3d42_mul_ops),
                                 op, mux_a, mux_b, 0);
    if (!desc)
       return false;
@@ -1420,8 +1420,8 @@ v3d71_qpu_mul_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
 {
    const struct opcode_desc *desc =
       lookup_opcode_from_packed(devinfo,
-                                mul_ops_v71,
-                                ARRAY_SIZE(mul_ops_v71),
+                                v3d71_mul_ops,
+                                ARRAY_SIZE(v3d71_mul_ops),
                                 op, 0, 0,
                                 raddr_d);
    if (!desc)
@@ -1457,7 +1457,7 @@ v3d71_qpu_mul_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst
       break;
 
    case V3D_QPU_M_VFMUL:
-      unreachable("pending v71 update");
+      unreachable("pending v3d71 update");
       instr->alu.mul.output_pack = V3D_QPU_PACK_NONE;
 
       if (!v3d_qpu_float16_unpack_unpack(((op & 0x7) - 4) & 7,
@@ -1532,8 +1532,8 @@ v3d42_qpu_add_pack(const struct v3d_device_info *devinfo,
    uint32_t mux_b = instr->alu.add.b.mux;
    int nsrc = v3d_qpu_add_op_num_src(instr->alu.add.op);
 
    const struct opcode_desc *desc =
-      lookup_opcode_from_instr(devinfo, add_ops_v42,
-                               ARRAY_SIZE(add_ops_v42),
+      lookup_opcode_from_instr(devinfo, v3d42_add_ops,
+                               ARRAY_SIZE(v3d42_add_ops),
                                instr->alu.add.op);
    if (!desc)
@@ -1747,8 +1747,8 @@ v3d71_qpu_add_pack(const struct v3d_device_info *devinfo,
    int nsrc = v3d_qpu_add_op_num_src(instr->alu.add.op);
 
    const struct opcode_desc *desc =
-      lookup_opcode_from_instr(devinfo, add_ops_v71,
-                               ARRAY_SIZE(add_ops_v71),
+      lookup_opcode_from_instr(devinfo, v3d71_add_ops,
+                               ARRAY_SIZE(v3d71_add_ops),
                                instr->alu.add.op);
    if (!desc)
       return false;
@@ -2010,8 +2010,8 @@ v3d42_qpu_mul_pack(const struct v3d_device_info *devinfo,
    int nsrc = v3d_qpu_mul_op_num_src(instr->alu.mul.op);
 
    const struct opcode_desc *desc =
-      lookup_opcode_from_instr(devinfo, mul_ops_v42,
-                               ARRAY_SIZE(mul_ops_v42),
+      lookup_opcode_from_instr(devinfo, v3d42_mul_ops,
+                               ARRAY_SIZE(v3d42_mul_ops),
                                instr->alu.mul.op);
 
    if (!desc)
@@ -2124,8 +2124,8 @@ v3d71_qpu_mul_pack(const struct v3d_device_info *devinfo,
    int nsrc = v3d_qpu_mul_op_num_src(instr->alu.mul.op);
 
    const struct opcode_desc *desc =
-      lookup_opcode_from_instr(devinfo, mul_ops_v71,
-                               ARRAY_SIZE(mul_ops_v71),
+      lookup_opcode_from_instr(devinfo, v3d71_mul_ops,
+                               ARRAY_SIZE(v3d71_mul_ops),
                                instr->alu.mul.op);
    if (!desc)
       return false;
@@ -2183,7 +2183,7 @@ v3d71_qpu_mul_pack(const struct v3d_device_info *devinfo,
    }
 
    case V3D_QPU_M_VFMUL: {
-      unreachable("pending v71 update");
+      unreachable("pending v3d71 update");
       uint32_t packed;
 
      if (instr->alu.mul.output_pack != V3D_QPU_PACK_NONE)
@@ -2289,8 +2289,8 @@ v3d_qpu_instr_unpack_alu(const struct v3d_device_info *devinfo,
    if (devinfo->ver <= 71) {
       /*
-       * For v71 this will be set on add/mul unpack, as raddr are now
-       * part of v3d_qpu_input
+       * For v3d71 this will be set on add/mul unpack, as raddr are
+       * now part of v3d_qpu_input
        */
       instr->raddr_a = QPU_GET_FIELD(packed_instr, V3D_QPU_RADDR_A);
       instr->raddr_b = QPU_GET_FIELD(packed_instr, V3D_QPU_RADDR_B);
    }
@@ -2382,8 +2382,8 @@ v3d_qpu_instr_pack_alu(const struct v3d_device_info *devinfo,
    if (instr->type == V3D_QPU_INSTR_TYPE_ALU) {
       if (devinfo->ver < 71) {
          /*
-          * For v71 this will be set on add/mul unpack, as raddr are now
-          * part of v3d_qpu_input
+          * For v3d71 this will be set on add/mul unpack, as
+          * raddr are now part of v3d_qpu_input
           */
          *packed_instr |= QPU_SET_FIELD(instr->raddr_a, V3D_QPU_RADDR_A);
         *packed_instr |= QPU_SET_FIELD(instr->raddr_b, V3D_QPU_RADDR_B);
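
A note on the sig-map hunks above: v3d_qpu_sig_pack() takes its loop
bound from the v3d42 table even when the v3d71 table was selected,
which is only safe while both tables have the same number of entries.
A compact standalone sketch of that select-then-linear-search pattern
(illustrative signal struct, not the real struct v3d_qpu_sig):

   #include <stdbool.h>
   #include <stdint.h>
   #include <string.h>

   #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

   /* Illustrative stand-in for struct v3d_qpu_sig. */
   struct sig { uint8_t thrsw, ldunif; };

   static const struct sig v3d42_map[] = { {0, 0}, {1, 0}, {0, 1} };
   static const struct sig v3d71_map[] = { {0, 0}, {1, 0}, {1, 1} };

   /* Same shape as v3d_qpu_sig_pack(): pick the per-version table,
    * then linear-search it for a byte-for-byte match. */
   static bool
   pack_sig(int ver, const struct sig *sig, uint32_t *packed_sig)
   {
      const struct sig *map = (ver >= 71) ? v3d71_map : v3d42_map;

      for (uint32_t i = 0; i < ARRAY_SIZE(v3d42_map); i++) {
         if (memcmp(&map[i], sig, sizeof(*sig)) == 0) {
            *packed_sig = i;
            return true;
         }
      }
      return false;
   }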