amd: replace most u_bit_consecutive* with BITFIELD_MASK/RANGE

Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35346>
Author: Marek Olšák, 2025-05-30 08:57:31 -04:00; committed by Marge Bot
parent feff86220c
commit c3034fa82c
21 changed files with 49 additions and 49 deletions
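
The change is mechanical: u_bit_consecutive(0, n) becomes BITFIELD_MASK(n), u_bit_consecutive(start, count) with a nonzero start becomes BITFIELD_RANGE(start, count), and the u_bit_consecutive64 calls map to the BITFIELD64_* variants. A minimal sketch of the equivalence, assuming the macros from util/macros.h and the inline helpers from util/bitscan.h:

#include <assert.h>
#include "util/bitscan.h" /* u_bit_consecutive, u_bit_consecutive64 */
#include "util/macros.h"  /* BITFIELD_MASK, BITFIELD_RANGE and 64-bit variants */

static void macro_equivalence(void)
{
   /* Low n bits set. */
   assert(BITFIELD_MASK(5) == u_bit_consecutive(0, 5));          /* 0x1f */
   /* count bits set, starting at bit start. */
   assert(BITFIELD_RANGE(4, 4) == u_bit_consecutive(4, 4));      /* 0xf0 */
   /* 64-bit variants follow the same pattern. */
   assert(BITFIELD64_MASK(40) == u_bit_consecutive64(0, 40));
   assert(BITFIELD64_RANGE(8, 48) == u_bit_consecutive64(8, 48));
}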

View file

@@ -2602,5 +2602,5 @@ uint32_t ac_gfx103_get_cu_mask_ps(const struct radeon_info *info)
* increase clocks for busy CUs. In the future, we might disable or enable this
* tweak only for certain apps.
*/
-return u_bit_consecutive(0, info->min_good_cu_per_sa);
+return BITFIELD_MASK(info->min_good_cu_per_sa);
}

View file

@@ -1129,7 +1129,7 @@ static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *
return r;
assert(AddrBaseSwizzleOut.tileSwizzle <=
-u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
+BITFIELD_MASK(sizeof(surf->tile_swizzle) * 8));
surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
}
return 0;
@@ -1691,7 +1691,7 @@ static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *i
if (r != ADDR_OK)
return r;
-assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
+assert(xout.tileSwizzle <= BITFIELD_MASK(sizeof(surf->tile_swizzle) * 8));
surf->fmask_tile_swizzle = xout.tileSwizzle;
}
}
@@ -2260,7 +2260,7 @@ static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_
if (ret != ADDR_OK)
return ret;
-assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
+assert(xout.pipeBankXor <= BITFIELD_MASK(sizeof(surf->tile_swizzle) * 8));
surf->tile_swizzle = xout.pipeBankXor;
/* Gfx11 should shift it by 10 bits instead of 8, and drivers already shift it by 8 bits,
@@ -2462,7 +2462,7 @@ static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_
if (ret != ADDR_OK)
return ret;
-assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
+assert(xout.pipeBankXor <= BITFIELD_MASK(sizeof(surf->fmask_tile_swizzle) * 8));
surf->fmask_tile_swizzle = xout.pipeBankXor;
}
}
@@ -3305,7 +3305,7 @@ static bool gfx12_compute_miptree(struct ac_addrlib *addrlib, const struct radeo
if (ret != ADDR_OK)
return false;
-assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8 + 2));
+assert(xout.pipeBankXor <= BITFIELD_MASK(sizeof(surf->tile_swizzle) * 8 + 2));
surf->tile_swizzle = xout.pipeBankXor;
}
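
The asserts above all check the same invariant: the value addrlib returns must fit the narrow field it is stored into, and BITFIELD_MASK(sizeof(field) * 8) is the largest value that field can hold. A sketch of the pattern, assuming the uint8_t width of ac_surface's tile_swizzle field:

#include <assert.h>
#include <stdint.h>
#include "util/macros.h" /* BITFIELD_MASK */

static void store_swizzle(uint8_t *tile_swizzle, uint32_t value)
{
   /* sizeof(*tile_swizzle) * 8 == 8, so the mask is 0xff: the largest
    * value a uint8_t can hold; anything bigger would be truncated. */
   assert(value <= BITFIELD_MASK(sizeof(*tile_swizzle) * 8));
   *tile_swizzle = value;
}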

View file

@@ -295,7 +295,7 @@ ms_store_arrayed_output(nir_builder *b,
bool hi_16b = io_sem.high_16bits;
bool lo_16b = !hi_16b && store_val->bit_size == 16;
-unsigned mapped_location = util_bitcount64(out->mask & u_bit_consecutive64(0, io_sem.location));
+unsigned mapped_location = util_bitcount64(out->mask & BITFIELD64_MASK(io_sem.location));
unsigned num_outputs = util_bitcount64(out->mask);
unsigned const_off = out->addr + component_offset * 4 + (hi_16b ? 2 : 0);
@@ -414,7 +414,7 @@ ms_load_arrayed_output(nir_builder *b,
unsigned const_off = out->addr + component_offset * 4;
/* Use compacted location instead of the original semantic location. */
-unsigned mapped_location = util_bitcount64(out->mask & u_bit_consecutive64(0, location));
+unsigned mapped_location = util_bitcount64(out->mask & BITFIELD64_MASK(location));
nir_def *base_addr = ms_arrayed_output_base_addr(b, arr_index, mapped_location, num_outputs);
nir_def *base_addr_off = nir_imul_imm(b, base_offset, 16);
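
The mapped_location above is a compaction index: counting the set bits of out->mask below a semantic location yields that output's packed slot, so storage stays dense no matter which locations the shader writes. A sketch, assuming util_bitcount64 and BITFIELD64_MASK from Mesa's util headers:

#include <stdint.h>
#include "util/bitscan.h" /* util_bitcount64 */
#include "util/macros.h"  /* BITFIELD64_MASK */

/* Packed slot = number of enabled outputs at lower semantic locations. */
static unsigned packed_slot(uint64_t out_mask, unsigned location)
{
   return util_bitcount64(out_mask & BITFIELD64_MASK(location));
}

/* Example: for mask 0b101001 (locations 0, 3, 5 enabled), location 5 has
 * two enabled outputs below it, so it lands in packed slot 2. */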

View file

@@ -1180,7 +1180,7 @@ static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
case nir_op_insert_u16: {
unsigned size = instr->op == nir_op_insert_u8 ? 8 : 16;
LLVMValueRef offset = LLVMConstInt(LLVMTypeOf(src[0]), nir_src_as_uint(instr->src[1].src) * size, false);
-LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), u_bit_consecutive(0, size), false);
+LLVMValueRef mask = LLVMConstInt(LLVMTypeOf(src[0]), BITFIELD_MASK(size), false);
result = LLVMBuildShl(ctx->ac.builder, LLVMBuildAnd(ctx->ac.builder, src[0], mask, ""), offset, "");
break;
}

View file

@@ -1183,14 +1183,14 @@ gfx8_get_fast_clear_parameters(struct radv_device *device, const struct radv_ima
if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
/* Use the maximum value for clamping the clear color. */
-int max = u_bit_consecutive(0, desc->channel[i].size - 1);
+int max = BITFIELD_MASK(desc->channel[i].size - 1);
values[i] = clear_value->int32[i] != 0;
if (clear_value->int32[i] != 0 && MIN2(clear_value->int32[i], max) != max)
return;
} else if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
/* Use the maximum value for clamping the clear color. */
-unsigned max = u_bit_consecutive(0, desc->channel[i].size);
+unsigned max = BITFIELD_MASK(desc->channel[i].size);
values[i] = clear_value->uint32[i] != 0U;
if (clear_value->uint32[i] != 0U && MIN2(clear_value->uint32[i], max) != max)
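
For pure-integer formats, "max" above is the largest representable channel value: BITFIELD_MASK(size - 1) for signed channels (sign bit excluded) and BITFIELD_MASK(size) for unsigned ones. The MIN2 comparison then accepts only clear values of 0 or at least the channel maximum, since the gfx8 DCC fast-clear codes encode each channel as 0 or 1. A worked example for an 8-bit channel (a sketch; BITFIELD_MASK and MIN2 as in Mesa's util macros):

#include <assert.h>
#include <stdint.h>
#include "util/macros.h" /* BITFIELD_MASK, MIN2 */

static void channel_max_example(void)
{
   int      smax = BITFIELD_MASK(8 - 1); /* signed max:   0x7f == INT8_MAX  */
   unsigned umax = BITFIELD_MASK(8);     /* unsigned max: 0xff == UINT8_MAX */
   assert(smax == INT8_MAX && umax == UINT8_MAX);

   /* MIN2(value, max) == max only when value >= max, i.e. the clear value
    * is (at least) the channel maximum; mid-range values bail out above. */
   assert(MIN2(127, smax) == smax);
   assert(MIN2(5, smax) != smax);
}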

View file

@@ -225,7 +225,7 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
const struct ac_vtx_format_info *vtx_info =
ac_get_vtx_format_info(s->gpu_info->gfx_level, s->gpu_info->family, attrib_format);
const unsigned binding_index = s->info->vs.use_per_attribute_vb_descs ? location : attrib_binding;
-const unsigned desc_index = util_bitcount(s->info->vs.vb_desc_usage_mask & u_bit_consecutive(0, binding_index));
+const unsigned desc_index = util_bitcount(s->info->vs.vb_desc_usage_mask & BITFIELD_MASK(binding_index));
nir_def *vertex_buffers_arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
nir_def *vertex_buffers = nir_pack_64_2x32_split(b, vertex_buffers_arg, nir_imm_int(b, s->gpu_info->address32_hi));

View file

@@ -219,7 +219,7 @@ radv_device_init_vs_prologs(struct radv_device *device)
for (unsigned num_attributes = 1; num_attributes <= 16; num_attributes++) {
for (unsigned count = 1; count <= num_attributes; count++) {
for (unsigned start = 0; start <= (num_attributes - count); start++) {
-key.instance_rate_inputs = u_bit_consecutive(start, count);
+key.instance_rate_inputs = BITFIELD_RANGE(start, count);
key.num_attributes = num_attributes;
struct radv_shader_part *prolog = radv_create_vs_prolog(device, &key);

View file

@@ -206,7 +206,7 @@ gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *inst
uint32_t size = instr->num_components * (instr->def.bit_size / 32u);
if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) {
-info->inline_push_constant_mask |= u_bit_consecutive64(start, size);
+info->inline_push_constant_mask |= BITFIELD64_RANGE(start, size);
return;
}
}

View file

@@ -738,7 +738,7 @@ is_lower_half_zero(UNUSED struct hash_table *ht, const nir_alu_instr *instr,
return false;
for (unsigned i = 0; i < num_components; i++) {
-uint64_t low_bits = u_bit_consecutive64(0, nir_src_bit_size(instr->src[src].src) / 2);
+uint64_t low_bits = BITFIELD64_MASK(nir_src_bit_size(instr->src[src].src) / 2);
if ((nir_src_comp_as_uint(instr->src[src].src, swizzle[i]) & low_bits) != 0)
return false;
}
@@ -776,7 +776,7 @@ is_lower_half_negative_one(UNUSED struct hash_table *ht, const nir_alu_instr *in
return false;
for (unsigned i = 0; i < num_components; i++) {
-uint64_t low_bits = u_bit_consecutive64(0, nir_src_bit_size(instr->src[src].src) / 2);
+uint64_t low_bits = BITFIELD64_MASK(nir_src_bit_size(instr->src[src].src) / 2);
if ((nir_src_comp_as_uint(instr->src[src].src, swizzle[i]) & low_bits) != low_bits)
return false;
}

View file

@@ -244,7 +244,7 @@ ntr_tgsi_var_usage_mask(const struct nir_variable *var)
if (num_components == 0) /* structs */
num_components = 4;
-return u_bit_consecutive(var->data.location_frac, num_components);
+return BITFIELD_RANGE(var->data.location_frac, num_components);
}
static struct ureg_dst
@@ -277,7 +277,7 @@ ntr_output_decl(struct ntr_compile *c, nir_intrinsic_instr *instr, uint32_t *fra
tgsi_get_gl_varying_semantic(semantics.location, true, &semantic_name, &semantic_index);
-uint32_t usage_mask = u_bit_consecutive(*frac, instr->num_components);
+uint32_t usage_mask = BITFIELD_RANGE(*frac, instr->num_components);
uint32_t gs_streams = semantics.gs_streams;
for (int i = 0; i < 4; i++) {
if (!(usage_mask & (1 << i)))
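
usage_mask here is a per-component writemask: location_frac (or *frac) is the first component written and num_components says how many follow, so BITFIELD_RANGE turns the pair directly into TGSI's xyzw bits. A small illustration:

#include <assert.h>
#include "util/macros.h" /* BITFIELD_RANGE */

static void usage_mask_example(void)
{
   unsigned frac = 1, num_components = 2; /* a vec2 starting at .y */
   unsigned usage_mask = BITFIELD_RANGE(frac, num_components);
   assert(usage_mask == 0x6); /* 0b0110: the y and z bits */
}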

View file

@@ -287,7 +287,7 @@ static void si_decompress_depth(struct si_context *sctx, struct si_texture *tex,
{
unsigned inplace_planes = 0;
unsigned copy_planes = 0;
-unsigned level_mask = u_bit_consecutive(first_level, last_level - first_level + 1);
+unsigned level_mask = BITFIELD_RANGE(first_level, last_level - first_level + 1);
unsigned levels_z = 0;
unsigned levels_s = 0;
@@ -443,7 +443,7 @@ static void si_blit_decompress_color(struct si_context *sctx, struct si_texture
{
void *custom_blend;
unsigned layer, checked_last_layer, max_layer;
-unsigned level_mask = u_bit_consecutive(first_level, last_level - first_level + 1);
+unsigned level_mask = BITFIELD_RANGE(first_level, last_level - first_level + 1);
/* No decompression is ever needed on Gfx12. */
assert(sctx->gfx_level < GFX12);
@@ -763,7 +763,7 @@ static void si_check_render_feedback(struct si_context *sctx)
struct si_shader_info *info = &sctx->shaders[i].cso->info;
si_check_render_feedback_images(sctx, &sctx->images[i],
-u_bit_consecutive(0, info->base.num_images));
+BITFIELD_MASK(info->base.num_images));
si_check_render_feedback_textures(sctx, &sctx->samplers[i],
info->base.textures_used);
}
@@ -857,7 +857,7 @@ void gfx6_decompress_textures(struct si_context *sctx, unsigned shader_mask)
sctx->b.flush(&sctx->b, NULL, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW);
}
-if (shader_mask & u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS)) {
+if (shader_mask & BITFIELD_MASK(SI_NUM_GRAPHICS_SHADERS)) {
if (sctx->uses_bindless_samplers) {
si_decompress_resident_color_textures(sctx);
si_decompress_resident_depth_textures(sctx);
@@ -895,7 +895,7 @@ void gfx11_decompress_textures(struct si_context *sctx, unsigned shader_mask)
}
/* Decompress bindless depth textures and disable DCC for render feedback. */
-if (shader_mask & u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS)) {
+if (shader_mask & BITFIELD_MASK(SI_NUM_GRAPHICS_SHADERS)) {
if (sctx->uses_bindless_samplers)
si_decompress_resident_depth_textures(sctx);
@@ -1385,7 +1385,7 @@ static bool si_generate_mipmap(struct pipe_context *ctx, struct pipe_resource *t
/* Clear dirty_level_mask for the levels that will be overwritten. */
assert(base_level < last_level);
-stex->dirty_level_mask &= ~u_bit_consecutive(base_level + 1, last_level - base_level);
+stex->dirty_level_mask &= ~BITFIELD_RANGE(base_level + 1, last_level - base_level);
sctx->generate_mipmap_for_depth = stex->is_depth;
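
The mip-level masks in this file follow one pattern: an inclusive range [first_level, last_level] needs last_level - first_level + 1 bits, so BITFIELD_RANGE(first_level, last_level - first_level + 1) selects exactly those levels (and si_generate_mipmap clears the levels it overwrites by inverting such a mask). With illustrative values:

#include <assert.h>
#include "util/macros.h" /* BITFIELD_RANGE */

static void level_mask_example(void)
{
   unsigned first_level = 2, last_level = 4;
   /* Levels 2, 3 and 4: three bits starting at bit 2. */
   unsigned level_mask = BITFIELD_RANGE(first_level, last_level - first_level + 1);
   assert(level_mask == 0x1c); /* 0b11100 */
}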

View file

@@ -187,7 +187,7 @@ static bool gfx8_get_dcc_clear_parameters(struct si_screen *sscreen, enum pipe_f
if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
/* Use the maximum value for clamping the clear color. */
-int max = u_bit_consecutive(0, desc->channel[i].size - 1);
+int max = BITFIELD_MASK(desc->channel[i].size - 1);
values[i] = color->i[i] != 0;
if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
@@ -195,7 +195,7 @@ static bool gfx8_get_dcc_clear_parameters(struct si_screen *sscreen, enum pipe_f
} else if (desc->channel[i].pure_integer &&
desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
/* Use the maximum value for clamping the clear color. */
-unsigned max = u_bit_consecutive(0, desc->channel[i].size);
+unsigned max = BITFIELD_MASK(desc->channel[i].size);
values[i] = color->ui[i] != 0U;
if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)

View file

@@ -64,7 +64,7 @@ static void si_create_compute_state_async(void *job, void *gdata, int thread_ind
}
/* Images in user SGPRs. */
-unsigned non_fmask_images = u_bit_consecutive(0, sel->nir->info.num_images);
+unsigned non_fmask_images = BITFIELD_MASK(sel->nir->info.num_images);
/* Remove images with FMASK from the bitmask. We only care about the first
* 3 anyway, so we can take msaa_images[0] and ignore the rest.
@@ -859,7 +859,7 @@ static bool si_check_needs_implicit_sync(struct si_context *sctx, uint32_t usage
}
struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
-mask = u_bit_consecutive(0, info->base.num_images) & images->enabled_mask;
+mask = BITFIELD_MASK(info->base.num_images) & images->enabled_mask;
while (mask) {
int i = u_bit_scan(&mask);

View file

@@ -775,10 +775,10 @@ static void si_dump_descriptors(struct si_context *sctx, gl_shader_stage stage,
unsigned enabled_images;
if (info) {
-enabled_constbuf = u_bit_consecutive(0, info->base.num_ubos);
-enabled_shaderbuf = u_bit_consecutive(0, info->base.num_ssbos);
+enabled_constbuf = BITFIELD_MASK(info->base.num_ubos);
+enabled_shaderbuf = BITFIELD_MASK(info->base.num_ssbos);
enabled_samplers = info->base.textures_used;
-enabled_images = u_bit_consecutive(0, info->base.num_images);
+enabled_images = BITFIELD_MASK(info->base.num_images);
} else {
enabled_constbuf =
sctx->const_and_shader_buffers[stage].enabled_mask >> SI_NUM_SHADER_BUFFERS;

View file

@@ -1709,7 +1709,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
u_foreach_bit(shader, mask) {
si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
si_const_and_shader_buffer_descriptors_idx(shader),
-u_bit_consecutive64(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
+BITFIELD64_RANGE(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
buf, sctx->const_and_shader_buffers[shader].priority_constbuf);
}
}
@@ -1720,7 +1720,7 @@ void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
u_foreach_bit(shader, mask) {
if (si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
si_const_and_shader_buffer_descriptors_idx(shader),
-u_bit_consecutive64(0, SI_NUM_SHADER_BUFFERS), buf,
+BITFIELD64_MASK(SI_NUM_SHADER_BUFFERS), buf,
sctx->const_and_shader_buffers[shader].priority) &&
shader == PIPE_SHADER_COMPUTE) {
sctx->compute_shaderbuf_sgprs_dirty = true;
@@ -2011,7 +2011,7 @@ void si_update_all_texture_descriptors(struct si_context *sctx)
static void si_mark_shader_pointers_dirty(struct si_context *sctx, unsigned shader)
{
sctx->shader_pointers_dirty |=
-u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS, SI_NUM_SHADER_DESCS);
+BITFIELD_RANGE(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS, SI_NUM_SHADER_DESCS);
if (shader == PIPE_SHADER_VERTEX)
sctx->vertex_buffers_dirty = sctx->num_vertex_elements > 0;
@@ -2022,7 +2022,7 @@ static void si_mark_shader_pointers_dirty(struct si_context *sctx, unsigned shad
void si_shader_pointers_mark_dirty(struct si_context *sctx)
{
sctx->shader_pointers_dirty =
-u_bit_consecutive(SI_DESCS_FIRST_SHADER, SI_NUM_DESCS - SI_DESCS_FIRST_SHADER);
+BITFIELD_RANGE(SI_DESCS_FIRST_SHADER, SI_NUM_DESCS - SI_DESCS_FIRST_SHADER);
sctx->vertex_buffers_dirty = sctx->num_vertex_elements > 0;
si_mark_atom_dirty(sctx, &sctx->atoms.s.gfx_shader_pointers);
sctx->graphics_internal_bindings_pointer_dirty = sctx->descriptors[SI_DESCS_INTERNAL].buffer != NULL;
@@ -2933,7 +2933,7 @@ void si_init_all_descriptors(struct si_context *sctx)
si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES, 1024);
-sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
+sctx->descriptors_dirty = BITFIELD_MASK(SI_NUM_DESCS);
/* Set pipe_context functions. */
sctx->b.bind_sampler_states = si_bind_sampler_states;
@@ -3005,7 +3005,7 @@ bool si_gfx_resources_check_encrypted(struct si_context *sctx)
si_sampler_views_check_encrypted(sctx, &sctx->samplers[i],
current_shader->cso->info.base.textures_used);
use_encrypted_bo |= si_image_views_check_encrypted(sctx, &sctx->images[i],
-u_bit_consecutive(0, current_shader->cso->info.base.num_images));
+BITFIELD_MASK(current_shader->cso->info.base.num_images));
}
use_encrypted_bo |= si_buffer_resources_check_encrypted(sctx, &sctx->internal_bindings);
@@ -3084,7 +3084,7 @@ bool si_compute_resources_check_encrypted(struct si_context *sctx)
*/
return si_buffer_resources_check_encrypted(sctx, &sctx->const_and_shader_buffers[sh]) ||
si_sampler_views_check_encrypted(sctx, &sctx->samplers[sh], info->base.textures_used) ||
-si_image_views_check_encrypted(sctx, &sctx->images[sh], u_bit_consecutive(0, info->base.num_images)) ||
+si_image_views_check_encrypted(sctx, &sctx->images[sh], BITFIELD_MASK(info->base.num_images)) ||
si_buffer_resources_check_encrypted(sctx, &sctx->internal_bindings);
}
@@ -3121,7 +3121,7 @@ void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx, uint6
/* Ignore no-op updates and updates that disable all slots. */
if (!new_active_mask ||
-new_active_mask == u_bit_consecutive64(desc->first_active_slot, desc->num_active_slots))
+new_active_mask == BITFIELD64_RANGE(desc->first_active_slot, desc->num_active_slots))
return;
int first, count;

View file

@@ -550,11 +550,11 @@ void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
bool has_clear_state = ctx->screen->info.has_clear_state;
if (has_clear_state) {
ctx->framebuffer.dirty_cbufs =
-u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
+BITFIELD_MASK(ctx->framebuffer.state.nr_cbufs);
/* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf.texture != NULL;
} else {
-ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
+ctx->framebuffer.dirty_cbufs = BITFIELD_MASK(8);
ctx->framebuffer.dirty_zsbuf = true;
}

View file

@@ -501,7 +501,7 @@ enum
#define SI_NUM_DESCS (SI_DESCS_FIRST_SHADER + SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)
#define SI_DESCS_SHADER_MASK(name) \
-u_bit_consecutive(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
+BITFIELD_RANGE(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS, \
SI_NUM_SHADER_DESCS)
static inline unsigned si_const_and_shader_buffer_descriptors_idx(unsigned shader)

View file

@@ -2097,9 +2097,9 @@ static void si_draw(struct pipe_context *ctx,
si_check_dirty_buffers_textures(sctx);
if (GFX_VERSION < GFX11)
-gfx6_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
+gfx6_decompress_textures(sctx, BITFIELD_MASK(SI_NUM_GRAPHICS_SHADERS));
else if (GFX_VERSION < GFX12)
-gfx11_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
+gfx11_decompress_textures(sctx, BITFIELD_MASK(SI_NUM_GRAPHICS_SHADERS));
si_need_gfx_cs_space(sctx, num_draws, ALT_HIZ_LOGIC ? 8 : 0);

View file

@@ -3569,7 +3569,7 @@ void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_
/* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
start = si_get_shaderbuf_slot(num_shaderbufs - 1);
-*const_and_shader_buffers = u_bit_consecutive64(start, num_shaderbufs + num_constbufs);
+*const_and_shader_buffers = BITFIELD64_RANGE(start, num_shaderbufs + num_constbufs);
/* The layout is:
* - fmask[last] ... fmask[0] go to [15-last .. 15]
@@ -3584,7 +3584,7 @@ void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_
num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
start = si_get_image_slot(num_images - 1) / 2;
-*samplers_and_images = u_bit_consecutive64(start, num_images / 2 + num_samplers);
+*samplers_and_images = BITFIELD64_RANGE(start, num_images / 2 + num_samplers);
}
static void *si_create_shader_selector(struct pipe_context *ctx,
@@ -4844,7 +4844,7 @@ void si_update_tess_io_layout_state(struct si_context *sctx)
sctx->ws->cs_is_secure(&sctx->gfx_cs) ?
si_resource(sctx->screen->tess_rings_tmz)->gpu_address :
si_resource(sctx->screen->tess_rings)->gpu_address;
-assert((ring_va & u_bit_consecutive(0, 19)) == 0);
+assert((ring_va & BITFIELD_MASK(19)) == 0);
sctx->tes_offchip_ring_va_sgpr = ring_va;
sctx->tcs_offchip_layout &= 0xe0000000;
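
The assert above uses a common alignment idiom: (addr & BITFIELD_MASK(n)) == 0 holds exactly when addr is a multiple of 1 << n, so masking the low 19 bits checks that the tess ring address is 512 KiB aligned. A sketch:

#include <stdbool.h>
#include <stdint.h>
#include "util/macros.h" /* BITFIELD_MASK */

/* True when va is aligned to 1 << 19 == 512 KiB. */
static bool is_aligned_512k(uint64_t va)
{
   return (va & BITFIELD_MASK(19)) == 0; /* low 19 bits must be clear */
}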

View file

@@ -420,7 +420,7 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
ws->info.r600_gb_backend_map_valid = true;
/* Default value. */
-ws->info.enabled_rb_mask = u_bit_consecutive(0, ws->info.max_render_backends);
+ws->info.enabled_rb_mask = BITFIELD_MASK(ws->info.max_render_backends);
/*
* This fails (silently) on non-GCN or older kernels, overwriting the
* default enabled_rb_mask with the result of the last query.

View file

@@ -1127,7 +1127,7 @@ vbo_exec_vtx_init(struct vbo_exec_context *exec)
exec->vtx.bufferobj = _mesa_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
-exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
+exec->vtx.enabled = BITFIELD64_MASK(VBO_ATTRIB_MAX); /* reset all */
vbo_reset_all_attr(ctx);
exec->vtx.info.instance_count = 1;