Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
tree-wide: replace MAYBE_UNUSED with ASSERTED
Suggested-by: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Eric Engestrom <eric.engestrom@intel.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
parent ab9c76769a
commit abc226cf41
81 changed files with 162 additions and 145 deletions
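The change itself is mechanical: every variable or parameter that is only read inside an assert() moves from the blanket MAYBE_UNUSED annotation to the new, more precise ASSERTED one. Below is a minimal, self-contained sketch of the pattern; the do_thing() helper and the inlined macro definition are illustrative stand-ins, not part of this patch:

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for the real definition added near the end of this
 * patch: expand to the "unused" attribute only when asserts are compiled
 * out (NDEBUG), i.e. only when the variable really does go unused. */
#ifdef NDEBUG
#define ASSERTED __attribute__((unused))
#else
#define ASSERTED
#endif

static bool do_thing(void)   /* hypothetical helper, for illustration only */
{
   return true;
}

int main(void)
{
   /* 'ok' is consumed only by assert(); with -DNDEBUG the assert expands
    * to nothing and ASSERTED keeps -Wunused-variable quiet. */
   ASSERTED bool ok = do_thing();
   assert(ok);
   return 0;
}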
@@ -820,7 +820,7 @@ BOOL_32 ADDR_API ElemGetExportNorm(
Addr::Lib* pLib = Lib::GetLib(hLib);
BOOL_32 enabled = FALSE;

-MAYBE_UNUSED ADDR_E_RETURNCODE returnCode = ADDR_OK;
+ASSERTED ADDR_E_RETURNCODE returnCode = ADDR_OK;

if (pLib != NULL)
{

@@ -2483,7 +2483,7 @@ ADDR_E_RETURNCODE Gfx9Lib::HwlComputeBlock256Equation(
// Post validation
if (ret == ADDR_OK)
{
-MAYBE_UNUSED Dim2d microBlockDim = Block256_2d[elementBytesLog2];
+ASSERTED Dim2d microBlockDim = Block256_2d[elementBytesLog2];
ADDR_ASSERT((2u << GetMaxValidChannelIndex(pEquation->addr, 8, 0)) ==
(microBlockDim.w * (1 << elementBytesLog2)));
ADDR_ASSERT((2u << GetMaxValidChannelIndex(pEquation->addr, 8, 1)) == microBlockDim.h);

@@ -3843,7 +3843,7 @@ ADDR_E_RETURNCODE Gfx9Lib::ComputeStereoInfo(
const UINT_32 numBankBits = GetBankXorBits(blkSizeLog2);
const UINT_32 bppLog2 = Log2(pIn->bpp >> 3);
const UINT_32 maxYCoordBlock256 = Log2(Block256_2d[bppLog2].h) - 1;
-MAYBE_UNUSED const ADDR_EQUATION *pEqToCheck = &m_equationTable[eqIndex];
+ASSERTED const ADDR_EQUATION *pEqToCheck = &m_equationTable[eqIndex];

ADDR_ASSERT(maxYCoordBlock256 ==
GetMaxValidChannelIndex(&pEqToCheck->addr[0], GetBlockSizeLog2(ADDR_SW_256B), 1));

@@ -975,7 +975,7 @@ BOOL_32 EgBasedLib::SanityCheckMacroTiled(
) const
{
BOOL_32 valid = TRUE;
-MAYBE_UNUSED UINT_32 numPipes = HwlGetPipes(pTileInfo);
+ASSERTED UINT_32 numPipes = HwlGetPipes(pTileInfo);

switch (pTileInfo->banks)
{

@@ -4095,7 +4095,7 @@ UINT_64 EgBasedLib::HwlGetSizeAdjustmentMicroTiled(
) const
{
UINT_64 logicalSliceSize;
-MAYBE_UNUSED UINT_64 physicalSliceSize;
+ASSERTED UINT_64 physicalSliceSize;

UINT_32 pitch = *pPitch;
UINT_32 height = *pHeight;

@@ -261,7 +261,7 @@ static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
ac_to_float(ctx, src0),
};

-MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
@@ -278,7 +278,7 @@ static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
ac_to_float(ctx, src1),
};

-MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);

@@ -296,7 +296,7 @@ static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
ac_to_float(ctx, src2),
};

-MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);

@@ -2374,7 +2374,7 @@ static void get_image_coords(struct ac_nir_context *ctx,
LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

int count;
-MAYBE_UNUSED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
+ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);

@@ -2576,7 +2576,7 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
const char *atomic_name;
char intrinsic_name[64];
enum ac_atomic_op atomic_subop;
-MAYBE_UNUSED int length;
+ASSERTED int length;

enum glsl_sampler_dim dim;
bool is_unsigned = false;

@@ -3427,7 +3427,7 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
}
case nir_intrinsic_load_interpolated_input: {
/* We assume any indirect loads have been lowered away */
-MAYBE_UNUSED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
+ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
assert(offset);
assert(offset[0].i32 == 0);

@@ -3442,7 +3442,7 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
}
case nir_intrinsic_load_input: {
/* We only lower inputs for fragment shaders ATM */
-MAYBE_UNUSED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
+ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
assert(offset);
assert(offset[0].i32 == 0);
@@ -1938,7 +1938,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
-MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
/* We currently don't support writing decompressed HTILE */

@@ -2214,7 +2214,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
if (flush_indirect_descriptors)
radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);

@@ -2300,7 +2300,7 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;

-MAYBE_UNUSED unsigned cdw_max =
+ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);

@@ -3470,7 +3470,7 @@ void radv_CmdPushDescriptorSetKHR(
* because it is invalid, according to Vulkan spec.
*/
for (int i = 0; i < descriptorWriteCount; i++) {
-MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
+ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
}

@@ -3651,7 +3651,7 @@ void radv_CmdSetViewport(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
-MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
+ASSERTED const uint32_t total_count = firstViewport + viewportCount;

assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);

@@ -3675,7 +3675,7 @@ void radv_CmdSetScissor(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
-MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
+ASSERTED const uint32_t total_count = firstScissor + scissorCount;

assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
@@ -3837,7 +3837,7 @@ void radv_CmdSetDiscardRectangleEXT(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
-MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;

assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);

@@ -4060,7 +4060,7 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer,
struct radv_cmd_state *state = &cmd_buffer->state;
struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);

radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

@@ -4465,7 +4465,7 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer,
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;

-MAYBE_UNUSED unsigned cdw_max =
+ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);

@@ -4720,7 +4720,7 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);

if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);

@@ -5337,7 +5337,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,

radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
assert(cmd_buffer->cs->cdw <= cdw_max);

@@ -5456,7 +5456,7 @@ static void write_event(struct radv_cmd_buffer *cmd_buffer,

radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);

/* Flags that only require a top-of-pipe event. */
VkPipelineStageFlags top_of_pipe_flags =
@@ -1371,7 +1371,7 @@ void radv_CmdCopyQueryPoolResults(
unsigned query = firstQuery + i;
uint64_t local_src_va = va + query * pool->stride;

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

if (flags & VK_QUERY_RESULT_WAIT_BIT) {

@@ -1745,7 +1745,7 @@ void radv_CmdWriteTimestamp(
if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

-MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
+ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

for (unsigned i = 0; i < num_queries; i++) {
switch(pipelineStage) {

@@ -507,7 +507,7 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws,
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->priority = priority;

-MAYBE_UNUSED int r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &bo->bo_handle);
+ASSERTED int r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &bo->bo_handle);
assert(!r);

p_atomic_add(&ws->allocated_gtt,

@@ -652,7 +652,7 @@ v3d_spec_load(const struct v3d_device_info *devinfo)
void *buf;
uint8_t *text_data = NULL;
uint32_t text_offset = 0, text_length = 0;
-MAYBE_UNUSED uint32_t total_length;
+ASSERTED uint32_t total_length;

for (int i = 0; i < ARRAY_SIZE(genxml_files_table); i++) {
if (i != 0) {

@@ -944,7 +944,7 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
case nir_op_sge:
case nir_op_slt: {
enum v3d_qpu_cond cond;
-MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
+ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
assert(ok);
result = vir_MOV(c, vir_SEL(c, cond,
vir_uniform_f(c, 1.0),

@@ -965,7 +965,7 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
case nir_op_ilt32:
case nir_op_ult32: {
enum v3d_qpu_cond cond;
-MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
+ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
assert(ok);
result = vir_MOV(c, vir_SEL(c, cond,
vir_uniform_ui(c, ~0),
@@ -333,7 +333,7 @@ static bool
reads_uniform(const struct v3d_device_info *devinfo, uint64_t instruction)
{
struct v3d_qpu_instr qpu;
-MAYBE_UNUSED bool ok = v3d_qpu_instr_unpack(devinfo, instruction, &qpu);
+ASSERTED bool ok = v3d_qpu_instr_unpack(devinfo, instruction, &qpu);
assert(ok);

if (qpu.sig.ldunif ||

@@ -64,7 +64,7 @@ v3d_qpu_disasm_raddr(struct disasm_state *disasm,
} else if (mux == V3D_QPU_MUX_B) {
if (instr->sig.small_imm) {
uint32_t val;
-MAYBE_UNUSED bool ok =
+ASSERTED bool ok =
v3d_qpu_small_imm_unpack(disasm->devinfo,
instr->raddr_b,
&val);

@@ -175,8 +175,8 @@ validate_ordering(YYLTYPE *loc,
}

static bool
-validate_point_mode(MAYBE_UNUSED const ast_type_qualifier &qualifier,
-MAYBE_UNUSED const ast_type_qualifier &new_qualifier)
+validate_point_mode(ASSERTED const ast_type_qualifier &qualifier,
+ASSERTED const ast_type_qualifier &new_qualifier)
{
/* Point mode can only be true if the flag is set. */
assert (!qualifier.flags.q.point_mode || !new_qualifier.flags.q.point_mode

@@ -1052,7 +1052,7 @@ ir_validate::validate_ir(ir_instruction *ir, void *data)
_mesa_set_add(ir_set, ir);
}

-MAYBE_UNUSED static void
+ASSERTED static void
check_node_type(ir_instruction *ir, void *data)
{
(void) data;

@@ -233,7 +233,7 @@ lower_buffer_access::is_dereferenced_thing_row_major(const ir_rvalue *deref)
* layouts at HIR generation time, but we don't do that for shared
* variables, which are always column-major
*/
-MAYBE_UNUSED ir_variable *var = deref->variable_referenced();
+ASSERTED ir_variable *var = deref->variable_referenced();
assert((var->is_in_buffer_block() && !matrix) ||
var->data.mode == ir_var_shader_shared);
return false;

@@ -28,7 +28,7 @@
static bool
assert_ssa_def_is_not_int(nir_ssa_def *def, void *arg)
{
-MAYBE_UNUSED BITSET_WORD *int_types = arg;
+ASSERTED BITSET_WORD *int_types = arg;
assert(!BITSET_TEST(int_types, def->index));
return true;
}
@@ -684,7 +684,7 @@ write_phi(write_ctx *ctx, const nir_phi_instr *phi)
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
size_t blob_offset = blob_reserve_intptr(ctx->blob);
-MAYBE_UNUSED size_t blob_offset2 = blob_reserve_intptr(ctx->blob);
+ASSERTED size_t blob_offset2 = blob_reserve_intptr(ctx->blob);
assert(blob_offset + sizeof(uintptr_t) == blob_offset2);
write_phi_fixup fixup = {
.blob_offset = blob_offset,

@@ -257,7 +257,7 @@ _eglQueryDeviceStringEXT(_EGLDevice *dev, EGLint name)
static int
_eglRefreshDeviceList(void)
{
-MAYBE_UNUSED _EGLDevice *dev;
+ASSERTED _EGLDevice *dev;
int count = 0;

dev = _eglGlobal.DeviceList;

@@ -2034,6 +2034,6 @@ error1:
void
trace_context_check(const struct pipe_context *pipe)
{
-MAYBE_UNUSED struct trace_context *tr_ctx = (struct trace_context *) pipe;
+ASSERTED struct trace_context *tr_ctx = (struct trace_context *) pipe;
assert(tr_ctx->base.destroy == trace_context_destroy);
}

@@ -826,7 +826,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
assert(fenced_buf->validation_flags);

if (fenced_buf->fence) {
-MAYBE_UNUSED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+ASSERTED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
assert(!destroyed);
}
if (fence) {

@@ -384,7 +384,7 @@ util_blit_pixels(struct blit_state *ctx,
struct pipe_surface *dst,
int dstX0, int dstY0,
int dstX1, int dstY1,
-MAYBE_UNUSED float z,
+ASSERTED float z,
enum pipe_tex_filter filter,
uint writemask)
{

@@ -579,7 +579,7 @@ void util_blitter_unset_running_flag(struct blitter_context *blitter)
blitter->pipe->set_active_query_state(blitter->pipe, true);
}

-static void blitter_check_saved_vertex_states(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_vertex_states(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_vs != INVALID_PTR);
assert(!ctx->has_geometry_shader || ctx->base.saved_gs != INVALID_PTR);
@@ -645,7 +645,7 @@ void util_blitter_restore_vertex_states(struct blitter_context *blitter)
ctx->base.saved_rs_state = INVALID_PTR;
}

-static void blitter_check_saved_fragment_states(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_fragment_states(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_fs != INVALID_PTR);
assert(ctx->base.saved_dsa_state != INVALID_PTR);

@@ -691,7 +691,7 @@ void util_blitter_restore_fragment_states(struct blitter_context *blitter)
}
}

-static void blitter_check_saved_fb_state(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_fb_state(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_fb_state.nr_cbufs != (ubyte) ~0);
}

@@ -727,7 +727,7 @@ void util_blitter_restore_fb_state(struct blitter_context *blitter)
util_unreference_framebuffer_state(&ctx->base.saved_fb_state);
}

-static void blitter_check_saved_textures(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_textures(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_num_sampler_states != ~0u);
assert(ctx->base.saved_num_sampler_views != ~0u);

@@ -246,7 +246,7 @@ util_format_r8g8_b8g8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stri

void
util_format_r8g8_b8g8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
-unsigned i, MAYBE_UNUSED unsigned j)
+unsigned i, ASSERTED unsigned j)
{
assert(i < 2);
assert(j < 1);

@@ -466,7 +466,7 @@ util_format_g8r8_g8b8_unorm_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stri

void
util_format_g8r8_g8b8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
-unsigned i, MAYBE_UNUSED unsigned j)
+unsigned i, ASSERTED unsigned j)
{
assert(i < 2);
assert(j < 1);

@@ -682,7 +682,7 @@ util_format_uyvy_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,

void
util_format_uyvy_fetch_rgba_float(float *dst, const uint8_t *src,
-unsigned i, MAYBE_UNUSED unsigned j)
+unsigned i, ASSERTED unsigned j)
{
uint8_t y, u, v;
@@ -903,7 +903,7 @@ util_format_yuyv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,

void
util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
-unsigned i, MAYBE_UNUSED unsigned j)
+unsigned i, ASSERTED unsigned j)
{
uint8_t y, u, v;

@@ -77,7 +77,7 @@ pipe_reference_described(struct pipe_reference *dst,
if (dst != src) {
/* bump the src.count first */
if (src) {
-MAYBE_UNUSED int count = p_atomic_inc_return(&src->count);
+ASSERTED int count = p_atomic_inc_return(&src->count);
assert(count != 1); /* src had to be referenced */
debug_reference(src, get_desc, 1);
}

@@ -2223,7 +2223,7 @@ static void
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
-MAYBE_UNUSED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
+ASSERTED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
p->base_level,
p->last_level,
p->first_layer,

@@ -477,7 +477,7 @@ static void
etna_compile_parse_declarations(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
-MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);

while (!tgsi_parse_end_of_tokens(&ctx)) {

@@ -529,7 +529,7 @@ static void
etna_compile_pass_check_usage(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
-MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);

for (int idx = 0; idx < c->total_decls; ++idx) {

@@ -660,7 +660,7 @@ etna_compile_pass_optimize_outputs(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
int inst_idx = 0;
-MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);

while (!tgsi_parse_end_of_tokens(&ctx)) {
@@ -1809,7 +1809,7 @@ static void
etna_compile_pass_generate_code(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
-MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);

int inst_idx = 0;

@@ -912,7 +912,7 @@ static void spill_node(sched_ctx *ctx, gpir_node *node, gpir_store_node *store)
gpir_node_add_dep(&load->node, &store->node, GPIR_DEP_READ_AFTER_WRITE);
gpir_debug("spilling use %d of node %d to load node %d\n",
use->index, node->index, load->node.index);
-MAYBE_UNUSED bool result = _try_place_node(ctx, use->sched.instr, &load->node);
+ASSERTED bool result = _try_place_node(ctx, use->sched.instr, &load->node);
assert(result);
}
}

@@ -1303,7 +1303,7 @@ static bool try_node(sched_ctx *ctx)
if (best_node) {
gpir_debug("scheduling %d (score = %d)%s\n", best_node->index,
best_score, best_node->sched.max_node ? " (max)" : "");
-MAYBE_UNUSED int score = schedule_try_node(ctx, best_node, false);
+ASSERTED int score = schedule_try_node(ctx, best_node, false);
assert(score != INT_MIN);
return true;
}

@@ -1323,7 +1323,7 @@ static void place_move(sched_ctx *ctx, gpir_node *node)
gpir_node_replace_child(succ, move, node);
}
}
-MAYBE_UNUSED int score = schedule_try_node(ctx, move, false);
+ASSERTED int score = schedule_try_node(ctx, move, false);
assert(score != INT_MIN);
}

@@ -123,7 +123,7 @@ static bool insert_to_each_succ_instr(ppir_block *block, ppir_node *node)
if (!create_new_instr(block, move))
return false;

-MAYBE_UNUSED bool insert_result =
+ASSERTED bool insert_result =
ppir_instr_insert_node(move->instr, node);
assert(insert_result);

@@ -108,7 +108,7 @@ lp_setup_unmap_vertices(struct vbuf_render *vbr,
ushort min_index,
ushort max_index )
{
-MAYBE_UNUSED struct lp_setup_context *setup = lp_setup_context(vbr);
+ASSERTED struct lp_setup_context *setup = lp_setup_context(vbr);
assert( setup->vertex_buffer_size >= (max_index+1) * setup->vertex_size );
/* do nothing */
}
@@ -621,7 +621,7 @@ void
CodeEmitterNV50::emitLOAD(const Instruction *i)
{
DataFile sf = i->src(0).getFile();
-MAYBE_UNUSED int32_t offset = i->getSrc(0)->reg.data.offset;
+ASSERTED int32_t offset = i->getSrc(0)->reg.data.offset;

switch (sf) {
case FILE_SHADER_INPUT:

@@ -3514,7 +3514,7 @@ PostRaLoadPropagation::handleMADforNV50(Instruction *i)
ImmediateValue val;
// getImmediate() has side-effects on the argument so this *shouldn't*
// be folded into the assert()
-MAYBE_UNUSED bool ret = def->src(0).getImmediate(val);
+ASSERTED bool ret = def->src(0).getImmediate(val);
assert(ret);
if (i->getSrc(1)->reg.data.id & 1)
val.reg.data.u32 >>= 16;

@@ -41,7 +41,7 @@ nv98_decoder_decode_bitstream(struct pipe_video_codec *decoder,
union pipe_desc desc;

unsigned vp_caps, is_ref;
-MAYBE_UNUSED unsigned ret; /* used in debug checks */
+ASSERTED unsigned ret; /* used in debug checks */
struct nouveau_vp3_video_buffer *refs[16] = {};

desc.base = picture;

@@ -32,7 +32,7 @@ nvc0_decoder_begin_frame(struct pipe_video_codec *decoder,
{
struct nouveau_vp3_decoder *dec = (struct nouveau_vp3_decoder *)decoder;
uint32_t comm_seq = ++dec->fence_seq;
-MAYBE_UNUSED unsigned ret = 0; /* used in debug checks */
+ASSERTED unsigned ret = 0; /* used in debug checks */

assert(dec);
assert(target);

@@ -53,7 +53,7 @@ nvc0_decoder_decode_bitstream(struct pipe_video_codec *decoder,
{
struct nouveau_vp3_decoder *dec = (struct nouveau_vp3_decoder *)decoder;
uint32_t comm_seq = dec->fence_seq;
-MAYBE_UNUSED unsigned ret = 0; /* used in debug checks */
+ASSERTED unsigned ret = 0; /* used in debug checks */

assert(decoder);

@@ -73,7 +73,7 @@ nvc0_decoder_end_frame(struct pipe_video_codec *decoder,
union pipe_desc desc;

unsigned vp_caps, is_ref;
-MAYBE_UNUSED unsigned ret; /* used in debug checks */
+ASSERTED unsigned ret; /* used in debug checks */
struct nouveau_vp3_video_buffer *refs[16] = {};

desc.base = picture;
@@ -197,7 +197,7 @@ panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
{
struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
-MAYBE_UNUSED int ret;
+ASSERTED int ret;
unsigned gem_handle;

ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);

@@ -355,7 +355,7 @@ unsigned
panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
{
struct drm_panfrost_get_param get_param = {0,};
-MAYBE_UNUSED int ret;
+ASSERTED int ret;

get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);

@@ -435,7 +435,7 @@ static void compute_memory_move_item(struct compute_memory_pool *pool,
new_start_in_dw, new_start_in_dw * 4);

if (pool->item_list != item->link.prev) {
-MAYBE_UNUSED struct compute_memory_item *prev;
+ASSERTED struct compute_memory_item *prev;
prev = container_of(item->link.prev, item, link);
assert(prev->start_in_dw + prev->size_in_dw <= new_start_in_dw);
}

@@ -1062,7 +1062,7 @@ static bool do_hardware_msaa_resolve(struct pipe_context *ctx,
struct si_context *sctx = (struct si_context*)ctx;
struct si_texture *src = (struct si_texture*)info->src.resource;
struct si_texture *dst = (struct si_texture*)info->dst.resource;
-MAYBE_UNUSED struct si_texture *stmp;
+ASSERTED struct si_texture *stmp;
unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level);
unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level);
enum pipe_format format = info->src.format;

@@ -193,7 +193,7 @@ void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
if (!size)
return;

-MAYBE_UNUSED unsigned clear_alignment = MIN2(clear_value_size, 4);
+ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
assert(offset % clear_alignment == 0);

@@ -1098,7 +1098,7 @@ si_prepare_prim_discard_or_split_draw(struct si_context *sctx,

/* The compute IB is always chained, but we need to call cs_check_space to add more space. */
struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
-MAYBE_UNUSED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
+ASSERTED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
assert(compute_has_space);
assert(si_check_ring_space(sctx, out_indexbuf_size));
return SI_PRIM_DISCARD_ENABLED;
@@ -2321,7 +2321,7 @@ static void si_init_bindless_descriptors(struct si_context *sctx,
short shader_userdata_rel_index,
unsigned num_elements)
{
-MAYBE_UNUSED unsigned desc_slot;
+ASSERTED unsigned desc_slot;

si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
sctx->bindless_descriptors.num_active_slots = num_elements;

@@ -134,7 +134,7 @@ static void add_arg_assign_checked(struct si_function_info *fninfo,
enum si_arg_regfile regfile, LLVMTypeRef type,
LLVMValueRef *assign, unsigned idx)
{
-MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
+ASSERTED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
assert(actual == idx);
}

@@ -6649,8 +6649,8 @@ static void si_build_wrapper_function(struct si_shader_context *ctx,
LLVMTypeRef function_type;
unsigned num_first_params;
unsigned num_out, initial_num_out;
-MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
-MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
+ASSERTED unsigned num_out_sgpr; /* used in debug checks */
+ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
unsigned num_sgprs, num_vgprs;
unsigned gprs;

@@ -200,7 +200,7 @@ prepare_shader_sampling(

if (!sp_tex->dt) {
/* regular texture - setup array of mipmap level offsets */
-MAYBE_UNUSED struct pipe_resource *res = view->texture;
+ASSERTED struct pipe_resource *res = view->texture;
int j;

if (view->target != PIPE_BUFFER) {

@@ -420,7 +420,7 @@ static void
softpipe_delete_compute_state(struct pipe_context *pipe,
void *cs)
{
-MAYBE_UNUSED struct softpipe_context *softpipe = softpipe_context(pipe);
+ASSERTED struct softpipe_context *softpipe = softpipe_context(pipe);
struct sp_compute_shader *state = (struct sp_compute_shader *)cs;

assert(softpipe->cs != state);

@@ -1002,10 +1002,10 @@ check_draw_params(struct svga_hwtnl *hwtnl,
assert(range->indexWidth == range->indexArray.stride);

if (ib) {
-MAYBE_UNUSED unsigned size = ib->width0;
-MAYBE_UNUSED unsigned offset = range->indexArray.offset;
-MAYBE_UNUSED unsigned stride = range->indexArray.stride;
-MAYBE_UNUSED unsigned count;
+ASSERTED unsigned size = ib->width0;
+ASSERTED unsigned offset = range->indexArray.offset;
+ASSERTED unsigned stride = range->indexArray.stride;
+ASSERTED unsigned count;

assert(size);
assert(offset < size);
@@ -370,7 +370,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
* It will be done using the current context.
*/
if (SVGA3D_InvalidateGBSurface(svga->swc, entry->handle) != PIPE_OK) {
-MAYBE_UNUSED enum pipe_error ret;
+ASSERTED enum pipe_error ret;

/* Even though surface invalidation here is done after the command
* buffer is flushed, it is still possible that it will

@@ -44,7 +44,7 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
struct pipe_transfer *ib_transfer = NULL;
struct pipe_transfer *cb_transfer[SVGA_MAX_CONST_BUFS] = { 0 };
struct draw_context *draw = svga->swtnl.draw;
-MAYBE_UNUSED unsigned old_num_vertex_buffers;
+ASSERTED unsigned old_num_vertex_buffers;
unsigned i;
const void *map;
enum pipe_error ret;

@@ -204,9 +204,9 @@ fixup_raddr_conflict(struct qblock *block,
static void
set_last_dst_pack(struct qblock *block, struct qinst *inst)
{
-MAYBE_UNUSED bool had_pm = *last_inst(block) & QPU_PM;
-MAYBE_UNUSED bool had_ws = *last_inst(block) & QPU_WS;
-MAYBE_UNUSED uint32_t unpack = QPU_GET_FIELD(*last_inst(block), QPU_UNPACK);
+ASSERTED bool had_pm = *last_inst(block) & QPU_PM;
+ASSERTED bool had_ws = *last_inst(block) & QPU_WS;
+ASSERTED uint32_t unpack = QPU_GET_FIELD(*last_inst(block), QPU_UNPACK);

if (!inst->dst.pack)
return;

@@ -419,7 +419,7 @@ vc4_generate_code_block(struct vc4_compile *c,
break;
}

-MAYBE_UNUSED bool handled_qinst_cond = false;
+ASSERTED bool handled_qinst_cond = false;

switch (qinst->op) {
case QOP_RCP:

@@ -88,7 +88,7 @@ virgl_rebind_resource(struct virgl_context *vctx,
/* Queries use internally created buffers and do not go through transfers.
* Index buffers are not bindable. They are not tracked.
*/
-MAYBE_UNUSED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
PIPE_BIND_CONSTANT_BUFFER |
PIPE_BIND_SHADER_BUFFER |
PIPE_BIND_SHADER_IMAGE);

@@ -227,8 +227,8 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur

XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
-MAYBE_UNUSED XvMCSurfacePrivate *past_surface_priv;
-MAYBE_UNUSED XvMCSurfacePrivate *future_surface_priv;
+ASSERTED XvMCSurfacePrivate *past_surface_priv;
+ASSERTED XvMCSurfacePrivate *future_surface_priv;
XvMCMacroBlock *xvmc_mb;

XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
@@ -370,7 +370,7 @@ vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
struct pb_buffer *pb_buf,
unsigned flags)
{
-MAYBE_UNUSED enum pipe_error ret;
+ASSERTED enum pipe_error ret;
unsigned translated_flags;
boolean already_present;

@@ -1179,7 +1179,7 @@ blorp_ccs_ambiguate(struct blorp_batch *batch,
const uint32_t width_rgba_px = width_cl;
const uint32_t height_rgba_px = height_cl * 4;

-MAYBE_UNUSED bool ok =
+ASSERTED bool ok =
isl_surf_init(batch->blorp->isl_dev, &params.dst.surf,
.dim = ISL_SURF_DIM_2D,
.format = ISL_FORMAT_R32G32B32A32_UINT,

@@ -601,7 +601,7 @@ gen_spec_load(const struct gen_device_info *devinfo)
void *buf;
uint8_t *text_data = NULL;
uint32_t text_offset = 0, text_length = 0;
-MAYBE_UNUSED uint32_t total_length;
+ASSERTED uint32_t total_length;
uint32_t gen_10 = devinfo_to_gen(devinfo, true);

for (int i = 0; i < ARRAY_SIZE(genxml_files_table); i++) {

@@ -1417,7 +1417,7 @@ encode_slm_size(unsigned gen, uint32_t bytes)
* '2^n - 1' for some n.
*/
static inline bool
-brw_stage_has_packed_dispatch(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_stage_has_packed_dispatch(ASSERTED const struct gen_device_info *devinfo,
gl_shader_stage stage,
const struct brw_stage_prog_data *prog_data)
{

@@ -287,7 +287,7 @@ brw_message_desc_rlen(const struct gen_device_info *devinfo, uint32_t desc)
}

static inline bool
-brw_message_desc_header_present(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_message_desc_header_present(ASSERTED const struct gen_device_info *devinfo,
uint32_t desc)
{
assert(devinfo->gen >= 5);

@@ -370,7 +370,7 @@ brw_sampler_desc_simd_mode(const struct gen_device_info *devinfo, uint32_t desc)
}

static inline unsigned
-brw_sampler_desc_return_format(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_sampler_desc_return_format(ASSERTED const struct gen_device_info *devinfo,
uint32_t desc)
{
assert(devinfo->gen == 4 && !devinfo->is_g4x);
@@ -753,7 +753,7 @@ brw_dp_a64_byte_scattered_rw_desc(const struct gen_device_info *devinfo,

static inline uint32_t
brw_dp_a64_untyped_atomic_desc(const struct gen_device_info *devinfo,
-MAYBE_UNUSED unsigned exec_size, /**< 0 for SIMD4x2 */
+ASSERTED unsigned exec_size, /**< 0 for SIMD4x2 */
unsigned bit_size,
unsigned atomic_op,
bool response_expected)

@@ -774,7 +774,7 @@ brw_dp_a64_untyped_atomic_desc(const struct gen_device_info *devinfo,

static inline uint32_t
brw_dp_a64_untyped_atomic_float_desc(const struct gen_device_info *devinfo,
-MAYBE_UNUSED unsigned exec_size,
+ASSERTED unsigned exec_size,
unsigned atomic_op,
bool response_expected)
{

@@ -7287,7 +7287,7 @@ fs_visitor::allocate_registers(unsigned min_dispatch_width, bool allow_spilling)
schedule_instructions(SCHEDULE_POST);

if (last_scratch > 0) {
-MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
+ASSERTED unsigned max_scratch_size = 2 * 1024 * 1024;

prog_data->total_scratch = brw_get_scratch_size(last_scratch);

@@ -345,7 +345,7 @@ REG_TYPE(src2)
* @{
*/
static inline uint16_t
-brw_inst_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_3src_a1_src0_imm(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 10);

@@ -353,7 +353,7 @@ brw_inst_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
}

static inline uint16_t
-brw_inst_3src_a1_src2_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_3src_a1_src2_imm(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 10);

@@ -361,7 +361,7 @@ brw_inst_3src_a1_src2_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
}

static inline void
-brw_inst_set_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_set_3src_a1_src0_imm(ASSERTED const struct gen_device_info *devinfo,
brw_inst *insn, uint16_t value)
{
assert(devinfo->gen >= 10);
@@ -369,7 +369,7 @@ brw_inst_set_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo
}

static inline void
-brw_inst_set_3src_a1_src2_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_set_3src_a1_src2_imm(ASSERTED const struct gen_device_info *devinfo,
brw_inst *insn, uint16_t value)
{
assert(devinfo->gen >= 10);

@@ -814,7 +814,7 @@ brw_inst_imm_ud(const struct gen_device_info *devinfo, const brw_inst *insn)
}

static inline uint64_t
-brw_inst_imm_uq(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_imm_uq(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 8);

@@ -1032,7 +1032,7 @@ vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src,
static int
try_immediate_source(const nir_alu_instr *instr, src_reg *op,
bool try_src0_also,
-MAYBE_UNUSED const gen_device_info *devinfo)
+ASSERTED const gen_device_info *devinfo)
{
unsigned idx;

@@ -261,9 +261,9 @@ isl_genX(surf_fill_state_s)(const struct isl_device *dev, void *state,
* S3TC workaround that requires us to do reinterpretation. So assert
* that they're at least the same bpb and block size.
*/
-MAYBE_UNUSED const struct isl_format_layout *surf_fmtl =
+ASSERTED const struct isl_format_layout *surf_fmtl =
isl_format_get_layout(info->surf->format);
-MAYBE_UNUSED const struct isl_format_layout *view_fmtl =
+ASSERTED const struct isl_format_layout *view_fmtl =
isl_format_get_layout(info->surf->format);
assert(surf_fmtl->bpb == view_fmtl->bpb);
assert(surf_fmtl->bw == view_fmtl->bw);

@@ -155,7 +155,7 @@ ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
new_mem->phys_addr = phys_addr;
new_mem->fd_offset = mem->mem_fd_len;

-MAYBE_UNUSED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
+ASSERTED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
assert(ftruncate_res == 0);

new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,

@@ -289,7 +289,7 @@ aub_mem_get_ggtt_bo(void *_mem, uint64_t address)
continue;

uint32_t map_offset = i->virt_addr - address;
-MAYBE_UNUSED void *res =
+ASSERTED void *res =
mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
assert(res != MAP_FAILED);
@@ -355,7 +355,7 @@ aub_mem_get_ppgtt_bo(void *_mem, uint64_t address)
for (uint64_t page = address; page < end; page += 4096) {
struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);

-MAYBE_UNUSED void *res =
+ASSERTED void *res =
mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
assert(res != MAP_FAILED);

@@ -290,7 +290,7 @@ int
aub_read_command(struct aub_read *read, const void *data, uint32_t data_len)
{
const uint32_t *p = data, *next;
-MAYBE_UNUSED const uint32_t *end = data + data_len;
+ASSERTED const uint32_t *end = data + data_len;
uint32_t h, header_length, bias;

assert(data_len >= 4);

@@ -757,7 +757,7 @@ main(int argc, char *argv[])
setup_pager();

if (S_ISDIR(st.st_mode)) {
-MAYBE_UNUSED int ret;
+ASSERTED int ret;
char *filename;

ret = asprintf(&filename, "%s/i915_error_state", path);

@@ -420,7 +420,7 @@ anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
{
const uint32_t bb_start_offset =
prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
-MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
+ASSERTED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;

/* Make sure we're looking at a MI_BATCH_BUFFER_START */
assert(((*bb_start >> 29) & 0x07) == 0);

@@ -318,7 +318,7 @@ anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
char renderer[10];
-MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
+ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
device->chipset_id);
assert(len == sizeof(renderer) - 2);

@@ -2795,7 +2795,7 @@ anv_vma_free(struct anv_device *device, struct anv_bo *bo)
util_vma_heap_free(&device->vma_lo, addr_48b, bo->size);
device->vma_lo_available += bo->size;
} else {
-MAYBE_UNUSED const struct anv_physical_device *physical_device =
+ASSERTED const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
assert(addr_48b >= physical_device->memory.heaps[0].vma_start &&
addr_48b < (physical_device->memory.heaps[0].vma_start +
@@ -62,7 +62,7 @@ dump_image_init(struct anv_device *device, struct dump_image *image,
uint32_t width, uint32_t height, const char *filename)
{
VkDevice vk_device = anv_device_to_handle(device);
-MAYBE_UNUSED VkResult result;
+ASSERTED VkResult result;

image->filename = filename;
image->extent = (VkExtent2D) { width, height };

@@ -200,7 +200,7 @@ static void
dump_image_write_to_ppm(struct anv_device *device, struct dump_image *image)
{
VkDevice vk_device = anv_device_to_handle(device);
-MAYBE_UNUSED VkResult result;
+ASSERTED VkResult result;

VkMemoryRequirements reqs;
anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);

@@ -249,7 +249,7 @@ anv_dump_image_to_ppm(struct anv_device *device,
const char *filename)
{
VkDevice vk_device = anv_device_to_handle(device);
-MAYBE_UNUSED VkResult result;
+ASSERTED VkResult result;

PFN_vkBeginCommandBuffer BeginCommandBuffer =
(void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),

@@ -637,7 +637,7 @@ anv_wait_for_bo_fences(struct anv_device *device,
.tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
};

-MAYBE_UNUSED int ret;
+ASSERTED int ret;
ret = pthread_cond_timedwait(&device->queue_submit,
&device->mutex, &abstime);
assert(ret != EINVAL);

@@ -176,7 +176,7 @@ brw_upload_cs_prog(struct brw_context *brw)
cp = (struct brw_program *) brw->programs[MESA_SHADER_COMPUTE];
cp->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_cs_prog(brw, cp, &key);
+ASSERTED bool success = brw_codegen_cs_prog(brw, cp, &key);
assert(success);
}

@@ -396,7 +396,7 @@ brw_disk_cache_init(struct intel_screen *screen)

/* array length: print length + null char + 1 extra to verify it is unused */
char renderer[11];
-MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "i965_%04x",
+ASSERTED int len = snprintf(renderer, sizeof(renderer), "i965_%04x",
screen->deviceID);
assert(len == sizeof(renderer) - 2);
@@ -178,7 +178,7 @@ brw_upload_gs_prog(struct brw_context *brw)
gp = (struct brw_program *) brw->programs[MESA_SHADER_GEOMETRY];
gp->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_gs_prog(brw, gp, &key);
+ASSERTED bool success = brw_codegen_gs_prog(brw, gp, &key);
assert(success);
}

@@ -118,7 +118,7 @@ brw_emit_depth_stall_flushes(struct brw_context *brw)
void
gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
-MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;

assert(devinfo->gen == 7);
brw_emit_pipe_control_write(brw,

@@ -206,14 +206,14 @@ brw_program_deserialize_driver_blob(struct gl_context *ctx,
break;
switch ((enum driver_cache_blob_part)part_type) {
case GEN_PART: {
-MAYBE_UNUSED uint32_t gen_size = blob_read_uint32(&reader);
+ASSERTED uint32_t gen_size = blob_read_uint32(&reader);
assert(!reader.overrun &&
(uintptr_t)(reader.end - reader.current) > gen_size);
deserialize_gen_program(&reader, ctx, prog, stage);
break;
}
case NIR_PART: {
-MAYBE_UNUSED uint32_t nir_size = blob_read_uint32(&reader);
+ASSERTED uint32_t nir_size = blob_read_uint32(&reader);
assert(!reader.overrun &&
(uintptr_t)(reader.end - reader.current) > nir_size);
const struct nir_shader_compiler_options *options =

@@ -205,7 +205,7 @@ brw_upload_tcs_prog(struct brw_context *brw)
/* BRW_NEW_TESS_PROGRAMS */
struct brw_program *tcp =
(struct brw_program *) brw->programs[MESA_SHADER_TESS_CTRL];
-MAYBE_UNUSED struct brw_program *tep =
+ASSERTED struct brw_program *tep =
(struct brw_program *) brw->programs[MESA_SHADER_TESS_EVAL];
assert(tep);

@@ -229,7 +229,7 @@ brw_upload_tcs_prog(struct brw_context *brw)
if (tcp)
tcp->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_tcs_prog(brw, tcp, tep, &key);
+ASSERTED bool success = brw_codegen_tcs_prog(brw, tcp, tep, &key);
assert(success);
}

@@ -177,7 +177,7 @@ brw_upload_tes_prog(struct brw_context *brw)
tep = (struct brw_program *) brw->programs[MESA_SHADER_TESS_EVAL];
tep->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_tes_prog(brw, tep, &key);
+ASSERTED bool success = brw_codegen_tes_prog(brw, tep, &key);
assert(success);
}
@@ -317,7 +317,7 @@ brw_upload_vs_prog(struct brw_context *brw)
vp = (struct brw_program *) brw->programs[MESA_SHADER_VERTEX];
vp->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_vs_prog(brw, vp, &key);
+ASSERTED bool success = brw_codegen_vs_prog(brw, vp, &key);
assert(success);
}

@@ -535,7 +535,7 @@ brw_upload_wm_prog(struct brw_context *brw)
fp = (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
fp->id = key.base.program_string_id;

-MAYBE_UNUSED bool success = brw_codegen_wm_prog(brw, fp, &key,
+ASSERTED bool success = brw_codegen_wm_prog(brw, fp, &key,
&brw->vue_map_geom_out);
assert(success);
}

@@ -250,7 +250,7 @@ static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
struct brw_context *brw = brw_context(ctx);
-MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;

switch (internalFormat) {
default:

@@ -1277,7 +1277,7 @@ error_check_subtexture_dimensions(struct gl_context *ctx, GLuint dims,
*/
GLboolean
_mesa_test_proxy_teximage(struct gl_context *ctx, GLenum target,
-GLuint numLevels, MAYBE_UNUSED GLint level,
+GLuint numLevels, ASSERTED GLint level,
mesa_format format, GLuint numSamples,
GLint width, GLint height, GLint depth)
{

@@ -303,7 +303,7 @@ draw_bitmap_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z,
/* XXX if the bitmap is larger than the max texture size, break
* it up into chunks.
*/
-GLuint MAYBE_UNUSED maxSize =
+ASSERTED GLuint maxSize =
pipe->screen->get_param(pipe->screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
assert(width <= (GLsizei) maxSize);
assert(height <= (GLsizei) maxSize);

@@ -700,7 +700,7 @@ make_texture(struct st_context *st,
unpack);
}
else {
-bool MAYBE_UNUSED success;
+ASSERTED bool success;
success = _mesa_texstore(ctx, 2, /* dims */
baseInternalFormat, /* baseInternalFormat */
mformat, /* mesa_format */
@@ -750,7 +750,7 @@ draw_textured_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z,
const unsigned fb_width = _mesa_geometric_width(ctx->DrawBuffer);
const unsigned fb_height = _mesa_geometric_height(ctx->DrawBuffer);
GLfloat x0, y0, x1, y1;
-GLsizei MAYBE_UNUSED maxSize;
+ASSERTED GLsizei maxSize;
boolean normalized = sv[0]->texture->target == PIPE_TEXTURE_2D;
unsigned cso_state_mask;

@@ -2537,7 +2537,7 @@ copy_image_data_to_texture(struct st_context *st,
{
/* debug checks */
{
-const struct gl_texture_image MAYBE_UNUSED *dstImage =
+ASSERTED const struct gl_texture_image *dstImage =
stObj->base.Image[stImage->base.Face][dstLevel];
assert(dstImage);
assert(dstImage->Width == stImage->base.Width);

@@ -427,7 +427,7 @@ get_texture_format_swizzle(const struct st_context *st,
*
* \param stObj the st texture object,
*/
-MAYBE_UNUSED static boolean
+ASSERTED static boolean
check_sampler_swizzle(const struct st_context *st,
const struct st_texture_object *stObj,
const struct pipe_sampler_view *sv,

@@ -616,8 +616,8 @@ st_get_buffer_sampler_view_from_stobj(struct st_context *st,
stObj->base._BufferObjectFormat)
== view->format);
assert(view->target == PIPE_BUFFER);
-unsigned base = stObj->base.BufferOffset;
-MAYBE_UNUSED unsigned size = MIN2(buf->width0 - base,
+ASSERTED unsigned base = stObj->base.BufferOffset;
+ASSERTED unsigned size = MIN2(buf->width0 - base,
(unsigned) stObj->base.BufferSize);
assert(view->u.buf.offset == base);
assert(view->u.buf.size == size);

@@ -146,7 +146,7 @@ uint8_t _mesa_half_to_unorm8(uint16_t val)
{
const int m = val & 0x3ff;
const int e = (val >> 10) & 0x1f;
-MAYBE_UNUSED const int s = (val >> 15) & 0x1;
+ASSERTED const int s = (val >> 15) & 0x1;

/* v = round_to_nearest(1.mmmmmmmmmm * 2^(e-15) * 255)
* = round_to_nearest((1.mmmmmmmmmm * 255) * 2^(e-15))
@@ -230,13 +230,30 @@ do { \
# endif
#endif

+/**
+ * UNUSED marks variables (or sometimes functions) that have to be defined,
+ * but are sometimes (or always) unused beyond that. A common case is for
+ * a function parameter to be used in some build configurations but not others.
+ * Another case is fallback vfuncs that don't do anything with their params.
+ *
+ * Note that this should not be used for identifiers used in `assert()`;
+ * see ASSERTED below.
+ */
#ifdef HAVE_FUNC_ATTRIBUTE_UNUSED
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

#define MAYBE_UNUSED UNUSED
+/**
+ * Use ASSERTED to indicate that an identifier is unused outside of an `assert()`,
+ * so that assert-free builds don't get "unused variable" warnings.
+ */
+#ifdef NDEBUG
+#define ASSERTED UNUSED
+#else
+#define ASSERTED
+#endif
+
#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
#define MUST_CHECK __attribute__((warn_unused_result))
@@ -261,7 +278,7 @@ do { \
*/
#define ASSERT_BITFIELD_SIZE(STRUCT, FIELD, MAXVAL) \
do { \
-MAYBE_UNUSED STRUCT s; \
+ASSERTED STRUCT s; \
s.FIELD = (MAXVAL); \
assert((int) s.FIELD == (MAXVAL) && "Insufficient bitfield size!"); \
} while (0)

@@ -61,7 +61,7 @@ typedef struct {
#define _SIMPLE_MTX_INITIALIZER_NP { 0 }

static inline void
-simple_mtx_init(simple_mtx_t *mtx, MAYBE_UNUSED int type)
+simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
{
assert(type == mtx_plain);
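A closing note on the design, as a sketch assuming GCC or Clang and the NDEBUG-conditional definition from the macros hunk above: unlike MAYBE_UNUSED, which suppressed the unused-variable warning unconditionally, ASSERTED lets the warning come back in debug builds if the assert() consuming the variable is ever deleted.

#include <assert.h>
#include <stdio.h>

#ifdef NDEBUG
#define ASSERTED __attribute__((unused))   /* mirrors the NDEBUG branch above */
#else
#define ASSERTED
#endif

int main(void)
{
   ASSERTED int written = printf("hello\n");
   assert(written == 6);   /* the only consumer of 'written' */

   /* cc -Wall           -> no warning: the assert reads 'written'
    * cc -Wall -DNDEBUG  -> no warning: ASSERTED suppresses it
    * Delete the assert and a debug build now flags the dead variable,
    * which an unconditional MAYBE_UNUSED would have hidden forever. */
   return 0;
}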