diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 6d6f7f1e68d..4ed317f91f9 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -1872,11 +1872,12 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
 			   VkImageAspectFlags aspects)
 {
 	struct radeon_cmdbuf *cs = cmd_buffer->cs;
-	uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
 	uint32_t level_count = radv_get_levelCount(image, range);
 	if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
 			VK_IMAGE_ASPECT_STENCIL_BIT)) {
+		uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
+
 		/* Use the fastest way when both aspects are used. */
 		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count,
 				     cmd_buffer->state.predicating));
 		radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
diff --git a/src/amd/vulkan/radv_debug.c b/src/amd/vulkan/radv_debug.c
index 7f80381df33..016104a613b 100644
--- a/src/amd/vulkan/radv_debug.c
+++ b/src/amd/vulkan/radv_debug.c
@@ -793,8 +793,12 @@ radv_get_faulty_shader(struct radv_device *device, uint64_t faulty_pc)
 	struct radv_shader_variant *shader = NULL;
 
 	mtx_lock(&device->shader_slab_mutex);
+
 	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
 		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
+#pragma GCC diagnostic pop
 			uint64_t offset = align_u64(s->bo_offset + s->code_size, 256);
 			uint64_t va = radv_buffer_get_va(s->bo);
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 56d4e472bb5..2d3e8813d30 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -5641,7 +5641,6 @@ static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info)
 		    VkFence fence)
 {
 	RADV_FROM_HANDLE(radv_queue, queue, _queue);
-	VkResult result;
 	uint32_t fence_idx = 0;
 
 	if (radv_device_is_lost(queue->device))
@@ -5682,7 +5681,7 @@ static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info)
 	}
 
 	if (fence != VK_NULL_HANDLE && !bindInfoCount) {
-		result = radv_signal_fence(queue, fence);
+		VkResult result = radv_signal_fence(queue, fence);
 		if (result != VK_SUCCESS)
 			return result;
 	}
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index d97f51f8df1..908fb1eade3 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -1044,7 +1044,7 @@ si_make_texture_descriptor(struct radv_device *device,
 	/* Initialize the sampler view for FMASK. */
 	if (fmask_state) {
 		if (radv_image_has_fmask(image)) {
-			uint32_t fmask_format, num_format;
+			uint32_t fmask_format;
 			uint64_t gpu_address = radv_buffer_get_va(image->bo);
 			uint64_t va;
 
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index db67db21fae..016aa34be92 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -1592,7 +1592,6 @@ static void vi_get_fast_clear_parameters(struct radv_device *device,
 	bool extra_value = false;
 	bool has_color = false;
 	bool has_alpha = false;
-	int i;
 	*can_avoid_fast_clear_elim = false;
 
 	*reset_value = RADV_DCC_CLEAR_REG;
@@ -1610,7 +1609,7 @@ static void vi_get_fast_clear_parameters(struct radv_device *device,
 	} else
 		return;
 
-	for (i = 0; i < 4; i++) {
+	for (int i = 0; i < 4; i++) {
 		int index = desc->swizzle[i] - VK_SWIZZLE_X;
 		if (desc->swizzle[i] < VK_SWIZZLE_X ||
 		    desc->swizzle[i] > VK_SWIZZLE_W)
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index f94411d9bd1..1cd576f592d 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -1390,7 +1390,6 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
 	unsigned col_format = (ctx->args->options->key.fs.col_format >> (4 * index)) & 0xf;
 	bool is_int8 = (ctx->args->options->key.fs.is_int8 >> index) & 1;
 	bool is_int10 = (ctx->args->options->key.fs.is_int10 >> index) & 1;
-	unsigned chan;
 
 	LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
 	LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
@@ -1485,13 +1484,13 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
 	    col_format == V_028714_SPI_SHADER_32_ABGR ||
 	    col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
 		for (unsigned i = 0; i < 4; i++) {
-			LLVMValueRef args[2] = {
+			LLVMValueRef class_args[2] = {
 				values[i],
 				LLVMConstInt(ctx->ac.i32, S_NAN | Q_NAN, false)
 			};
 			LLVMValueRef isnan = ac_build_intrinsic(&ctx->ac,
				"llvm.amdgcn.class.f32", ctx->ac.i1,
-				args, 2, AC_FUNC_ATTR_READNONE);
+				class_args, 2, AC_FUNC_ATTR_READNONE);
 			values[i] = LLVMBuildSelect(ctx->ac.builder, isnan,
 						    ctx->ac.f32_0,
 						    values[i], "");
@@ -1500,7 +1499,7 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
 
 	/* Pack f16 or norm_i16/u16. */
 	if (packf) {
-		for (chan = 0; chan < 2; chan++) {
+		for (unsigned chan = 0; chan < 2; chan++) {
 			LLVMValueRef pack_args[2] = {
 				values[2 * chan],
 				values[2 * chan + 1]
@@ -1515,7 +1514,7 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
 
 	/* Pack i16/u16. */
 	if (packi) {
-		for (chan = 0; chan < 2; chan++) {
+		for (unsigned chan = 0; chan < 2; chan++) {
 			LLVMValueRef pack_args[2] = {
 				ac_to_integer(&ctx->ac, values[2 * chan]),
 				ac_to_integer(&ctx->ac, values[2 * chan + 1])
@@ -3897,12 +3896,12 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 		}
 	}
 
-	for(int i = 0; i < shader_count; ++i) {
-		ctx.stage = shaders[i]->info.stage;
-		ctx.shader = shaders[i];
+	for(int shader_idx = 0; shader_idx < shader_count; ++shader_idx) {
+		ctx.stage = shaders[shader_idx]->info.stage;
+		ctx.shader = shaders[shader_idx];
 		ctx.output_mask = 0;
 
-		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
+		if (shaders[shader_idx]->info.stage == MESA_SHADER_GEOMETRY) {
 			for (int i = 0; i < 4; i++) {
 				ctx.gs_next_vertex[i] =
 					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
@@ -3934,7 +3933,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 
 			ctx.abi.load_inputs = load_gs_input;
 			ctx.abi.emit_primitive = visit_end_primitive;
-		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
+		} else if (shaders[shader_idx]->info.stage == MESA_SHADER_TESS_CTRL) {
 			ctx.abi.load_tess_varyings = load_tcs_varyings;
 			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
 			ctx.abi.store_tcs_outputs = store_tcs_output;
@@ -3951,19 +3950,19 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 				ctx.args->options->tess_offchip_block_dw_size,
 				ctx.args->options->chip_class,
 				ctx.args->options->family);
-		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
+		} else if (shaders[shader_idx]->info.stage == MESA_SHADER_TESS_EVAL) {
 			ctx.abi.load_tess_varyings = load_tes_input;
 			ctx.abi.load_tess_coord = load_tess_coord;
 			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
 			ctx.tcs_num_patches = args->options->key.tes.num_patches;
-		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
+		} else if (shaders[shader_idx]->info.stage == MESA_SHADER_VERTEX) {
 			ctx.abi.load_base_vertex = radv_load_base_vertex;
-		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
+		} else if (shaders[shader_idx]->info.stage == MESA_SHADER_FRAGMENT) {
 			ctx.abi.load_sample_position = load_sample_position;
 			ctx.abi.load_sample_mask_in = load_sample_mask_in;
 		}
 
-		if (shaders[i]->info.stage == MESA_SHADER_VERTEX &&
+		if (shaders[shader_idx]->info.stage == MESA_SHADER_VERTEX &&
 		    args->options->key.vs_common_out.as_ngg &&
 		    args->options->key.vs_common_out.export_prim_id) {
 			declare_esgs_ring(&ctx);
@@ -3971,8 +3970,8 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 
 		bool nested_barrier = false;
 
-		if (i) {
-			if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
+		if (shader_idx) {
+			if (shaders[shader_idx]->info.stage == MESA_SHADER_GEOMETRY &&
 			    args->options->key.vs_common_out.as_ngg) {
 				gfx10_ngg_gs_emit_prologue(&ctx);
 				nested_barrier = false;
@@ -4002,8 +4001,8 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 			ac_emit_barrier(&ctx.ac, ctx.stage);
 		}
 
-		nir_foreach_shader_out_variable(variable, shaders[i])
-			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
+		nir_foreach_shader_out_variable(variable, shaders[shader_idx])
+			scan_shader_output_decl(&ctx, variable, shaders[shader_idx], shaders[shader_idx]->info.stage);
 
 		ac_setup_rings(&ctx);
 
@@ -4016,7 +4015,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 			LLVMValueRef count =
 				ac_unpack_param(&ctx.ac,
 						ac_get_arg(&ctx.ac, args->merged_wave_info),
-						8 * i, 8);
+						8 * shader_idx, 8);
 			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
 			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
 							  thread_id, count, "");
@@ -4025,14 +4024,14 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
 		}
 
-		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
-			prepare_interp_optimize(&ctx, shaders[i]);
-		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
-			handle_vs_inputs(&ctx, shaders[i]);
-		else if(shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
+		if (shaders[shader_idx]->info.stage == MESA_SHADER_FRAGMENT)
+			prepare_interp_optimize(&ctx, shaders[shader_idx]);
+		else if(shaders[shader_idx]->info.stage == MESA_SHADER_VERTEX)
+			handle_vs_inputs(&ctx, shaders[shader_idx]);
+		else if(shaders[shader_idx]->info.stage == MESA_SHADER_GEOMETRY)
 			prepare_gs_input_vgprs(&ctx, shader_count >= 2);
 
-		ac_nir_translate(&ctx.ac, &ctx.abi, &args->ac, shaders[i]);
+		ac_nir_translate(&ctx.ac, &ctx.abi, &args->ac, shaders[shader_idx]);
 
 		if (shader_count >= 2 || is_ngg) {
 			LLVMBuildBr(ctx.ac.builder, merge_block);
@@ -4041,16 +4040,16 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 
 		/* This needs to be outside the if wrapping the shader body, as sometimes
 		 * the HW generates waves with 0 es/vs threads. */
-		if (is_pre_gs_stage(shaders[i]->info.stage) &&
+		if (is_pre_gs_stage(shaders[shader_idx]->info.stage) &&
 		    args->options->key.vs_common_out.as_ngg &&
-		    i == shader_count - 1) {
+		    shader_idx == shader_count - 1) {
 			handle_ngg_outputs_post_2(&ctx);
-		} else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
+		} else if (shaders[shader_idx]->info.stage == MESA_SHADER_GEOMETRY &&
 			   args->options->key.vs_common_out.as_ngg) {
 			gfx10_ngg_gs_emit_epilogue_2(&ctx);
 		}
 
-		if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
+		if (shaders[shader_idx]->info.stage == MESA_SHADER_TESS_CTRL) {
 			unsigned tcs_num_outputs = ctx.args->shader_info->tcs.num_linked_outputs;
 			unsigned tcs_num_patch_outputs = ctx.args->shader_info->tcs.num_linked_patch_outputs;
 			args->shader_info->tcs.num_patches = ctx.tcs_num_patches;
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 77b7ccef391..3d5b47d704d 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -3051,7 +3051,7 @@ lower_bit_size_callback(const nir_instr *instr, void *_)
 VkResult radv_create_shaders(struct radv_pipeline *pipeline,
                              struct radv_device *device,
                              struct radv_pipeline_cache *cache,
-                             const struct radv_pipeline_key *key,
+                             const struct radv_pipeline_key *pipeline_key,
                              const VkPipelineShaderStageCreateInfo **pStages,
                              const VkPipelineCreateFlags flags,
                              VkPipelineCreationFeedbackEXT *pipeline_feedback,
@@ -3084,7 +3084,7 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 		}
 	}
 
-	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
+	radv_hash_shaders(hash, pStages, pipeline->layout, pipeline_key, get_hash_flags(device));
 	memcpy(gs_copy_hash, hash, 20);
 	gs_copy_hash[0] ^= 1;
 
@@ -3124,13 +3124,13 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 
 		radv_start_feedback(stage_feedbacks[i]);
 
-		if (key->compute_subgroup_size) {
+		if (pipeline_key->compute_subgroup_size) {
 			/* Only compute shaders currently support requiring a
 			 * specific subgroup size.
			 */
			assert(i == MESA_SHADER_COMPUTE);
-			subgroup_size = key->compute_subgroup_size;
-			ballot_bit_size = key->compute_subgroup_size;
+			subgroup_size = pipeline_key->compute_subgroup_size;
+			ballot_bit_size = pipeline_key->compute_subgroup_size;
 		}
 
 		nir[i] = radv_shader_compile_to_nir(device, modules[i],
@@ -3282,7 +3282,7 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 			nir_print_shader(nir[i], stderr);
 	}
 
-	radv_fill_shader_keys(device, keys, key, nir);
+	radv_fill_shader_keys(device, keys, pipeline_key, nir);
 
 	radv_fill_shader_info(pipeline, pStages, keys, infos, nir);
 
@@ -3299,12 +3299,12 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 		else
 			ngg_info = &infos[MESA_SHADER_VERTEX].ngg_info;
 
-		gfx10_get_ngg_info(key, pipeline, nir, infos, ngg_info);
+		gfx10_get_ngg_info(pipeline_key, pipeline, nir, infos, ngg_info);
 	} else if (nir[MESA_SHADER_GEOMETRY]) {
 		struct gfx9_gs_info *gs_info =
 			&infos[MESA_SHADER_GEOMETRY].gs_ring_info;
 
-		gfx9_get_gs_info(key, pipeline, nir, infos, gs_info);
+		gfx9_get_gs_info(pipeline_key, pipeline, nir, infos, gs_info);
 	}
 
 	if(modules[MESA_SHADER_GEOMETRY]) {
@@ -3331,16 +3331,16 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 	}
 
 	if (!keep_executable_info && !keep_statistic_info && pipeline->gs_copy_shader) {
-		struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
-		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
+		struct radv_shader_binary *gs_binaries[MESA_SHADER_STAGES] = {NULL};
+		struct radv_shader_variant *gs_variants[MESA_SHADER_STAGES] = {0};
 
-		binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
-		variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
+		gs_binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
+		gs_variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
 
 		radv_pipeline_cache_insert_shaders(device, cache,
 						   gs_copy_hash,
-						   variants,
-						   binaries);
+						   gs_variants,
+						   gs_binaries);
 	}
 	free(gs_copy_binary);
 }
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index d031129df27..afffb7d0077 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -483,19 +483,19 @@ radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
 	while (end - p >= sizeof(struct cache_entry)) {
 		struct cache_entry *entry = (struct cache_entry*)p;
 		struct cache_entry *dest_entry;
-		size_t size = entry_size(entry);
-		if(end - p < size)
+		size_t size_of_entry = entry_size(entry);
+		if(end - p < size_of_entry)
 			break;
 
-		dest_entry = vk_alloc(&cache->alloc, size,
+		dest_entry = vk_alloc(&cache->alloc, size_of_entry,
 				      8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
 		if (dest_entry) {
-			memcpy(dest_entry, entry, size);
+			memcpy(dest_entry, entry, size_of_entry);
 			for (int i = 0; i < MESA_SHADER_STAGES; ++i)
 				dest_entry->variants[i] = NULL;
 			radv_pipeline_cache_add_entry(cache, dest_entry);
 		}
-		p += size;
+		p += size_of_entry;
 	}
 
 	return true;
@@ -594,16 +594,16 @@ VkResult radv_GetPipelineCacheData(
 		if (!cache->hash_table[i])
 			continue;
 		entry = cache->hash_table[i];
-		const uint32_t size = entry_size(entry);
-		if (end < p + size) {
+		const uint32_t size_of_entry = entry_size(entry);
+		if (end < p + size_of_entry) {
 			result = VK_INCOMPLETE;
 			break;
 		}
 
-		memcpy(p, entry, size);
+		memcpy(p, entry, size_of_entry);
 		for(int j = 0; j < MESA_SHADER_STAGES; ++j)
 			((struct cache_entry*)p)->variants[j] = NULL;
-		p += size;
+		p += size_of_entry;
 	}
 
 	*pDataSize = p - pData;
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 59ce6b02a91..5f12f7727aa 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -1232,9 +1232,9 @@ VkResult radv_GetQueryPoolResults(
 	if (radv_device_is_lost(device))
 		return VK_ERROR_DEVICE_LOST;
 
-	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
+	for(unsigned query_idx = 0; query_idx < queryCount; ++query_idx, data += stride) {
 		char *dest = data;
-		unsigned query = firstQuery + i;
+		unsigned query = firstQuery + query_idx;
 		char *src = pool->ptr + query * pool->stride;
 		uint32_t available;
 
@@ -1852,9 +1852,6 @@ void radv_CmdEndQueryIndexedEXT(
 	 * query returns 0.
 	 */
 	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
-		uint64_t avail_va = va + pool->availability_offset + 4 * query;
-
-
 		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
 			va += pool->stride;
 			avail_va += 4;
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index fc3cf734e83..ea9b9444c7a 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -761,7 +761,11 @@ radv_alloc_shader_memory(struct radv_device *device,
 	mtx_lock(&device->shader_slab_mutex);
 	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
 		uint64_t offset = 0;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
 		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
+#pragma GCC diagnostic pop
 			if (s->bo_offset - offset >= shader->code_size) {
 				shader->bo = slab->bo;
 				shader->bo_offset = offset;
@@ -1671,17 +1675,16 @@ radv_dump_shader_stats(struct radv_device *device,
 	if (result != VK_SUCCESS)
 		goto fail;
 
-	for (unsigned i = 0; i < prop_count; i++) {
-		if (!(props[i].stages & mesa_to_vk_shader_stage(stage)))
+	for (unsigned exec_idx = 0; exec_idx < prop_count; exec_idx++) {
+		if (!(props[exec_idx].stages & mesa_to_vk_shader_stage(stage)))
 			continue;
 
 		VkPipelineExecutableStatisticKHR *stats = NULL;
 		uint32_t stat_count = 0;
-		VkResult result;
 
 		VkPipelineExecutableInfoKHR exec_info = {0};
 		exec_info.pipeline = radv_pipeline_to_handle(pipeline);
-		exec_info.executableIndex = i;
+		exec_info.executableIndex = exec_idx;
 
 		result = radv_GetPipelineExecutableStatisticsKHR(radv_device_to_handle(device),
								 &exec_info,
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index 76218dccfbd..e9a16de6bad 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -288,8 +288,7 @@ static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
 			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
 
 		for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
-			struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
-			free(rcs->buf);
+			free(cs->old_cs_buffers[i].buf);
 		}
 
 		free(cs->old_cs_buffers);
@@ -1091,7 +1090,6 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 		unsigned number_of_ibs;
 		uint32_t *ptr;
 		unsigned cnt = 0;
-		unsigned size = 0;
 		unsigned pad_words = 0;
 
 		/* Compute the number of IBs for this submit. */
@@ -1166,6 +1164,8 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 			cnt++;
 
 			free(new_cs_array);
 		} else {
+			unsigned size = 0;
+
 			if (preamble_cs)
 				size += preamble_cs->cdw;
@@ -1194,9 +1194,9 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 			}
 
 			for (unsigned j = 0; j < cnt; ++j) {
-				struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
-				memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
-				ptr += cs->base.cdw;
+				struct radv_amdgpu_cs *cs2 = radv_amdgpu_cs(cs_array[i + j]);
+				memcpy(ptr, cs2->base.buf, 4 * cs2->base.cdw);
+				ptr += cs2->base.cdw;
 			}