anv: Do not duplicate intel_device_info memory in each logical device

Each logical device can point to its physical device's intel_device_info,
saving at least one copy of intel_device_info per logical device.

This also allows us to mark the pointer 'const', preventing values in
intel_device_info from being changed by mistake.

Acked-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Acked-by: Jordan Justen <jordan.l.justen@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17897>
Author: José Roberto de Souza
Date:   2022-08-04 12:56:17 -07:00 (committed by Marge Bot)
Parent: 9474ff72dd
Commit: 356a60bd6c

25 changed files with 216 additions and 216 deletions
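In short, struct anv_device stops embedding its own copy of intel_device_info and instead keeps a const pointer into its anv_physical_device. A condensed sketch of the pattern (field lists trimmed to the members involved; the full diff follows):

   struct anv_device {
      struct anv_physical_device *physical;
      /* Before: struct intel_device_info info;  (one full copy per logical device) */
      const struct intel_device_info *info;      /* After: shared, read-only */
   };

   /* anv_CreateDevice() now stores a pointer instead of copying the struct: */
   device->info = &physical_device->info;

   /* Call sites that took the address of the embedded copy drop the '&': */
   const struct intel_device_info *devinfo = device->info;   /* was: &device->info */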

View file

@@ -568,7 +568,7 @@ buffered_event_count(struct intel_measure_device *device)
 static void
 print_combined_results(struct intel_measure_device *measure_device,
                        int result_count,
-                       struct intel_device_info *info)
+                       const struct intel_device_info *info)
 {
    if (result_count == 0)
       return;
@@ -619,7 +619,7 @@ print_combined_results(struct intel_measure_device *measure_device,
  */
 static void
 intel_measure_print(struct intel_measure_device *device,
-                    struct intel_device_info *info)
+                    const struct intel_device_info *info)
 {
    while (true) {
       const int events_to_combine = buffered_event_count(device);
@@ -635,7 +635,7 @@ intel_measure_print(struct intel_measure_device *device,
  */
 void
 intel_measure_gather(struct intel_measure_device *measure_device,
-                     struct intel_device_info *info)
+                     const struct intel_device_info *info)
 {
    pthread_mutex_lock(&measure_device->mutex);

View file

@@ -164,6 +164,6 @@ bool intel_measure_ready(struct intel_measure_batch *batch);
 struct intel_device_info;

 void intel_measure_gather(struct intel_measure_device *device,
-                          struct intel_device_info *info);
+                          const struct intel_device_info *info);

 #endif /* INTEL_MEASURE_H */

View file

@@ -516,7 +516,7 @@ intel_driver_ds_init(void)

 void
 intel_ds_device_init(struct intel_ds_device *device,
-                     struct intel_device_info *devinfo,
+                     const struct intel_device_info *devinfo,
                      int drm_fd,
                      uint32_t gpu_id,
                      enum intel_ds_api api)

View file

@@ -151,7 +151,7 @@ struct intel_ds_flush_data {

 void intel_driver_ds_init(void);
 void intel_ds_device_init(struct intel_ds_device *device,
-                          struct intel_device_info *devinfo,
+                          const struct intel_device_info *devinfo,
                           int drm_fd,
                           uint32_t gpu_id,
                           enum intel_ds_api api);

View file

@@ -374,7 +374,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
 {
    VkResult result;

-   if (device->info.verx10 >= 125) {
+   if (device->info->verx10 >= 125) {
       /* Make sure VMA addresses are 2MiB aligned for the block pool */
       assert(anv_is_aligned(start_address, 2 * 1024 * 1024));
       assert(anv_is_aligned(initial_size, 2 * 1024 * 1024));
@@ -845,7 +845,7 @@ anv_state_pool_init(struct anv_state_pool *pool,
    assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);

    uint32_t initial_size = block_size * 16;
-   if (device->info.verx10 >= 125)
+   if (device->info->verx10 >= 125)
       initial_size = MAX2(initial_size, 2 * 1024 * 1024);

    VkResult result = anv_block_pool_init(&pool->block_pool, device, name,
@@ -1455,7 +1455,7 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
    assert(stage < ARRAY_SIZE(pool->bos));

-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;

    /* On GFX version 12.5, scratch access changed to a surface-based model.
     * Instead of each shader type having its own layout based on IDs passed
@@ -1635,13 +1635,13 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
    uint32_t align = 4096;

    /* Gen12 CCS surface addresses need to be 64K aligned. */
-   if (device->info.ver >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
+   if (device->info->ver >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
      align = 64 * 1024;

    /* For XeHP, lmem and smem cannot share a single PDE, which means they
     * can't live in the same 2MiB aligned region.
     */
-   if (device->info.verx10 >= 125)
+   if (device->info->verx10 >= 125)
      align = 2 * 1024 * 1024;

    if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
@@ -1683,7 +1683,7 @@ anv_device_alloc_bo(struct anv_device *device,
    size = align_u64(size, 4096);

    uint64_t ccs_size = 0;
-   if (device->info.has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
+   if (device->info->has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
       /* Align the size up to the next multiple of 64K so we don't have any
        * AUX-TT entries pointing from a 64K page to itself.
        */
@@ -1737,7 +1737,7 @@ anv_device_alloc_bo(struct anv_device *device,
       .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
       .has_client_visible_address =
          (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
-      .has_implicit_ccs = ccs_size > 0 || (device->info.verx10 >= 125 &&
+      .has_implicit_ccs = ccs_size > 0 || (device->info->verx10 >= 125 &&
                                            (alloc_flags & ANV_BO_ALLOC_LOCAL_MEM)),
    };
@@ -1764,7 +1764,7 @@ anv_device_alloc_bo(struct anv_device *device,
        * I915_CACHING_CACHED, which on non-LLC means snooped so there's no
        * need to do this there.
        */
-      if (!device->info.has_llc) {
+      if (!device->info->has_llc) {
         anv_gem_set_caching(device, new_bo.gem_handle,
                             I915_CACHING_CACHED);
      }
@@ -1781,7 +1781,7 @@ anv_device_alloc_bo(struct anv_device *device,
    }

    if (new_bo._ccs_size > 0) {
-      assert(device->info.has_aux_map);
+      assert(device->info->has_aux_map);
      intel_aux_map_add_mapping(device->aux_map_ctx,
                                intel_canonical_address(new_bo.offset),
                                intel_canonical_address(new_bo.offset + new_bo.size),
@@ -1846,7 +1846,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
                            ANV_BO_ALLOC_FIXED_ADDRESS)));
    assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS) ||
-          (device->physical->has_implicit_ccs && device->info.has_aux_map));
+          (device->physical->has_implicit_ccs && device->info->has_aux_map));

    struct anv_bo_cache *cache = &device->bo_cache;
    const uint32_t bo_flags =
@@ -1936,7 +1936,7 @@ anv_device_import_bo(struct anv_device *device,
                            ANV_BO_ALLOC_FIXED_ADDRESS)));
    assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS) ||
-          (device->physical->has_implicit_ccs && device->info.has_aux_map));
+          (device->physical->has_implicit_ccs && device->info->has_aux_map));

    struct anv_bo_cache *cache = &device->bo_cache;
    const uint32_t bo_flags =
@@ -2153,7 +2153,7 @@ anv_device_release_bo(struct anv_device *device,
    if (bo->_ccs_size > 0) {
       assert(device->physical->has_implicit_ccs);
-      assert(device->info.has_aux_map);
+      assert(device->info->has_aux_map);
       assert(bo->has_implicit_ccs);
      intel_aux_map_unmap_range(device->aux_map_ctx,
                                intel_canonical_address(bo->offset),

View file

@@ -214,7 +214,7 @@ get_ahw_buffer_format_properties2(
       tiling = VK_IMAGE_TILING_LINEAR;

    p->formatFeatures =
-      anv_get_image_format_features2(&device->info, p->format, anv_format,
+      anv_get_image_format_features2(device->info, p->format, anv_format,
                                      tiling, NULL);

    /* "Images can be created with an external format even if the Android hardware
@@ -530,7 +530,7 @@ anv_image_init_from_gralloc(struct anv_device *device,
    }

    anv_info.isl_tiling_flags = 1u << tiling;
-   enum isl_format format = anv_get_isl_format(&device->info,
+   enum isl_format format = anv_get_isl_format(device->info,
                                                base_info->format,
                                                VK_IMAGE_ASPECT_COLOR_BIT,
                                                base_info->tiling);

View file

@@ -588,7 +588,7 @@ emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
       GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;

    anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
-      bbs.DWordLength = cmd_buffer->device->info.ver < 8 ?
+      bbs.DWordLength = cmd_buffer->device->info->ver < 8 ?
                         gfx7_length : gfx8_length;
       bbs.SecondLevelBatchBuffer = Firstlevelbatch;
       bbs.AddressSpaceIndicator = ASI_PPGTT;
@@ -792,7 +792,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
    cmd_buffer->bt_next.map += bt_size;
    cmd_buffer->bt_next.alloc_size -= bt_size;

-   if (cmd_buffer->device->info.verx10 >= 125) {
+   if (cmd_buffer->device->info->verx10 >= 125) {
       /* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to change the binding
        * table address independently from surface state base address. We no
        * longer need any sort of offsetting.
@@ -1018,7 +1018,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
        * prefetch.
        */
       if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
-         const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+         const struct intel_device_info *devinfo = cmd_buffer->device->info;
          /* Careful to have everything in signed integer. */
          int32_t prefetch_len = devinfo->cs_prefetch_size;
          int32_t batch_len =
@@ -2033,7 +2033,7 @@ anv_queue_exec_utrace_locked(struct anv_queue *queue,
    if (result != VK_SUCCESS)
       goto error;

-   int ret = queue->device->info.no_hw ? 0 :
+   int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret)
       result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
@@ -2253,13 +2253,13 @@ anv_queue_exec_locked(struct anv_queue *queue,
         .rsvd1 = device->context_id,
      };

-      int ret = queue->device->info.no_hw ? 0 :
+      int ret = queue->device->info->no_hw ? 0 :
         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
      if (ret)
         result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
    }

-   int ret = queue->device->info.no_hw ? 0 :
+   int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret)
       result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
@@ -2388,7 +2388,7 @@ anv_queue_submit(struct vk_queue *vk_queue,
    struct anv_device *device = queue->device;
    VkResult result;

-   if (queue->device->info.no_hw) {
+   if (queue->device->info->no_hw) {
       for (uint32_t i = 0; i < submit->signal_count; i++) {
          result = vk_sync_signal(&device->vk,
                                  submit->signals[i].sync,
@@ -2419,7 +2419,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
    VkResult result = VK_SUCCESS;
    int err;

-   if (queue->device->info.no_hw)
+   if (queue->device->info->no_hw)
       return VK_SUCCESS;

    /* This is only used by device init so we can assume the queue is empty and

View file

@@ -95,7 +95,7 @@ anv_device_init_blorp(struct anv_device *device)
    device->blorp.compiler = device->physical->compiler;
    device->blorp.lookup_shader = lookup_blorp_shader;
    device->blorp.upload_shader = upload_blorp_shader;
-   switch (device->info.verx10) {
+   switch (device->info->verx10) {
    case 70:
       device->blorp.exec = gfx7_blorp_exec;
       break;
@@ -211,7 +211,7 @@ get_blorp_surf_for_anv_image(const struct anv_device *device,

    if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX) {
       assert(usage != 0);
-      aux_usage = anv_layout_to_aux_usage(&device->info, image,
+      aux_usage = anv_layout_to_aux_usage(device->info, image,
                                           aspect, usage, layout);
    }
@@ -503,7 +503,7 @@ copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
    }

    const enum isl_format linear_format =
-      anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk.format,
+      anv_get_isl_format(cmd_buffer->device->info, anv_image->vk.format,
                          aspect, VK_IMAGE_TILING_LINEAR);
    const struct isl_format_layout *linear_fmtl =
       isl_format_get_layout(linear_format);
@@ -678,10 +678,10 @@ blit_image(struct anv_cmd_buffer *cmd_buffer,
                                 dst_image_layout, ISL_AUX_USAGE_NONE, &dst);

       struct anv_format_plane src_format =
-         anv_get_format_aspect(&cmd_buffer->device->info, src_image->vk.format,
+         anv_get_format_aspect(cmd_buffer->device->info, src_image->vk.format,
                                1U << aspect_bit, src_image->vk.tiling);
       struct anv_format_plane dst_format =
-         anv_get_format_aspect(&cmd_buffer->device->info, dst_image->vk.format,
+         anv_get_format_aspect(cmd_buffer->device->info, dst_image->vk.format,
                                1U << aspect_bit, dst_image->vk.tiling);

       unsigned dst_start, dst_end;
@@ -1019,7 +1019,7 @@ void anv_CmdClearColorImage(
                                 imageLayout, ISL_AUX_USAGE_NONE, &surf);

       struct anv_format_plane src_format =
-         anv_get_format_aspect(&cmd_buffer->device->info, image->vk.format,
+         anv_get_format_aspect(cmd_buffer->device->info, image->vk.format,
                                VK_IMAGE_ASPECT_COLOR_BIT, image->vk.tiling);

       unsigned base_layer = pRanges[r].baseArrayLayer;
@@ -1263,7 +1263,7 @@ clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
    enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
    if (d_att->vk_format != VK_FORMAT_UNDEFINED) {
-      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
+      depth_format = anv_get_isl_format(cmd_buffer->device->info,
                                         d_att->vk_format,
                                         VK_IMAGE_ASPECT_DEPTH_BIT,
                                         VK_IMAGE_TILING_OPTIMAL);
@@ -1446,12 +1446,12 @@ resolve_image(struct anv_cmd_buffer *cmd_buffer,
    anv_foreach_image_aspect_bit(aspect_bit, src_image,
                                 region->srcSubresource.aspectMask) {
      enum isl_aux_usage src_aux_usage =
-        anv_layout_to_aux_usage(&cmd_buffer->device->info, src_image,
+        anv_layout_to_aux_usage(cmd_buffer->device->info, src_image,
                                 (1 << aspect_bit),
                                 VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                 src_image_layout);
      enum isl_aux_usage dst_aux_usage =
-        anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_image,
+        anv_layout_to_aux_usage(cmd_buffer->device->info, dst_image,
                                 (1 << aspect_bit),
                                 VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                                 dst_image_layout);
@@ -1818,7 +1818,7 @@ anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
    /* Multisampling with multi-planar formats is not supported */
    assert(image->n_planes == 1);

-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    struct blorp_batch batch;
    anv_blorp_batch_init(cmd_buffer, &batch,
                         BLORP_BATCH_PREDICATE_ENABLE * predicate +
@@ -1910,7 +1910,7 @@ anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
           anv_image_aux_layers(image, aspect, level));
    const uint32_t plane = anv_image_aspect_to_plane(image, aspect);

-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    struct blorp_batch batch;
    anv_blorp_batch_init(cmd_buffer, &batch,

View file

@@ -240,7 +240,7 @@ VkResult anv_ResetCommandBuffer(
 void
 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    anv_genX(devinfo, cmd_buffer_emit_state_base_address)(cmd_buffer);
 }
@@ -253,7 +253,7 @@ anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t base_layer,
                                   uint32_t layer_count)
 {
-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    anv_genX(devinfo, cmd_buffer_mark_image_written)(cmd_buffer, image,
                                                     aspect, aux_usage,
                                                     level, base_layer,
@@ -263,7 +263,7 @@ anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
 void
 anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
 {
-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    anv_genX(devinfo, cmd_emit_conditional_render_predicate)(cmd_buffer);
 }
@@ -314,7 +314,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
    struct anv_device *device = cmd_buffer->device;

    uint64_t ray_shadow_size =
-      align_u64(brw_rt_ray_queries_shadow_stacks_size(&device->info,
+      align_u64(brw_rt_ray_queries_shadow_stacks_size(device->info,
                                                       pipeline->ray_queries),
                4096);
    if (ray_shadow_size > 0 &&
@@ -359,7 +359,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,

    /* Fill the push constants & mark them dirty. */
    struct anv_state ray_query_global_state =
-      anv_genX(&device->info, cmd_buffer_ray_query_globals)(cmd_buffer);
+      anv_genX(device->info, cmd_buffer_ray_query_globals)(cmd_buffer);
    struct anv_address ray_query_globals_addr = (struct anv_address) {
       .bo = device->dynamic_state_pool.block_pool.bo,
@@ -736,7 +736,7 @@ anv_cmd_buffer_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
 struct anv_state
 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
 {
-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    struct anv_push_constants *data =
       &cmd_buffer->state.compute.base.push_constants;
    struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
@@ -751,7 +751,7 @@ anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
       return (struct anv_state) { .offset = 0 };

    const unsigned push_constant_alignment =
-      cmd_buffer->device->info.ver < 8 ? 32 : 64;
+      cmd_buffer->device->info->ver < 8 ? 32 : 64;
    const unsigned aligned_total_push_constants_size =
       ALIGN(total_push_constants_size, push_constant_alignment);
    struct anv_state state;
@@ -1076,7 +1076,7 @@ void anv_CmdSetRayTracingPipelineStackSizeKHR(
    if (rt->scratch.layout.total_size == 1 << stack_size_log2)
       return;

-   brw_rt_compute_scratch_layout(&rt->scratch.layout, &device->info,
+   brw_rt_compute_scratch_layout(&rt->scratch.layout, device->info,
                                  stack_ids_per_dss, 1 << stack_size_log2);

    unsigned bucket = stack_size_log2 - 10;

View file

@@ -2901,7 +2901,7 @@ anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align,
 static void
 anv_device_init_border_colors(struct anv_device *device)
 {
-   if (device->info.platform == INTEL_PLATFORM_HSW) {
+   if (device->info->platform == INTEL_PLATFORM_HSW) {
      static const struct hsw_border_color border_colors[] = {
         [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
         [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
@@ -3271,7 +3271,7 @@ VkResult anv_CreateDevice(
       }
    }

-   device->info = physical_device->info;
+   device->info = &physical_device->info;
    device->isl_dev = physical_device->isl_dev;

    /* On Broadwell and later, we can use batch chaining to more efficiently
@@ -3279,7 +3279,7 @@ VkResult anv_CreateDevice(
     * command parser gets in the way and we have to fall back to growing
     * the batch.
     */
-   device->can_chain_batches = device->info.ver >= 8;
+   device->can_chain_batches = device->info->ver >= 8;

    device->robust_buffer_access = robust_buffer_access;
@@ -3327,7 +3327,7 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_general_state_pool;

-   if (device->info.ver >= 8) {
+   if (device->info->ver >= 8) {
      /* The border color pointer is limited to 24 bits, so we need to make
       * sure that any such color used at any point in the program doesn't
       * exceed that limit.
@@ -3352,7 +3352,7 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_instruction_state_pool;

-   if (device->info.verx10 >= 125) {
+   if (device->info->verx10 >= 125) {
      /* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to give the binding
       * table its own base address separately from surface state base.
       */
@@ -3373,7 +3373,7 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_surface_state_pool;

-   if (device->info.has_aux_map) {
+   if (device->info->has_aux_map) {
      device->aux_map_ctx = intel_aux_map_init(device, &aux_map_allocator,
                                               &physical_device->info);
      if (!device->aux_map_ctx)
@@ -3404,7 +3404,7 @@ VkResult anv_CreateDevice(
    if (device->vk.enabled_extensions.KHR_ray_query) {
      uint32_t ray_queries_size =
-        align_u32(brw_rt_ray_queries_hw_stacks_size(&device->info), 4096);
+        align_u32(brw_rt_ray_queries_hw_stacks_size(device->info), 4096);

      result = anv_device_alloc_bo(device, "ray queries",
                                   ray_queries_size,
@@ -3419,11 +3419,11 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_ray_query_bo;

-   if (device->info.ver >= 12 &&
+   if (device->info->ver >= 12 &&
       device->vk.enabled_extensions.KHR_fragment_shading_rate) {
      uint32_t n_cps_states = 3 * 3; /* All combinaisons of X by Y CP sizes (1, 2, 4) */

-      if (device->info.has_coarse_pixel_primitive_and_cb)
+      if (device->info->has_coarse_pixel_primitive_and_cb)
         n_cps_states *= 5 * 5; /* 5 combiners by 2 operators */

      n_cps_states += 1; /* Disable CPS */
@@ -3433,12 +3433,12 @@ VkResult anv_CreateDevice(
      device->cps_states =
         anv_state_pool_alloc(&device->dynamic_state_pool,
-                             n_cps_states * CPS_STATE_length(&device->info) * 4,
+                             n_cps_states * CPS_STATE_length(device->info) * 4,
                              32);
      if (device->cps_states.map == NULL)
         goto fail_trivial_batch;

-      anv_genX(&device->info, init_cps_device_state)(device);
+      anv_genX(device->info, init_cps_device_state)(device);
    }

    /* Allocate a null surface state at surface state offset 0. This makes
@@ -3458,7 +3458,7 @@ VkResult anv_CreateDevice(
    /* TODO(RT): Do we want some sort of data structure for this? */
    memset(device->rt_scratch_bos, 0, sizeof(device->rt_scratch_bos));

-   result = anv_genX(&device->info, init_device_state)(device);
+   result = anv_genX(device->info, init_device_state)(device);
    if (result != VK_SUCCESS)
       goto fail_trivial_batch_bo_and_scratch_pool;
@@ -3515,7 +3515,7 @@ VkResult anv_CreateDevice(
 fail_workaround_bo:
    anv_device_release_bo(device, device->workaround_bo);
 fail_surface_aux_map_pool:
-   if (device->info.has_aux_map) {
+   if (device->info->has_aux_map) {
      intel_aux_map_finish(device->aux_map_ctx);
      device->aux_map_ctx = NULL;
    }
@@ -3527,7 +3527,7 @@ VkResult anv_CreateDevice(
 fail_instruction_state_pool:
    anv_state_pool_finish(&device->instruction_state_pool);
 fail_dynamic_state_pool:
-   if (device->info.ver >= 8)
+   if (device->info->ver >= 8)
      anv_state_reserved_pool_finish(&device->custom_border_colors);
    anv_state_pool_finish(&device->dynamic_state_pool);
 fail_general_state_pool:
@@ -3583,7 +3583,7 @@ void anv_DestroyDevice(
    /* We only need to free these to prevent valgrind errors. The backing
     * BO will go away in a couple of lines so we don't actually leak.
     */
-   if (device->info.ver >= 8)
+   if (device->info->ver >= 8)
      anv_state_reserved_pool_finish(&device->custom_border_colors);
    anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
    anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
@@ -3607,7 +3607,7 @@ void anv_DestroyDevice(
    anv_device_release_bo(device, device->workaround_bo);
    anv_device_release_bo(device, device->trivial_batch_bo);

-   if (device->info.has_aux_map) {
+   if (device->info->has_aux_map) {
      intel_aux_map_finish(device->aux_map_ctx);
      device->aux_map_ctx = NULL;
    }
@@ -3858,7 +3858,7 @@ VkResult anv_AllocateMemory(
    }

    /* By default, we want all VkDeviceMemory objects to support CCS */
-   if (device->physical->has_implicit_ccs && device->info.has_aux_map)
+   if (device->physical->has_implicit_ccs && device->info->has_aux_map)
      alloc_flags |= ANV_BO_ALLOC_IMPLICIT_CCS;

    /* If i915 reported a mappable/non_mappable vram regions and the
@@ -4176,7 +4176,7 @@ VkResult anv_MapMemory(
    uint32_t gem_flags = 0;

-   if (!device->info.has_llc &&
+   if (!device->info->has_llc &&
       (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
      gem_flags |= I915_MMAP_WC;
@@ -4660,7 +4660,7 @@ VkResult anv_GetCalibratedTimestampsEXT(
    uint64_t *pMaxDeviation)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   uint64_t timestamp_frequency = device->info.timestamp_frequency;
+   uint64_t timestamp_frequency = device->info->timestamp_frequency;
    int ret;
    int d;
    uint64_t begin, end;

View file

@@ -102,7 +102,7 @@ anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
 {
    struct drm_i915_gem_mmap_offset gem_mmap = {
       .handle = gem_handle,
-      .flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
+      .flags = device->info->has_local_mem ? I915_MMAP_OFFSET_FIXED :
         (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
    };
    assert(offset == 0);
@@ -122,7 +122,7 @@ static void*
 anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                     uint64_t offset, uint64_t size, uint32_t flags)
 {
-   assert(!device->info.has_local_mem);
+   assert(!device->info->has_local_mem);

    struct drm_i915_gem_mmap gem_mmap = {
       .handle = gem_handle,
@@ -286,7 +286,7 @@ anv_gem_set_tiling(struct anv_device *device,
    /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
     * nothing needs to be done.
     */
-   if (!device->info.has_tiling_uapi)
+   if (!device->info->has_tiling_uapi)
      return 0;

    /* set_tiling overwrites the input on the error path, so we have to open

View file

@@ -650,7 +650,7 @@ add_aux_state_tracking_buffer(struct anv_device *device,
          image->vk.aspects & (VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV |
                               VK_IMAGE_ASPECT_DEPTH_BIT));

-   const unsigned clear_color_state_size = device->info.ver >= 10 ?
+   const unsigned clear_color_state_size = device->info->ver >= 10 ?
      device->isl_dev.ss.clear_color_state_size :
      device->isl_dev.ss.clear_value_size;
@@ -731,7 +731,7 @@ add_aux_surface_if_supported(struct anv_device *device,
         return VK_SUCCESS;
      }

-      if (device->info.ver == 7) {
+      if (device->info->ver == 7) {
         anv_perf_warn(VK_LOG_OBJS(&image->vk.base), "Implement gfx7 HiZ");
         return VK_SUCCESS;
      }
@@ -741,7 +741,7 @@ add_aux_surface_if_supported(struct anv_device *device,
         return VK_SUCCESS;
      }

-      if (device->info.ver == 8 && image->vk.samples > 1) {
+      if (device->info->ver == 8 && image->vk.samples > 1) {
         anv_perf_warn(VK_LOG_OBJS(&image->vk.base),
                       "Enable gfx8 multisampled HiZ");
         return VK_SUCCESS;
@@ -771,10 +771,10 @@ add_aux_surface_if_supported(struct anv_device *device,
          *
          * TODO: This is a heuristic trade-off; we haven't tuned it at all.
          */
-         assert(device->info.ver >= 12);
+         assert(device->info->ver >= 12);
         image->planes[plane].aux_usage = ISL_AUX_USAGE_HIZ_CCS_WT;
      } else {
-         assert(device->info.ver >= 12);
+         assert(device->info->ver >= 12);
         image->planes[plane].aux_usage = ISL_AUX_USAGE_HIZ_CCS;
      }
@@ -830,11 +830,11 @@ add_aux_surface_if_supported(struct anv_device *device,
         return VK_SUCCESS;

      /* Choose aux usage */
-      if (anv_formats_ccs_e_compatible(&device->info, image->vk.create_flags,
+      if (anv_formats_ccs_e_compatible(device->info, image->vk.create_flags,
                                       image->vk.format, image->vk.tiling,
                                       image->vk.usage, fmt_list)) {
         image->planes[plane].aux_usage = ISL_AUX_USAGE_CCS_E;
-      } else if (device->info.ver >= 12) {
+      } else if (device->info->ver >= 12) {
         anv_perf_warn(VK_LOG_OBJS(&image->vk.base),
                       "The CCS_D aux mode is not yet handled on "
                       "Gfx12+. Not allocating a CCS buffer.");
@@ -1152,7 +1152,7 @@ check_drm_format_mod(const struct anv_device *device,
      isl_drm_modifier_get_info(image->vk.drm_format_mod);

    /* Driver must support the modifier. */
-   assert(isl_drm_modifier_get_score(&device->info, isl_mod_info->modifier));
+   assert(isl_drm_modifier_get_score(device->info, isl_mod_info->modifier));

    /* Enforced by us, not the Vulkan spec. */
    assert(image->vk.image_type == VK_IMAGE_TYPE_2D);
@@ -1205,7 +1205,7 @@ add_all_surfaces_implicit_layout(
    isl_tiling_flags_t isl_tiling_flags,
    isl_surf_usage_flags_t isl_extra_usage_flags)
 {
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    VkResult result;

    u_foreach_bit(b, image->vk.aspects) {
@@ -1269,7 +1269,7 @@ add_all_surfaces_explicit_layout(
    isl_tiling_flags_t isl_tiling_flags,
    isl_surf_usage_flags_t isl_extra_usage_flags)
 {
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    const uint32_t mod_plane_count = drm_info->drmFormatModifierPlaneCount;
    const bool mod_has_aux =
       isl_drm_modifier_has_aux(drm_info->drmFormatModifier);
@@ -1482,7 +1482,7 @@ anv_image_init(struct anv_device *device, struct anv_image *image,
      (pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT);

    const isl_tiling_flags_t isl_tiling_flags =
-      choose_isl_tiling_flags(&device->info, create_info, isl_mod_info,
+      choose_isl_tiling_flags(device->info, create_info, isl_mod_info,
                              image->vk.wsi_legacy_scanout);

    const VkImageFormatListCreateInfo *fmt_list =
@@ -1517,7 +1517,7 @@ anv_image_init(struct anv_device *device, struct anv_image *image,
     */
    for (uint32_t p = 0; p < image->n_planes; p++) {
      image->planes[p].can_non_zero_fast_clear =
-        can_fast_clear_with_non_zero_color(&device->info, image, p, fmt_list);
+        can_fast_clear_with_non_zero_color(device->info, image, p, fmt_list);
    }

    return VK_SUCCESS;
@@ -1670,7 +1670,7 @@ resolve_ahw_image(struct anv_device *device,
    /* Check format. */
    VkFormat vk_format = vk_format_from_android(desc.format, desc.usage);
-   enum isl_format isl_fmt = anv_get_isl_format(&device->info,
+   enum isl_format isl_fmt = anv_get_isl_format(device->info,
                                                vk_format,
                                                VK_IMAGE_ASPECT_COLOR_BIT,
                                                vk_tiling);
@@ -2500,7 +2500,7 @@ anv_image_fill_surface_state(struct anv_device *device,
     */
    if (anv_surface_is_valid(&image->planes[plane].shadow_surface) &&
       aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
-      assert(device->info.ver == 7);
+      assert(device->info->ver == 7);
      assert(view_usage & ISL_SURF_USAGE_TEXTURE_BIT);
      surface = &image->planes[plane].shadow_surface;
    }
@@ -2509,14 +2509,14 @@ anv_image_fill_surface_state(struct anv_device *device,
      view.swizzle = anv_swizzle_for_render(view.swizzle);

    /* On Ivy Bridge and Bay Trail we do the swizzle in the shader */
-   if (device->info.verx10 == 70)
+   if (device->info->verx10 == 70)
      view.swizzle = ISL_SWIZZLE_IDENTITY;

    /* If this is a HiZ buffer we can sample from with a programmable clear
     * value (SKL+), define the clear value to the optimal constant.
     */
    union isl_color_value default_clear_color = { .u32 = { 0, } };
-   if (device->info.ver >= 9 && aspect == VK_IMAGE_ASPECT_DEPTH_BIT)
+   if (device->info->ver >= 9 && aspect == VK_IMAGE_ASPECT_DEPTH_BIT)
      default_clear_color.f32[0] = ANV_HZ_FC_VAL;
    if (!clear_color)
      clear_color = &default_clear_color;
@@ -2526,7 +2526,7 @@ anv_image_fill_surface_state(struct anv_device *device,
    if (view_usage == ISL_SURF_USAGE_STORAGE_BIT &&
       (flags & ANV_IMAGE_VIEW_STATE_STORAGE_LOWERED) &&
-      !isl_has_matching_typed_storage_image_format(&device->info,
+      !isl_has_matching_typed_storage_image_format(device->info,
                                                    view.format)) {
      /* In this case, we are a writeable storage buffer which needs to be
       * lowered to linear. All tiling and offset calculations will be done in
@@ -2551,11 +2551,11 @@ anv_image_fill_surface_state(struct anv_device *device,
       * supports.
       */
      enum isl_format lower_format =
-        isl_lower_storage_image_format(&device->info, view.format);
+        isl_lower_storage_image_format(device->info, view.format);
      if (aux_usage != ISL_AUX_USAGE_NONE) {
-         assert(device->info.verx10 >= 125);
+         assert(device->info->verx10 >= 125);
         assert(aux_usage == ISL_AUX_USAGE_CCS_E);
-         assert(isl_formats_are_ccs_e_compatible(&device->info,
+         assert(isl_formats_are_ccs_e_compatible(device->info,
                                                  view.format,
                                                  lower_format));
      }
@@ -2592,7 +2592,7 @@ anv_image_fill_surface_state(struct anv_device *device,
      assert(ok);
      isl_surf = &tmp_surf;

-      if (device->info.ver <= 8) {
+      if (device->info->ver <= 8) {
         assert(surface->isl.tiling == ISL_TILING_LINEAR);
         assert(tile_x_sa == 0);
         assert(tile_y_sa == 0);
@@ -2607,7 +2607,7 @@ anv_image_fill_surface_state(struct anv_device *device,
    state_inout->aux_address = aux_address;

    struct anv_address clear_address = ANV_NULL_ADDRESS;
-   if (device->info.ver >= 10 && isl_aux_usage_has_fast_clears(aux_usage)) {
+   if (device->info->ver >= 10 && isl_aux_usage_has_fast_clears(aux_usage)) {
      clear_address = anv_image_get_clear_color_addr(device, image, aspect);
    }
    state_inout->clear_address = clear_address;
@@ -2638,7 +2638,7 @@ anv_image_fill_surface_state(struct anv_device *device,
      state_inout->aux_address.offset |= *aux_addr_dw & 0xfff;
    }

-   if (device->info.ver >= 10 && clear_address.bo) {
+   if (device->info->ver >= 10 && clear_address.bo) {
      uint32_t *clear_addr_dw = state_inout->state.map +
                                device->isl_dev.ss.clear_color_state_offset;
      assert((clear_address.offset & 0x3f) == 0);
@@ -2717,7 +2717,7 @@ anv_CreateImageView(VkDevice _device,
      const uint32_t vplane =
         anv_aspect_to_plane(iview->vk.aspects, 1UL << iaspect_bit);
      struct anv_format_plane format;
-      format = anv_get_format_plane(&device->info, iview->vk.view_format,
+      format = anv_get_format_plane(device->info, iview->vk.view_format,
                                    vplane, image->vk.tiling);

      iview->planes[vplane].image_plane = iplane;
@@ -2755,11 +2755,11 @@ anv_CreateImageView(VkDevice _device,
      iview->planes[vplane].general_sampler_surface_state.state = alloc_surface_state(device);

      enum isl_aux_usage general_aux_usage =
-        anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+        anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
                                 VK_IMAGE_USAGE_SAMPLED_BIT,
                                 VK_IMAGE_LAYOUT_GENERAL);
      enum isl_aux_usage optimal_aux_usage =
-        anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+        anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
                                 VK_IMAGE_USAGE_SAMPLED_BIT,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
@@ -2783,7 +2783,7 @@ anv_CreateImageView(VkDevice _device,
      /* NOTE: This one needs to go last since it may stomp isl_view.format */
      if (iview->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) {
         enum isl_aux_usage general_aux_usage =
-           anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+           anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
                                    VK_IMAGE_USAGE_STORAGE_BIT,
                                    VK_IMAGE_LAYOUT_GENERAL);
         iview->planes[vplane].storage_surface_state.state = alloc_surface_state(device);
@@ -2805,7 +2805,7 @@ anv_CreateImageView(VkDevice _device,
                                      general_aux_usage, NULL,
                                      ANV_IMAGE_VIEW_STATE_STORAGE_LOWERED,
                                      &iview->planes[vplane].lowered_storage_surface_state,
-                                     device->info.ver >= 9 ? NULL :
+                                     device->info->ver >= 9 ? NULL :
                                      &iview->planes[vplane].lowered_storage_image_param);
      } else {
         /* In this case, we support the format but, because there's no
@@ -2814,7 +2814,7 @@ anv_CreateImageView(VkDevice _device,
          * reads but for most writes. Instead of hanging if someone gets
          * it wrong, we give them a NULL descriptor.
          */
-         assert(isl_format_supports_typed_writes(&device->info,
+         assert(isl_format_supports_typed_writes(device->info,
                                                  format.isl_format));
         iview->planes[vplane].lowered_storage_surface_state.state =
            device->null_surface_state;
@@ -2883,7 +2883,7 @@ anv_CreateBufferView(VkDevice _device,
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

    struct anv_format_plane format;
-   format = anv_get_format_plane(&device->info, pCreateInfo->format,
+   format = anv_get_format_plane(device->info, pCreateInfo->format,
                                 0, VK_IMAGE_TILING_LINEAR);

    const uint32_t format_bs = isl_format_get_layout(format.isl_format)->bpb / 8;
@@ -2914,9 +2914,9 @@ anv_CreateBufferView(VkDevice _device,
                                view->address, view->range, format_bs);

      enum isl_format lowered_format =
-        isl_has_matching_typed_storage_image_format(&device->info,
+        isl_has_matching_typed_storage_image_format(device->info,
                                                     format.isl_format) ?
-        isl_lower_storage_image_format(&device->info, format.isl_format) :
+        isl_lower_storage_image_format(device->info, format.isl_format) :
         ISL_FORMAT_RAW;

    /* If we lower the format, we should ensure either they both match in

View file

@@ -323,7 +323,7 @@ anv_measure_reset(struct anv_cmd_buffer *cmd_buffer)
     * yet been processed
     */
    intel_measure_gather(&device->physical->measure_device,
-                        &device->info);
+                        device->info);

    assert(cmd_buffer->device != NULL);

View file

@@ -290,7 +290,7 @@ bool
 anv_check_for_primitive_replication(nir_shader **shaders,
                                     struct anv_graphics_pipeline *pipeline)
 {
-   assert(pipeline->base.device->info.ver >= 12);
+   assert(pipeline->base.device->info->ver >= 12);

    static int primitive_replication_max_views = -1;
    if (primitive_replication_max_views < 0) {

View file

@@ -117,7 +117,7 @@ anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
    properties[p++] = metric_id;

    properties[p++] = DRM_I915_PERF_PROP_OA_FORMAT;
-   properties[p++] = device->info.ver >= 8 ?
+   properties[p++] = device->info->ver >= 8 ?
      I915_OA_FORMAT_A32u40_A4u32_B8_C8 :
      I915_OA_FORMAT_A45_B8_C8;
@@ -139,7 +139,7 @@ anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
     * support it.
     */
    if (intel_perf_has_global_sseu(device->physical->perf) &&
-       device->info.verx10 < 125) {
+       device->info->verx10 < 125) {
      properties[p++] = DRM_I915_PERF_PROP_GLOBAL_SSEU;
      properties[p++] = (uintptr_t) &device->physical->perf->sseu;
    }

View file

@@ -309,7 +309,7 @@ populate_base_prog_key(const struct anv_device *device,
    key->limit_trig_input_range =
       device->physical->instance->limit_trig_input_range;

-   populate_sampler_prog_key(&device->info, &key->tex);
+   populate_sampler_prog_key(device->info, &key->tex);
 }

 static void
@@ -1600,7 +1600,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_pipeline *pipeline,
      next_stage = &stages[s];
    }

-   if (pipeline->base.device->info.ver >= 12 &&
+   if (pipeline->base.device->info->ver >= 12 &&
       pipeline->view_mask != 0) {
      /* For some pipelines HW Primitive Replication can be used instead of
       * instancing to implement Multiview. This depend on how viewIndex is
@@ -1653,7 +1653,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_pipeline *pipeline,
     * We iterate backwards in the stage and stop on the first shader that can
     * set the value.
     */
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    if (devinfo->has_coarse_pixel_primitive_and_cb &&
       stages[MESA_SHADER_FRAGMENT].info &&
       stages[MESA_SHADER_FRAGMENT].key.wm.coarse_pixel &&
@@ -1988,7 +1988,7 @@ anv_compute_pipeline_create(struct anv_device *device,
      return result;
    }

-   anv_genX(&device->info, compute_pipeline_emit)(pipeline);
+   anv_genX(device->info, compute_pipeline_emit)(pipeline);

    *pPipeline = anv_pipeline_to_handle(&pipeline->base);
@@ -2046,7 +2046,7 @@ VkResult anv_CreateComputePipelines(
 void
 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
 {
-   const struct intel_device_info *devinfo = &pipeline->device->info;
+   const struct intel_device_info *devinfo = pipeline->device->info;

    const struct intel_l3_weights w =
       intel_get_default_l3_weights(devinfo, true, needs_slm);
@@ -2191,7 +2191,7 @@ anv_graphics_pipeline_create(struct anv_device *device,
      return result;
    }

-   anv_genX(&device->info, graphics_pipeline_emit)(pipeline, &state);
+   anv_genX(device->info, graphics_pipeline_emit)(pipeline, &state);

    *pPipeline = anv_pipeline_to_handle(&pipeline->base);
@@ -2491,7 +2491,7 @@ anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
                                  struct vk_pipeline_cache *cache,
                                  const VkRayTracingPipelineCreateInfoKHR *info)
 {
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    VkResult result;

    VkPipelineCreationFeedback pipeline_feedback = {
@@ -2771,7 +2771,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
      nir_shader *trivial_return_nir =
         brw_nir_create_trivial_return_shader(device->physical->compiler, tmp_ctx);

-      NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, &device->info);
+      NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, device->info);

      struct anv_pipeline_bind_map bind_map = {
         .surface_count = 0,
@@ -2942,7 +2942,7 @@ anv_ray_tracing_pipeline_create(
      return result;
    }

-   anv_genX(&device->info, ray_tracing_pipeline_emit)(pipeline);
+   anv_genX(device->info, ray_tracing_pipeline_emit)(pipeline);

    *pPipeline = anv_pipeline_to_handle(&pipeline->base);
@@ -1157,7 +1157,7 @@ struct anv_device {
     struct vk_device                            vk;
     struct anv_physical_device *                physical;
-    struct intel_device_info                    info;
+    const struct intel_device_info *            info;
     struct isl_device                           isl_dev;
     int                                         context_id;
     int                                         fd;
@@ -1596,7 +1596,7 @@ static inline void
 write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
 {
    unsigned reloc_size = 0;
-   if (device->info.ver >= 8) {
+   if (device->info->ver >= 8) {
       reloc_size = sizeof(uint64_t);
       *(uint64_t *)p = intel_canonical_address(v);
    } else {
@@ -3702,7 +3702,7 @@ anv_image_get_fast_clear_type_addr(const struct anv_device *device,
    struct anv_address addr =
       anv_image_get_clear_color_addr(device, image, aspect);
-   const unsigned clear_color_state_size = device->info.ver >= 10 ?
+   const unsigned clear_color_state_size = device->info->ver >= 10 ?
       device->isl_dev.ss.clear_color_state_size :
       device->isl_dev.ss.clear_value_size;
    return anv_address_add(addr, clear_color_state_size);
@@ -3800,7 +3800,7 @@ anv_image_plane_uses_aux_map(const struct anv_device *device,
                              const struct anv_image *image,
                              uint32_t plane)
 {
-   return device->info.has_aux_map &&
+   return device->info->has_aux_map &&
           isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
 }
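
The struct change here is the heart of the commit: `anv_device::info` shrinks from a full embedded `intel_device_info` to a pointer-to-const aliasing the physical device's copy, which is what saves one copy of the struct per logical device and lets it be marked `const`. A hedged sketch of how the pointer would be wired up at device creation (the assignment below is illustrative; the anv_CreateDevice hunk is not part of this excerpt):

    /* Hypothetical excerpt from logical-device creation: alias the
     * physical device's info instead of copying it. */
    device->physical = physical_device;
    device->info = &physical_device->info;   /* shared, read-only */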
@@ -83,7 +83,7 @@ anv_device_utrace_emit_copy_ts_buffer(struct u_trace_context *utctx,
    struct anv_address to_addr = (struct anv_address) {
       .bo = ts_to, .offset = to_offset * sizeof(uint64_t) };
-   anv_genX(&device->info, emit_so_memcpy)(&flush->memcpy_state,
+   anv_genX(device->info, emit_so_memcpy)(&flush->memcpy_state,
                                            to_addr, from_addr, count * sizeof(uint64_t));
 }
@@ -143,7 +143,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
                         flush->batch_bo->map, flush->batch_bo->size);
    /* Emit the copies */
-   anv_genX(&device->info, emit_so_memcpy_init)(&flush->memcpy_state,
+   anv_genX(device->info, emit_so_memcpy_init)(&flush->memcpy_state,
                                                 device,
                                                 &flush->batch);
    for (uint32_t i = 0; i < cmd_buffer_count; i++) {
@@ -157,7 +157,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
                                     anv_device_utrace_emit_copy_ts_buffer);
       }
    }
-   anv_genX(&device->info, emit_so_memcpy_fini)(&flush->memcpy_state);
+   anv_genX(device->info, emit_so_memcpy_fini)(&flush->memcpy_state);
    u_trace_flush(&flush->ds.trace, flush, true);
@@ -260,7 +260,7 @@ anv_utrace_read_ts(struct u_trace_context *utctx,
    if (ts[idx] == U_TRACE_NO_TIMESTAMP)
       return U_TRACE_NO_TIMESTAMP;
-   return intel_device_info_timebase_scale(&device->info, ts[idx]);
+   return intel_device_info_timebase_scale(device->info, ts[idx]);
 }
 static const char *
@@ -284,7 +284,7 @@ void
 anv_device_utrace_init(struct anv_device *device)
 {
    anv_bo_pool_init(&device->utrace_bo_pool, device, "utrace");
-   intel_ds_device_init(&device->ds, &device->info, device->fd,
+   intel_ds_device_init(&device->ds, device->info, device->fd,
                         device->physical->local_minor - 128,
                         INTEL_DS_API_VULKAN);
    u_trace_context_init(&device->ds.trace_context,
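
Helpers in the common Intel code, such as the two called above, already take a `const struct intel_device_info *`; that is what allows the shared struct to be marked const while the device pointer flows through unchanged. For instance, scaling a read-back GPU timestamp, shown in isolation (same call as in the hunk above):

    /* GPU ticks -> nanoseconds via the shared, read-only device info. */
    uint64_t ns = intel_device_info_timebase_scale(device->info, ts[idx]);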
@@ -386,7 +386,7 @@ genX(blorp_exec)(struct blorp_batch *batch,
    if (!cmd_buffer->state.current_l3_config) {
       const struct intel_l3_config *cfg =
-         intel_get_default_l3_config(&cmd_buffer->device->info);
+         intel_get_default_l3_config(cmd_buffer->device->info);
       genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
    }
@@ -391,7 +391,7 @@ anv_can_fast_clear_color_view(struct anv_device * device,
     * to use the attachment can't handle fast-clears.
     */
    enum anv_fast_clear_type fast_clear_type =
-      anv_layout_to_fast_clear_type(&device->info, iview->image,
+      anv_layout_to_fast_clear_type(device->info, iview->image,
                                     VK_IMAGE_ASPECT_COLOR_BIT,
                                     layout);
    switch (fast_clear_type) {
@@ -480,11 +480,11 @@ anv_can_hiz_clear_ds_view(struct anv_device *device,
       return false;
    const enum isl_aux_usage clear_aux_usage =
-      anv_layout_to_aux_usage(&device->info, iview->image,
+      anv_layout_to_aux_usage(device->info, iview->image,
                               VK_IMAGE_ASPECT_DEPTH_BIT,
                               VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
                               layout);
-   if (!blorp_can_hiz_clear_depth(&device->info,
+   if (!blorp_can_hiz_clear_depth(device->info,
                                   &iview->image->planes[0].primary_surface.isl,
                                   clear_aux_usage,
                                   iview->planes[0].isl.base_level,
@@ -504,7 +504,7 @@ anv_can_hiz_clear_ds_view(struct anv_device *device,
     * portion of a HiZ buffer. Testing has revealed that Gfx8 only supports
     * returning 0.0f. Gens prior to gfx8 do not support this feature at all.
     */
-   if (GFX_VER == 8 && anv_can_sample_with_hiz(&device->info, iview->image))
+   if (GFX_VER == 8 && anv_can_sample_with_hiz(device->info, iview->image))
       return false;
    /* If we got here, then we can fast clear */
@@ -541,7 +541,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    for (uint32_t a = 0; a < layer_count; a++) {
       const uint32_t layer = base_layer + a;
@@ -635,7 +635,7 @@ transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
    if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
        initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
       cmd_buffer->device->physical->has_implicit_ccs &&
-      cmd_buffer->device->info.has_aux_map) {
+      cmd_buffer->device->info->has_aux_map) {
       anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
                             0, 1, base_layer, layer_count);
    }
@@ -650,11 +650,11 @@ transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
       return;
    const enum isl_aux_state initial_state =
-      anv_layout_to_aux_state(&cmd_buffer->device->info, image,
+      anv_layout_to_aux_state(cmd_buffer->device->info, image,
                               VK_IMAGE_ASPECT_DEPTH_BIT,
                               initial_layout);
    const enum isl_aux_state final_state =
-      anv_layout_to_aux_state(&cmd_buffer->device->info, image,
+      anv_layout_to_aux_state(cmd_buffer->device->info, image,
                               VK_IMAGE_ASPECT_DEPTH_BIT,
                               final_layout);
@@ -744,7 +744,7 @@ transition_stencil_buffer(struct anv_cmd_buffer *cmd_buffer,
    if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
        initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
       cmd_buffer->device->physical->has_implicit_ccs &&
-      cmd_buffer->device->info.has_aux_map) {
+      cmd_buffer->device->info->has_aux_map) {
       anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
                             base_level, level_count, base_layer, layer_count);
@@ -843,7 +843,7 @@ anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
                                   enum anv_fast_clear_type fast_clear_supported)
 {
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    const struct mi_value fast_clear_type =
       mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -926,7 +926,7 @@ anv_cmd_simple_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
                                  enum anv_fast_clear_type fast_clear_supported)
 {
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value fast_clear_type_mem =
       mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -1140,7 +1140,7 @@ genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
 #endif
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    if (copy_from_surface_state) {
       mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
@@ -1188,7 +1188,7 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
                         bool will_full_fast_clear)
 {
    struct anv_device *device = cmd_buffer->device;
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    /* Validate the inputs. */
    assert(cmd_buffer);
    assert(image && image->vk.aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
@@ -1671,7 +1671,7 @@ genX(BeginCommandBuffer)(
     * ensured that we have the table even if this command buffer doesn't
     * initialize any images.
     */
-   if (cmd_buffer->device->info.has_aux_map) {
+   if (cmd_buffer->device->info->has_aux_map) {
       anv_add_pending_pipe_bits(cmd_buffer,
                                 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
                                 "new cmd buffer with aux-tt");
@@ -1869,7 +1869,7 @@ genX(CmdExecuteCommands)(
       * regardless of conditional rendering being enabled in primary.
       */
      struct mi_builder b;
-     mi_builder_init(&b, &primary->device->info, &primary->batch);
+     mi_builder_init(&b, primary->device->info, &primary->batch);
      mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
               mi_imm(UINT64_MAX));
   }
@@ -2317,7 +2317,7 @@ genX(emit_apply_pipe_flushes)(struct anv_batch *batch,
    }
 #if GFX_VER == 12
-   if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) && device->info.has_aux_map) {
+   if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) && device->info->has_aux_map) {
       anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
          lri.RegisterOffset = GENX(GFX_CCS_AUX_INV_num);
          lri.DataDWord = 1;
@@ -2481,7 +2481,7 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
       return;
    const unsigned push_constant_kb =
-      cmd_buffer->device->info.max_constant_urb_size_kb;
+      cmd_buffer->device->info->max_constant_urb_size_kb;
    const unsigned num_stages =
       util_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
@@ -2519,7 +2519,7 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
    * program push constant command(ZERO length) without any commit between
    * them.
    */
-   if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+   if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_ALL), c) {
         c.MOCS = anv_mocs(cmd_buffer->device, NULL, 0);
      }
@@ -3883,7 +3883,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
       * 3dstate_so_buffer_index_0/1/2/3 states to ensure so_buffer_index_*
       * state is not combined with other state changes.
       */
-     if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+     if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
        anv_add_pending_pipe_bits(cmd_buffer,
                                  ANV_PIPE_CS_STALL_BIT,
                                  "before SO_BUFFER change WA");
@@ -3927,7 +3927,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
        }
     }
-     if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+     if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
        /* Wa_16011411144: also CS_STALL after touching SO_BUFFER change */
        anv_add_pending_pipe_bits(cmd_buffer,
                                  ANV_PIPE_CS_STALL_BIT,
@@ -4477,7 +4477,7 @@ void genX(CmdDrawIndirectByteCountEXT)(
    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value count =
       mi_mem32(anv_address_add(counter_buffer->address,
                                counterBufferOffset));
@@ -4514,7 +4514,7 @@ load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    mi_store(&b, mi_reg32(GFX7_3DPRIM_VERTEX_COUNT),
             mi_mem32(anv_address_add(addr, 0)));
@@ -4794,7 +4794,7 @@ void genX(CmdDrawIndirectCount)(
    genX(cmd_buffer_flush_state)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value max =
       prepare_for_draw_count_predicate(cmd_buffer, &b,
                                        count_buffer, countBufferOffset);
@@ -4862,7 +4862,7 @@ void genX(CmdDrawIndexedIndirectCount)(
    genX(cmd_buffer_flush_state)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value max =
       prepare_for_draw_count_predicate(cmd_buffer, &b,
                                        count_buffer, countBufferOffset);
@@ -5103,7 +5103,7 @@ genX(CmdDrawMeshTasksIndirectNV)(
    bool uses_drawid = (task_prog_data && task_prog_data->uses_drawid) ||
                       mesh_prog_data->uses_drawid;
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    for (uint32_t i = 0; i < drawCount; i++) {
       struct anv_address draw = anv_address_add(buffer->address, offset);
@@ -5143,7 +5143,7 @@ genX(CmdDrawMeshTasksIndirectCountNV)(
                       mesh_prog_data->uses_drawid;
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value max =
       prepare_for_draw_count_predicate(cmd_buffer, &b,
@@ -5319,7 +5319,7 @@ emit_compute_walker(struct anv_cmd_buffer *cmd_buffer,
    const struct anv_shader_bin *cs_bin = pipeline->cs;
    bool predicate = cmd_buffer->state.conditional_render_enabled;
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_cs_dispatch_info dispatch =
       brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
@@ -5366,7 +5366,7 @@ emit_gpgpu_walker(struct anv_cmd_buffer *cmd_buffer,
    bool predicate = (GFX_VER <= 7 && indirect) ||
      cmd_buffer->state.conditional_render_enabled;
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_cs_dispatch_info dispatch =
      brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
@@ -5504,7 +5504,7 @@ void genX(CmdDispatchIndirect)(
    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value size_x = mi_mem32(anv_address_add(addr, 0));
    struct mi_value size_y = mi_mem32(anv_address_add(addr, 4));
@@ -5584,7 +5584,7 @@ genX(cmd_buffer_ray_query_globals)(struct anv_cmd_buffer *cmd_buffer)
    uint32_t stack_ids_per_dss = 2048; /* TODO: can we use a lower value in
                                        * some cases?
                                        */
-   brw_rt_compute_scratch_layout(&layout, &device->info,
+   brw_rt_compute_scratch_layout(&layout, device->info,
                                  stack_ids_per_dss, 1 << 10);
    struct GFX_RT_DISPATCH_GLOBALS rtdg = {
@@ -5737,7 +5737,7 @@ cmd_buffer_trace_rays(struct anv_cmd_buffer *cmd_buffer,
       local_size_log2[2] = 0;
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    struct mi_value launch_size[3] = {
       mi_mem32(anv_address_from_u64(launch_size_addr + 0)),
@@ -5883,7 +5883,7 @@ static void
 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t pipeline)
 {
-   UNUSED const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   UNUSED const struct intel_device_info *devinfo = cmd_buffer->device->info;
    if (cmd_buffer->state.current_pipeline == pipeline)
       return;
@@ -6195,7 +6195,7 @@ genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
                                    unsigned scale)
 {
 #if GFX_VER == 9
-   const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+   const struct intel_device_info *devinfo = cmd_buffer->device->info;
    const unsigned slice_hashing[] = {
       /* Because all Gfx9 platforms with more than one slice require
        * three-way subslice hashing, a single "normal" 16x16 slice hashing
@@ -6501,7 +6501,7 @@ void genX(CmdBeginRendering)(
      gfx->samples |= iview->vk.image->samples;
      enum isl_aux_usage aux_usage =
-        anv_layout_to_aux_usage(&cmd_buffer->device->info,
+        anv_layout_to_aux_usage(cmd_buffer->device->info,
                                 iview->image,
                                 VK_IMAGE_ASPECT_COLOR_BIT,
                                 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
@@ -6701,7 +6701,7 @@ void genX(CmdBeginRendering)(
        initial_depth_layout = attachment_initial_layout(d_att);
        depth_layout = d_att->imageLayout;
        depth_aux_usage =
-          anv_layout_to_aux_usage(&cmd_buffer->device->info,
+          anv_layout_to_aux_usage(cmd_buffer->device->info,
                                   d_iview->image,
                                   VK_IMAGE_ASPECT_DEPTH_BIT,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
@@ -6714,7 +6714,7 @@ void genX(CmdBeginRendering)(
        initial_stencil_layout = attachment_initial_layout(s_att);
        stencil_layout = s_att->imageLayout;
        stencil_aux_usage =
-          anv_layout_to_aux_usage(&cmd_buffer->device->info,
+          anv_layout_to_aux_usage(cmd_buffer->device->info,
                                   s_iview->image,
                                   VK_IMAGE_ASPECT_STENCIL_BIT,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
@@ -6998,13 +6998,13 @@ cmd_buffer_resolve_msaa_attachment(struct anv_cmd_buffer *cmd_buffer,
    const struct anv_image_view *dst_iview = att->resolve_iview;
    enum isl_aux_usage src_aux_usage =
-      anv_layout_to_aux_usage(&cmd_buffer->device->info,
+      anv_layout_to_aux_usage(cmd_buffer->device->info,
                              src_iview->image, aspect,
                              VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                              layout);
    enum isl_aux_usage dst_aux_usage =
-      anv_layout_to_aux_usage(&cmd_buffer->device->info,
+      anv_layout_to_aux_usage(cmd_buffer->device->info,
                              dst_iview->image, aspect,
                              VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                              att->resolve_layout);
@@ -7200,7 +7200,7 @@ genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
 {
 #if GFX_VERx10 >= 75
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
             mi_reg32(ANV_PREDICATE_RESULT_REG));
@@ -7233,7 +7233,7 @@ void genX(CmdBeginConditionalRenderingEXT)(
    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
    struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    /* Section 19.4 of the Vulkan 1.1.85 spec says:
     *
@@ -7482,7 +7482,7 @@ void genX(cmd_emit_timestamp)(struct anv_batch *batch,
      }
   } else {
      struct mi_builder b;
-     mi_builder_init(&b, &device->info, batch);
+     mi_builder_init(&b, device->info, batch);
      mi_store(&b, mi_mem64(addr), mi_reg64(TIMESTAMP));
   }
 }
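
A large share of this file's churn is the `mi_builder_init()` call sites: the builder has always taken a `const struct intel_device_info *`, so with `info` now stored as a pointer the `&` simply drops. The updated idiom, shown in isolation (a sketch assembled from the hunks above, not a new API):

    struct mi_builder b;
    /* device->info is already a const struct intel_device_info *,
     * so it is handed to the builder as-is. */
    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
    mi_store(&b, mi_mem64(addr), mi_reg64(TIMESTAMP));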
@@ -244,7 +244,7 @@ genX(emit_so_memcpy_init)(struct anv_memcpy_state *state,
    state->batch = batch;
    state->device = device;
-   const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
+   const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
    genX(emit_l3_config)(batch, device, cfg);
    anv_batch_emit(batch, GENX(PIPELINE_SELECT), ps) {
@@ -299,7 +299,7 @@ genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
    if (!cmd_buffer->state.current_l3_config) {
       const struct intel_l3_config *cfg =
-         intel_get_default_l3_config(&cmd_buffer->device->info);
+         intel_get_default_l3_config(cmd_buffer->device->info);
       genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
    }
@@ -148,7 +148,7 @@ emit_vertex_input(struct anv_graphics_pipeline *pipeline,
    }
    u_foreach_bit(a, vi->attributes_valid) {
-      enum isl_format format = anv_get_isl_format(&pipeline->base.device->info,
+      enum isl_format format = anv_get_isl_format(pipeline->base.device->info,
                                                   vi->attributes[a].format,
                                                   VK_IMAGE_ASPECT_COLOR_BIT,
                                                   VK_IMAGE_TILING_LINEAR);
@@ -273,7 +273,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
                     const unsigned entry_size[4],
                     enum intel_urb_deref_block_size *deref_block_size)
 {
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    unsigned entries[4];
    unsigned start[4];
@@ -322,7 +322,7 @@ static void
 emit_urb_setup_mesh(struct anv_graphics_pipeline *pipeline,
                     enum intel_urb_deref_block_size *deref_block_size)
 {
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_task_prog_data *task_prog_data =
       anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK) ?
@@ -841,7 +841,7 @@ emit_rs_state(struct anv_graphics_pipeline *pipeline,
       rp->depth_attachment_format != VK_FORMAT_UNDEFINED) {
      assert(vk_format_has_depth(rp->depth_attachment_format));
      enum isl_format isl_format =
-        anv_get_isl_format(&pipeline->base.device->info,
+        anv_get_isl_format(pipeline->base.device->info,
                            rp->depth_attachment_format,
                            VK_IMAGE_ASPECT_DEPTH_BIT,
                            VK_IMAGE_TILING_OPTIMAL);
@@ -1018,7 +1018,7 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
      surface_count = map->surface_count;
   }
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    uint32_t *blend_state_start = devinfo->ver >= 8 ?
       pipeline->gfx8.blend_state : pipeline->gfx7.blend_state;
    uint32_t *state_pos = blend_state_start;
@@ -1363,7 +1363,7 @@ emit_3dstate_streamout(struct anv_graphics_pipeline *pipeline,
    * 2. Send SO_DECL NP state
    * 3. Send 3D State SOL with SOL Enabled
    */
-   if (intel_device_info_is_dg2(&pipeline->base.device->info))
+   if (intel_device_info_is_dg2(pipeline->base.device->info))
       anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_STREAMOUT), so);
    uint32_t *dw = anv_batch_emitn(&pipeline->base.batch, 3 + 2 * max_decls,
@@ -1514,7 +1514,7 @@ get_scratch_surf(struct anv_pipeline *pipeline,
 static void
 emit_3dstate_vs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
    const struct anv_shader_bin *vs_bin =
       pipeline->shaders[MESA_SHADER_VERTEX];
@@ -1603,7 +1603,7 @@ emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
      return;
   }
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct anv_shader_bin *tcs_bin =
       pipeline->shaders[MESA_SHADER_TESS_CTRL];
    const struct anv_shader_bin *tes_bin =
@@ -1748,7 +1748,7 @@ emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
 static void
 emit_3dstate_gs(struct anv_graphics_pipeline *pipeline)
 {
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct anv_shader_bin *gs_bin =
       pipeline->shaders[MESA_SHADER_GEOMETRY];
@@ -1905,7 +1905,7 @@ emit_3dstate_wm(struct anv_graphics_pipeline *pipeline,
      wm.LineStippleEnable = rs->line.stipple.enable;
   }
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    uint32_t *dws = devinfo->ver >= 8 ? pipeline->gfx8.wm : pipeline->gfx7.wm;
    GENX(3DSTATE_WM_pack)(NULL, dws, &wm);
 }
@@ -1916,7 +1916,7 @@ emit_3dstate_ps(struct anv_graphics_pipeline *pipeline,
                const struct vk_color_blend_state *cb)
 {
    UNUSED const struct intel_device_info *devinfo =
-      &pipeline->base.device->info;
+      pipeline->base.device->info;
    const struct anv_shader_bin *fs_bin =
       pipeline->shaders[MESA_SHADER_FRAGMENT];
@@ -2182,7 +2182,7 @@ emit_task_state(struct anv_graphics_pipeline *pipeline)
        get_scratch_surf(&pipeline->base, MESA_SHADER_TASK, task_bin);
   }
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
    const struct brw_cs_dispatch_info task_dispatch =
       brw_cs_get_dispatch_info(devinfo, &task_prog_data->base, NULL);
@@ -2235,7 +2235,7 @@ emit_mesh_state(struct anv_graphics_pipeline *pipeline)
      /* TODO(mesh): MaximumNumberofThreadGroups. */
   }
-   const struct intel_device_info *devinfo = &pipeline->base.device->info;
+   const struct intel_device_info *devinfo = pipeline->base.device->info;
    const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
    const struct brw_cs_dispatch_info mesh_dispatch =
       brw_cs_get_dispatch_info(devinfo, &mesh_prog_data->base, NULL);
@@ -2328,7 +2328,7 @@ genX(graphics_pipeline_emit)(struct anv_graphics_pipeline *pipeline,
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
-   if (device->info.platform == INTEL_PLATFORM_IVB)
+   if (device->info->platform == INTEL_PLATFORM_IVB)
      gfx7_emit_vs_workaround_flush(brw);
 #endif
@@ -2384,7 +2384,7 @@ genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
    anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
    const UNUSED struct anv_shader_bin *cs_bin = pipeline->cs;
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    anv_batch_emit(&pipeline->base.batch, GENX(CFE_STATE), cfe) {
      cfe.MaximumNumberofThreads =
@@ -2400,7 +2400,7 @@ void
 genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
 {
    struct anv_device *device = pipeline->base.device;
-   const struct intel_device_info *devinfo = &device->info;
+   const struct intel_device_info *devinfo = device->info;
    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
    anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
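
The pattern repeated through these emit helpers is to hoist one `devinfo` local at the top of the function and let every later generation check go through it. A minimal sketch of the idiom under the new pointer layout (the function name is hypothetical):

    static void
    emit_example_state(struct anv_graphics_pipeline *pipeline)
    {
       /* One load up front; devinfo is shared and read-only. */
       const struct intel_device_info *devinfo = pipeline->base.device->info;
       if (devinfo->ver >= 8) {
          /* ... gfx8+ path ... */
       }
    }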
@@ -219,7 +219,7 @@ VkResult genX(CreateQueryPool)(
        };
        batch.next = batch.start;
-        mi_builder_init(&b, &device->info, &batch);
+        mi_builder_init(&b, device->info, &batch);
        mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
                 mi_imm(p * (uint64_t)pool->pass_size));
        anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
@@ -522,7 +522,7 @@ VkResult genX(GetQueryPoolResults)(
            uint64_t result = slot[idx * 2 + 2] - slot[idx * 2 + 1];
            /* WaDividePSInvocationCountBy4:HSW,BDW */
-            if ((device->info.ver == 8 || device->info.verx10 == 75) &&
+            if ((device->info->ver == 8 || device->info->verx10 == 75) &&
                (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
               result >>= 2;
@@ -584,10 +584,10 @@ VkResult genX(GetQueryPoolResults)(
                                       query_data + intel_perf_query_data_offset(pool, true),
                                       false /* no_oa_accumulate */);
           intel_perf_query_result_write_mdapi(pData, stride,
-                                               &device->info,
+                                               device->info,
                                               query, &result);
           const uint64_t *marker = query_data + intel_perf_marker_offset();
-           intel_perf_query_mdapi_write_marker(pData, stride, &device->info, *marker);
+           intel_perf_query_mdapi_write_marker(pData, stride, device->info, *marker);
           break;
        }
@@ -622,7 +622,7 @@ emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
      pc.DepthStallEnable = true;
      pc.Address = addr;
-      if (GFX_VER == 9 && cmd_buffer->device->info.gt == 4)
+      if (GFX_VER == 9 && cmd_buffer->device->info->gt == 4)
        pc.CommandStreamerStallEnable = true;
   }
 }
@@ -760,7 +760,7 @@ void genX(CmdResetQueryPool)(
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
   case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT: {
      struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+      mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
      for (uint32_t i = 0; i < queryCount; i++)
        emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@@ -770,7 +770,7 @@ void genX(CmdResetQueryPool)(
 #if GFX_VER >= 8
   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
      struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+      mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
      for (uint32_t i = 0; i < queryCount; i++) {
        for (uint32_t p = 0; p < pool->n_passes; p++) {
@@ -786,7 +786,7 @@ void genX(CmdResetQueryPool)(
   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
      struct mi_builder b;
-      mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+      mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
      for (uint32_t i = 0; i < queryCount; i++)
        emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@@ -925,7 +925,7 @@ void genX(CmdBeginQueryIndexedEXT)(
   struct anv_address query_addr = anv_query_address(pool, query);
   struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
@@ -1112,7 +1112,7 @@ void genX(CmdEndQueryIndexedEXT)(
   struct anv_address query_addr = anv_query_address(pool, query);
   struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
@@ -1291,7 +1291,7 @@ void genX(CmdWriteTimestamp2)(
   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
   struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
   if (stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT) {
      mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
@@ -1307,7 +1307,7 @@ void genX(CmdWriteTimestamp2)(
        pc.PostSyncOperation = WriteTimestamp;
        pc.Address = anv_address_add(query_addr, 8);
-        if (GFX_VER == 9 && cmd_buffer->device->info.gt == 4)
+        if (GFX_VER == 9 && cmd_buffer->device->info->gt == 4)
          pc.CommandStreamerStallEnable = true;
      }
      emit_query_pc_availability(cmd_buffer, query_addr, true);
@@ -1405,7 +1405,7 @@ void genX(CmdCopyQueryPoolResults)(
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   struct mi_builder b;
-   mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
   struct mi_value result;
   /* If render target writes are ongoing, request a render target cache flush
@@ -1471,8 +1471,8 @@ void genX(CmdCopyQueryPoolResults)(
                                               idx * 16 + 8));
        /* WaDividePSInvocationCountBy4:HSW,BDW */
-        if ((cmd_buffer->device->info.ver == 8 ||
-             cmd_buffer->device->info.verx10 == 75) &&
+        if ((cmd_buffer->device->info->ver == 8 ||
+             cmd_buffer->device->info->verx10 == 75) &&
           (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) {
          result = mi_ushr32_imm(&b, result, 2);
        }
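
Both WaDividePSInvocationCountBy4 sites above read the generation straight off the shared info: Haswell (verx10 == 75) and Broadwell (ver == 8) over-count fragment-shader invocations by 4x, so the counter is shifted right by two before being handed back. In sketch form, on the CPU readback path (variable names as in the hunk above):

    /* HSW/BDW report 4x the real PS invocation count; >> 2 divides by 4. */
    if ((device->info->ver == 8 || device->info->verx10 == 75) &&
        (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
       result >>= 2;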
@@ -44,10 +44,10 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
 {
 #if GFX_VER == 11
    /* Gfx11 hardware has two pixel pipes at most. */
-   for (unsigned i = 2; i < ARRAY_SIZE(device->info.ppipe_subslices); i++)
-      assert(device->info.ppipe_subslices[i] == 0);
-   if (device->info.ppipe_subslices[0] == device->info.ppipe_subslices[1])
+   for (unsigned i = 2; i < ARRAY_SIZE(device->info->ppipe_subslices); i++)
+      assert(device->info->ppipe_subslices[i] == 0);
+   if (device->info->ppipe_subslices[0] == device->info->ppipe_subslices[1])
      return;
    if (!device->slice_hash.alloc_size) {
@@ -55,8 +55,8 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
      device->slice_hash =
        anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);
-      const bool flip = device->info.ppipe_subslices[0] <
-                        device->info.ppipe_subslices[1];
+      const bool flip = device->info->ppipe_subslices[0] <
+                        device->info->ppipe_subslices[1];
      struct GENX(SLICE_HASH_TABLE) table;
      intel_compute_pixel_hash_table_3way(16, 16, 3, 3, flip, table.Entry[0]);
@@ -79,12 +79,12 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
    for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < 3; p++)
-         ppipes_of[n] += (device->info.ppipe_subslices[p] == n);
+         ppipes_of[n] += (device->info->ppipe_subslices[p] == n);
   }
    /* Gfx12 has three pixel pipes. */
-   for (unsigned p = 3; p < ARRAY_SIZE(device->info.ppipe_subslices); p++)
-      assert(device->info.ppipe_subslices[p] == 0);
+   for (unsigned p = 3; p < ARRAY_SIZE(device->info->ppipe_subslices); p++)
+      assert(device->info->ppipe_subslices[p] == 0);
    if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
@@ -117,8 +117,8 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
   }
 #elif GFX_VERx10 == 125
    uint32_t ppipe_mask = 0;
-   for (unsigned p = 0; p < ARRAY_SIZE(device->info.ppipe_subslices); p++) {
-      if (device->info.ppipe_subslices[p])
+   for (unsigned p = 0; p < ARRAY_SIZE(device->info->ppipe_subslices); p++) {
+      if (device->info->ppipe_subslices[p])
        ppipe_mask |= (1u << p);
   }
    assert(ppipe_mask);
@@ -168,7 +168,7 @@ init_common_queue_state(struct anv_queue *queue, struct anv_batch *batch)
    /* Starting with GFX version 11, SLM is no longer part of the L3$ config
    * so it never changes throughout the lifetime of the VkDevice.
    */
-   const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
+   const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
    genX(emit_l3_config)(batch, device, cfg);
    device->l3_config = cfg;
 #endif
@@ -319,7 +319,7 @@ init_render_queue_state(struct anv_queue *queue)
    /* hardware specification recommends disabling repacking for
    * the compatibility with decompression mechanism in display controller.
    */
-   if (device->info.disable_ccs_repack) {
+   if (device->info->disable_ccs_repack) {
      anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
        cm0.DisableRepackingforCompression = true;
        cm0.DisableRepackingforCompressionMask = true;
@@ -368,7 +368,7 @@ init_render_queue_state(struct anv_queue *queue)
 #endif
 #if GFX_VER == 12
-   if (device->info.has_aux_map) {
+   if (device->info->has_aux_map) {
      uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
      assert(aux_base_addr % (32 * 1024) == 0);
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
@@ -440,9 +440,9 @@ init_compute_queue_state(struct anv_queue *queue)
 }
 void
-genX(init_physical_device_state)(ASSERTED struct anv_physical_device *device)
+genX(init_physical_device_state)(ASSERTED struct anv_physical_device *pdevice)
 {
-   assert(device->info.verx10 == GFX_VERx10);
+   assert(pdevice->info.verx10 == GFX_VERx10);
 }
 VkResult
@@ -581,7 +581,7 @@ genX(emit_l3_config)(struct anv_batch *batch,
                     const struct anv_device *device,
                     const struct intel_l3_config *cfg)
 {
-   UNUSED const struct intel_device_info *devinfo = &device->info;
+   UNUSED const struct intel_device_info *devinfo = device->info;
 #if GFX_VER >= 8
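
One subtlety worth noting in the hunks above: `ARRAY_SIZE(device->info->ppipe_subslices)` remains a compile-time constant after the change, because the macro is evaluated on the fixed-size array member itself, not on the pointer it is reached through. Isolated for illustration (loop body as in the GFX_VERx10 == 125 hunk):

    /* ARRAY_SIZE works the same through a pointer-to-struct: the
     * expression names the array member, whose size is static. */
    uint32_t ppipe_mask = 0;
    for (unsigned p = 0; p < ARRAY_SIZE(device->info->ppipe_subslices); p++) {
       if (device->info->ppipe_subslices[p])
          ppipe_mask |= (1u << p);
    }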
@@ -332,7 +332,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
        GENX(3DSTATE_SF_header),
      };
 #if GFX_VER == 8
-      if (cmd_buffer->device->info.platform == INTEL_PLATFORM_CHV) {
+      if (cmd_buffer->device->info->platform == INTEL_PLATFORM_CHV) {
        sf.CHVLineWidth = dyn->rs.line.width;
      } else {
        sf.LineWidth = dyn->rs.line.width;
@@ -592,7 +592,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
      RR_FREE;
      vfg.DistributionGranularity = BatchLevelGranularity;
      /* Wa_14014890652 */
-      if (intel_device_info_is_dg2(&cmd_buffer->device->info))
+      if (intel_device_info_is_dg2(cmd_buffer->device->info))
        vfg.GranularityThresholdDisable = 1;
      vfg.ListCutIndexEnable = dyn->ia.primitive_restart_enable;
      /* 192 vertices for TRILIST_ADJ */