anv: Do not duplicate intel_device_info memory in each logical device

Each logical device can point to its physical device's intel_device_info,
saving at least one copy of intel_device_info.

This also allows us to mark the pointer 'const', so that values in
intel_device_info cannot be changed by mistake.

Acked-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Acked-by: Jordan Justen <jordan.l.justen@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17897>
Authored by José Roberto de Souza on 2022-08-04 12:56:17 -07:00; committed by Marge Bot
parent 9474ff72dd
commit 356a60bd6c
25 changed files with 216 additions and 216 deletions
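
To illustrate the pattern this commit applies, here is a minimal, self-contained C sketch. The struct layouts are reduced to the few fields relevant here, and the helper functions (anv_device_bind_info, device_uses_aux_map) are hypothetical illustrations, not part of the anv API:

#include <stdbool.h>

/* Reduced stand-ins for the real structs; only the fields relevant to
 * this commit are shown. */
struct intel_device_info {
   int ver;
   int verx10;
   bool has_aux_map;
};

struct anv_physical_device {
   struct intel_device_info info;      /* the single owned copy */
};

struct anv_device {
   struct anv_physical_device *physical;
   /* Before this commit: struct intel_device_info info; (one copy per
    * logical device). After: a const pointer into the physical
    * device's copy. */
   const struct intel_device_info *info;
};

/* Hypothetical helper showing what anv_CreateDevice now effectively
 * does when initializing the logical device. */
static void
anv_device_bind_info(struct anv_device *device,
                     struct anv_physical_device *physical)
{
   device->physical = physical;
   device->info = &physical->info;     /* share, don't duplicate */
}

/* Call sites switch from '.' to '->' access, e.g.
 * device->info.has_aux_map becomes device->info->has_aux_map. */
static bool
device_uses_aux_map(const struct anv_device *device)
{
   return device->info->has_aux_map;
}

Because the pointer is const-qualified, an accidental write such as device->info->ver = 0 now fails to compile, which is the second benefit the commit message mentions.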


@@ -568,7 +568,7 @@ buffered_event_count(struct intel_measure_device *device)
static void
print_combined_results(struct intel_measure_device *measure_device,
int result_count,
-struct intel_device_info *info)
+const struct intel_device_info *info)
{
if (result_count == 0)
return;
@@ -619,7 +619,7 @@ print_combined_results(struct intel_measure_device *measure_device,
*/
static void
intel_measure_print(struct intel_measure_device *device,
-struct intel_device_info *info)
+const struct intel_device_info *info)
{
while (true) {
const int events_to_combine = buffered_event_count(device);
@@ -635,7 +635,7 @@ intel_measure_print(struct intel_measure_device *device,
*/
void
intel_measure_gather(struct intel_measure_device *measure_device,
-struct intel_device_info *info)
+const struct intel_device_info *info)
{
pthread_mutex_lock(&measure_device->mutex);


@@ -164,6 +164,6 @@ bool intel_measure_ready(struct intel_measure_batch *batch);
struct intel_device_info;
void intel_measure_gather(struct intel_measure_device *device,
-struct intel_device_info *info);
+const struct intel_device_info *info);
#endif /* INTEL_MEASURE_H */


@@ -516,7 +516,7 @@ intel_driver_ds_init(void)
void
intel_ds_device_init(struct intel_ds_device *device,
-struct intel_device_info *devinfo,
+const struct intel_device_info *devinfo,
int drm_fd,
uint32_t gpu_id,
enum intel_ds_api api)


@@ -151,7 +151,7 @@ struct intel_ds_flush_data {
void intel_driver_ds_init(void);
void intel_ds_device_init(struct intel_ds_device *device,
-struct intel_device_info *devinfo,
+const struct intel_device_info *devinfo,
int drm_fd,
uint32_t gpu_id,
enum intel_ds_api api);


@@ -374,7 +374,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
{
VkResult result;
-if (device->info.verx10 >= 125) {
+if (device->info->verx10 >= 125) {
/* Make sure VMA addresses are 2MiB aligned for the block pool */
assert(anv_is_aligned(start_address, 2 * 1024 * 1024));
assert(anv_is_aligned(initial_size, 2 * 1024 * 1024));
@@ -845,7 +845,7 @@ anv_state_pool_init(struct anv_state_pool *pool,
assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
uint32_t initial_size = block_size * 16;
-if (device->info.verx10 >= 125)
+if (device->info->verx10 >= 125)
initial_size = MAX2(initial_size, 2 * 1024 * 1024);
VkResult result = anv_block_pool_init(&pool->block_pool, device, name,
@@ -1455,7 +1455,7 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
assert(stage < ARRAY_SIZE(pool->bos));
-const struct intel_device_info *devinfo = &device->info;
+const struct intel_device_info *devinfo = device->info;
/* On GFX version 12.5, scratch access changed to a surface-based model.
* Instead of each shader type having its own layout based on IDs passed
@@ -1635,13 +1635,13 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
uint32_t align = 4096;
/* Gen12 CCS surface addresses need to be 64K aligned. */
-if (device->info.ver >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
+if (device->info->ver >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
align = 64 * 1024;
/* For XeHP, lmem and smem cannot share a single PDE, which means they
* can't live in the same 2MiB aligned region.
*/
-if (device->info.verx10 >= 125)
+if (device->info->verx10 >= 125)
align = 2 * 1024 * 1024;
if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
@@ -1683,7 +1683,7 @@ anv_device_alloc_bo(struct anv_device *device,
size = align_u64(size, 4096);
uint64_t ccs_size = 0;
-if (device->info.has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
+if (device->info->has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
/* Align the size up to the next multiple of 64K so we don't have any
* AUX-TT entries pointing from a 64K page to itself.
*/
@@ -1737,7 +1737,7 @@ anv_device_alloc_bo(struct anv_device *device,
.is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
.has_client_visible_address =
(alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
-.has_implicit_ccs = ccs_size > 0 || (device->info.verx10 >= 125 &&
+.has_implicit_ccs = ccs_size > 0 || (device->info->verx10 >= 125 &&
(alloc_flags & ANV_BO_ALLOC_LOCAL_MEM)),
};
@@ -1764,7 +1764,7 @@ anv_device_alloc_bo(struct anv_device *device,
* I915_CACHING_CACHED, which on non-LLC means snooped so there's no
* need to do this there.
*/
-if (!device->info.has_llc) {
+if (!device->info->has_llc) {
anv_gem_set_caching(device, new_bo.gem_handle,
I915_CACHING_CACHED);
}
@@ -1781,7 +1781,7 @@ anv_device_alloc_bo(struct anv_device *device,
}
if (new_bo._ccs_size > 0) {
-assert(device->info.has_aux_map);
+assert(device->info->has_aux_map);
intel_aux_map_add_mapping(device->aux_map_ctx,
intel_canonical_address(new_bo.offset),
intel_canonical_address(new_bo.offset + new_bo.size),
@@ -1846,7 +1846,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
ANV_BO_ALLOC_FIXED_ADDRESS)));
assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS) ||
-(device->physical->has_implicit_ccs && device->info.has_aux_map));
+(device->physical->has_implicit_ccs && device->info->has_aux_map));
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
@@ -1936,7 +1936,7 @@ anv_device_import_bo(struct anv_device *device,
ANV_BO_ALLOC_FIXED_ADDRESS)));
assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS) ||
-(device->physical->has_implicit_ccs && device->info.has_aux_map));
+(device->physical->has_implicit_ccs && device->info->has_aux_map));
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
@@ -2153,7 +2153,7 @@ anv_device_release_bo(struct anv_device *device,
if (bo->_ccs_size > 0) {
assert(device->physical->has_implicit_ccs);
-assert(device->info.has_aux_map);
+assert(device->info->has_aux_map);
assert(bo->has_implicit_ccs);
intel_aux_map_unmap_range(device->aux_map_ctx,
intel_canonical_address(bo->offset),


@@ -214,7 +214,7 @@ get_ahw_buffer_format_properties2(
tiling = VK_IMAGE_TILING_LINEAR;
p->formatFeatures =
-anv_get_image_format_features2(&device->info, p->format, anv_format,
+anv_get_image_format_features2(device->info, p->format, anv_format,
tiling, NULL);
/* "Images can be created with an external format even if the Android hardware
@@ -530,7 +530,7 @@ anv_image_init_from_gralloc(struct anv_device *device,
}
anv_info.isl_tiling_flags = 1u << tiling;
-enum isl_format format = anv_get_isl_format(&device->info,
+enum isl_format format = anv_get_isl_format(device->info,
base_info->format,
VK_IMAGE_ASPECT_COLOR_BIT,
base_info->tiling);


@@ -588,7 +588,7 @@ emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
-bbs.DWordLength = cmd_buffer->device->info.ver < 8 ?
+bbs.DWordLength = cmd_buffer->device->info->ver < 8 ?
gfx7_length : gfx8_length;
bbs.SecondLevelBatchBuffer = Firstlevelbatch;
bbs.AddressSpaceIndicator = ASI_PPGTT;
@@ -792,7 +792,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
cmd_buffer->bt_next.map += bt_size;
cmd_buffer->bt_next.alloc_size -= bt_size;
-if (cmd_buffer->device->info.verx10 >= 125) {
+if (cmd_buffer->device->info->verx10 >= 125) {
/* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to change the binding
* table address independently from surface state base address. We no
* longer need any sort of offsetting.
@@ -1018,7 +1018,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
* prefetch.
*/
if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
/* Careful to have everything in signed integer. */
int32_t prefetch_len = devinfo->cs_prefetch_size;
int32_t batch_len =
@@ -2033,7 +2033,7 @@ anv_queue_exec_utrace_locked(struct anv_queue *queue,
if (result != VK_SUCCESS)
goto error;
-int ret = queue->device->info.no_hw ? 0 :
+int ret = queue->device->info->no_hw ? 0 :
anv_gem_execbuffer(queue->device, &execbuf.execbuf);
if (ret)
result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
@@ -2253,13 +2253,13 @@ anv_queue_exec_locked(struct anv_queue *queue,
.rsvd1 = device->context_id,
};
-int ret = queue->device->info.no_hw ? 0 :
+int ret = queue->device->info->no_hw ? 0 :
anv_gem_execbuffer(queue->device, &query_pass_execbuf);
if (ret)
result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
}
-int ret = queue->device->info.no_hw ? 0 :
+int ret = queue->device->info->no_hw ? 0 :
anv_gem_execbuffer(queue->device, &execbuf.execbuf);
if (ret)
result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
@@ -2388,7 +2388,7 @@ anv_queue_submit(struct vk_queue *vk_queue,
struct anv_device *device = queue->device;
VkResult result;
-if (queue->device->info.no_hw) {
+if (queue->device->info->no_hw) {
for (uint32_t i = 0; i < submit->signal_count; i++) {
result = vk_sync_signal(&device->vk,
submit->signals[i].sync,
@@ -2419,7 +2419,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
VkResult result = VK_SUCCESS;
int err;
-if (queue->device->info.no_hw)
+if (queue->device->info->no_hw)
return VK_SUCCESS;
/* This is only used by device init so we can assume the queue is empty and


@@ -95,7 +95,7 @@ anv_device_init_blorp(struct anv_device *device)
device->blorp.compiler = device->physical->compiler;
device->blorp.lookup_shader = lookup_blorp_shader;
device->blorp.upload_shader = upload_blorp_shader;
-switch (device->info.verx10) {
+switch (device->info->verx10) {
case 70:
device->blorp.exec = gfx7_blorp_exec;
break;
@@ -211,7 +211,7 @@ get_blorp_surf_for_anv_image(const struct anv_device *device,
if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX) {
assert(usage != 0);
-aux_usage = anv_layout_to_aux_usage(&device->info, image,
+aux_usage = anv_layout_to_aux_usage(device->info, image,
aspect, usage, layout);
}
@@ -503,7 +503,7 @@ copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
}
const enum isl_format linear_format =
-anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk.format,
+anv_get_isl_format(cmd_buffer->device->info, anv_image->vk.format,
aspect, VK_IMAGE_TILING_LINEAR);
const struct isl_format_layout *linear_fmtl =
isl_format_get_layout(linear_format);
@@ -678,10 +678,10 @@ blit_image(struct anv_cmd_buffer *cmd_buffer,
dst_image_layout, ISL_AUX_USAGE_NONE, &dst);
struct anv_format_plane src_format =
-anv_get_format_aspect(&cmd_buffer->device->info, src_image->vk.format,
+anv_get_format_aspect(cmd_buffer->device->info, src_image->vk.format,
1U << aspect_bit, src_image->vk.tiling);
struct anv_format_plane dst_format =
-anv_get_format_aspect(&cmd_buffer->device->info, dst_image->vk.format,
+anv_get_format_aspect(cmd_buffer->device->info, dst_image->vk.format,
1U << aspect_bit, dst_image->vk.tiling);
unsigned dst_start, dst_end;
@@ -1019,7 +1019,7 @@ void anv_CmdClearColorImage(
imageLayout, ISL_AUX_USAGE_NONE, &surf);
struct anv_format_plane src_format =
-anv_get_format_aspect(&cmd_buffer->device->info, image->vk.format,
+anv_get_format_aspect(cmd_buffer->device->info, image->vk.format,
VK_IMAGE_ASPECT_COLOR_BIT, image->vk.tiling);
unsigned base_layer = pRanges[r].baseArrayLayer;
@@ -1263,7 +1263,7 @@ clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
if (d_att->vk_format != VK_FORMAT_UNDEFINED) {
-depth_format = anv_get_isl_format(&cmd_buffer->device->info,
+depth_format = anv_get_isl_format(cmd_buffer->device->info,
d_att->vk_format,
VK_IMAGE_ASPECT_DEPTH_BIT,
VK_IMAGE_TILING_OPTIMAL);
@@ -1446,12 +1446,12 @@ resolve_image(struct anv_cmd_buffer *cmd_buffer,
anv_foreach_image_aspect_bit(aspect_bit, src_image,
region->srcSubresource.aspectMask) {
enum isl_aux_usage src_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info, src_image,
+anv_layout_to_aux_usage(cmd_buffer->device->info, src_image,
(1 << aspect_bit),
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
src_image_layout);
enum isl_aux_usage dst_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_image,
+anv_layout_to_aux_usage(cmd_buffer->device->info, dst_image,
(1 << aspect_bit),
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
dst_image_layout);
@@ -1818,7 +1818,7 @@ anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
/* Multisampling with multi-planar formats is not supported */
assert(image->n_planes == 1);
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
struct blorp_batch batch;
anv_blorp_batch_init(cmd_buffer, &batch,
BLORP_BATCH_PREDICATE_ENABLE * predicate +
@@ -1910,7 +1910,7 @@ anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
anv_image_aux_layers(image, aspect, level));
const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
struct blorp_batch batch;
anv_blorp_batch_init(cmd_buffer, &batch,


@@ -240,7 +240,7 @@ VkResult anv_ResetCommandBuffer(
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
anv_genX(devinfo, cmd_buffer_emit_state_base_address)(cmd_buffer);
}
@@ -253,7 +253,7 @@ anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
uint32_t base_layer,
uint32_t layer_count)
{
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
anv_genX(devinfo, cmd_buffer_mark_image_written)(cmd_buffer, image,
aspect, aux_usage,
level, base_layer,
@@ -263,7 +263,7 @@ anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
anv_genX(devinfo, cmd_emit_conditional_render_predicate)(cmd_buffer);
}
@@ -314,7 +314,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
struct anv_device *device = cmd_buffer->device;
uint64_t ray_shadow_size =
-align_u64(brw_rt_ray_queries_shadow_stacks_size(&device->info,
+align_u64(brw_rt_ray_queries_shadow_stacks_size(device->info,
pipeline->ray_queries),
4096);
if (ray_shadow_size > 0 &&
@@ -359,7 +359,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
/* Fill the push constants & mark them dirty. */
struct anv_state ray_query_global_state =
-anv_genX(&device->info, cmd_buffer_ray_query_globals)(cmd_buffer);
+anv_genX(device->info, cmd_buffer_ray_query_globals)(cmd_buffer);
struct anv_address ray_query_globals_addr = (struct anv_address) {
.bo = device->dynamic_state_pool.block_pool.bo,
@@ -736,7 +736,7 @@ anv_cmd_buffer_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
struct anv_push_constants *data =
&cmd_buffer->state.compute.base.push_constants;
struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
@@ -751,7 +751,7 @@ anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
return (struct anv_state) { .offset = 0 };
const unsigned push_constant_alignment =
-cmd_buffer->device->info.ver < 8 ? 32 : 64;
+cmd_buffer->device->info->ver < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
ALIGN(total_push_constants_size, push_constant_alignment);
struct anv_state state;
@@ -1076,7 +1076,7 @@ void anv_CmdSetRayTracingPipelineStackSizeKHR(
if (rt->scratch.layout.total_size == 1 << stack_size_log2)
return;
-brw_rt_compute_scratch_layout(&rt->scratch.layout, &device->info,
+brw_rt_compute_scratch_layout(&rt->scratch.layout, device->info,
stack_ids_per_dss, 1 << stack_size_log2);
unsigned bucket = stack_size_log2 - 10;


@@ -2901,7 +2901,7 @@ anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align,
static void
anv_device_init_border_colors(struct anv_device *device)
{
-if (device->info.platform == INTEL_PLATFORM_HSW) {
+if (device->info->platform == INTEL_PLATFORM_HSW) {
static const struct hsw_border_color border_colors[] = {
[VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
[VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
@@ -3271,7 +3271,7 @@ VkResult anv_CreateDevice(
}
}
-device->info = physical_device->info;
+device->info = &physical_device->info;
device->isl_dev = physical_device->isl_dev;
/* On Broadwell and later, we can use batch chaining to more efficiently
@@ -3279,7 +3279,7 @@ VkResult anv_CreateDevice(
* command parser gets in the way and we have to fall back to growing
* the batch.
*/
-device->can_chain_batches = device->info.ver >= 8;
+device->can_chain_batches = device->info->ver >= 8;
device->robust_buffer_access = robust_buffer_access;
@@ -3327,7 +3327,7 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_general_state_pool;
-if (device->info.ver >= 8) {
+if (device->info->ver >= 8) {
/* The border color pointer is limited to 24 bits, so we need to make
* sure that any such color used at any point in the program doesn't
* exceed that limit.
@@ -3352,7 +3352,7 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_instruction_state_pool;
-if (device->info.verx10 >= 125) {
+if (device->info->verx10 >= 125) {
/* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to give the binding
* table its own base address separately from surface state base.
*/
@@ -3373,7 +3373,7 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_surface_state_pool;
-if (device->info.has_aux_map) {
+if (device->info->has_aux_map) {
device->aux_map_ctx = intel_aux_map_init(device, &aux_map_allocator,
&physical_device->info);
if (!device->aux_map_ctx)
@@ -3404,7 +3404,7 @@ VkResult anv_CreateDevice(
if (device->vk.enabled_extensions.KHR_ray_query) {
uint32_t ray_queries_size =
-align_u32(brw_rt_ray_queries_hw_stacks_size(&device->info), 4096);
+align_u32(brw_rt_ray_queries_hw_stacks_size(device->info), 4096);
result = anv_device_alloc_bo(device, "ray queries",
ray_queries_size,
@@ -3419,11 +3419,11 @@ VkResult anv_CreateDevice(
if (result != VK_SUCCESS)
goto fail_ray_query_bo;
-if (device->info.ver >= 12 &&
+if (device->info->ver >= 12 &&
device->vk.enabled_extensions.KHR_fragment_shading_rate) {
uint32_t n_cps_states = 3 * 3; /* All combinaisons of X by Y CP sizes (1, 2, 4) */
-if (device->info.has_coarse_pixel_primitive_and_cb)
+if (device->info->has_coarse_pixel_primitive_and_cb)
n_cps_states *= 5 * 5; /* 5 combiners by 2 operators */
n_cps_states += 1; /* Disable CPS */
@@ -3433,12 +3433,12 @@ VkResult anv_CreateDevice(
device->cps_states =
anv_state_pool_alloc(&device->dynamic_state_pool,
-n_cps_states * CPS_STATE_length(&device->info) * 4,
+n_cps_states * CPS_STATE_length(device->info) * 4,
32);
if (device->cps_states.map == NULL)
goto fail_trivial_batch;
-anv_genX(&device->info, init_cps_device_state)(device);
+anv_genX(device->info, init_cps_device_state)(device);
}
/* Allocate a null surface state at surface state offset 0. This makes
@@ -3458,7 +3458,7 @@ VkResult anv_CreateDevice(
/* TODO(RT): Do we want some sort of data structure for this? */
memset(device->rt_scratch_bos, 0, sizeof(device->rt_scratch_bos));
-result = anv_genX(&device->info, init_device_state)(device);
+result = anv_genX(device->info, init_device_state)(device);
if (result != VK_SUCCESS)
goto fail_trivial_batch_bo_and_scratch_pool;
@@ -3515,7 +3515,7 @@ VkResult anv_CreateDevice(
fail_workaround_bo:
anv_device_release_bo(device, device->workaround_bo);
fail_surface_aux_map_pool:
-if (device->info.has_aux_map) {
+if (device->info->has_aux_map) {
intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
@@ -3527,7 +3527,7 @@ VkResult anv_CreateDevice(
fail_instruction_state_pool:
anv_state_pool_finish(&device->instruction_state_pool);
fail_dynamic_state_pool:
-if (device->info.ver >= 8)
+if (device->info->ver >= 8)
anv_state_reserved_pool_finish(&device->custom_border_colors);
anv_state_pool_finish(&device->dynamic_state_pool);
fail_general_state_pool:
@@ -3583,7 +3583,7 @@ void anv_DestroyDevice(
/* We only need to free these to prevent valgrind errors. The backing
* BO will go away in a couple of lines so we don't actually leak.
*/
-if (device->info.ver >= 8)
+if (device->info->ver >= 8)
anv_state_reserved_pool_finish(&device->custom_border_colors);
anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
@@ -3607,7 +3607,7 @@ void anv_DestroyDevice(
anv_device_release_bo(device, device->workaround_bo);
anv_device_release_bo(device, device->trivial_batch_bo);
-if (device->info.has_aux_map) {
+if (device->info->has_aux_map) {
intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
@@ -3858,7 +3858,7 @@ VkResult anv_AllocateMemory(
}
/* By default, we want all VkDeviceMemory objects to support CCS */
-if (device->physical->has_implicit_ccs && device->info.has_aux_map)
+if (device->physical->has_implicit_ccs && device->info->has_aux_map)
alloc_flags |= ANV_BO_ALLOC_IMPLICIT_CCS;
/* If i915 reported a mappable/non_mappable vram regions and the
@@ -4176,7 +4176,7 @@ VkResult anv_MapMemory(
uint32_t gem_flags = 0;
-if (!device->info.has_llc &&
+if (!device->info->has_llc &&
(mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
gem_flags |= I915_MMAP_WC;
@@ -4660,7 +4660,7 @@ VkResult anv_GetCalibratedTimestampsEXT(
uint64_t *pMaxDeviation)
{
ANV_FROM_HANDLE(anv_device, device, _device);
-uint64_t timestamp_frequency = device->info.timestamp_frequency;
+uint64_t timestamp_frequency = device->info->timestamp_frequency;
int ret;
int d;
uint64_t begin, end;


@@ -102,7 +102,7 @@ anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
{
struct drm_i915_gem_mmap_offset gem_mmap = {
.handle = gem_handle,
-.flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
+.flags = device->info->has_local_mem ? I915_MMAP_OFFSET_FIXED :
(flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
};
assert(offset == 0);
@@ -122,7 +122,7 @@ static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
uint64_t offset, uint64_t size, uint32_t flags)
{
-assert(!device->info.has_local_mem);
+assert(!device->info->has_local_mem);
struct drm_i915_gem_mmap gem_mmap = {
.handle = gem_handle,
@@ -286,7 +286,7 @@ anv_gem_set_tiling(struct anv_device *device,
/* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
* nothing needs to be done.
*/
-if (!device->info.has_tiling_uapi)
+if (!device->info->has_tiling_uapi)
return 0;
/* set_tiling overwrites the input on the error path, so we have to open


@@ -650,7 +650,7 @@ add_aux_state_tracking_buffer(struct anv_device *device,
image->vk.aspects & (VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV |
VK_IMAGE_ASPECT_DEPTH_BIT));
-const unsigned clear_color_state_size = device->info.ver >= 10 ?
+const unsigned clear_color_state_size = device->info->ver >= 10 ?
device->isl_dev.ss.clear_color_state_size :
device->isl_dev.ss.clear_value_size;
@@ -731,7 +731,7 @@ add_aux_surface_if_supported(struct anv_device *device,
return VK_SUCCESS;
}
-if (device->info.ver == 7) {
+if (device->info->ver == 7) {
anv_perf_warn(VK_LOG_OBJS(&image->vk.base), "Implement gfx7 HiZ");
return VK_SUCCESS;
}
@@ -741,7 +741,7 @@ add_aux_surface_if_supported(struct anv_device *device,
return VK_SUCCESS;
}
-if (device->info.ver == 8 && image->vk.samples > 1) {
+if (device->info->ver == 8 && image->vk.samples > 1) {
anv_perf_warn(VK_LOG_OBJS(&image->vk.base),
"Enable gfx8 multisampled HiZ");
return VK_SUCCESS;
@@ -771,10 +771,10 @@ add_aux_surface_if_supported(struct anv_device *device,
*
* TODO: This is a heuristic trade-off; we haven't tuned it at all.
*/
-assert(device->info.ver >= 12);
+assert(device->info->ver >= 12);
image->planes[plane].aux_usage = ISL_AUX_USAGE_HIZ_CCS_WT;
} else {
-assert(device->info.ver >= 12);
+assert(device->info->ver >= 12);
image->planes[plane].aux_usage = ISL_AUX_USAGE_HIZ_CCS;
}
@@ -830,11 +830,11 @@ add_aux_surface_if_supported(struct anv_device *device,
return VK_SUCCESS;
/* Choose aux usage */
-if (anv_formats_ccs_e_compatible(&device->info, image->vk.create_flags,
+if (anv_formats_ccs_e_compatible(device->info, image->vk.create_flags,
image->vk.format, image->vk.tiling,
image->vk.usage, fmt_list)) {
image->planes[plane].aux_usage = ISL_AUX_USAGE_CCS_E;
-} else if (device->info.ver >= 12) {
+} else if (device->info->ver >= 12) {
anv_perf_warn(VK_LOG_OBJS(&image->vk.base),
"The CCS_D aux mode is not yet handled on "
"Gfx12+. Not allocating a CCS buffer.");
@@ -1152,7 +1152,7 @@ check_drm_format_mod(const struct anv_device *device,
isl_drm_modifier_get_info(image->vk.drm_format_mod);
/* Driver must support the modifier. */
-assert(isl_drm_modifier_get_score(&device->info, isl_mod_info->modifier));
+assert(isl_drm_modifier_get_score(device->info, isl_mod_info->modifier));
/* Enforced by us, not the Vulkan spec. */
assert(image->vk.image_type == VK_IMAGE_TYPE_2D);
@@ -1205,7 +1205,7 @@ add_all_surfaces_implicit_layout(
isl_tiling_flags_t isl_tiling_flags,
isl_surf_usage_flags_t isl_extra_usage_flags)
{
-const struct intel_device_info *devinfo = &device->info;
+const struct intel_device_info *devinfo = device->info;
VkResult result;
u_foreach_bit(b, image->vk.aspects) {
@@ -1269,7 +1269,7 @@ add_all_surfaces_explicit_layout(
isl_tiling_flags_t isl_tiling_flags,
isl_surf_usage_flags_t isl_extra_usage_flags)
{
-const struct intel_device_info *devinfo = &device->info;
+const struct intel_device_info *devinfo = device->info;
const uint32_t mod_plane_count = drm_info->drmFormatModifierPlaneCount;
const bool mod_has_aux =
isl_drm_modifier_has_aux(drm_info->drmFormatModifier);
@@ -1482,7 +1482,7 @@ anv_image_init(struct anv_device *device, struct anv_image *image,
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT);
const isl_tiling_flags_t isl_tiling_flags =
-choose_isl_tiling_flags(&device->info, create_info, isl_mod_info,
+choose_isl_tiling_flags(device->info, create_info, isl_mod_info,
image->vk.wsi_legacy_scanout);
const VkImageFormatListCreateInfo *fmt_list =
@@ -1517,7 +1517,7 @@ anv_image_init(struct anv_device *device, struct anv_image *image,
*/
for (uint32_t p = 0; p < image->n_planes; p++) {
image->planes[p].can_non_zero_fast_clear =
-can_fast_clear_with_non_zero_color(&device->info, image, p, fmt_list);
+can_fast_clear_with_non_zero_color(device->info, image, p, fmt_list);
}
return VK_SUCCESS;
@@ -1670,7 +1670,7 @@ resolve_ahw_image(struct anv_device *device,
/* Check format. */
VkFormat vk_format = vk_format_from_android(desc.format, desc.usage);
-enum isl_format isl_fmt = anv_get_isl_format(&device->info,
+enum isl_format isl_fmt = anv_get_isl_format(device->info,
vk_format,
VK_IMAGE_ASPECT_COLOR_BIT,
vk_tiling);
@@ -2500,7 +2500,7 @@ anv_image_fill_surface_state(struct anv_device *device,
*/
if (anv_surface_is_valid(&image->planes[plane].shadow_surface) &&
aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
-assert(device->info.ver == 7);
+assert(device->info->ver == 7);
assert(view_usage & ISL_SURF_USAGE_TEXTURE_BIT);
surface = &image->planes[plane].shadow_surface;
}
@@ -2509,14 +2509,14 @@ anv_image_fill_surface_state(struct anv_device *device,
view.swizzle = anv_swizzle_for_render(view.swizzle);
/* On Ivy Bridge and Bay Trail we do the swizzle in the shader */
-if (device->info.verx10 == 70)
+if (device->info->verx10 == 70)
view.swizzle = ISL_SWIZZLE_IDENTITY;
/* If this is a HiZ buffer we can sample from with a programmable clear
* value (SKL+), define the clear value to the optimal constant.
*/
union isl_color_value default_clear_color = { .u32 = { 0, } };
-if (device->info.ver >= 9 && aspect == VK_IMAGE_ASPECT_DEPTH_BIT)
+if (device->info->ver >= 9 && aspect == VK_IMAGE_ASPECT_DEPTH_BIT)
default_clear_color.f32[0] = ANV_HZ_FC_VAL;
if (!clear_color)
clear_color = &default_clear_color;
@@ -2526,7 +2526,7 @@ anv_image_fill_surface_state(struct anv_device *device,
if (view_usage == ISL_SURF_USAGE_STORAGE_BIT &&
(flags & ANV_IMAGE_VIEW_STATE_STORAGE_LOWERED) &&
-!isl_has_matching_typed_storage_image_format(&device->info,
+!isl_has_matching_typed_storage_image_format(device->info,
view.format)) {
/* In this case, we are a writeable storage buffer which needs to be
* lowered to linear. All tiling and offset calculations will be done in
@@ -2551,11 +2551,11 @@ anv_image_fill_surface_state(struct anv_device *device,
* supports.
*/
enum isl_format lower_format =
-isl_lower_storage_image_format(&device->info, view.format);
+isl_lower_storage_image_format(device->info, view.format);
if (aux_usage != ISL_AUX_USAGE_NONE) {
-assert(device->info.verx10 >= 125);
+assert(device->info->verx10 >= 125);
assert(aux_usage == ISL_AUX_USAGE_CCS_E);
-assert(isl_formats_are_ccs_e_compatible(&device->info,
+assert(isl_formats_are_ccs_e_compatible(device->info,
view.format,
lower_format));
}
@@ -2592,7 +2592,7 @@ anv_image_fill_surface_state(struct anv_device *device,
assert(ok);
isl_surf = &tmp_surf;
-if (device->info.ver <= 8) {
+if (device->info->ver <= 8) {
assert(surface->isl.tiling == ISL_TILING_LINEAR);
assert(tile_x_sa == 0);
assert(tile_y_sa == 0);
@@ -2607,7 +2607,7 @@ anv_image_fill_surface_state(struct anv_device *device,
state_inout->aux_address = aux_address;
struct anv_address clear_address = ANV_NULL_ADDRESS;
-if (device->info.ver >= 10 && isl_aux_usage_has_fast_clears(aux_usage)) {
+if (device->info->ver >= 10 && isl_aux_usage_has_fast_clears(aux_usage)) {
clear_address = anv_image_get_clear_color_addr(device, image, aspect);
}
state_inout->clear_address = clear_address;
@@ -2638,7 +2638,7 @@ anv_image_fill_surface_state(struct anv_device *device,
state_inout->aux_address.offset |= *aux_addr_dw & 0xfff;
}
-if (device->info.ver >= 10 && clear_address.bo) {
+if (device->info->ver >= 10 && clear_address.bo) {
uint32_t *clear_addr_dw = state_inout->state.map +
device->isl_dev.ss.clear_color_state_offset;
assert((clear_address.offset & 0x3f) == 0);
@@ -2717,7 +2717,7 @@ anv_CreateImageView(VkDevice _device,
const uint32_t vplane =
anv_aspect_to_plane(iview->vk.aspects, 1UL << iaspect_bit);
struct anv_format_plane format;
-format = anv_get_format_plane(&device->info, iview->vk.view_format,
+format = anv_get_format_plane(device->info, iview->vk.view_format,
vplane, image->vk.tiling);
iview->planes[vplane].image_plane = iplane;
@@ -2755,11 +2755,11 @@ anv_CreateImageView(VkDevice _device,
iview->planes[vplane].general_sampler_surface_state.state = alloc_surface_state(device);
enum isl_aux_usage general_aux_usage =
-anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
VK_IMAGE_USAGE_SAMPLED_BIT,
VK_IMAGE_LAYOUT_GENERAL);
enum isl_aux_usage optimal_aux_usage =
-anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
VK_IMAGE_USAGE_SAMPLED_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
@@ -2783,7 +2783,7 @@ anv_CreateImageView(VkDevice _device,
/* NOTE: This one needs to go last since it may stomp isl_view.format */
if (iview->vk.usage & VK_IMAGE_USAGE_STORAGE_BIT) {
enum isl_aux_usage general_aux_usage =
-anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+anv_layout_to_aux_usage(device->info, image, 1UL << iaspect_bit,
VK_IMAGE_USAGE_STORAGE_BIT,
VK_IMAGE_LAYOUT_GENERAL);
iview->planes[vplane].storage_surface_state.state = alloc_surface_state(device);
@@ -2805,7 +2805,7 @@ anv_CreateImageView(VkDevice _device,
general_aux_usage, NULL,
ANV_IMAGE_VIEW_STATE_STORAGE_LOWERED,
&iview->planes[vplane].lowered_storage_surface_state,
-device->info.ver >= 9 ? NULL :
+device->info->ver >= 9 ? NULL :
&iview->planes[vplane].lowered_storage_image_param);
} else {
/* In this case, we support the format but, because there's no
@@ -2814,7 +2814,7 @@ anv_CreateImageView(VkDevice _device,
* reads but for most writes. Instead of hanging if someone gets
* it wrong, we give them a NULL descriptor.
*/
-assert(isl_format_supports_typed_writes(&device->info,
+assert(isl_format_supports_typed_writes(device->info,
format.isl_format));
iview->planes[vplane].lowered_storage_surface_state.state =
device->null_surface_state;
@@ -2883,7 +2883,7 @@ anv_CreateBufferView(VkDevice _device,
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_format_plane format;
-format = anv_get_format_plane(&device->info, pCreateInfo->format,
+format = anv_get_format_plane(device->info, pCreateInfo->format,
0, VK_IMAGE_TILING_LINEAR);
const uint32_t format_bs = isl_format_get_layout(format.isl_format)->bpb / 8;
@@ -2914,9 +2914,9 @@ anv_CreateBufferView(VkDevice _device,
view->address, view->range, format_bs);
enum isl_format lowered_format =
-isl_has_matching_typed_storage_image_format(&device->info,
+isl_has_matching_typed_storage_image_format(device->info,
format.isl_format) ?
-isl_lower_storage_image_format(&device->info, format.isl_format) :
+isl_lower_storage_image_format(device->info, format.isl_format) :
ISL_FORMAT_RAW;
/* If we lower the format, we should ensure either they both match in


@@ -323,7 +323,7 @@ anv_measure_reset(struct anv_cmd_buffer *cmd_buffer)
* yet been processed
*/
intel_measure_gather(&device->physical->measure_device,
-&device->info);
+device->info);
assert(cmd_buffer->device != NULL);


@@ -290,7 +290,7 @@ bool
anv_check_for_primitive_replication(nir_shader **shaders,
struct anv_graphics_pipeline *pipeline)
{
-assert(pipeline->base.device->info.ver >= 12);
+assert(pipeline->base.device->info->ver >= 12);
static int primitive_replication_max_views = -1;
if (primitive_replication_max_views < 0) {


@@ -117,7 +117,7 @@ anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
properties[p++] = metric_id;
properties[p++] = DRM_I915_PERF_PROP_OA_FORMAT;
-properties[p++] = device->info.ver >= 8 ?
+properties[p++] = device->info->ver >= 8 ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 :
I915_OA_FORMAT_A45_B8_C8;
@@ -139,7 +139,7 @@ anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
* support it.
*/
if (intel_perf_has_global_sseu(device->physical->perf) &&
-device->info.verx10 < 125) {
+device->info->verx10 < 125) {
properties[p++] = DRM_I915_PERF_PROP_GLOBAL_SSEU;
properties[p++] = (uintptr_t) &device->physical->perf->sseu;
}


@@ -309,7 +309,7 @@ populate_base_prog_key(const struct anv_device *device,
key->limit_trig_input_range =
device->physical->instance->limit_trig_input_range;
-populate_sampler_prog_key(&device->info, &key->tex);
+populate_sampler_prog_key(device->info, &key->tex);
}
static void
@@ -1600,7 +1600,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_pipeline *pipeline,
next_stage = &stages[s];
}
-if (pipeline->base.device->info.ver >= 12 &&
+if (pipeline->base.device->info->ver >= 12 &&
pipeline->view_mask != 0) {
/* For some pipelines HW Primitive Replication can be used instead of
* instancing to implement Multiview. This depend on how viewIndex is
@@ -1653,7 +1653,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_pipeline *pipeline,
* We iterate backwards in the stage and stop on the first shader that can
* set the value.
*/
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
if (devinfo->has_coarse_pixel_primitive_and_cb &&
stages[MESA_SHADER_FRAGMENT].info &&
stages[MESA_SHADER_FRAGMENT].key.wm.coarse_pixel &&
@@ -1988,7 +1988,7 @@ anv_compute_pipeline_create(struct anv_device *device,
return result;
}
-anv_genX(&device->info, compute_pipeline_emit)(pipeline);
+anv_genX(device->info, compute_pipeline_emit)(pipeline);
*pPipeline = anv_pipeline_to_handle(&pipeline->base);
@@ -2046,7 +2046,7 @@ VkResult anv_CreateComputePipelines(
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
-const struct intel_device_info *devinfo = &pipeline->device->info;
+const struct intel_device_info *devinfo = pipeline->device->info;
const struct intel_l3_weights w =
intel_get_default_l3_weights(devinfo, true, needs_slm);
@@ -2191,7 +2191,7 @@ anv_graphics_pipeline_create(struct anv_device *device,
return result;
}
-anv_genX(&device->info, graphics_pipeline_emit)(pipeline, &state);
+anv_genX(device->info, graphics_pipeline_emit)(pipeline, &state);
*pPipeline = anv_pipeline_to_handle(&pipeline->base);
@@ -2491,7 +2491,7 @@ anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
struct vk_pipeline_cache *cache,
const VkRayTracingPipelineCreateInfoKHR *info)
{
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
VkResult result;
VkPipelineCreationFeedback pipeline_feedback = {
@@ -2771,7 +2771,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
nir_shader *trivial_return_nir =
brw_nir_create_trivial_return_shader(device->physical->compiler, tmp_ctx);
-NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, &device->info);
+NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, device->info);
struct anv_pipeline_bind_map bind_map = {
.surface_count = 0,
@@ -2942,7 +2942,7 @@ anv_ray_tracing_pipeline_create(
return result;
}
-anv_genX(&device->info, ray_tracing_pipeline_emit)(pipeline);
+anv_genX(device->info, ray_tracing_pipeline_emit)(pipeline);
*pPipeline = anv_pipeline_to_handle(&pipeline->base);


@@ -1157,7 +1157,7 @@ struct anv_device {
struct vk_device vk;
struct anv_physical_device * physical;
-struct intel_device_info info;
+const struct intel_device_info * info;
struct isl_device isl_dev;
int context_id;
int fd;
@@ -1596,7 +1596,7 @@ static inline void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
unsigned reloc_size = 0;
-if (device->info.ver >= 8) {
+if (device->info->ver >= 8) {
reloc_size = sizeof(uint64_t);
*(uint64_t *)p = intel_canonical_address(v);
} else {
@@ -3702,7 +3702,7 @@ anv_image_get_fast_clear_type_addr(const struct anv_device *device,
struct anv_address addr =
anv_image_get_clear_color_addr(device, image, aspect);
-const unsigned clear_color_state_size = device->info.ver >= 10 ?
+const unsigned clear_color_state_size = device->info->ver >= 10 ?
device->isl_dev.ss.clear_color_state_size :
device->isl_dev.ss.clear_value_size;
return anv_address_add(addr, clear_color_state_size);
@@ -3800,7 +3800,7 @@ anv_image_plane_uses_aux_map(const struct anv_device *device,
const struct anv_image *image,
uint32_t plane)
{
-return device->info.has_aux_map &&
+return device->info->has_aux_map &&
isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
}


@@ -83,7 +83,7 @@ anv_device_utrace_emit_copy_ts_buffer(struct u_trace_context *utctx,
struct anv_address to_addr = (struct anv_address) {
.bo = ts_to, .offset = to_offset * sizeof(uint64_t) };
-anv_genX(&device->info, emit_so_memcpy)(&flush->memcpy_state,
+anv_genX(device->info, emit_so_memcpy)(&flush->memcpy_state,
to_addr, from_addr, count * sizeof(uint64_t));
}
@@ -143,7 +143,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
flush->batch_bo->map, flush->batch_bo->size);
/* Emit the copies */
-anv_genX(&device->info, emit_so_memcpy_init)(&flush->memcpy_state,
+anv_genX(device->info, emit_so_memcpy_init)(&flush->memcpy_state,
device,
&flush->batch);
for (uint32_t i = 0; i < cmd_buffer_count; i++) {
@@ -157,7 +157,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
anv_device_utrace_emit_copy_ts_buffer);
}
}
-anv_genX(&device->info, emit_so_memcpy_fini)(&flush->memcpy_state);
+anv_genX(device->info, emit_so_memcpy_fini)(&flush->memcpy_state);
u_trace_flush(&flush->ds.trace, flush, true);
@@ -260,7 +260,7 @@ anv_utrace_read_ts(struct u_trace_context *utctx,
if (ts[idx] == U_TRACE_NO_TIMESTAMP)
return U_TRACE_NO_TIMESTAMP;
-return intel_device_info_timebase_scale(&device->info, ts[idx]);
+return intel_device_info_timebase_scale(device->info, ts[idx]);
}
static const char *
@@ -284,7 +284,7 @@ void
anv_device_utrace_init(struct anv_device *device)
{
anv_bo_pool_init(&device->utrace_bo_pool, device, "utrace");
-intel_ds_device_init(&device->ds, &device->info, device->fd,
+intel_ds_device_init(&device->ds, device->info, device->fd,
device->physical->local_minor - 128,
INTEL_DS_API_VULKAN);
u_trace_context_init(&device->ds.trace_context,


@@ -386,7 +386,7 @@ genX(blorp_exec)(struct blorp_batch *batch,
if (!cmd_buffer->state.current_l3_config) {
const struct intel_l3_config *cfg =
-intel_get_default_l3_config(&cmd_buffer->device->info);
+intel_get_default_l3_config(cmd_buffer->device->info);
genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
}


@@ -391,7 +391,7 @@ anv_can_fast_clear_color_view(struct anv_device * device,
* to use the attachment can't handle fast-clears.
*/
enum anv_fast_clear_type fast_clear_type =
-anv_layout_to_fast_clear_type(&device->info, iview->image,
+anv_layout_to_fast_clear_type(device->info, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT,
layout);
switch (fast_clear_type) {
@@ -480,11 +480,11 @@ anv_can_hiz_clear_ds_view(struct anv_device *device,
return false;
const enum isl_aux_usage clear_aux_usage =
-anv_layout_to_aux_usage(&device->info, iview->image,
+anv_layout_to_aux_usage(device->info, iview->image,
VK_IMAGE_ASPECT_DEPTH_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
layout);
-if (!blorp_can_hiz_clear_depth(&device->info,
+if (!blorp_can_hiz_clear_depth(device->info,
&iview->image->planes[0].primary_surface.isl,
clear_aux_usage,
iview->planes[0].isl.base_level,
@@ -504,7 +504,7 @@ anv_can_hiz_clear_ds_view(struct anv_device *device,
* portion of a HiZ buffer. Testing has revealed that Gfx8 only supports
* returning 0.0f. Gens prior to gfx8 do not support this feature at all.
*/
-if (GFX_VER == 8 && anv_can_sample_with_hiz(&device->info, iview->image))
+if (GFX_VER == 8 && anv_can_sample_with_hiz(device->info, iview->image))
return false;
/* If we got here, then we can fast clear */
@@ -541,7 +541,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t a = 0; a < layer_count; a++) {
const uint32_t layer = base_layer + a;
@@ -635,7 +635,7 @@ transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
cmd_buffer->device->physical->has_implicit_ccs &&
-cmd_buffer->device->info.has_aux_map) {
+cmd_buffer->device->info->has_aux_map) {
anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
0, 1, base_layer, layer_count);
}
@@ -650,11 +650,11 @@ transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
return;
const enum isl_aux_state initial_state =
-anv_layout_to_aux_state(&cmd_buffer->device->info, image,
+anv_layout_to_aux_state(cmd_buffer->device->info, image,
VK_IMAGE_ASPECT_DEPTH_BIT,
initial_layout);
const enum isl_aux_state final_state =
-anv_layout_to_aux_state(&cmd_buffer->device->info, image,
+anv_layout_to_aux_state(cmd_buffer->device->info, image,
VK_IMAGE_ASPECT_DEPTH_BIT,
final_layout);
@@ -744,7 +744,7 @@ transition_stencil_buffer(struct anv_cmd_buffer *cmd_buffer,
if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
cmd_buffer->device->physical->has_implicit_ccs &&
-cmd_buffer->device->info.has_aux_map) {
+cmd_buffer->device->info->has_aux_map) {
anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
base_level, level_count, base_layer, layer_count);
@@ -843,7 +843,7 @@ anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
enum anv_fast_clear_type fast_clear_supported)
{
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
const struct mi_value fast_clear_type =
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -926,7 +926,7 @@ anv_cmd_simple_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
enum anv_fast_clear_type fast_clear_supported)
{
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value fast_clear_type_mem =
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
@@ -1140,7 +1140,7 @@ genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
#endif
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
if (copy_from_surface_state) {
mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
@@ -1188,7 +1188,7 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
bool will_full_fast_clear)
{
struct anv_device *device = cmd_buffer->device;
-const struct intel_device_info *devinfo = &device->info;
+const struct intel_device_info *devinfo = device->info;
/* Validate the inputs. */
assert(cmd_buffer);
assert(image && image->vk.aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
@@ -1671,7 +1671,7 @@ genX(BeginCommandBuffer)(
* ensured that we have the table even if this command buffer doesn't
* initialize any images.
*/
-if (cmd_buffer->device->info.has_aux_map) {
+if (cmd_buffer->device->info->has_aux_map) {
anv_add_pending_pipe_bits(cmd_buffer,
ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
"new cmd buffer with aux-tt");
@@ -1869,7 +1869,7 @@ genX(CmdExecuteCommands)(
* regardless of conditional rendering being enabled in primary.
*/
struct mi_builder b;
-mi_builder_init(&b, &primary->device->info, &primary->batch);
+mi_builder_init(&b, primary->device->info, &primary->batch);
mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
mi_imm(UINT64_MAX));
}
@@ -2317,7 +2317,7 @@ genX(emit_apply_pipe_flushes)(struct anv_batch *batch,
}
#if GFX_VER == 12
-if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) && device->info.has_aux_map) {
+if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) && device->info->has_aux_map) {
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
lri.RegisterOffset = GENX(GFX_CCS_AUX_INV_num);
lri.DataDWord = 1;
@@ -2481,7 +2481,7 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
return;
const unsigned push_constant_kb =
-cmd_buffer->device->info.max_constant_urb_size_kb;
+cmd_buffer->device->info->max_constant_urb_size_kb;
const unsigned num_stages =
util_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
@@ -2519,7 +2519,7 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
* program push constant command(ZERO length) without any commit between
* them.
*/
-if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_ALL), c) {
c.MOCS = anv_mocs(cmd_buffer->device, NULL, 0);
}
@@ -3883,7 +3883,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
* 3dstate_so_buffer_index_0/1/2/3 states to ensure so_buffer_index_*
* state is not combined with other state changes.
*/
-if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
anv_add_pending_pipe_bits(cmd_buffer,
ANV_PIPE_CS_STALL_BIT,
"before SO_BUFFER change WA");
@@ -3927,7 +3927,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
}
}
-if (intel_device_info_is_dg2(&cmd_buffer->device->info)) {
+if (intel_device_info_is_dg2(cmd_buffer->device->info)) {
/* Wa_16011411144: also CS_STALL after touching SO_BUFFER change */
anv_add_pending_pipe_bits(cmd_buffer,
ANV_PIPE_CS_STALL_BIT,
@@ -4477,7 +4477,7 @@ void genX(CmdDrawIndirectByteCountEXT)(
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value count =
mi_mem32(anv_address_add(counter_buffer->address,
counterBufferOffset));
@@ -4514,7 +4514,7 @@ load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
mi_store(&b, mi_reg32(GFX7_3DPRIM_VERTEX_COUNT),
mi_mem32(anv_address_add(addr, 0)));
@@ -4794,7 +4794,7 @@ void genX(CmdDrawIndirectCount)(
genX(cmd_buffer_flush_state)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value max =
prepare_for_draw_count_predicate(cmd_buffer, &b,
count_buffer, countBufferOffset);
@@ -4862,7 +4862,7 @@ void genX(CmdDrawIndexedIndirectCount)(
genX(cmd_buffer_flush_state)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value max =
prepare_for_draw_count_predicate(cmd_buffer, &b,
count_buffer, countBufferOffset);
@@ -5103,7 +5103,7 @@ genX(CmdDrawMeshTasksIndirectNV)(
bool uses_drawid = (task_prog_data && task_prog_data->uses_drawid) ||
mesh_prog_data->uses_drawid;
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < drawCount; i++) {
struct anv_address draw = anv_address_add(buffer->address, offset);
@@ -5143,7 +5143,7 @@ genX(CmdDrawMeshTasksIndirectCountNV)(
mesh_prog_data->uses_drawid;
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value max =
prepare_for_draw_count_predicate(cmd_buffer, &b,
@@ -5319,7 +5319,7 @@ emit_compute_walker(struct anv_cmd_buffer *cmd_buffer,
const struct anv_shader_bin *cs_bin = pipeline->cs;
bool predicate = cmd_buffer->state.conditional_render_enabled;
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_cs_dispatch_info dispatch =
brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
@@ -5366,7 +5366,7 @@ emit_gpgpu_walker(struct anv_cmd_buffer *cmd_buffer,
bool predicate = (GFX_VER <= 7 && indirect) ||
cmd_buffer->state.conditional_render_enabled;
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_cs_dispatch_info dispatch =
brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
@@ -5504,7 +5504,7 @@ void genX(CmdDispatchIndirect)(
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value size_x = mi_mem32(anv_address_add(addr, 0));
struct mi_value size_y = mi_mem32(anv_address_add(addr, 4));
@@ -5584,7 +5584,7 @@ genX(cmd_buffer_ray_query_globals)(struct anv_cmd_buffer *cmd_buffer)
uint32_t stack_ids_per_dss = 2048; /* TODO: can we use a lower value in
* some cases?
*/
-brw_rt_compute_scratch_layout(&layout, &device->info,
+brw_rt_compute_scratch_layout(&layout, device->info,
stack_ids_per_dss, 1 << 10);
struct GFX_RT_DISPATCH_GLOBALS rtdg = {
@@ -5737,7 +5737,7 @@ cmd_buffer_trace_rays(struct anv_cmd_buffer *cmd_buffer,
local_size_log2[2] = 0;
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value launch_size[3] = {
mi_mem32(anv_address_from_u64(launch_size_addr + 0)),
@@ -5883,7 +5883,7 @@ static void
genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
uint32_t pipeline)
{
-UNUSED const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+UNUSED const struct intel_device_info *devinfo = cmd_buffer->device->info;
if (cmd_buffer->state.current_pipeline == pipeline)
return;
@@ -6195,7 +6195,7 @@ genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
unsigned scale)
{
#if GFX_VER == 9
-const struct intel_device_info *devinfo = &cmd_buffer->device->info;
+const struct intel_device_info *devinfo = cmd_buffer->device->info;
const unsigned slice_hashing[] = {
/* Because all Gfx9 platforms with more than one slice require
* three-way subslice hashing, a single "normal" 16x16 slice hashing
@@ -6501,7 +6501,7 @@ void genX(CmdBeginRendering)(
gfx->samples |= iview->vk.image->samples;
enum isl_aux_usage aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info,
+anv_layout_to_aux_usage(cmd_buffer->device->info,
iview->image,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
@@ -6701,7 +6701,7 @@ void genX(CmdBeginRendering)(
initial_depth_layout = attachment_initial_layout(d_att);
depth_layout = d_att->imageLayout;
depth_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info,
+anv_layout_to_aux_usage(cmd_buffer->device->info,
d_iview->image,
VK_IMAGE_ASPECT_DEPTH_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
@@ -6714,7 +6714,7 @@ void genX(CmdBeginRendering)(
initial_stencil_layout = attachment_initial_layout(s_att);
stencil_layout = s_att->imageLayout;
stencil_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info,
+anv_layout_to_aux_usage(cmd_buffer->device->info,
s_iview->image,
VK_IMAGE_ASPECT_STENCIL_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
@@ -6998,13 +6998,13 @@ cmd_buffer_resolve_msaa_attachment(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image_view *dst_iview = att->resolve_iview;
enum isl_aux_usage src_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info,
+anv_layout_to_aux_usage(cmd_buffer->device->info,
src_iview->image, aspect,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
layout);
enum isl_aux_usage dst_aux_usage =
-anv_layout_to_aux_usage(&cmd_buffer->device->info,
+anv_layout_to_aux_usage(cmd_buffer->device->info,
dst_iview->image, aspect,
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
att->resolve_layout);
@@ -7200,7 +7200,7 @@ genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
{
#if GFX_VERx10 >= 75
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
mi_reg32(ANV_PREDICATE_RESULT_REG));
@@ -7233,7 +7233,7 @@ void genX(CmdBeginConditionalRenderingEXT)(
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
struct mi_builder b;
-mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
+mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
/* Section 19.4 of the Vulkan 1.1.85 spec says:
*
@@ -7482,7 +7482,7 @@ void genX(cmd_emit_timestamp)(struct anv_batch *batch,
}
} else {
struct mi_builder b;
-mi_builder_init(&b, &device->info, batch);
+mi_builder_init(&b, device->info, batch);
mi_store(&b, mi_mem64(addr), mi_reg64(TIMESTAMP));
}
}


@@ -244,7 +244,7 @@ genX(emit_so_memcpy_init)(struct anv_memcpy_state *state,
state->batch = batch;
state->device = device;
-const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
+const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
genX(emit_l3_config)(batch, device, cfg);
anv_batch_emit(batch, GENX(PIPELINE_SELECT), ps) {
@@ -299,7 +299,7 @@ genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
if (!cmd_buffer->state.current_l3_config) {
const struct intel_l3_config *cfg =
-intel_get_default_l3_config(&cmd_buffer->device->info);
+intel_get_default_l3_config(cmd_buffer->device->info);
genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
}


@@ -148,7 +148,7 @@ emit_vertex_input(struct anv_graphics_pipeline *pipeline,
}
u_foreach_bit(a, vi->attributes_valid) {
-enum isl_format format = anv_get_isl_format(&pipeline->base.device->info,
+enum isl_format format = anv_get_isl_format(pipeline->base.device->info,
vi->attributes[a].format,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_TILING_LINEAR);
@@ -273,7 +273,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
const unsigned entry_size[4],
enum intel_urb_deref_block_size *deref_block_size)
{
-const struct intel_device_info *devinfo = &device->info;
+const struct intel_device_info *devinfo = device->info;
unsigned entries[4];
unsigned start[4];
@@ -322,7 +322,7 @@ static void
emit_urb_setup_mesh(struct anv_graphics_pipeline *pipeline,
enum intel_urb_deref_block_size *deref_block_size)
{
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_task_prog_data *task_prog_data =
anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK) ?
@@ -841,7 +841,7 @@ emit_rs_state(struct anv_graphics_pipeline *pipeline,
rp->depth_attachment_format != VK_FORMAT_UNDEFINED) {
assert(vk_format_has_depth(rp->depth_attachment_format));
enum isl_format isl_format =
-anv_get_isl_format(&pipeline->base.device->info,
+anv_get_isl_format(pipeline->base.device->info,
rp->depth_attachment_format,
VK_IMAGE_ASPECT_DEPTH_BIT,
VK_IMAGE_TILING_OPTIMAL);
@@ -1018,7 +1018,7 @@ emit_cb_state(struct anv_graphics_pipeline *pipeline,
surface_count = map->surface_count;
}
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
uint32_t *blend_state_start = devinfo->ver >= 8 ?
pipeline->gfx8.blend_state : pipeline->gfx7.blend_state;
uint32_t *state_pos = blend_state_start;
@@ -1363,7 +1363,7 @@ emit_3dstate_streamout(struct anv_graphics_pipeline *pipeline,
* 2. Send SO_DECL NP state
* 3. Send 3D State SOL with SOL Enabled
*/
-if (intel_device_info_is_dg2(&pipeline->base.device->info))
+if (intel_device_info_is_dg2(pipeline->base.device->info))
anv_batch_emit(&pipeline->base.batch, GENX(3DSTATE_STREAMOUT), so);
uint32_t *dw = anv_batch_emitn(&pipeline->base.batch, 3 + 2 * max_decls,
@@ -1514,7 +1514,7 @@ get_scratch_surf(struct anv_pipeline *pipeline,
static void
emit_3dstate_vs(struct anv_graphics_pipeline *pipeline)
{
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
const struct anv_shader_bin *vs_bin =
pipeline->shaders[MESA_SHADER_VERTEX];
@@ -1603,7 +1603,7 @@ emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
return;
}
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct anv_shader_bin *tcs_bin =
pipeline->shaders[MESA_SHADER_TESS_CTRL];
const struct anv_shader_bin *tes_bin =
@@ -1748,7 +1748,7 @@ emit_3dstate_hs_te_ds(struct anv_graphics_pipeline *pipeline,
static void
emit_3dstate_gs(struct anv_graphics_pipeline *pipeline)
{
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct anv_shader_bin *gs_bin =
pipeline->shaders[MESA_SHADER_GEOMETRY];
@@ -1905,7 +1905,7 @@ emit_3dstate_wm(struct anv_graphics_pipeline *pipeline,
wm.LineStippleEnable = rs->line.stipple.enable;
}
-const struct intel_device_info *devinfo = &pipeline->base.device->info;
+const struct intel_device_info *devinfo = pipeline->base.device->info;
uint32_t *dws = devinfo->ver >= 8 ? pipeline->gfx8.wm : pipeline->gfx7.wm;
GENX(3DSTATE_WM_pack)(NULL, dws, &wm);
}
@ -1916,7 +1916,7 @@ emit_3dstate_ps(struct anv_graphics_pipeline *pipeline,
const struct vk_color_blend_state *cb)
{
UNUSED const struct intel_device_info *devinfo =
&pipeline->base.device->info;
pipeline->base.device->info;
const struct anv_shader_bin *fs_bin =
pipeline->shaders[MESA_SHADER_FRAGMENT];
@ -2182,7 +2182,7 @@ emit_task_state(struct anv_graphics_pipeline *pipeline)
get_scratch_surf(&pipeline->base, MESA_SHADER_TASK, task_bin);
}
const struct intel_device_info *devinfo = &pipeline->base.device->info;
const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
const struct brw_cs_dispatch_info task_dispatch =
brw_cs_get_dispatch_info(devinfo, &task_prog_data->base, NULL);
@ -2235,7 +2235,7 @@ emit_mesh_state(struct anv_graphics_pipeline *pipeline)
/* TODO(mesh): MaximumNumberofThreadGroups. */
}
const struct intel_device_info *devinfo = &pipeline->base.device->info;
const struct intel_device_info *devinfo = pipeline->base.device->info;
const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
const struct brw_cs_dispatch_info mesh_dispatch =
brw_cs_get_dispatch_info(devinfo, &mesh_prog_data->base, NULL);
@ -2328,7 +2328,7 @@ genX(graphics_pipeline_emit)(struct anv_graphics_pipeline *pipeline,
* whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
* Stall" bit set.
*/
if (device->info.platform == INTEL_PLATFORM_IVB)
if (device->info->platform == INTEL_PLATFORM_IVB)
gfx7_emit_vs_workaround_flush(brw);
#endif
@ -2384,7 +2384,7 @@ genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
const UNUSED struct anv_shader_bin *cs_bin = pipeline->cs;
const struct intel_device_info *devinfo = &device->info;
const struct intel_device_info *devinfo = device->info;
anv_batch_emit(&pipeline->base.batch, GENX(CFE_STATE), cfe) {
cfe.MaximumNumberofThreads =
@ -2400,7 +2400,7 @@ void
genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline)
{
struct anv_device *device = pipeline->base.device;
const struct intel_device_info *devinfo = &device->info;
const struct intel_device_info *devinfo = device->info;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
anv_pipeline_setup_l3_config(&pipeline->base, cs_prog_data->base.total_shared > 0);
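Mechanically, every rewrite in the pipeline hunks above is one of two textual
forms. A compilable sketch of both; the helper name uses_new_pointer_form is
hypothetical, not part of the driver:

static bool
uses_new_pointer_form(const struct anv_device *device)
{
   /* 1. Taking a local alias: the '&' is dropped. */
   const struct intel_device_info *devinfo = device->info; /* was &device->info */

   /* 2. Direct member access: '.' becomes '->'. */
   return devinfo->ver >= 8 &&
          device->info->has_aux_map;                       /* was device->info. */
}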

--- next file ---

@ -219,7 +219,7 @@ VkResult genX(CreateQueryPool)(
};
batch.next = batch.start;
mi_builder_init(&b, &device->info, &batch);
mi_builder_init(&b, device->info, &batch);
mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
mi_imm(p * (uint64_t)pool->pass_size));
anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
@ -522,7 +522,7 @@ VkResult genX(GetQueryPoolResults)(
uint64_t result = slot[idx * 2 + 2] - slot[idx * 2 + 1];
/* WaDividePSInvocationCountBy4:HSW,BDW */
if ((device->info.ver == 8 || device->info.verx10 == 75) &&
if ((device->info->ver == 8 || device->info->verx10 == 75) &&
(1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
result >>= 2;
@ -584,10 +584,10 @@ VkResult genX(GetQueryPoolResults)(
query_data + intel_perf_query_data_offset(pool, true),
false /* no_oa_accumulate */);
intel_perf_query_result_write_mdapi(pData, stride,
&device->info,
device->info,
query, &result);
const uint64_t *marker = query_data + intel_perf_marker_offset();
intel_perf_query_mdapi_write_marker(pData, stride, &device->info, *marker);
intel_perf_query_mdapi_write_marker(pData, stride, device->info, *marker);
break;
}
@ -622,7 +622,7 @@ emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
pc.DepthStallEnable = true;
pc.Address = addr;
if (GFX_VER == 9 && cmd_buffer->device->info.gt == 4)
if (GFX_VER == 9 && cmd_buffer->device->info->gt == 4)
pc.CommandStreamerStallEnable = true;
}
}
@ -760,7 +760,7 @@ void genX(CmdResetQueryPool)(
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT: {
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++)
emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@ -770,7 +770,7 @@ void genX(CmdResetQueryPool)(
#if GFX_VER >= 8
case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++) {
for (uint32_t p = 0; p < pool->n_passes; p++) {
@ -786,7 +786,7 @@ void genX(CmdResetQueryPool)(
case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++)
emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
@ -925,7 +925,7 @@ void genX(CmdBeginQueryIndexedEXT)(
struct anv_address query_addr = anv_query_address(pool, query);
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
@ -1112,7 +1112,7 @@ void genX(CmdEndQueryIndexedEXT)(
struct anv_address query_addr = anv_query_address(pool, query);
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
@ -1291,7 +1291,7 @@ void genX(CmdWriteTimestamp2)(
assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
if (stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT) {
mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
@ -1307,7 +1307,7 @@ void genX(CmdWriteTimestamp2)(
pc.PostSyncOperation = WriteTimestamp;
pc.Address = anv_address_add(query_addr, 8);
if (GFX_VER == 9 && cmd_buffer->device->info.gt == 4)
if (GFX_VER == 9 && cmd_buffer->device->info->gt == 4)
pc.CommandStreamerStallEnable = true;
}
emit_query_pc_availability(cmd_buffer, query_addr, true);
@ -1405,7 +1405,7 @@ void genX(CmdCopyQueryPoolResults)(
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
struct mi_builder b;
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value result;
/* If render target writes are ongoing, request a render target cache flush
@ -1471,8 +1471,8 @@ void genX(CmdCopyQueryPoolResults)(
idx * 16 + 8));
/* WaDividePSInvocationCountBy4:HSW,BDW */
if ((cmd_buffer->device->info.ver == 8 ||
cmd_buffer->device->info.verx10 == 75) &&
if ((cmd_buffer->device->info->ver == 8 ||
cmd_buffer->device->info->verx10 == 75) &&
(1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) {
result = mi_ushr32_imm(&b, result, 2);
}
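Both query-result paths above implement WaDividePSInvocationCountBy4: the CPU
path shifts with result >>= 2, and the GPU path emits the same shift through
the MI builder with mi_ushr32_imm(&b, result, 2), a right shift by two being
the divide by four. A sketch of the shared gate; needs_ps_invocation_fixup is
a hypothetical helper, though the checks themselves are taken from the hunks:

static bool
needs_ps_invocation_fixup(const struct intel_device_info *devinfo)
{
   /* BDW is ver == 8, HSW is verx10 == 75. */
   return devinfo->ver == 8 || devinfo->verx10 == 75;
}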

--- next file ---

@ -44,10 +44,10 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
{
#if GFX_VER == 11
/* Gfx11 hardware has two pixel pipes at most. */
for (unsigned i = 2; i < ARRAY_SIZE(device->info.ppipe_subslices); i++)
assert(device->info.ppipe_subslices[i] == 0);
for (unsigned i = 2; i < ARRAY_SIZE(device->info->ppipe_subslices); i++)
assert(device->info->ppipe_subslices[i] == 0);
if (device->info.ppipe_subslices[0] == device->info.ppipe_subslices[1])
if (device->info->ppipe_subslices[0] == device->info->ppipe_subslices[1])
return;
if (!device->slice_hash.alloc_size) {
@ -55,8 +55,8 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
device->slice_hash =
anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);
const bool flip = device->info.ppipe_subslices[0] <
device->info.ppipe_subslices[1];
const bool flip = device->info->ppipe_subslices[0] <
device->info->ppipe_subslices[1];
struct GENX(SLICE_HASH_TABLE) table;
intel_compute_pixel_hash_table_3way(16, 16, 3, 3, flip, table.Entry[0]);
@ -79,12 +79,12 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
for (unsigned p = 0; p < 3; p++)
ppipes_of[n] += (device->info.ppipe_subslices[p] == n);
ppipes_of[n] += (device->info->ppipe_subslices[p] == n);
}
/* Gfx12 has three pixel pipes. */
for (unsigned p = 3; p < ARRAY_SIZE(device->info.ppipe_subslices); p++)
assert(device->info.ppipe_subslices[p] == 0);
for (unsigned p = 3; p < ARRAY_SIZE(device->info->ppipe_subslices); p++)
assert(device->info->ppipe_subslices[p] == 0);
if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
/* All three pixel pipes have the maximum number of active dual
@ -117,8 +117,8 @@ genX(emit_slice_hashing_state)(struct anv_device *device,
}
#elif GFX_VERx10 == 125
uint32_t ppipe_mask = 0;
for (unsigned p = 0; p < ARRAY_SIZE(device->info.ppipe_subslices); p++) {
if (device->info.ppipe_subslices[p])
for (unsigned p = 0; p < ARRAY_SIZE(device->info->ppipe_subslices); p++) {
if (device->info->ppipe_subslices[p])
ppipe_mask |= (1u << p);
}
assert(ppipe_mask);
@ -168,7 +168,7 @@ init_common_queue_state(struct anv_queue *queue, struct anv_batch *batch)
/* Starting with GFX version 11, SLM is no longer part of the L3$ config
* so it never changes throughout the lifetime of the VkDevice.
*/
const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
genX(emit_l3_config)(batch, device, cfg);
device->l3_config = cfg;
#endif
@ -319,7 +319,7 @@ init_render_queue_state(struct anv_queue *queue)
/* hardware specification recommends disabling repacking for
* the compatibility with decompression mechanism in display controller.
*/
if (device->info.disable_ccs_repack) {
if (device->info->disable_ccs_repack) {
anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
cm0.DisableRepackingforCompression = true;
cm0.DisableRepackingforCompressionMask = true;
@ -368,7 +368,7 @@ init_render_queue_state(struct anv_queue *queue)
#endif
#if GFX_VER == 12
if (device->info.has_aux_map) {
if (device->info->has_aux_map) {
uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
assert(aux_base_addr % (32 * 1024) == 0);
anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
@ -440,9 +440,9 @@ init_compute_queue_state(struct anv_queue *queue)
}
void
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *device)
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *pdevice)
{
assert(device->info.verx10 == GFX_VERx10);
assert(pdevice->info.verx10 == GFX_VERx10);
}
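One asymmetry worth noting in the hunk above: the physical device still embeds
its intel_device_info, so its accesses keep the '.' form; only the logical
device goes through the new const pointer with '->'. The parameter rename from
device to pdevice presumably keeps the two forms visually distinct. A sketch;
check_gfx_ver is a hypothetical helper:

static void
check_gfx_ver(const struct anv_physical_device *pdevice,
              const struct anv_device *device)
{
   /* Physical device: 'info' is still an embedded struct. */
   assert(pdevice->info.verx10 == GFX_VERx10);

   /* Logical device: 'info' is now a const pointer. */
   assert(device->info->verx10 == GFX_VERx10);
}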
VkResult
@ -581,7 +581,7 @@ genX(emit_l3_config)(struct anv_batch *batch,
const struct anv_device *device,
const struct intel_l3_config *cfg)
{
UNUSED const struct intel_device_info *devinfo = &device->info;
UNUSED const struct intel_device_info *devinfo = device->info;
#if GFX_VER >= 8

--- next file ---

@ -332,7 +332,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
GENX(3DSTATE_SF_header),
};
#if GFX_VER == 8
if (cmd_buffer->device->info.platform == INTEL_PLATFORM_CHV) {
if (cmd_buffer->device->info->platform == INTEL_PLATFORM_CHV) {
sf.CHVLineWidth = dyn->rs.line.width;
} else {
sf.LineWidth = dyn->rs.line.width;
@ -592,7 +592,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
RR_FREE;
vfg.DistributionGranularity = BatchLevelGranularity;
/* Wa_14014890652 */
if (intel_device_info_is_dg2(&cmd_buffer->device->info))
if (intel_device_info_is_dg2(cmd_buffer->device->info))
vfg.GranularityThresholdDisable = 1;
vfg.ListCutIndexEnable = dyn->ia.primitive_restart_enable;
/* 192 vertices for TRILIST_ADJ */