/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/drm.h"
#include <xf86drm.h>

#include "anv_private.h"
#include "anv_measure.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
#include "util/driconf.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "vk_deferred_operation.h"
#include "common/intel_aux_map.h"
#include "common/intel_defines.h"
#include "common/intel_uuid.h"
#include "compiler/glsl_types.h"
#include "perf/gen_perf.h"

#include "genxml/gen7_pack.h"

static const driOptionDescription anv_dri_options[] = {
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_ALWAYS_FLUSH_CACHE(false)
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
   DRI_CONF_SECTION_END
};

/* This is probably far too big, but it reflects the max size used for
 * messages in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH    4096

/* Render engine timestamp register */
#define TIMESTAMP 0x2358

/* The "RAW" clocks on Linux are called "FAST" on FreeBSD */
#if !defined(CLOCK_MONOTONIC_RAW) && defined(CLOCK_MONOTONIC_FAST)
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC_FAST
#endif

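/* Callback for shader compiler debug output: formats the message and
 * forwards it to any VK_EXT_debug_report callbacks registered on the
 * instance, at DEBUG severity.
 */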
static void
compiler_debug_log(void *data, const char *fmt, ...)
{
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;
   struct anv_instance *instance = device->physical->instance;

   if (list_is_empty(&instance->vk.debug_report.callbacks))
      return;

   va_list args;
   va_start(args, fmt);
   (void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
   va_end(args);

   vk_debug_report(&instance->vk,
                   VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   NULL, 0, 0, "anv", str);
}

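/* Callback for shader compiler performance warnings: routed to the shared
 * mesa logger, but only when INTEL_DEBUG=perf is set.
 */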
static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (INTEL_DEBUG & DEBUG_PERF)
      mesa_logd_v(fmt, args);

   va_end(args);
}

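/* Size the Vulkan heap as min(3/4 of the GTT, 1/2 or 3/4 of system RAM).
 * For illustration, with hypothetical numbers: 16 GiB of RAM and an 8 GiB
 * GTT give MIN2(16 GiB * 3/4, 8 GiB * 3/4) = 6 GiB; with 4 GiB of RAM the
 * RAM term is instead 4 GiB / 2 = 2 GiB.
 */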
static uint64_t
anv_compute_heap_size(int fd, uint64_t gtt_size)
{
   /* Query the total ram from the system */
   uint64_t total_ram;
   if (!os_get_total_physical_memory(&total_ram))
      return 0;

   /* We don't want to burn too much ram with the GPU.  If the user has 4GiB
    * or less, we use at most half.  If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}

#if defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
    defined(VK_USE_PLATFORM_XCB_KHR) || \
    defined(VK_USE_PLATFORM_XLIB_KHR) || \
    defined(VK_USE_PLATFORM_DISPLAY_KHR)
#define ANV_USE_WSI_PLATFORM
#endif

#ifdef ANDROID
#define ANV_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
#else
#define ANV_API_VERSION VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION)
#endif

VkResult anv_EnumerateInstanceVersion(
    uint32_t*                                   pApiVersion)
{
    *pApiVersion = ANV_API_VERSION;
    return VK_SUCCESS;
}

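/* Instance-level extensions.  Most are advertised unconditionally; the
 * WSI-related entries depend on which window-system platforms this build
 * was configured with.
 */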
static const struct vk_instance_extension_table instance_extensions = {
   .KHR_device_group_creation             = true,
   .KHR_external_fence_capabilities       = true,
   .KHR_external_memory_capabilities      = true,
   .KHR_external_semaphore_capabilities   = true,
   .KHR_get_physical_device_properties2   = true,
   .EXT_debug_report                      = true,

#ifdef ANV_USE_WSI_PLATFORM
   .KHR_get_surface_capabilities2         = true,
   .KHR_surface                           = true,
   .KHR_surface_protected_capabilities    = true,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   .KHR_wayland_surface                   = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   .KHR_xcb_surface                       = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
   .KHR_xlib_surface                      = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
   .EXT_acquire_xlib_display              = true,
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   .KHR_display                           = true,
   .KHR_get_display_properties2           = true,
   .EXT_direct_mode_display               = true,
   .EXT_display_surface_counter           = true,
#endif
};

static void
get_device_extensions(const struct anv_physical_device *device,
                      struct vk_device_extension_table *ext)
{
   *ext = (struct vk_device_extension_table) {
      .KHR_8bit_storage                      = device->info.gen >= 8,
      .KHR_16bit_storage                     = device->info.gen >= 8,
      .KHR_bind_memory2                      = true,
      .KHR_buffer_device_address             = device->has_a64_buffer_access,
      .KHR_copy_commands2                    = true,
      .KHR_create_renderpass2                = true,
      .KHR_dedicated_allocation              = true,
      .KHR_deferred_host_operations          = true,
      .KHR_depth_stencil_resolve             = true,
      .KHR_descriptor_update_template        = true,
      .KHR_device_group                      = true,
      .KHR_draw_indirect_count               = true,
      .KHR_driver_properties                 = true,
      .KHR_external_fence                    = device->has_syncobj_wait,
      .KHR_external_fence_fd                 = device->has_syncobj_wait,
      .KHR_external_memory                   = true,
      .KHR_external_memory_fd                = true,
      .KHR_external_semaphore                = true,
      .KHR_external_semaphore_fd             = true,
      .KHR_get_memory_requirements2          = true,
      .KHR_image_format_list                 = true,
      .KHR_imageless_framebuffer             = true,
#ifdef ANV_USE_WSI_PLATFORM
      .KHR_incremental_present               = true,
#endif
      .KHR_maintenance1                      = true,
      .KHR_maintenance2                      = true,
      .KHR_maintenance3                      = true,
      .KHR_multiview                         = true,
      .KHR_performance_query =
         device->use_softpin && device->perf &&
         (device->perf->i915_perf_version >= 3 ||
          INTEL_DEBUG & DEBUG_NO_OACONFIG) &&
         device->use_call_secondary,
      .KHR_pipeline_executable_properties    = true,
      .KHR_push_descriptor                   = true,
      .KHR_relaxed_block_layout              = true,
      .KHR_sampler_mirror_clamp_to_edge      = true,
      .KHR_sampler_ycbcr_conversion          = true,
      .KHR_separate_depth_stencil_layouts    = true,
      .KHR_shader_atomic_int64               = device->info.gen >= 9 &&
                                               device->use_softpin,
      .KHR_shader_clock                      = true,
      .KHR_shader_draw_parameters            = true,
      .KHR_shader_float16_int8               = device->info.gen >= 8,
      .KHR_shader_float_controls             = device->info.gen >= 8,
      .KHR_shader_non_semantic_info          = true,
      .KHR_shader_subgroup_extended_types    = device->info.gen >= 8,
      .KHR_shader_terminate_invocation       = true,
      .KHR_spirv_1_4                         = true,
      .KHR_storage_buffer_storage_class      = true,
#ifdef ANV_USE_WSI_PLATFORM
      .KHR_swapchain                         = true,
      .KHR_swapchain_mutable_format          = true,
#endif
      .KHR_timeline_semaphore                = true,
      .KHR_uniform_buffer_standard_layout    = true,
      .KHR_variable_pointers                 = true,
      .KHR_vulkan_memory_model               = true,
      .KHR_workgroup_memory_explicit_layout  = true,
      .KHR_zero_initialize_workgroup_memory  = true,
      .EXT_4444_formats                      = true,
      .EXT_buffer_device_address             = device->has_a64_buffer_access,
      .EXT_calibrated_timestamps             = device->has_reg_timestamp,
      .EXT_conditional_rendering             = device->info.gen >= 8 ||
                                               device->info.is_haswell,
      .EXT_custom_border_color               = device->info.gen >= 8,
      .EXT_depth_clip_enable                 = true,
      .EXT_descriptor_indexing               = device->has_a64_buffer_access &&
                                               device->has_bindless_images,
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
      .EXT_display_control                   = true,
#endif
      .EXT_extended_dynamic_state            = true,
      .EXT_external_memory_dma_buf           = true,
      .EXT_external_memory_host              = true,
      .EXT_fragment_shader_interlock         = device->info.gen >= 9,
      .EXT_global_priority                   = device->has_context_priority,
      .EXT_host_query_reset                  = true,
      .EXT_image_robustness                  = true,
      .EXT_index_type_uint8                  = true,
      .EXT_inline_uniform_block              = true,
      .EXT_line_rasterization                = true,
      .EXT_memory_budget                     = device->has_mem_available,
      .EXT_pci_bus_info                      = true,
      .EXT_pipeline_creation_cache_control   = true,
      .EXT_pipeline_creation_feedback        = true,
      .EXT_post_depth_coverage               = device->info.gen >= 9,
      .EXT_private_data                      = true,
#ifdef ANDROID
      .EXT_queue_family_foreign              = ANDROID,
#endif
      .EXT_robustness2                       = true,
      .EXT_sample_locations                  = true,
      .EXT_sampler_filter_minmax             = device->info.gen >= 9,
      .EXT_scalar_block_layout               = true,
      .EXT_separate_stencil_usage            = true,
      .EXT_shader_atomic_float               = true,
      .EXT_shader_demote_to_helper_invocation = true,
      .EXT_shader_stencil_export             = device->info.gen >= 9,
      .EXT_shader_subgroup_ballot            = true,
      .EXT_shader_subgroup_vote              = true,
      .EXT_shader_viewport_index_layer       = true,
      .EXT_subgroup_size_control             = true,
      .EXT_texel_buffer_alignment            = true,
      .EXT_transform_feedback                = true,
      .EXT_vertex_attribute_divisor          = true,
      .EXT_ycbcr_image_arrays                = true,
#ifdef ANDROID
      .ANDROID_external_memory_android_hardware_buffer = true,
      .ANDROID_native_buffer                 = true,
#endif
      .GOOGLE_decorate_string                = true,
      .GOOGLE_hlsl_functionality1            = true,
      .GOOGLE_user_type                      = true,
      .INTEL_performance_query               = device->perf &&
                                               device->perf->i915_perf_version >= 3,
      .INTEL_shader_integer_functions2       = device->info.gen >= 8,
      .NV_compute_shader_derivatives         = true,
   };
}

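/* Establish the size of the system-memory region, clamping to 2 GiB when
 * the device cannot use 48-bit addresses.
 */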
static void
anv_init_meminfo(struct anv_physical_device *device, int fd)
{
   uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail.  Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      mesa_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                "no support for 48-bit addresses",
                __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   device->sys.size = heap_size;
}

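/* Discover the GTT size and build the Vulkan memory heap/type tables.
 * Three configurations are possible: device-local VRAM plus system memory,
 * a single LLC-shared heap, or a single non-LLC (Atom) heap with separate
 * cached and coherent types.
 */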
static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &device->gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?) fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (gen_get_aperture_size(fd, &device->gtt_size) == -1) {
         return vk_errorfi(device->instance, NULL,
                           VK_ERROR_INITIALIZATION_FAILED,
                           "failed to get aperture size: %m");
      }
   }

   /* We only allow 48-bit addresses with softpin because knowing the actual
    * address is required for the vertex cache flush workaround.
    */
   device->supports_48bit_addresses = (device->info.gen >= 8) &&
                                      device->has_softpin &&
                                      device->gtt_size > (4ULL << 30 /* GiB */);

   anv_init_meminfo(device, fd);
   assert(device->sys.size != 0);

   if (device->vram.size > 0) {
      /* We can create 2 different heaps when we have local memory support,
       * first heap with local memory size and second with system memory size.
       */
      device->memory.heap_count = 2;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = device->vram.size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .is_local_mem = true,
      };
      device->memory.heaps[1] = (struct anv_memory_heap) {
         .size = device->sys.size,
         .flags = 0,
         .is_local_mem = false,
      };

      device->memory.type_count = 3;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .heapIndex = 0,
      };
      device->memory.types[1] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 1,
      };
      device->memory.types[2] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
   } else if (device->info.has_llc) {
      device->memory.heap_count = 1;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = device->sys.size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .is_local_mem = false,
      };

      /* Big core GPUs share LLC with the CPU and thus one memory type can be
       * both cached and coherent at the same time.
       */
      device->memory.type_count = 1;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   } else {
      device->memory.heap_count = 1;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .size = device->sys.size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .is_local_mem = false,
      };

      /* The spec requires that we expose a host-visible, coherent memory
       * type, but Atom GPUs don't share LLC.  Thus we offer two memory types
       * to give the application a choice between cached, but not coherent and
       * coherent but uncached (WC though).
       */
      device->memory.type_count = 2;
      device->memory.types[0] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
      device->memory.types[1] = (struct anv_memory_type) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
   }

   return VK_SUCCESS;
}

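/* Derive the pipeline-cache, driver, and device UUIDs.  The pipeline-cache
 * UUID hashes the driver build-id together with the device ID and the
 * feature flags that affect generated shader code.
 */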
static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "build-id too short.  It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid.  It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
                     sizeof(device->always_use_bindless));
   _mesa_sha1_update(&sha1_ctx, &device->has_a64_buffer_access,
                     sizeof(device->has_a64_buffer_access));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_images,
                     sizeof(device->has_bindless_images));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_samplers,
                     sizeof(device->has_bindless_samplers));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   intel_uuid_compute_driver_id(device->driver_uuid, &device->info, VK_UUID_SIZE);
   intel_uuid_compute_device_id(device->device_uuid, &device->isl_dev, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[10];
   ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                               device->info.chipset_id);
   assert(len == sizeof(renderer) - 2);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   const uint64_t driver_flags =
      brw_get_compiler_config_value(device->compiler);
   device->disk_cache = disk_cache_create(renderer, timestamp, driver_flags);
#else
   device->disk_cache = NULL;
#endif
}

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

/* The ANV_QUEUE_OVERRIDE environment variable is a comma separated list of
 * queue overrides.
 *
 * To override the number of queues:
 *  * "gc" is for graphics queues with compute support
 *  * "g" is for graphics queues with no compute support
 *  * "c" is for compute queues with no graphics support
 *
 * For example, ANV_QUEUE_OVERRIDE=gc=2,c=1 would override the number of
 * advertised queues to be 2 queues with graphics+compute support, and 1 queue
 * with compute-only support.
 *
 * ANV_QUEUE_OVERRIDE=c=1 would override the number of advertised queues to
 * include 1 queue with compute-only support, but it will not change the
 * number of graphics+compute queues.
 *
 * ANV_QUEUE_OVERRIDE=gc=0,c=1 would override the number of advertised queues
 * to include 1 queue with compute-only support, and it would override the
 * number of graphics+compute queues to be 0.
 */
static void
anv_override_engine_counts(int *gc_count, int *g_count, int *c_count)
{
   int gc_override = -1;
   int g_override = -1;
   int c_override = -1;
   char *env = getenv("ANV_QUEUE_OVERRIDE");

   if (env == NULL)
      return;

   env = strdup(env);
   char *save = NULL;
   char *next = strtok_r(env, ",", &save);
   while (next != NULL) {
      if (strncmp(next, "gc=", 3) == 0) {
         gc_override = strtol(next + 3, NULL, 0);
      } else if (strncmp(next, "g=", 2) == 0) {
         g_override = strtol(next + 2, NULL, 0);
      } else if (strncmp(next, "c=", 2) == 0) {
         c_override = strtol(next + 2, NULL, 0);
      } else {
         mesa_logw("Ignoring unsupported ANV_QUEUE_OVERRIDE token: %s", next);
      }
      next = strtok_r(NULL, ",", &save);
   }
   free(env);
   if (gc_override >= 0)
      *gc_count = gc_override;
   if (g_override >= 0)
      *g_count = g_override;
   if (*g_count > 0 && *gc_count <= 0 && (gc_override >= 0 || g_override >= 0))
      mesa_logw("ANV_QUEUE_OVERRIDE: gc=0 with g > 0 violates the "
                "Vulkan specification");
   if (c_override >= 0)
      *c_count = c_override;
}

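/* Build the advertised queue family list.  With kernel engine info we size
 * the graphics+compute family from the number of render engines and apply
 * ANV_QUEUE_OVERRIDE; otherwise we fall back to a single
 * graphics+compute+transfer queue.
 */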
static void
anv_physical_device_init_queue_families(struct anv_physical_device *pdevice)
{
   uint32_t family_count = 0;

   if (pdevice->engine_info) {
      int gc_count =
         anv_gem_count_engines(pdevice->engine_info, I915_ENGINE_CLASS_RENDER);
      int g_count = 0;
      int c_count = 0;

      anv_override_engine_counts(&gc_count, &g_count, &c_count);

      if (gc_count > 0) {
         pdevice->queue.families[family_count++] = (struct anv_queue_family) {
            .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                          VK_QUEUE_COMPUTE_BIT |
                          VK_QUEUE_TRANSFER_BIT,
            .queueCount = gc_count,
            .engine_class = I915_ENGINE_CLASS_RENDER,
         };
      }
      if (g_count > 0) {
         pdevice->queue.families[family_count++] = (struct anv_queue_family) {
            .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                          VK_QUEUE_TRANSFER_BIT,
            .queueCount = g_count,
            .engine_class = I915_ENGINE_CLASS_RENDER,
         };
      }
      if (c_count > 0) {
         pdevice->queue.families[family_count++] = (struct anv_queue_family) {
            .queueFlags = VK_QUEUE_COMPUTE_BIT |
                          VK_QUEUE_TRANSFER_BIT,
            .queueCount = c_count,
            .engine_class = I915_ENGINE_CLASS_RENDER,
         };
      }
      /* Increase count below when other families are added as a reminder to
       * increase the ANV_MAX_QUEUE_FAMILIES value.
       */
      STATIC_ASSERT(ANV_MAX_QUEUE_FAMILIES >= 3);
   } else {
      /* Default to a single render queue */
      pdevice->queue.families[family_count++] = (struct anv_queue_family) {
         .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                       VK_QUEUE_COMPUTE_BIT |
                       VK_QUEUE_TRANSFER_BIT,
         .queueCount = 1,
         .engine_class = I915_ENGINE_CLASS_RENDER,
      };
      family_count = 1;
   }
   assert(family_count <= ANV_MAX_QUEUE_FAMILIES);
   pdevice->queue.family_count = family_count;
}

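/* Open the render node, verify that the kernel and hardware are supported,
 * and populate a new anv_physical_device with its capabilities.
 */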
static VkResult
anv_physical_device_try_create(struct anv_instance *instance,
                               drmDevicePtr drm_device,
                               struct anv_physical_device **device_out)
{
   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (errno == ENOMEM) {
         return vk_errorfi(instance, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
                           "Unable to open device %s: out of memory", path);
      }
      return vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                        "Unable to open device %s: %m", path);
   }

   struct gen_device_info devinfo;
   if (!gen_get_device_info_from_fd(fd, &devinfo)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail_fd;
   }

   const char *device_name = gen_get_device_name(devinfo.chipset_id);

   if (devinfo.is_haswell) {
      mesa_logw("Haswell Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && !devinfo.is_baytrail) {
      mesa_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && devinfo.is_baytrail) {
      mesa_logw("Bay Trail Vulkan support is incomplete");
   } else if (devinfo.gen >= 8 && devinfo.gen <= 12) {
      /* Gen8-12 fully supported */
   } else {
      result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device_name);
      goto fail_fd;
   }

   struct anv_physical_device *device =
      vk_zalloc(&instance->vk.alloc, sizeof(*device), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (device == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_fd;
   }

   struct vk_physical_device_dispatch_table dispatch_table;
   vk_physical_device_dispatch_table_from_entrypoints(
      &dispatch_table, &anv_physical_device_entrypoints, true);

   result = vk_physical_device_init(&device->vk, &instance->vk,
                                    NULL, /* We set up extensions later */
                                    &dispatch_table);
   if (result != VK_SUCCESS) {
      vk_error(result);
      goto fail_alloc;
   }
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   snprintf(device->path, ARRAY_SIZE(device->path), "%s", path);

   device->info = devinfo;
   device->name = device_name;

   device->no_hw = device->info.no_hw;
   if (getenv("INTEL_NO_HW") != NULL)
      device->no_hw = true;

   device->pci_info.domain = drm_device->businfo.pci->domain;
   device->pci_info.bus = drm_device->businfo.pci->bus;
   device->pci_info.device = drm_device->businfo.pci->dev;
   device->pci_info.function = drm_device->businfo.pci->func;

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorfi(device->instance, NULL,
                             VK_ERROR_INITIALIZATION_FAILED,
                             "failed to get command parser version");
         goto fail_base;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing gem wait");
      goto fail_base;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing execbuf2");
      goto fail_base;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing wc mmap");
      goto fail_base;
   }

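   /* Probe optional kernel features; each has_* flag below gates a feature
    * or extension advertised later on.
    */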
   device->has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_syncobj_wait_available =
      anv_gem_get_drm_cap(fd, DRM_CAP_SYNCOBJ_TIMELINE) != 0;

   device->has_context_priority = anv_gem_has_context_priority(fd);

   /* Initialize memory regions struct to 0. */
   memset(&device->vram, 0, sizeof(device->vram));
   memset(&device->sys, 0, sizeof(device->sys));

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail_base;

   device->use_softpin = device->has_softpin &&
                         device->supports_48bit_addresses;

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   device->has_exec_timeline =
      anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_TIMELINE_FENCES);
   if (env_var_as_boolean("ANV_QUEUE_THREAD_DISABLE", false))
      device->has_exec_timeline = false;

   device->has_thread_submit =
      device->has_syncobj_wait_available && device->has_exec_timeline;

   device->always_use_bindless =
      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);

   device->use_call_secondary =
      device->use_softpin &&
      !env_var_as_boolean("ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS", false);

   /* We first got the A64 messages on broadwell and we can only use them if
    * we can pass addresses directly into the shader which requires softpin.
    */
   device->has_a64_buffer_access = device->info.gen >= 8 &&
                                   device->use_softpin;

   /* We first get bindless image access on Skylake and we can only really do
    * it if we don't have any relocations so we need softpin.
    */
   device->has_bindless_images = device->info.gen >= 9 &&
                                 device->use_softpin;

   /* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
    * because it's just a matter of setting the sampler address in the sample
    * message header.  However, we've not bothered to wire it up for vec4 so
    * we leave it disabled on gen7.
    */
   device->has_bindless_samplers = device->info.gen >= 8;

   device->has_implicit_ccs = device->info.has_aux_map;

   /* Check if we can read the GPU timestamp register from the CPU */
   uint64_t u64_ignore;
   device->has_reg_timestamp = anv_gem_reg_read(fd, TIMESTAMP | I915_REG_READ_8B_WA,
                                                &u64_ignore) == 0;

   uint64_t avail_mem;
   device->has_mem_available = os_get_available_system_memory(&avail_mem);

   device->always_flush_cache =
      driQueryOptionb(&instance->dri_options, "always_flush_cache");

   device->has_mmap_offset =
      anv_gem_get_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

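   /* Query EU/subslice fuse counts where the kernel supports it.  On
    * Cherryview the fused counts below refine max_cs_threads; for
    * illustration, hypothetical values eu_total = 48, subslice_total = 6,
    * and num_thread_per_eu = 7 give max_cs_threads = 48 / 6 * 7 = 56.
    */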
   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell
       * brandstrings, and we have to use conservative numbers for GPGPU on
       * many platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         mesa_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_base;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;
   device->compiler->compact_params = false;
   device->compiler->indirect_ubos_use_sampler = device->info.gen < 12;

   /* Broadwell PRM says:
    *
    *   "Before Gen8, there was a historical configuration control field to
    *    swizzle address bit[6] for in X/Y tiling modes. This was set in three
    *    different places: TILECTL[1:0], ARB_MODE[5:4], and
    *    DISP_ARB_CTL[14:13].
    *
    *    For Gen8 and subsequent generations, the swizzle fields are all
    *    reserved, and the CPU's memory controller performs all address
    *    swizzling modifications."
    */
   bool swizzled =
      device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail_compiler;

   anv_physical_device_init_disk_cache(device);

if (instance->vk.enabled_extensions.KHR_display) {
|
2018-02-07 10:31:44 -08:00
|
|
|
master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
|
|
|
|
|
if (master_fd >= 0) {
|
|
|
|
|
/* prod the device with a GETPARAM call which will fail if
|
|
|
|
|
* we don't have permission to even render on this device
|
|
|
|
|
*/
|
|
|
|
|
if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
|
|
|
|
|
close(master_fd);
|
|
|
|
|
master_fd = -1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
device->master_fd = master_fd;
|
|
|
|
|
|
2019-03-23 00:28:24 -07:00
|
|
|
device->engine_info = anv_gem_get_engine_info(fd);
|
2021-01-26 01:13:36 -06:00
|
|
|
anv_physical_device_init_queue_families(device);
|
|
|
|
|
|
2016-05-28 20:03:34 +01:00
|
|
|
result = anv_init_wsi(device);
|
2020-01-17 23:52:50 -06:00
|
|
|
if (result != VK_SUCCESS)
|
2019-03-23 00:28:24 -07:00
|
|
|
goto fail_engine_info;
|
2016-05-15 22:21:24 -07:00
|
|
|
|
2020-09-15 11:22:17 +03:00
|
|
|
anv_physical_device_init_perf(device, fd);
|
2018-06-07 18:02:03 +01:00
|
|
|
|
2021-01-05 19:34:51 -08:00
|
|
|
anv_measure_device_init(device);
|
|
|
|
|
|
2021-01-29 22:40:39 -06:00
|
|
|
get_device_extensions(device, &device->vk.supported_extensions);
|
2018-02-07 10:31:44 -08:00
|
|
|
|
2017-02-19 15:27:47 +10:00
|
|
|
device->local_fd = fd;
|
2018-02-07 10:31:44 -08:00
|
|
|
|
2021-02-27 15:09:56 -08:00
|
|
|
anv_genX(&device->info, init_physical_device_state)(device);
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
*device_out = device;
|
|
|
|
|
|
2015-05-08 22:32:37 -07:00
|
|
|
return VK_SUCCESS;
|
2015-11-13 10:12:18 -08:00
|
|
|
|
2019-03-23 00:28:24 -07:00
|
|
|
fail_engine_info:
|
|
|
|
|
free(device->engine_info);
|
2020-01-17 23:52:50 -06:00
|
|
|
anv_physical_device_free_disk_cache(device);
|
|
|
|
|
fail_compiler:
|
|
|
|
|
ralloc_free(device->compiler);
|
2021-01-23 04:57:21 -06:00
|
|
|
fail_base:
|
|
|
|
|
vk_physical_device_finish(&device->vk);
|
2020-01-17 23:52:50 -06:00
|
|
|
fail_alloc:
|
2021-01-23 04:57:21 -06:00
|
|
|
vk_free(&instance->vk.alloc, device);
|
2020-01-17 23:52:50 -06:00
|
|
|
fail_fd:
|
2015-07-21 13:09:25 -07:00
|
|
|
close(fd);
|
vulkan: Add KHR_display extension using DRM [v10]
This adds support for the KHR_display extension support to the vulkan
WSI layer. Driver support will be added separately.
v2:
* fix double ;; in wsi_common_display.c
* Move mode list from wsi_display to wsi_display_connector
* Fix scope for wsi_display_mode and wsi_display_connector
allocs
* Switch all allocations to vk_zalloc instead of vk_alloc.
* Fix DRM failure in
wsi_display_get_physical_device_display_properties
When DRM fails, or when we don't have a master fd
(presumably due to application errors), just return 0
properties from this function, which is at least a valid
response.
* Use vk_outarray for all property queries
This is a bit less error-prone than open-coding the same
stuff.
* Remove VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR from surface caps
Until we have multi-plane support, we shouldn't pretend to
have any multi-plane semantics, even if undefined.
Suggested-by: Jason Ekstrand <jason@jlekstrand.net>
* Simplify addition of VK_USE_PLATFORM_DISPLAY_KHR to
vulkan_wsi_args
Suggested-by: Eric Engestrom <eric.engestrom@imgtec.com>
v3:
Add separate 'display_fd' and 'render_fd' arguments to
wsi_device_init API. This allows drivers to use different FDs
for the different aspects of the device.
Use largest mode as display size when no preferred mode.
If the display doesn't provide a preferred mode, we'll assume
that the largest supported mode is the "physical size" of the
device and report that.
v4:
Make wsi_image_state enumeration values uppercase.
Follow more common mesa conventions.
Remove 'render_fd' from wsi_device_init API. The
wsi_common_display code doesn't use this fd at all, so stop
passing it in. This avoids any potential confusion over which
fd to use when creating display-relative object handles.
Remove call to wsi_create_prime_image which would never have
been reached as the necessary condition (use_prime_blit) is
never set.
whitespace cleanups in wsi_common_display.c
Suggested-by: Jason Ekstrand <jason@jlekstrand.net>
Add depth/bpp info to available surface formats. Instead of
hard-coding depth 24 bpp 32 in the drmModeAddFB call, use the
requested format to find suitable values.
Destroy kernel buffers and FBs when swapchain is destroyed. We
were leaking both of these kernel objects across swapchain
destruction.
Note that wsi_display_wait_for_event waits for anything to
happen. wsi_display_wait_for_event is simply a yield so that
the caller can then check to see if the desired state change
has occurred.
Record swapchain failures in chain for later return. If some
asynchronous swapchain activity fails, we need to tell the
application eventually. Record the failure in the swapchain
and report it at the next acquire_next_image or queue_present
call.
Fix error returns from wsi_display_setup_connector. If a
malloc failed, then the result should be
VK_ERROR_OUT_OF_HOST_MEMORY. Otherwise, the associated ioctl
failed and we're either VT switched away, or our lease has
been revoked, in which case we should return
VK_ERROR_OUT_OF_DATE_KHR.
Make sure brace use matches on both sides of if/else
Note that we assume drmModeSetCrtc is synchronous. Add a
comment explaining why we can idle any previous displayed
image as soon as the mode set returns.
Note that EACCES from drmModePageFlip means VT inactive. When
vt switched away drmModePageFlip returns EACCES. Poll once a
second waiting until we get some other return value back.
Clean up after alloc failure in
wsi_display_surface_create_swapchain. Destroy any created
images, free the swapchain.
Remove physical_device from wsi_display_init_wsi. We never
need this value, so remove it from the API and from the
internal wsi_display structure.
Use drmModeAddFB2 in wsi_display_image_init. This takes a drm
format instead of depth/bpp, which provides more control over
the format of the data.
v5:
Set the 'currentStackIndex' member of the
VkDisplayPlanePropertiesKHR record to zero, instead of
indexing across all displays. This value is the stack depth of
the plane within an individual display, and as the current
code supports only a single plane per display, should be set
to zero for all elements
Discovered-by: David Mao <David.Mao@amd.com>
v6:
Remove 'platform_display' bits from the build and use the
existing 'platform_drm' instead.
v7:
Ensure VK_ICD_WSI_PLATFORM_MAX is large enough by
setting to VK_ICD_WSI_PLATFORM_DISPLAY + 1
v8:
Simplify wsi_device_init failure from wsi_display_init_wsi
by using the same pattern as the other wsi layers.
Adopt Jason Ekstrand's white space and variable declaration
suggestions. Declare variables at first use, eliminate extra
whitespace between types and names, add list iterator helpers,
switch to lower-case list_ macros.
Respond to Jason's April 8 review:
* Create a function to convert relative to absolute timeouts
to catch overflow issues in one place
* use VK_NULL_HANDLE to clear prop->currentDisplay
* Get rid of available_present_modes array.
* return OUT_OF_DATE_KHR when display_queue_next called after
display has been released.
* Make errors from mode setting fatal in display_queue_next
* Remove duplicate pthread_mutex_init call
* Add wsi_init_pthread_cond_monotonic helper function to
isolate pthread error handling from wsi_display_init_wsi
Suggested-by: Jason Ekstrand <jason.ekstrand@intel.com>
v9:
Fix vscan handling by using MAX2(vscan, 1) everywhere. Vscan
can be zero anywhere, which is treated the same as 1.
Suggested-by: Jason Ekstrand <jason.ekstrand@intel.com>
v10:
Respond to Vulkan CTS failures.
1. Initialize planeReorderPossible in display_properties code
2. Only report connected displays in
get_display_plane_supported_displays
3. Return VK_ERROR_OUT_OF_HOST_MEMORY when pthread cond
initialization fails.
Signed-off-by: Jason Ekstrand <jason.ekstrand@intel.com>
4. Add vkCreateDisplayModeKHR. This doesn't actually create
new modes, it only looks to see if the requested parameters
matches an existing mode and returns that.
Suggested-by: Jason Ekstrand <jason.ekstrand@intel.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Keith Packard <keithp@keithp.com>
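As a side note, a hedged sketch of the v8 "relative to absolute timeout"
helper mentioned above; the function name is illustrative, and the point is
saturating instead of wrapping on overflow:
#include <stdint.h>
#include <time.h>

static uint64_t
wsi_rel_to_abs_time(uint64_t rel_ns)   /* illustrative name */
{
   struct timespec now;
   clock_gettime(CLOCK_MONOTONIC, &now);

   uint64_t now_ns = (uint64_t)now.tv_sec * 1000000000ull +
                     (uint64_t)now.tv_nsec;

   /* Saturate on overflow so a huge relative timeout means
    * "wait forever" rather than wrapping into the past.
    */
   if (rel_ns > UINT64_MAX - now_ns)
      return UINT64_MAX;
   return now_ns + rel_ns;
}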
2018-02-07 10:31:44 -08:00
|
|
|
if (master_fd != -1)
|
|
|
|
|
close(master_fd);
|
2015-08-26 04:03:38 -07:00
|
|
|
return result;
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
2015-10-19 20:21:45 -07:00
|
|
|
static void
|
2020-01-17 23:52:50 -06:00
|
|
|
anv_physical_device_destroy(struct anv_physical_device *device)
|
2015-10-19 20:21:45 -07:00
|
|
|
{
|
2016-05-15 22:21:24 -07:00
|
|
|
anv_finish_wsi(device);
|
2021-01-05 19:34:51 -08:00
|
|
|
anv_measure_device_destroy(device);
|
2019-03-23 00:28:24 -07:00
|
|
|
free(device->engine_info);
|
2018-06-29 17:08:30 -07:00
|
|
|
anv_physical_device_free_disk_cache(device);
|
2015-10-19 20:21:45 -07:00
|
|
|
ralloc_free(device->compiler);
|
2018-06-07 18:02:03 +01:00
|
|
|
ralloc_free(device->perf);
|
2017-02-19 15:27:47 +10:00
|
|
|
close(device->local_fd);
|
2018-02-07 10:31:44 -08:00
|
|
|
if (device->master_fd >= 0)
|
|
|
|
|
close(device->master_fd);
|
2021-01-23 04:57:21 -06:00
|
|
|
vk_physical_device_finish(&device->vk);
|
|
|
|
|
vk_free(&device->instance->vk.alloc, device);
|
2015-10-19 20:21:45 -07:00
|
|
|
}
|
|
|
|
|
|
2015-12-02 03:28:27 -08:00
|
|
|
static void *
|
2016-09-22 14:58:11 +03:00
|
|
|
default_alloc_func(void *pUserData, size_t size, size_t align,
|
2015-12-02 03:28:27 -08:00
|
|
|
VkSystemAllocationScope allocationScope)
|
|
|
|
|
{
|
|
|
|
|
return malloc(size);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void *
|
|
|
|
|
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
|
|
|
|
|
size_t align, VkSystemAllocationScope allocationScope)
|
|
|
|
|
{
|
|
|
|
|
return realloc(pOriginal, size);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
default_free_func(void *pUserData, void *pMemory)
|
|
|
|
|
{
|
|
|
|
|
free(pMemory);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static const VkAllocationCallbacks default_alloc = {
|
|
|
|
|
.pUserData = NULL,
|
|
|
|
|
.pfnAllocation = default_alloc_func,
|
|
|
|
|
.pfnReallocation = default_realloc_func,
|
|
|
|
|
.pfnFree = default_free_func,
|
|
|
|
|
};
|
|
|
|
|
|
2018-01-16 15:49:28 -08:00
|
|
|
VkResult anv_EnumerateInstanceExtensionProperties(
|
|
|
|
|
const char* pLayerName,
|
|
|
|
|
uint32_t* pPropertyCount,
|
|
|
|
|
VkExtensionProperties* pProperties)
|
|
|
|
|
{
|
2021-01-30 11:03:27 -06:00
|
|
|
if (pLayerName)
|
|
|
|
|
return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
|
2018-01-16 15:49:28 -08:00
|
|
|
|
2021-01-30 11:03:27 -06:00
|
|
|
return vk_enumerate_instance_extension_properties(
|
2021-01-29 22:40:39 -06:00
|
|
|
&instance_extensions, pPropertyCount, pProperties);
|
2018-01-16 15:49:28 -08:00
|
|
|
}
|
|
|
|
|
|
2020-09-14 12:21:41 -07:00
|
|
|
static void
|
|
|
|
|
anv_init_dri_options(struct anv_instance *instance)
|
|
|
|
|
{
|
|
|
|
|
driParseOptionInfo(&instance->available_dri_options, anv_dri_options,
|
|
|
|
|
ARRAY_SIZE(anv_dri_options));
|
|
|
|
|
driParseConfigFiles(&instance->dri_options,
|
|
|
|
|
&instance->available_dri_options, 0, "anv", NULL,
|
2021-01-23 04:57:21 -06:00
|
|
|
instance->vk.app_info.app_name,
|
|
|
|
|
instance->vk.app_info.app_version,
|
|
|
|
|
instance->vk.app_info.engine_name,
|
|
|
|
|
instance->vk.app_info.engine_version);
|
2020-09-14 12:21:41 -07:00
|
|
|
}
|
|
|
|
|
|
2015-05-17 16:33:48 -07:00
|
|
|
VkResult anv_CreateInstance(
|
2015-05-08 22:32:37 -07:00
|
|
|
const VkInstanceCreateInfo* pCreateInfo,
|
2015-12-02 03:28:27 -08:00
|
|
|
const VkAllocationCallbacks* pAllocator,
|
2015-05-08 22:32:37 -07:00
|
|
|
VkInstance* pInstance)
|
|
|
|
|
{
|
|
|
|
|
struct anv_instance *instance;
|
2018-01-09 03:22:56 +01:00
|
|
|
VkResult result;
|
2015-05-08 22:32:37 -07:00
|
|
|
|
|
|
|
|
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
|
|
|
|
|
|
2021-01-23 04:57:21 -06:00
|
|
|
if (pAllocator == NULL)
|
|
|
|
|
pAllocator = &default_alloc;
|
2015-09-17 11:19:16 -07:00
|
|
|
|
2021-01-23 04:57:21 -06:00
|
|
|
instance = vk_alloc(pAllocator, sizeof(*instance), 8,
|
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
|
2015-05-08 22:32:37 -07:00
|
|
|
if (!instance)
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
2021-01-23 04:57:21 -06:00
|
|
|
struct vk_instance_dispatch_table dispatch_table;
|
|
|
|
|
vk_instance_dispatch_table_from_entrypoints(
|
|
|
|
|
&dispatch_table, &anv_instance_entrypoints, true);
|
2018-01-29 18:12:04 -08:00
|
|
|
|
2021-01-29 22:40:39 -06:00
|
|
|
result = vk_instance_init(&instance->vk, &instance_extensions,
|
|
|
|
|
&dispatch_table, pCreateInfo, pAllocator);
|
2021-01-23 04:57:21 -06:00
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
|
vk_free(pAllocator, instance);
|
|
|
|
|
return vk_error(result);
|
2018-01-16 18:08:09 -08:00
|
|
|
}
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
instance->physical_devices_enumerated = false;
|
|
|
|
|
list_inithead(&instance->physical_devices);
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2018-06-29 17:29:35 -07:00
|
|
|
instance->pipeline_cache_enabled =
|
|
|
|
|
env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);
|
|
|
|
|
|
2019-03-15 09:47:49 +02:00
|
|
|
glsl_type_singleton_init_or_ref();
|
2015-08-14 17:25:04 -07:00
|
|
|
|
2015-07-31 10:18:00 -07:00
|
|
|
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
|
|
|
|
|
|
2020-09-14 12:21:41 -07:00
|
|
|
anv_init_dri_options(instance);
|
2019-04-24 16:42:25 +01:00
|
|
|
|
2015-07-09 18:41:27 -07:00
|
|
|
*pInstance = anv_instance_to_handle(instance);
|
2015-05-08 22:32:37 -07:00
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-05 20:50:51 -07:00
|
|
|
void anv_DestroyInstance(
|
2015-12-02 03:28:27 -08:00
|
|
|
VkInstance _instance,
|
|
|
|
|
const VkAllocationCallbacks* pAllocator)
|
2015-05-08 22:32:37 -07:00
|
|
|
{
|
2015-07-09 18:41:27 -07:00
|
|
|
ANV_FROM_HANDLE(anv_instance, instance, _instance);
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2017-03-01 08:39:49 -08:00
|
|
|
if (!instance)
|
|
|
|
|
return;
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
list_for_each_entry_safe(struct anv_physical_device, pdevice,
|
|
|
|
|
&instance->physical_devices, link)
|
|
|
|
|
anv_physical_device_destroy(pdevice);
|
2015-10-21 11:36:39 -07:00
|
|
|
|
2015-07-31 10:18:00 -07:00
|
|
|
VG(VALGRIND_DESTROY_MEMPOOL(instance));
|
|
|
|
|
|
2019-03-15 09:47:49 +02:00
|
|
|
glsl_type_singleton_decref();
|
2015-08-14 17:25:04 -07:00
|
|
|
|
2019-04-24 16:42:25 +01:00
|
|
|
driDestroyOptionCache(&instance->dri_options);
|
|
|
|
|
driDestroyOptionInfo(&instance->available_dri_options);
|
|
|
|
|
|
2021-01-23 04:57:21 -06:00
|
|
|
vk_instance_finish(&instance->vk);
|
|
|
|
|
vk_free(&instance->vk.alloc, instance);
|
2015-07-31 10:13:24 -07:00
|
|
|
}
|
|
|
|
|
|
2016-12-01 21:21:10 +00:00
|
|
|
static VkResult
|
2020-01-17 23:52:50 -06:00
|
|
|
anv_enumerate_physical_devices(struct anv_instance *instance)
|
2016-12-01 21:21:10 +00:00
|
|
|
{
|
2020-01-17 23:52:50 -06:00
|
|
|
if (instance->physical_devices_enumerated)
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
|
|
|
|
|
instance->physical_devices_enumerated = true;
|
|
|
|
|
|
2016-12-01 21:21:10 +00:00
|
|
|
/* TODO: Check for more devices ? */
|
|
|
|
|
drmDevicePtr devices[8];
|
|
|
|
|
int max_devices;
|
|
|
|
|
|
2017-05-02 19:26:17 +03:00
|
|
|
max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
|
2016-12-01 21:21:10 +00:00
|
|
|
if (max_devices < 1)
|
2020-01-21 18:19:18 +02:00
|
|
|
return VK_SUCCESS;
|
2016-12-01 21:21:10 +00:00
|
|
|
|
2020-01-21 18:19:18 +02:00
|
|
|
VkResult result = VK_SUCCESS;
|
2016-12-01 21:21:10 +00:00
|
|
|
for (unsigned i = 0; i < (unsigned)max_devices; i++) {
|
|
|
|
|
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
|
|
|
|
|
devices[i]->bustype == DRM_BUS_PCI &&
|
|
|
|
|
devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
struct anv_physical_device *pdevice;
|
|
|
|
|
result = anv_physical_device_try_create(instance, devices[i],
|
|
|
|
|
&pdevice);
|
2020-01-21 18:19:18 +02:00
|
|
|
/* Incompatible DRM device, skip. */
|
|
|
|
|
if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
|
|
|
|
|
result = VK_SUCCESS;
|
2020-01-17 23:52:50 -06:00
|
|
|
continue;
|
2020-01-21 18:19:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Error creating the physical device, report the error. */
|
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
break;
|
2020-01-17 23:52:50 -06:00
|
|
|
|
|
|
|
|
list_addtail(&pdevice->link, &instance->physical_devices);
|
2016-12-01 21:21:10 +00:00
|
|
|
}
|
|
|
|
|
}
|
2017-05-02 20:11:06 +03:00
|
|
|
drmFreeDevices(devices, max_devices);
|
2016-12-01 21:21:10 +00:00
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
/* If we successfully enumerated any devices, call it success */
|
2020-01-21 18:19:18 +02:00
|
|
|
return result;
|
2017-09-21 13:54:55 -07:00
|
|
|
}
|
2016-12-01 21:21:10 +00:00
|
|
|
|
2015-05-17 16:33:48 -07:00
|
|
|
VkResult anv_EnumeratePhysicalDevices(
|
2015-05-08 22:32:37 -07:00
|
|
|
VkInstance _instance,
|
|
|
|
|
uint32_t* pPhysicalDeviceCount,
|
|
|
|
|
VkPhysicalDevice* pPhysicalDevices)
|
|
|
|
|
{
|
2015-07-09 18:41:27 -07:00
|
|
|
ANV_FROM_HANDLE(anv_instance, instance, _instance);
|
2017-03-05 13:07:13 -08:00
|
|
|
VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
|
2015-07-09 15:38:58 -07:00
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
VkResult result = anv_enumerate_physical_devices(instance);
|
2017-09-21 13:54:55 -07:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
return result;
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
list_for_each_entry(struct anv_physical_device, pdevice,
|
|
|
|
|
&instance->physical_devices, link) {
|
|
|
|
|
vk_outarray_append(&out, i) {
|
|
|
|
|
*i = anv_physical_device_to_handle(pdevice);
|
|
|
|
|
}
|
2015-07-09 15:38:58 -07:00
|
|
|
}
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2017-09-21 13:54:55 -07:00
|
|
|
return vk_outarray_status(&out);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult anv_EnumeratePhysicalDeviceGroups(
|
|
|
|
|
VkInstance _instance,
|
|
|
|
|
uint32_t* pPhysicalDeviceGroupCount,
|
|
|
|
|
VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
|
|
|
|
|
{
|
|
|
|
|
ANV_FROM_HANDLE(anv_instance, instance, _instance);
|
|
|
|
|
VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
|
|
|
|
|
pPhysicalDeviceGroupCount);
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
VkResult result = anv_enumerate_physical_devices(instance);
|
2017-09-21 13:54:55 -07:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
return result;
|
|
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
list_for_each_entry(struct anv_physical_device, pdevice,
|
|
|
|
|
&instance->physical_devices, link) {
|
|
|
|
|
vk_outarray_append(&out, p) {
|
|
|
|
|
p->physicalDeviceCount = 1;
|
|
|
|
|
memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
|
|
|
|
|
p->physicalDevices[0] = anv_physical_device_to_handle(pdevice);
|
|
|
|
|
p->subsetAllocation = false;
|
2017-09-21 13:54:55 -07:00
|
|
|
|
2020-01-17 23:52:50 -06:00
|
|
|
vk_foreach_struct(ext, p->pNext)
|
|
|
|
|
anv_debug_ignored_stype(ext->sType);
|
|
|
|
|
}
|
2015-07-09 15:51:06 -07:00
|
|
|
}
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2017-03-05 13:07:13 -08:00
|
|
|
return vk_outarray_status(&out);
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
2015-11-30 12:21:19 -08:00
|
|
|
void anv_GetPhysicalDeviceFeatures(
|
2015-07-09 13:54:08 -07:00
|
|
|
VkPhysicalDevice physicalDevice,
|
|
|
|
|
VkPhysicalDeviceFeatures* pFeatures)
|
|
|
|
|
{
|
2016-02-15 21:24:40 -08:00
|
|
|
ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
|
2015-07-09 13:54:08 -07:00
|
|
|
|
|
|
|
|
*pFeatures = (VkPhysicalDeviceFeatures) {
|
2016-02-01 11:54:40 -08:00
|
|
|
.robustBufferAccess = true,
|
2016-02-15 21:24:40 -08:00
|
|
|
.fullDrawIndexUint32 = true,
|
2016-11-27 14:41:42 -05:00
|
|
|
.imageCubeArray = true,
|
2016-07-14 18:01:29 -07:00
|
|
|
.independentBlend = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
.geometryShader = true,
|
2016-09-25 15:33:03 -07:00
|
|
|
.tessellationShader = true,
|
2016-08-08 16:10:00 -07:00
|
|
|
.sampleRateShading = true,
|
2015-12-02 16:58:54 -08:00
|
|
|
.dualSrcBlend = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
.logicOp = true,
|
2017-04-19 17:37:46 -07:00
|
|
|
.multiDrawIndirect = true,
|
2016-11-22 23:20:11 -05:00
|
|
|
.drawIndirectFirstInstance = true,
|
2016-06-14 08:40:49 -07:00
|
|
|
.depthClamp = true,
|
2016-11-22 23:03:12 -05:00
|
|
|
.depthBiasClamp = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
.fillModeNonSolid = true,
|
2019-10-28 23:47:39 +00:00
|
|
|
.depthBounds = pdevice->info.gen >= 12,
|
2015-07-09 13:54:08 -07:00
|
|
|
.wideLines = true,
|
|
|
|
|
.largePoints = true,
|
2015-12-02 16:58:54 -08:00
|
|
|
.alphaToOne = true,
|
|
|
|
|
.multiViewport = true,
|
2016-10-07 13:53:04 +01:00
|
|
|
.samplerAnisotropy = true,
|
2016-09-22 14:58:11 +03:00
|
|
|
.textureCompressionETC2 = pdevice->info.gen >= 8 ||
|
|
|
|
|
pdevice->info.is_baytrail,
|
|
|
|
|
.textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
|
2015-07-09 13:54:08 -07:00
|
|
|
.textureCompressionBC = true,
|
2016-02-15 21:24:40 -08:00
|
|
|
.occlusionQueryPrecise = true,
|
2016-11-27 14:23:23 -05:00
|
|
|
.pipelineStatisticsQuery = true,
|
2015-12-02 16:58:54 -08:00
|
|
|
.fragmentStoresAndAtomics = true,
|
|
|
|
|
.shaderTessellationAndGeometryPointSize = true,
|
2016-11-27 15:45:54 -05:00
|
|
|
.shaderImageGatherExtended = true,
|
2016-11-27 16:37:17 -05:00
|
|
|
.shaderStorageImageExtendedFormats = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
.shaderStorageImageMultisample = false,
|
2016-11-27 17:39:52 -05:00
|
|
|
.shaderStorageImageReadWithoutFormat = false,
|
2017-02-14 10:34:49 +00:00
|
|
|
.shaderStorageImageWriteWithoutFormat = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
.shaderUniformBufferArrayDynamicIndexing = true,
|
2016-02-15 21:24:40 -08:00
|
|
|
.shaderSampledImageArrayDynamicIndexing = true,
|
|
|
|
|
.shaderStorageBufferArrayDynamicIndexing = true,
|
|
|
|
|
.shaderStorageImageArrayDynamicIndexing = true,
|
2016-10-03 20:44:38 -07:00
|
|
|
.shaderClipDistance = true,
|
|
|
|
|
.shaderCullDistance = true,
|
2018-04-30 15:15:37 -07:00
|
|
|
.shaderFloat64 = pdevice->info.gen >= 8 &&
|
2020-01-13 11:17:27 -08:00
|
|
|
pdevice->info.has_64bit_float,
|
2020-10-26 18:48:29 -05:00
|
|
|
.shaderInt64 = pdevice->info.gen >= 8,
|
2018-05-04 11:35:55 +02:00
|
|
|
.shaderInt16 = pdevice->info.gen >= 8,
|
2018-10-02 22:04:09 -05:00
|
|
|
.shaderResourceMinLod = pdevice->info.gen >= 9,
|
2018-02-08 09:03:03 -08:00
|
|
|
.variableMultisampleRate = true,
|
2017-03-16 14:17:59 -07:00
|
|
|
.inheritedQueries = true,
|
2015-07-09 13:54:08 -07:00
|
|
|
};
|
2016-04-15 14:53:16 -07:00
|
|
|
|
|
|
|
|
/* We can't do image stores in vec4 shaders */
|
|
|
|
|
pFeatures->vertexPipelineStoresAndAtomics =
|
2016-04-15 16:39:17 -07:00
|
|
|
pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
|
|
|
|
|
pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
|
2018-01-29 18:41:15 -08:00
|
|
|
|
2021-01-23 04:57:21 -06:00
|
|
|
struct vk_app_info *app_info = &pdevice->instance->vk.app_info;
|
2018-01-29 18:41:15 -08:00
|
|
|
|
|
|
|
|
/* The new DOOM and Wolfenstein games require depthBounds without
|
|
|
|
|
* checking for it. They seem to run fine without it so just claim it's
|
|
|
|
|
* there and accept the consequences.
|
|
|
|
|
*/
|
|
|
|
|
if (app_info->engine_name && strcmp(app_info->engine_name, "idTech") == 0)
|
|
|
|
|
pFeatures->depthBounds = true;
|
2015-07-09 13:54:08 -07:00
|
|
|
}
|
|
|
|
|
|
2020-01-13 12:57:56 -06:00
|
|
|
static void
|
|
|
|
|
anv_get_physical_device_features_1_1(struct anv_physical_device *pdevice,
|
|
|
|
|
VkPhysicalDeviceVulkan11Features *f)
|
|
|
|
|
{
|
|
|
|
|
assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);
|
|
|
|
|
|
|
|
|
|
f->storageBuffer16BitAccess = pdevice->info.gen >= 8;
|
|
|
|
|
f->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
|
|
|
|
|
f->storagePushConstant16 = pdevice->info.gen >= 8;
|
|
|
|
|
f->storageInputOutput16 = false;
|
|
|
|
|
f->multiview = true;
|
|
|
|
|
f->multiviewGeometryShader = true;
|
|
|
|
|
f->multiviewTessellationShader = true;
|
|
|
|
|
f->variablePointersStorageBuffer = true;
|
|
|
|
|
f->variablePointers = true;
|
|
|
|
|
f->protectedMemory = false;
|
|
|
|
|
f->samplerYcbcrConversion = true;
|
|
|
|
|
f->shaderDrawParameters = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
|
|
|
|
|
VkPhysicalDeviceVulkan12Features *f)
|
|
|
|
|
{
|
|
|
|
|
assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES);
|
|
|
|
|
|
|
|
|
|
f->samplerMirrorClampToEdge = true;
|
|
|
|
|
f->drawIndirectCount = true;
|
|
|
|
|
f->storageBuffer8BitAccess = pdevice->info.gen >= 8;
|
|
|
|
|
f->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
|
|
|
|
|
f->storagePushConstant8 = pdevice->info.gen >= 8;
|
|
|
|
|
f->shaderBufferInt64Atomics = pdevice->info.gen >= 9 &&
|
|
|
|
|
pdevice->use_softpin;
|
|
|
|
|
f->shaderSharedInt64Atomics = false;
|
|
|
|
|
f->shaderFloat16 = pdevice->info.gen >= 8;
|
|
|
|
|
f->shaderInt8 = pdevice->info.gen >= 8;
|
|
|
|
|
|
|
|
|
|
bool descIndexing = pdevice->has_a64_buffer_access &&
|
|
|
|
|
pdevice->has_bindless_images;
|
|
|
|
|
f->descriptorIndexing = descIndexing;
|
|
|
|
|
f->shaderInputAttachmentArrayDynamicIndexing = false;
|
|
|
|
|
f->shaderUniformTexelBufferArrayDynamicIndexing = descIndexing;
|
|
|
|
|
f->shaderStorageTexelBufferArrayDynamicIndexing = descIndexing;
|
|
|
|
|
f->shaderUniformBufferArrayNonUniformIndexing = false;
|
|
|
|
|
f->shaderSampledImageArrayNonUniformIndexing = descIndexing;
|
|
|
|
|
f->shaderStorageBufferArrayNonUniformIndexing = descIndexing;
|
|
|
|
|
f->shaderStorageImageArrayNonUniformIndexing = descIndexing;
|
|
|
|
|
f->shaderInputAttachmentArrayNonUniformIndexing = false;
|
|
|
|
|
f->shaderUniformTexelBufferArrayNonUniformIndexing = descIndexing;
|
|
|
|
|
f->shaderStorageTexelBufferArrayNonUniformIndexing = descIndexing;
|
|
|
|
|
f->descriptorBindingUniformBufferUpdateAfterBind = false;
|
|
|
|
|
f->descriptorBindingSampledImageUpdateAfterBind = descIndexing;
|
|
|
|
|
f->descriptorBindingStorageImageUpdateAfterBind = descIndexing;
|
|
|
|
|
f->descriptorBindingStorageBufferUpdateAfterBind = descIndexing;
|
|
|
|
|
f->descriptorBindingUniformTexelBufferUpdateAfterBind = descIndexing;
|
|
|
|
|
f->descriptorBindingStorageTexelBufferUpdateAfterBind = descIndexing;
|
|
|
|
|
f->descriptorBindingUpdateUnusedWhilePending = descIndexing;
|
|
|
|
|
f->descriptorBindingPartiallyBound = descIndexing;
|
2020-10-15 22:56:09 -05:00
|
|
|
f->descriptorBindingVariableDescriptorCount = descIndexing;
|
2020-01-13 12:57:56 -06:00
|
|
|
f->runtimeDescriptorArray = descIndexing;
|
|
|
|
|
|
|
|
|
|
f->samplerFilterMinmax = pdevice->info.gen >= 9;
|
|
|
|
|
f->scalarBlockLayout = true;
|
|
|
|
|
f->imagelessFramebuffer = true;
|
|
|
|
|
f->uniformBufferStandardLayout = true;
|
|
|
|
|
f->shaderSubgroupExtendedTypes = true;
|
|
|
|
|
f->separateDepthStencilLayouts = true;
|
|
|
|
|
f->hostQueryReset = true;
|
|
|
|
|
f->timelineSemaphore = true;
|
|
|
|
|
f->bufferDeviceAddress = pdevice->has_a64_buffer_access;
|
|
|
|
|
f->bufferDeviceAddressCaptureReplay = pdevice->has_a64_buffer_access;
|
|
|
|
|
f->bufferDeviceAddressMultiDevice = false;
|
|
|
|
|
f->vulkanMemoryModel = true;
|
|
|
|
|
f->vulkanMemoryModelDeviceScope = true;
|
|
|
|
|
f->vulkanMemoryModelAvailabilityVisibilityChains = true;
|
|
|
|
|
f->shaderOutputViewportIndex = true;
|
|
|
|
|
f->shaderOutputLayer = true;
|
|
|
|
|
f->subgroupBroadcastDynamicId = true;
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-20 12:18:10 -07:00
|
|
|
void anv_GetPhysicalDeviceFeatures2(
|
2017-01-25 12:12:20 -08:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2017-09-20 13:16:26 -07:00
|
|
|
VkPhysicalDeviceFeatures2* pFeatures)
|
2017-01-25 12:12:20 -08:00
|
|
|
{
|
2019-01-19 08:54:32 -06:00
|
|
|
ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
|
2017-01-25 12:12:20 -08:00
|
|
|
anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
|
|
|
|
|
|
2020-01-13 12:57:56 -06:00
|
|
|
VkPhysicalDeviceVulkan11Features core_1_1 = {
|
|
|
|
|
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
|
|
|
|
|
};
|
|
|
|
|
anv_get_physical_device_features_1_1(pdevice, &core_1_1);
|
|
|
|
|
|
|
|
|
|
VkPhysicalDeviceVulkan12Features core_1_2 = {
|
|
|
|
|
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
|
|
|
|
|
};
|
|
|
|
|
anv_get_physical_device_features_1_2(pdevice, &core_1_2);
|
|
|
|
|
|
|
|
|
|
#define CORE_FEATURE(major, minor, feature) \
|
|
|
|
|
features->feature = core_##major##_##minor.feature
|
|
|
|
|
|
|
|
|
|
|
2017-02-14 14:29:19 -08:00
|
|
|
vk_foreach_struct(ext, pFeatures->pNext) {
|
|
|
|
|
switch (ext->sType) {
|
2020-07-14 17:59:49 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDevice4444FormatsFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
|
|
|
|
|
features->formatA4R4G4B4 = true;
|
|
|
|
|
features->formatA4B4G4R4 = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-07 10:51:59 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDevice8BitStorageFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, storageBuffer8BitAccess);
|
|
|
|
|
CORE_FEATURE(1, 2, uniformAndStorageBuffer8BitAccess);
|
|
|
|
|
CORE_FEATURE(1, 2, storagePushConstant8);
|
2019-01-07 10:51:59 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-08 18:04:54 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
|
|
|
|
|
VkPhysicalDevice16BitStorageFeatures *features =
|
|
|
|
|
(VkPhysicalDevice16BitStorageFeatures *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, storageBuffer16BitAccess);
|
|
|
|
|
CORE_FEATURE(1, 1, uniformAndStorageBuffer16BitAccess);
|
|
|
|
|
CORE_FEATURE(1, 1, storagePushConstant16);
|
|
|
|
|
CORE_FEATURE(1, 1, storageInputOutput16);
|
2017-09-22 10:03:18 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-13 18:44:03 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features = (void *)ext;
|
2019-02-07 12:01:18 -06:00
|
|
|
features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
|
2019-01-19 08:54:32 -06:00
|
|
|
features->bufferDeviceAddressCaptureReplay = false;
|
|
|
|
|
features->bufferDeviceAddressMultiDevice = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-02 16:28:58 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, bufferDeviceAddress);
|
|
|
|
|
CORE_FEATURE(1, 2, bufferDeviceAddressCaptureReplay);
|
|
|
|
|
CORE_FEATURE(1, 2, bufferDeviceAddressMultiDevice);
|
2019-12-02 16:28:58 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-18 14:19:29 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
|
|
|
|
|
VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
|
|
|
|
|
(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
|
|
|
|
|
features->computeDerivativeGroupQuads = true;
|
|
|
|
|
features->computeDerivativeGroupLinear = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-05 17:54:07 +03:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
|
|
|
|
|
features->conditionalRendering = pdevice->info.gen >= 8 ||
|
|
|
|
|
pdevice->info.is_haswell;
|
|
|
|
|
features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
|
|
|
|
|
pdevice->info.is_haswell;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-22 17:08:22 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceCustomBorderColorFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceCustomBorderColorFeaturesEXT *)ext;
|
|
|
|
|
features->customBorderColors = pdevice->info.gen >= 8;
|
|
|
|
|
features->customBorderColorWithoutFormat = pdevice->info.gen >= 8;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-14 18:06:33 +00:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
|
2019-03-12 15:22:19 -05:00
|
|
|
features->depthClipEnable = true;
|
2019-01-14 18:06:33 +00:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-18 14:19:29 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, shaderFloat16);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderInt8);
|
2019-04-18 14:19:29 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-17 11:33:23 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
|
|
|
|
|
features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
|
|
|
|
|
features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
|
|
|
|
|
features->fragmentShaderShadingRateInterlock = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-04 20:01:44 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, hostQueryReset);
|
2019-03-04 20:01:44 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-27 16:08:20 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, shaderInputAttachmentArrayDynamicIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayDynamicIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayDynamicIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderUniformBufferArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderSampledImageArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderStorageBufferArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderStorageImageArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderInputAttachmentArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayNonUniformIndexing);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingUniformBufferUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingSampledImageUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingStorageImageUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingStorageBufferUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingUniformTexelBufferUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingStorageTexelBufferUpdateAfterBind);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingUpdateUnusedWhilePending);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingPartiallyBound);
|
|
|
|
|
CORE_FEATURE(1, 2, descriptorBindingVariableDescriptorCount);
|
|
|
|
|
CORE_FEATURE(1, 2, runtimeDescriptorArray);
|
2019-02-27 16:08:20 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-09 10:59:16 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
|
|
|
|
|
features->robustImageAccess = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-13 16:33:22 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
|
|
|
|
|
features->indexTypeUint8 = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-12 16:56:24 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
|
|
|
|
|
features->inlineUniformBlock = true;
|
2019-02-27 16:08:20 -06:00
|
|
|
features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
|
2019-02-12 16:56:24 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-22 22:44:59 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceLineRasterizationFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
|
|
|
|
|
features->rectangularLines = true;
|
|
|
|
|
features->bresenhamLines = true;
|
2019-12-06 15:20:35 -06:00
|
|
|
/* Support for Smooth lines with MSAA was removed on gen11. From the
|
|
|
|
|
* BSpec section "Multisample ModesState" table for "AA Line Support
|
|
|
|
|
* Requirements":
|
|
|
|
|
*
|
|
|
|
|
* GEN10:BUG:######## NUM_MULTISAMPLES == 1
|
|
|
|
|
*
|
|
|
|
|
* Fortunately, this isn't a case most people care about.
|
|
|
|
|
*/
|
|
|
|
|
features->smoothLines = pdevice->info.gen < 10;
|
2019-05-22 22:44:59 -05:00
|
|
|
features->stippledRectangularLines = false;
|
|
|
|
|
features->stippledBresenhamLines = true;
|
|
|
|
|
features->stippledSmoothLines = false;
|
2019-08-09 23:34:29 +01:00
|
|
|
break;
|
2019-05-22 22:44:59 -05:00
|
|
|
}
|
|
|
|
|
|
2017-09-20 13:16:26 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
|
|
|
|
|
VkPhysicalDeviceMultiviewFeatures *features =
|
|
|
|
|
(VkPhysicalDeviceMultiviewFeatures *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, multiview);
|
|
|
|
|
CORE_FEATURE(1, 1, multiviewGeometryShader);
|
|
|
|
|
CORE_FEATURE(1, 1, multiviewTessellationShader);
|
2017-03-21 14:40:46 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-17 10:28:36 -08:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, imagelessFramebuffer);
|
2019-01-17 10:28:36 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-06 19:12:34 +01:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDevicePerformanceQueryFeaturesKHR *feature =
|
|
|
|
|
(VkPhysicalDevicePerformanceQueryFeaturesKHR *)ext;
|
|
|
|
|
feature->performanceCounterQueryPools = true;
|
|
|
|
|
/* HW only supports a single configuration at a time. */
|
|
|
|
|
feature->performanceCounterMultipleQueryPools = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-20 14:36:49 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *)ext;
|
|
|
|
|
features->pipelineCreationCacheControl = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-24 03:02:35 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
|
|
|
|
|
features->pipelineExecutableInfo = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-21 16:31:25 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDevicePrivateDataFeaturesEXT *features = (void *)ext;
|
|
|
|
|
features->privateData = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-07 10:51:59 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
|
|
|
|
|
VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, protectedMemory);
|
2017-05-16 08:35:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-13 10:14:01 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceRobustness2FeaturesEXT *features = (void *)ext;
|
|
|
|
|
features->robustBufferAccess2 = true;
|
|
|
|
|
features->robustImageAccess2 = true;
|
|
|
|
|
features->nullDescriptor = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-20 13:16:26 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
|
|
|
|
|
VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
|
|
|
|
|
(VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, samplerYcbcrConversion);
|
2017-06-19 16:57:00 +01:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-09 10:16:56 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, scalarBlockLayout);
|
2018-10-09 10:16:56 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-04 11:17:11 +03:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, separateDepthStencilLayouts);
|
2019-07-04 11:17:11 +03:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-19 10:26:58 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = (void *)ext;
|
|
|
|
|
features->shaderBufferFloat32Atomics = true;
|
|
|
|
|
features->shaderBufferFloat32AtomicAdd = false;
|
|
|
|
|
features->shaderBufferFloat64Atomics = false;
|
|
|
|
|
features->shaderBufferFloat64AtomicAdd = false;
|
|
|
|
|
features->shaderSharedFloat32Atomics = true;
|
|
|
|
|
features->shaderSharedFloat32AtomicAdd = false;
|
|
|
|
|
features->shaderSharedFloat64Atomics = false;
|
|
|
|
|
features->shaderSharedFloat64AtomicAdd = false;
|
|
|
|
|
features->shaderImageFloat32Atomics = true;
|
|
|
|
|
features->shaderImageFloat32AtomicAdd = false;
|
|
|
|
|
features->sparseImageFloat32Atomics = false;
|
|
|
|
|
features->sparseImageFloat32AtomicAdd = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-12 18:30:47 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, shaderBufferInt64Atomics);
|
|
|
|
|
CORE_FEATURE(1, 2, shaderSharedInt64Atomics);
|
2019-01-12 18:30:47 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-07 17:37:38 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (void *)ext;
|
|
|
|
|
features->shaderDemoteToHelperInvocation = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-19 12:18:02 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceShaderClockFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
|
|
|
|
|
features->shaderSubgroupClock = true;
|
|
|
|
|
features->shaderDeviceClock = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-13 18:44:03 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
|
|
|
|
|
VkPhysicalDeviceShaderDrawParametersFeatures *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, shaderDrawParameters);
|
2017-10-13 11:03:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-22 15:23:46 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: {
|
|
|
|
|
VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *features =
|
|
|
|
|
(VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *)ext;
|
|
|
|
|
features->shaderIntegerFunctions2 = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-18 14:17:50 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, shaderSubgroupExtendedTypes);
|
2019-04-18 14:17:50 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-06 21:56:29 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR *)ext;
|
|
|
|
|
features->shaderTerminateInvocation = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-12 07:41:49 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
|
|
|
|
|
features->subgroupSizeControl = true;
|
|
|
|
|
features->computeFullSubgroups = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-06 15:31:17 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
|
|
|
|
|
features->texelBufferAlignment = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-16 17:44:31 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, timelineSemaphore);
|
2018-10-16 17:44:31 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-13 18:44:03 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
|
|
|
|
|
VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 1, variablePointersStorageBuffer);
|
|
|
|
|
CORE_FEATURE(1, 1, variablePointers);
|
2018-07-09 02:01:32 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-10 16:17:37 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
|
2019-03-12 15:22:19 -05:00
|
|
|
features->transformFeedback = true;
|
|
|
|
|
features->geometryStreams = true;
|
2018-09-10 16:17:37 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-12 11:02:57 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, uniformBufferStandardLayout);
|
2019-02-12 11:02:57 -06:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-10 12:05:41 -05:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
|
2019-03-12 15:22:19 -05:00
|
|
|
features->vertexAttributeInstanceRateDivisor = true;
|
|
|
|
|
features->vertexAttributeInstanceRateZeroDivisor = true;
|
2018-09-10 12:05:41 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-13 12:57:56 -06:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
|
|
|
|
|
anv_get_physical_device_features_1_1(pdevice, (void *)ext);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
|
|
|
|
|
anv_get_physical_device_features_1_2(pdevice, (void *)ext);
|
|
|
|
|
break;
|
|
|
|
|
|
2019-09-05 11:10:02 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = (void *)ext;
|
2020-01-13 12:57:56 -06:00
|
|
|
CORE_FEATURE(1, 2, vulkanMemoryModel);
|
|
|
|
|
CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope);
|
|
|
|
|
CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains);
|
2019-09-05 11:10:02 -07:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-05 10:23:35 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *)ext;
|
|
|
|
|
features->workgroupMemoryExplicitLayout = true;
|
|
|
|
|
features->workgroupMemoryExplicitLayoutScalarBlockLayout = true;
|
|
|
|
|
features->workgroupMemoryExplicitLayout8BitAccess = true;
|
|
|
|
|
features->workgroupMemoryExplicitLayout16BitAccess = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-22 08:54:13 +02:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
|
2019-03-12 15:22:19 -05:00
|
|
|
features->ycbcrImageArrays = true;
|
2019-02-22 08:54:13 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-10 13:07:25 +03:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
|
|
|
|
|
VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
|
|
|
|
|
(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)ext;
|
|
|
|
|
features->extendedDynamicState = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-24 22:15:28 -07:00
|
|
|
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR: {
|
|
|
|
|
VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR *features =
|
|
|
|
|
(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR *)ext;
|
|
|
|
|
features->shaderZeroInitializeWorkgroupMemory = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2017-01-25 12:12:20 -08:00
|
|
|
default:
|
2017-02-14 14:29:19 -08:00
|
|
|
anv_debug_ignored_stype(ext->sType);
|
2017-01-25 12:12:20 -08:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-01-13 12:57:56 -06:00
|
|
|
|
|
|
|
|
#undef CORE_FEATURE
|
2017-01-25 12:12:20 -08:00
|
|
|
}
|
|
|
|
|
|
2019-05-09 01:01:19 -07:00
|
|
|
#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS 64
|
|
|
|
|
|
|
|
|
|
#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
|
|
|
|
|
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256
|
|
|
|
|
|
2020-04-22 17:08:22 -07:00
|
|
|
#define MAX_CUSTOM_BORDER_COLORS 4096
|
|
|
|
|
|
2015-11-30 12:21:19 -08:00
|
|
|
void anv_GetPhysicalDeviceProperties(
|
2015-07-09 15:38:30 -07:00
|
|
|
VkPhysicalDevice physicalDevice,
|
2015-10-07 10:36:46 -07:00
|
|
|
VkPhysicalDeviceProperties* pProperties)
|
2015-07-09 15:38:30 -07:00
|
|
|
{
|
2015-10-07 10:36:46 -07:00
|
|
|
ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
|
2016-09-22 14:58:11 +03:00
|
|
|
const struct gen_device_info *devinfo = &pdevice->info;
|
2015-07-09 15:38:30 -07:00
|
|
|
|
2016-07-06 11:13:48 -07:00
|
|
|
/* See assertions made when programming the buffer surface state. */
|
|
|
|
|
const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
|
|
|
|
|
(1ul << 30) : (1ul << 27);
|
|
|
|
|
|
2019-01-09 16:04:22 -06:00
|
|
|
const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
|
2019-02-07 14:10:33 -06:00
|
|
|
const uint32_t max_textures =
|
|
|
|
|
pdevice->has_bindless_images ? UINT16_MAX : 128;
|
|
|
|
|
const uint32_t max_samplers =
|
|
|
|
|
pdevice->has_bindless_samplers ? UINT16_MAX :
|
|
|
|
|
(devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
|
2019-02-12 01:02:28 -06:00
|
|
|
const uint32_t max_images =
|
|
|
|
|
pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;
|
2017-06-05 15:49:05 +01:00
|
|
|
|
2019-09-12 14:20:22 -05:00
|
|
|
/* If we can use bindless for everything, claim a high per-stage limit,
|
|
|
|
|
* otherwise use the binding table size, minus the slots reserved for
|
|
|
|
|
* render targets and one slot for the descriptor buffer. */
|
2019-01-09 16:04:22 -06:00
|
|
|
const uint32_t max_per_stage =
|
2019-09-12 14:20:22 -05:00
|
|
|
pdevice->has_bindless_images && pdevice->has_a64_buffer_access
|
|
|
|
|
? UINT32_MAX : MAX_BINDING_TABLE_SIZE - MAX_RTS - 1;
|
2019-01-09 16:04:22 -06:00
|
|
|
|
2020-04-24 19:07:44 -05:00
|
|
|
/* Limit max_threads to 64 for the GPGPU_WALKER command */
|
|
|
|
|
const uint32_t max_workgroup_size = 32 * MIN2(64, devinfo->max_cs_threads);

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D                      = (1 << 14),
      .maxImageDimension2D                      = (1 << 14),
      .maxImageDimension3D                      = (1 << 11),
      .maxImageDimensionCube                    = (1 << 14),
      .maxImageArrayLayers                      = (1 << 11),
      .maxTexelBufferElements                   = 128 * 1024 * 1024,
      .maxUniformBufferRange                    = (1ul << 27),
      .maxStorageBufferRange                    = max_raw_buffer_sz,
      .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount                 = UINT32_MAX,
      .maxSamplerAllocationCount                = 64 * 1024,
      .bufferImageGranularity                   = 64, /* A cache line */
      .sparseAddressSpaceSize                   = 0,
      .maxBoundDescriptorSets                   = MAX_SETS,
      .maxPerStageDescriptorSamplers            = max_samplers,
      .maxPerStageDescriptorUniformBuffers      = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS,
      .maxPerStageDescriptorStorageBuffers      = max_ssbos,
      .maxPerStageDescriptorSampledImages       = max_textures,
      .maxPerStageDescriptorStorageImages       = max_images,
      .maxPerStageDescriptorInputAttachments    = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS,
      .maxPerStageResources                     = max_per_stage,
      .maxDescriptorSetSamplers                 = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetUniformBuffers           = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS, /* number of stages * maxPerStageDescriptorUniformBuffers */
      .maxDescriptorSetUniformBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetStorageBuffers           = 6 * max_ssbos, /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages            = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
      .maxDescriptorSetStorageImages            = 6 * max_images, /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments         = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS,
      .maxVertexInputAttributes                 = MAX_VBS,
      .maxVertexInputBindings                   = MAX_VBS,
      .maxVertexInputAttributeOffset            = 2047,
      .maxVertexInputBindingStride              = 2048,
      .maxVertexOutputComponents                = 128,
      .maxTessellationGenerationLevel           = 64,
      .maxTessellationPatchSize                 = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 2048,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations             = 32,
      .maxGeometryInputComponents               = devinfo->gen >= 8 ? 128 : 64,
      .maxGeometryOutputComponents              = 128,
      .maxGeometryOutputVertices                = 256,
      .maxGeometryTotalOutputComponents         = 1024,
      .maxFragmentInputComponents               = 116, /* 128 components - (PSIZ, CLIP_DIST0, CLIP_DIST1) */
      .maxFragmentOutputAttachments             = 8,
      .maxFragmentDualSrcAttachments            = 1,
      .maxFragmentCombinedOutputResources       = 8,
      .maxComputeSharedMemorySize               = 64 * 1024,
      .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations           = max_workgroup_size,
      .maxComputeWorkGroupSize = {
         max_workgroup_size,
         max_workgroup_size,
         max_workgroup_size,
      },
      /* 3DSTATE_SF/3DSTATE_CLIP::VertexSubPixelPrecisionSelect is set
       * explicitly to _8Bit at emit time, so the hardware rasterizes with
       * 8 bits of vertex subpixel precision (matching the CTS reference
       * rasterizer).
       */
      .subPixelPrecisionBits                    = 8,
      .subTexelPrecisionBits                    = 8,
      .mipmapPrecisionBits                      = 8,
      .maxDrawIndexedIndexValue                 = UINT32_MAX,
      .maxDrawIndirectCount                     = UINT32_MAX,
      .maxSamplerLodBias                        = 16,
      .maxSamplerAnisotropy                     = 16,
      .maxViewports                             = MAX_VIEWPORTS,
      .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
      .viewportBoundsRange                      = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits                     = 13, /* We take a float? */
      .minMemoryMapAlignment                    = 4096, /* A page */
      /* The dataport requires texel alignment so we need to assume a worst
       * case of R32G32B32A32 which is 16 bytes.
       */
      .minTexelBufferOffsetAlignment            = 16,
      .minUniformBufferOffsetAlignment          = ANV_UBO_ALIGNMENT,
      .minStorageBufferOffsetAlignment          = ANV_SSBO_ALIGNMENT,
      .minTexelOffset                           = -8,
      .maxTexelOffset                           = 7,
      .minTexelGatherOffset                     = -32,
      .maxTexelGatherOffset                     = 31,
      .minInterpolationOffset                   = -0.5,
      .maxInterpolationOffset                   = 0.4375,
      .subPixelInterpolationOffsetBits          = 4,
      .maxFramebufferWidth                      = (1 << 14),
      .maxFramebufferHeight                     = (1 << 14),
      .maxFramebufferLayers                     = (1 << 11),
      .framebufferColorSampleCounts             = sample_counts,
      .framebufferDepthSampleCounts             = sample_counts,
      .framebufferStencilSampleCounts           = sample_counts,
      .framebufferNoAttachmentsSampleCounts     = sample_counts,
      .maxColorAttachments                      = MAX_RTS,
      .sampledImageColorSampleCounts            = sample_counts,
      .sampledImageIntegerSampleCounts          = sample_counts,
      .sampledImageDepthSampleCounts            = sample_counts,
      .sampledImageStencilSampleCounts          = sample_counts,
      .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords                       = 1,
      .timestampComputeAndGraphics              = true,
      .timestampPeriod                          = 1000000000.0 / devinfo->timestamp_frequency,
      .maxClipDistances                         = 8,
      .maxCullDistances                         = 8,
      .maxCombinedClipAndCullDistances          = 8,
      .discreteQueuePriorities                  = 2,
      .pointSizeRange                           = { 0.125, 255.875 },
      .lineWidthRange                           = {
         0.0,
         (devinfo->gen >= 9 || devinfo->is_cherryview) ?
            2047.9921875 : 7.9921875,
      },
      .pointSizeGranularity                     = (1.0 / 8.0),
      .lineWidthGranularity                     = (1.0 / 128.0),
      .strictLines                              = false,
      .standardSampleLocations                  = true,
      .optimalBufferCopyOffsetAlignment         = 128,
      .optimalBufferCopyRowPitchAlignment       = 128,
      .nonCoherentAtomSize                      = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = ANV_API_VERSION,
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0x8086,
      .deviceID = pdevice->info.chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
            "%s", pdevice->name);
   memcpy(pProperties->pipelineCacheUUID,
          pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}

static void
anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
                                       VkPhysicalDeviceVulkan11Properties *p)
{
   assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES);

   memcpy(p->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
   memcpy(p->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
   memset(p->deviceLUID, 0, VK_LUID_SIZE);
   p->deviceNodeMask = 0;
   p->deviceLUIDValid = false;

   p->subgroupSize = BRW_SUBGROUP_SIZE;
   VkShaderStageFlags scalar_stages = 0;
   for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      if (pdevice->compiler->scalar_stage[stage])
         scalar_stages |= mesa_to_vk_shader_stage(stage);
   }
   p->subgroupSupportedStages = scalar_stages;
   p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                    VK_SUBGROUP_FEATURE_VOTE_BIT |
                                    VK_SUBGROUP_FEATURE_BALLOT_BIT |
                                    VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
                                    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
                                    VK_SUBGROUP_FEATURE_QUAD_BIT;
   if (pdevice->info.gen >= 8) {
      /* TODO: There's no technical reason why these can't be made to work
       * on gen7, but they don't at the moment, so it's better to leave the
       * feature disabled than enabled and broken.
       */
      p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
                                        VK_SUBGROUP_FEATURE_CLUSTERED_BIT;
   }
   p->subgroupQuadOperationsInAllStages = pdevice->info.gen >= 8;

   p->pointClippingBehavior      = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
   p->maxMultiviewViewCount      = 16;
   p->maxMultiviewInstanceIndex  = UINT32_MAX / 16;
   p->protectedNoFault           = false;
   /* This value doesn't matter for us today as our per-stage descriptors
    * are the real limit.
    */
   p->maxPerSetDescriptors       = 1024;
   p->maxMemoryAllocationSize    = MAX_MEMORY_ALLOCATION_SIZE;
}

static void
anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
                                       VkPhysicalDeviceVulkan12Properties *p)
{
   assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES);

   p->driverID = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR;
   memset(p->driverName, 0, sizeof(p->driverName));
   snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
            "Intel open-source Mesa driver");
   memset(p->driverInfo, 0, sizeof(p->driverInfo));
   snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
            "Mesa " PACKAGE_VERSION MESA_GIT_SHA1);
   p->conformanceVersion = (VkConformanceVersionKHR) {
      .major = 1,
      .minor = 2,
      .subminor = 0,
      .patch = 0,
   };

   p->denormBehaviorIndependence =
      VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
   p->roundingModeIndependence =
      VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR;

   /* Broadwell does not support HF denorms and there are restrictions on
    * other gens.  According to Kabylake's PRM:
    *
    * "math - Extended Math Function
    * [...]
    * Restriction : Half-float denorms are always retained."
    */
   p->shaderDenormFlushToZeroFloat16         = false;
   p->shaderDenormPreserveFloat16            = pdevice->info.gen > 8;
   p->shaderRoundingModeRTEFloat16           = true;
   p->shaderRoundingModeRTZFloat16           = true;
   p->shaderSignedZeroInfNanPreserveFloat16  = true;

   p->shaderDenormFlushToZeroFloat32         = true;
   p->shaderDenormPreserveFloat32            = true;
   p->shaderRoundingModeRTEFloat32           = true;
   p->shaderRoundingModeRTZFloat32           = true;
   p->shaderSignedZeroInfNanPreserveFloat32  = true;

   p->shaderDenormFlushToZeroFloat64         = true;
   p->shaderDenormPreserveFloat64            = true;
   p->shaderRoundingModeRTEFloat64           = true;
   p->shaderRoundingModeRTZFloat64           = true;
   p->shaderSignedZeroInfNanPreserveFloat64  = true;

   /* It's a bit hard to exactly map our implementation to the limits
    * described by Vulkan.  The bindless surface handle in the extended
    * message descriptors is 20 bits and it's an index into the table of
    * RENDER_SURFACE_STATE structs that starts at bindless surface base
    * address.  This means that we can have at most 1M surface states
    * allocated at any given time.  Since most image views take two
    * descriptors, this means we have a limit of about 500K image views.
    *
    * However, since we allocate surface states at vkCreateImageView time,
    * that is really a limit of about 500K image views allocated at any
    * given time.  The limit described by Vulkan, on the other hand, is a
    * limit on how many you can have in a descriptor set.  Assuming anyone
    * using 1M descriptors will be using the same image view twice a bunch
    * of times (or a bunch of null descriptors), we can safely advertise a
    * larger limit here.
    */
   const unsigned max_bindless_views = 1 << 20;
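   /* For scale: 1 << 20 = 1,048,576 surface states; at roughly two
    * descriptors per image view, as the comment above notes, that is on
    * the order of 524K image views.
    */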
   p->maxUpdateAfterBindDescriptorsInAllPools            = max_bindless_views;
   p->shaderUniformBufferArrayNonUniformIndexingNative   = false;
   p->shaderSampledImageArrayNonUniformIndexingNative    = false;
   p->shaderStorageBufferArrayNonUniformIndexingNative   = true;
   p->shaderStorageImageArrayNonUniformIndexingNative    = false;
   p->shaderInputAttachmentArrayNonUniformIndexingNative = false;
   p->robustBufferAccessUpdateAfterBind                  = true;
   p->quadDivergentImplicitLod                           = false;
   p->maxPerStageDescriptorUpdateAfterBindSamplers       = max_bindless_views;
   p->maxPerStageDescriptorUpdateAfterBindUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
   p->maxPerStageDescriptorUpdateAfterBindStorageBuffers = UINT32_MAX;
   p->maxPerStageDescriptorUpdateAfterBindSampledImages  = max_bindless_views;
   p->maxPerStageDescriptorUpdateAfterBindStorageImages  = max_bindless_views;
   p->maxPerStageDescriptorUpdateAfterBindInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS;
   p->maxPerStageUpdateAfterBindResources                = UINT32_MAX;
   p->maxDescriptorSetUpdateAfterBindSamplers            = max_bindless_views;
   p->maxDescriptorSetUpdateAfterBindUniformBuffers      = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
   p->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
   p->maxDescriptorSetUpdateAfterBindStorageBuffers      = UINT32_MAX;
   p->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
   p->maxDescriptorSetUpdateAfterBindSampledImages       = max_bindless_views;
   p->maxDescriptorSetUpdateAfterBindStorageImages       = max_bindless_views;
   p->maxDescriptorSetUpdateAfterBindInputAttachments    = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS;

   /* We support all of the depth resolve modes */
   p->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
                                   VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
                                   VK_RESOLVE_MODE_MIN_BIT_KHR |
                                   VK_RESOLVE_MODE_MAX_BIT_KHR;
   /* Average doesn't make sense for stencil so we don't support that */
   p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
   if (pdevice->info.gen >= 8) {
      /* The advanced stencil resolve modes currently require stencil
       * sampling be supported by the hardware.
       */
      p->supportedStencilResolveModes |= VK_RESOLVE_MODE_MIN_BIT_KHR |
                                         VK_RESOLVE_MODE_MAX_BIT_KHR;
   }
   p->independentResolveNone = true;
   p->independentResolve = true;

   p->filterMinmaxSingleComponentFormats = pdevice->info.gen >= 9;
   p->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;

   p->maxTimelineSemaphoreValueDifference = UINT64_MAX;

   p->framebufferIntegerColorSampleCounts =
      isl_device_get_sample_counts(&pdevice->isl_dev);
}

void anv_GetPhysicalDeviceProperties2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties2*                pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   VkPhysicalDeviceVulkan11Properties core_1_1 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES,
   };
   anv_get_physical_device_properties_1_1(pdevice, &core_1_1);

   VkPhysicalDeviceVulkan12Properties core_1_2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
   };
   anv_get_physical_device_properties_1_2(pdevice, &core_1_2);

#define CORE_RENAMED_PROPERTY(major, minor, ext_property, core_property) \
   memcpy(&properties->ext_property, &core_##major##_##minor.core_property, \
          sizeof(core_##major##_##minor.core_property))

#define CORE_PROPERTY(major, minor, property) \
   CORE_RENAMED_PROPERTY(major, minor, property, property)
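
/* A sketch of how these expand (the `properties` identifier is whatever
 * pointer each case below happens to declare):
 *
 *    CORE_PROPERTY(1, 2, driverID);
 * becomes, via token pasting,
 *    memcpy(&properties->driverID, &core_1_2.driverID,
 *           sizeof(core_1_2.driverID));
 */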

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
         VkPhysicalDeviceCustomBorderColorPropertiesEXT *properties =
            (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)ext;
         properties->maxCustomBorderColorSamplers = MAX_CUSTOM_BORDER_COLORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
         VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
            (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
         CORE_PROPERTY(1, 2, supportedDepthResolveModes);
         CORE_PROPERTY(1, 2, supportedStencilResolveModes);
         CORE_PROPERTY(1, 2, independentResolveNone);
         CORE_PROPERTY(1, 2, independentResolve);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
         VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
            (VkPhysicalDeviceDescriptorIndexingPropertiesEXT *)ext;
         CORE_PROPERTY(1, 2, maxUpdateAfterBindDescriptorsInAllPools);
         CORE_PROPERTY(1, 2, shaderUniformBufferArrayNonUniformIndexingNative);
         CORE_PROPERTY(1, 2, shaderSampledImageArrayNonUniformIndexingNative);
         CORE_PROPERTY(1, 2, shaderStorageBufferArrayNonUniformIndexingNative);
         CORE_PROPERTY(1, 2, shaderStorageImageArrayNonUniformIndexingNative);
         CORE_PROPERTY(1, 2, shaderInputAttachmentArrayNonUniformIndexingNative);
         CORE_PROPERTY(1, 2, robustBufferAccessUpdateAfterBind);
         CORE_PROPERTY(1, 2, quadDivergentImplicitLod);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSamplers);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindUniformBuffers);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageBuffers);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSampledImages);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageImages);
         CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindInputAttachments);
         CORE_PROPERTY(1, 2, maxPerStageUpdateAfterBindResources);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSamplers);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffers);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffers);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSampledImages);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageImages);
         CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindInputAttachments);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
         VkPhysicalDeviceDriverPropertiesKHR *properties =
            (VkPhysicalDeviceDriverPropertiesKHR *) ext;
         CORE_PROPERTY(1, 2, driverID);
         CORE_PROPERTY(1, 2, driverName);
         CORE_PROPERTY(1, 2, driverInfo);
         CORE_PROPERTY(1, 2, conformanceVersion);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
         VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
            (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
         /* Userptr needs page aligned memory. */
         props->minImportedHostPointerAlignment = 4096;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *)ext;
         CORE_PROPERTY(1, 1, deviceUUID);
         CORE_PROPERTY(1, 1, driverUUID);
         CORE_PROPERTY(1, 1, deviceLUID);
         CORE_PROPERTY(1, 1, deviceLUIDValid);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
         VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
            (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
         props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
         props->maxPerStageDescriptorInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxDescriptorSetInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: {
         VkPhysicalDeviceLineRasterizationPropertiesEXT *props =
            (VkPhysicalDeviceLineRasterizationPropertiesEXT *)ext;
         /* In the Skylake PRM Vol. 7, subsection titled "GIQ (Diamond)
          * Sampling Rules - Legacy Mode", it says the following:
          *
          *    "Note that the device divides a pixel into a 16x16 array of
          *    subpixels, referenced by their upper left corners."
          *
          * This is the only known reference in the PRMs to the subpixel
          * precision of line rasterization and a "16x16 array of subpixels"
          * implies 4 subpixel precision bits.  Empirical testing has shown
          * that 4 subpixel precision bits applies to all line rasterization
          * types.
          */
         props->lineSubPixelPrecisionBits = 4;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* This value doesn't matter for us today as our per-stage
          * descriptors are the real limit.
          */
         CORE_PROPERTY(1, 1, maxPerSetDescriptors);
         CORE_PROPERTY(1, 1, maxMemoryAllocationSize);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *)ext;
         CORE_PROPERTY(1, 1, maxMultiviewViewCount);
         CORE_PROPERTY(1, 1, maxMultiviewInstanceIndex);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
         VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
            (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
         properties->pciDomain = pdevice->pci_info.domain;
         properties->pciBus = pdevice->pci_info.bus;
         properties->pciDevice = pdevice->pci_info.device;
         properties->pciFunction = pdevice->pci_info.function;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: {
         VkPhysicalDevicePerformanceQueryPropertiesKHR *properties =
            (VkPhysicalDevicePerformanceQueryPropertiesKHR *)ext;
         /* We could support this by spawning a shader to do the equation
          * normalization.
          */
         properties->allowCommandBufferQueryCopies = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         CORE_PROPERTY(1, 1, pointClippingBehavior);
         break;
      }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID: {
         VkPhysicalDevicePresentationPropertiesANDROID *props =
            (VkPhysicalDevicePresentationPropertiesANDROID *)ext;
         props->sharedImage = VK_FALSE;
         break;
      }
#pragma GCC diagnostic pop

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
         VkPhysicalDeviceProtectedMemoryProperties *properties =
            (VkPhysicalDeviceProtectedMemoryProperties *)ext;
         CORE_PROPERTY(1, 1, protectedNoFault);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
         VkPhysicalDeviceRobustness2PropertiesEXT *properties = (void *)ext;
         properties->robustStorageBufferAccessSizeAlignment =
            ANV_SSBO_BOUNDS_CHECK_ALIGNMENT;
         properties->robustUniformBufferAccessSizeAlignment =
            ANV_UBO_ALIGNMENT;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
         VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
         CORE_PROPERTY(1, 2, filterMinmaxImageComponentMapping);
         CORE_PROPERTY(1, 2, filterMinmaxSingleComponentFormats);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
         CORE_PROPERTY(1, 1, subgroupSize);
         CORE_RENAMED_PROPERTY(1, 1, supportedStages,
                                     subgroupSupportedStages);
         CORE_RENAMED_PROPERTY(1, 1, supportedOperations,
                                     subgroupSupportedOperations);
         CORE_RENAMED_PROPERTY(1, 1, quadOperationsInAllStages,
                                     subgroupQuadOperationsInAllStages);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
         VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
            (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
         STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
         props->minSubgroupSize = 8;
         props->maxSubgroupSize = 32;
         /* Limit max_threads to 64 for the GPGPU_WALKER command. */
         props->maxComputeWorkgroupSubgroups = MIN2(64, pdevice->info.max_cs_threads);
         props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: {
         VkPhysicalDeviceFloatControlsPropertiesKHR *properties = (void *)ext;
         CORE_PROPERTY(1, 2, denormBehaviorIndependence);
         CORE_PROPERTY(1, 2, roundingModeIndependence);
         CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat16);
         CORE_PROPERTY(1, 2, shaderDenormPreserveFloat16);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat16);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat16);
         CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat16);
         CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat32);
         CORE_PROPERTY(1, 2, shaderDenormPreserveFloat32);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat32);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat32);
         CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat32);
         CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat64);
         CORE_PROPERTY(1, 2, shaderDenormPreserveFloat64);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat64);
         CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat64);
         CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat64);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *props =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;

         props->sampleLocationSampleCounts =
            isl_device_get_sample_counts(&pdevice->isl_dev);

         /* See also anv_GetPhysicalDeviceMultisamplePropertiesEXT */
         props->maxSampleLocationGridSize.width = 1;
         props->maxSampleLocationGridSize.height = 1;

         props->sampleLocationCoordinateRange[0] = 0;
         props->sampleLocationCoordinateRange[1] = 0.9375;
         props->sampleLocationSubPixelBits = 4;

         props->variableSampleLocations = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
         VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
            (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;

         /* From the SKL PRM Vol. 2d, docs for RENDER_SURFACE_STATE::Surface
          * Base Address:
          *
          *    "For SURFTYPE_BUFFER non-rendertarget surfaces, this field
          *    specifies the base address of the first element of the
          *    surface, computed in software by adding the surface base
          *    address to the byte offset of the element in the buffer.  The
          *    base address must be aligned to element size."
          *
          * The typed dataport messages require that things be texel
          * aligned.  Otherwise, we may just load/store the wrong data or,
          * in the worst case, there may be hangs.
          */
         props->storageTexelBufferOffsetAlignmentBytes = 16;
         props->storageTexelBufferOffsetSingleTexelAlignment = true;

         /* The sampler, however, is much more forgiving and it can handle
          * arbitrary byte alignment for linear and buffer surfaces.  It's
          * hard to find a good PRM citation for this but years of empirical
          * experience demonstrate that this is true.
          */
         props->uniformTexelBufferOffsetAlignmentBytes = 1;
         props->uniformTexelBufferOffsetSingleTexelAlignment = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: {
         VkPhysicalDeviceTimelineSemaphorePropertiesKHR *properties =
            (VkPhysicalDeviceTimelineSemaphorePropertiesKHR *) ext;
         CORE_PROPERTY(1, 2, maxTimelineSemaphoreValueDifference);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *props =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;

         props->maxTransformFeedbackStreams = MAX_XFB_STREAMS;
         props->maxTransformFeedbackBuffers = MAX_XFB_BUFFERS;
         props->maxTransformFeedbackBufferSize = (1ull << 32);
         props->maxTransformFeedbackStreamDataSize = 128 * 4;
         props->maxTransformFeedbackBufferDataSize = 128 * 4;
         props->maxTransformFeedbackBufferDataStride = 2048;
         props->transformFeedbackQueries = true;
         props->transformFeedbackStreamsLinesTriangles = false;
         props->transformFeedbackRasterizationStreamSelect = false;
         /* This requires MI_MATH */
         props->transformFeedbackDraw = pdevice->info.is_haswell ||
                                        pdevice->info.gen >= 8;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
            (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
         /* We have to restrict this a bit for multiview */
         props->maxVertexAttribDivisor = UINT32_MAX / 16;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
         anv_get_physical_device_properties_1_1(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
         anv_get_physical_device_properties_1_2(pdevice, (void *)ext);
         break;

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

#undef CORE_RENAMED_PROPERTY
#undef CORE_PROPERTY
}

static const VkQueueFamilyProperties
anv_queue_family_properties_template = {
   .timestampValidBits = 36, /* XXX: Real value here */
   .minImageTransferGranularity = { 1, 1, 1 },
};

void anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
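
   /* A note on the two-call idiom, as I read the vk_outarray helpers: when
    * the caller passes pQueueFamilyProperties == NULL, vk_outarray_append()
    * only counts entries into *pCount; otherwise it writes at most the
    * caller-provided *pCount entries and drops the rest.
    */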

   for (uint32_t i = 0; i < pdevice->queue.family_count; i++) {
      struct anv_queue_family *queue_family = &pdevice->queue.families[i];
      vk_outarray_append(&out, p) {
         *p = anv_queue_family_properties_template;
         p->queueFlags = queue_family->queueFlags;
         p->queueCount = queue_family->queueCount;
      }
   }
}

void anv_GetPhysicalDeviceQueueFamilyProperties2(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pQueueFamilyPropertyCount,
    VkQueueFamilyProperties2*                   pQueueFamilyProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   for (uint32_t i = 0; i < pdevice->queue.family_count; i++) {
      struct anv_queue_family *queue_family = &pdevice->queue.families[i];
      vk_outarray_append(&out, p) {
         p->queueFamilyProperties = anv_queue_family_properties_template;
         p->queueFamilyProperties.queueFlags = queue_family->queueFlags;
         p->queueFamilyProperties.queueCount = queue_family->queueCount;

         vk_foreach_struct(s, p->pNext) {
            anv_debug_ignored_stype(s->sType);
         }
      }
   }
}

void anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);

   pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
   for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
      pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
         .propertyFlags = physical_device->memory.types[i].propertyFlags,
         .heapIndex     = physical_device->memory.types[i].heapIndex,
      };
   }

   pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
   for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
      pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
         .size  = physical_device->memory.heaps[i].size,
         .flags = physical_device->memory.heaps[i].flags,
      };
   }
}

static void
anv_get_memory_budget(VkPhysicalDevice physicalDevice,
                      VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   uint64_t sys_available;
   ASSERTED bool has_available_memory =
      os_get_available_system_memory(&sys_available);
   assert(has_available_memory);

   VkDeviceSize total_heaps_size = 0;
   for (size_t i = 0; i < device->memory.heap_count; i++)
      total_heaps_size += device->memory.heaps[i].size;

   for (size_t i = 0; i < device->memory.heap_count; i++) {
      VkDeviceSize heap_size = device->memory.heaps[i].size;
      VkDeviceSize heap_used = device->memory.heaps[i].used;
      VkDeviceSize heap_budget;

      double heap_proportion = (double) heap_size / total_heaps_size;
      VkDeviceSize sys_available_prop = sys_available * heap_proportion;

      /* Let's not incite the app to starve the system: report at most 90%
       * of the available system memory.
       */
      uint64_t heap_available = sys_available_prop * 9 / 10;
      heap_budget = MIN2(heap_size, heap_used + heap_available);

      /* Round down to the nearest MB. */
      heap_budget &= ~((1ull << 20) - 1);
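
      /* Worked example with made-up numbers: a single 4 GiB heap with
       * 512 MiB used and 8 GiB of available system memory gives
       * heap_available = 8 GiB * 9 / 10 = 7.2 GiB, so heap_budget =
       * MIN2(4 GiB, 512 MiB + 7.2 GiB) = 4 GiB.  The mask above clears
       * the low 20 bits, e.g. 0x40F123 -> 0x400000, i.e. it rounds down
       * to a 1 MiB boundary.
       */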

      /* The heapBudget value must be non-zero for array elements less than
       * VkPhysicalDeviceMemoryProperties::memoryHeapCount.  The heapBudget
       * value must be less than or equal to VkMemoryHeap::size for each
       * heap.
       */
      assert(0 < heap_budget && heap_budget <= heap_size);

      memoryBudget->heapUsage[i] = heap_used;
      memoryBudget->heapBudget[i] = heap_budget;
   }

   /* The heapBudget and heapUsage values must be zero for array elements
    * greater than or equal to
    * VkPhysicalDeviceMemoryProperties::memoryHeapCount.
    */
   for (uint32_t i = device->memory.heap_count; i < VK_MAX_MEMORY_HEAPS; i++) {
      memoryBudget->heapBudget[i] = 0;
      memoryBudget->heapUsage[i] = 0;
   }
}

void anv_GetPhysicalDeviceMemoryProperties2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties2*          pMemoryProperties)
{
   anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
                                         &pMemoryProperties->memoryProperties);

   vk_foreach_struct(ext, pMemoryProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
         anv_get_memory_budget(physicalDevice, (void*)ext);
         break;
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

void
anv_GetDeviceGroupPeerMemoryFeatures(
    VkDevice                                    device,
    uint32_t                                    heapIndex,
    uint32_t                                    localDeviceIndex,
    uint32_t                                    remoteDeviceIndex,
    VkPeerMemoryFeatureFlags*                   pPeerMemoryFeatures)
{
   assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  _instance,
    const char*                                 pName)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   return vk_instance_get_proc_addr(&instance->vk,
                                    &anv_instance_entrypoints,
                                    pName);
}

/* With version 1+ of the loader interface the ICD should expose
 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen
 * in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_GetInstanceProcAddr(instance, pName);
}

/* With version 4+ of the loader interface the ICD should expose
 * vk_icdGetPhysicalDeviceProcAddr().
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
    VkInstance                                  _instance,
    const char*                                 pName);

PFN_vkVoidFunction vk_icdGetPhysicalDeviceProcAddr(
    VkInstance                                  _instance,
    const char*                                 pName)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   return vk_instance_get_physical_device_proc_addr(&instance->vk, pName);
}

static struct anv_state
anv_state_pool_emit_data(struct anv_state_pool *pool,
                         size_t size, size_t align, const void *p)
{
   struct anv_state state;

   state = anv_state_pool_alloc(pool, size, align);
   memcpy(state.map, p, size);

   return state;
}
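
/* Usage sketch with made-up values -- copy a small constant blob into
 * GPU-visible dynamic state and keep the returned handle:
 *
 *    static const float weights[4] = { 0.25f, 0.25f, 0.25f, 0.25f };
 *    struct anv_state s =
 *       anv_state_pool_emit_data(&device->dynamic_state_pool,
 *                                sizeof(weights), 16, weights);
 *
 * anv_device_init_border_colors() below does exactly this for the border
 * color tables.
 */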

static void
anv_device_init_border_colors(struct anv_device *device)
{
   if (device->info.is_haswell) {
      static const struct hsw_border_color border_colors[] = {
         [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
         [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
         [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
         [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .uint32 = { 0, 0, 0, 0 } },
         [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .uint32 = { 0, 0, 0, 1 } },
         [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .uint32 = { 1, 1, 1, 1 } },
      };

      device->border_colors =
         anv_state_pool_emit_data(&device->dynamic_state_pool,
                                  sizeof(border_colors), 512, border_colors);
   } else {
      static const struct gen8_border_color border_colors[] = {
         [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
         [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
         [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
         [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .uint32 = { 0, 0, 0, 0 } },
         [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .uint32 = { 0, 0, 0, 1 } },
         [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .uint32 = { 1, 1, 1, 1 } },
      };

      device->border_colors =
         anv_state_pool_emit_data(&device->dynamic_state_pool,
                                  sizeof(border_colors), 64, border_colors);
   }
}

static VkResult
anv_device_init_trivial_batch(struct anv_device *device)
{
   VkResult result = anv_device_alloc_bo(device, "trivial-batch", 4096,
                                         ANV_BO_ALLOC_MAPPED,
                                         0 /* explicit_address */,
                                         &device->trivial_batch_bo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch batch = {
      .start = device->trivial_batch_bo->map,
      .next = device->trivial_batch_bo->map,
      .end = device->trivial_batch_bo->map + 4096,
   };

   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   if (!device->info.has_llc)
      gen_clflush_range(batch.start, batch.next - batch.start);

   return VK_SUCCESS;
}

static int
vk_priority_to_gen(int priority)
{
   switch (priority) {
   case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
      return GEN_CONTEXT_LOW_PRIORITY;
   case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
      return GEN_CONTEXT_MEDIUM_PRIORITY;
   case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
      return GEN_CONTEXT_HIGH_PRIORITY;
   case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
      return GEN_CONTEXT_REALTIME_PRIORITY;
   default:
      unreachable("Invalid priority");
   }
}
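
/* For example, a queue created with a chained
 * VkDeviceQueueGlobalPriorityCreateInfoEXT whose globalPriority is
 * VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT maps to GEN_CONTEXT_HIGH_PRIORITY;
 * anv_CreateDevice below pulls that struct off the first queue-create
 * pNext chain.
 */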

static VkResult
anv_device_init_hiz_clear_value_bo(struct anv_device *device)
{
   VkResult result = anv_device_alloc_bo(device, "hiz-clear-value", 4096,
                                         ANV_BO_ALLOC_MAPPED,
                                         0 /* explicit_address */,
                                         &device->hiz_clear_bo);
   if (result != VK_SUCCESS)
      return result;

   union isl_color_value hiz_clear = { .u32 = { 0, } };
   hiz_clear.f32[0] = ANV_HZ_FC_VAL;

   memcpy(device->hiz_clear_bo->map, hiz_clear.u32, sizeof(hiz_clear.u32));

   if (!device->info.has_llc)
      gen_clflush_range(device->hiz_clear_bo->map, sizeof(hiz_clear.u32));

   return VK_SUCCESS;
}

static bool
get_bo_from_pool(struct intel_batch_decode_bo *ret,
                 struct anv_block_pool *pool,
                 uint64_t address)
{
   anv_block_pool_foreach_bo(bo, pool) {
      uint64_t bo_address = intel_48b_address(bo->offset);
      if (address >= bo_address && address < (bo_address + bo->size)) {
         *ret = (struct intel_batch_decode_bo) {
            .addr = bo_address,
            .size = bo->size,
            .map = bo->map,
         };
         return true;
      }
   }
   return false;
}

/* Finding a buffer for batch decoding */
static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
   struct anv_device *device = v_batch;
   struct intel_batch_decode_bo ret_bo = {};

   assert(ppgtt);

   if (get_bo_from_pool(&ret_bo, &device->dynamic_state_pool.block_pool, address))
      return ret_bo;
   if (get_bo_from_pool(&ret_bo, &device->instruction_state_pool.block_pool, address))
      return ret_bo;
   if (get_bo_from_pool(&ret_bo, &device->binding_table_pool.block_pool, address))
      return ret_bo;
   if (get_bo_from_pool(&ret_bo, &device->surface_state_pool.block_pool, address))
      return ret_bo;

   if (!device->cmd_buffer_being_decoded)
      return (struct intel_batch_decode_bo) { };

   struct anv_batch_bo **bo;

   u_vector_foreach(bo, &device->cmd_buffer_being_decoded->seen_bbos) {
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
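      /* (~0ull >> 16) evaluates to 0x0000ffffffffffff, i.e. it keeps the
       * low 48 bits of the canonical GPU address, matching the decoder.
       */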

      if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
         return (struct intel_batch_decode_bo) {
            .addr = bo_address,
            .size = (*bo)->bo->size,
            .map = (*bo)->bo->map,
         };
      }
   }

   return (struct intel_batch_decode_bo) { };
}

struct intel_aux_map_buffer {
   struct intel_buffer base;
   struct anv_state state;
};

static struct intel_buffer *
intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
   struct intel_aux_map_buffer *buf = malloc(sizeof(struct intel_aux_map_buffer));
   if (!buf)
      return NULL;

   struct anv_device *device = (struct anv_device*)driver_ctx;
   assert(device->physical->supports_48bit_addresses &&
          device->physical->use_softpin);

   struct anv_state_pool *pool = &device->dynamic_state_pool;
   buf->state = anv_state_pool_alloc(pool, size, size);

   buf->base.gpu = pool->block_pool.bo->offset + buf->state.offset;
   buf->base.gpu_end = buf->base.gpu + buf->state.alloc_size;
   buf->base.map = buf->state.map;
   buf->base.driver_bo = &buf->state;
   return &buf->base;
}
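
/* Design note (my reading of the asserts above): the device must be using
 * softpin with 48-bit addressing, so the dynamic-state pool's BO has a
 * stable GPU address and the CPU-computed `gpu` range above stays valid
 * for the lifetime of the allocation.
 */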

static void
intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
   struct intel_aux_map_buffer *buf = (struct intel_aux_map_buffer*)buffer;
   struct anv_device *device = (struct anv_device*)driver_ctx;
   struct anv_state_pool *pool = &device->dynamic_state_pool;
   anv_state_pool_free(pool, buf->state);
   free(buf);
}

static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
   .alloc = intel_aux_map_buffer_alloc,
   .free = intel_aux_map_buffer_free,
};

static VkResult
check_physical_device_features(VkPhysicalDevice physicalDevice,
                               const VkPhysicalDeviceFeatures *features)
{
   VkPhysicalDeviceFeatures supported_features;
   anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
   VkBool32 *supported_feature = (VkBool32 *)&supported_features;
   VkBool32 *enabled_feature = (VkBool32 *)features;
   unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
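   /* This scan works because VkPhysicalDeviceFeatures is specified as
    * nothing but consecutive VkBool32 members, so it can be treated as a
    * flat VkBool32 array.
    */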
|
|
|
|
|
for (uint32_t i = 0; i < num_features; i++) {
|
|
|
|
|
if (enabled_feature[i] && !supported_feature[i])
|
|
|
|
|
return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
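
/* Both device-creation paths below funnel through this check: the legacy
 * VkDeviceCreateInfo::pEnabledFeatures pointer and a chained
 * VkPhysicalDeviceFeatures2. A client would typically request a feature
 * like this (illustrative sketch, not driver code):
 *
 *    VkPhysicalDeviceFeatures features = {
 *       .robustBufferAccess = VK_TRUE,
 *    };
 *    VkDeviceCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
 *       .pEnabledFeatures = &features,
 *       ... queue create infos etc. ...
 *    };
 */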

VkResult anv_CreateDevice(
    VkPhysicalDevice                            physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDevice*                                   pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   /* Check enabled features */
   bool robust_buffer_access = false;
   if (pCreateInfo->pEnabledFeatures) {
      result = check_physical_device_features(physicalDevice,
                                              pCreateInfo->pEnabledFeatures);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
         robust_buffer_access = true;
   }

   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
         const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
         result = check_physical_device_features(physicalDevice,
                                                 &features->features);
         if (result != VK_SUCCESS)
            return result;

         if (features->features.robustBufferAccess)
            robust_buffer_access = true;
         break;
      }

      default:
         /* Don't warn */
         break;
      }
   }

   /* Check requested queues and fail if we are requested to create any
    * queues with flags we don't support.
    */
   assert(pCreateInfo->queueCreateInfoCount > 0);
   for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
         return vk_error(VK_ERROR_INITIALIZATION_FAILED);
   }

   /* Check if client specified queue priority. */
   const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
      vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
                           DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);

   VkQueueGlobalPriorityEXT priority =
      queue_priority ? queue_priority->globalPriority :
         VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
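
   /* For reference, a client requesting a non-default priority chains the
    * struct into the first queue create info (illustrative sketch, not
    * driver code):
    *
    *    VkDeviceQueueGlobalPriorityCreateInfoEXT prio = {
    *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT,
    *       .globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT,
    *    };
    *    VkDeviceQueueCreateInfo queue_info = {
    *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
    *       .pNext = &prio,
    *       ...
    *    };
    */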

   device = vk_alloc2(&physical_device->instance->vk.alloc, pAllocator,
                      sizeof(*device), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct vk_device_dispatch_table dispatch_table;
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
      anv_genX(&physical_device->info, device_entrypoints), true);
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
      &anv_device_entrypoints, false);

   result = vk_device_init(&device->vk, &physical_device->vk,
                           &dispatch_table, pCreateInfo, pAllocator);
   if (result != VK_SUCCESS) {
      vk_error(result);
      goto fail_alloc;
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      intel_batch_decode_ctx_init(&device->decoder_ctx,
                                  &physical_device->info,
                                  stderr, decode_flags, NULL,
                                  decode_get_bo, NULL, device);
   }

   device->physical = physical_device;
   device->no_hw = physical_device->no_hw;
   device->_lost = false;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_device;
   }

   uint32_t num_queues = 0;
   for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
      num_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;

   if (device->physical->engine_info) {
      /* The kernel API supports at most 64 engines */
      assert(num_queues <= 64);
      uint16_t engine_classes[64];
      int engine_count = 0;
      for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
         const VkDeviceQueueCreateInfo *queueCreateInfo =
            &pCreateInfo->pQueueCreateInfos[i];

         assert(queueCreateInfo->queueFamilyIndex <
                physical_device->queue.family_count);
         struct anv_queue_family *queue_family =
            &physical_device->queue.families[queueCreateInfo->queueFamilyIndex];

         for (uint32_t j = 0; j < queueCreateInfo->queueCount; j++)
            engine_classes[engine_count++] = queue_family->engine_class;
      }
      device->context_id =
         anv_gem_create_context_engines(device,
                                        physical_device->engine_info,
                                        engine_count, engine_classes);
   } else {
      assert(num_queues == 1);
      device->context_id = anv_gem_create_context(device);
   }
   if (device->context_id == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }
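
   /* anv_gem_create_context_engines() (anv_gem.c) is expected to create the
    * i915 context with an I915_CONTEXT_PARAM_ENGINES list holding one engine
    * entry per requested queue, so queue N later submits with execbuf engine
    * index N (see the exec_flags computation in the queue-init loop below).
    */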

   device->has_thread_submit = physical_device->has_thread_submit;

   device->queues =
      vk_zalloc(&device->vk.alloc, num_queues * sizeof(*device->queues), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (device->queues == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_context_id;
   }

   device->queue_count = 0;
   for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queueCreateInfo =
         &pCreateInfo->pQueueCreateInfos[i];

      for (uint32_t j = 0; j < queueCreateInfo->queueCount; j++) {
         /* When using legacy contexts, we use I915_EXEC_RENDER but, with
          * engine-based contexts, the bottom 6 bits of exec_flags are used
          * for the engine ID.
          */
         uint32_t exec_flags = device->physical->engine_info ?
            device->queue_count : I915_EXEC_RENDER;

         result = anv_queue_init(device, &device->queues[device->queue_count],
                                 exec_flags, queueCreateInfo);
         if (result != VK_SUCCESS)
            goto fail_queues;

         device->queue_count++;
      }
   }

   if (physical_device->use_softpin) {
      if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
         result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
         goto fail_queues;
      }

      /* keep the page with address zero out of the allocator */
      util_vma_heap_init(&device->vma_lo,
                         LOW_HEAP_MIN_ADDRESS, LOW_HEAP_SIZE);

      util_vma_heap_init(&device->vma_cva, CLIENT_VISIBLE_HEAP_MIN_ADDRESS,
                         CLIENT_VISIBLE_HEAP_SIZE);

      /* Leave the last 4GiB out of the high vma range, so that no state
       * base address + size can overflow 48 bits. For more information see
       * the comment about Wa32bitGeneralStateOffset in anv_allocator.c
       */
      util_vma_heap_init(&device->vma_hi, HIGH_HEAP_MIN_ADDRESS,
                         physical_device->gtt_size - (1ull << 32) -
                         HIGH_HEAP_MIN_ADDRESS);
   }
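
   /* Roughly, the resulting softpin VA layout from low to high (bounds come
    * from the *_MIN_ADDRESS/*_SIZE defines in anv_private.h): vma_lo for
    * 32-bit-addressable allocations, the fixed-address state pools set up
    * below, vma_cva for client-visible (opaque capture) addresses, and
    * vma_hi for everything else, minus the 4GiB guard described above.
    */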

   list_inithead(&device->memory_objects);

   /* As per spec, the driver implementation may deny requests to acquire
    * a priority above the default priority (MEDIUM) if the caller does not
    * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
    * is returned.
    */
   if (physical_device->has_context_priority) {
      int err = anv_gem_set_context_param(device->fd, device->context_id,
                                          I915_CONTEXT_PARAM_PRIORITY,
                                          vk_priority_to_gen(priority));
      if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
         result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
         goto fail_vmas;
      }
   }

   device->info = physical_device->info;
   device->isl_dev = physical_device->isl_dev;

   /* On Broadwell and later, we can use batch chaining to more efficiently
    * implement growing command buffers.  On Haswell and earlier, the kernel
    * command parser gets in the way and we have to fall back to growing
    * the batch.
    */
   device->can_chain_batches = device->info.gen >= 8;

   device->robust_buffer_access = robust_buffer_access;

   if (pthread_mutex_init(&device->mutex, NULL) != 0) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_vmas;
   }

   /* Use a monotonic clock for the queue_submit condition variable so that
    * timed waits are immune to wall-clock adjustments.
    */
   pthread_condattr_t condattr;
   if (pthread_condattr_init(&condattr) != 0) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_mutex;
   }
   if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
      pthread_condattr_destroy(&condattr);
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_mutex;
   }
   if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
      pthread_condattr_destroy(&condattr);
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_mutex;
   }
   pthread_condattr_destroy(&condattr);

   result = anv_bo_cache_init(&device->bo_cache);
   if (result != VK_SUCCESS)
      goto fail_queue_cond;

   anv_bo_pool_init(&device->batch_bo_pool, device, "batch");

   /* Because scratch is also relative to General State Base Address, we leave
    * the base address 0 and start the pool memory at an offset.  This way we
    * get the correct offsets in the anv_states that get allocated from it.
    */
   result = anv_state_pool_init(&device->general_state_pool, device,
                                "general pool",
                                0, GENERAL_STATE_POOL_MIN_ADDRESS, 16384);
   if (result != VK_SUCCESS)
      goto fail_batch_bo_pool;

   result = anv_state_pool_init(&device->dynamic_state_pool, device,
                                "dynamic pool",
                                DYNAMIC_STATE_POOL_MIN_ADDRESS, 0, 16384);
   if (result != VK_SUCCESS)
      goto fail_general_state_pool;

   if (device->info.gen >= 8) {
      /* The border color pointer is limited to 24 bits, so we need to make
       * sure that any such color used at any point in the program doesn't
       * exceed that limit.
       * We achieve that by reserving all the custom border colors we support
       * right off the bat, so they are close to the base address.
       */
      anv_state_reserved_pool_init(&device->custom_border_colors,
                                   &device->dynamic_state_pool,
                                   MAX_CUSTOM_BORDER_COLORS,
                                   sizeof(struct gen8_border_color), 64);
   }

   result = anv_state_pool_init(&device->instruction_state_pool, device,
                                "instruction pool",
                                INSTRUCTION_STATE_POOL_MIN_ADDRESS, 0, 16384);
   if (result != VK_SUCCESS)
      goto fail_dynamic_state_pool;

   result = anv_state_pool_init(&device->surface_state_pool, device,
                                "surface state pool",
                                SURFACE_STATE_POOL_MIN_ADDRESS, 0, 4096);
   if (result != VK_SUCCESS)
      goto fail_instruction_state_pool;

   if (physical_device->use_softpin) {
      /* The binding table pool shares its base address with the surface
       * state pool but starts at a negative offset from it; the assert
       * checks that this offset fits in a signed 32-bit value.
       */
      int64_t bt_pool_offset = (int64_t)BINDING_TABLE_POOL_MIN_ADDRESS -
                               (int64_t)SURFACE_STATE_POOL_MIN_ADDRESS;
      assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);
      result = anv_state_pool_init(&device->binding_table_pool, device,
                                   "binding table pool",
                                   SURFACE_STATE_POOL_MIN_ADDRESS,
                                   bt_pool_offset, 4096);
      if (result != VK_SUCCESS)
         goto fail_surface_state_pool;
   }

   if (device->info.has_aux_map) {
      device->aux_map_ctx = intel_aux_map_init(device, &aux_map_allocator,
                                               &physical_device->info);
      if (!device->aux_map_ctx) {
         result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
         goto fail_binding_table_pool;
      }
   }

   result = anv_device_alloc_bo(device, "workaround", 4096,
                                ANV_BO_ALLOC_CAPTURE | ANV_BO_ALLOC_MAPPED /* flags */,
                                0 /* explicit_address */,
                                &device->workaround_bo);
   if (result != VK_SUCCESS)
      goto fail_surface_aux_map_pool;

   device->workaround_address = (struct anv_address) {
      .bo = device->workaround_bo,
      .offset = align_u32(
         intel_debug_write_identifiers(device->workaround_bo->map,
                                       device->workaround_bo->size,
                                       "Anv") + 8, 8),
   };

   device->debug_frame_desc =
      intel_debug_get_identifier_block(device->workaround_bo->map,
                                       device->workaround_bo->size,
                                       GEN_DEBUG_BLOCK_TYPE_FRAME);

   result = anv_device_init_trivial_batch(device);
   if (result != VK_SUCCESS)
      goto fail_workaround_bo;

   /* Allocate a null surface state at surface state offset 0.  This makes
    * NULL descriptor handling trivial because we can just memset structures
    * to zero and they have a valid descriptor.
    */
   device->null_surface_state =
      anv_state_pool_alloc(&device->surface_state_pool,
                           device->isl_dev.ss.size,
                           device->isl_dev.ss.align);
   isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
                       isl_extent3d(1, 1, 1) /* This shouldn't matter */);
   assert(device->null_surface_state.offset == 0);

   if (device->info.gen >= 10) {
      result = anv_device_init_hiz_clear_value_bo(device);
      if (result != VK_SUCCESS)
         goto fail_trivial_batch_bo;
   }

   anv_scratch_pool_init(device, &device->scratch_pool);

   result = anv_genX(&device->info, init_device_state)(device);
   if (result != VK_SUCCESS)
      goto fail_clear_value_bo;

   anv_pipeline_cache_init(&device->default_pipeline_cache, device,
                           true /* cache_enabled */, false /* external_sync */);

   anv_device_init_blorp(device);

   anv_device_init_border_colors(device);

   anv_device_perf_init(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

 fail_clear_value_bo:
   if (device->info.gen >= 10)
      anv_device_release_bo(device, device->hiz_clear_bo);
   anv_scratch_pool_finish(device, &device->scratch_pool);
 fail_trivial_batch_bo:
   anv_device_release_bo(device, device->trivial_batch_bo);
 fail_workaround_bo:
   anv_device_release_bo(device, device->workaround_bo);
 fail_surface_aux_map_pool:
   if (device->info.has_aux_map) {
      intel_aux_map_finish(device->aux_map_ctx);
      device->aux_map_ctx = NULL;
   }
 fail_binding_table_pool:
   if (physical_device->use_softpin)
      anv_state_pool_finish(&device->binding_table_pool);
 fail_surface_state_pool:
   anv_state_pool_finish(&device->surface_state_pool);
 fail_instruction_state_pool:
   anv_state_pool_finish(&device->instruction_state_pool);
 fail_dynamic_state_pool:
   if (device->info.gen >= 8)
      anv_state_reserved_pool_finish(&device->custom_border_colors);
   anv_state_pool_finish(&device->dynamic_state_pool);
 fail_general_state_pool:
   anv_state_pool_finish(&device->general_state_pool);
 fail_batch_bo_pool:
   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_bo_cache_finish(&device->bo_cache);
 fail_queue_cond:
   pthread_cond_destroy(&device->queue_submit);
 fail_mutex:
   pthread_mutex_destroy(&device->mutex);
 fail_vmas:
   if (physical_device->use_softpin) {
      util_vma_heap_finish(&device->vma_hi);
      util_vma_heap_finish(&device->vma_cva);
      util_vma_heap_finish(&device->vma_lo);
   }
 fail_queues:
   for (uint32_t i = 0; i < device->queue_count; i++)
      anv_queue_finish(&device->queues[i]);
   vk_free(&device->vk.alloc, device->queues);
 fail_context_id:
   anv_gem_destroy_context(device, device->context_id);
 fail_fd:
   close(device->fd);
 fail_device:
   vk_device_finish(&device->vk);
 fail_alloc:
   vk_free(&device->vk.alloc, device);

   return result;
}
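
/* The fail_* labels above form a single unwind ladder: each label releases
 * exactly the resources acquired after the previous label and then falls
 * through, so a goto from any point in anv_CreateDevice() tears down
 * everything initialized so far and nothing more.
 */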

void anv_DestroyDevice(
    VkDevice                                    _device,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (!device)
      return;

   anv_device_finish_blorp(device);

   anv_pipeline_cache_finish(&device->default_pipeline_cache);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors.  The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   if (device->info.gen >= 8)
      anv_state_reserved_pool_finish(&device->custom_border_colors);
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
   anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
#endif

   anv_scratch_pool_finish(device, &device->scratch_pool);

   anv_device_release_bo(device, device->workaround_bo);
   anv_device_release_bo(device, device->trivial_batch_bo);
   if (device->info.gen >= 10)
      anv_device_release_bo(device, device->hiz_clear_bo);

   if (device->info.has_aux_map) {
      intel_aux_map_finish(device->aux_map_ctx);
      device->aux_map_ctx = NULL;
   }

   if (device->physical->use_softpin)
      anv_state_pool_finish(&device->binding_table_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_state_pool_finish(&device->instruction_state_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_state_pool_finish(&device->general_state_pool);

   anv_bo_pool_finish(&device->batch_bo_pool);

   anv_bo_cache_finish(&device->bo_cache);

   if (device->physical->use_softpin) {
      util_vma_heap_finish(&device->vma_hi);
      util_vma_heap_finish(&device->vma_cva);
      util_vma_heap_finish(&device->vma_lo);
   }

   pthread_cond_destroy(&device->queue_submit);
   pthread_mutex_destroy(&device->mutex);

   for (uint32_t i = 0; i < device->queue_count; i++)
      anv_queue_finish(&device->queues[i]);
   vk_free(&device->vk.alloc, device->queues);

   anv_gem_destroy_context(device, device->context_id);

   if (INTEL_DEBUG & DEBUG_BATCH)
      intel_batch_decode_ctx_finish(&device->decoder_ctx);

   close(device->fd);

   vk_device_finish(&device->vk);
   vk_free(&device->vk.alloc, device);
}

VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t*                                   pPropertyCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

void anv_GetDeviceQueue2(
    VkDevice                                    _device,
    const VkDeviceQueueInfo2*                   pQueueInfo,
    VkQueue*                                    pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_physical_device *pdevice = device->physical;

   assert(pQueueInfo->queueFamilyIndex < pdevice->queue.family_count);
   struct anv_queue_family *queue_family =
      &pdevice->queue.families[pQueueInfo->queueFamilyIndex];

   int idx_in_family = 0;
   struct anv_queue *queue = NULL;
   for (uint32_t i = 0; i < device->queue_count; i++) {
      if (device->queues[i].family != queue_family)
         continue;

      if (idx_in_family == pQueueInfo->queueIndex) {
         queue = &device->queues[i];
         break;
      }

      idx_in_family++;
   }
   assert(queue != NULL);

   /* Per the spec, we must return NULL if pQueueInfo->flags does not match
    * the flags the queue was created with.
    */
   if (queue && queue->flags == pQueueInfo->flags)
      *pQueue = anv_queue_to_handle(queue);
   else
      *pQueue = NULL;
}

void
_anv_device_report_lost(struct anv_device *device)
{
   assert(p_atomic_read(&device->_lost) > 0);

   device->lost_reported = true;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      struct anv_queue *queue = &device->queues[i];
      if (queue->lost) {
         __vk_errorf(device->physical->instance, &device->vk.base,
                     VK_ERROR_DEVICE_LOST,
                     queue->error_file, queue->error_line,
                     "%s", queue->error_msg);
      }
   }
}

VkResult
_anv_device_set_lost(struct anv_device *device,
                     const char *file, int line,
                     const char *msg, ...)
{
   VkResult err;
   va_list ap;

   if (p_atomic_read(&device->_lost) > 0)
      return VK_ERROR_DEVICE_LOST;

   p_atomic_inc(&device->_lost);
   device->lost_reported = true;

   va_start(ap, msg);
   err = __vk_errorv(device->physical->instance, &device->vk.base,
                     VK_ERROR_DEVICE_LOST, file, line, msg, ap);
   va_end(ap);

   if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return err;
}

VkResult
_anv_queue_set_lost(struct anv_queue *queue,
                    const char *file, int line,
                    const char *msg, ...)
{
   va_list ap;

   if (queue->lost)
      return VK_ERROR_DEVICE_LOST;

   queue->lost = true;

   queue->error_file = file;
   queue->error_line = line;
   va_start(ap, msg);
   vsnprintf(queue->error_msg, sizeof(queue->error_msg),
             msg, ap);
   va_end(ap);

   p_atomic_inc(&queue->device->_lost);

   if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}
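
/* Callers reach the two functions above through the anv_device_set_lost()
 * and anv_queue_set_lost() wrapper macros (anv_private.h), which splice in
 * the call site, roughly:
 *
 *    #define anv_device_set_lost(dev, ...) \
 *       _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
 */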

VkResult
anv_device_query_status(struct anv_device *device)
{
   /* This isn't likely as most of the callers of this function already check
    * for it.  However, it doesn't hurt to check and it potentially lets us
    * avoid an ioctl.
    */
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint32_t active, pending;
   int ret = anv_gem_context_get_reset_stats(device->fd, device->context_id,
                                             &active, &pending);
   if (ret == -1) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "get_reset_stats failed: %m");
   }

   if (active) {
      return anv_device_set_lost(device, "GPU hung on one of our command buffers");
   } else if (pending) {
      return anv_device_set_lost(device, "GPU hung with commands in-flight");
   }

   return VK_SUCCESS;
}
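
/* Note on the reset stats: the underlying i915 GET_RESET_STATS ioctl reports
 * how many of this context's batches were executing ("active") or queued
 * ("pending") when a GPU reset happened, which is why either count being
 * non-zero is treated as device loss here.
 */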

VkResult
anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
{
   /* Note:  This only returns whether or not the BO is in use by an i915 GPU.
    * Other usages of the BO (such as on different hardware) will not be
    * flagged as "busy" by this ioctl.  Use with care.
    */
   int ret = anv_gem_busy(device, bo->gem_handle);
   if (ret == 1) {
      return VK_NOT_READY;
   } else if (ret == -1) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "gem busy failed: %m");
   }

   /* Query for device status after the busy call.  If the BO we're checking
    * got caught in a GPU hang we don't want to return VK_SUCCESS to the
    * client because it clearly doesn't have valid data.  Yes, this most
    * likely means an ioctl, but we just did an ioctl to query the busy status
    * so it's no great loss.
    */
   return anv_device_query_status(device);
}

VkResult
anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                int64_t timeout)
{
   int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret == -1 && errno == ETIME) {
      return VK_TIMEOUT;
   } else if (ret == -1) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "gem wait failed: %m");
   }

   /* Query for device status after the wait.  If the BO we're waiting on got
    * caught in a GPU hang we don't want to return VK_SUCCESS to the client
    * because it clearly doesn't have valid data.  Yes, this most likely means
    * an ioctl, but we just did an ioctl to wait so it's no great loss.
    */
   return anv_device_query_status(device);
}

VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      VkResult res = anv_queue_submit_simple_batch(&device->queues[i], NULL);
      if (res != VK_SUCCESS)
         return res;
   }

   return VK_SUCCESS;
}

uint64_t
anv_vma_alloc(struct anv_device *device,
              uint64_t size, uint64_t align,
              enum anv_bo_alloc_flags alloc_flags,
              uint64_t client_address)
{
   pthread_mutex_lock(&device->vma_mutex);

   uint64_t addr = 0;

   if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) {
      if (client_address) {
         if (util_vma_heap_alloc_addr(&device->vma_cva,
                                      client_address, size)) {
            addr = client_address;
         }
      } else {
         addr = util_vma_heap_alloc(&device->vma_cva, size, align);
      }
      /* We don't want to fall back to other heaps */
      goto done;
   }

   assert(client_address == 0);

   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
      addr = util_vma_heap_alloc(&device->vma_hi, size, align);

   if (addr == 0)
      addr = util_vma_heap_alloc(&device->vma_lo, size, align);

 done:
   pthread_mutex_unlock(&device->vma_mutex);

   assert(addr == intel_48b_address(addr));
   return intel_canonical_address(addr);
}
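
/* Sketch of the canonical-address round trip performed by anv_vma_alloc()
 * and anv_vma_free() (illustrative values only): a 48-bit address with bit
 * 47 set, e.g. 0x0000a00000000000, becomes 0xffffa00000000000 after
 * intel_canonical_address() sign-extends bit 47, and intel_48b_address()
 * masks it back to 0x0000a00000000000 before the heap lookup below.
 */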

void
anv_vma_free(struct anv_device *device,
             uint64_t address, uint64_t size)
{
   const uint64_t addr_48b = intel_48b_address(address);

   pthread_mutex_lock(&device->vma_mutex);

   if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
       addr_48b <= LOW_HEAP_MAX_ADDRESS) {
      util_vma_heap_free(&device->vma_lo, addr_48b, size);
   } else if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
              addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS) {
      util_vma_heap_free(&device->vma_cva, addr_48b, size);
   } else {
      assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
      util_vma_heap_free(&device->vma_hi, addr_48b, size);
   }

   pthread_mutex_unlock(&device->vma_mutex);
}

VkResult anv_AllocateMemory(
    VkDevice                                    _device,
    const VkMemoryAllocateInfo*                 pAllocateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDeviceMemory*                             pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_physical_device *pdevice = device->physical;
   struct anv_device_memory *mem;
   VkResult result = VK_SUCCESS;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
   assert(pAllocateInfo->allocationSize > 0);

   VkDeviceSize aligned_alloc_size =
      align_u64(pAllocateInfo->allocationSize, 4096);

   if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
   struct anv_memory_type *mem_type =
      &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
   assert(mem_type->heapIndex < pdevice->memory.heap_count);
   struct anv_memory_heap *mem_heap =
      &pdevice->memory.heaps[mem_type->heapIndex];

   uint64_t mem_heap_used = p_atomic_read(&mem_heap->used);
   if (mem_heap_used + aligned_alloc_size > mem_heap->size)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   mem = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
   vk_object_base_init(&device->vk, &mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY);
   mem->type = mem_type;
   mem->map = NULL;
   mem->map_size = 0;
   mem->ahw = NULL;
   mem->host_ptr = NULL;

   enum anv_bo_alloc_flags alloc_flags = 0;

   const VkExportMemoryAllocateInfo *export_info = NULL;
   const VkImportAndroidHardwareBufferInfoANDROID *ahw_import_info = NULL;
   const VkImportMemoryFdInfoKHR *fd_info = NULL;
   const VkImportMemoryHostPointerInfoEXT *host_ptr_info = NULL;
   const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
   VkMemoryAllocateFlags vk_flags = 0;
   uint64_t client_address = 0;

   vk_foreach_struct_const(ext, pAllocateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         export_info = (void *)ext;
         break;

      case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         ahw_import_info = (void *)ext;
         break;

      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         fd_info = (void *)ext;
         break;

      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
         host_ptr_info = (void *)ext;
         break;

      case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO: {
         const VkMemoryAllocateFlagsInfo *flags_info = (void *)ext;
         vk_flags = flags_info->flags;
         break;
      }

      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         dedicated_info = (void *)ext;
         break;

      case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR: {
         const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *addr_info =
            (const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *)ext;
         client_address = addr_info->opaqueCaptureAddress;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   /* By default, we want all VkDeviceMemory objects to support CCS */
   if (device->physical->has_implicit_ccs)
      alloc_flags |= ANV_BO_ALLOC_IMPLICIT_CCS;

   if (vk_flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR)
      alloc_flags |= ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS;

   if ((export_info && export_info->handleTypes) ||
       (fd_info && fd_info->handleType) ||
       (host_ptr_info && host_ptr_info->handleType)) {
      /* Anything imported or exported is EXTERNAL */
      alloc_flags |= ANV_BO_ALLOC_EXTERNAL;

      /* We can't have implicit CCS on external memory with an AUX-table.
       * Doing so would require us to sync the aux tables across processes
       * which is impractical.
       */
      if (device->info.has_aux_map)
         alloc_flags &= ~ANV_BO_ALLOC_IMPLICIT_CCS;
   }

   /* Check if we need to support Android HW buffer export. If so,
    * create AHardwareBuffer and import memory from it.
    */
   bool android_export = false;
   if (export_info && export_info->handleTypes &
       VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
      android_export = true;

   if (ahw_import_info) {
      result = anv_import_ahw_memory(_device, mem, ahw_import_info);
      if (result != VK_SUCCESS)
         goto fail;

      goto success;
   } else if (android_export) {
      result = anv_create_ahw_memory(_device, mem, pAllocateInfo);
      if (result != VK_SUCCESS)
         goto fail;

      const VkImportAndroidHardwareBufferInfoANDROID import_info = {
         .buffer = mem->ahw,
      };
      result = anv_import_ahw_memory(_device, mem, &import_info);
      if (result != VK_SUCCESS)
         goto fail;

      goto success;
   }

   /* The Vulkan spec permits handleType to be 0, in which case the struct is
    * ignored.
    */
   if (fd_info && fd_info->handleType) {
      /* At the moment, we support only the below handle types. */
      assert(fd_info->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      result = anv_device_import_bo(device, fd_info->fd, alloc_flags,
                                    client_address, &mem->bo);
      if (result != VK_SUCCESS)
         goto fail;

      /* For security purposes, we reject importing the bo if it's smaller
       * than the requested allocation size.  This prevents a malicious client
       * from passing a buffer to a trusted client, lying about the size, and
       * telling the trusted client to try and texture from an image that goes
       * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
       * in the trusted client.  The trusted client can protect itself against
       * this sort of attack but only if it can trust the buffer size.
       */
      if (mem->bo->size < aligned_alloc_size) {
         result = vk_errorf(device, &device->vk.base,
                            VK_ERROR_INVALID_EXTERNAL_HANDLE,
                            "aligned allocationSize too large for "
                            "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
                            "%"PRIu64"B > %"PRIu64"B",
                            aligned_alloc_size, mem->bo->size);
         anv_device_release_bo(device, mem->bo);
         goto fail;
      }

      /* From the Vulkan spec:
       *
       *    "Importing memory from a file descriptor transfers ownership of
       *    the file descriptor from the application to the Vulkan
       *    implementation. The application must not perform any operations on
       *    the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd_info->fd);
      goto success;
   }

   if (host_ptr_info && host_ptr_info->handleType) {
      if (host_ptr_info->handleType ==
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
         result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         goto fail;
      }

      assert(host_ptr_info->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);

      result = anv_device_import_bo_from_host_ptr(device,
                                                  host_ptr_info->pHostPointer,
                                                  pAllocateInfo->allocationSize,
                                                  alloc_flags,
                                                  client_address,
                                                  &mem->bo);
      if (result != VK_SUCCESS)
         goto fail;

      mem->host_ptr = host_ptr_info->pHostPointer;
      goto success;
   }

   /* Regular allocate (not importing memory). */

   result = anv_device_alloc_bo(device, "user", pAllocateInfo->allocationSize,
                                alloc_flags, client_address, &mem->bo);
   if (result != VK_SUCCESS)
      goto fail;

   if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
      ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);

      /* Some legacy (non-modifiers) consumers need the tiling to be set on
       * the BO.  In this case, we have a dedicated allocation.
       */
      if (image->needs_set_tiling) {
         const uint32_t i915_tiling =
            isl_tiling_to_i915_tiling(image->planes[0].primary_surface.isl.tiling);
         int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
                                      image->planes[0].primary_surface.isl.row_pitch_B,
                                      i915_tiling);
         if (ret) {
            anv_device_release_bo(device, mem->bo);
            result = vk_errorf(device, &device->vk.base,
                               VK_ERROR_OUT_OF_DEVICE_MEMORY,
                               "failed to set BO tiling: %m");
            goto fail;
         }
      }
   }
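
   /* anv_gem_set_tiling() wraps the i915 SET_TILING ioctl; legacy,
    * non-modifier window-system consumers read the tiling back with
    * GET_TILING, which is why it has to be recorded on the BO itself for
    * these dedicated allocations.
    */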

 success:
   mem_heap_used = p_atomic_add_return(&mem_heap->used, mem->bo->size);
   if (mem_heap_used > mem_heap->size) {
      p_atomic_add(&mem_heap->used, -mem->bo->size);
      anv_device_release_bo(device, mem->bo);
      result = vk_errorf(device, &device->vk.base,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "Out of heap memory");
      goto fail;
   }

   pthread_mutex_lock(&device->mutex);
   list_addtail(&mem->link, &device->memory_objects);
   pthread_mutex_unlock(&device->mutex);

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

 fail:
   vk_free2(&device->vk.alloc, pAllocator, mem);

   return result;
}
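
/* For reference, a client importing a dma-buf through the path above would
 * chain the structs like this (illustrative sketch, not driver code; the
 * fd and sizes are placeholders):
 *
 *    VkImportMemoryFdInfoKHR import = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *       .fd = dmabuf_fd,   // hypothetical fd obtained elsewhere
 *    };
 *    VkMemoryAllocateInfo alloc = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
 *       .pNext = &import,
 *       .allocationSize = size,
 *       .memoryTypeIndex = mem_type_index,
 *    };
 *    vkAllocateMemory(device, &alloc, NULL, &memory);
 *
 * On success the fd is owned by the implementation, matching the spec quote
 * in the import path above.
 */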

VkResult anv_GetMemoryFdKHR(
    VkDevice                                    device_h,
    const VkMemoryGetFdInfoKHR*                 pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, dev, device_h);
   ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   return anv_device_export_bo(dev, mem->bo, pFd);
}
|
|
|
|
|
|
|
|
|
|
VkResult anv_GetMemoryFdPropertiesKHR(
|
2017-11-27 18:33:44 -08:00
|
|
|
VkDevice _device,
|
2019-01-08 18:04:54 +00:00
|
|
|
VkExternalMemoryHandleTypeFlagBits handleType,
|
2017-07-13 12:18:15 -07:00
|
|
|
int fd,
|
|
|
|
|
VkMemoryFdPropertiesKHR* pMemoryFdProperties)
|
|
|
|
|
{
|
2017-11-27 18:33:44 -08:00
|
|
|
ANV_FROM_HANDLE(anv_device, device, _device);
|
|
|
|
|
|
|
|
|
|
switch (handleType) {
|
2017-12-05 21:19:51 +01:00
|
|
|
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
|
2017-11-27 18:33:44 -08:00
|
|
|
/* dma-buf can be imported as any memory type */
|
|
|
|
|
pMemoryFdProperties->memoryTypeBits =
|
2020-01-17 22:23:30 -06:00
|
|
|
(1 << device->physical->memory.type_count) - 1;
|
2017-11-27 18:33:44 -08:00
|
|
|
return VK_SUCCESS;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
/* The valid usage section for this function says:
|
|
|
|
|
*
|
|
|
|
|
* "handleType must not be one of the handle types defined as
|
|
|
|
|
* opaque."
|
|
|
|
|
*
|
|
|
|
|
* So opaque handle types fall into the default "unsupported" case.
|
|
|
|
|
*/
|
2017-09-20 13:16:26 -07:00
|
|
|
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
2017-11-27 18:33:44 -08:00
|
|
|
}
|
2017-07-13 12:18:15 -07:00
|
|
|
}
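
/* For illustration (hypothetical numbers): with memory.type_count == 3,
 * the mask computed above is (1 << 3) - 1 == 0x7, i.e. bits 0..2 set, so
 * a dma-buf may be bound to any of the three exposed memory types.
 */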

VkResult anv_GetMemoryHostPointerPropertiesEXT(
   VkDevice                                     _device,
   VkExternalMemoryHandleTypeFlagBits           handleType,
   const void*                                  pHostPointer,
   VkMemoryHostPointerPropertiesEXT*            pMemoryHostPointerProperties)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(pMemoryHostPointerProperties->sType ==
          VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);

   switch (handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
      /* Host memory can be imported as any memory type. */
      pMemoryHostPointerProperties->memoryTypeBits =
         (1ull << device->physical->memory.type_count) - 1;

      return VK_SUCCESS;

   default:
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }
}

void anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   pthread_mutex_lock(&device->mutex);
   list_del(&mem->link);
   pthread_mutex_unlock(&device->mutex);

   if (mem->map)
      anv_UnmapMemory(_device, _mem);

   p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
                -mem->bo->size);

   anv_device_release_bo(device, mem->bo);

#if defined(ANDROID) && ANDROID_API_LEVEL >= 26
   if (mem->ahw)
      AHardwareBuffer_release(mem->ahw);
#endif

   vk_object_base_finish(&mem->base);
   vk_free2(&device->vk.alloc, pAllocator, mem);
}

VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->host_ptr) {
      *ppData = mem->host_ptr + offset;
      return VK_SUCCESS;
   }

   if (size == VK_WHOLE_SIZE)
      size = mem->bo->size - offset;

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *    assert(size != 0);
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->bo->size);

   /* FIXME: Is this supposed to be thread safe?  Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid.  We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32-bit userspace.
    */

   uint32_t gem_flags = 0;

   if (!device->info.has_llc &&
       (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
      gem_flags |= I915_MMAP_WC;

   /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
   uint64_t map_offset;
   if (!device->physical->has_mmap_offset)
      map_offset = offset & ~4095ull;
   else
      map_offset = 0;
   assert(offset >= map_offset);
   uint64_t map_size = (offset + size) - map_offset;

   /* Let's map whole pages */
   map_size = align_u64(map_size, 4096);
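
   /* Worked example (illustrative values): offset = 5000, size = 100 on a
    * kernel without mmap-offset support:
    *
    *    map_offset = 5000 & ~4095        = 4096
    *    map_size   = (5000 + 100) - 4096 = 1004  ->  aligned up to 4096
    *
    * and the pointer handed back below is map + (5000 - 4096) = map + 904.
    */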

   void *map = anv_gem_mmap(device, mem->bo->gem_handle,
                            map_offset, map_size, gem_flags);
   if (map == MAP_FAILED)
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);

   mem->map = map;
   mem->map_size = map_size;

   *ppData = mem->map + (offset - map_offset);

   return VK_SUCCESS;
}

void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL || mem->host_ptr)
      return;

   anv_gem_munmap(device, mem->map, mem->map_size);

   mem->map = NULL;
   mem->map_size = 0;
}

static void
clflush_mapped_ranges(struct anv_device *device,
                      uint32_t count,
                      const VkMappedMemoryRange *ranges)
{
   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
      if (ranges[i].offset >= mem->map_size)
         continue;

      gen_clflush_range(mem->map + ranges[i].offset,
                        MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
   }
}

VkResult anv_FlushMappedMemoryRanges(
    VkDevice                                    _device,
    uint32_t                                    memoryRangeCount,
    const VkMappedMemoryRange*                  pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   /* Make sure the writes we're flushing have landed. */
   __builtin_ia32_mfence();

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice                                    _device,
    uint32_t                                    memoryRangeCount,
    const VkMappedMemoryRange*                  pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   /* Make sure no reads get moved up above the invalidate. */
   __builtin_ia32_mfence();

   return VK_SUCCESS;
}
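
/* Summary of the fence placement above (illustrative, not driver code):
 *
 *    flush:       CPU writes -> mfence -> clflush    (writes land before
 *                                                     the lines are flushed)
 *    invalidate:  clflush -> mfence -> CPU reads     (no read is hoisted
 *                                                     above the invalidate)
 *
 * On LLC platforms both entry points return early because the CPU cache
 * hierarchy is already coherent with the GPU's view of memory.
 */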

void anv_GetBufferMemoryRequirements2(
    VkDevice                                    _device,
    const VkBufferMemoryRequirementsInfo2*      pInfo,
    VkMemoryRequirements2*                      pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    */
   uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;

   /* Base alignment requirement of a cache line */
   uint32_t alignment = 16;

   if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
      alignment = MAX2(alignment, ANV_UBO_ALIGNMENT);

   pMemoryRequirements->memoryRequirements.size = buffer->size;
   pMemoryRequirements->memoryRequirements.alignment = alignment;

   /* Storage and uniform buffers should have their size aligned to
    * 32 bits to avoid boundary checks when the last DWord is not complete.
    * This ensures that no internal padding is needed for 16-bit types.
    */
   if (device->robust_buffer_access &&
       (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
        buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
      pMemoryRequirements->memoryRequirements.size = align_u64(buffer->size, 4);

   pMemoryRequirements->memoryRequirements.memoryTypeBits = memory_types;

   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *requirements = (void *)ext;
         requirements->prefersDedicatedAllocation = false;
         requirements->requiresDedicatedAllocation = false;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}
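
/* For example (illustrative): a 13-byte buffer with
 * VK_BUFFER_USAGE_STORAGE_BUFFER_BIT on a device with robustBufferAccess
 * enabled reports size = align_u64(13, 4) = 16, so a bounds-checked DWord
 * access of the final element never has to special-case a partial DWord.
 */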

void anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

static void
anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
   ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);

   assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO);

   if (mem) {
      buffer->address = (struct anv_address) {
         .bo = mem->bo,
         .offset = pBindInfo->memoryOffset,
      };
   } else {
      buffer->address = ANV_NULL_ADDRESS;
   }
}

VkResult anv_BindBufferMemory2(
    VkDevice                                    device,
    uint32_t                                    bindInfoCount,
    const VkBindBufferMemoryInfo*               pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; i++)
      anv_bind_buffer_memory(&pBindInfos[i]);

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparse(
    VkQueue                                     _queue,
    uint32_t                                    bindInfoCount,
    const VkBindSparseInfo*                     pBindInfo,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   if (anv_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}

// Event functions

VkResult anv_CreateEvent(
    VkDevice                                    _device,
    const VkEventCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkEvent*                                    pEvent)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_event *event;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);

   event = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*event), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (event == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
   event->state = anv_state_pool_alloc(&device->dynamic_state_pool,
                                       sizeof(uint64_t), 8);
   *(uint64_t *)event->state.map = VK_EVENT_RESET;

   *pEvent = anv_event_to_handle(event);

   return VK_SUCCESS;
}
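
/* Note (illustrative): an anv event is nothing more than a uint64_t slot
 * in the dynamic state pool holding VK_EVENT_SET or VK_EVENT_RESET.  The
 * GPU side can presumably signal or poll it with a plain command-streamer
 * memory write or read, while the host-side entry points below just
 * access that slot directly through its CPU mapping.
 */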

void anv_DestroyEvent(
    VkDevice                                    _device,
    VkEvent                                     _event,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   if (!event)
      return;

   anv_state_pool_free(&device->dynamic_state_pool, event->state);

   vk_object_base_finish(&event->base);
   vk_free2(&device->vk.alloc, pAllocator, event);
}

VkResult anv_GetEventStatus(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return *(uint64_t *)event->state.map;
}

VkResult anv_SetEvent(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_event, event, _event);

   *(uint64_t *)event->state.map = VK_EVENT_SET;

   return VK_SUCCESS;
}

VkResult anv_ResetEvent(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_event, event, _event);

   *(uint64_t *)event->state.map = VK_EVENT_RESET;

   return VK_SUCCESS;
}

// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkBuffer*                                   pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   /* Don't allow creating buffers bigger than our address space.  The real
    * issue here is that we may align up the buffer size and we don't want
    * that rounding to overflow.  In any case, no one has any business
    * allocating a buffer larger than our GTT size.
    */
   if (pCreateInfo->size > device->physical->gtt_size)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &buffer->base, VK_OBJECT_TYPE_BUFFER);
   buffer->create_flags = pCreateInfo->flags;
   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->address = ANV_NULL_ADDRESS;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void anv_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_object_base_finish(&buffer->base);
   vk_free2(&device->vk.alloc, pAllocator, buffer);
}

VkDeviceAddress anv_GetBufferDeviceAddress(
    VkDevice                                    device,
    const VkBufferDeviceAddressInfoKHR*         pInfo)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);

   assert(!anv_address_is_null(buffer->address));
   assert(buffer->address.bo->flags & EXEC_OBJECT_PINNED);

   return anv_address_physical(buffer->address);
}
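
/* Note (illustrative): handing a raw GPU address to the application is
 * only sound because the assertion above guarantees the BO is pinned
 * (EXEC_OBJECT_PINNED), i.e. the kernel will not relocate it, so the
 * address stays valid for the lifetime of the allocation.
 */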

uint64_t anv_GetBufferOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkBufferDeviceAddressInfoKHR*         pInfo)
{
   return 0;
}

uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo)
{
   ANV_FROM_HANDLE(anv_device_memory, memory, pInfo->memory);

   assert(memory->bo->flags & EXEC_OBJECT_PINNED);
   assert(memory->bo->has_client_visible_address);

   return intel_48b_address(memory->bo->offset);
}

void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
                              enum isl_format format,
                              isl_surf_usage_flags_t usage,
                              struct anv_address address,
                              uint32_t range, uint32_t stride)
{
   isl_buffer_fill_state(&device->isl_dev, state.map,
                         .address = anv_address_physical(address),
                         .mocs = isl_mocs(&device->isl_dev, usage,
                                          address.bo && address.bo->is_external),
                         .size_B = range,
                         .format = format,
                         .swizzle = ISL_SWIZZLE_IDENTITY,
                         .stride_B = stride);
}

void anv_DestroySampler(
    VkDevice                                    _device,
    VkSampler                                   _sampler,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   if (!sampler)
      return;

   if (sampler->bindless_state.map) {
      anv_state_pool_free(&device->dynamic_state_pool,
                          sampler->bindless_state);
   }

   if (sampler->custom_border_color.map) {
      anv_state_reserved_pool_free(&device->custom_border_colors,
                                   sampler->custom_border_color);
   }

   vk_object_base_finish(&sampler->base);
   vk_free2(&device->vk.alloc, pAllocator, sampler);
}

VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer);

   /* VK_KHR_imageless_framebuffer extension says:
    *
    *    If flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
    *    parameter pAttachments is ignored.
    */
   if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR)) {
      size += sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
      framebuffer = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (framebuffer == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
         ANV_FROM_HANDLE(anv_image_view, iview, pCreateInfo->pAttachments[i]);
         framebuffer->attachments[i] = iview;
      }
      framebuffer->attachment_count = pCreateInfo->attachmentCount;
   } else {
      framebuffer = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (framebuffer == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      framebuffer->attachment_count = 0;
   }

   vk_object_base_init(&device->vk, &framebuffer->base,
                       VK_OBJECT_TYPE_FRAMEBUFFER);

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_object_base_finish(&fb->base);
   vk_free2(&device->vk.alloc, pAllocator, fb);
}

static const VkTimeDomainEXT anv_time_domains[] = {
   VK_TIME_DOMAIN_DEVICE_EXT,
   VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
#ifdef CLOCK_MONOTONIC_RAW
   VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
#endif
};

VkResult anv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
   VkPhysicalDevice                             physicalDevice,
   uint32_t                                     *pTimeDomainCount,
   VkTimeDomainEXT                              *pTimeDomains)
{
   int d;
   VK_OUTARRAY_MAKE(out, pTimeDomains, pTimeDomainCount);

   for (d = 0; d < ARRAY_SIZE(anv_time_domains); d++) {
      vk_outarray_append(&out, i) {
         *i = anv_time_domains[d];
      }
   }

   return vk_outarray_status(&out);
}

static uint64_t
anv_clock_gettime(clockid_t clock_id)
{
   struct timespec current;
   int ret;

   ret = clock_gettime(clock_id, &current);
#ifdef CLOCK_MONOTONIC_RAW
   if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
      ret = clock_gettime(CLOCK_MONOTONIC, &current);
#endif
   if (ret < 0)
      return 0;

   return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
}
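
/* For illustration: a timespec of { .tv_sec = 2, .tv_nsec = 500 } becomes
 * 2 * 1000000000 + 500 = 2000000500 ns.  Returning 0 on failure folds the
 * error into the sample instead of propagating it, which is acceptable
 * here because callers only use the result as a timestamp reading.
 */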

VkResult anv_GetCalibratedTimestampsEXT(
   VkDevice                                     _device,
   uint32_t                                     timestampCount,
   const VkCalibratedTimestampInfoEXT           *pTimestampInfos,
   uint64_t                                     *pTimestamps,
   uint64_t                                     *pMaxDeviation)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   uint64_t timestamp_frequency = device->info.timestamp_frequency;
   int ret;
   int d;
   uint64_t begin, end;
   uint64_t max_clock_period = 0;

#ifdef CLOCK_MONOTONIC_RAW
   begin = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   begin = anv_clock_gettime(CLOCK_MONOTONIC);
#endif

   for (d = 0; d < timestampCount; d++) {
      switch (pTimestampInfos[d].timeDomain) {
      case VK_TIME_DOMAIN_DEVICE_EXT:
         ret = anv_gem_reg_read(device->fd, TIMESTAMP | I915_REG_READ_8B_WA,
                                &pTimestamps[d]);

         if (ret != 0) {
            return anv_device_set_lost(device, "Failed to read the TIMESTAMP "
                                               "register: %m");
         }
         uint64_t device_period = DIV_ROUND_UP(1000000000, timestamp_frequency);
         max_clock_period = MAX2(max_clock_period, device_period);
         break;
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
         pTimestamps[d] = anv_clock_gettime(CLOCK_MONOTONIC);
         max_clock_period = MAX2(max_clock_period, 1);
         break;

#ifdef CLOCK_MONOTONIC_RAW
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
         pTimestamps[d] = begin;
         break;
#endif
      default:
         pTimestamps[d] = 0;
         break;
      }
   }

#ifdef CLOCK_MONOTONIC_RAW
   end = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   end = anv_clock_gettime(CLOCK_MONOTONIC);
#endif

   /*
    * The maximum deviation is the sum of the interval over which we
    * perform the sampling and the maximum period of any sampled
    * clock.  That's because the maximum skew between any two sampled
    * clock edges is when the sampled clock with the largest period is
    * sampled at the end of that period but right at the beginning of the
    * sampling interval and some other clock is sampled right at the
    * beginning of its sampling period and right at the end of the
    * sampling interval.  Let's assume the GPU has the longest clock
    * period and that the application is sampling GPU and monotonic:
    *
    *                                s                 e
    *            w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
    * Raw        -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
    *
    *                      g
    *            0         1         2         3
    * GPU        -----_____-----_____-----_____-----_____
    *
    *                                          m
    *                  x y z 0 1 2 3 4 5 6 7 8 9 a b c
    * Monotonic        -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
    *
    * Interval               <----------------->
    * Deviation      <-------------------------->
    *
    *    s = read(raw)       2
    *    g = read(GPU)       1
    *    m = read(monotonic) 2
    *    e = read(raw)       b
    *
    * We round the sample interval up by one tick to cover sampling error
    * in the interval clock.
    */

   uint64_t sample_interval = end - begin + 1;

   *pMaxDeviation = sample_interval + max_clock_period;

   return VK_SUCCESS;
}
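
/* Numeric example (illustrative values): with a 19.2 MHz GPU timestamp
 * clock, device_period = DIV_ROUND_UP(1000000000, 19200000) = 53 ns.  If
 * the two CLOCK_MONOTONIC_RAW reads bracketing the loop are 800 ns apart,
 * the reported deviation is (800 + 1) + 53 = 854 ns.
 */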

void anv_GetPhysicalDeviceMultisamplePropertiesEXT(
    VkPhysicalDevice                            physicalDevice,
    VkSampleCountFlagBits                       samples,
    VkMultisamplePropertiesEXT*                 pMultisampleProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);

   assert(pMultisampleProperties->sType ==
          VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT);

   VkExtent2D grid_size;
   if (samples & isl_device_get_sample_counts(&physical_device->isl_dev)) {
      grid_size.width = 1;
      grid_size.height = 1;
   } else {
      grid_size.width = 0;
      grid_size.height = 0;
   }
   pMultisampleProperties->maxSampleLocationGridSize = grid_size;

   vk_foreach_struct(ext, pMultisampleProperties->pNext)
      anv_debug_ignored_stype(ext->sType);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *   - Loader interface v0 is incompatible with later versions.  We don't
    *     support it.
    *
    *   - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr().  The ICD must statically expose
    *         this entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless
    *         it is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA.
    *         The ICD must initialize VK_LOADER_DATA.loadMagic to
    *         ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR().  The ICD must be capable of working
    *         with such loader-managed surfaces.
    *
    *   - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion().  The ICD must
    *         statically expose this entrypoint.
    *
    *   - Loader interface v3 differs from v2 in:
    *       - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *         vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *         because the loader no longer does so.
    *
    *   - Loader interface v4 differs from v3 in:
    *       - The ICD must implement vk_icdGetPhysicalDeviceProcAddr().
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 4u);
   return VK_SUCCESS;
}