drm-uapi: Sync xe_drm.h

Sync with:
commit 59260fe5821ad108d0fda8a4a4fe0448e9821f27
Merge: 9ec3c8ee16a0 0b075f82935e
    Author: Dave Airlie <airlied@redhat.com>

        Merge tag 'drm-xe-next-2025-12-30' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38425>
This commit is contained in:
José Roberto de Souza 2025-11-11 08:21:17 -08:00
parent eccdfb397e
commit 6df105d3ff

View file

@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
* - &DRM_IOCTL_XE_MADVISE
* - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@ -102,6 +104,9 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e
/* Must be kept compact -- no holes */
@ -117,6 +122,9 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
/**
* DOC: Xe IOCTL Extensions
@ -204,8 +212,12 @@ struct drm_xe_ext_set_property {
/** @pad: MBZ */
__u32 pad;
/** @value: property value */
__u64 value;
union {
/** @value: property value */
__u64 value;
/** @ptr: pointer to user value */
__u64 ptr;
};
/** @reserved: Reserved */
__u64 reserved[2];
@ -397,6 +409,9 @@ struct drm_xe_query_mem_regions {
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
* device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
* This is exposed only on Xe2+.
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@ -415,6 +430,7 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@ -760,8 +776,16 @@ struct drm_xe_device_query {
* gem creation
*
* The @flags can be:
* - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
* - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
* allocation strategy by deferring physical memory allocation
* until the object is either bound to a virtual memory region via
* VM_BIND or accessed by the CPU. As a result, no backing memory is
* reserved at the time of GEM object creation.
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT - Indicates that the GEM object is
* intended for scanout via the display engine. When set, kernel ensures
* that the allocation is placed in a memory region compatible with the
* display engine requirements. This may impose restrictions on tiling,
* alignment, and memory placement to guarantee proper display functionality.
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
* will always use the CPU accessible part of VRAM. This is important
@ -777,6 +801,17 @@ struct drm_xe_device_query {
* need to use VRAM for display surfaces, therefore the kernel requires
* setting this flag for such objects, otherwise an error is thrown on
* small-bar systems.
* - %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION - Allows userspace to
* hint that compression (CCS) should be disabled for the buffer being
* created. This can avoid unnecessary memory operations and CCS state
* management.
* On pre-Xe2 platforms, this flag is currently rejected as compression
* control is not supported via PAT index. On Xe2+ platforms, compression
* is controlled via PAT entries. If this flag is set, the driver will reject
* any VM bind that requests a PAT index enabling compression for this BO.
* Note: On dGPU platforms, there is currently no change in behavior with
* this flag, but future improvements may leverage it. The current benefit is
* primarily applicable to iGPU platforms.
*
* @cpu_caching supports the following values:
* - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
@ -823,6 +858,7 @@ struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
#define DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION (1 << 3)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@ -917,13 +953,17 @@ struct drm_xe_gem_mmap_offset {
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
*
* The @flags can be:
* - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
* - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
* space of the VM to scratch page. A vm_bind would overwrite the scratch
* page mapping. This flag is mutually exclusive with the
* %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception on xe2 and
* xe3 platforms.
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
* don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
* DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
* used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
* don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
* DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
* together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@ -999,6 +1039,24 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
* - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with
* %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying
* CPU address space range is unmapped (typically with munmap(2) or brk(2)).
* The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values
* that were present immediately after the &DRM_IOCTL_XE_VM_BIND.
* The reset GPU virtual address range is the intersection of the range bound
* using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range
* unmapped.
* This functionality is present to mimic the behaviour of CPU address space
* madvises set using madvise(2), which are typically reset on unmap.
* Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus
* not invoke autoreset. Neither will stack variables going out of scope.
* Therefore it's recommended to always explicitly reset the madvises when
* freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
* the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@ -1101,9 +1159,11 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6)
/** @flags: Bind flags */
__u32 flags;
#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
@ -1206,9 +1266,28 @@ struct drm_xe_vm_bind {
* there is no need to explicitly set that. When a queue of type
* %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
* (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running.
* The user is expected to query the PXP status via the query ioctl (see
* %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
* attempting to create a queue with this property. When a queue is created
* before PXP is ready, the ioctl will return -EBUSY if init is still in
* progress or -EIO if init failed.
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP - Create a multi-queue group
* or add secondary queues to a multi-queue group.
* If the extension's 'value' field has %DRM_XE_MULTI_GROUP_CREATE flag set,
* then a new multi-queue group is created with this queue as the primary queue
* (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
* queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
* If the extension's 'value' field has %DRM_XE_MULTI_GROUP_KEEP_ACTIVE flag
* set, then the multi-queue group is kept active after the primary queue is
* destroyed.
* All the other non-relevant bits of extension's 'value' field while adding the
* primary or the secondary queues of the group must be set to 0.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
* priority within the multi-queue group. Current valid priority values are 0-2
* (default is 1), with higher values indicating higher priority.
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
@ -1249,6 +1328,11 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
#define DRM_XE_MULTI_GROUP_KEEP_ACTIVE (1ull << 62)
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@ -1385,7 +1469,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
* different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
* different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@ -1420,6 +1504,7 @@ struct drm_xe_exec {
/** @exec_queue_id: Exec queue ID for the batch buffer */
__u32 exec_queue_id;
#define DRM_XE_MAX_SYNCS 1024
/** @num_syncs: Amount of struct drm_xe_sync in array. */
__u32 num_syncs;
@ -1608,6 +1693,12 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
DRM_XE_OA_UNIT_TYPE_OAM,
/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
DRM_XE_OA_UNIT_TYPE_OAM_SAG,
/** @DRM_XE_OA_UNIT_TYPE_MERT: MERT OA unit */
DRM_XE_OA_UNIT_TYPE_MERT,
};
/**
@ -1629,12 +1720,20 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)
#define DRM_XE_OA_CAPS_OA_UNIT_GT_ID (1 << 5)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
/** @gt_id: gt id for this OA unit */
__u16 gt_id;
/** @reserved1: MBZ */
__u16 reserved1[3];
/** @reserved: MBZ */
__u64 reserved[4];
__u64 reserved[3];
/** @num_engines: number of engines in @eci array */
__u64 num_engines;
@ -1961,6 +2060,307 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
/**
* struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
*
* This structure is used to set memory attributes for a virtual address range
* in a VM. The type of attribute is specified by @type, and the corresponding
* union member is used to provide additional parameters for @type.
*
* Supported attribute types:
* - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
* - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
* - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
*
* Example:
*
* .. code-block:: C
*
* struct drm_xe_madvise madvise = {
* .vm_id = vm_id,
* .start = 0x100000,
* .range = 0x2000,
* .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
* .atomic.val = DRM_XE_ATOMIC_DEVICE,
* };
*
* ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
*
*/
struct drm_xe_madvise {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @start: start of the virtual address range */
__u64 start;
/** @range: size of the virtual address range */
__u64 range;
/** @vm_id: vm_id of the virtual range */
__u32 vm_id;
#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
#define DRM_XE_MEM_RANGE_ATTR_PAT 2
/** @type: type of attribute */
__u32 type;
union {
/**
* @preferred_mem_loc: preferred memory location
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
*
* Supported values for @preferred_mem_loc.devmem_fd:
* - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of fault tile as preferred loc
* - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
*
* Supported values for @preferred_mem_loc.migration_policy:
* - DRM_XE_MIGRATE_ALL_PAGES
* - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
*/
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
/**
* @preferred_mem_loc.devmem_fd:
* Device file-descriptor of the device where the
* preferred memory is located, or one of the
* above special values. Please also see
* @preferred_mem_loc.region_instance below.
*/
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
/**
* @preferred_mem_loc.region_instance: Region instance.
* MBZ if @devmem_fd <= &DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
* Otherwise should point to the desired device
* VRAM instance of the device indicated by
* @preferred_mem_loc.devmem_fd.
*/
__u16 region_instance;
/** @preferred_mem_loc.reserved: Reserved */
__u64 reserved;
} preferred_mem_loc;
/**
* @atomic: Atomic access policy
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
*
* Supported values for @atomic.val:
* - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
* Support both GPU and CPU atomic operations for system allocator.
* Support GPU atomic operations for normal(bo) allocator.
* - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
* - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
* - DRM_XE_ATOMIC_CPU: Support CPU atomic only, no GPU atomics supported.
*/
struct {
#define DRM_XE_ATOMIC_UNDEFINED 0
#define DRM_XE_ATOMIC_DEVICE 1
#define DRM_XE_ATOMIC_GLOBAL 2
#define DRM_XE_ATOMIC_CPU 3
/** @atomic.val: value of atomic operation */
__u32 val;
/** @atomic.pad: MBZ */
__u32 pad;
/** @atomic.reserved: Reserved */
__u64 reserved;
} atomic;
/**
* @pat_index: Page attribute table index
*
* Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
*/
struct {
/** @pat_index.val: PAT index value */
__u32 val;
/** @pat_index.pad: MBZ */
__u32 pad;
/** @pat_index.reserved: Reserved */
__u64 reserved;
} pat_index;
};
/** @reserved: Reserved */
__u64 reserved[2];
};
/**
* struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*
* This structure is provided by userspace and filled by KMD in response to the
* DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes memory attributes of
* memory ranges within a user specified address range in a VM.
*
* The structure includes information such as atomic access policy,
* page attribute table (PAT) index, and preferred memory location.
* Userspace allocates an array of these structures and passes a pointer to the
* ioctl to retrieve attributes for each memory range.
*
* @extensions: Pointer to the first extension struct, if any
* @start: Start address of the memory range
* @end: End address of the virtual memory range
*
*/
struct drm_xe_mem_range_attr {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @start: start of the memory range */
__u64 start;
/** @end: end of the memory range */
__u64 end;
/** @preferred_mem_loc: preferred memory location */
struct {
/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
__u32 devmem_fd;
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u32 migration_policy;
} preferred_mem_loc;
/** @atomic: Atomic access policy */
struct {
/** @atomic.val: atomic attribute */
__u32 val;
/** @atomic.reserved: Reserved */
__u32 reserved;
} atomic;
/** @pat_index: Page attribute table index */
struct {
/** @pat_index.val: PAT index */
__u32 val;
/** @pat_index.reserved: Reserved */
__u32 reserved;
} pat_index;
/** @reserved: Reserved */
__u64 reserved[2];
};
/**
* struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*
* This structure is used to query memory attributes of memory regions
* within a user specified address range in a VM. It provides detailed
* information about each memory range, including atomic access policy,
* page attribute table (PAT) index, and preferred memory location.
*
* Userspace first calls the ioctl with @num_mem_ranges = 0,
* @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
* the number of memory regions and size of each memory range attribute.
* Then, it allocates a buffer of that size and calls the ioctl again to fill
* the buffer with memory range attributes.
*
* If the second call fails with -ENOSPC, it means memory ranges changed between
* the first call and now; retry the IOCTL again with @num_mem_ranges = 0,
* @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL followed by
* the second ioctl call.
*
* Example:
*
* .. code-block:: C
*
* struct drm_xe_vm_query_mem_range_attr query = {
* .vm_id = vm_id,
* .start = 0x100000,
* .range = 0x2000,
* };
*
* // First ioctl call to get num of mem regions and sizeof each attribute
* ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
*
* // Allocate buffer for the memory region attributes
* char *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
* char *ptr_start = ptr;
*
* query.vector_of_mem_attr = (uintptr_t)ptr;
*
* // Second ioctl call to actually fill the memory attributes
* ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
*
* // Iterate over the returned memory region attributes
* for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
* struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
*
* // Do something with attr
*
* // Move pointer by one entry
* ptr += query.sizeof_mem_range_attr;
* }
*
* free(ptr_start);
*/
struct drm_xe_vm_query_mem_range_attr {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @vm_id: vm_id of the virtual range */
__u32 vm_id;
/** @num_mem_ranges: number of mem_ranges in range */
__u32 num_mem_ranges;
/** @start: start of the virtual address range */
__u64 start;
/** @range: size of the virtual address range */
__u64 range;
/** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
__u64 sizeof_mem_range_attr;
/** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
__u64 vector_of_mem_attr;
/** @reserved: Reserved */
__u64 reserved[2];
};
/**
* struct drm_xe_exec_queue_set_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY
*
* Sets execution queue properties dynamically.
* Currently only the %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY
* property can be dynamically set.
*/
struct drm_xe_exec_queue_set_property {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
/** @property: property to set */
__u32 property;
/** @value: property value */
__u64 value;
/** @reserved: Reserved */
__u64 reserved[2];
};
#if defined(__cplusplus)
}
#endif