amdgpu: UAPI for AMDGPU usermode queues

This patch adds the UAPI interface changes for AMDGPU usermode
queues, semaphores and a new AMDGPU GEM domain for doorbells.
Usermode queues allow a userspace process to create
and submit its graphics/compute/SDMA work directly to the GPU.

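For context, a minimal userspace sketch of the create/free flow added by
this patch is shown below. It is only an illustration: the helper names and
the assumption that the doorbell BO, ring, RPTR/WPTR and shadow/CSA buffers
have already been allocated and mapped through the existing GEM/VA ioctls
are not part of this patch.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"   /* the UAPI header modified by this patch */

/* Hypothetical helper: create a GFX usermode queue. All VAs and the doorbell
 * handle are assumed to have been set up beforehand by the caller. */
static int userq_create_gfx(int fd, uint32_t doorbell_handle,
                            uint64_t queue_va, uint64_t rptr_va,
                            uint64_t wptr_va, uint64_t shadow_va,
                            uint64_t csa_va, uint32_t *queue_id)
{
        struct drm_amdgpu_userq_mqd_gfx11 mqd = {
                .shadow_va = shadow_va,  /* sizes queried via AMDGPU_INFO */
                .csa_va    = csa_va,
        };
        union drm_amdgpu_userq args;
        int ret;

        memset(&args, 0, sizeof(args));
        args.in.op              = AMDGPU_USERQ_OP_CREATE;
        args.in.ip_type         = AMDGPU_HW_IP_GFX;
        args.in.doorbell_handle = doorbell_handle;
        args.in.doorbell_offset = 0;            /* first doorbell slot */
        args.in.queue_va        = queue_va;
        args.in.queue_size      = 256 * 1024;   /* 256-byte aligned */
        args.in.rptr_va         = rptr_va;      /* >= 8 bytes, 8-byte aligned */
        args.in.wptr_va         = wptr_va;
        args.in.mqd             = (uintptr_t)&mqd;
        args.in.mqd_size        = sizeof(mqd);

        ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);
        if (ret)
                return ret;

        *queue_id = args.out.queue_id;  /* used later for free/signal */
        return 0;
}

/* Hypothetical helper: destroy a previously created usermode queue. */
static int userq_free(int fd, uint32_t queue_id)
{
        union drm_amdgpu_userq args;

        memset(&args, 0, sizeof(args));
        args.in.op       = AMDGPU_USERQ_OP_FREE;
        args.in.queue_id = queue_id;    /* everything else is ignored */
        return drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);
}
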
v2:(Marek)
 - Add csa support for SDMA queue.
 - Rename UAPI objects and struct as per UAPI review. (Shashank)

v3:(Yogesh)
 - Rename UAPI timeline* objects as per UAPI review. (Arvind)

v4: (Marek)
 - Drop AMDGPU_USERQ_BO_WRITE as this should not be a global option
   of the IOCTL; it should be an option per buffer. Hence, add separate
   arrays for read and write BO handles. (Arun)
 - Change num_fences and flags to __u16 and place num_fences next to
   flags for optimal padding and size. (Arun)

 - Fix the 32-bit holes issue in struct drm_amdgpu_gem_va as per
   UAPI review. (Arvind)

v5:(Marek/Pierre-Eric)
  - Add more detailed parameter descriptions for the signal and wait IOCTL calls.
  - Remove the unused structure fields in the signal and wait structs.
  - Add separate arrays of read and write BO handles. (Arun)

  - Remove the unused flags parameter from the usermode queue UAPI structure
  - Clarify comments on top of drm_amdgpu_userq_in
  - Clarify comment for queue_id (in)
  - Clarify comment for mqd
  - Clarify comment for compute MQD size
  - Clarify comment for queue_id (out)
  - Add padding variables in the userqueue in/out structures. (Shashank)

v6:(Pierre-Eric)
  - Modify the function parameter names and struct
    field names as per the review comments. (Arun)

v7:(Marek)
  - Modify the structure field name and comments. (Arun)

  - Rename vm_timeline_syncobj and add comment for
    vm_timeline_point.
  - Remove GDS buffer support from MQD. (Arvind)

v8:(Pierre-Eric)
  - Modify the function parameter names.

Cc: Koenig, Christian <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
Signed-off-by: Arvind Yadav <arvind.yadav@amd.com>
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>

@@ -54,6 +54,9 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -71,6 +74,9 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
@@ -317,6 +323,241 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
/* user queue IOCTL operations */
#define AMDGPU_USERQ_OP_CREATE 1
#define AMDGPU_USERQ_OP_FREE 2
/*
* This structure is a container to pass input configuration
* info for all supported userqueue related operations.
* For operation AMDGPU_USERQ_OP_CREATE: the user is expected
* to set all fields except the parameter 'queue_id'.
* For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
* to be set is 'queue_id'; everything else is ignored.
*/
struct drm_amdgpu_userq_in {
/** AMDGPU_USERQ_OP_* */
__u32 op;
/** Queue id passed for operation USERQ_OP_FREE */
__u32 queue_id;
/** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
__u32 ip_type;
/**
* @doorbell_handle: the handle of doorbell GEM object
* associated to this userqueue client.
*/
__u32 doorbell_handle;
/**
* @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
* Kernel will generate absolute doorbell offset using doorbell_handle
* and doorbell_offset in the doorbell bo.
*/
__u32 doorbell_offset;
__u32 _pad;
/**
* @queue_va: Virtual address of the GPU memory which holds the queue
* object. The queue holds the workload packets.
*/
__u64 queue_va;
/**
* @queue_size: Size of the queue in bytes, this needs to be 256-byte
* aligned.
*/
__u64 queue_size;
/**
* @rptr_va: Virtual address of the GPU memory which holds the ring RPTR.
* This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*/
__u64 rptr_va;
/**
* @wptr_va: Virtual address of the GPU memory which holds the ring WPTR.
* This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*
* Queue, RPTR and WPTR can come from the same object, as long as the size
* and alignment related requirements are met.
*/
__u64 wptr_va;
/**
* @mqd: MQD (memory queue descriptor) is a set of parameters which allow
* the GPU to uniquely define and identify a usermode queue.
*
* MQD data can be of different size for different GPU IP/engine and
* their respective versions/revisions, so this points to a __u64 *
* which holds IP specific MQD of this usermode queue.
*/
__u64 mqd;
/**
* @mqd_size: Size of the MQD data in bytes; it must match the MQD structure
* size of the respective engine/revision defined in UAPI, e.g. for
* gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11).
*/
__u64 mqd_size;
};
/* The structure to carry output of userqueue ops */
struct drm_amdgpu_userq_out {
/**
* For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
* queue ID to represent the newly created userqueue in the system, otherwise
* it should be ignored.
*/
__u32 queue_id;
__u32 _pad;
};
union drm_amdgpu_userq {
struct drm_amdgpu_userq_in in;
struct drm_amdgpu_userq_out out;
};
/* GFX V11 IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_gfx11 {
/**
* @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 shadow_va;
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 csa_va;
};
/* GFX V11 SDMA IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_sdma_gfx11 {
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* This must be from a separate GPU object; use the AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 csa_va;
};
/* GFX V11 Compute IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_compute_gfx11 {
/**
* @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
* This must be from a separate GPU object; use the AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 eop_va;
};
/* userq signal/wait ioctl */
struct drm_amdgpu_userq_signal {
/**
* @queue_id: Queue handle used by the userq fence creation function
* to retrieve the WPTR.
*/
__u32 queue_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to be signaled.
*/
__u64 syncobj_handles;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u64 num_syncobj_handles;
/**
* @bo_read_handles: The list of BO handles that the submitted user queue job
* is using for read only. This will update BO fences in the kernel.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of BO handles that the submitted user queue job
* is using for write only. This will update BO fences in the kernel.
*/
__u64 bo_write_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
};
struct drm_amdgpu_userq_fence_info {
/**
* @va: A gpu address allocated for each queue which stores the
* read pointer (RPTR) value.
*/
__u64 va;
/**
* @value: A 64-bit value that represents the write pointer (WPTR) of the
* queue commands, which is compared with the RPTR value to signal the
* fences.
*/
__u64 value;
};
struct drm_amdgpu_userq_wait {
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 syncobj_handles;
/**
* @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
* the user queue job to get the va/value pairs at given @syncobj_timeline_points.
*/
__u64 syncobj_timeline_handles;
/**
* @syncobj_timeline_points: The list of timeline syncobj points submitted by the
* user queue job for the corresponding @syncobj_timeline_handles.
*/
__u64 syncobj_timeline_points;
/**
* @bo_read_handles: The list of read BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of write BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_write_handles;
/**
* @num_syncobj_timeline_handles: A count that represents the number of timeline
* syncobj handles in @syncobj_timeline_handles.
*/
__u16 num_syncobj_timeline_handles;
/**
* @num_fences: This field can be used both as input and output. As input it defines
* the maximum number of fences that can be returned and as output it will specify
* how many fences were actually returned from the ioctl.
*/
__u16 num_fences;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u32 num_syncobj_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
/**
* @out_fences: The field is a return value from the ioctl containing the list of
* address/value pairs to wait for.
*/
__u64 out_fences;
};
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@@ -590,6 +831,19 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
/**
* vm_timeline_point is a sequence number used to add a new timeline point.
*/
__u64 vm_timeline_point;
/**
* The VM page table update fence is installed in the given
* vm_timeline_syncobj_out at vm_timeline_point.
*/
__u32 vm_timeline_syncobj_out;
/** the number of syncobj handles in @input_fence_syncobj_handles */
__u32 num_syncobj_handles;
/** Array of sync object handles to wait for the given input fences */
__u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
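
To show how the new signal/wait structures above are meant to be used
together, here is a hedged userspace sketch. The helper names and handle
arrays are illustrative only; queue, syncobj and BO handles are assumed to
exist already, and error handling is kept minimal.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

/* Attach the fence of the work most recently queued on 'queue_id' to one
 * syncobj and to the BOs that the submission reads and writes. */
static int userq_signal(int fd, uint32_t queue_id, uint32_t syncobj,
                        uint32_t *read_bos, uint32_t num_read_bos,
                        uint32_t *write_bos, uint32_t num_write_bos)
{
        struct drm_amdgpu_userq_signal args = {
                .queue_id             = queue_id,
                .syncobj_handles      = (uintptr_t)&syncobj,
                .num_syncobj_handles  = 1,
                .bo_read_handles      = (uintptr_t)read_bos,
                .num_bo_read_handles  = num_read_bos,
                .bo_write_handles     = (uintptr_t)write_bos,
                .num_bo_write_handles = num_write_bos,
        };

        return drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_SIGNAL, &args);
}

/* Collect the (va, value) pairs a dependent queue has to wait on before it
 * may execute work that depends on the given syncobjs and read BOs. */
static int userq_wait(int fd, uint32_t *syncobjs, uint32_t num_syncobjs,
                      uint32_t *read_bos, uint32_t num_read_bos,
                      struct drm_amdgpu_userq_fence_info *fences,
                      uint16_t *num_fences /* in: capacity, out: returned */)
{
        struct drm_amdgpu_userq_wait args = {
                .syncobj_handles     = (uintptr_t)syncobjs,
                .num_syncobj_handles = num_syncobjs,
                .bo_read_handles     = (uintptr_t)read_bos,
                .num_bo_read_handles = num_read_bos,
                .out_fences          = (uintptr_t)fences,
                .num_fences          = *num_fences,
        };
        int ret;

        ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &args);
        if (!ret)
                *num_fences = args.num_fences;  /* pairs actually returned */
        return ret;
}

The returned drm_amdgpu_userq_fence_info entries would then typically be
translated into memory-wait packets (address/value pairs) in the dependent
queue's ring before its doorbell is written.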