Mirror of https://gitlab.freedesktop.org/mesa/mesa.git (synced 2025-12-22 15:40:11 +01:00)
amd: move all uses of libdrm_amdgpu to ac_linux_drm
This is required to implement virtio native-context. In a virtualized environment, most of the functions provided by libdrm_amdgpu will be implemented using virtio. This enables efficient virtualization by forwarding the kernel API to the host instead of the GL/VK calls. Similarly, the raw 'fd' or 'gem_handle' arguments are replaced by opaque types, which makes it possible to encapsulate all the needed state in the handle and to use an unmodified API between bare-metal and virtualized contexts.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21658>
parent 874d34cf1b
commit a565f2994f
15 changed files with 513 additions and 332 deletions
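Below, as a reading aid, is a minimal usage sketch of the opaque-handle API this series introduces; it is not part of the patch. The wrapper names and signatures (ac_drm_device_initialize, ac_drm_query_gpu_info, ac_drm_device_deinitialize) are taken from the diff that follows, while the helper open_amdgpu_device and the exact header name are illustrative assumptions.

#include "ac_linux_drm.h" /* header providing the ac_drm_* wrappers (name assumed) */

/* Hypothetical helper: a winsys opens the device through the opaque
 * ac_drm_device handle instead of passing a raw drm fd around. */
static ac_drm_device *open_amdgpu_device(int fd)
{
   uint32_t drm_major, drm_minor;
   ac_drm_device *dev = NULL;

   /* The fd is only needed once, to create the handle. */
   if (ac_drm_device_initialize(fd, &drm_major, &drm_minor, &dev))
      return NULL;

   /* All later queries take the handle, so a virtio native-context
    * backend can route them without exposing the host fd. */
   struct amdgpu_gpu_info gpu_info;
   if (ac_drm_query_gpu_info(dev, &gpu_info)) {
      ac_drm_device_deinitialize(dev);
      return NULL;
   }
   return dev;
}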
@@ -239,17 +239,13 @@ typedef struct _drmDevice {
enum amdgpu_sw_info {
   amdgpu_sw_info_address32_hi = 0,
};
typedef struct amdgpu_device *amdgpu_device_handle;
typedef struct amdgpu_bo *amdgpu_bo_handle;
struct amdgpu_bo_alloc_request {
   uint64_t alloc_size;
   uint64_t phys_alignment;
   uint32_t preferred_heap;
   uint64_t flags;
};
struct amdgpu_heap_info {
   uint64_t heap_size;
};

struct amdgpu_gpu_info {
   uint32_t asic_id;
   uint32_t chip_external_rev;

@@ -284,25 +280,6 @@ static int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
{
   return -ENODEV;
}
static int amdgpu_bo_alloc(amdgpu_device_handle dev,
                           struct amdgpu_bo_alloc_request *alloc_buffer,
                           amdgpu_bo_handle *buf_handle)
{
   return -EINVAL;
}
static int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
   return -EINVAL;
}
static int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
                                void *value)
{
   return -EINVAL;
}
static const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
   return NULL;
}
static intptr_t readlink(const char *path, char *buf, size_t bufsiz)
{
   return -1;

@@ -346,11 +323,11 @@ static uint64_t fix_vram_size(uint64_t size)
}

static bool
has_tmz_support(amdgpu_device_handle dev, struct radeon_info *info, uint32_t ids_flags)
has_tmz_support(ac_drm_device *dev, struct radeon_info *info, uint32_t ids_flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   int r;
   amdgpu_bo_handle bo;
   ac_drm_bo bo;

   if (ids_flags & AMDGPU_IDS_FLAGS_TMZ)
      return true;

@@ -370,10 +347,10 @@ has_tmz_support(amdgpu_device_handle dev, struct radeon_info *info, uint32_t ids
   request.phys_alignment = 1024;
   request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
   request.flags = AMDGPU_GEM_CREATE_ENCRYPTED;
   r = amdgpu_bo_alloc(dev, &request, &bo);
   r = ac_drm_bo_alloc(dev, &request, &bo);
   if (r)
      return false;
   amdgpu_bo_free(bo);
   ac_drm_bo_free(dev, bo);
   return true;
}

@@ -573,7 +550,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   uint32_t vidip_fw_version = 0, vidip_fw_feature = 0;
   uint32_t num_instances = 0;
   int r, i, j;
   amdgpu_device_handle dev = dev_p;
   ac_drm_device *dev = dev_p;

   STATIC_ASSERT(AMDGPU_HW_IP_GFX == AMD_IP_GFX);
   STATIC_ASSERT(AMDGPU_HW_IP_COMPUTE == AMD_IP_COMPUTE);

@@ -611,13 +588,13 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   }

   /* Query hardware and driver information. */
   r = ac_drm_query_gpu_info(fd, &amdinfo);
   r = ac_drm_query_gpu_info(dev, &amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_gpu_info failed.\n");
      return false;
   }

   r = ac_drm_query_info(fd, AMDGPU_INFO_DEV_INFO, sizeof(device_info), &device_info);
   r = ac_drm_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info), &device_info);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_info(dev_info) failed.\n");
      return false;

@@ -626,7 +603,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   for (unsigned ip_type = 0; ip_type < AMD_NUM_IP_TYPES; ip_type++) {
      struct drm_amdgpu_info_hw_ip ip_info = {0};

      r = ac_drm_query_hw_ip_info(fd, ip_type, 0, &ip_info);
      r = ac_drm_query_hw_ip_info(dev, ip_type, 0, &ip_info);
      if (r || !ip_info.available_rings)
         continue;

@@ -655,7 +632,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
      info->ip[ip_type].num_queues = util_bitcount(ip_info.available_rings);

      /* query ip count */
      r = ac_drm_query_hw_ip_count(fd, ip_type, &num_instances);
      r = ac_drm_query_hw_ip_count(dev, ip_type, &num_instances);
      if (!r)
         info->ip[ip_type].num_instances = num_instances;

@@ -686,21 +663,21 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
      return false;
   }

   r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_GFX_ME, 0, 0, &info->me_fw_version,
   r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0, &info->me_fw_version,
                                     &info->me_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(me) failed.\n");
      return false;
   }

   r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_GFX_MEC, 0, 0, &info->mec_fw_version,
   r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_MEC, 0, 0, &info->mec_fw_version,
                                     &info->mec_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(mec) failed.\n");
      return false;
   }

   r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_GFX_PFP, 0, 0, &info->pfp_fw_version,
   r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0, &info->pfp_fw_version,
                                     &info->pfp_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(pfp) failed.\n");

@@ -708,7 +685,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   }

   if (info->ip[AMD_IP_VCN_DEC].num_queues || info->ip[AMD_IP_VCN_UNIFIED].num_queues) {
      r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_VCN, 0, 0, &vidip_fw_version, &vidip_fw_feature);
      r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_VCN, 0, 0, &vidip_fw_version, &vidip_fw_feature);
      if (r) {
         fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(vcn) failed.\n");
         return false;

@@ -719,7 +696,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
      }
   } else {
      if (info->ip[AMD_IP_VCE].num_queues) {
         r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_VCE, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         if (r) {
            fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(vce) failed.\n");
            return false;

@@ -728,7 +705,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
      }

      if (info->ip[AMD_IP_UVD].num_queues) {
         r = ac_drm_query_firmware_version(fd, AMDGPU_INFO_FW_UVD, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         r = ac_drm_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0, &vidip_fw_version, &vidip_fw_feature);
         if (r) {
            fprintf(stderr, "amdgpu: ac_drm_query_firmware_version(uvd) failed.\n");
            return false;

@@ -737,7 +714,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
      }
   }

   r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
   r = ac_drm_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
      return false;

@@ -745,7 +722,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,

   struct drm_amdgpu_memory_info meminfo = {0};

   r = ac_drm_query_info(fd, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
   r = ac_drm_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
   if (r) {
      fprintf(stderr, "amdgpu: ac_drm_query_info(memory) failed.\n");
      return false;

@@ -757,9 +734,9 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   info->vram_vis_size_kb = DIV_ROUND_UP(meminfo.cpu_accessible_vram.total_heap_size, 1024);

   if (info->drm_minor >= 41) {
      ac_drm_query_video_caps_info(fd, AMDGPU_INFO_VIDEO_CAPS_DECODE,
      ac_drm_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_DECODE,
                                   sizeof(info->dec_caps), &(info->dec_caps));
      ac_drm_query_video_caps_info(fd, AMDGPU_INFO_VIDEO_CAPS_ENCODE,
      ac_drm_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_ENCODE,
                                   sizeof(info->enc_caps), &(info->enc_caps));
   }

@@ -894,7 +871,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   info->family_id = device_info.family;
   info->chip_external_rev = device_info.external_rev;
   info->chip_rev = device_info.chip_rev;
   info->marketing_name = amdgpu_get_marketing_name(dev);
   info->marketing_name = ac_drm_get_marketing_name(dev);
   info->is_pro_graphics = info->marketing_name && (strstr(info->marketing_name, "Pro") ||
                                                    strstr(info->marketing_name, "PRO") ||
                                                    strstr(info->marketing_name, "Frontier"));

@@ -1615,7 +1592,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
    * It can be calculated according to the kernel source code as:
    * (ring->max_dw - emit_frame_size) / emit_ib_size
    */
   r = ac_drm_query_info(fd, AMDGPU_INFO_MAX_IBS,
   r = ac_drm_query_info(dev, AMDGPU_INFO_MAX_IBS,
                         sizeof(info->max_submitted_ibs), info->max_submitted_ibs);
   if (r) {
      /* When the number of IBs can't be queried from the kernel, we choose a

@@ -1679,7 +1656,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
   if (info->gfx_level >= GFX11 && debug_get_bool_option("AMD_USERQ", false)) {
      struct drm_amdgpu_info_uq_fw_areas fw_info;

      r = ac_drm_query_uq_fw_area_info(fd, AMDGPU_HW_IP_GFX, 0, &fw_info);
      r = ac_drm_query_uq_fw_area_info(dev, AMDGPU_HW_IP_GFX, 0, &fw_info);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_uq_fw_area_info() failed.\n");
         return false;
@@ -11,7 +11,47 @@
#include <time.h>
#include <unistd.h>

int ac_drm_bo_set_metadata(int device_fd, uint32_t bo_handle, struct amdgpu_bo_metadata *info)
struct ac_drm_device {
   union {
      amdgpu_device_handle adev;
   };
   int fd;
};

int ac_drm_device_initialize(int fd, uint32_t *major_version, uint32_t *minor_version,
                             ac_drm_device **dev)
{
   int r;

   *dev = malloc(sizeof(ac_drm_device));
   if (!(*dev))
      return -1;

   amdgpu_device_handle adev;
   r = amdgpu_device_initialize(fd, major_version, minor_version,
                                &adev);
   if (r == 0) {
      (*dev)->adev = adev;
      (*dev)->fd = amdgpu_device_get_fd(adev);
   } else {
      free(*dev);
   }

   return r;
}

void ac_drm_device_deinitialize(ac_drm_device *dev)
{
   amdgpu_device_deinitialize(dev->adev);
   free(dev);
}

int ac_drm_device_get_fd(ac_drm_device *device_handle)
{
   return device_handle->fd;
}

int ac_drm_bo_set_metadata(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_metadata *info)
{
   struct drm_amdgpu_gem_metadata args = {};

@@ -28,10 +68,11 @@ int ac_drm_bo_set_metadata(int device_fd, uint32_t bo_handle, struct amdgpu_bo_m
      memcpy(args.data.data, info->umd_metadata, info->size_metadata);
   }

   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));

}

int ac_drm_bo_query_info(int device_fd, uint32_t bo_handle, struct amdgpu_bo_info *info)
int ac_drm_bo_query_info(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_info *info)
{
   struct drm_amdgpu_gem_metadata metadata = {};
   struct drm_amdgpu_gem_create_in bo_info = {};

@@ -46,7 +87,7 @@ int ac_drm_bo_query_info(int device_fd, uint32_t bo_handle, struct amdgpu_bo_inf
   metadata.handle = bo_handle;
   metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_METADATA, &metadata, sizeof(metadata));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_METADATA, &metadata, sizeof(metadata));
   if (r)
      return r;

@@ -58,7 +99,7 @@ int ac_drm_bo_query_info(int device_fd, uint32_t bo_handle, struct amdgpu_bo_inf
   gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
   gem_op.value = (uintptr_t)&bo_info;

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
   if (r)
      return r;

@@ -99,16 +140,17 @@ static uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
   return timeout;
}

int ac_drm_bo_wait_for_idle(int device_fd, uint32_t bo_handle, uint64_t timeout_ns, bool *busy)
int ac_drm_bo_wait_for_idle(ac_drm_device *dev, ac_drm_bo bo, uint64_t timeout_ns, bool *busy)
{
   union drm_amdgpu_gem_wait_idle args;
   int r;
   union drm_amdgpu_gem_wait_idle args;

   memset(&args, 0, sizeof(args));
   args.in.handle = bo_handle;
   args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_WAIT_IDLE, &args, sizeof(args));
   ac_drm_bo_export(dev, bo, amdgpu_bo_handle_type_kms,
                    &args.in.handle);
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &args, sizeof(args));

   if (r == 0) {
      *busy = args.out.status;

@@ -119,17 +161,17 @@ int ac_drm_bo_wait_for_idle(int device_fd, uint32_t bo_handle, uint64_t timeout_
   }
}

int ac_drm_bo_va_op(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
int ac_drm_bo_va_op(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                    uint64_t addr, uint64_t flags, uint32_t ops)
{
   size = ALIGN(size, getpagesize());

   return ac_drm_bo_va_op_raw(
      device_fd, bo_handle, offset, size, addr,
      dev, bo_handle, offset, size, addr,
      AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int ac_drm_bo_va_op_raw(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
int ac_drm_bo_va_op_raw(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                        uint64_t addr, uint64_t flags, uint32_t ops)
{
   struct drm_amdgpu_gem_va va;

@@ -147,12 +189,12 @@ int ac_drm_bo_va_op_raw(int device_fd, uint32_t bo_handle, uint64_t offset, uint
   va.offset_in_bo = offset;
   va.map_size = size;

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}

int ac_drm_bo_va_op_raw2(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
int ac_drm_bo_va_op_raw2(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                         uint64_t addr, uint64_t flags, uint32_t ops,
                         uint32_t vm_timeline_syncobj_out, uint64_t vm_timeline_point,
                         uint64_t input_fence_syncobj_handles, uint32_t num_syncobj_handles)

@@ -176,15 +218,15 @@ int ac_drm_bo_va_op_raw2(int device_fd, uint32_t bo_handle, uint64_t offset, uin
   va.input_fence_syncobj_handles = input_fence_syncobj_handles;
   va.num_syncobj_handles = num_syncobj_handles;

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

   return r;
}

int ac_drm_cs_ctx_create2(int device_fd, uint32_t priority, uint32_t *ctx_handle)
int ac_drm_cs_ctx_create2(ac_drm_device *dev, uint32_t priority, uint32_t *ctx_id)
{
   union drm_amdgpu_ctx args;
   int r;
   union drm_amdgpu_ctx args;
   char *override_priority;

   override_priority = getenv("AMD_PRIORITY");

@@ -202,56 +244,58 @@ int ac_drm_cs_ctx_create2(int device_fd, uint32_t priority, uint32_t *ctx_handle
   args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
   args.in.priority = priority;

   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));

   if (r)
      return r;

   *ctx_handle = args.out.alloc.ctx_id;
   *ctx_id = args.out.alloc.ctx_id;

   return 0;
}

int ac_drm_cs_ctx_free(int device_fd, uint32_t ctx_handle)
int ac_drm_cs_ctx_free(ac_drm_device *dev, uint32_t ctx_id)
{
   union drm_amdgpu_ctx args;

   /* now deal with kernel side */
   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_FREE_CTX;
   args.in.ctx_id = ctx_handle;
   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   args.in.ctx_id = ctx_id;
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

int ac_drm_cs_ctx_stable_pstate(int device_fd, uint32_t ctx_handle, uint32_t op, uint32_t flags,
int ac_drm_cs_ctx_stable_pstate(ac_drm_device *dev, uint32_t ctx_id, uint32_t op, uint32_t flags,
                                uint32_t *out_flags)
{
   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_handle)
   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = op;
   args.in.ctx_id = ctx_handle;
   args.in.ctx_id = ctx_id;
   args.in.flags = flags;
   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r && out_flags)
      *out_flags = args.out.pstate.flags;
   return r;
}

int ac_drm_cs_query_reset_state2(int device_fd, uint32_t ctx_handle, uint64_t *flags)
int ac_drm_cs_query_reset_state2(ac_drm_device *dev, uint32_t ctx_id, uint64_t *flags)
{
   union drm_amdgpu_ctx args;
   int r;

   if (!ctx_handle)
   if (!ctx_id)
      return -EINVAL;

   memset(&args, 0, sizeof(args));
   args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
   args.in.ctx_id = ctx_handle;
   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   args.in.ctx_id = ctx_id;
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
   if (!r)
      *flags = args.out.state.flags;
   return r;

@@ -284,7 +328,7 @@ static int amdgpu_ioctl_wait_cs(int device_fd, uint32_t ctx_handle, unsigned ip,
   return 0;
}

int ac_drm_cs_query_fence_status(int device_fd, uint32_t ctx_handle, uint32_t ip_type,
int ac_drm_cs_query_fence_status(ac_drm_device *dev, uint32_t ctx_id, uint32_t ip_type,
                                 uint32_t ip_instance, uint32_t ring, uint64_t fence_seq_no,
                                 uint64_t timeout_ns, uint64_t flags, uint32_t *expired)
{

@@ -298,7 +342,7 @@ int ac_drm_cs_query_fence_status(int device_fd, uint32_t ctx_handle, uint32_t ip

   *expired = false;

   r = amdgpu_ioctl_wait_cs(device_fd, ctx_handle, ip_type, ip_instance, ring, fence_seq_no,
   r = amdgpu_ioctl_wait_cs(dev->fd, ctx_id, ip_type, ip_instance, ring, fence_seq_no,
                            timeout_ns, flags, &busy);

   if (!r && !busy)

@@ -377,7 +421,15 @@ int ac_drm_cs_syncobj_transfer(int device_fd, uint32_t dst_handle, uint64_t dst_
   return drmSyncobjTransfer(device_fd, dst_handle, dst_point, src_handle, src_point, flags);
}

int ac_drm_cs_submit_raw2(int device_fd, uint32_t ctx_handle, uint32_t bo_list_handle,
int ac_drm_cs_syncobj_timeline_wait(int device_fd, uint32_t *handles, uint64_t *points,
                                    unsigned num_handles, int64_t timeout_nsec, unsigned flags,
                                    uint32_t *first_signaled)
{
   return drmSyncobjTimelineWait(device_fd, handles, points, num_handles, timeout_nsec, flags,
                                 first_signaled);
}

int ac_drm_cs_submit_raw2(ac_drm_device *dev, uint32_t ctx_id, uint32_t bo_list_handle,
                          int num_chunks, struct drm_amdgpu_cs_chunk *chunks, uint64_t *seq_no)
{
   union drm_amdgpu_cs cs;

@@ -389,10 +441,10 @@ int ac_drm_cs_submit_raw2(int device_fd, uint32_t ctx_handle, uint32_t bo_list_h
   for (i = 0; i < num_chunks; i++)
      chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
   cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
   cs.in.ctx_id = ctx_handle;
   cs.in.ctx_id = ctx_id;
   cs.in.bo_list_handle = bo_list_handle;
   cs.in.num_chunks = num_chunks;
   r = drm_ioctl_write_read(device_fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
   r = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
   if (!r && seq_no)
      *seq_no = cs.out.handle;
   return r;

@@ -405,7 +457,7 @@ void ac_drm_cs_chunk_fence_info_to_data(uint32_t bo_handle, uint64_t offset,
   data->fence_data.offset = offset * sizeof(uint64_t);
}

int ac_drm_query_info(int device_fd, unsigned info_id, unsigned size, void *value)
int ac_drm_query_info(ac_drm_device *dev, unsigned info_id, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

@@ -414,10 +466,10 @@ int ac_drm_query_info(int device_fd, unsigned info_id, unsigned size, void *valu
   request.return_size = size;
   request.query = info_id;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_read_mm_registers(int device_fd, unsigned dword_offset, unsigned count,
int ac_drm_read_mm_registers(ac_drm_device *dev, unsigned dword_offset, unsigned count,
                             uint32_t instance, uint32_t flags, uint32_t *values)
{
   struct drm_amdgpu_info request;

@@ -431,10 +483,10 @@ int ac_drm_read_mm_registers(int device_fd, unsigned dword_offset, unsigned coun
   request.read_mmr_reg.instance = instance;
   request.read_mmr_reg.flags = flags;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_count(int device_fd, unsigned type, uint32_t *count)
int ac_drm_query_hw_ip_count(ac_drm_device *dev, unsigned type, uint32_t *count)
{
   struct drm_amdgpu_info request;

@@ -444,10 +496,10 @@ int ac_drm_query_hw_ip_count(int device_fd, unsigned type, uint32_t *count)
   request.query = AMDGPU_INFO_HW_IP_COUNT;
   request.query_hw_ip.type = type;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_hw_ip_info(int device_fd, unsigned type, unsigned ip_instance,
int ac_drm_query_hw_ip_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                            struct drm_amdgpu_info_hw_ip *info)
{
   struct drm_amdgpu_info request;

@@ -459,10 +511,10 @@ int ac_drm_query_hw_ip_info(int device_fd, unsigned type, unsigned ip_instance,
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_firmware_version(int device_fd, unsigned fw_type, unsigned ip_instance,
int ac_drm_query_firmware_version(ac_drm_device *dev, unsigned fw_type, unsigned ip_instance,
                                  unsigned index, uint32_t *version, uint32_t *feature)
{
   struct drm_amdgpu_info request;

@@ -477,7 +529,7 @@ int ac_drm_query_firmware_version(int device_fd, unsigned fw_type, unsigned ip_i
   request.query_fw.ip_instance = ip_instance;
   request.query_fw.index = index;

   r = drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   r = drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   if (r)
      return r;

@@ -486,7 +538,7 @@ int ac_drm_query_firmware_version(int device_fd, unsigned fw_type, unsigned ip_i
   return 0;
}

int ac_drm_query_uq_fw_area_info(int device_fd, unsigned type, unsigned ip_instance,
int ac_drm_query_uq_fw_area_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                                 struct drm_amdgpu_info_uq_fw_areas *info)
{
   struct drm_amdgpu_info request;

@@ -498,15 +550,15 @@ int ac_drm_query_uq_fw_area_info(int device_fd, unsigned type, unsigned ip_insta
   request.query_hw_ip.type = type;
   request.query_hw_ip.ip_instance = ip_instance;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_gpu_info(int device_fd, struct amdgpu_gpu_info *info)
int ac_drm_query_gpu_info(ac_drm_device *dev, struct amdgpu_gpu_info *info)
{
   struct drm_amdgpu_info_device dev_info = {0};
   int r, i;

   r = ac_drm_query_info(device_fd, AMDGPU_INFO_DEV_INFO, sizeof(dev_info), &dev_info);
   r = ac_drm_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev_info), &dev_info);
   if (r)
      return r;

@@ -535,19 +587,19 @@ int ac_drm_query_gpu_info(int device_fd, struct amdgpu_gpu_info *info)
      unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
                          (AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

      r = ac_drm_read_mm_registers(device_fd, 0x263d, 1, instance, 0, &info->backend_disable[i]);
      r = ac_drm_read_mm_registers(dev, 0x263d, 1, instance, 0, &info->backend_disable[i]);
      if (r)
         return r;
      /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
      info->backend_disable[i] = (info->backend_disable[i] >> 16) & 0xff;

      r =
         ac_drm_read_mm_registers(device_fd, 0xa0d4, 1, instance, 0, &info->pa_sc_raster_cfg[i]);
         ac_drm_read_mm_registers(dev, 0xa0d4, 1, instance, 0, &info->pa_sc_raster_cfg[i]);
      if (r)
         return r;

      if (info->family_id >= AMDGPU_FAMILY_CI) {
         r = ac_drm_read_mm_registers(device_fd, 0xa0d5, 1, instance, 0,
         r = ac_drm_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
                                      &info->pa_sc_raster_cfg1[i]);
         if (r)
            return r;

@@ -555,23 +607,23 @@ int ac_drm_query_gpu_info(int device_fd, struct amdgpu_gpu_info *info)
      }
   }

   r = ac_drm_read_mm_registers(device_fd, 0x263e, 1, 0xffffffff, 0, &info->gb_addr_cfg);
   r = ac_drm_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, &info->gb_addr_cfg);
   if (r)
      return r;

   if (info->family_id < AMDGPU_FAMILY_AI) {
      r = ac_drm_read_mm_registers(device_fd, 0x2644, 32, 0xffffffff, 0, info->gb_tile_mode);
      r = ac_drm_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, info->gb_tile_mode);
      if (r)
         return r;

      if (info->family_id >= AMDGPU_FAMILY_CI) {
         r = ac_drm_read_mm_registers(device_fd, 0x2664, 16, 0xffffffff, 0,
         r = ac_drm_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
                                      info->gb_macro_tile_mode);
         if (r)
            return r;
      }

      r = ac_drm_read_mm_registers(device_fd, 0x9d8, 1, 0xffffffff, 0, &info->mc_arb_ramcfg);
      r = ac_drm_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0, &info->mc_arb_ramcfg);
      if (r)
         return r;
   }

@@ -582,13 +634,13 @@ int ac_drm_query_gpu_info(int device_fd, struct amdgpu_gpu_info *info)
   return 0;
}

int ac_drm_query_heap_info(int device_fd, uint32_t heap, uint32_t flags,
int ac_drm_query_heap_info(ac_drm_device *dev, uint32_t heap, uint32_t flags,
                           struct amdgpu_heap_info *info)
{
   struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
   int r;

   r = ac_drm_query_info(device_fd, AMDGPU_INFO_VRAM_GTT, sizeof(vram_gtt_info), &vram_gtt_info);
   r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_GTT, sizeof(vram_gtt_info), &vram_gtt_info);
   if (r)
      return r;

@@ -604,10 +656,10 @@ int ac_drm_query_heap_info(int device_fd, uint32_t heap, uint32_t flags,
      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
         r = ac_drm_query_info(device_fd, AMDGPU_INFO_VIS_VRAM_USAGE, sizeof(info->heap_usage),
         r = ac_drm_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      else
         r = ac_drm_query_info(device_fd, AMDGPU_INFO_VRAM_USAGE, sizeof(info->heap_usage),
         r = ac_drm_query_info(dev, AMDGPU_INFO_VRAM_USAGE, sizeof(info->heap_usage),
                               &info->heap_usage);
      if (r)
         return r;

@@ -616,7 +668,7 @@ int ac_drm_query_heap_info(int device_fd, uint32_t heap, uint32_t flags,
      info->heap_size = vram_gtt_info.gtt_size;
      info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

      r = ac_drm_query_info(device_fd, AMDGPU_INFO_GTT_USAGE, sizeof(info->heap_usage),
      r = ac_drm_query_info(dev, AMDGPU_INFO_GTT_USAGE, sizeof(info->heap_usage),
                            &info->heap_usage);
      if (r)
         return r;

@@ -628,7 +680,7 @@ int ac_drm_query_heap_info(int device_fd, uint32_t heap, uint32_t flags,
   return 0;
}

int ac_drm_query_sensor_info(int device_fd, unsigned sensor_type, unsigned size, void *value)
int ac_drm_query_sensor_info(ac_drm_device *dev, unsigned sensor_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

@@ -638,10 +690,10 @@ int ac_drm_query_sensor_info(int device_fd, unsigned sensor_type, unsigned size,
   request.query = AMDGPU_INFO_SENSOR;
   request.sensor_info.type = sensor_type;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_query_video_caps_info(int device_fd, unsigned cap_type, unsigned size, void *value)
int ac_drm_query_video_caps_info(ac_drm_device *dev, unsigned cap_type, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

@@ -651,30 +703,117 @@ int ac_drm_query_video_caps_info(int device_fd, unsigned cap_type, unsigned size
   request.query = AMDGPU_INFO_VIDEO_CAPS;
   request.sensor_info.type = cap_type;

   return drm_ioctl_write(device_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_vm_reserve_vmid(int device_fd, uint32_t flags)
int ac_drm_query_gpuvm_fault_info(ac_drm_device *dev, unsigned size, void *value)
{
   struct drm_amdgpu_info request;

   memset(&request, 0, sizeof(request));
   request.return_pointer = (uintptr_t)value;
   request.return_size = size;
   request.query = AMDGPU_INFO_GPUVM_FAULT;

   return drm_ioctl_write(dev->fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
}

int ac_drm_vm_reserve_vmid(ac_drm_device *dev, uint32_t flags)
{
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

int ac_drm_vm_unreserve_vmid(int device_fd, uint32_t flags)
int ac_drm_vm_unreserve_vmid(ac_drm_device *dev, uint32_t flags)
{
   union drm_amdgpu_vm vm;

   vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
   vm.in.flags = flags;

   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_VM, &vm, sizeof(vm));
}

int ac_drm_create_userqueue(int device_fd, uint32_t ip_type, uint32_t doorbell_handle,
const char *ac_drm_get_marketing_name(ac_drm_device *dev)
{
   return amdgpu_get_marketing_name(dev->adev);
}

int ac_drm_query_sw_info(ac_drm_device *dev,
                         enum amdgpu_sw_info info, void *value)
{
   return amdgpu_query_sw_info(dev->adev, info, value);
}

int ac_drm_bo_alloc(ac_drm_device *dev, struct amdgpu_bo_alloc_request *alloc_buffer,
                    ac_drm_bo *bo)
{
   return amdgpu_bo_alloc(dev->adev, alloc_buffer, &bo->abo);
}

int ac_drm_bo_export(ac_drm_device *dev, ac_drm_bo bo,
                     enum amdgpu_bo_handle_type type, uint32_t *shared_handle)
{
   return amdgpu_bo_export(bo.abo, type, shared_handle);
}

int ac_drm_bo_import(ac_drm_device *dev, enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle, struct ac_drm_bo_import_result *output)
{
   int r;

   struct amdgpu_bo_import_result result;
   r = amdgpu_bo_import(dev->adev, type, shared_handle, &result);
   if (r == 0) {
      output->bo.abo = result.buf_handle;
      output->alloc_size = result.alloc_size;
   }

   return r;
}

int ac_drm_create_bo_from_user_mem(ac_drm_device *dev, void *cpu,
                                   uint64_t size, ac_drm_bo *bo)
{
   return amdgpu_create_bo_from_user_mem(dev->adev, cpu, size, &bo->abo);
}

int ac_drm_bo_free(ac_drm_device *dev, ac_drm_bo bo)
{
   return amdgpu_bo_free(bo.abo);
}

int ac_drm_bo_cpu_map(ac_drm_device *dev, ac_drm_bo bo,
                      void **cpu)
{
   return amdgpu_bo_cpu_map(bo.abo, cpu);
}

int ac_drm_bo_cpu_unmap(ac_drm_device *dev, ac_drm_bo bo)
{
   return amdgpu_bo_cpu_unmap(bo.abo);
}

int ac_drm_va_range_alloc(ac_drm_device *dev, enum amdgpu_gpu_va_range va_range_type,
                          uint64_t size, uint64_t va_base_alignment, uint64_t va_base_required,
                          uint64_t *va_base_allocated, amdgpu_va_handle *va_range_handle,
                          uint64_t flags)
{
   return amdgpu_va_range_alloc(dev->adev, va_range_type, size, va_base_alignment,
                                va_base_required, va_base_allocated,
                                va_range_handle, flags);
}

int ac_drm_va_range_free(amdgpu_va_handle va_range_handle)
{
   return amdgpu_va_range_free(va_range_handle);
}

int ac_drm_create_userqueue(ac_drm_device *dev, uint32_t ip_type, uint32_t doorbell_handle,
                            uint32_t doorbell_offset, uint64_t queue_va, uint64_t queue_size,
                            uint64_t wptr_va, uint64_t rptr_va, void *mqd_in, uint32_t *queue_id)
{

@@ -712,14 +851,14 @@ int ac_drm_create_userqueue(int device_fd, uint32_t ip_type, uint32_t doorbell_h
   userq.in.mqd = (uintptr_t)mqd_in;
   userq.in.mqd_size = mqd_size;

   ret = drm_ioctl_write_read(device_fd, DRM_AMDGPU_USERQ,
   ret = drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ,
                              &userq, sizeof(userq));
   *queue_id = userq.out.queue_id;

   return ret;
}

int ac_drm_free_userqueue(int device_fd, uint32_t queue_id)
int ac_drm_free_userqueue(ac_drm_device *dev, uint32_t queue_id)
{
   union drm_amdgpu_userq userq;

@@ -727,17 +866,17 @@ int ac_drm_free_userqueue(int device_fd, uint32_t queue_id)
   userq.in.op = AMDGPU_USERQ_OP_FREE;
   userq.in.queue_id = queue_id;

   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_USERQ, &userq, sizeof(userq));
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ, &userq, sizeof(userq));
}

int ac_drm_userq_signal(int device_fd, struct drm_amdgpu_userq_signal *signal_data)
int ac_drm_userq_signal(ac_drm_device *dev, struct drm_amdgpu_userq_signal *signal_data)
{
   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_USERQ_SIGNAL,
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
                               signal_data, sizeof(struct drm_amdgpu_userq_signal));
}

int ac_drm_userq_wait(int device_fd, struct drm_amdgpu_userq_wait *wait_data)
int ac_drm_userq_wait(ac_drm_device *dev, struct drm_amdgpu_userq_wait *wait_data)
{
   return drm_ioctl_write_read(device_fd, DRM_AMDGPU_USERQ_WAIT, wait_data,
   return drm_ioctl_write_read(dev->fd, DRM_AMDGPU_USERQ_WAIT, wait_data,
                               sizeof(struct drm_amdgpu_userq_wait));
}
@@ -28,32 +28,58 @@ extern "C" {
#define TAILV \
   { \
   }
#define TAILPTR \
   { \
      return NULL; \
   }
typedef void* amdgpu_va_handle;
#else
#define PROC
#define TAIL
#define TAILV
#define TAILPTR
#endif

PROC int ac_drm_bo_set_metadata(int device_fd, uint32_t bo_handle,
struct ac_drm_device;
typedef struct ac_drm_device ac_drm_device;

typedef union ac_drm_bo {
#ifdef _WIN32
   void *abo;
#else
   amdgpu_bo_handle abo;
#endif
} ac_drm_bo;

struct ac_drm_bo_import_result {
   ac_drm_bo bo;
   uint64_t alloc_size;
};

PROC int ac_drm_device_initialize(int fd, uint32_t *major_version, uint32_t *minor_version,
                                  ac_drm_device **dev) TAIL;
PROC void ac_drm_device_deinitialize(ac_drm_device *dev) TAILV;
PROC int ac_drm_device_get_fd(ac_drm_device *dev) TAIL;
PROC int ac_drm_bo_set_metadata(ac_drm_device *dev, uint32_t bo_handle,
                                struct amdgpu_bo_metadata *info) TAIL;
PROC int ac_drm_bo_query_info(int device_fd, uint32_t bo_handle, struct amdgpu_bo_info *info) TAIL;
PROC int ac_drm_bo_wait_for_idle(int device_fd, uint32_t bo_handle, uint64_t timeout_ns,
PROC int ac_drm_bo_query_info(ac_drm_device *dev, uint32_t bo_handle, struct amdgpu_bo_info *info) TAIL;
PROC int ac_drm_bo_wait_for_idle(ac_drm_device *dev, ac_drm_bo bo, uint64_t timeout_ns,
                                 bool *busy) TAIL;
PROC int ac_drm_bo_va_op(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
PROC int ac_drm_bo_va_op(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                         uint64_t addr, uint64_t flags, uint32_t ops) TAIL;
PROC int ac_drm_bo_va_op_raw(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
PROC int ac_drm_bo_va_op_raw(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                             uint64_t addr, uint64_t flags, uint32_t ops) TAIL;
PROC int ac_drm_bo_va_op_raw2(int device_fd, uint32_t bo_handle, uint64_t offset, uint64_t size,
PROC int ac_drm_bo_va_op_raw2(ac_drm_device *dev, uint32_t bo_handle, uint64_t offset, uint64_t size,
                              uint64_t addr, uint64_t flags, uint32_t ops,
                              uint32_t vm_timeline_syncobj_out, uint64_t vm_timeline_point,
                              uint64_t input_fence_syncobj_handles,
                              uint32_t num_syncobj_handles) TAIL;
PROC int ac_drm_cs_ctx_create2(int device_fd, uint32_t priority, uint32_t *ctx_handle) TAIL;
PROC int ac_drm_cs_ctx_free(int device_fd, uint32_t ctx_handle) TAIL;
PROC int ac_drm_cs_ctx_stable_pstate(int device_fd, uint32_t ctx_handle, uint32_t op,
PROC int ac_drm_cs_ctx_create2(ac_drm_device *dev, uint32_t priority, uint32_t *ctx_id) TAIL;
PROC int ac_drm_cs_ctx_free(ac_drm_device *dev, uint32_t ctx_id) TAIL;
PROC int ac_drm_cs_ctx_stable_pstate(ac_drm_device *dev, uint32_t ctx_id, uint32_t op,
                                     uint32_t flags, uint32_t *out_flags) TAIL;
PROC int ac_drm_cs_query_reset_state2(int device_fd, uint32_t ctx_handle, uint64_t *flags) TAIL;
PROC int ac_drm_cs_query_fence_status(int device_fd, uint32_t ctx_handle, uint32_t ip_type,
PROC int ac_drm_cs_query_reset_state2(ac_drm_device *dev, uint32_t ctx_id, uint64_t *flags) TAIL;
PROC int ac_drm_cs_query_fence_status(ac_drm_device *dev, uint32_t ctx_id, uint32_t ip_type,
                                      uint32_t ip_instance, uint32_t ring, uint64_t fence_seq_no,
                                      uint64_t timeout_ns, uint64_t flags, uint32_t *expired) TAIL;
PROC int ac_drm_cs_create_syncobj2(int device_fd, uint32_t flags, uint32_t *handle) TAIL;

@@ -72,37 +98,60 @@ PROC int ac_drm_cs_syncobj_export_sync_file2(int device_fd, uint32_t syncobj, ui
                                             uint32_t flags, int *sync_file_fd) TAIL;
PROC int ac_drm_cs_syncobj_transfer(int device_fd, uint32_t dst_handle, uint64_t dst_point,
                                    uint32_t src_handle, uint64_t src_point, uint32_t flags) TAIL;
PROC int ac_drm_cs_submit_raw2(int device_fd, uint32_t ctx_handle, uint32_t bo_list_handle,
PROC int ac_drm_cs_submit_raw2(ac_drm_device *dev, uint32_t ctx_id, uint32_t bo_list_handle,
                               int num_chunks, struct drm_amdgpu_cs_chunk *chunks,
                               uint64_t *seq_no) TAIL;
PROC void ac_drm_cs_chunk_fence_info_to_data(uint32_t bo_handle, uint64_t offset,
                                             struct drm_amdgpu_cs_chunk_data *data) TAILV;
PROC int ac_drm_query_info(int device_fd, unsigned info_id, unsigned size, void *value) TAIL;
PROC int ac_drm_read_mm_registers(int device_fd, unsigned dword_offset, unsigned count,
PROC int ac_drm_cs_syncobj_timeline_wait(int device_fd, uint32_t *handles, uint64_t *points,
                                         unsigned num_handles, int64_t timeout_nsec, unsigned flags,
                                         uint32_t *first_signaled) TAIL;
PROC int ac_drm_query_info(ac_drm_device *dev, unsigned info_id, unsigned size, void *value) TAIL;
PROC int ac_drm_read_mm_registers(ac_drm_device *dev, unsigned dword_offset, unsigned count,
                                  uint32_t instance, uint32_t flags, uint32_t *values) TAIL;
PROC int ac_drm_query_hw_ip_count(int device_fd, unsigned type, uint32_t *count) TAIL;
PROC int ac_drm_query_hw_ip_info(int device_fd, unsigned type, unsigned ip_instance,
PROC int ac_drm_query_hw_ip_count(ac_drm_device *dev, unsigned type, uint32_t *count) TAIL;
PROC int ac_drm_query_hw_ip_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                                 struct drm_amdgpu_info_hw_ip *info) TAIL;
PROC int ac_drm_query_firmware_version(int device_fd, unsigned fw_type, unsigned ip_instance,
PROC int ac_drm_query_firmware_version(ac_drm_device *dev, unsigned fw_type, unsigned ip_instance,
                                       unsigned index, uint32_t *version, uint32_t *feature) TAIL;
PROC int ac_drm_query_uq_fw_area_info(int device_fd, unsigned type, unsigned ip_instance,
PROC int ac_drm_query_uq_fw_area_info(ac_drm_device *dev, unsigned type, unsigned ip_instance,
                                      struct drm_amdgpu_info_uq_fw_areas *info) TAIL;
PROC int ac_drm_query_gpu_info(int device_fd, struct amdgpu_gpu_info *info) TAIL;
PROC int ac_drm_query_heap_info(int device_fd, uint32_t heap, uint32_t flags,
PROC int ac_drm_query_gpu_info(ac_drm_device *dev, struct amdgpu_gpu_info *info) TAIL;
PROC int ac_drm_query_heap_info(ac_drm_device *dev, uint32_t heap, uint32_t flags,
                                struct amdgpu_heap_info *info) TAIL;
PROC int ac_drm_query_sensor_info(int device_fd, unsigned sensor_type, unsigned size,
PROC int ac_drm_query_sensor_info(ac_drm_device *dev, unsigned sensor_type, unsigned size,
                                  void *value) TAIL;
PROC int ac_drm_query_video_caps_info(int device_fd, unsigned cap_type, unsigned size,
PROC int ac_drm_query_video_caps_info(ac_drm_device *dev, unsigned cap_type, unsigned size,
                                      void *value) TAIL;
PROC int ac_drm_vm_reserve_vmid(int device_fd, uint32_t flags) TAIL;
PROC int ac_drm_vm_unreserve_vmid(int device_fd, uint32_t flags) TAIL;
PROC int ac_drm_create_userqueue(int device_fd, uint32_t ip_type, uint32_t doorbell_handle,
PROC int ac_drm_query_gpuvm_fault_info(ac_drm_device *dev, unsigned size, void *value) TAIL;
PROC int ac_drm_vm_reserve_vmid(ac_drm_device *dev, uint32_t flags) TAIL;
PROC int ac_drm_vm_unreserve_vmid(ac_drm_device *dev, uint32_t flags) TAIL;
PROC const char *ac_drm_get_marketing_name(ac_drm_device *device) TAILPTR;
PROC int ac_drm_query_sw_info(ac_drm_device *dev,
                              enum amdgpu_sw_info info, void *value) TAIL;
PROC int ac_drm_bo_alloc(ac_drm_device *dev, struct amdgpu_bo_alloc_request *alloc_buffer,
                         ac_drm_bo *bo) TAIL;
PROC int ac_drm_bo_export(ac_drm_device *dev, ac_drm_bo bo,
                          enum amdgpu_bo_handle_type type, uint32_t *shared_handle) TAIL;
PROC int ac_drm_bo_import(ac_drm_device *dev, enum amdgpu_bo_handle_type type,
                          uint32_t shared_handle, struct ac_drm_bo_import_result *output) TAIL;
PROC int ac_drm_create_bo_from_user_mem(ac_drm_device *dev, void *cpu,
                                        uint64_t size, ac_drm_bo *bo) TAIL;
PROC int ac_drm_bo_free(ac_drm_device *dev, ac_drm_bo bo) TAIL;
PROC int ac_drm_bo_cpu_map(ac_drm_device *dev, ac_drm_bo bo, void **cpu) TAIL;
PROC int ac_drm_bo_cpu_unmap(ac_drm_device *dev, ac_drm_bo bo) TAIL;
PROC int ac_drm_va_range_alloc(ac_drm_device *dev, enum amdgpu_gpu_va_range va_range_type,
                               uint64_t size, uint64_t va_base_alignment, uint64_t va_base_required,
                               uint64_t *va_base_allocated, amdgpu_va_handle *va_range_handle,
                               uint64_t flags) TAIL;
PROC int ac_drm_va_range_free(amdgpu_va_handle va_range_handle) TAIL;
PROC int ac_drm_create_userqueue(ac_drm_device *dev, uint32_t ip_type, uint32_t doorbell_handle,
                                 uint32_t doorbell_offset, uint64_t queue_va, uint64_t queue_size,
                                 uint64_t wptr_va, uint64_t rptr_va, void *mqd_in,
                                 uint32_t *queue_id) TAIL;
PROC int ac_drm_free_userqueue(int device_fd, uint32_t queue_id) TAIL;
PROC int ac_drm_userq_signal(int device_fd, struct drm_amdgpu_userq_signal *signal_data) TAIL;
PROC int ac_drm_userq_wait(int device_fd, struct drm_amdgpu_userq_wait *wait_data) TAIL;
PROC int ac_drm_free_userqueue(ac_drm_device *dev, uint32_t queue_id) TAIL;
PROC int ac_drm_userq_signal(ac_drm_device *dev, struct drm_amdgpu_userq_signal *signal_data) TAIL;
PROC int ac_drm_userq_wait(ac_drm_device *dev, struct drm_amdgpu_userq_wait *wait_data) TAIL;

#ifdef __cplusplus
}
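A note on the stub macros seen in the header hunks above: every ac_drm_* prototype ends in TAIL, TAILV or TAILPTR. In the header's stub branch (the one that also typedefs amdgpu_va_handle, presumably the build without libdrm_amdgpu), TAILV expands to an empty body and TAILPTR to a body returning NULL, so each PROC declaration becomes an inline stub; on Linux all of these macros are empty and the line is a plain prototype implemented in the .c file above. The definitions of PROC and TAIL for the stub branch are not part of this excerpt, so the expansion below is an assumption for illustration only:

/* Stub build (PROC assumed to be "static inline"; TAILPTR is defined above): */
static inline const char *ac_drm_get_marketing_name(ac_drm_device *device) { return NULL; }

/* Linux build (PROC and TAILPTR empty): an ordinary prototype. */
const char *ac_drm_get_marketing_name(ac_drm_device *device);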
@@ -48,7 +48,7 @@ radv_amdgpu_bo_va_op(struct radv_amdgpu_winsys *ws, uint32_t bo_handle, uint64_t

   size = align64(size, getpagesize());

   return ac_drm_bo_va_op_raw(ws->fd, bo_handle, offset, size, addr, flags, ops);
   return ac_drm_bo_va_op_raw(ws->dev, bo_handle, offset, size, addr, flags, ops);
}

static int

@@ -355,7 +355,7 @@ radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws, struct radeon_winsys_bo
      if (ws->debug_all_bos)
         radv_amdgpu_global_bo_list_del(ws, bo);
      radv_amdgpu_bo_va_op(ws, bo->bo_handle, 0, bo->base.size, bo->base.va, 0, 0, AMDGPU_VA_OP_UNMAP);
      amdgpu_bo_free(bo->bo);
      ac_drm_bo_free(ws->dev, bo->bo);
   }

   if (bo->base.initial_domain & RADEON_DOMAIN_VRAM) {

@@ -369,7 +369,7 @@ radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws, struct radeon_winsys_bo
   if (bo->base.initial_domain & RADEON_DOMAIN_GTT)
      p_atomic_add(&ws->allocated_gtt, -align64(bo->base.size, ws->info.gart_page_size));

   amdgpu_va_range_free(bo->va_handle);
   ac_drm_va_range_free(bo->va_handle);
   FREE(bo);
}

@@ -382,7 +382,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
   struct radv_amdgpu_winsys_bo *bo;
   struct amdgpu_bo_alloc_request request = {0};
   struct radv_amdgpu_map_range *ranges = NULL;
   amdgpu_bo_handle buf_handle;
   ac_drm_bo buf_handle;
   uint64_t va = 0;
   amdgpu_va_handle va_handle;
   int r;

@@ -405,7 +405,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned

   const uint64_t va_flags = AMDGPU_VA_RANGE_HIGH | (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
                             (flags & RADEON_FLAG_REPLAYABLE ? AMDGPU_VA_RANGE_REPLAYABLE : 0);
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, virt_alignment, replay_address, &va,
   r = ac_drm_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, virt_alignment, replay_address, &va,
                             &va_handle, va_flags);
   if (r) {
      result = replay_address ? VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;

@@ -492,10 +492,11 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
      request.flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
   if ((initial_domain & RADEON_DOMAIN_VRAM_GTT) && (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING) &&
       ((ws->perftest & RADV_PERFTEST_LOCAL_BOS) || (flags & RADEON_FLAG_PREFER_LOCAL_BO))) {
      bo->base.is_local = true;
      request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
      if (request.flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
         bo->base.is_local = true;
         request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
      }
   }

   if (initial_domain & RADEON_DOMAIN_VRAM) {
      if (ws->zero_all_vram_allocs || (flags & RADEON_FLAG_ZERO_VRAM))
         request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

@@ -504,7 +505,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
   if (flags & RADEON_FLAG_DISCARDABLE && ws->info.drm_minor >= 47)
      request.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   r = ac_drm_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "radv/amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "radv/amdgpu: size : %" PRIu64 " bytes\n", size);

@@ -515,7 +516,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
   }

   uint32_t kms_handle = 0;
   r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   r = ac_drm_bo_export(ws->dev, buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   assert(!r);

   r = radv_amdgpu_bo_va_op(ws, kms_handle, 0, size, va, flags, 0, AMDGPU_VA_OP_MAP);

@@ -557,13 +558,13 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
   *out_bo = (struct radeon_winsys_bo *)bo;
   return VK_SUCCESS;
error_va_map:
   amdgpu_bo_free(buf_handle);
   ac_drm_bo_free(ws->dev, buf_handle);

error_bo_alloc:
   free(ranges);

error_ranges_alloc:
   amdgpu_va_range_free(va_handle);
   ac_drm_va_range_free(va_handle);

error_va_alloc:
   FREE(bo);

@@ -643,7 +644,7 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
                               struct radeon_winsys_bo **out_bo)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   amdgpu_bo_handle buf_handle;
   ac_drm_bo buf_handle;
   struct radv_amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

@@ -659,7 +660,7 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
   if (!bo)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   ret = amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle);
   ret = ac_drm_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle);
   if (ret) {
      if (ret == -EINVAL) {
         result = VK_ERROR_INVALID_EXTERNAL_HANDLE;

@@ -674,17 +675,17 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
    */
   vm_alignment = radv_amdgpu_get_optimal_vm_alignment(ws, size, ws->info.gart_page_size);

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, vm_alignment, 0, &va, &va_handle,
   if (ac_drm_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, vm_alignment, 0, &va, &va_handle,
                             AMDGPU_VA_RANGE_HIGH)) {
      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      goto error_va_alloc;
   }

   uint32_t kms_handle = 0;
   ASSERTED int r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   ASSERTED int r = ac_drm_bo_export(ws->dev, buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   assert(!r);

   if (ac_drm_bo_va_op(ws->fd, kms_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP)) {
   if (ac_drm_bo_va_op(ws->dev, kms_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP)) {
      result = VK_ERROR_UNKNOWN;
      goto error_va_map;
   }

@@ -710,10 +711,10 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
   return VK_SUCCESS;

error_va_map:
   amdgpu_va_range_free(va_handle);
   ac_drm_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);
   ac_drm_bo_free(ws->dev, buf_handle);

error:
   FREE(bo);

@@ -729,7 +730,7 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
   uint64_t va;
   amdgpu_va_handle va_handle;
   enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
   struct amdgpu_bo_import_result result;
   struct ac_drm_bo_import_result result;
   struct amdgpu_bo_info info;
   enum radeon_bo_domain initial = 0;
   int r;

@@ -743,17 +744,17 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
   if (!bo)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   r = amdgpu_bo_import(ws->dev, type, fd, &result);
   r = ac_drm_bo_import(ws->dev, type, fd, &result);
   if (r) {
      vk_result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
      goto error;
   }

   uint32_t kms_handle = 0;
   r = amdgpu_bo_export(result.buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   r = ac_drm_bo_export(ws->dev, result.bo, amdgpu_bo_handle_type_kms, &kms_handle);
   assert(!r);

   r = ac_drm_bo_query_info(ws->fd, kms_handle, &info);
   r = ac_drm_bo_query_info(ws->dev, kms_handle, &info);
   if (r) {
      vk_result = VK_ERROR_UNKNOWN;
      goto error_query;

@@ -763,7 +764,7 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
      *alloc_size = info.alloc_size;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, result.alloc_size, 1 << 20, 0, &va, &va_handle,
   r = ac_drm_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, result.alloc_size, 1 << 20, 0, &va, &va_handle,
                             AMDGPU_VA_RANGE_HIGH);
   if (r) {
      vk_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;

@@ -781,7 +782,7 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   bo->bo = result.buf_handle;
   bo->bo = result.bo;
   bo->bo_handle = kms_handle;
   bo->base.va = va;
   bo->va_handle = va_handle;

@@ -803,10 +804,10 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
   *out_bo = (struct radeon_winsys_bo *)bo;
   return VK_SUCCESS;
error_va_map:
   amdgpu_va_range_free(va_handle);
   ac_drm_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);
   ac_drm_bo_free(ws->dev, result.bo);

error:
   FREE(bo);

@@ -816,11 +817,12 @@ error:
static bool
radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws, struct radeon_winsys_bo *_bo, int *fd)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
   int r;
   unsigned handle;
   r = amdgpu_bo_export(bo->bo, type, &handle);
   r = ac_drm_bo_export(ws->dev, bo->bo, type, &handle);
   if (r)
      return false;

@@ -833,23 +835,23 @@ radv_amdgpu_bo_get_flags_from_fd(struct radeon_winsys *_ws, int fd, enum radeon_
                                 enum radeon_bo_flag *flags)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct amdgpu_bo_import_result result = {0};
   struct ac_drm_bo_import_result result = {0};
   struct amdgpu_bo_info info = {0};
   int r;

   *domains = 0;
   *flags = 0;

   r = amdgpu_bo_import(ws->dev, amdgpu_bo_handle_type_dma_buf_fd, fd, &result);
   r = ac_drm_bo_import(ws->dev, amdgpu_bo_handle_type_dma_buf_fd, fd, &result);
   if (r)
      return false;

   uint32_t kms_handle = 0;
   r = amdgpu_bo_export(result.buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
   r = ac_drm_bo_export(ws->dev, result.bo, amdgpu_bo_handle_type_kms, &kms_handle);
   assert(!r);

   r = ac_drm_bo_query_info(ws->fd, kms_handle, &info);
   amdgpu_bo_free(result.buf_handle);
   r = ac_drm_bo_query_info(ws->dev, kms_handle, &info);
   ac_drm_bo_free(ws->dev, result.bo);
   if (r)
      return false;

@@ -975,7 +977,7 @@ radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys *_ws, struct radeon_wins
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   ac_drm_bo_set_metadata(ws->fd, bo->bo_handle, &metadata);
   ac_drm_bo_set_metadata(ws->dev, bo->bo_handle, &metadata);
}

static void

@@ -986,7 +988,7 @@ radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys *_ws, struct radeon_wins
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   struct amdgpu_bo_info info = {0};

   int r = ac_drm_bo_query_info(ws->fd, bo->bo_handle, &info);
   int r = ac_drm_bo_query_info(ws->dev, bo->bo_handle, &info);
   if (r)
      return;
@ -41,7 +41,7 @@ struct radv_amdgpu_winsys_bo {
union {
/* physical bo */
struct {
amdgpu_bo_handle bo;
ac_drm_bo bo;
uint32_t bo_handle;

void *cpu_map;
@ -1578,7 +1578,7 @@ radv_amdgpu_ctx_create(struct radeon_winsys *_ws, enum radeon_ctx_priority prior
if (!ctx)
return VK_ERROR_OUT_OF_HOST_MEMORY;

r = ac_drm_cs_ctx_create2(ws->fd, amdgpu_priority, &ctx->ctx_handle);
r = ac_drm_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx_handle);
if (r && r == -EACCES) {
result = VK_ERROR_NOT_PERMITTED;
goto fail_create;

@ -1601,7 +1601,7 @@ radv_amdgpu_ctx_create(struct radeon_winsys *_ws, enum radeon_ctx_priority prior
return VK_SUCCESS;

fail_alloc:
ac_drm_cs_ctx_free(ws->fd, ctx->ctx_handle);
ac_drm_cs_ctx_free(ws->dev, ctx->ctx_handle);
fail_create:
FREE(ctx);
return result;

@ -1620,7 +1620,7 @@ radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
}

ctx->ws->base.buffer_destroy(&ctx->ws->base, ctx->fence_bo);
ac_drm_cs_ctx_free(ctx->ws->fd, ctx->ctx_handle);
ac_drm_cs_ctx_free(ctx->ws->dev, ctx->ctx_handle);
FREE(ctx);
}

@ -1642,7 +1642,7 @@ radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx, enum amd_ip_type ip_t
if (ctx->last_submission[ip_type][ring_index].fence.fence) {
uint32_t expired;
int ret = ac_drm_cs_query_fence_status(
ctx->ws->fd, ctx->ctx_handle, ctx->last_submission[ip_type][ring_index].fence.ip_type,
ctx->ws->dev, ctx->ctx_handle, ctx->last_submission[ip_type][ring_index].fence.ip_type,
ctx->last_submission[ip_type][ring_index].fence.ip_instance,
ctx->last_submission[ip_type][ring_index].fence.ring, ctx->last_submission[ip_type][ring_index].fence.fence,
1000000000ull, 0, &expired);

@ -1681,7 +1681,7 @@ radv_amdgpu_ctx_set_pstate(struct radeon_winsys_ctx *rwctx, enum radeon_ctx_psta
uint32_t current_pstate = 0;
int r;

r = ac_drm_cs_ctx_stable_pstate(ctx->ws->fd, ctx->ctx_handle, AMDGPU_CTX_OP_GET_STABLE_PSTATE, 0, &current_pstate);
r = ac_drm_cs_ctx_stable_pstate(ctx->ws->dev, ctx->ctx_handle, AMDGPU_CTX_OP_GET_STABLE_PSTATE, 0, &current_pstate);
if (r) {
fprintf(stderr, "radv/amdgpu: failed to get current pstate\n");
return r;

@ -1693,7 +1693,7 @@ radv_amdgpu_ctx_set_pstate(struct radeon_winsys_ctx *rwctx, enum radeon_ctx_psta
if (current_pstate == new_pstate)
return 0;

r = ac_drm_cs_ctx_stable_pstate(ctx->ws->fd, ctx->ctx_handle, AMDGPU_CTX_OP_SET_STABLE_PSTATE, new_pstate, NULL);
r = ac_drm_cs_ctx_stable_pstate(ctx->ws->dev, ctx->ctx_handle, AMDGPU_CTX_OP_SET_STABLE_PSTATE, new_pstate, NULL);
if (r) {
fprintf(stderr, "radv/amdgpu: failed to set new pstate\n");
return r;

@ -1898,7 +1898,7 @@ radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request
if (r == -ENOMEM)
os_time_sleep(1000);

r = ac_drm_cs_submit_raw2(ctx->ws->fd, ctx->ctx_handle, 0, num_chunks, chunks, &request->seq_no);
r = ac_drm_cs_submit_raw2(ctx->ws->dev, ctx->ctx_handle, 0, num_chunks, chunks, &request->seq_no);
} while (r == -ENOMEM && os_time_get_nano() < abs_timeout_ns);

if (r) {
@ -68,34 +68,34 @@ radv_amdgpu_winsys_query_value(struct radeon_winsys *rws, enum radeon_value_id v
case RADEON_ALLOCATED_GTT:
return ws->allocated_gtt;
case RADEON_TIMESTAMP:
ac_drm_query_info(ws->fd, AMDGPU_INFO_TIMESTAMP, 8, &retval);
ac_drm_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
return retval;
case RADEON_NUM_BYTES_MOVED:
ac_drm_query_info(ws->fd, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
return retval;
case RADEON_NUM_EVICTIONS:
ac_drm_query_info(ws->fd, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
return retval;
case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
ac_drm_query_info(ws->fd, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
ac_drm_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
return retval;
case RADEON_VRAM_USAGE:
ac_drm_query_heap_info(ws->fd, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
return heap.heap_usage;
case RADEON_VRAM_VIS_USAGE:
ac_drm_query_heap_info(ws->fd, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
return heap.heap_usage;
case RADEON_GTT_USAGE:
ac_drm_query_heap_info(ws->fd, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
ac_drm_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
return heap.heap_usage;
case RADEON_GPU_TEMPERATURE:
ac_drm_query_sensor_info(ws->fd, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
return retval;
case RADEON_CURRENT_SCLK:
ac_drm_query_sensor_info(ws->fd, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
return retval;
case RADEON_CURRENT_MCLK:
ac_drm_query_sensor_info(ws->fd, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
ac_drm_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
return retval;
default:
unreachable("invalid query value");
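Every branch of this switch follows the same pattern, so a single standalone example is enough to show the call shape outside the winsys. This is an illustrative sketch that reuses only the ac_drm_query_sensor_info signature visible above; sketch_read_gpu_temp is a hypothetical helper, not Mesa code.

static uint32_t sketch_read_gpu_temp(ac_drm_device *dev)
{
   uint32_t temp = 0;
   /* The size argument (4) is the byte size of the destination, exactly as
    * in the RADEON_GPU_TEMPERATURE case above. */
   ac_drm_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &temp);
   return temp;
}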
@ -109,15 +109,15 @@ radv_amdgpu_winsys_read_registers(struct radeon_winsys *rws, unsigned reg_offset
{
struct radv_amdgpu_winsys *ws = (struct radv_amdgpu_winsys *)rws;

return ac_drm_read_mm_registers(ws->fd, reg_offset / 4, num_registers, 0xffffffff, 0, out) == 0;
return ac_drm_read_mm_registers(ws->dev, reg_offset / 4, num_registers, 0xffffffff, 0, out) == 0;
}

static const char *
radv_amdgpu_winsys_get_chip_name(struct radeon_winsys *rws)
{
amdgpu_device_handle dev = ((struct radv_amdgpu_winsys *)rws)->dev;
ac_drm_device *dev = ((struct radv_amdgpu_winsys *)rws)->dev;

return amdgpu_get_marketing_name(dev);
return ac_drm_get_marketing_name(dev);
}

static bool

@ -127,7 +127,7 @@ radv_amdgpu_winsys_query_gpuvm_fault(struct radeon_winsys *rws, struct radv_wins
struct drm_amdgpu_info_gpuvm_fault gpuvm_fault = {0};
int r;

r = ac_drm_query_info(ws->fd, AMDGPU_INFO_GPUVM_FAULT, sizeof(gpuvm_fault), &gpuvm_fault);
r = ac_drm_query_info(ws->dev, AMDGPU_INFO_GPUVM_FAULT, sizeof(gpuvm_fault), &gpuvm_fault);
if (r < 0) {
fprintf(stderr, "radv/amdgpu: Failed to query the last GPUVM fault (%d).\n", r);
return false;

@ -174,10 +174,10 @@ radv_amdgpu_winsys_destroy(struct radeon_winsys *rws)
free(ws->global_bo_list.bos);

if (ws->reserve_vmid)
ac_drm_vm_unreserve_vmid(ws->fd, 0);
ac_drm_vm_unreserve_vmid(ws->dev, 0);

u_rwlock_destroy(&ws->log_bo_list_lock);
amdgpu_device_deinitialize(ws->dev);
ac_drm_device_deinitialize(ws->dev);
FREE(rws);
}

@ -199,10 +199,10 @@ struct radeon_winsys *
radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags, bool reserve_vmid)
{
uint32_t drm_major, drm_minor, r;
amdgpu_device_handle dev;
ac_drm_device *dev;
struct radv_amdgpu_winsys *ws = NULL;

r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
r = ac_drm_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
fprintf(stderr, "radv/amdgpu: failed to initialize device.\n");
return NULL;

@ -225,7 +225,7 @@ radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags,

if (ws) {
simple_mtx_unlock(&winsys_creation_mutex);
amdgpu_device_deinitialize(dev);
ac_drm_device_deinitialize(dev);

/* Check that options don't differ from the existing winsys. */
if (((debug_flags & RADV_DEBUG_ALL_BOS) && !ws->debug_all_bos) ||

@ -248,7 +248,7 @@ radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags,

ws->refcount = 1;
ws->dev = dev;
ws->fd = amdgpu_device_get_fd(dev);
ws->fd = ac_drm_device_get_fd(dev);
ws->info.drm_major = drm_major;
ws->info.drm_minor = drm_minor;
if (!do_winsys_init(ws, fd))
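ws->dev is now the only thing the winsys stores about the kernel device, and the DRM fd is recovered from it with ac_drm_device_get_fd(). The fragment below is a guess at what the bare-metal handle could contain, not the actual ac_linux_drm.h definition; the only grounded points are that the handle wraps libdrm_amdgpu state and that the fd must be obtainable from it (libdrm_amdgpu de-duplicates devices, so this fd can differ from the one the caller opened).

#include <amdgpu.h>

/* Hypothetical bare-metal layout; a virtio native-context build would keep
 * different state here. */
struct sketch_drm_device {
   amdgpu_device_handle adev;
};

static inline int sketch_device_get_fd(struct sketch_drm_device *dev)
{
   /* Forward to libdrm_amdgpu, which owns the (possibly de-duplicated) fd. */
   return amdgpu_device_get_fd(dev->adev);
}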
@ -261,7 +261,7 @@ radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags,

ws->reserve_vmid = reserve_vmid;
if (ws->reserve_vmid) {
r = ac_drm_vm_reserve_vmid(ws->fd, 0);
r = ac_drm_vm_reserve_vmid(ws->dev, 0);
if (r) {
fprintf(stderr, "radv/amdgpu: failed to reserve vmid.\n");
goto winsys_fail;

@ -273,7 +273,6 @@ radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags,
if (ws->syncobj_sync_type.features) {
/* multi wait is always supported */
ws->syncobj_sync_type.features |= VK_SYNC_FEATURE_GPU_MULTI_WAIT;

ws->sync_types[num_sync_types++] = &ws->syncobj_sync_type;
if (!(ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE)) {
ws->emulated_timeline_sync_type = vk_sync_timeline_get_type(&ws->syncobj_sync_type);

@ -313,6 +312,6 @@ fail:
winsyses = NULL;
}
simple_mtx_unlock(&winsys_creation_mutex);
amdgpu_device_deinitialize(dev);
ac_drm_device_deinitialize(dev);
return NULL;
}
@ -16,6 +16,7 @@
#include "util/list.h"
#include "util/rwlock.h"
#include "ac_gpu_info.h"
#include "ac_linux_drm.h"
#include "radv_radeon_winsys.h"

#include "vk_sync.h"

@ -23,7 +24,7 @@

struct radv_amdgpu_winsys {
struct radeon_winsys base;
amdgpu_device_handle dev;
ac_drm_device *dev;
int fd;

struct radeon_info info;
@ -17,6 +17,7 @@
#include "frontend/drm_driver.h"
#include "drm-uapi/amdgpu_drm.h"
#include "drm-uapi/dma-buf.h"
#include "sid.h"
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>

@ -109,7 +110,7 @@ static bool amdgpu_bo_wait(struct radeon_winsys *rws,
if (timeout == 0 && usage & RADEON_USAGE_DISALLOW_SLOW_REPLY)
return false;

r = ac_drm_bo_wait_for_idle(aws->fd, get_real_bo(bo)->kms_handle, timeout, &buffer_busy);
r = ac_drm_bo_wait_for_idle(aws->dev, get_real_bo(bo)->bo, timeout, &buffer_busy);
if (r)
fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__, r);

@ -197,12 +198,12 @@ static int amdgpu_bo_va_op_common(struct amdgpu_winsys *aws, struct amdgpu_winsy
*/
p_atomic_set(vm_timeline_point, aws->vm_timeline_seq_num);
}
r = ac_drm_bo_va_op_raw2(aws->fd, bo_handle, offset, size, addr, flags, ops,
r = ac_drm_bo_va_op_raw2(aws->dev, bo_handle, offset, size, addr, flags, ops,
aws->vm_timeline_syncobj, aws->vm_timeline_seq_num,
(uintptr_t)&syncobj_arr, num_fences);
simple_mtx_unlock(&aws->vm_ioctl_lock);
} else {
r = ac_drm_bo_va_op_raw(aws->fd, bo_handle, offset, size, addr, flags, ops);
r = ac_drm_bo_va_op_raw(aws->dev, bo_handle, offset, size, addr, flags, ops);
}

return r;

@ -249,14 +250,14 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *aws, struct pb_buffer_lean *_buf)
return;
}

_mesa_hash_table_remove_key(aws->bo_export_table, bo->bo_handle);
_mesa_hash_table_remove_key(aws->bo_export_table, bo->bo.abo);

if (bo->b.base.placement & RADEON_DOMAIN_VRAM_GTT) {
amdgpu_bo_va_op_common(aws, amdgpu_winsys_bo(_buf), bo->kms_handle, true, NULL, 0,
bo->b.base.size, amdgpu_va_get_start_addr(bo->va_handle),
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->va_handle);
ac_drm_va_range_free(bo->va_handle);
}

simple_mtx_unlock(&aws->bo_export_table_lock);

@ -267,7 +268,7 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *aws, struct pb_buffer_lean *_buf)
}
assert(bo->is_user_ptr || bo->map_count == 0);

amdgpu_bo_free(bo->bo_handle);
ac_drm_bo_free(aws->dev, bo->bo);

#if MESA_DEBUG
if (aws->debug_all_bos) {

@ -332,11 +333,13 @@ static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_bo_real *b

assert(!bo->is_user_ptr);

int r = amdgpu_bo_cpu_map(bo->bo_handle, cpu);
*cpu = NULL;
int r = ac_drm_bo_cpu_map(aws->dev, bo->bo, cpu);

if (r) {
/* Clean up buffer managers and try again. */
amdgpu_clean_up_buffer_managers(aws);
r = amdgpu_bo_cpu_map(bo->bo_handle, cpu);
r = ac_drm_bo_cpu_map(aws->dev, bo->bo, cpu);
if (r)
return false;
}

@ -509,7 +512,8 @@ void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *buf)
aws->num_mapped_buffers--;
}

amdgpu_bo_cpu_unmap(real->bo_handle);
assert(aws->dev);
ac_drm_bo_cpu_unmap(aws->dev, real->bo);
}

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys *aws, struct amdgpu_bo_real *bo)
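CPU mapping also goes through the device handle now. Continuing the hypothetical sketch types introduced after the radv buffer hunks above, the bare-metal case would reduce to plain libdrm_amdgpu calls; the extra device argument only becomes meaningful for a virtualized backend. This is an assumption about the forwarding, not the actual ac_linux_drm implementation.

static inline int sketch_bo_cpu_map(amdgpu_device_handle dev, sketch_bo bo, void **cpu)
{
   (void)dev;
   return amdgpu_bo_cpu_map(bo.abo, cpu);
}

static inline void sketch_bo_cpu_unmap(amdgpu_device_handle dev, sketch_bo bo)
{
   (void)dev;
   amdgpu_bo_cpu_unmap(bo.abo);
}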
@ -548,7 +552,7 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
int heap)
{
struct amdgpu_bo_alloc_request request = {0};
amdgpu_bo_handle buf_handle;
ac_drm_bo buf_handle;
uint64_t va = 0;
struct amdgpu_bo_real *bo;
amdgpu_va_handle va_handle = NULL;

@ -644,7 +648,7 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
if (flags & RADEON_FLAG_GFX12_ALLOW_DCC)
request.flags |= AMDGPU_GEM_CREATE_GFX12_DCC;

r = amdgpu_bo_alloc(aws->dev, &request, &buf_handle);
r = ac_drm_bo_alloc(aws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);

@ -655,12 +659,12 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
}

uint32_t kms_handle = 0;
amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
ac_drm_bo_export(aws->dev, buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);

if (initial_domain & RADEON_DOMAIN_VRAM_GTT) {
unsigned va_gap_size = aws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;

r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
r = ac_drm_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
size + va_gap_size, alignment,
0, &va, &va_handle,
(flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |

@ -688,7 +692,7 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
bo->b.base.usage = flags;
bo->b.base.size = size;
bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
bo->bo_handle = buf_handle;
bo->bo = buf_handle;
bo->va_handle = va_handle;
bo->kms_handle = kms_handle;

@ -702,10 +706,10 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *aws,
return &bo->b;

error_va_map:
amdgpu_va_range_free(va_handle);
ac_drm_va_range_free(va_handle);

error_va_alloc:
amdgpu_bo_free(buf_handle);
ac_drm_bo_free(aws->dev, buf_handle);

error_bo_alloc:
FREE(bo);

@ -1128,7 +1132,7 @@ static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer
struct amdgpu_sparse_backing, list));
}

amdgpu_va_range_free(bo->va_handle);
ac_drm_va_range_free(bo->va_handle);
FREE(bo->commitments);
simple_mtx_destroy(&bo->commit_lock);
FREE(bo);

@ -1176,9 +1180,9 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *aws, uint64_t size,
va_gap_size = aws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;

uint64_t gpu_address;
r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
0, &gpu_address, &bo->va_handle, AMDGPU_VA_RANGE_HIGH);
r = ac_drm_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
0, &gpu_address, &bo->va_handle, AMDGPU_VA_RANGE_HIGH);
if (r)
goto error_va_alloc;

@ -1190,7 +1194,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *aws, uint64_t size,
return &bo->b.base;

error_va_map:
amdgpu_va_range_free(bo->va_handle);
ac_drm_va_range_free(bo->va_handle);
error_va_alloc:
FREE(bo->commitments);
error_alloc_commitments:

@ -1394,7 +1398,7 @@ static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
enum amd_gfx_level gfx_level = aws->info.gfx_level;
int r;

r = ac_drm_bo_query_info(aws->fd, bo->kms_handle, &info);
r = ac_drm_bo_query_info(aws->dev, bo->kms_handle, &info);
if (r)
return;

@ -1433,7 +1437,7 @@ static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
metadata.size_metadata = md->size_metadata;
memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

ac_drm_bo_set_metadata(aws->fd, real->kms_handle, &metadata);
ac_drm_bo_set_metadata(aws->dev, real->kms_handle, &metadata);
}

struct pb_buffer_lean *

@ -1585,7 +1589,7 @@ static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct amdgpu_winsys *aws = amdgpu_winsys(rws);
struct amdgpu_bo_real *bo = NULL;
enum amdgpu_bo_handle_type type;
struct amdgpu_bo_import_result result = {0};
struct ac_drm_bo_import_result result = {0};
uint64_t va;
amdgpu_va_handle va_handle = NULL;
struct amdgpu_bo_info info = {0};

@ -1604,12 +1608,12 @@ static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
return NULL;
}

r = amdgpu_bo_import(aws->dev, type, whandle->handle, &result);
r = ac_drm_bo_import(aws->dev, type, whandle->handle, &result);
if (r)
return NULL;

simple_mtx_lock(&aws->bo_export_table_lock);
bo = util_hash_table_get(aws->bo_export_table, result.buf_handle);
bo = util_hash_table_get(aws->bo_export_table, result.bo.abo);

/* If the amdgpu_winsys_bo instance already exists, bump the reference
* counter and return it.

@ -1622,23 +1626,23 @@ static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
* This function is returning an existing buffer, which has its own
* handle.
*/
amdgpu_bo_free(result.buf_handle);
ac_drm_bo_free(aws->dev, result.bo);
return &bo->b.base;
}

uint32_t kms_handle;
amdgpu_bo_export(result.buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
ac_drm_bo_export(aws->dev, result.bo, amdgpu_bo_handle_type_kms, &kms_handle);

/* Get initial domains. */
r = ac_drm_bo_query_info(aws->fd, kms_handle, &info);
r = ac_drm_bo_query_info(aws->dev, kms_handle, &info);
if (r)
goto error;

r = amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
result.alloc_size,
amdgpu_get_optimal_alignment(aws, result.alloc_size,
vm_alignment),
0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
r = ac_drm_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
result.alloc_size,
amdgpu_get_optimal_alignment(aws, result.alloc_size,
vm_alignment),
0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
if (r)
goto error;

@ -1683,7 +1687,7 @@ static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
bo->b.type = AMDGPU_BO_REAL;
bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
simple_mtx_init(&bo->map_lock, mtx_plain);
bo->bo_handle = result.buf_handle;
bo->bo = result.bo;
bo->va_handle = va_handle;
bo->kms_handle = kms_handle;
bo->is_shared = true;

@ -1695,7 +1699,7 @@ static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,

amdgpu_add_buffer_to_global_list(aws, bo);

_mesa_hash_table_insert(aws->bo_export_table, bo->bo_handle, bo);
_mesa_hash_table_insert(aws->bo_export_table, bo->bo.abo, bo);
simple_mtx_unlock(&aws->bo_export_table_lock);

return &bo->b.base;

@ -1705,8 +1709,8 @@ error:
if (bo)
FREE(bo);
if (va_handle)
amdgpu_va_range_free(va_handle);
amdgpu_bo_free(result.buf_handle);
ac_drm_va_range_free(va_handle);
ac_drm_bo_free(aws->dev, result.bo);
return NULL;
}
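For reference, here is the import path from the radv and gallium hunks of this commit condensed into one sequence. It is a sketch, not Mesa code: it assumes the ac_drm_* declarations exactly as they are used in this diff, uses an illustrative error code, and drops the winsys bookkeeping (export table, metadata, reference counting) to show only the call order.

static int sketch_import_dmabuf(ac_drm_device *dev, int dmabuf_fd, uint64_t *out_va)
{
   struct ac_drm_bo_import_result result = {0};
   struct amdgpu_bo_info info = {0};
   amdgpu_va_handle va_handle;
   uint32_t kms_handle = 0;
   uint64_t va;
   int r;

   r = ac_drm_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd, dmabuf_fd, &result);
   if (r)
      return r;

   /* The KMS handle is what the query/VA wrappers operate on. */
   ac_drm_bo_export(dev, result.bo, amdgpu_bo_handle_type_kms, &kms_handle);
   r = ac_drm_bo_query_info(dev, kms_handle, &info);
   if (r)
      goto fail;

   r = ac_drm_va_range_alloc(dev, amdgpu_gpu_va_range_general, result.alloc_size,
                             1 << 20, 0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
   if (r)
      goto fail;

   if (ac_drm_bo_va_op(dev, kms_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP)) {
      ac_drm_va_range_free(va_handle);
      r = -EINVAL;   /* illustrative error code, not taken from the diff */
      goto fail;
   }

   *out_va = va;
   return 0;

fail:
   ac_drm_bo_free(dev, result.bo);
   return r;
}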
@ -1758,7 +1762,7 @@ static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
return false;
}

r = amdgpu_bo_export(bo->bo_handle, type, &whandle->handle);
r = ac_drm_bo_export(aws->dev, bo->bo, type, &whandle->handle);
if (r)
return false;

@ -1789,7 +1793,7 @@ static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,

hash_table_set:
simple_mtx_lock(&aws->bo_export_table_lock);
_mesa_hash_table_insert(aws->bo_export_table, bo->bo_handle, bo);
_mesa_hash_table_insert(aws->bo_export_table, bo->bo.abo, bo);
simple_mtx_unlock(&aws->bo_export_table_lock);

bo->is_shared = true;

@ -1801,7 +1805,7 @@ static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
enum radeon_bo_flag flags)
{
struct amdgpu_winsys *aws = amdgpu_winsys(rws);
amdgpu_bo_handle buf_handle;
ac_drm_bo buf_handle;
struct amdgpu_bo_real *bo;
uint64_t va;
amdgpu_va_handle va_handle;

@ -1812,19 +1816,19 @@ static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
if (!bo)
return NULL;

if (amdgpu_create_bo_from_user_mem(aws->dev, pointer,
aligned_size, &buf_handle))
if (ac_drm_create_bo_from_user_mem(aws->dev, pointer,
aligned_size, &buf_handle))
goto error;

if (amdgpu_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
aligned_size,
amdgpu_get_optimal_alignment(aws, aligned_size,
aws->info.gart_page_size),
0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH))
if (ac_drm_va_range_alloc(aws->dev, amdgpu_gpu_va_range_general,
aligned_size,
amdgpu_get_optimal_alignment(aws, aligned_size,
aws->info.gart_page_size),
0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH))
goto error_va_alloc;

uint32_t kms_handle;
amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);
ac_drm_bo_export(aws->dev, buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);

if (amdgpu_bo_va_op_common(aws, NULL, kms_handle, false, &bo->vm_timeline_point, 0,
aligned_size, va, AMDGPU_VM_PAGE_READABLE |

@ -1841,7 +1845,7 @@ static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
bo->b.type = AMDGPU_BO_REAL;
bo->b.unique_id = __sync_fetch_and_add(&aws->next_bo_unique_id, 1);
simple_mtx_init(&bo->map_lock, mtx_plain);
bo->bo_handle = buf_handle;
bo->bo = buf_handle;
bo->cpu_ptr = pointer;
bo->va_handle = va_handle;
bo->kms_handle = kms_handle;

@ -1853,10 +1857,10 @@ static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
return (struct pb_buffer_lean*)bo;

error_va_map:
amdgpu_va_range_free(va_handle);
ac_drm_va_range_free(va_handle);

error_va_alloc:
amdgpu_bo_free(buf_handle);
ac_drm_bo_free(aws->dev, buf_handle);

error:
FREE(bo);
@ -87,7 +87,7 @@ struct amdgpu_winsys_bo {
struct amdgpu_bo_real {
struct amdgpu_winsys_bo b;

amdgpu_bo_handle bo_handle;
ac_drm_bo bo;
amdgpu_va_handle va_handle;
/* Timeline point of latest VM ioctl completion. Only used in userqueue. */
uint64_t vm_timeline_point;
@ -8,6 +8,7 @@

#include "amdgpu_cs.h"
#include "util/detect_os.h"
#include "amdgpu_winsys.h"
#include "util/os_time.h"
#include <inttypes.h>
#include <stdio.h>

@ -209,7 +210,6 @@ bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,

if (ac_drm_cs_syncobj_wait(afence->aws->fd, &afence->syncobj, 1,
abs_timeout, 0, NULL))

return false;

afence->signalled = true;

@ -272,7 +272,8 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *rws,
int r;
struct amdgpu_bo_alloc_request alloc_buffer = {};
uint32_t amdgpu_priority = radeon_to_amdgpu_priority(priority);
amdgpu_bo_handle buf_handle;
ac_drm_device *dev;
ac_drm_bo buf_handle;

if (!ctx)
return NULL;

@ -281,7 +282,9 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *rws,
ctx->reference.count = 1;
ctx->allow_context_lost = allow_context_lost;

r = ac_drm_cs_ctx_create2(ctx->aws->fd, amdgpu_priority, &ctx->ctx_handle);
dev = ctx->aws->dev;

r = ac_drm_cs_ctx_create2(dev, amdgpu_priority, &ctx->ctx_handle);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
goto error_create;

@ -291,13 +294,14 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *rws,
alloc_buffer.phys_alignment = ctx->aws->info.gart_page_size;
alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

r = amdgpu_bo_alloc(ctx->aws->dev, &alloc_buffer, &buf_handle);
r = ac_drm_bo_alloc(dev, &alloc_buffer, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
goto error_user_fence_alloc;
}

r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
ctx->user_fence_cpu_address_base = NULL;
r = ac_drm_bo_cpu_map(dev, buf_handle, (void**)&ctx->user_fence_cpu_address_base);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
goto error_user_fence_map;

@ -305,14 +309,15 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *rws,

memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
ctx->user_fence_bo = buf_handle;
amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &ctx->user_fence_bo_kms_handle);
ac_drm_bo_export(dev, buf_handle, amdgpu_bo_handle_type_kms, &ctx->user_fence_bo_kms_handle);

return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
amdgpu_bo_free(buf_handle);
ac_drm_bo_free(dev, buf_handle);

error_user_fence_alloc:
ac_drm_cs_ctx_free(ctx->aws->fd, ctx->ctx_handle);
ac_drm_cs_ctx_free(dev, ctx->ctx_handle);
error_create:
FREE(ctx);
return NULL;

@ -355,7 +360,7 @@ static int amdgpu_submit_gfx_nop(struct amdgpu_ctx *ctx)
struct amdgpu_bo_alloc_request request = {0};
struct drm_amdgpu_bo_list_in bo_list_in;
struct drm_amdgpu_cs_chunk_ib ib_in = {0};
amdgpu_bo_handle buf_handle;
ac_drm_bo bo;
amdgpu_va_handle va_handle = NULL;
struct drm_amdgpu_cs_chunk chunks[2];
struct drm_amdgpu_bo_list_entry list;

@ -370,42 +375,44 @@ static int amdgpu_submit_gfx_nop(struct amdgpu_ctx *ctx)
* that the reset is not complete.
*/
uint32_t temp_ctx_handle;
r = ac_drm_cs_ctx_create2(ctx->aws->fd, AMDGPU_CTX_PRIORITY_NORMAL, &temp_ctx_handle);
r = ac_drm_cs_ctx_create2(ctx->aws->dev, AMDGPU_CTX_PRIORITY_NORMAL, &temp_ctx_handle);
if (r)
return r;

request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
request.alloc_size = 4096;
request.phys_alignment = 4096;
r = amdgpu_bo_alloc(ctx->aws->dev, &request, &buf_handle);
r = ac_drm_bo_alloc(ctx->aws->dev, &request, &bo);
if (r)
goto destroy_ctx;

uint32_t kms_handle;
amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &kms_handle);

r = amdgpu_va_range_alloc(ctx->aws->dev, amdgpu_gpu_va_range_general,
request.alloc_size, request.phys_alignment,
0, &va, &va_handle,
AMDGPU_VA_RANGE_32_BIT | AMDGPU_VA_RANGE_HIGH);
r = ac_drm_va_range_alloc(ctx->aws->dev, amdgpu_gpu_va_range_general,
request.alloc_size, request.phys_alignment,
0, &va, &va_handle,
AMDGPU_VA_RANGE_32_BIT | AMDGPU_VA_RANGE_HIGH);
if (r)
goto destroy_bo;
r = ac_drm_bo_va_op_raw(ctx->aws->fd, kms_handle, 0, request.alloc_size, va,

uint32_t kms_handle;
ac_drm_bo_export(ctx->aws->dev, bo, amdgpu_bo_handle_type_kms, &kms_handle);

r = ac_drm_bo_va_op_raw(ctx->aws->dev, kms_handle, 0, request.alloc_size, va,
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE,
AMDGPU_VA_OP_MAP);
if (r)
goto destroy_bo;

r = amdgpu_bo_cpu_map(buf_handle, &cpu);
r = ac_drm_bo_cpu_map(ctx->aws->dev, bo, &cpu);
if (r)
goto destroy_bo;

noop_dw_size = ctx->aws->info.ip[AMD_IP_GFX].ib_pad_dw_mask + 1;
((uint32_t*)cpu)[0] = PKT3(PKT3_NOP, noop_dw_size - 2, 0);

amdgpu_bo_cpu_unmap(buf_handle);
ac_drm_bo_cpu_unmap(ctx->aws->dev, bo);

amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &list.bo_handle);
list.bo_handle = kms_handle;
ac_drm_bo_export(ctx->aws->dev, bo, amdgpu_bo_handle_type_kms, &list.bo_handle);
list.bo_priority = 0;

bo_list_in.list_handle = ~0;

@ -425,14 +432,14 @@ static int amdgpu_submit_gfx_nop(struct amdgpu_ctx *ctx)
chunks[1].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
chunks[1].chunk_data = (uintptr_t)&ib_in;

r = ac_drm_cs_submit_raw2(ctx->aws->fd, temp_ctx_handle, 0, 2, chunks, &seq_no);
r = ac_drm_cs_submit_raw2(ctx->aws->dev, temp_ctx_handle, 0, 2, chunks, &seq_no);

destroy_bo:
if (va_handle)
amdgpu_va_range_free(va_handle);
amdgpu_bo_free(buf_handle);
ac_drm_va_range_free(va_handle);
ac_drm_bo_free(ctx->aws->dev, bo);
destroy_ctx:
ac_drm_cs_ctx_free(ctx->aws->fd, temp_ctx_handle);
ac_drm_cs_ctx_free(ctx->aws->dev, temp_ctx_handle);

return r;
}
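The GFX nop submission above also doubles as a compact example of the new submit path. The sketch below isolates just the retry idiom shared by radv_amdgpu_cs_submit() and amdgpu_cs_submit_ib_kernelq() in this commit; identifiers follow the hunks, it is not a complete program, and the uint64_t sequence-number type is assumed from the &seq_no usage.

static int sketch_submit_with_retry(ac_drm_device *dev, uint32_t ctx_handle,
                                    unsigned num_chunks,
                                    struct drm_amdgpu_cs_chunk *chunks,
                                    uint64_t *seq_no)
{
   int r;
   do {
      r = ac_drm_cs_submit_raw2(dev, ctx_handle, 0, num_chunks, chunks, seq_no);
      if (r == -ENOMEM)
         os_time_sleep(1000);   /* back off before retrying, as the callers above do */
   } while (r == -ENOMEM);
   return r;
}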
@ -492,7 +499,7 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx, bool full_reset_o
* that the context reset is complete.
*/
if (ctx->sw_status != PIPE_NO_RESET) {
int r = ac_drm_cs_query_reset_state2(ctx->aws->fd, ctx->ctx_handle, &flags);
int r = ac_drm_cs_query_reset_state2(ctx->aws->dev, ctx->ctx_handle, &flags);
if (!r) {
if (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET) {
if (reset_completed) {

@ -1367,7 +1374,7 @@ static int amdgpu_cs_submit_ib_kernelq(struct amdgpu_cs *acs,
if (r == -ENOMEM)
os_time_sleep(1000);

r = ac_drm_cs_submit_raw2(aws->fd, acs->ctx->ctx_handle, 0, num_chunks, chunks, seq_no);
r = ac_drm_cs_submit_raw2(aws->dev, acs->ctx->ctx_handle, 0, num_chunks, chunks, seq_no);
} while (r == -ENOMEM);

return r;

@ -1509,7 +1516,7 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
* To implement this strategy, we use amdgpu_userq_wait() before submitting
* a job, and amdgpu_userq_signal() after to indicate completion.
*/
r = ac_drm_userq_wait(aws->fd, &userq_wait_data);
r = ac_drm_userq_wait(aws->dev, &userq_wait_data);
if (r)
fprintf(stderr, "amdgpu: getting wait num_fences failed\n");

@ -1517,7 +1524,7 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
alloca(userq_wait_data.num_fences * sizeof(struct drm_amdgpu_userq_fence_info));
userq_wait_data.out_fences = (uintptr_t)fence_info;

r = ac_drm_userq_wait(aws->fd, &userq_wait_data);
r = ac_drm_userq_wait(aws->dev, &userq_wait_data);
if (r)
fprintf(stderr, "amdgpu: getting wait fences failed\n");

@ -1533,7 +1540,7 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
.num_bo_write_handles = num_shared_buf_write,
};

r = ac_drm_userq_signal(aws->fd, &userq_signal_data);
r = ac_drm_userq_signal(aws->dev, &userq_signal_data);
if (!r)
userq->doorbell_bo_map[AMDGPU_USERQ_DOORBELL_INDEX] = *userq->wptr_bo_map;
@ -27,7 +27,7 @@ struct amdgpu_ctx {
struct pipe_reference reference;
uint32_t ctx_handle;
struct amdgpu_winsys *aws;
amdgpu_bo_handle user_fence_bo;
ac_drm_bo user_fence_bo;
uint32_t user_fence_bo_kms_handle;
uint64_t *user_fence_cpu_address_base;

@ -193,9 +193,10 @@ static inline void amdgpu_ctx_reference(struct amdgpu_ctx **dst, struct amdgpu_c

if (pipe_reference(old_dst ? &old_dst->reference : NULL,
src ? &src->reference : NULL)) {
ac_drm_cs_ctx_free(old_dst->aws->fd, old_dst->ctx_handle);
amdgpu_bo_cpu_unmap(old_dst->user_fence_bo);
amdgpu_bo_free(old_dst->user_fence_bo);
ac_drm_device *dev = old_dst->aws->dev;
ac_drm_bo_cpu_unmap(dev, old_dst->user_fence_bo);
ac_drm_bo_free(dev, old_dst->user_fence_bo);
ac_drm_cs_ctx_free(dev, old_dst->ctx_handle);
FREE(old_dst);
}
*dst = src;
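amdgpu_ctx_reference() above now tears the context down strictly through the device handle. Here is the same release order in isolation, as a hedged sketch that uses only calls whose signatures appear in this diff; sketch_ctx_release is a hypothetical helper, not part of the winsys.

static void sketch_ctx_release(ac_drm_device *dev, uint32_t ctx_handle, ac_drm_bo fence_bo)
{
   /* Same order as the hunk above: drop the CPU mapping of the user fence
    * BO, free the BO, then free the kernel context. */
   ac_drm_bo_cpu_unmap(dev, fence_bo);
   ac_drm_bo_free(dev, fence_bo);
   ac_drm_cs_ctx_free(dev, ctx_handle);
}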
@ -53,7 +53,7 @@ void
amdgpu_userq_deinit(struct amdgpu_winsys *aws, struct amdgpu_userq *userq)
{
if (userq->userq_handle)
ac_drm_free_userqueue(aws->fd, userq->userq_handle);
ac_drm_free_userqueue(aws->dev, userq->userq_handle);

radeon_bo_reference(&aws->dummy_sws.base, &userq->gtt_bo, NULL);
radeon_bo_reference(&aws->dummy_sws.base, &userq->wptr_bo, NULL);

@ -163,7 +163,7 @@ amdgpu_userq_init(struct amdgpu_winsys *aws, struct amdgpu_userq *userq, enum am
/* The VA page table for ring buffer should be ready before job submission so that the packets
* submitted can be read by gpu. The same applies to rptr, wptr buffers also.
*/
r = amdgpu_cs_syncobj_timeline_wait(aws->dev, &aws->vm_timeline_syncobj,
r = ac_drm_cs_syncobj_timeline_wait(aws->fd, &aws->vm_timeline_syncobj,
&get_real_bo(amdgpu_winsys_bo(userq->doorbell_bo))
->vm_timeline_point,
1, INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |

@ -174,7 +174,7 @@ amdgpu_userq_init(struct amdgpu_winsys *aws, struct amdgpu_userq *userq, enum am
}

uint64_t ring_va = amdgpu_bo_get_va(userq->gtt_bo);
r = ac_drm_create_userqueue(aws->fd, hw_ip_type,
r = ac_drm_create_userqueue(aws->dev, hw_ip_type,
get_real_bo(amdgpu_winsys_bo(userq->doorbell_bo))->kms_handle,
AMDGPU_USERQ_DOORBELL_INDEX, ring_va, AMDGPU_USERQ_RING_SIZE,
amdgpu_bo_get_va(userq->wptr_bo), amdgpu_bo_get_va(userq->rptr_bo),
@ -68,7 +68,7 @@ static bool do_winsys_init(struct amdgpu_winsys *aws,
return true;

fail:
amdgpu_device_deinitialize(aws->dev);
ac_drm_device_deinitialize(aws->dev);
aws->dev = NULL;
return false;
}

@ -76,7 +76,7 @@ fail:
static void do_winsys_deinit(struct amdgpu_winsys *aws)
{
if (aws->reserve_vmid)
ac_drm_vm_unreserve_vmid(aws->fd, 0);
ac_drm_vm_unreserve_vmid(aws->dev, 0);

for (unsigned i = 0; i < ARRAY_SIZE(aws->queues); i++) {
for (unsigned j = 0; j < ARRAY_SIZE(aws->queues[i].fences); j++)

@ -102,8 +102,8 @@ static void do_winsys_deinit(struct amdgpu_winsys *aws)
simple_mtx_destroy(&aws->bo_export_table_lock);

ac_addrlib_destroy(aws->addrlib);
amdgpu_device_deinitialize(aws->dev);
drmSyncobjDestroy(aws->fd, aws->vm_timeline_syncobj);
ac_drm_device_deinitialize(aws->dev);
ac_drm_cs_destroy_syncobj(aws->fd, aws->vm_timeline_syncobj);
simple_mtx_destroy(&aws->bo_fence_lock);

FREE(aws);

@ -189,7 +189,7 @@ static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
case RADEON_NUM_MAPPED_BUFFERS:
return aws->num_mapped_buffers;
case RADEON_TIMESTAMP:
ac_drm_query_info(aws->fd, AMDGPU_INFO_TIMESTAMP, 8, &retval);
ac_drm_query_info(aws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
return retval;
case RADEON_NUM_GFX_IBS:
return aws->num_gfx_IBs;

@ -200,32 +200,32 @@ static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
case RADEON_GFX_IB_SIZE_COUNTER:
return aws->gfx_ib_size_counter;
case RADEON_NUM_BYTES_MOVED:
ac_drm_query_info(aws->fd, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
return retval;
case RADEON_NUM_EVICTIONS:
ac_drm_query_info(aws->fd, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
return retval;
case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
ac_drm_query_info(aws->fd, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
ac_drm_query_info(aws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
return retval;
case RADEON_VRAM_USAGE:
ac_drm_query_heap_info(aws->fd, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
return heap.heap_usage;
case RADEON_VRAM_VIS_USAGE:
ac_drm_query_heap_info(aws->fd, AMDGPU_GEM_DOMAIN_VRAM,
ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
return heap.heap_usage;
case RADEON_GTT_USAGE:
ac_drm_query_heap_info(aws->fd, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
ac_drm_query_heap_info(aws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
return heap.heap_usage;
case RADEON_GPU_TEMPERATURE:
ac_drm_query_sensor_info(aws->fd, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
return retval;
case RADEON_CURRENT_SCLK:
ac_drm_query_sensor_info(aws->fd, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
return retval;
case RADEON_CURRENT_MCLK:
ac_drm_query_sensor_info(aws->fd, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
ac_drm_query_sensor_info(aws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
return retval;
case RADEON_CS_THREAD_TIME:
return util_queue_get_thread_time_nano(&aws->cs_queue, 0);

@ -239,7 +239,7 @@ static bool amdgpu_read_registers(struct radeon_winsys *rws,
{
struct amdgpu_winsys *aws = amdgpu_winsys(rws);

return ac_drm_read_mm_registers(aws->fd, reg_offset / 4, num_registers,
return ac_drm_read_mm_registers(aws->dev, reg_offset / 4, num_registers,
0xffffffff, 0, out) == 0;
}

@ -337,7 +337,7 @@ amdgpu_cs_set_pstate(struct radeon_cmdbuf *rcs, enum radeon_ctx_pstate pstate)
return false;

uint32_t amdgpu_pstate = radeon_to_amdgpu_pstate(pstate);
return ac_drm_cs_ctx_stable_pstate(cs->aws->fd, cs->ctx->ctx_handle,
return ac_drm_cs_ctx_stable_pstate(cs->aws->dev, cs->ctx->ctx_handle,
AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL) == 0;
}

@ -377,7 +377,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
{
struct amdgpu_screen_winsys *sws;
struct amdgpu_winsys *aws;
amdgpu_device_handle dev;
ac_drm_device *dev;
uint32_t drm_major, drm_minor;
int r;

@ -395,7 +395,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,

/* Initialize the amdgpu device. This should always return the same pointer
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
r = ac_drm_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
goto fail;

@ -410,7 +410,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
* This function is returning an existing winsys instance, which
* has its own device handle.
*/
amdgpu_device_deinitialize(dev);
ac_drm_device_deinitialize((void*)dev);

simple_mtx_lock(&aws->sws_list_lock);
for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {

@ -437,12 +437,13 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
goto fail;

aws->dev = dev;

/* The device fd might be different from the one we passed because of
* libdrm_amdgpu device dedup logic. This can happen if radv is initialized
* first.
* Get the correct fd or the buffer sharing will not work (see #3424).
*/
aws->fd = amdgpu_device_get_fd(dev);
aws->fd = ac_drm_device_get_fd(dev);
if (!are_file_descriptions_equal(aws->fd, fd)) {
sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
kms_handle_equals);

@ -454,7 +455,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
aws->info.drm_major = drm_major;
aws->info.drm_minor = drm_minor;

if (amdgpu_cs_create_syncobj(dev, &aws->vm_timeline_syncobj))
if (ac_drm_cs_create_syncobj(aws->fd, &aws->vm_timeline_syncobj))
goto fail_alloc;
simple_mtx_init(&aws->vm_ioctl_lock, mtx_plain);

@ -515,7 +516,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
_mesa_hash_table_insert(dev_tab, dev, aws);

if (aws->reserve_vmid) {
r = ac_drm_vm_reserve_vmid(aws->fd, 0);
r = ac_drm_vm_reserve_vmid(aws->dev, 0);
if (r) {
amdgpu_winsys_destroy_locked(&sws->base, true);
simple_mtx_unlock(&dev_tab_mutex);
@ -13,6 +13,7 @@
#include "winsys/radeon_winsys.h"
#include "util/simple_mtx.h"
#include "util/u_queue.h"
#include "ac_linux_drm.h"
#include <amdgpu.h>
#include "amdgpu_userq.h"

@ -200,7 +201,7 @@ struct amdgpu_winsys {
struct pb_cache bo_cache;
struct pb_slabs bo_slabs; /* Slab allocator. */

amdgpu_device_handle dev;
ac_drm_device *dev;

simple_mtx_t bo_fence_lock;