Compare commits


No commits in common. "main" and "libdrm-2.4.111" have entirely different histories.

148 changed files with 18653 additions and 7720 deletions


@ -12,7 +12,7 @@
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
.templates_sha: &template_sha c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb # see https://docs.gitlab.com/ee/ci/yaml/#includefile
.templates_sha: &template_sha 567700e483aabed992d0a4fea84994a0472deff6 # see https://docs.gitlab.com/ee/ci/yaml/#includefile
include:
- project: 'freedesktop/ci-templates'
@ -23,7 +23,7 @@ include:
- '/templates/ci-fairy.yml'
variables:
FDO_UPSTREAM_REPO: mesa/libdrm
FDO_UPSTREAM_REPO: mesa/drm
FDO_REPO_SUFFIX: "$BUILD_OS/$BUILD_ARCH"
stages:
@ -39,11 +39,12 @@ stages:
.os-debian:
variables:
BUILD_OS: debian
FDO_DISTRIBUTION_VERSION: bookworm
FDO_DISTRIBUTION_VERSION: buster
FDO_DISTRIBUTION_PACKAGES: 'build-essential docbook-xsl libatomic-ops-dev libcairo2-dev libcunit1-dev libpciaccess-dev meson ninja-build pkg-config python3 python3-pip python3-wheel python3-setuptools python3-docutils valgrind'
FDO_DISTRIBUTION_EXEC: 'pip3 install meson==0.53.0'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2024-06-25.0"
FDO_DISTRIBUTION_TAG: "2022-01-19.0"
.debian-x86_64:
extends:
@ -62,16 +63,15 @@ stages:
- .os-debian
variables:
BUILD_ARCH: "armv7"
FDO_DISTRIBUTION_PLATFORM: linux/arm/v7
.os-freebsd:
variables:
BUILD_OS: freebsd
FDO_DISTRIBUTION_VERSION: "14.2"
FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess textproc/py-docutils cairo'
FDO_DISTRIBUTION_VERSION: "13.0"
FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess libpthread-stubs py38-docutils cairo'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2025-05-22.0"
FDO_DISTRIBUTION_TAG: "2021-11-10.1"
.freebsd-x86_64:
extends:
@ -190,9 +190,23 @@ x86_64-freebsd-container_prep:
variables:
GIT_DEPTH: 10
script:
- meson setup build
--fatal-meson-warnings --auto-features=enabled
- meson build
-D amdgpu=true
-D cairo-tests=true
-D etnaviv=true
-D exynos=true
-D freedreno=true
-D freedreno-kgsl=true
-D intel=true
-D man-pages=true
-D nouveau=true
-D omap=true
-D radeon=true
-D tegra=true
-D udev=true
-D valgrind=auto
-D vc4=true
-D vmwgfx=true
- ninja -C build
- ninja -C build test
- DESTDIR=$PWD/install ninja -C build install
@ -213,7 +227,7 @@ x86_64-freebsd-container_prep:
# the workspace to see details about the failed tests.
- |
set +e
/app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson setup build --fatal-meson-warnings --auto-features=enabled -D etnaviv=disabled -D nouveau=disabled -D valgrind=disabled && ninja -C build"
/app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson build -D amdgpu=true -D cairo-tests=true -D intel=true -D man-pages=true -D nouveau=false -D radeon=true -D valgrind=auto && ninja -C build"
set -ex
scp -r vm:$CI_PROJECT_NAME/build/meson-logs .
/app/vmctl exec "ninja -C $CI_PROJECT_NAME/build install"


@ -0,0 +1,66 @@
#!/usr/bin/env bash
set -o errexit
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
CROSS_ARCHITECTURES=(i386 armhf arm64 ppc64el)
for arch in ${CROSS_ARCHITECTURES[@]}; do
dpkg --add-architecture $arch
done
apt-get install -y \
ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster-backports main' >/etc/apt/sources.list.d/backports.list
apt-get update
# Use newer packages from backports by default
cat >/etc/apt/preferences <<EOF
Package: *
Pin: release a=buster-backports
Pin-Priority: 500
EOF
apt-get dist-upgrade -y
apt-get install -y --no-remove \
build-essential \
docbook-xsl \
libatomic-ops-dev \
libcairo2-dev \
libcunit1-dev \
libpciaccess-dev \
meson \
ninja-build \
pkg-config \
python3 \
python3-pip \
python3-wheel \
python3-setuptools \
python3-docutils \
valgrind
for arch in ${CROSS_ARCHITECTURES[@]}; do
cross_file=/cross_file-$arch.txt
# Cross-build libdrm deps
apt-get install -y --no-remove \
libcairo2-dev:$arch \
libpciaccess-dev:$arch \
crossbuild-essential-$arch
# Generate cross build files for Meson
/usr/share/meson/debcrossgen --arch $arch -o $cross_file
# Work around a bug in debcrossgen that should be fixed in the next release
if [ $arch = i386 ]; then
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" $cross_file
fi
done
# Test that the oldest Meson version we claim to support is still supported
pip3 install meson==0.46


@ -1,97 +0,0 @@
//
// Copyright © 2011-2012 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including the next
// paragraph) shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
subdirs = ["*"]
build = ["Android.sources.bp"]
cc_defaults {
name: "libdrm_defaults",
cflags: [
// XXX: Consider moving these to config.h analogous to autoconf.
"-DMAJOR_IN_SYSMACROS=1",
"-DHAVE_VISIBILITY=1",
"-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1",
"-Wall",
"-Werror",
"-Wno-deprecated-declarations",
"-Wno-format",
"-Wno-gnu-variable-sized-type-not-at-end",
"-Wno-implicit-function-declaration",
"-Wno-int-conversion",
"-Wno-missing-field-initializers",
"-Wno-pointer-arith",
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
export_system_include_dirs: ["."],
}
cc_library_headers {
name: "libdrm_headers",
vendor_available: true,
host_supported: true,
defaults: ["libdrm_defaults"],
export_include_dirs: ["include/drm", "android"],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}
genrule {
name: "generated_static_table_fourcc_h",
out: ["generated_static_table_fourcc.h"],
srcs: ["include/drm/drm_fourcc.h"],
tool_files: ["gen_table_fourcc.py"],
cmd: "python3 $(location gen_table_fourcc.py) $(in) $(out)",
}
// Library for the device
cc_library {
name: "libdrm",
recovery_available: true,
vendor_available: true,
host_supported: true,
defaults: [
"libdrm_defaults",
"libdrm_sources",
],
generated_headers: [
"generated_static_table_fourcc_h",
],
export_include_dirs: ["include/drm", "android"],
cflags: [
"-Wno-enum-conversion",
"-Wno-pointer-arith",
"-Wno-sign-compare",
"-Wno-tautological-compare",
],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}

Android.common.mk (new file)

@ -0,0 +1,22 @@
# XXX: Consider moving these to config.h analogous to autoconf.
LOCAL_CFLAGS += \
-DMAJOR_IN_SYSMACROS=1 \
-DHAVE_ALLOCA_H=0 \
-DHAVE_SYS_SELECT_H=0 \
-DHAVE_SYS_SYSCTL_H=0 \
-DHAVE_VISIBILITY=1 \
-fvisibility=hidden \
-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
LOCAL_CFLAGS += \
-Wno-error \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-Wno-pointer-arith \
-Wno-enum-conversion
# Quiet down the build system and remove any .h files from the sources
LOCAL_SRC_FILES := $(patsubst %.h, , $(LOCAL_SRC_FILES))
LOCAL_EXPORT_C_INCLUDE_DIRS += $(LOCAL_PATH)
LOCAL_PROPRIETARY_MODULE := true

Android.mk (new file)

@ -0,0 +1,74 @@
#
# Copyright © 2011-2012 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
LIBDRM_ANDROID_MAJOR_VERSION := $(word 1, $(subst ., , $(PLATFORM_VERSION)))
ifneq ($(filter 2 4, $(LIBDRM_ANDROID_MAJOR_VERSION)),)
$(error "Android 4.4 and earlier not supported")
endif
LIBDRM_COMMON_MK := $(call my-dir)/Android.common.mk
LOCAL_PATH := $(call my-dir)
LIBDRM_TOP := $(LOCAL_PATH)
include $(CLEAR_VARS)
# Import variables LIBDRM_{,H,INCLUDE_H,INCLUDE_ANDROID_H,INCLUDE_VMWGFX_H}_FILES
include $(LOCAL_PATH)/Makefile.sources
#static library for the device (recovery)
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
# Shared library for the device
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_SHARED_LIBRARIES := \
libcutils
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)
include $(call all-makefiles-under,$(LOCAL_PATH))


@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_sources",
srcs: [
"xf86drm.c",
"xf86drmHash.c",
"xf86drmRandom.c",
"xf86drmSL.c",
"xf86drmMode.c",
],
}


@ -1,25 +0,0 @@
# Usage: make -f path/to/Android.sources.bp.mk NAMES=<> >Android.sources.bp
#
# It will read the Makefile.sources in the current directory, and
# write <NAME>_FILES to stdout as an Android.bp cc_defaults module.
.PHONY: all
all:
@# Do nothing
include Makefile.sources
empty :=
indent := $(empty) $(empty)
$(info // Autogenerated with Android.sources.bp.mk)
$(foreach NAME,$(NAMES), \
$(eval lower_name := $(shell echo $(PREFIX)$(NAME) | tr 'A-Z' 'a-z')) \
$(info ) \
$(info cc_defaults {) \
$(info $(indent)name: "$(lower_name)_sources",) \
$(info $(indent)srcs: [) \
$(foreach f,$(filter %.c,$($(NAME)_FILES)), \
$(info $(indent)$(indent)"$(f)",)) \
$(info $(indent)],) \
$(info }))

Makefile.sources (new file)

@ -0,0 +1,45 @@
LIBDRM_FILES := \
xf86drm.c \
xf86drmHash.c \
xf86drmHash.h \
xf86drmRandom.c \
xf86drmRandom.h \
xf86drmSL.c \
xf86drmMode.c \
xf86atomic.h \
libdrm_macros.h \
libdrm_lists.h \
util_double_list.h \
util_math.h
LIBDRM_H_FILES := \
libsync.h \
xf86drm.h \
xf86drmMode.h
LIBDRM_INCLUDE_H_FILES := \
include/drm/drm.h \
include/drm/drm_fourcc.h \
include/drm/drm_mode.h \
include/drm/drm_sarea.h \
include/drm/i915_drm.h \
include/drm/mach64_drm.h \
include/drm/mga_drm.h \
include/drm/msm_drm.h \
include/drm/nouveau_drm.h \
include/drm/qxl_drm.h \
include/drm/r128_drm.h \
include/drm/radeon_drm.h \
include/drm/amdgpu_drm.h \
include/drm/savage_drm.h \
include/drm/sis_drm.h \
include/drm/tegra_drm.h \
include/drm/vc4_drm.h \
include/drm/via_drm.h \
include/drm/virtgpu_drm.h
LIBDRM_INCLUDE_ANDROID_H_FILES := \
android/gralloc_handle.h
LIBDRM_INCLUDE_VMWGFX_H_FILES := \
include/drm/vmwgfx_drm.h


@ -49,15 +49,3 @@ Then use ninja to build and install:
If you are installing into a system location you will need to run install
separately, and as root.
AMDGPU ASIC table file
----------------------
The AMDGPU driver requires the `amdgpu.ids` file, which is normally installed in
`$PREFIX/share/libdrm`. Alternative locations can be specified at runtime by
setting the `AMDGPU_ASIC_ID_TABLE_PATHS` environment variable to one or more
colon-separated directories in which to search for the `amdgpu.ids` file.
For this option to be available, the C library must provide the secure_getenv()
function. On systems without it (such as NetBSD), this option is not available.


@ -1,16 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_amdgpu",
cflags: [
"-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\""
],
defaults: [
"libdrm_defaults",
"libdrm_amdgpu_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

amdgpu/Android.mk (new file)

@ -0,0 +1,19 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_AMDGPU_FILES, LIBDRM_AMDGPU_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_amdgpu
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_AMDGPU_FILES)
LOCAL_CFLAGS := \
-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\"
LOCAL_REQUIRED_MODULES := amdgpu.ids
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)


@ -1,15 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_amdgpu_sources",
srcs: [
"amdgpu_asic_id.c",
"amdgpu_bo.c",
"amdgpu_cs.c",
"amdgpu_device.c",
"amdgpu_gpu_info.c",
"amdgpu_vamgr.c",
"amdgpu_vm.c",
"handle_table.c",
],
}

amdgpu/Makefile.sources (new file)

@ -0,0 +1,14 @@
LIBDRM_AMDGPU_FILES := \
amdgpu_asic_id.c \
amdgpu_bo.c \
amdgpu_cs.c \
amdgpu_device.c \
amdgpu_gpu_info.c \
amdgpu_internal.h \
amdgpu_vamgr.c \
amdgpu_vm.c \
handle_table.c \
handle_table.h
LIBDRM_AMDGPU_H_FILES := \
amdgpu.h


@ -14,7 +14,6 @@ amdgpu_bo_query_info
amdgpu_bo_set_metadata
amdgpu_bo_va_op
amdgpu_bo_va_op_raw
amdgpu_bo_va_op_raw2
amdgpu_bo_wait_for_idle
amdgpu_create_bo_from_user_mem
amdgpu_cs_chunk_fence_info_to_data
@ -57,7 +56,6 @@ amdgpu_cs_wait_semaphore
amdgpu_device_deinitialize
amdgpu_device_get_fd
amdgpu_device_initialize
amdgpu_device_initialize2
amdgpu_find_bo_by_cpu_mapping
amdgpu_get_marketing_name
amdgpu_query_buffer_size_alignment
@ -65,26 +63,15 @@ amdgpu_query_crtc_from_id
amdgpu_query_firmware_version
amdgpu_query_gds_info
amdgpu_query_gpu_info
amdgpu_query_gpuvm_fault_info
amdgpu_query_heap_info
amdgpu_query_hw_ip_count
amdgpu_query_hw_ip_info
amdgpu_query_info
amdgpu_query_sensor_info
amdgpu_query_uq_fw_area_info
amdgpu_query_video_caps_info
amdgpu_read_mm_registers
amdgpu_va_manager_alloc
amdgpu_va_manager_init
amdgpu_va_manager_deinit
amdgpu_va_range_alloc
amdgpu_va_range_alloc2
amdgpu_va_range_free
amdgpu_va_get_start_addr
amdgpu_va_range_query
amdgpu_vm_reserve_vmid
amdgpu_vm_unreserve_vmid
amdgpu_create_userqueue
amdgpu_free_userqueue
amdgpu_userq_signal
amdgpu_userq_wait


@ -42,10 +42,7 @@ extern "C" {
#endif
struct drm_amdgpu_info_hw_ip;
struct drm_amdgpu_info_uq_fw_areas;
struct drm_amdgpu_bo_list_entry;
struct drm_amdgpu_userq_signal;
struct drm_amdgpu_userq_wait;
/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
@ -141,12 +138,6 @@ typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
*/
typedef struct amdgpu_va *amdgpu_va_handle;
/**
* Define handle dealing with VA allocation. An amdgpu_device
* owns one of these, but they can also be used without a device.
*/
typedef struct amdgpu_va_manager *amdgpu_va_manager_handle;
/**
* Define handle for semaphore
*/
@ -536,20 +527,6 @@ int amdgpu_device_initialize(int fd,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
/**
* Same as amdgpu_device_initialize() except when deduplicate_device
* is false *and* fd points to a device that was already initialized.
* In this case, amdgpu_device_initialize would return the same
* amdgpu_device_handle while here amdgpu_device_initialize2 would
* return a new handle.
* amdgpu_device_initialize() should be preferred in most situations;
* the only use-case where not-deduplicating devices make sense is
* when one wants to have isolated device handles in the same process.
*/
int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
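A minimal sketch of the non-deduplicating path described above (an illustration only; it assumes `fd` is an already-open render-node file descriptor):

#include <stdbool.h>
#include <stdint.h>
#include "amdgpu.h"

static void open_isolated_handles(int fd)
{
	uint32_t major, minor;
	amdgpu_device_handle dev_a, dev_b;

	/* With deduplicate_device == false each call returns a fresh handle,
	 * even though both wrap the same fd. */
	if (amdgpu_device_initialize2(fd, false, &major, &minor, &dev_a))
		return;
	if (amdgpu_device_initialize2(fd, false, &major, &minor, &dev_b)) {
		amdgpu_device_deinitialize(dev_a);
		return;
	}

	/* dev_a and dev_b are isolated; each is deinitialized on its own. */
	amdgpu_device_deinitialize(dev_b);
	amdgpu_device_deinitialize(dev_a);
}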
/**
*
* When access to such library does not needed any more the special
@ -1175,26 +1152,6 @@ int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info);
/**
* Query FW area related information.
*
* The return size is query-specific and depends on the "type" parameter.
* No more than "size" bytes is returned.
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param type - \c [in] AMDGPU_HW_IP_*
* \param ip_instance - \c [in] HW IP index.
* \param info - \c [out] The pointer to return value
*
* \return 0 on success\n
* <0 - Negative POSIX error code
*
*/
int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info);
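A hedged usage sketch for the query above (assumes a valid `dev` from amdgpu_device_initialize() and the struct drm_amdgpu_info_uq_fw_areas definition from the kernel headers):

#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int query_gfx_fw_areas(amdgpu_device_handle dev,
			      struct drm_amdgpu_info_uq_fw_areas *areas)
{
	memset(areas, 0, sizeof(*areas));
	/* GFX IP, first instance; returns 0 on success, -errno on failure. */
	return amdgpu_query_uq_fw_area_info(dev, AMDGPU_HW_IP_GFX, 0, areas);
}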
/**
* Query heap information
*
@ -1325,22 +1282,6 @@ int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
unsigned size, void *value);
/**
* Query information about VM faults
*
* The return sizeof(struct drm_amdgpu_info_gpuvm_fault)
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param size - \c [in] Size of the returned value.
* \param value - \c [out] Pointer to the return value.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev, unsigned size,
void *value);
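A short sketch of the call pattern implied by the comment above (a sketch, assuming a valid `dev`; the struct name is the one given in the comment):

#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int query_last_vm_fault(amdgpu_device_handle dev,
			       struct drm_amdgpu_info_gpuvm_fault *fault)
{
	memset(fault, 0, sizeof(*fault));
	/* size tells the kernel how many bytes it may write back. */
	return amdgpu_query_gpuvm_fault_info(dev, sizeof(*fault), fault);
}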
/**
* Read a set of consecutive memory-mapped registers.
* Not all registers are allowed to be read by userspace.
@ -1427,11 +1368,6 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
*/
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
/**
* Return the starting address of the allocated virtual address range.
*/
uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle);
/**
* Query virtual address range
*
@ -1453,37 +1389,6 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
uint64_t *start,
uint64_t *end);
/**
* Allocate an amdgpu_va_manager object.
* The returned object has to be initialized with amdgpu_va_manager_init
* before use.
* On release, amdgpu_va_manager_deinit needs to be called, then the memory
* can be released using free().
*/
amdgpu_va_manager_handle amdgpu_va_manager_alloc(void);
void amdgpu_va_manager_init(amdgpu_va_manager_handle va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment);
void amdgpu_va_manager_deinit(amdgpu_va_manager_handle va_mgr);
/**
* Similar to #amdgpu_va_range_alloc() but allocates VA
* directly from an amdgpu_va_manager_handle instead of using
* the manager from an amdgpu_device.
*/
int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags);
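A sketch of the standalone-manager flow described above; the address windows and alignment are illustrative placeholders, not values mandated by the API:

#include <stdint.h>
#include <stdlib.h>
#include "amdgpu.h"

static void va_manager_example(void)
{
	amdgpu_va_manager_handle mgr;
	amdgpu_va_handle range;
	uint64_t va = 0;

	mgr = amdgpu_va_manager_alloc();
	if (!mgr)
		return;

	/* Placeholder low/high VA windows with 4 KiB alignment. */
	amdgpu_va_manager_init(mgr,
			       0x200000, 1ULL << 40,
			       1ULL << 40, 1ULL << 47,
			       4096);

	if (!amdgpu_va_range_alloc2(mgr, amdgpu_gpu_va_range_general,
				    1 << 20, 0, 0, &va, &range, 0)) {
		/* va now holds the start of a 1 MiB range owned by this manager. */
		amdgpu_va_range_free(range);
	}

	amdgpu_va_manager_deinit(mgr);
	free(mgr);	/* per the note above: plain free() after deinit */
}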
/**
* VA mapping/unmapping for the buffer object
*
@ -1534,42 +1439,6 @@ int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
uint64_t flags,
uint32_t ops);
/**
* VA mapping/unmapping of buffer object for usermode queue.
*
* This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
* parameters are treated "raw2", i.e. size is not automatically aligned, and
* all flags must be specified explicitly.
*
* \param dev - \c [in] device handle
* \param bo - \c [in] BO handle (may be NULL)
* \param offset - \c [in] Start offset to map
* \param size - \c [in] Size to map
* \param addr - \c [in] Start virtual address.
* \param flags - \c [in] Supported flags for mapping/unmapping
* \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
* \param vm_timeline_syncobj_out - \c [out] syncobj handle for PT update fence
* \param vm_timeline_point - \c [in] input timeline point
* \param input_fence_syncobj_handles - \c [in] Array of syncobj handles for bo unmap,
* clear and replace
* \param num_syncobj_handles - \c [in] Number of syncobj handles
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_array_in,
uint32_t num_syncobj_handles_in);
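A rough sketch of a map operation through the raw2 entry point (only an illustration; `bo`, `bo_size`, `va_addr`, `timeline_syncobj` and `timeline_point` are placeholders the caller is assumed to have prepared, and the flag set is just one plausible choice):

#include <stdint.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int map_bo_raw2(amdgpu_device_handle dev, amdgpu_bo_handle bo,
		       uint64_t bo_size, uint64_t va_addr,
		       uint32_t timeline_syncobj, uint64_t timeline_point)
{
	/* As stated above, nothing is aligned or defaulted for the caller:
	 * bo_size must already be page aligned and every flag is explicit. */
	return amdgpu_bo_va_op_raw2(dev, bo, 0 /* offset */, bo_size, va_addr,
				    AMDGPU_VM_PAGE_READABLE |
				    AMDGPU_VM_PAGE_WRITEABLE |
				    AMDGPU_VM_PAGE_EXECUTABLE,
				    AMDGPU_VA_OP_MAP,
				    timeline_syncobj, timeline_point,
				    0 /* input fence syncobj array */,
				    0 /* num_syncobj_handles */);
}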
/**
* create semaphore
*
@ -2001,65 +1870,6 @@ int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
*/
int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
/**
* Create USERQUEUE
* \param dev - \c [in] device handle
* \param ip_type - \c [in] ip type
* \param doorbell_handle - \c [in] doorbell handle
* \param doorbell_offset - \c [in] doorbell index
* \param mqd_in - \c [in] MQD data
* \param queue_va - \c [in] Virtual address of queue
* \param queue_size - \c [in] userqueue size
* \param wptr_va - \c [in] Virtual address of wptr
* \param rptr_va - \c [in] Virtual address of rptr
* \param queue_id - \c [out] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id);
/**
* Free USERQUEUE
* \param dev - \c [in] device handle
* \param queue_id - \c [in] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id);
/**
* Signal USERQUEUE
* \param dev - \c [in] device handle
* \param signal_data - \c [in] pointer to struct drm_amdgpu_userq_signal
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data);
/**
* Wait USERQUEUE
* \param dev - \c [in] device handle
* \param wait_data - \c [in/out] pointer to struct drm_amdgpu_userq_wait
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data);
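A condensed lifecycle sketch for the four user-queue entry points above (assumes the caller has already allocated and GPU-mapped the queue, wptr and rptr buffers and obtained a doorbell; the GFX11 MQD struct name comes from the user-queue implementation and is zero-filled here purely as a placeholder):

#include <stdint.h>
#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int userqueue_roundtrip(amdgpu_device_handle dev,
			       uint32_t doorbell_handle, uint32_t doorbell_offset,
			       uint64_t queue_va, uint64_t queue_size,
			       uint64_t wptr_va, uint64_t rptr_va)
{
	struct drm_amdgpu_userq_mqd_gfx11 mqd;
	uint32_t queue_id = 0;
	int r;

	memset(&mqd, 0, sizeof(mqd));	/* real contents are IP-specific */

	r = amdgpu_create_userqueue(dev, AMDGPU_HW_IP_GFX,
				    doorbell_handle, doorbell_offset,
				    queue_va, queue_size, wptr_va, rptr_va,
				    &mqd, 0 /* flags */, &queue_id);
	if (r)
		return r;

	/* ... submit work, then synchronize with amdgpu_userq_signal() and
	 * amdgpu_userq_wait() as needed ... */

	return amdgpu_free_userqueue(dev, queue_id);
}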
#ifdef __cplusplus
}
#endif


@ -22,11 +22,6 @@
*
*/
// secure_getenv requires _GNU_SOURCE
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
@ -109,168 +104,6 @@ out:
return r;
}
static void amdgpu_parse_proc_cpuinfo(struct amdgpu_device *dev)
{
const char *search_key = "model name";
const char *radeon_key = "Radeon";
char *line = NULL;
size_t len = 0;
FILE *fp;
fp = fopen("/proc/cpuinfo", "r");
if (fp == NULL) {
fprintf(stderr, "%s\n", strerror(errno));
return;
}
while (getline(&line, &len, fp) != -1) {
char *saveptr;
char *value;
if (strncmp(line, search_key, strlen(search_key)))
continue;
/* check for parts that have both CPU and GPU information */
value = strstr(line, radeon_key);
/* get content after the first colon */
if (value == NULL) {
value = strstr(line, ":");
if (value == NULL)
continue;
value++;
}
/* strip whitespace */
while (*value == ' ' || *value == '\t')
value++;
saveptr = strchr(value, '\n');
if (saveptr)
*saveptr = '\0';
/* Add AMD to the new string if it's missing from slicing/dicing */
if (strncmp(value, "AMD", 3) != 0) {
char *tmp = malloc(strlen(value) + 5);
if (!tmp)
break;
sprintf(tmp, "AMD %s", value);
dev->marketing_name = tmp;
} else
dev->marketing_name = strdup(value);
break;
}
free(line);
fclose(fp);
}
#if HAVE_SECURE_GETENV
static char *join_path(const char *dir, const char *file) {
size_t dir_len = strlen(dir);
size_t file_len = strlen(file);
char *full_path = NULL;
int need_slash = ((dir_len > 0) && (dir[dir_len - 1] != '/'));
size_t total_len = dir_len + (need_slash ? 1 : 0) + file_len + 1; // +1 for null terminator
if (dir_len == 0) {
return strdup(file);
}
full_path = malloc(total_len);
if (!full_path) {
return NULL; // Memory allocation failed
}
strcpy(full_path, dir);
if (need_slash) {
full_path[dir_len] = '/';
dir_len++;
}
strcpy(full_path + dir_len, file);
return full_path;
}
static char **split_env_var(const char *env_var_content)
{
char **ret = NULL;
char *dup_env_val;
int elements = 1;
int index = 1;
if (!env_var_content || env_var_content[0] == '\0')
return NULL;
for(char *p = (char *)env_var_content; *p; p++) {
if (*p == ':')
elements++;
}
dup_env_val = strdup(env_var_content);
if (!dup_env_val) {
return NULL;
}
ret = malloc(sizeof(char *) * (elements + 1));
ret[0] = dup_env_val;
for(char *p = (char *)dup_env_val; *p; p++) {
if (*p == ':') {
*p = 0;
ret[index++] = p + 1;
}
}
ret[index] = NULL; // ensure that the last element in the array is NULL
return ret;
}
static void split_env_var_free(char **split_var)
{
if (split_var) {
// remember that the first element also points to the whole duplicated string,
// which was modified in place by replacing ':' with '\0' characters
free(split_var[0]);
free(split_var);
}
}
static char *find_asic_id_table(void)
{
// first check the paths in AMDGPU_ASIC_ID_TABLE_PATHS environment variable
const char *amdgpu_asic_id_table_paths = secure_getenv("AMDGPU_ASIC_ID_TABLE_PATHS");
char *file_name = NULL;
char *found_path = NULL;
char **paths = NULL;
if (!amdgpu_asic_id_table_paths)
return NULL;
// extract the file name from AMDGPU_ASIC_ID_TABLE
file_name = strrchr(AMDGPU_ASIC_ID_TABLE, '/');
if (!file_name)
return NULL;
file_name++; // skip the '/'
paths = split_env_var(amdgpu_asic_id_table_paths);
if (!paths)
return NULL;
// for each path, join with file_name and check if it exists
for (int i = 0; paths[i] != NULL; i++) {
char *full_path = join_path(paths[i], file_name);
if (!full_path) {
continue;
}
if (access(full_path, R_OK) == 0) {
found_path = full_path;
break;
}
}
split_env_var_free(paths);
return found_path;
}
#endif
void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
{
FILE *fp;
@ -280,21 +113,11 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
int line_num = 1;
int r = 0;
char *amdgpu_asic_id_table_path = NULL;
#if HAVE_SECURE_GETENV
// if this system lacks secure_getenv(), don't allow extra paths
// for security reasons.
amdgpu_asic_id_table_path = find_asic_id_table();
#endif
// if not found, use the default AMDGPU_ASIC_ID_TABLE path
if (!amdgpu_asic_id_table_path)
amdgpu_asic_id_table_path = strdup(AMDGPU_ASIC_ID_TABLE);
fp = fopen(amdgpu_asic_id_table_path, "r");
fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
if (!fp) {
fprintf(stderr, "%s: %s\n", amdgpu_asic_id_table_path,
fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
strerror(errno));
goto get_cpu;
return;
}
/* 1st valid line is file version */
@ -309,7 +132,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
continue;
}
drmMsg("%s version: %s\n", amdgpu_asic_id_table_path, line);
drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
break;
}
@ -327,7 +150,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
if (r == -EINVAL) {
fprintf(stderr, "Invalid format: %s: line %d: %s\n",
amdgpu_asic_id_table_path, line_num, line);
AMDGPU_ASIC_ID_TABLE, line_num, line);
} else if (r && r != -EAGAIN) {
fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
__func__, strerror(-r));
@ -335,11 +158,4 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
free(line);
fclose(fp);
get_cpu:
free(amdgpu_asic_id_table_path);
if (dev->info.ids_flags & AMDGPU_IDS_FLAGS_FUSION &&
dev->marketing_name == NULL) {
amdgpu_parse_proc_cpuinfo(dev);
}
}


@ -74,9 +74,6 @@ drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
union drm_amdgpu_gem_create args;
int r;
if (!alloc_buffer || !buf_handle)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.bo_size = alloc_buffer->alloc_size;
args.in.alignment = alloc_buffer->phys_alignment;
@ -108,9 +105,6 @@ drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
{
struct drm_amdgpu_gem_metadata args = {};
if (!info)
return -EINVAL;
args.handle = bo->handle;
args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
args.data.flags = info->flags;
@ -138,7 +132,7 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
int r;
/* Validate the BO passed in */
if (!bo->handle || !info)
if (!bo->handle)
return -EINVAL;
/* Query metadata. */
@ -539,7 +533,7 @@ drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
amdgpu_bo_handle *buf_handle,
uint64_t *offset_in_bo)
{
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo *bo;
uint32_t i;
int r = 0;
@ -557,7 +551,7 @@ drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
continue;
if (cpu >= bo->cpu_ptr &&
cpu < (void*)((uintptr_t)bo->cpu_ptr + (size_t)bo->alloc_size))
cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
break;
}
@ -648,7 +642,7 @@ drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
unsigned i;
int r;
if (!number_of_resources || !resources)
if (!number_of_resources)
return -EINVAL;
/* overflow check for multiplication */
@ -795,39 +789,3 @@ drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
return r;
}
drm_public int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_handles,
uint32_t num_syncobj_handles)
{
struct drm_amdgpu_gem_va va;
int r;
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
return -EINVAL;
memset(&va, 0, sizeof(va));
va.handle = bo ? bo->handle : 0;
va.operation = ops;
va.flags = flags;
va.va_address = addr;
va.offset_in_bo = offset;
va.map_size = size;
va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
va.vm_timeline_point = vm_timeline_point;
va.input_fence_syncobj_handles = input_fence_syncobj_handles;
va.num_syncobj_handles = num_syncobj_handles;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
return r;
}


@ -56,22 +56,10 @@ drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
union drm_amdgpu_ctx args;
int i, j, k;
int r;
char *override_priority;
if (!dev || !context)
return -EINVAL;
override_priority = getenv("AMD_PRIORITY");
if (override_priority) {
/* The priority is a signed integer. The variable type is
* wrong. If parsing fails, priority is unchanged.
*/
if (sscanf(override_priority, "%i", &priority) == 1) {
printf("amdgpu: context priority changed to %i\n",
priority);
}
}
gpu_context = calloc(1, sizeof(struct amdgpu_context));
if (!gpu_context)
return -ENOMEM;
@ -140,8 +128,8 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
amdgpu_semaphore_handle sem, tmp;
LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
amdgpu_semaphore_handle sem;
LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
list_del(&sem->list);
amdgpu_cs_reset_sem(sem);
amdgpu_cs_unreference_sem(sem);
@ -598,31 +586,24 @@ drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
uint32_t ring,
amdgpu_semaphore_handle sem)
{
int ret;
if (!ctx || !sem)
return -EINVAL;
if (ip_type >= AMDGPU_HW_IP_NUM)
return -EINVAL;
if (ring >= AMDGPU_CS_MAX_RINGS)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
/* sem has been signaled */
if (sem->signal_fence.context) {
ret = -EINVAL;
goto unlock;
}
if (sem->signal_fence.context)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
sem->signal_fence.context = ctx;
sem->signal_fence.ip_type = ip_type;
sem->signal_fence.ip_instance = ip_instance;
sem->signal_fence.ring = ring;
sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
update_references(NULL, &sem->refcount);
ret = 0;
unlock:
pthread_mutex_unlock(&ctx->sequence_mutex);
return ret;
return 0;
}
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,


@ -95,26 +95,22 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
/* Remove dev from dev_list, if it was added there. */
if (dev == dev_list) {
dev_list = dev->next;
} else {
for (amdgpu_device_handle node = dev_list; node; node = node->next) {
if (node->next == dev) {
node->next = dev->next;
break;
}
}
}
amdgpu_device_handle *node = &dev_list;
pthread_mutex_lock(&dev_mutex);
while (*node != dev && (*node)->next)
node = &(*node)->next;
*node = (*node)->next;
pthread_mutex_unlock(&dev_mutex);
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
close(dev->flink_fd);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_low);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high);
amdgpu_vamgr_deinit(&dev->vamgr_32);
amdgpu_vamgr_deinit(&dev->vamgr);
amdgpu_vamgr_deinit(&dev->vamgr_high_32);
amdgpu_vamgr_deinit(&dev->vamgr_high);
handle_table_fini(&dev->bo_handles);
handle_table_fini(&dev->bo_flink_names);
pthread_mutex_destroy(&dev->bo_table_mutex);
@ -144,23 +140,22 @@ static void amdgpu_device_reference(struct amdgpu_device **dst,
*dst = src;
}
static int _amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle,
bool deduplicate_device)
drm_public int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
struct amdgpu_device *dev = NULL;
struct amdgpu_device *dev;
drmVersionPtr version;
int r;
int flag_auth = 0;
int flag_authexist=0;
uint32_t accel_working = 0;
uint64_t start, max;
*device_handle = NULL;
pthread_mutex_lock(&dev_mutex);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
@ -169,10 +164,9 @@ static int _amdgpu_device_initialize(int fd,
return r;
}
if (deduplicate_device)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
if (dev) {
r = amdgpu_get_auth(dev->fd, &flag_authexist);
@ -244,22 +238,35 @@ static int _amdgpu_device_initialize(int fd,
goto cleanup;
}
amdgpu_va_manager_init(&dev->va_mgr,
dev->dev_info.virtual_address_offset,
dev->dev_info.virtual_address_max,
dev->dev_info.high_va_offset,
dev->dev_info.high_va_max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.virtual_address_offset;
max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr, start, max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.high_va_offset;
max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high, start, max,
dev->dev_info.virtual_address_alignment);
amdgpu_parse_asic_ids(dev);
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
if (deduplicate_device) {
dev->next = dev_list;
dev_list = dev;
}
dev->next = dev_list;
dev_list = dev;
pthread_mutex_unlock(&dev_mutex);
return 0;
@ -272,27 +279,9 @@ cleanup:
return r;
}
drm_public int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, true);
}
drm_public int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, deduplicate_device);
}
drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
pthread_mutex_lock(&dev_mutex);
amdgpu_device_reference(&dev, NULL);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
@ -303,10 +292,7 @@ drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
if (dev->marketing_name)
return dev->marketing_name;
else
return "AMD Radeon Graphics";
return dev->marketing_name;
}
drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
@ -317,10 +303,10 @@ drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
switch (info) {
case amdgpu_sw_info_address32_hi:
if (dev->va_mgr.vamgr_high_32.va_max)
*val32 = (dev->va_mgr.vamgr_high_32.va_max - 1) >> 32;
if (dev->vamgr_high_32.va_max)
*val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
else
*val32 = (dev->va_mgr.vamgr_32.va_max - 1) >> 32;
*val32 = (dev->vamgr_32.va_max - 1) >> 32;
return 0;
}
return -EINVAL;

View file

@ -137,24 +137,6 @@ drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)info;
request.return_size = sizeof(*info);
request.query = AMDGPU_INFO_UQ_FW_AREAS;
request.query_hw_ip.type = type;
request.query_hw_ip.ip_instance = ip_instance;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
int r, i;
@ -364,17 +346,3 @@ drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned c
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)value;
request.return_size = size;
request.query = AMDGPU_INFO_GPUVM_FAULT;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}


@ -57,23 +57,13 @@ struct amdgpu_bo_va_mgr {
};
struct amdgpu_va {
amdgpu_device_handle dev;
uint64_t address;
uint64_t size;
enum amdgpu_gpu_va_range range;
struct amdgpu_bo_va_mgr *vamgr;
};
struct amdgpu_va_manager {
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr_low;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_device {
atomic_t refcount;
struct amdgpu_device *next;
@ -91,8 +81,14 @@ struct amdgpu_device {
pthread_mutex_t bo_table_mutex;
struct drm_amdgpu_info_device dev_info;
struct amdgpu_gpu_info info;
struct amdgpu_va_manager va_mgr;
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_bo {


@ -1,123 +0,0 @@
/*
* Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <string.h>
#include <errno.h>
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
drm_public int
amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id)
{
int ret;
union drm_amdgpu_userq userq;
uint64_t mqd_size;
if (!dev)
return -EINVAL;
switch (ip_type) {
case AMDGPU_HW_IP_GFX:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
break;
case AMDGPU_HW_IP_DMA:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
break;
case AMDGPU_HW_IP_COMPUTE:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
break;
default:
return -EINVAL;
}
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_CREATE;
userq.in.ip_type = ip_type;
userq.in.doorbell_handle = doorbell_handle;
userq.in.doorbell_offset = doorbell_offset;
userq.in.queue_va = queue_va;
userq.in.queue_size = queue_size;
userq.in.wptr_va = wptr_va;
userq.in.rptr_va = rptr_va;
userq.in.mqd = (uint64_t)mqd_in;
userq.in.mqd_size = mqd_size;
userq.in.flags = flags;
ret = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
*queue_id = userq.out.queue_id;
return ret;
}
drm_public int
amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id)
{
union drm_amdgpu_userq userq;
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_FREE;
userq.in.queue_id = queue_id;
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
}
drm_public int
amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
signal_data, sizeof(struct drm_amdgpu_userq_signal));
return r;
}
drm_public int
amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_WAIT,
wait_data, sizeof(struct drm_amdgpu_userq_wait));
return r;
}


@ -228,40 +228,25 @@ drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
return amdgpu_va_range_alloc2(&dev->va_mgr, va_range_type, size,
va_base_alignment, va_base_required,
va_base_allocated, va_range_handle,
flags);
}
drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
struct amdgpu_bo_va_mgr *vamgr;
bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
int ret;
/* Clear the flag when the high VA manager is not initialized */
if (flags & AMDGPU_VA_RANGE_HIGH && !va_mgr->vamgr_high_32.va_max)
if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
flags &= ~AMDGPU_VA_RANGE_HIGH;
if (flags & AMDGPU_VA_RANGE_HIGH) {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_high;
vamgr = &dev->vamgr_high;
} else {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_32;
vamgr = &dev->vamgr_32;
else
vamgr = &va_mgr->vamgr_low;
vamgr = &dev->vamgr;
}
va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
@ -274,9 +259,9 @@ drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
/* fallback to 32bit address */
if (flags & AMDGPU_VA_RANGE_HIGH)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_32;
vamgr = &dev->vamgr_32;
ret = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required,
search_from_top, va_base_allocated);
@ -289,6 +274,7 @@ drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
return -ENOMEM;
}
va->dev = dev;
va->address = *va_base_allocated;
va->size = size;
va->range = va_range_type;
@ -310,50 +296,3 @@ drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
free(va_range_handle);
return 0;
}
drm_public uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle)
{
return va_handle->address;
}
drm_public amdgpu_va_manager_handle amdgpu_va_manager_alloc(void)
{
amdgpu_va_manager_handle r = calloc(1, sizeof(struct amdgpu_va_manager));
return r;
}
drm_public void amdgpu_va_manager_init(struct amdgpu_va_manager *va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment)
{
uint64_t start, max;
start = low_va_offset;
max = MIN2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_low, start, max,
virtual_address_alignment);
start = high_va_offset;
max = MIN2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high, start, max,
virtual_address_alignment);
}
drm_public void amdgpu_va_manager_deinit(struct amdgpu_va_manager *va_mgr)
{
amdgpu_vamgr_deinit(&va_mgr->vamgr_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_low);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high);
}


@ -27,7 +27,6 @@ libdrm_amdgpu = library(
files(
'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'handle_table.c',
'amdgpu_userq.c',
),
config_file,
],
@ -37,8 +36,8 @@ libdrm_amdgpu = library(
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops, dep_rt],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_atomic_ops, dep_rt],
version : '1.0.0',
install : true,
)
@ -56,7 +55,9 @@ ext_libdrm_amdgpu = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
endif
test(
'amdgpu-symbols-check',
@ -64,6 +65,6 @@ test(
args : [
'--lib', libdrm_amdgpu,
'--symbols-file', files('amdgpu-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)


@ -70,7 +70,6 @@ drmGetLibVersion
drmGetLock
drmGetMagic
drmGetMap
drmGetNodeTypeFromDevId
drmGetNodeTypeFromFd
drmGetPrimaryDeviceNameFromFd
drmGetRenderDeviceNameFromFd
@ -104,15 +103,11 @@ drmModeAtomicGetCursor
drmModeAtomicMerge
drmModeAtomicSetCursor
drmModeAttachMode
drmModeCloseFB
drmModeConnectorGetPossibleCrtcs
drmModeConnectorSetProperty
drmModeCreateDumbBuffer
drmModeCreateLease
drmModeCreatePropertyBlob
drmModeCrtcGetGamma
drmModeCrtcSetGamma
drmModeDestroyDumbBuffer
drmModeDestroyPropertyBlob
drmModeDetachMode
drmModeDirtyFB
@ -131,7 +126,6 @@ drmModeFreePropertyBlob
drmModeFreeResources
drmModeGetConnector
drmModeGetConnectorCurrent
drmModeGetConnectorTypeName
drmModeGetCrtc
drmModeGetEncoder
drmModeGetFB
@ -143,7 +137,6 @@ drmModeGetProperty
drmModeGetPropertyBlob
drmModeGetResources
drmModeListLessees
drmModeMapDumbBuffer
drmModeMoveCursor
drmModeObjectGetProperties
drmModeObjectSetProperty
@ -189,7 +182,6 @@ drmSLNext
drmSwitchToContext
drmSyncobjCreate
drmSyncobjDestroy
drmSyncobjEventfd
drmSyncobjExportSyncFile
drmSyncobjFDToHandle
drmSyncobjHandleToFD
@ -209,4 +201,3 @@ drmUpdateDrawableInfo
drmWaitVBlank
drmGetFormatModifierName
drmGetFormatModifierVendor
drmGetFormatName


@ -1,6 +0,0 @@
prebuilt_etc {
name: "amdgpu.ids",
proprietary: true,
sub_dir: "hwdata",
src: "amdgpu.ids",
}

data/Android.mk (new file)

@ -0,0 +1,10 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := amdgpu.ids
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hwdata
LOCAL_SRC_FILES := $(LOCAL_MODULE)
include $(BUILD_PREBUILT)


@ -4,326 +4,120 @@
# device_id, revision_id, product_name <-- single tab after comma
1.0.0
1114, C2, AMD Radeon 860M Graphics
1114, C3, AMD Radeon 840M Graphics
1114, D2, AMD Radeon 860M Graphics
1114, D3, AMD Radeon 840M Graphics
1309, 00, AMD Radeon R7 Graphics
130A, 00, AMD Radeon R6 Graphics
130B, 00, AMD Radeon R4 Graphics
130C, 00, AMD Radeon R7 Graphics
130D, 00, AMD Radeon R6 Graphics
130E, 00, AMD Radeon R5 Graphics
130F, 00, AMD Radeon R7 Graphics
130F, D4, AMD Radeon R7 Graphics
130F, D5, AMD Radeon R7 Graphics
130F, D6, AMD Radeon R7 Graphics
130F, D7, AMD Radeon R7 Graphics
1313, 00, AMD Radeon R7 Graphics
1313, D4, AMD Radeon R7 Graphics
1313, D5, AMD Radeon R7 Graphics
1313, D6, AMD Radeon R7 Graphics
1315, 00, AMD Radeon R5 Graphics
1315, D4, AMD Radeon R5 Graphics
1315, D5, AMD Radeon R5 Graphics
1315, D6, AMD Radeon R5 Graphics
1315, D7, AMD Radeon R5 Graphics
1316, 00, AMD Radeon R5 Graphics
1318, 00, AMD Radeon R5 Graphics
131B, 00, AMD Radeon R4 Graphics
131C, 00, AMD Radeon R7 Graphics
131D, 00, AMD Radeon R6 Graphics
1435, AE, AMD Custom GPU 0932
1506, C1, AMD Radeon 610M
1506, C2, AMD Radeon 610M
1506, C3, AMD Radeon 610M
1506, C4, AMD Radeon 610M
150E, C1, AMD Radeon 890M Graphics
150E, C4, AMD Radeon 880M Graphics
150E, C5, AMD Radeon 890M Graphics
150E, C6, AMD Radeon 890M Graphics
150E, D1, AMD Radeon 890M Graphics
150E, D2, AMD Radeon 880M Graphics
150E, D3, AMD Radeon 890M Graphics
1586, C1, Radeon 8060S Graphics
1586, C2, Radeon 8050S Graphics
1586, C4, Radeon 8050S Graphics
1586, D1, Radeon 8060S Graphics
1586, D2, Radeon 8050S Graphics
1586, D4, Radeon 8050S Graphics
1586, D5, Radeon 8040S Graphics
15BF, 00, AMD Radeon 780M Graphics
15BF, 01, AMD Radeon 760M Graphics
15BF, 02, AMD Radeon 780M Graphics
15BF, 03, AMD Radeon 760M Graphics
15BF, C1, AMD Radeon 780M Graphics
15BF, C2, AMD Radeon 780M Graphics
15BF, C3, AMD Radeon 760M Graphics
15BF, C4, AMD Radeon 780M Graphics
15BF, C5, AMD Radeon 740M Graphics
15BF, C6, AMD Radeon 780M Graphics
15BF, C7, AMD Radeon 780M Graphics
15BF, C8, AMD Radeon 760M Graphics
15BF, C9, AMD Radeon 780M Graphics
15BF, CA, AMD Radeon 740M Graphics
15BF, CB, AMD Radeon 760M Graphics
15BF, CC, AMD Radeon 740M Graphics
15BF, CD, AMD Radeon 760M Graphics
15BF, CF, AMD Radeon 780M Graphics
15BF, D0, AMD Radeon 780M Graphics
15BF, D1, AMD Radeon 780M Graphics
15BF, D2, AMD Radeon 780M Graphics
15BF, D3, AMD Radeon 780M Graphics
15BF, D4, AMD Radeon 780M Graphics
15BF, D5, AMD Radeon 760M Graphics
15BF, D6, AMD Radeon 760M Graphics
15BF, D7, AMD Radeon 780M Graphics
15BF, D8, AMD Radeon 740M Graphics
15BF, D9, AMD Radeon 780M Graphics
15BF, DA, AMD Radeon 780M Graphics
15BF, DB, AMD Radeon 760M Graphics
15BF, DC, AMD Radeon 760M Graphics
15BF, DD, AMD Radeon 780M Graphics
15BF, DE, AMD Radeon 740M Graphics
15BF, DF, AMD Radeon 760M Graphics
15BF, F0, AMD Radeon 760M Graphics
15C8, C1, AMD Radeon 740M Graphics
15C8, C2, AMD Radeon 740M Graphics
15C8, C3, AMD Radeon 740M Graphics
15C8, C4, AMD Radeon 740M Graphics
15C8, D1, AMD Radeon 740M Graphics
15C8, D2, AMD Radeon 740M Graphics
15C8, D3, AMD Radeon 740M Graphics
15C8, D4, AMD Radeon 740M Graphics
15D8, 00, AMD Radeon RX Vega 8 Graphics WS
15D8, 91, AMD Radeon Vega 3 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Radeon Vega 3 Graphics
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15DD, C3, AMD Radeon Vega 3 Graphics
15DD, CB, AMD Radeon Vega 3 Graphics
15DD, CE, AMD Radeon Vega 3 Graphics
15DD, D8, AMD Radeon Vega 3 Graphics
15DD, CC, AMD Radeon Vega 6 Graphics
15DD, D9, AMD Radeon Vega 6 Graphics
15DD, C2, AMD Radeon Vega 8 Graphics
15DD, C4, AMD Radeon Vega 8 Graphics
15DD, C8, AMD Radeon Vega 8 Graphics
15DD, CA, AMD Radeon Vega 8 Graphics
15DD, D1, AMD Radeon Vega 8 Graphics
15DD, D5, AMD Radeon Vega 8 Graphics
15DD, D7, AMD Radeon Vega 8 Graphics
15DD, C3, AMD Radeon Vega 10 Graphics
15DD, D0, AMD Radeon Vega 10 Graphics
15DD, C1, AMD Radeon Vega 11 Graphics
15DD, C6, AMD Radeon Vega 11 Graphics
15DD, C9, AMD Radeon Vega 11 Graphics
15DD, D3, AMD Radeon Vega 11 Graphics
15DD, D6, AMD Radeon Vega 11 Graphics
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
15D8, 93, AMD Radeon Vega 1 Graphics
15D8, A1, AMD Radeon Vega 10 Graphics
15D8, A2, AMD Radeon Vega 8 Graphics
15D8, A3, AMD Radeon Vega 6 Graphics
15D8, A4, AMD Radeon Vega 3 Graphics
15D8, B1, AMD Radeon Vega 10 Graphics
15D8, B2, AMD Radeon Vega 8 Graphics
15D8, B3, AMD Radeon Vega 6 Graphics
15D8, B4, AMD Radeon Vega 3 Graphics
15D8, C1, AMD Radeon Vega 10 Graphics
15D8, C2, AMD Radeon Vega 8 Graphics
15D8, C3, AMD Radeon Vega 6 Graphics
15D8, C4, AMD Radeon Vega 3 Graphics
15D8, C5, AMD Radeon Vega 3 Graphics
15D8, C8, AMD Radeon Vega 11 Graphics
15D8, C9, AMD Radeon Vega 8 Graphics
15D8, CA, AMD Radeon Vega 11 Graphics
15D8, CB, AMD Radeon Vega 8 Graphics
15D8, CC, AMD Radeon Vega 3 Graphics
15D8, CE, AMD Radeon Vega 3 Graphics
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, D1, AMD Radeon Vega 10 Graphics
15D8, D2, AMD Radeon Vega 8 Graphics
15D8, D3, AMD Radeon Vega 6 Graphics
15D8, CF, AMD Radeon Vega 3 Graphics
15D8, D4, AMD Radeon Vega 3 Graphics
15D8, D8, AMD Radeon Vega 11 Graphics
15D8, D9, AMD Radeon Vega 8 Graphics
15D8, DA, AMD Radeon Vega 11 Graphics
15D8, DB, AMD Radeon Vega 3 Graphics
15D8, DB, AMD Radeon Vega 8 Graphics
15D8, DC, AMD Radeon Vega 3 Graphics
15D8, DD, AMD Radeon Vega 3 Graphics
15D8, DE, AMD Radeon Vega 3 Graphics
15D8, DF, AMD Radeon Vega 3 Graphics
15D8, E3, AMD Radeon Vega 3 Graphics
15D8, E4, AMD Radeon Vega 3 Graphics
15D8, A3, AMD Radeon Vega 6 Graphics
15D8, B3, AMD Radeon Vega 6 Graphics
15D8, C3, AMD Radeon Vega 6 Graphics
15D8, D3, AMD Radeon Vega 6 Graphics
15D8, A2, AMD Radeon Vega 8 Graphics
15D8, B2, AMD Radeon Vega 8 Graphics
15D8, C2, AMD Radeon Vega 8 Graphics
15D8, C9, AMD Radeon Vega 8 Graphics
15D8, CB, AMD Radeon Vega 8 Graphics
15D8, D2, AMD Radeon Vega 8 Graphics
15D8, D9, AMD Radeon Vega 8 Graphics
15D8, DB, AMD Radeon Vega 8 Graphics
15D8, A1, AMD Radeon Vega 10 Graphics
15D8, B1, AMD Radeon Vega 10 Graphics
15D8, C1, AMD Radeon Vega 10 Graphics
15D8, D1, AMD Radeon Vega 10 Graphics
15D8, C8, AMD Radeon Vega 11 Graphics
15D8, CA, AMD Radeon Vega 11 Graphics
15D8, D8, AMD Radeon Vega 11 Graphics
15D8, DA, AMD Radeon Vega 11 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 84, AMD Radeon Vega 6 Graphics
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
15DD, 86, AMD Radeon Vega 11 Graphics
15DD, 88, AMD Radeon Vega 8 Graphics
15DD, C1, AMD Radeon Vega 11 Graphics
15DD, C2, AMD Radeon Vega 8 Graphics
15DD, C3, AMD Radeon Vega 3 / 10 Graphics
15DD, C4, AMD Radeon Vega 8 Graphics
15DD, C5, AMD Radeon Vega 3 Graphics
15DD, C6, AMD Radeon Vega 11 Graphics
15DD, C8, AMD Radeon Vega 8 Graphics
15DD, C9, AMD Radeon Vega 11 Graphics
15DD, CA, AMD Radeon Vega 8 Graphics
15DD, CB, AMD Radeon Vega 3 Graphics
15DD, CC, AMD Radeon Vega 6 Graphics
15DD, CE, AMD Radeon Vega 3 Graphics
15DD, CF, AMD Radeon Vega 3 Graphics
15DD, D0, AMD Radeon Vega 10 Graphics
15DD, D1, AMD Radeon Vega 8 Graphics
15DD, D3, AMD Radeon Vega 11 Graphics
15DD, D5, AMD Radeon Vega 8 Graphics
15DD, D6, AMD Radeon Vega 11 Graphics
15DD, D7, AMD Radeon Vega 8 Graphics
15DD, D8, AMD Radeon Vega 3 Graphics
15DD, D9, AMD Radeon Vega 6 Graphics
15DD, E1, AMD Radeon Vega 3 Graphics
15DD, E2, AMD Radeon Vega 3 Graphics
163F, AE, AMD Custom GPU 0405
163F, E1, AMD Custom GPU 0405
164E, D8, AMD Radeon 610M
164E, D9, AMD Radeon 610M
164E, DA, AMD Radeon 610M
164E, DB, AMD Radeon 610M
164E, DC, AMD Radeon 610M
1681, 06, AMD Radeon 680M
1681, 07, AMD Radeon 660M
1681, 0A, AMD Radeon 680M
1681, 0B, AMD Radeon 660M
1681, C7, AMD Radeon 680M
1681, C8, AMD Radeon 680M
1681, C9, AMD Radeon 660M
1900, 01, AMD Radeon 780M Graphics
1900, 02, AMD Radeon 760M Graphics
1900, 03, AMD Radeon 780M Graphics
1900, 04, AMD Radeon 760M Graphics
1900, 05, AMD Radeon 780M Graphics
1900, 06, AMD Radeon 780M Graphics
1900, 07, AMD Radeon 760M Graphics
1900, B0, AMD Radeon 780M Graphics
1900, B1, AMD Radeon 780M Graphics
1900, B2, AMD Radeon 780M Graphics
1900, B3, AMD Radeon 780M Graphics
1900, B4, AMD Radeon 780M Graphics
1900, B5, AMD Radeon 780M Graphics
1900, B6, AMD Radeon 780M Graphics
1900, B7, AMD Radeon 760M Graphics
1900, B8, AMD Radeon 760M Graphics
1900, B9, AMD Radeon 780M Graphics
1900, BA, AMD Radeon 780M Graphics
1900, BB, AMD Radeon 780M Graphics
1900, C0, AMD Radeon 780M Graphics
1900, C1, AMD Radeon 760M Graphics
1900, C2, AMD Radeon 780M Graphics
1900, C3, AMD Radeon 760M Graphics
1900, C4, AMD Radeon 780M Graphics
1900, C5, AMD Radeon 780M Graphics
1900, C6, AMD Radeon 760M Graphics
1900, C7, AMD Radeon 780M Graphics
1900, C8, AMD Radeon 760M Graphics
1900, C9, AMD Radeon 780M Graphics
1900, CA, AMD Radeon 760M Graphics
1900, CB, AMD Radeon 780M Graphics
1900, CC, AMD Radeon 780M Graphics
1900, CD, AMD Radeon 760M Graphics
1900, CE, AMD Radeon 780M Graphics
1900, CF, AMD Radeon 760M Graphics
1900, D0, AMD Radeon 780M Graphics
1900, D1, AMD Radeon 760M Graphics
1900, D2, AMD Radeon 780M Graphics
1900, D3, AMD Radeon 760M Graphics
1900, D4, AMD Radeon 780M Graphics
1900, D5, AMD Radeon 780M Graphics
1900, D6, AMD Radeon 760M Graphics
1900, D7, AMD Radeon 780M Graphics
1900, D8, AMD Radeon 760M Graphics
1900, D9, AMD Radeon 780M Graphics
1900, DA, AMD Radeon 760M Graphics
1900, DB, AMD Radeon 780M Graphics
1900, DC, AMD Radeon 780M Graphics
1900, DD, AMD Radeon 760M Graphics
1900, DE, AMD Radeon 780M Graphics
1900, DF, AMD Radeon 760M Graphics
1900, F0, AMD Radeon 780M Graphics
1900, F1, AMD Radeon 780M Graphics
1900, F2, AMD Radeon 780M Graphics
1901, C1, AMD Radeon 740M Graphics
1901, C2, AMD Radeon 740M Graphics
1901, C3, AMD Radeon 740M Graphics
1901, C6, AMD Radeon 740M Graphics
1901, C7, AMD Radeon 740M Graphics
1901, C8, AMD Radeon 740M Graphics
1901, C9, AMD Radeon 740M Graphics
1901, CA, AMD Radeon 740M Graphics
1901, D1, AMD Radeon 740M Graphics
1901, D2, AMD Radeon 740M Graphics
1901, D3, AMD Radeon 740M Graphics
1901, D4, AMD Radeon 740M Graphics
1901, D5, AMD Radeon 740M Graphics
1901, D6, AMD Radeon 740M Graphics
1901, D7, AMD Radeon 740M Graphics
1901, D8, AMD Radeon 740M Graphics
6600, 00, AMD Radeon HD 8600 / 8700M
6600, 0, AMD Radeon HD 8600 / 8700M
6600, 81, AMD Radeon R7 M370
6601, 00, AMD Radeon HD 8500M / 8700M
6604, 00, AMD Radeon R7 M265 Series
6601, 0, AMD Radeon HD 8500M / 8700M
6604, 0, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon R7 M350
6605, 00, AMD Radeon R7 M260 Series
6605, 0, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon R7 M340
6606, 00, AMD Radeon HD 8790M
6607, 00, AMD Radeon R5 M240
6608, 00, AMD FirePro W2100
6610, 00, AMD Radeon R7 200 Series
6606, 0, AMD Radeon HD 8790M
6607, 0, AMD Radeon HD 8530M
6608, 0, AMD FirePro W2100
6610, 0, AMD Radeon HD 8600 Series
6610, 81, AMD Radeon R7 350
6610, 83, AMD Radeon R5 340
6610, 87, AMD Radeon R7 200 Series
6611, 00, AMD Radeon R7 200 Series
6611, 87, AMD Radeon R7 200 Series
6613, 00, AMD Radeon R7 200 Series
6617, 00, AMD Radeon R7 240 Series
6617, 87, AMD Radeon R7 200 Series
6611, 0, AMD Radeon HD 8500 Series
6613, 0, AMD Radeon HD 8500 series
6617, C7, AMD Radeon R7 240 Series
6640, 00, AMD Radeon HD 8950
6640, 0, AMD Radeon HD 8950
6640, 80, AMD Radeon R9 M380
6646, 00, AMD Radeon R9 M280X
6646, 80, AMD Radeon R9 M385
6646, 0, AMD Radeon R9 M280X
6646, 80, AMD Radeon R9 M470X
6647, 00, AMD Radeon R9 M200X Series
6647, 0, AMD Radeon R9 M270X
6647, 80, AMD Radeon R9 M380
6649, 00, AMD FirePro W5100
6658, 00, AMD Radeon R7 200 Series
665C, 00, AMD Radeon HD 7700 Series
665D, 00, AMD Radeon R7 200 Series
665F, 81, AMD Radeon R7 360 Series
6660, 00, AMD Radeon HD 8600M Series
6649, 0, AMD FirePro W5100
6658, 0, AMD Radeon R7 200 Series
665C, 0, AMD Radeon HD 7700 Series
665D, 0, AMD Radeon R7 200 Series
665F, 81, AMD Radeon R7 300 Series
6660, 0, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon R5 M335
6660, 83, AMD Radeon R5 M330
6663, 00, AMD Radeon HD 8500M Series
6663, 0, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon R5 M320
6664, 00, AMD Radeon R5 M200 Series
6665, 00, AMD Radeon R5 M230 Series
6664, 0, AMD Radeon R5 M200 Series
6665, 0, AMD Radeon R5 M200 Series
6665, 83, AMD Radeon R5 M320
6665, C3, AMD Radeon R5 M435
6666, 00, AMD Radeon R5 M200 Series
6667, 00, AMD Radeon R5 M200 Series
666F, 00, AMD Radeon HD 8500M
66A1, 02, AMD Instinct MI60 / MI50
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
66A1, 06, AMD Radeon Pro VII
66AF, C1, AMD Radeon VII
6780, 00, AMD FirePro W9000
6784, 00, ATI FirePro V (FireGL V) Graphics Adapter
6788, 00, ATI FirePro V (FireGL V) Graphics Adapter
678A, 00, AMD FirePro W8000
6798, 00, AMD Radeon R9 200 / HD 7900 Series
6799, 00, AMD Radeon HD 7900 Series
679A, 00, AMD Radeon HD 7900 Series
679B, 00, AMD Radeon HD 7900 Series
679E, 00, AMD Radeon HD 7800 Series
67A0, 00, AMD Radeon FirePro W9100
67A1, 00, AMD Radeon FirePro W8100
67B0, 00, AMD Radeon R9 200 Series
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
6798, 0, AMD Radeon HD 7900 Series
679A, 0, AMD Radeon HD 7900 Series
679B, 0, AMD Radeon HD 7900 Series
679E, 0, AMD Radeon HD 7800 Series
67A0, 0, AMD Radeon FirePro W9100
67A1, 0, AMD Radeon FirePro W8100
67B0, 0, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon R9 390 Series
67B1, 00, AMD Radeon R9 200 Series
67B1, 0, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon R9 390 Series
67B9, 00, AMD Radeon R9 200 Series
67C0, 00, AMD Radeon Pro WX 7100 Graphics
67C0, 80, AMD Radeon E9550
67C2, 01, AMD Radeon Pro V7350x2
67C2, 02, AMD Radeon Pro V7300X
67C4, 00, AMD Radeon Pro WX 7100 Graphics
67C4, 80, AMD Radeon E9560 / E9565 Graphics
67C7, 00, AMD Radeon Pro WX 5100 Graphics
67C7, 80, AMD Radeon E9390 Graphics
67D0, 01, AMD Radeon Pro V7350x2
67D0, 02, AMD Radeon Pro V7300X
67DF, C0, AMD Radeon Pro 580X
67B9, 0, AMD Radeon R9 200 Series
67DF, C1, AMD Radeon RX 580 Series
67DF, C2, AMD Radeon RX 570 Series
67DF, C3, AMD Radeon RX 580 Series
@ -337,10 +131,17 @@
67DF, E1, AMD Radeon RX 590 Series
67DF, E3, AMD Radeon RX Series
67DF, E7, AMD Radeon RX 580 Series
67DF, EB, AMD Radeon Pro 580X
67DF, EF, AMD Radeon RX 570 Series
67DF, F7, AMD Radeon RX P30PH
67DF, FF, AMD Radeon RX 470 Series
67C2, 01, AMD Radeon Pro V7350x2
67C2, 02, AMD Radeon Pro V7300X
67C4, 00, AMD Radeon Pro WX 7100 Graphics
67C4, 80, AMD Radeon E9560 / E9565 Graphics
67C7, 00, AMD Radeon Pro WX 5100 Graphics
67C7, 80, AMD Radeon E9390 Graphics
67C0, 00, AMD Radeon Pro WX 7100 Graphics
67D0, 01, AMD Radeon Pro V7350x2
67D0, 02, AMD Radeon Pro V7300X
67E0, 00, AMD Radeon Pro WX Series
67E3, 00, AMD Radeon Pro WX 4100
67E8, 00, AMD Radeon Pro WX Series
@ -349,60 +150,52 @@
67EB, 00, AMD Radeon Pro V5300X
67EF, C0, AMD Radeon RX Graphics
67EF, C1, AMD Radeon RX 460 Graphics
67EF, C2, AMD Radeon Pro Series
67EF, C3, AMD Radeon RX Series
67EF, C5, AMD Radeon RX 460 Graphics
67EF, C7, AMD Radeon RX Graphics
67EF, CF, AMD Radeon RX 460 Graphics
67EF, E2, AMD Radeon RX 560X
67EF, E0, AMD Radeon RX 560 Series
67EF, E1, AMD Radeon RX Series
67EF, E2, AMD Radeon RX 560X
67EF, E3, AMD Radeon RX Series
67EF, E5, AMD Radeon RX 560 Series
67EF, E7, AMD Radeon RX 560 Series
67EF, EF, AMD Radeon 550 Series
67EF, EF, AMD Radeon RX Graphics
67EF, FF, AMD Radeon RX 460 Graphics
67FF, C0, AMD Radeon Pro 465
67FF, C1, AMD Radeon RX 560 Series
67FF, C0, AMD Radeon RX Graphics
67FF, C1, AMD Radeon RX Graphics
67FF, CF, AMD Radeon RX 560 Series
67FF, EF, AMD Radeon RX 560 Series
67FF, FF, AMD Radeon RX 550 Series
6800, 00, AMD Radeon HD 7970M
6801, 00, AMD Radeon HD 8970M
6806, 00, AMD Radeon R9 M290X
6808, 00, AMD FirePro W7000
6808, 00, ATI FirePro V (FireGL V) Graphics Adapter
6809, 00, ATI FirePro W5000
6810, 00, AMD Radeon R9 200 Series
6810, 81, AMD Radeon R9 370 Series
6811, 00, AMD Radeon R9 200 Series
6811, 81, AMD Radeon R7 370 Series
6818, 00, AMD Radeon HD 7800 Series
6819, 00, AMD Radeon HD 7800 Series
6820, 00, AMD Radeon R9 M275X
6800, 0, AMD Radeon HD 7970M
6801, 0, AMD Radeon HD 8970M
6808, 0, ATI FirePro V(FireGL V) Graphics Adapter
6809, 0, ATI FirePro V(FireGL V) Graphics Adapter
6810, 0, AMD Radeon HD 8800 Series
6810, 81, AMD Radeon R7 370 Series
6811, 0, AMD Radeon HD 8800 Series
6811, 81, AMD Radeon R7 300 Series
6818, 0, AMD Radeon HD 7800 Series
6819, 0, AMD Radeon HD 7800 Series
6820, 0, AMD Radeon HD 8800M Series
6820, 81, AMD Radeon R9 M375
6820, 83, AMD Radeon R9 M375X
6821, 00, AMD Radeon R9 M200X Series
6821, 83, AMD Radeon R9 M370X
6821, 0, AMD Radeon HD 8800M Series
6821, 87, AMD Radeon R7 M380
6822, 00, AMD Radeon E8860
6823, 00, AMD Radeon R9 M200X Series
6825, 00, AMD Radeon HD 7800M Series
6826, 00, AMD Radeon HD 7700M Series
6827, 00, AMD Radeon HD 7800M Series
6828, 00, AMD FirePro W600
682B, 00, AMD Radeon HD 8800M Series
6821, 83, AMD Radeon R9 M370X
6822, 0, AMD Radeon E8860
6823, 0, AMD Radeon HD 8800M Series
6825, 0, AMD Radeon HD 7800M Series
6827, 0, AMD Radeon HD 7800M Series
6828, 0, ATI FirePro V(FireGL V) Graphics Adapter
682B, 0, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon R9 M360
682C, 00, AMD FirePro W4100
682D, 00, AMD Radeon HD 7700M Series
682F, 00, AMD Radeon HD 7700M Series
6830, 00, AMD Radeon 7800M Series
6831, 00, AMD Radeon 7700M Series
6835, 00, AMD Radeon R7 Series / HD 9000 Series
6837, 00, AMD Radeon HD 7700 Series
683D, 00, AMD Radeon HD 7700 Series
683F, 00, AMD Radeon HD 7700 Series
684C, 00, ATI FirePro V (FireGL V) Graphics Adapter
682C, 0, AMD FirePro W4100
682D, 0, AMD Radeon HD 7700M Series
682F, 0, AMD Radeon HD 7700M Series
6835, 0, AMD Radeon R7 Series / HD 9000 Series
6837, 0, AMD Radeon HD 7700 Series
683D, 0, AMD Radeon HD 7700 Series
683F, 0, AMD Radeon HD 7700 Series
6860, 00, AMD Radeon Instinct MI25
6860, 01, AMD Radeon Instinct MI25
6860, 02, AMD Radeon Instinct MI25
@ -414,7 +207,6 @@
6863, 00, AMD Radeon Vega Frontier Edition
6864, 03, AMD Radeon Pro V340
6864, 04, AMD Radeon Instinct MI25x2
6864, 05, AMD Radeon Pro V340
6868, 00, AMD Radeon Pro WX 8200
686C, 00, AMD Radeon Instinct MI25 MxGPU
686C, 01, AMD Radeon Instinct MI25 MxGPU
@ -423,47 +215,35 @@
686C, 04, AMD Radeon Instinct MI25x2 MxGPU
686C, 05, AMD Radeon Pro V340L MxGPU
686C, 06, AMD Radeon Instinct MI25 MxGPU
687F, 01, AMD Radeon RX Vega
687F, C0, AMD Radeon RX Vega
687F, C1, AMD Radeon RX Vega
687F, C3, AMD Radeon RX Vega
687F, C7, AMD Radeon RX Vega
6900, 00, AMD Radeon R7 M260
6900, 0, AMD Radeon R7 M260
6900, 81, AMD Radeon R7 M360
6900, 83, AMD Radeon R7 M340
6900, C1, AMD Radeon R5 M465 Series
6900, C3, AMD Radeon R5 M445 Series
6900, D1, AMD Radeon 530 Series
6900, D3, AMD Radeon 530 Series
6901, 00, AMD Radeon R5 M255
6902, 00, AMD Radeon Series
6907, 00, AMD Radeon R5 M255
6901, 0, AMD Radeon R5 M255
6907, 0, AMD Radeon R5 M255
6907, 87, AMD Radeon R5 M315
6920, 00, AMD Radeon R9 M395X
6920, 01, AMD Radeon R9 M390X
6921, 00, AMD Radeon R9 M390X
6929, 00, AMD FirePro S7150
6929, 01, AMD FirePro S7100X
692B, 00, AMD FirePro W7100
6938, 00, AMD Radeon R9 200 Series
6920, 0, AMD Radeon R9 M395X
6920, 1, AMD Radeon R9 M390X
6921, 0, AMD Radeon R9 M295X
6929, 0, AMD FirePro S7150
692B, 0, AMD FirePro W7100
6938, 0, AMD Radeon R9 200 Series
6938, F0, AMD Radeon R9 200 Series
6938, F1, AMD Radeon R9 380 Series
6939, 00, AMD Radeon R9 200 Series
6939, F0, AMD Radeon R9 200 Series
6939, 0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon R9 380 Series
694C, C0, AMD Radeon RX Vega M GH Graphics
694E, C0, AMD Radeon RX Vega M GL Graphics
6980, 00, AMD Radeon Pro WX 3100
6981, 00, AMD Radeon Pro WX 3200 Series
6981, 01, AMD Radeon Pro WX 3200 Series
6981, 10, AMD Radeon Pro WX 3200 Series
6985, 00, AMD Radeon Pro WX 3100
6986, 00, AMD Radeon Pro WX 2100
6987, 80, AMD Embedded Radeon E9171
6987, C0, AMD Radeon 550X Series
6987, C1, AMD Radeon RX 640
6987, C3, AMD Radeon 540X Series
6987, C7, AMD Radeon 540
6995, 00, AMD Radeon Pro WX 2100
6997, 00, AMD Radeon Pro WX 2100
699F, 81, AMD Embedded Radeon E9170 Series
@ -471,14 +251,11 @@
699F, C1, AMD Radeon 540 Series
699F, C3, AMD Radeon 500 Series
699F, C7, AMD Radeon RX 550 / 550 Series
699F, C9, AMD Radeon 540
6FDF, E7, AMD Radeon RX 590 GME
6FDF, EF, AMD Radeon RX 580 2048SP
7300, C1, AMD FirePro S9300 x2
7300, C8, AMD Radeon R9 Fury Series
7300, C9, AMD Radeon Pro Duo
7300, CA, AMD Radeon R9 Fury Series
7300, CB, AMD Radeon R9 Fury Series
7300, CA, AMD Radeon R9 Fury Series
7312, 00, AMD Radeon Pro W5700
731E, C6, AMD Radeon RX 5700XTB
731E, C7, AMD Radeon RX 5700B
@ -491,38 +268,23 @@
731F, CA, AMD Radeon RX 5600 XT
731F, CB, AMD Radeon RX 5600 OEM
7340, C1, AMD Radeon RX 5500M
7340, C3, AMD Radeon RX 5300M
7340, C5, AMD Radeon RX 5500 XT
7340, C7, AMD Radeon RX 5500
7340, C9, AMD Radeon RX 5500XTB
7340, CF, AMD Radeon RX 5300
7341, 00, AMD Radeon Pro W5500
7347, 00, AMD Radeon Pro W5500M
7360, 41, AMD Radeon Pro 5600M
7360, C3, AMD Radeon Pro V520
7362, C1, AMD Radeon Pro V540
7362, C3, AMD Radeon Pro V520
738C, 01, AMD Instinct MI100
73A1, 00, AMD Radeon Pro V620
73A3, 00, AMD Radeon Pro W6800
73A5, C0, AMD Radeon RX 6950 XT
73AE, 00, AMD Radeon Pro V620 MxGPU
73AF, C0, AMD Radeon RX 6900 XT
73BF, C0, AMD Radeon RX 6900 XT
73BF, C1, AMD Radeon RX 6800 XT
73BF, C3, AMD Radeon RX 6800
73DF, C0, AMD Radeon RX 6750 XT
73DF, C1, AMD Radeon RX 6700 XT
73DF, C2, AMD Radeon RX 6800M
73DF, C3, AMD Radeon RX 6800M
73DF, C5, AMD Radeon RX 6700 XT
73DF, CF, AMD Radeon RX 6700M
73DF, D5, AMD Radeon RX 6750 GRE 12GB
73DF, D7, AMD TDC-235
73DF, DF, AMD Radeon RX 6700
73DF, E5, AMD Radeon RX 6750 GRE 12GB
73DF, FF, AMD Radeon RX 6700
73E0, 00, AMD Radeon RX 6600M
73E1, 00, AMD Radeon Pro W6600M
73E3, 00, AMD Radeon Pro W6600
73EF, C0, AMD Radeon RX 6800S
@ -534,167 +296,22 @@
73FF, C3, AMD Radeon RX 6600M
73FF, C7, AMD Radeon RX 6600
73FF, CB, AMD Radeon RX 6600S
73FF, CF, AMD Radeon RX 6600 LE
73FF, DF, AMD Radeon RX 6750 GRE 10GB
7408, 00, AMD Instinct MI250X
740C, 01, AMD Instinct MI250X / MI250
740F, 02, AMD Instinct MI210
7421, 00, AMD Radeon Pro W6500M
7422, 00, AMD Radeon Pro W6400
7422, 00, AMD Radeon PRO W6400
7423, 00, AMD Radeon Pro W6300M
7423, 01, AMD Radeon Pro W6300
7424, 00, AMD Radeon RX 6300
743F, C1, AMD Radeon RX 6500 XT
743F, C3, AMD Radeon RX 6500
743F, C3, AMD Radeon RX 6500M
743F, C7, AMD Radeon RX 6400
743F, C8, AMD Radeon RX 6500M
743F, CC, AMD Radeon 6550S
743F, CE, AMD Radeon RX 6450M
743F, CF, AMD Radeon RX 6300M
743F, D3, AMD Radeon RX 6550M
743F, D7, AMD Radeon RX 6400
7448, 00, AMD Radeon Pro W7900
7449, 00, AMD Radeon Pro W7800 48GB
744A, 00, AMD Radeon Pro W7900 Dual Slot
744B, 00, AMD Radeon Pro W7900D
744C, C8, AMD Radeon RX 7900 XTX
744C, CC, AMD Radeon RX 7900 XT
744C, CE, AMD Radeon RX 7900 GRE
744C, CF, AMD Radeon RX 7900M
745E, CC, AMD Radeon Pro W7800
7460, 00, AMD Radeon Pro V710
7461, 00, AMD Radeon Pro V710 MxGPU
7470, 00, AMD Radeon Pro W7700
747E, C8, AMD Radeon RX 7800 XT
747E, D8, AMD Radeon RX 7800M
747E, DB, AMD Radeon RX 7700
747E, FF, AMD Radeon RX 7700 XT
7480, 00, AMD Radeon Pro W7600
7480, C0, AMD Radeon RX 7600 XT
7480, C1, AMD Radeon RX 7700S
7480, C2, AMD Radeon RX 7650 GRE
7480, C3, AMD Radeon RX 7600S
7480, C7, AMD Radeon RX 7600M XT
7480, CF, AMD Radeon RX 7600
7481, C7, AMD Steam Machine
7483, CF, AMD Radeon RX 7600M
7489, 00, AMD Radeon Pro W7500
7499, 00, AMD Radeon Pro W7400
7499, C0, AMD Radeon RX 7400
7499, C1, AMD Radeon RX 7300
74A0, 00, AMD Instinct MI300A
74A1, 00, AMD Instinct MI300X
74A2, 00, AMD Instinct MI308X
74A5, 00, AMD Instinct MI325X
74A8, 00, AMD Instinct MI308X HF
74A9, 00, AMD Instinct MI300X HF
74B5, 00, AMD Instinct MI300X VF
74B6, 00, AMD Instinct MI308X
74BD, 00, AMD Instinct MI300X HF
7550, C0, AMD Radeon RX 9070 XT
7550, C2, AMD Radeon RX 9070 GRE
7550, C3, AMD Radeon RX 9070
7551, C0, AMD Radeon AI PRO R9700
7590, C0, AMD Radeon RX 9060 XT
7590, C7, AMD Radeon RX 9060
75A0, C0, AMD Instinct MI350X
75A3, C0, AMD Instinct MI355X
75B0, C0, AMD Instinct MI350X VF
75B3, C0, AMD Instinct MI355X VF
9830, 00, AMD Radeon HD 8400 / R3 Series
9831, 00, AMD Radeon HD 8400E
9832, 00, AMD Radeon HD 8330
9833, 00, AMD Radeon HD 8330E
9834, 00, AMD Radeon HD 8210
9835, 00, AMD Radeon HD 8210E
9836, 00, AMD Radeon HD 8200 / R3 Series
9837, 00, AMD Radeon HD 8280E
9838, 00, AMD Radeon HD 8200 / R3 series
9839, 00, AMD Radeon HD 8180
983D, 00, AMD Radeon HD 8250
9850, 00, AMD Radeon R3 Graphics
9850, 03, AMD Radeon R3 Graphics
9850, 40, AMD Radeon R2 Graphics
9850, 45, AMD Radeon R3 Graphics
9851, 00, AMD Radeon R4 Graphics
9851, 01, AMD Radeon R5E Graphics
9851, 05, AMD Radeon R5 Graphics
9851, 06, AMD Radeon R5E Graphics
9851, 40, AMD Radeon R4 Graphics
9851, 45, AMD Radeon R5 Graphics
9852, 00, AMD Radeon R2 Graphics
9852, 40, AMD Radeon E1 Graphics
9853, 00, AMD Radeon R2 Graphics
9853, 01, AMD Radeon R4E Graphics
9853, 03, AMD Radeon R2 Graphics
9853, 05, AMD Radeon R1E Graphics
9853, 06, AMD Radeon R1E Graphics
9853, 07, AMD Radeon R1E Graphics
9853, 08, AMD Radeon R1E Graphics
9853, 40, AMD Radeon R2 Graphics
9854, 00, AMD Radeon R3 Graphics
9854, 01, AMD Radeon R3E Graphics
9854, 02, AMD Radeon R3 Graphics
9854, 05, AMD Radeon R2 Graphics
9854, 06, AMD Radeon R4 Graphics
9854, 07, AMD Radeon R3 Graphics
9855, 02, AMD Radeon R6 Graphics
9855, 05, AMD Radeon R4 Graphics
9856, 00, AMD Radeon R2 Graphics
9856, 01, AMD Radeon R2E Graphics
9856, 02, AMD Radeon R2 Graphics
9856, 05, AMD Radeon R1E Graphics
9856, 06, AMD Radeon R2 Graphics
9856, 07, AMD Radeon R1E Graphics
9856, 08, AMD Radeon R1E Graphics
9856, 13, AMD Radeon R1E Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 88, AMD Radeon R7E Graphics
9874, 89, AMD Radeon R6E Graphics
9874, C4, AMD Radeon R7 Graphics
9874, C5, AMD Radeon R6 Graphics
9874, C6, AMD Radeon R6 Graphics
9874, C7, AMD Radeon R5 Graphics
9874, C8, AMD Radeon R7 Graphics
9874, C9, AMD Radeon R7 Graphics
9874, CA, AMD Radeon R5 Graphics
9874, CB, AMD Radeon R5 Graphics
9874, CC, AMD Radeon R7 Graphics
9874, CD, AMD Radeon R7 Graphics
9874, CE, AMD Radeon R5 Graphics
9874, E1, AMD Radeon R7 Graphics
9874, E2, AMD Radeon R7 Graphics
9874, E3, AMD Radeon R7 Graphics
9874, E4, AMD Radeon R7 Graphics
9874, E5, AMD Radeon R5 Graphics
9874, E6, AMD Radeon R5 Graphics
98E4, 80, AMD Radeon R5E Graphics
98E4, 81, AMD Radeon R4E Graphics
98E4, 83, AMD Radeon R2E Graphics
98E4, 84, AMD Radeon R2E Graphics
98E4, 86, AMD Radeon R1E Graphics
98E4, C0, AMD Radeon R4 Graphics
98E4, C1, AMD Radeon R5 Graphics
98E4, C2, AMD Radeon R4 Graphics
98E4, C4, AMD Radeon R5 Graphics
98E4, C6, AMD Radeon R5 Graphics
98E4, C8, AMD Radeon R4 Graphics
98E4, C9, AMD Radeon R4 Graphics
98E4, CA, AMD Radeon R5 Graphics
98E4, D0, AMD Radeon R2 Graphics
98E4, D1, AMD Radeon R2 Graphics
98E4, D2, AMD Radeon R2 Graphics
98E4, D4, AMD Radeon R2 Graphics
98E4, D9, AMD Radeon R5 Graphics
98E4, DA, AMD Radeon R5 Graphics
98E4, DB, AMD Radeon R3 Graphics
98E4, E1, AMD Radeon R3 Graphics
98E4, E2, AMD Radeon R3 Graphics
98E4, E9, AMD Radeon R4 Graphics
98E4, EA, AMD Radeon R4 Graphics
98E4, EB, AMD Radeon R3 Graphics
98E4, EB, AMD Radeon R4 Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
6FDF, E7, AMD Radeon RX 590 GME
6FDF, EF, AMD Radeon RX 580 2048SP

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_etnaviv",
defaults: [
"libdrm_defaults",
"libdrm_etnaviv_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

14
etnaviv/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_ETNAVIV_FILES, LIBDRM_ETNAVIV_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_etnaviv
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_ETNAVIV_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,13 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_etnaviv_sources",
srcs: [
"etnaviv_device.c",
"etnaviv_gpu.c",
"etnaviv_bo.c",
"etnaviv_bo_cache.c",
"etnaviv_pipe.c",
"etnaviv_cmd_stream.c",
],
}

13
etnaviv/Makefile.sources Normal file
View file

@ -0,0 +1,13 @@
LIBDRM_ETNAVIV_FILES := \
etnaviv_device.c \
etnaviv_gpu.c \
etnaviv_bo.c \
etnaviv_bo_cache.c \
etnaviv_perfmon.c \
etnaviv_pipe.c \
etnaviv_cmd_stream.c \
etnaviv_drm.h \
etnaviv_priv.h
LIBDRM_ETNAVIV_H_FILES := \
etnaviv_drmif.h

View file

@ -25,7 +25,8 @@
*/
#include <stdlib.h>
#include <sys/types.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>

View file

@ -32,8 +32,8 @@ libdrm_etnaviv = library(
link_with : libdrm,
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
dependencies : [dep_threads, dep_rt, dep_atomic_ops],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_rt, dep_atomic_ops],
version : '1.0.0',
install : true,
)
@ -51,7 +51,9 @@ ext_libdrm_etnaviv = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_etnaviv', ext_libdrm_etnaviv)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_etnaviv', ext_libdrm_etnaviv)
endif
test(
'etnaviv-symbols-check',
@ -59,6 +61,6 @@ test(
args : [
'--lib', libdrm_etnaviv,
'--symbols-file', files('etnaviv-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -31,6 +31,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>

View file

@ -30,6 +30,7 @@
#include <assert.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>

View file

@ -25,8 +25,8 @@ libdrm_exynos = library(
gnu_symbol_visibility : 'hidden',
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs],
version : '1.0.0',
install : true,
)
@ -38,7 +38,9 @@ ext_libdrm_exynos = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_exynos', ext_libdrm_exynos)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_exynos', ext_libdrm_exynos)
endif
pkg.generate(
libdrm_exynos,
@ -54,6 +56,6 @@ test(
args : [
'--lib', libdrm_exynos,
'--symbols-file', files('exynos-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

14
freedreno/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_FREEDRENO_FILES, LIBDRM_FREEDRENO_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_freedreno
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FREEDRENO_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -0,0 +1,25 @@
LIBDRM_FREEDRENO_FILES := \
freedreno_device.c \
freedreno_pipe.c \
freedreno_priv.h \
freedreno_ringbuffer.c \
freedreno_bo.c \
freedreno_bo_cache.c \
msm/msm_bo.c \
msm/msm_device.c \
msm/msm_pipe.c \
msm/msm_priv.h \
msm/msm_ringbuffer.c
LIBDRM_FREEDRENO_KGSL_FILES := \
kgsl/kgsl_bo.c \
kgsl/kgsl_device.c \
kgsl/kgsl_drm.h \
kgsl/kgsl_pipe.c \
kgsl/kgsl_priv.h \
kgsl/kgsl_ringbuffer.c \
kgsl/msm_kgsl.h
LIBDRM_FREEDRENO_H_FILES := \
freedreno_drmif.h \
freedreno_ringbuffer.h

View file

@ -28,6 +28,8 @@
#include "kgsl_priv.h"
#include <linux/fb.h>
static int set_memtype(struct fd_device *dev, uint32_t handle, uint32_t flags)
{
struct drm_kgsl_gem_memtype req = {

View file

@ -44,9 +44,9 @@ libdrm_freedreno = library(
[files_freedreno, config_file],
c_args : libdrm_c_args,
include_directories : [inc_root, inc_drm],
dependencies : [dep_valgrind, dep_threads, dep_rt, dep_atomic_ops],
dependencies : [dep_valgrind, dep_pthread_stubs, dep_rt, dep_atomic_ops],
link_with : libdrm,
version : '1.@0@.0'.format(patch_ver),
version : '1.0.0',
install : true,
)
@ -55,7 +55,9 @@ ext_libdrm_freedreno = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_freedreno', ext_libdrm_freedreno)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_freedreno', ext_libdrm_freedreno)
endif
install_headers(
'freedreno_drmif.h', 'freedreno_ringbuffer.h',
@ -75,6 +77,6 @@ test(
args : [
'--lib', libdrm_freedreno,
'--symbols-file', files('freedreno-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -56,7 +56,7 @@ with open(towrite, "w") as f:
that script instead of adding here entries manually! */
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
''')
f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID) },\n')
f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },\n')
f.write(' { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
for entry in fm_re['intel']:

View file

@ -71,7 +71,7 @@ Note: One should not do _any_ changes to the files apart from the steps below.
In order to update the files do the following:
- Switch to a Linux kernel tree/branch which is not rebased.
For example: drm-next (https://gitlab.freedesktop.org/drm/kernel/)
For example: drm-next (https://cgit.freedesktop.org/drm/drm)
- Install the headers via `make headers_install' to a separate location.
- Copy the drm header[s] + git add + git commit.
- Note: Your commit message must include:

View file

@ -54,9 +54,6 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@ -74,9 +71,6 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
@ -100,9 +94,6 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@ -110,14 +101,12 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
AMDGPU_GEM_DOMAIN_OA)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
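The domain and create-flag bits above are usually reached through libdrm's amdgpu wrapper rather than the raw GEM create ioctl. A minimal sketch, assuming a device handle obtained from amdgpu_device_initialize() and an arbitrary 1 MiB allocation:

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Allocate a small, CPU-mappable buffer in VRAM.  amdgpu_bo_alloc() is
 * the libdrm_amdgpu wrapper around the GEM create ioctl; the heap and
 * flag values come from the defines above. */
static int alloc_visible_vram_bo(amdgpu_device_handle dev, amdgpu_bo_handle *bo)
{
    struct amdgpu_bo_alloc_request req = {
        .alloc_size = 1 << 20,                  /* 1 MiB, example size */
        .phys_alignment = 4096,
        .preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
        .flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
    };

    return amdgpu_bo_alloc(dev, &req, bo);
}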
@ -151,32 +140,6 @@ extern "C" {
* not require GTT memory accounting
*/
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
/* Flag that BO can be discarded under memory pressure without keeping the
* content.
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
* May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_COHERENT (1 << 13)
/* Flag that BO should not be cached by GPU. Coherent without having to flush
* GPU caches explicitly
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
* atomics. May depend on GPU instructions to flush caches to device scope
* explicitly, promoting them to system scope automatically.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -255,17 +218,15 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occurred after ctx created */
/* indicate gpu reset occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occurred after ctx created */
/* indicate vram lost occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@ -323,261 +284,6 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
/* user queue IOCTL operations */
#define AMDGPU_USERQ_OP_CREATE 1
#define AMDGPU_USERQ_OP_FREE 2
/* queue priority levels */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
/* for queues that need access to protected content */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
/*
* This structure is a container to pass input configuration
* info for all supported userqueue related operations.
* For operation AMDGPU_USERQ_OP_CREATE: user is expected
* to set all fields, except the parameter 'queue_id'.
* For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
* to be set is 'queue_id', everything else is ignored.
*/
struct drm_amdgpu_userq_in {
/** AMDGPU_USERQ_OP_* */
__u32 op;
/** Queue id passed for operation USERQ_OP_FREE */
__u32 queue_id;
/** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
__u32 ip_type;
/**
* @doorbell_handle: the handle of doorbell GEM object
* associated to this userqueue client.
*/
__u32 doorbell_handle;
/**
* @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
* Kernel will generate absolute doorbell offset using doorbell_handle
* and doorbell_offset in the doorbell bo.
*/
__u32 doorbell_offset;
/**
* @flags: flags used for queue parameters
*/
__u32 flags;
/**
* @queue_va: Virtual address of the GPU memory which holds the queue
* object. The queue holds the workload packets.
*/
__u64 queue_va;
/**
* @queue_size: Size of the queue in bytes, this needs to be 256-byte
* aligned.
*/
__u64 queue_size;
/**
* @rptr_va : Virtual address of the GPU memory which holds the ring RPTR.
* This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*/
__u64 rptr_va;
/**
* @wptr_va : Virtual address of the GPU memory which holds the ring WPTR.
* This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*
* Queue, RPTR and WPTR can come from the same object, as long as the size
* and alignment related requirements are met.
*/
__u64 wptr_va;
/**
* @mqd: MQD (memory queue descriptor) is a set of parameters which allow
* the GPU to uniquely define and identify a usermode queue.
*
* MQD data can be of different size for different GPU IP/engine and
* their respective versions/revisions, so this points to a __u64 *
* which holds IP specific MQD of this usermode queue.
*/
__u64 mqd;
/**
* @size: size of MQD data in bytes, it must match the MQD structure
* size of the respective engine/revision defined in UAPI for ex, for
* gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11).
*/
__u64 mqd_size;
};
/* The structure to carry output of userqueue ops */
struct drm_amdgpu_userq_out {
/**
* For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
* queue ID to represent the newly created userqueue in the system, otherwise
* it should be ignored.
*/
__u32 queue_id;
__u32 _pad;
};
union drm_amdgpu_userq {
struct drm_amdgpu_userq_in in;
struct drm_amdgpu_userq_out out;
};
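To make the create path above concrete, here is a minimal sketch of filling drm_amdgpu_userq_in for AMDGPU_USERQ_OP_CREATE. It uses only the fields documented above; the queue, RPTR, WPTR and MQD GPU virtual addresses are assumed to have been allocated and mapped beforehand, and the request goes through libdrm's drmIoctl() helper.

/* Create a GFX user queue; returns the kernel-assigned queue_id. */
static int create_gfx_userq(int fd, __u32 doorbell_handle,
                            __u64 queue_va, __u64 rptr_va, __u64 wptr_va,
                            __u64 mqd_va, __u64 mqd_size, __u32 *queue_id)
{
    union drm_amdgpu_userq args = {
        .in = {
            .op = AMDGPU_USERQ_OP_CREATE,
            .ip_type = AMDGPU_HW_IP_GFX,
            .doorbell_handle = doorbell_handle,
            .doorbell_offset = 0,
            .queue_va = queue_va,
            .queue_size = 256 * 1024,   /* example size, 256-byte aligned */
            .rptr_va = rptr_va,
            .wptr_va = wptr_va,
            .mqd = mqd_va,
            .mqd_size = mqd_size,
        },
    };
    int ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);

    if (ret == 0)
        *queue_id = args.out.queue_id;
    return ret;
}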
/* GFX V11 IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_gfx11 {
/**
* @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 shadow_va;
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 csa_va;
};
/* GFX V11 SDMA IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_sdma_gfx11 {
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 csa_va;
};
/* GFX V11 Compute IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_compute_gfx11 {
/**
* @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
* This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 eop_va;
};
/* userq signal/wait ioctl */
struct drm_amdgpu_userq_signal {
/**
* @queue_id: Queue handle used by the userq fence creation function
* to retrieve the WPTR.
*/
__u32 queue_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to be signaled.
*/
__u64 syncobj_handles;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u64 num_syncobj_handles;
/**
* @bo_read_handles: The list of BO handles that the submitted user queue job
* is using for read only. This will update BO fences in the kernel.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of BO handles that the submitted user queue job
* is using for write only. This will update BO fences in the kernel.
*/
__u64 bo_write_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
};
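A short sketch of the signal side, using only the fields shown above and the DRM_IOCTL_AMDGPU_USERQ_SIGNAL request defined earlier in this header; the syncobj handle array is passed as a user pointer:

/* Attach a single syncobj to a user-queue submission so other clients
 * can wait on its completion. */
static int userq_signal_one(int fd, __u32 queue_id, __u32 syncobj)
{
    __u32 handles[1] = { syncobj };
    struct drm_amdgpu_userq_signal args = {
        .queue_id = queue_id,
        .syncobj_handles = (__u64)(uintptr_t)handles,
        .num_syncobj_handles = 1,
    };

    return drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_SIGNAL, &args);
}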
struct drm_amdgpu_userq_fence_info {
/**
* @va: A gpu address allocated for each queue which stores the
* read pointer (RPTR) value.
*/
__u64 va;
/**
* @value: A 64 bit value represents the write pointer (WPTR) of the
* queue commands which compared with the RPTR value to signal the
* fences.
*/
__u64 value;
};
struct drm_amdgpu_userq_wait {
/**
* @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
* wait queue and maintain the fence driver references in it.
*/
__u32 waitq_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 syncobj_handles;
/**
* @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
* the user queue job to get the va/value pairs at given @syncobj_timeline_points.
*/
__u64 syncobj_timeline_handles;
/**
* @syncobj_timeline_points: The list of timeline syncobj points submitted by the
* user queue job for the corresponding @syncobj_timeline_handles.
*/
__u64 syncobj_timeline_points;
/**
* @bo_read_handles: The list of read BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of write BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_write_handles;
/**
* @num_syncobj_timeline_handles: A count that represents the number of timeline
* syncobj handles in @syncobj_timeline_handles.
*/
__u16 num_syncobj_timeline_handles;
/**
* @num_fences: This field can be used both as input and output. As input it defines
* the maximum number of fences that can be returned and as output it will specify
* how many fences were actually returned from the ioctl.
*/
__u16 num_fences;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u32 num_syncobj_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
/**
* @out_fences: The field is a return value from the ioctl containing the list of
* address/value pairs to wait for.
*/
__u64 out_fences;
};
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@ -653,7 +359,7 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
/* GFX9 - GFX11: */
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
@ -667,17 +373,6 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* GFX12 and later: */
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
/* These are DCC recompression setting for memory management: */
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
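For illustration, the SET helper above packs a field value into the 64-bit tiling word; the matching AMDGPU_TILING_GET helper (not visible in this hunk) extracts it again. A hypothetical scanout surface on GFX9+ might be described as:

/* Example values only: swizzle mode 25 stands in for whatever mode the
 * UMD picked; SCANOUT marks the surface as displayable. */
__u64 tiling_flags = AMDGPU_TILING_SET(SWIZZLE_MODE, 25) |
                     AMDGPU_TILING_SET(SCANOUT, 1);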
@ -834,8 +529,6 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
/* don't allocate MALL */
#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@ -851,19 +544,6 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
/**
* vm_timeline_point is a sequence number used to add new timeline point.
*/
__u64 vm_timeline_point;
/**
* The vm page table update fence is installed in given vm_timeline_syncobj_out
* at vm_timeline_point.
*/
__u32 vm_timeline_syncobj_out;
/** the number of syncobj handles in @input_fence_syncobj_handles */
__u32 num_syncobj_handles;
/** Array of sync object handle to wait for given input fences */
__u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
@ -873,14 +553,9 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
/*
* From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
* both encoding and decoding jobs.
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@ -893,7 +568,6 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -1010,15 +684,6 @@ struct drm_amdgpu_cs_chunk_data {
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
__u64 flags;
};
/*
* Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
*
@ -1026,17 +691,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
/*
* Query h/w info: Flag identifying VF/PF/PT mode
*
*/
#define AMDGPU_IDS_FLAGS_MODE_MASK 0x300
#define AMDGPU_IDS_FLAGS_MODE_SHIFT 0x8
#define AMDGPU_IDS_FLAGS_MODE_PF 0x0
#define AMDGPU_IDS_FLAGS_MODE_VF 0x1
#define AMDGPU_IDS_FLAGS_MODE_PT 0x2
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@ -1089,20 +743,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_FW_DMCUB 0x14
/* Subquery id: Query TOC firmware version */
#define AMDGPU_INFO_FW_TOC 0x15
/* Subquery id: Query CAP firmware version */
#define AMDGPU_INFO_FW_CAP 0x16
/* Subquery id: Query GFX RLCP firmware version */
#define AMDGPU_INFO_FW_GFX_RLCP 0x17
/* Subquery id: Query GFX RLCV firmware version */
#define AMDGPU_INFO_FW_GFX_RLCV 0x18
/* Subquery id: Query MES_KIQ firmware version */
#define AMDGPU_INFO_FW_MES_KIQ 0x19
/* Subquery id: Query MES firmware version */
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* Subquery id: Query VPE firmware version */
#define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@ -1156,12 +796,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Subquery id: Query GPU peak pstate shader clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
/* Subquery id: Query GPU peak pstate memory clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Subquery id: Query input GPU power */
#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@ -1201,12 +835,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
/* query FW object size and alignment */
#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@ -1358,8 +986,6 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
@ -1385,8 +1011,7 @@ struct drm_amdgpu_info_device {
__u32 enabled_rb_pipes_mask;
__u32 num_rb_pipes;
__u32 num_hw_gfx_contexts;
/* PCIe version (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_gen;
__u32 _pad;
__u64 ids_flags;
/** Starting virtual address for UMDs. */
__u64 virtual_address_offset;
@ -1433,8 +1058,7 @@ struct drm_amdgpu_info_device {
__u32 gs_prim_buffer_depth;
/* max gs wavefront per vgt*/
__u32 max_gs_waves_per_vgt;
/* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_num_lanes;
__u32 _pad1;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
/** Starting high virtual address for UMDs. */
@ -1445,29 +1069,6 @@ struct drm_amdgpu_info_device {
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
__u64 min_engine_clock;
__u64 min_memory_clock;
/* The following fields are only set on gfx11+, older chips set 0. */
__u32 tcp_cache_size; /* AKA GL0, VMEM cache */
__u32 num_sqc_per_wgp;
__u32 sqc_data_cache_size; /* AKA SMEM cache */
__u32 sqc_inst_cache_size;
__u32 gl1c_cache_size;
__u32 gl2c_cache_size;
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
/* shadow area size for gfx11 */
__u32 shadow_size;
/* shadow area base virtual alignment for gfx11 */
__u32 shadow_alignment;
/* context save area size for gfx11 */
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
/* Userq IP mask (1 << AMDGPU_HW_IP_*) */
__u32 userq_ip_mask;
__u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@ -1482,29 +1083,7 @@ struct drm_amdgpu_info_hw_ip {
__u32 ib_size_alignment;
/** Bitmask of available rings. Bit 0 means ring 0, etc. */
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
};
/* GFX metadata BO sizes and alignment info (in bytes) */
struct drm_amdgpu_info_uq_fw_areas_gfx {
/* shadow area size */
__u32 shadow_size;
/* shadow area base virtual mem alignment */
__u32 shadow_alignment;
/* context save area size */
__u32 csa_size;
/* context save area base virtual mem alignment */
__u32 csa_alignment;
};
/* IP specific metadata related information used in the
* subquery AMDGPU_INFO_UQ_FW_AREAS
*/
struct drm_amdgpu_info_uq_fw_areas {
union {
struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
};
__u32 _pad;
};
struct drm_amdgpu_info_num_handles {
@ -1556,20 +1135,6 @@ struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
#define AMDGPU_VMHUB_TYPE_MASK 0xff
#define AMDGPU_VMHUB_TYPE_SHIFT 0
#define AMDGPU_VMHUB_TYPE_GFX 0
#define AMDGPU_VMHUB_TYPE_MM0 1
#define AMDGPU_VMHUB_TYPE_MM1 2
#define AMDGPU_VMHUB_IDX_MASK 0xff00
#define AMDGPU_VMHUB_IDX_SHIFT 8
struct drm_amdgpu_info_gpuvm_fault {
__u64 addr;
__u32 status;
__u32 vmhub;
};
/*
* Supported GPU families
*/
@ -1583,13 +1148,7 @@ struct drm_amdgpu_info_gpuvm_fault {
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */
#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
#if defined(__cplusplus)
}

View file

@ -591,63 +591,32 @@ struct drm_set_version {
int drm_dd_minor;
};
/**
* struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
* @handle: Handle of the object to be closed.
* @pad: Padding.
*
* Releases the handle to an mm object.
*/
/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
/**
* struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
* @handle: Handle for the object being named.
* @name: Returned global name.
*
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/**
* struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
* @name: Name of object being opened.
* @handle: Returned handle for the object.
* @size: Returned size of the object
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
__u32 handle;
__u64 size;
};
/**
* struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
* @handle: The handle of a gem object.
* @new_handle: An available gem handle.
*
* This ioctl changes the handle of a GEM object to the specified one.
* The new handle must be unused. On success the old handle is closed
* and all further IOCTL should refer to the new handle only.
* Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
*/
struct drm_gem_change_handle {
/** Returned handle for the object */
__u32 handle;
__u32 new_handle;
/** Returned size of the object */
__u64 size;
};
/**
@ -660,8 +629,8 @@ struct drm_gem_change_handle {
/**
* DRM_CAP_VBLANK_HIGH_CRTC
*
* If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
* in the high bits of &drm_wait_vblank_request.type.
* If set to 1, the kernel supports specifying a CRTC index in the high bits of
* &drm_wait_vblank_request.type.
*
* Starting kernel version 2.6.39, this capability is always set to 1.
*/
@ -698,11 +667,8 @@ struct drm_gem_change_handle {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
* Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
* &DRM_PRIME_CAP_EXPORT are always advertised.
*
* PRIME buffers are exposed as dma-buf file descriptors.
* See :ref:`prime_buffer_sharing`.
* PRIME buffers are exposed as dma-buf file descriptors. See
* Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
*/
#define DRM_CAP_PRIME 0x5
/**
@ -710,8 +676,6 @@ struct drm_gem_change_handle {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@ -719,8 +683,6 @@ struct drm_gem_change_handle {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@ -738,8 +700,7 @@ struct drm_gem_change_handle {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
* page-flips.
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@ -789,23 +750,17 @@ struct drm_gem_change_handle {
/**
* DRM_CAP_SYNCOBJ
*
* If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
* If set to 1, the driver supports sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
* :ref:`drm_sync_objects`.
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
* DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
* commits.
*/
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
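Capabilities like the ones above are normally probed once at start-up; a minimal sketch using libdrm's drmGetCap() wrapper around DRM_IOCTL_GET_CAP:

#include <stdint.h>
#include <xf86drm.h>

/* Returns non-zero if the device advertises sync-object support. */
static int device_has_syncobj(int fd)
{
    uint64_t value = 0;

    if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value) != 0)
        return 0;
    return value != 0;
}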
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@ -875,46 +830,6 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
/**
* DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
*
* Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
* virtualbox) have additional restrictions for cursor planes (thus
* making cursor planes on those drivers not truly universal,) e.g.
* they need cursor planes to act like one would expect from a mouse
* cursor and have correctly set hotspot properties.
* If this client cap is not set the DRM core will hide cursor plane on
* those virtualized drivers because not setting it implies that the
* client is not capable of dealing with those extra restrictions.
* Clients which do set cursor hotspot and treat the cursor plane
* like a mouse cursor should set this property.
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*
* Setting this property on drivers which do not special case
* cursor planes (i.e. non-virtualized drivers) will return
* EOPNOTSUPP, which can be used by userspace to gauge
* requirements of the hardware/drivers they're running on.
*
* This capability is always supported for atomic-capable virtualized
* drivers starting from kernel version 6.6.
*/
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
/**
* DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
*
* If set to 1 the DRM core will allow setting the COLOR_PIPELINE
* property on a &drm_plane, as well as drm_colorop properties.
*
* Setting of these plane properties will be rejected when this client
* cap is set:
* - COLOR_ENCODING
* - COLOR_RANGE
*
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*/
#define DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE 7
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@ -945,17 +860,13 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
__u64 point;
};
struct drm_syncobj_transfer {
@ -970,7 +881,6 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@ -979,14 +889,6 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@ -999,35 +901,6 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
/**
* struct drm_syncobj_eventfd
* @handle: syncobj handle.
* @flags: Zero to wait for the point to be signalled, or
* &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
* available for the point.
* @point: syncobj timeline point (set to zero for binary syncobjs).
* @fd: Existing eventfd to send events to.
* @pad: Must be zero.
*
* Register an eventfd to be signalled by a syncobj. The eventfd counter will
* be incremented by one.
*/
struct drm_syncobj_eventfd {
__u32 handle;
__u32 flags;
__u64 point;
__s32 fd;
__u32 pad;
};
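A hedged sketch of driving the eventfd registration above; it assumes the corresponding DRM_IOCTL_SYNCOBJ_EVENTFD request defined elsewhere in this header and a binary (non-timeline) syncobj:

#include <sys/eventfd.h>
#include <unistd.h>

/* Ask the kernel to bump the eventfd counter once the syncobj signals. */
static int watch_syncobj(int drm_fd, __u32 syncobj_handle)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    struct drm_syncobj_eventfd args = {
        .handle = syncobj_handle,
        .flags = 0,
        .point = 0,             /* binary syncobj */
        .fd = efd,
    };

    if (efd < 0)
        return -1;
    if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args)) {
        close(efd);
        return -1;
    }
    return efd;                 /* poll/read this fd to observe the signal */
}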
@ -1068,13 +941,6 @@ struct drm_crtc_queue_sequence {
__u64 user_data; /* user data passed to event */
};
#define DRM_CLIENT_NAME_MAX_LEN 64
struct drm_set_client_name {
__u64 name_len;
__u64 name;
};
#if defined(__cplusplus)
}
#endif
@ -1100,19 +966,6 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/**
* DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
*
* GEM handles are not reference-counted by the kernel. User-space is
* responsible for managing their lifetime. For example, if user-space imports
* the same memory object twice on the same DRM file description, the same GEM
* handle is returned by both imports, and user-space needs to ensure
* &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
* when a memory object is allocated, then exported and imported again on the
* same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
* and always returns fresh new GEM handles even if an existing GEM handle
* already refers to the same memory object before the IOCTL is performed.
*/
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
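/*
 * Illustrative user-space sketch (not part of this header): releasing a GEM
 * handle with DRM_IOCTL_GEM_CLOSE. As noted above, handles are not
 * reference-counted, so the caller must ensure this happens exactly once per
 * handle.
 */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>

static int gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close args = { .handle = handle };

	return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args) ? -errno : 0;
}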
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
@ -1153,37 +1006,7 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
/**
* DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
*
* User-space sets &drm_prime_handle.handle with the GEM handle to export and
* &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
* &drm_prime_handle.fd.
*
* The export can fail for any driver-specific reason, e.g. because export is
* not supported for this specific GEM handle (but might be for others).
*
* Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
*/
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
/**
* DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
*
* User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
* import, and gets back a GEM handle in &drm_prime_handle.handle.
* &drm_prime_handle.flags is unused.
*
* If an existing GEM handle refers to the memory object backing the DMA-BUF,
* that GEM handle is returned. Therefore user-space which needs to handle
* arbitrary DMA-BUFs must have a user-space lookup data structure to manually
* reference-count duplicated GEM handles. For more information see
* &DRM_IOCTL_GEM_CLOSE.
*
* The import can fail for any driver-specific reason, e.g. because import is
* only supported for DMA-BUFs allocated on this DRM device.
*
* Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
*/
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
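/*
 * Illustrative user-space sketch (not part of this header): exporting a GEM
 * handle to a DMA-BUF fd and importing it back on the same device, using the
 * libdrm wrappers around the two PRIME ioctls above. Assumes the driver
 * advertises DRM_PRIME_CAP_EXPORT and DRM_PRIME_CAP_IMPORT.
 */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static int prime_roundtrip(int fd, uint32_t handle)
{
	int dmabuf_fd;
	uint32_t imported;
	int ret;

	ret = drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &dmabuf_fd);
	if (ret)
		return ret;

	/* Importing on the same DRM file description returns a handle for the
	 * same object, possibly the original one (see DRM_IOCTL_GEM_CLOSE). */
	ret = drmPrimeFDToHandle(fd, dmabuf_fd, &imported);
	close(dmabuf_fd);
	return ret;
}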
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
@ -1221,40 +1044,10 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
* DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
*
* This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* Warning: removing a framebuffer currently in-use on an enabled plane will
* disable that plane. The CRTC the plane is linked to may also be disabled
* (depending on driver capabilities).
*/
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
*
* KMS dumb buffers provide a very primitive way to allocate a buffer object
* suitable for scanout and map it for software rendering. KMS dumb buffers are
* not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
* buffers are not suitable to be displayed on any other device than the KMS
* device where they were allocated from. Also see
* :ref:`kms_dumb_buffer_objects`.
*
* The IOCTL argument is a struct drm_mode_create_dumb.
*
* User-space is expected to create a KMS dumb buffer via this IOCTL, then add
* it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
* &DRM_IOCTL_MODE_MAP_DUMB.
*
* &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
* &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
* driver preferences for dumb buffers.
*/
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
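/*
 * Illustrative user-space sketch (not part of this header): the create/map
 * half of the dumb-buffer workflow described above, using raw ioctls. Adding
 * the buffer as a framebuffer (DRM_IOCTL_MODE_ADDFB) is omitted for brevity;
 * the 32 bpp colour mode is an arbitrary example value.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *create_and_map_dumb(int fd, uint32_t width, uint32_t height,
				 uint32_t *handle, uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*handle = create.handle;
	*pitch = create.pitch;
	return ptr;
}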
@ -1287,76 +1080,8 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
/**
* DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
*
* This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
* will be filled with GEM buffer handles. Fresh new GEM handles are always
* returned, even if another GEM handle referring to the same memory object
* already exists on the DRM file description. The caller is responsible for
* removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
* new handle will be returned for multiple planes in case they use the same
* memory object. Planes are valid until one has a zero handle -- this can be
* used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
*
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
*
* To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
* can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
* close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
* double-close handles which are specified multiple times in the array.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
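/*
 * Illustrative user-space sketch (not part of this header): fetching
 * framebuffer metadata with DRM_IOCTL_MODE_GETFB2 and exporting the first
 * plane as a DMA-BUF without leaking the fresh GEM handle, as recommended
 * above. Requires DRM master or CAP_SYS_ADMIN for the handles to be filled;
 * de-duplication of handles repeated across planes is omitted.
 */
#include <stdint.h>
#include <xf86drm.h>

static int export_fb_plane0(int fd, uint32_t fb_id, int *dmabuf_fd)
{
	struct drm_mode_fb_cmd2 info = { .fb_id = fb_id };
	struct drm_gem_close close_args = { 0 };
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &info);
	if (ret)
		return ret;

	ret = drmPrimeHandleToFD(fd, info.handles[0], DRM_CLOEXEC, dmabuf_fd);

	/* Close the fresh handle whether or not the export succeeded. */
	close_args.handle = info.handles[0];
	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
	return ret;
}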
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
/**
* DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
*
* This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
* planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
* alive. When the plane no longer uses the framebuffer (because the
* framebuffer is replaced with another one, or the plane is disabled), the
* framebuffer is cleaned up.
*
* This is useful to implement flicker-free transitions between two processes.
*
* Depending on the threat model, user-space may want to ensure that the
* framebuffer doesn't expose any sensitive user information: closed
* framebuffers attached to a plane can be read back by the next DRM master.
*/
#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
/**
* DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file
*
* Having a name allows for easier tracking and debugging.
* The length of the name (without null ending char) must be
* <= DRM_CLIENT_NAME_MAX_LEN.
* The call will fail if the name contains whitespaces or non-printable chars.
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
/**
* DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
*
* Some applications (notably CRIU) need objects to have specific gem handles.
* This ioctl changes the object at one gem handle to use a new gem handle.
*/
#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@ -1368,50 +1093,25 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
/**
* struct drm_event - Header for DRM events
* @type: event type.
* @length: total number of payload bytes (including header).
/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* This struct is a header for events written back to user-space on the DRM FD.
* A read on the DRM FD will always only return complete events: e.g. if the
* read buffer is 100 bytes large and there are two 64 byte events pending,
* only one will be returned.
*
* Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
* up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
* &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
/**
* DRM_EVENT_VBLANK - vertical blanking event
*
* This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
* &_DRM_VBLANK_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_VBLANK 0x01
/**
* DRM_EVENT_FLIP_COMPLETE - page-flip completion event
*
* This event is sent in response to an atomic commit or legacy page-flip with
* the &DRM_MODE_PAGE_FLIP_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
* DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
*
* This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
*
* The event payload is a struct drm_event_crtc_sequence.
*/
#define DRM_EVENT_CRTC_SEQUENCE 0x03
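/*
 * Illustrative user-space sketch (not part of this header): draining complete
 * events from the DRM fd and dispatching on the generic types above. Real
 * code typically lets libdrm's drmHandleEvent() do this parsing instead.
 */
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

static void drain_drm_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf)); /* whole events only */
	ssize_t i = 0;

	while (len > 0 && i + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + i, sizeof(ev));
		if (ev.length < sizeof(ev))
			break;
		if (ev.type == DRM_EVENT_VBLANK ||
		    ev.type == DRM_EVENT_FLIP_COMPLETE) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + i, sizeof(vbl));
			/* vbl.user_data, vbl.sequence, ... are now usable */
		}
		i += ev.length;
	}
}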
struct drm_event_vblank {

View file

@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
* modifier is specific to the modifier being used. For example, some modifiers
 * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
* These users mustn't need to know to reason about the modifier value
 * These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@ -88,18 +88,6 @@ extern "C" {
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
*
* Open Source User Waiver
* -----------------------
*
* Because this is the authoritative source for pixel formats and modifiers
* referenced by GL, Vulkan extensions and other standards and hence used both
* by open source and closed source driver stacks, the usual requirement for an
* upstream in-kernel or open source userspace user does not apply.
*
* To ensure, as much as feasible, compatibility across stacks and avoid
* confusion with incompatible enumerations stakeholders for all relevant driver
* stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
@ -111,42 +99,12 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
/* 1 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
/* 2 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
/* 4 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
/* 8 bpp Red (direct relationship between channel value and brightness) */
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
/* 10 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
/* 12 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
/* 16 bpp Red (direct relationship between channel value and brightness) */
/* 16 bpp Red */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@ -210,10 +168,6 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/* 48 bpp RGB */
#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@ -222,7 +176,7 @@ extern "C" {
#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
* Half-Floating point - 16b/component
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
*/
@ -232,20 +186,6 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
#define DRM_FORMAT_R16F fourcc_code('R', ' ', ' ', 'H') /* [15:0] R 16 little endian */
#define DRM_FORMAT_GR1616F fourcc_code('G', 'R', ' ', 'H') /* [31:0] G:R 16:16 little endian */
#define DRM_FORMAT_BGR161616F fourcc_code('B', 'G', 'R', 'H') /* [47:0] B:G:R 16:16:16 little endian */
/*
* Floating point - 32b/component
* IEEE 754-2008 binary32 float
* [31:0] sign:exponent:mantissa 1:8:23
*/
#define DRM_FORMAT_R32F fourcc_code('R', ' ', ' ', 'F') /* [31:0] R 32 little endian */
#define DRM_FORMAT_GR3232F fourcc_code('G', 'R', ' ', 'F') /* [63:0] R:G 32:32 little endian */
#define DRM_FORMAT_BGR323232F fourcc_code('B', 'G', 'R', 'F') /* [95:0] R:G:B 32:32:32 little endian */
#define DRM_FORMAT_ABGR32323232F fourcc_code('A', 'B', '8', 'F') /* [127:0] R:G:B:A 32:32:32:32 little endian */
/*
* RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
* of unused padding per component:
@ -259,9 +199,7 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
@ -341,8 +279,6 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@ -372,13 +308,6 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
/* 2 plane YCbCr420.
* 3 10 bit components and 2 padding bits packed into 4 bytes.
* index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
* index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
*/
#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
/* 3 plane non-subsampled (444) YCbCr
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
@ -395,42 +324,6 @@ extern "C" {
*/
#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
* 3 plane YCbCr LSB aligned
* In order to use these formats in a similar fashion to MSB aligned ones
* implementation can multiply the values by 2^6=64. For that reason the padding
* must only contain zeros.
* index 0 = Y plane, [15:0] z:Y [6:10] little endian
* index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
* index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
*/
#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
#define DRM_FORMAT_S410 fourcc_code('S', '4', '1', '0') /* non-subsampled Cb (1) and Cr (2) planes 10 bits per channel */
/*
* 3 plane YCbCr LSB aligned
* In order to use these formats in a similar fashion to MSB aligned ones
* implementation can multiply the values by 2^4=16. For that reason the padding
* must only contain zeros.
* index 0 = Y plane, [15:0] z:Y [4:12] little endian
* index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
* index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
*/
#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
#define DRM_FORMAT_S412 fourcc_code('S', '4', '1', '2') /* non-subsampled Cb (1) and Cr (2) planes 12 bits per channel */
/*
* 3 plane YCbCr
* index 0 = Y plane, [15:0] Y little endian
* index 1 = Cr plane, [15:0] Cr little endian
* index 2 = Cb plane, [15:0] Cb little endian
*/
#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
#define DRM_FORMAT_S416 fourcc_code('S', '4', '1', '6') /* non-subsampled Cb (1) and Cr (2) planes 16 bits per channel */
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@ -475,19 +368,11 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#define fourcc_mod_get_vendor(modifier) \
(((modifier) >> 56) & 0xff)
#define fourcc_mod_is_vendor(modifier, vendor) \
(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
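/*
 * Illustrative sketch (not part of this header): the helper macros above make
 * vendor checks on an otherwise opaque modifier straightforward, e.g.:
 */
static inline int modifier_is_intel(__u64 modifier)
{
	return fourcc_mod_is_vendor(modifier, INTEL);
}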
@ -596,7 +481,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
* Each group therefore consists out of four 256 byte units, which are also laid
 * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@ -655,96 +540,6 @@ extern "C" {
*
* The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
* be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* Converted Clear Color value and the next 32 bits store the Higher Converted
* Clear Color value when applicable. The Converted Clear Color values are
* consumed by the DE. The last 64 bits are used to store Color Discard Enable
* and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
/*
* Intel Tile 4 layout
*
* This is a tiled layout using 4KB tiles in a row-major layout. It has the same
* shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
* only differs from Tile Y at the 256B granularity in between. At this
* granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
* of 64B x 8 rows.
*/
#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
/*
* Intel color control surfaces (CCS) for DG2 render compression.
*
* The main surface is Tile 4 and at plane index 0. The CCS data is stored
* outside of the GEM object in a reserved memory area dedicated for the
* storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
* main surface pitch is required to be a multiple of four Tile 4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
/*
* Intel color control surfaces (CCS) for DG2 media compression.
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
* pitch is required to be a multiple of four Tile 4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
/*
* Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
*
* The main surface is Tile 4 and at plane index 0. The CCS data is stored
* outside of the GEM object in a reserved memory area dedicated for the
* storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
* main surface pitch is required to be a multiple of four Tile 4 widths. The
* clear color is stored at plane index 1 and the pitch should be 64 bytes
* aligned. The format of the 256 bits of clear color data matches the one used
* for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
* for details.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 media compression
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths. For semi-planar formats like NV12, CCS planes follow the
* Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
* planes 2 and 3 for the respective CCS.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
/*
* Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
* compression.
*
* The main surface is tile4 and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
* be ignored. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
@ -756,32 +551,7 @@ extern "C" {
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
* Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
* on integrated graphics
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all compressible GEM objects.
*/
#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
/*
* Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
* on discrete graphics
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all compressible GEM objects. The GEM object must be stored in
* contiguous memory with a size aligned to 64KB
*/
#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
@ -820,28 +590,6 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
/*
* Qualcomm Tiled Format
*
* Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
* Implementation may be platform and base-format specific.
*
* Each macrotile consists of m x n (mostly 4 x 4) tiles.
* Pixel data pitch/stride is aligned with macrotile width.
* Pixel data height is aligned with macrotile height.
* Entire pixel data buffer is aligned with 4k(bytes).
*/
#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
/*
* Qualcomm Alternate Tiled Format
*
* Alternate tiled format typically only used within GMEM.
* Implementation may be platform and base-format specific.
*/
#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
/* Vivante framebuffer modifiers */
/*
@ -882,35 +630,6 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
/*
* Vivante TS (tile-status) buffer modifiers. They can be combined with all of
* the color buffer tiling modifiers defined above. When TS is present it's a
* separate buffer containing the clear/compression status of each tile. The
* modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
* tile size in bytes covered by one entry in the status buffer and s is the
* number of status bits per entry.
* We reserve the top 8 bits of the Vivante modifier space for tile status
* clear/compression modifiers, as future cores might add some more TS layout
* variations.
*/
#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
/*
* Vivante compression modifiers. Those depend on a TS modifier being present
* as the TS bits get reinterpreted as compression tags instead of simple
* clear markers when compression is enabled.
*/
#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
/* Masking out the extension bits will yield the base modifier. */
#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
VIVANTE_MOD_COMP_MASK)
/* NVIDIA frame buffer modifiers */
/*
@ -979,20 +698,14 @@ extern "C" {
* 2 = Gob Height 8, Turing+ Page Kind mapping
* 3 = Reserved for future use.
*
* 22:22 s Sector layout. There is a further bit remapping step that occurs
* 26:27 at an even lower level than the page kind and block linear
* swizzles. This causes the bit arrangement of surfaces in memory
* to differ subtly, and prevents direct sharing of surfaces between
* GPUs with different layouts.
* 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
* bit remapping step that occurs at an even lower level than the
* page kind and block linear swizzles. This causes the layout of
* surfaces mapped in those SOC's GPUs to be incompatible with the
* equivalent mapping on other GPUs in the same system.
*
* 0 = Tegra K1 - Tegra Parker/TX2 Layout
* 1 = Pre-GB20x, GB20x 32+ bpp, GB10, Tegra Xavier-Orin Layout
* 2 = GB20x(Blackwell 2)+ 8 bpp surface layout
* 3 = GB20x(Blackwell 2)+ 16 bpp surface layout
* 4 = Reserved for future use.
* 5 = Reserved for future use.
* 6 = Reserved for future use.
* 7 = Reserved for future use.
* 0 = Tegra K1 - Tegra Parker/TX2 Layout.
* 1 = Desktop GPU and Tegra Xavier+ Layout
*
* 25:23 c Lossless Framebuffer Compression type.
*
@ -1007,7 +720,7 @@ extern "C" {
* 6 = Reserved for future use
* 7 = Reserved for future use
*
* 55:28 - Reserved for future use. Must be zero.
* 55:25 - Reserved for future use. Must be zero.
*/
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
@ -1015,7 +728,6 @@ extern "C" {
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((s) & 0x6) << 25) | \
(((c) & 0x7) << 23)))
/* To grandfather in prior block linear format modifiers to the above layout,
@ -1130,10 +842,6 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
*
* The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
* supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
* wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@ -1191,7 +899,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
* The top 4 bits (out of the 56 bits allotted for specifying vendor specific
 * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@ -1507,7 +1215,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
* boundaries, i.e. 8bit should be stored in this mode to save allocation
 * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
@ -1516,90 +1224,6 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
/* MediaTek modifiers
* Bits Parameter Notes
* ----- ------------------------ ---------------------------------------------
* 7: 0 TILE LAYOUT Values are MTK_FMT_MOD_TILE_*
* 15: 8 COMPRESSION Values are MTK_FMT_MOD_COMPRESS_*
* 23:16 10 BIT LAYOUT Values are MTK_FMT_MOD_10BIT_LAYOUT_*
*
*/
#define DRM_FORMAT_MOD_MTK(__flags) fourcc_mod_code(MTK, __flags)
/*
* MediaTek Tiled Modifier
* The lowest 8 bits of the modifier is used to specify the tiling
* layout. Only the 16L_32S tiling is used for now, but we define an
* "untiled" version and leave room for future expansion.
*/
#define MTK_FMT_MOD_TILE_MASK 0xf
#define MTK_FMT_MOD_TILE_NONE 0x0
#define MTK_FMT_MOD_TILE_16L32S 0x1
/*
* Bits 8-15 specify compression options
*/
#define MTK_FMT_MOD_COMPRESS_MASK (0xf << 8)
#define MTK_FMT_MOD_COMPRESS_NONE (0x0 << 8)
#define MTK_FMT_MOD_COMPRESS_V1 (0x1 << 8)
/*
* Bits 16-23 specify how the bits of 10 bit formats are
* stored out in memory
*/
#define MTK_FMT_MOD_10BIT_LAYOUT_MASK (0xf << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_PACKED (0x0 << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_LSBTILED (0x1 << 16)
#define MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER (0x2 << 16)
/* alias for the most common tiling format */
#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
/*
* Apple GPU-tiled layouts.
*
* Apple GPUs support nonlinear tilings with optional lossless compression.
*
* GPU-tiled images are divided into 16KiB tiles:
*
* Bytes per pixel Tile size
* --------------- ---------
* 1 128x128
* 2 128x64
* 4 64x64
* 8 64x32
* 16 32x32
*
* Tiles are raster-order. Pixels within a tile are interleaved (Morton order).
*
* Compressed images pad the body to 128-bytes and are immediately followed by a
* metadata section. The metadata section rounds the image dimensions to
* powers-of-two and contains 8 bytes for each 16x16 compression subtile.
* Subtiles are interleaved (Morton order).
*
* All images are 128-byte aligned.
*
* These layouts fundamentally do not have meaningful strides. No matter how we
* specify strides for these layouts, userspace unaware of Apple image layouts
* will be unable to use correctly the specified stride for any purpose.
* Userspace aware of the image layouts do not use strides. The most "correct"
* convention would be setting the image stride to 0. Unfortunately, some
* software assumes the stride is at least (width * bytes per pixel). We
* therefore require that stride equals (width * bytes per pixel). Since the
* stride is arbitrary here, we pick the simplest convention.
*
* Although containing two sections, compressed image layouts are treated in
* software as a single plane. This is modelled after AFBC, a similar
* scheme. Attempting to separate the sections to be "explicit" in DRM would
* only generate more confusion, as software does not treat the image this way.
*
* For detailed information on the hardware image layouts, see
* https://docs.mesa3d.org/drivers/asahi.html#image-layouts
*/
#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
/*
* AMD modifiers
*
@ -1647,8 +1271,6 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_VER_GFX9 1
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
#define AMD_FMT_MOD_TILE_VER_GFX11 4
#define AMD_FMT_MOD_TILE_VER_GFX12 5
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
@ -1659,30 +1281,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
/*
* 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
* GFX9 as canonical version.
*
* 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
#define AMD_FMT_MOD_TILE_GFX9_4K_D_X 22
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
/* Gfx12 swizzle modes:
* 0 - LINEAR
* 1 - 256B_2D - 2D block dimensions
* 2 - 4KB_2D
* 3 - 64KB_2D
* 4 - 256KB_2D
* 5 - 4KB_3D - 3D block dimensions
* 6 - 64KB_3D
* 7 - 256KB_3D
*/
#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
@ -1749,11 +1352,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_PIPE_MASK 0x7
#define AMD_FMT_MOD_SET(field, value) \
((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
#define AMD_FMT_MOD_GET(field, value) \
(((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
#define AMD_FMT_MOD_CLEAR(field) \
(~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
(~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
#if defined(__cplusplus)
}

View file

@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
* DRM exposes many UAPI and structure definitions to have a consistent
* and standardized interface with users.
 * DRM exposes many UAPI and structure definitions to have a consistent
 * and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
* to communicate to drivers.
 * to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@ -312,48 +312,16 @@ struct drm_mode_set_plane {
__u32 src_w;
};
/**
* struct drm_mode_get_plane - Get plane metadata.
*
* Userspace can perform a GETPLANE ioctl to retrieve information about a
* plane.
*
* To retrieve the number of formats supported, set @count_format_types to zero
* and call the ioctl. @count_format_types will be updated with the value.
*
* To retrieve these formats, allocate an array with the memory needed to store
* @count_format_types formats. Point @format_type_ptr to this array and call
* the ioctl again (with @count_format_types still set to the value returned in
* the first ioctl call).
*/
struct drm_mode_get_plane {
/**
* @plane_id: Object ID of the plane whose information should be
* retrieved. Set by caller.
*/
__u32 plane_id;
/** @crtc_id: Object ID of the current CRTC. */
__u32 crtc_id;
/** @fb_id: Object ID of the current fb. */
__u32 fb_id;
/**
* @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's
* are created and they receive an index, which corresponds to their
* position in the bitmask. Bit N corresponds to
* :ref:`CRTC index<crtc_index>` N.
*/
__u32 possible_crtcs;
/** @gamma_size: Never used. */
__u32 gamma_size;
/** @count_format_types: Number of formats. */
__u32 count_format_types;
/**
* @format_type_ptr: Pointer to ``__u32`` array of formats that are
* supported by the plane. These formats do not require modifiers.
*/
__u64 format_type_ptr;
};
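/*
 * Illustrative user-space sketch (not part of this header): the two-call
 * pattern described above for DRM_IOCTL_MODE_GETPLANE, first querying the
 * format count and then fetching the format list. Error handling is minimal.
 */
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static uint32_t *get_plane_formats(int fd, uint32_t plane_id, uint32_t *count)
{
	struct drm_mode_get_plane plane = { .plane_id = plane_id };
	uint32_t *formats;

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &plane))
		return NULL;

	formats = calloc(plane.count_format_types, sizeof(*formats));
	if (!formats)
		return NULL;

	plane.format_type_ptr = (uintptr_t)formats;
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &plane)) {
		free(formats);
		return NULL;
	}

	*count = plane.count_format_types;
	return formats;
}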
@ -488,9 +456,6 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
*
* The (type, type_id) combination is not a stable identifier: the
* type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@ -540,78 +505,26 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
* without being aware that this could be triggering a lengthy modeset.
 * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
/**
* struct drm_mode_property_enum - Description for an enum/bitfield entry.
* @value: numeric value for this enum entry.
* @name: symbolic name for this enum entry.
*
* See struct drm_property_enum for details.
*/
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
/**
* struct drm_mode_get_property - Get property metadata.
*
* User-space can perform a GETPROPERTY ioctl to retrieve information about a
* property. The same property may be attached to multiple objects, see
* "Modeset Base Object Abstraction".
*
* The meaning of the @values_ptr field changes depending on the property type.
* See &drm_property.flags for more details.
*
* The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the
* property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For
* backwards compatibility, the kernel will always set @count_enum_blobs to
* zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must
* ignore these two fields if the property has a different type.
*
* User-space is expected to retrieve values and enums by performing this ioctl
* at least twice: the first time to retrieve the number of elements, the
* second time to retrieve the elements themselves.
*
* To retrieve the number of elements, set @count_values and @count_enum_blobs
* to zero, then call the ioctl. @count_values will be updated with the number
* of elements. If the property has the type &DRM_MODE_PROP_ENUM or
* &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well.
*
* To retrieve the elements themselves, allocate an array for @values_ptr and
* set @count_values to its capacity. If the property has the type
* &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for
* @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl
* again will fill the arrays.
*/
struct drm_mode_get_property {
/** @values_ptr: Pointer to a ``__u64`` array. */
__u64 values_ptr;
/** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */
__u64 enum_blob_ptr;
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
/**
* @prop_id: Object ID of the property which should be retrieved. Set
* by the caller.
*/
__u32 prop_id;
/**
* @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for
* a definition of the flags.
*/
__u32 flags;
/**
* @name: Symbolic property name. User-space should use this field to
* recognize properties.
*/
char name[DRM_PROP_NAME_LEN];
/** @count_values: Number of elements in @values_ptr. */
__u32 count_values;
/** @count_enum_blobs: Number of elements in @enum_blob_ptr. */
/* This is only used to count enum values, not blobs. The _blobs is
* simply because of a historical reason, i.e. backwards compat. */
__u32 count_enum_blobs;
};
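/*
 * Illustrative user-space sketch (not part of this header): the two-call
 * pattern described above for DRM_IOCTL_MODE_GETPROPERTY, shown for the
 * values array only; an enum/bitmask property would fill @enum_blob_ptr with
 * a struct drm_mode_property_enum array in the same way.
 */
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static __u64 *get_property_values(int fd, uint32_t prop_id, uint32_t *count)
{
	struct drm_mode_get_property prop = { .prop_id = prop_id };
	__u64 *values;

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
		return NULL;

	values = calloc(prop.count_values, sizeof(*values));
	if (!values)
		return NULL;

	prop.values_ptr = (uintptr_t)values;
	prop.count_enum_blobs = 0; /* not fetching enum entries here */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop)) {
		free(values);
		return NULL;
	}

	*count = prop.count_values;
	return values;
}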
@ -629,7 +542,6 @@ struct drm_mode_connector_set_property {
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_COLOROP 0xfafafafa
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_obj_get_properties {
@ -665,75 +577,43 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
#define DRM_MODE_FB_MODIFIERS	(1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
*
* This struct holds frame-buffer metadata. There are two ways to use it:
*
* - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
* ioctl to register a new frame-buffer. The new frame-buffer object ID will
* be set by the kernel in @fb_id.
* - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
* fetch metadata about an existing frame-buffer.
*
* In case of planar formats, this struct allows up to 4 buffer objects with
* offsets and pitches per plane. The pitch and offset order are dictated by
* the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
*
* YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
* interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
* samples.
*
* So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
* ``offsets[1]``.
*
* To accommodate tiled, compressed, etc formats, a modifier can be specified.
* For more information see the "Format Modifiers" section. Note that even
* though it looks like we have a modifier per-plane, we in fact do not. The
* modifier for each plane must be identical. Thus all combinations of
* different data layouts for multi-plane formats must be enumerated as
* separate modifiers.
*
* All of the entries in @handles, @pitches, @offsets and @modifier must be
* zero when unused. Warning, for @offsets and @modifier zero can't be used to
* figure out whether the entry is used or not since it's a valid value (a zero
* offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
*/
struct drm_mode_fb_cmd2 {
/** @fb_id: Object ID of the frame-buffer. */
__u32 fb_id;
/** @width: Width of the frame-buffer. */
__u32 width;
/** @height: Height of the frame-buffer. */
__u32 height;
/**
* @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
* ``drm_fourcc.h``.
*/
__u32 pixel_format;
/**
* @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
* &DRM_MODE_FB_MODIFIERS).
*/
__u32 flags;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
/**
* @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
* unused. The same handle can be used for multiple planes.
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
* To accommodate tiled, compressed, etc formats, a
* modifier can be specified. The default value of zero
* indicates "native" format as specified by the fourcc.
* Vendor specific modifier token. Note that even though
* it looks like we have a modifier per-plane, we in fact
* do not. The modifier for each plane must be identical.
* Thus all combinations of different data layouts for
* multi plane formats must be enumerated as separate
* modifiers.
*/
__u32 handles[4];
/** @pitches: Pitch (aka. stride) in bytes, one per plane. */
__u32 pitches[4];
/** @offsets: Offset into the buffer in bytes, one per plane. */
__u32 offsets[4];
/**
* @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
* constants in ``drm_fourcc.h``. All planes must use the same
* modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
*/
__u64 modifier[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
__u64 modifier[4]; /* ie, tiling, compress */
};
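/*
 * Illustrative user-space sketch (not part of this header): registering a
 * single-plane XRGB8888 buffer as a framebuffer with DRM_IOCTL_MODE_ADDFB2.
 * For a planar format such as NV12, handles[1]/pitches[1]/offsets[1] would
 * describe the UV plane as explained above.
 */
#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drm.h>

static int add_fb_xrgb8888(int fd, uint32_t width, uint32_t height,
			   uint32_t handle, uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd = {
		.width = width,
		.height = height,
		.pixel_format = DRM_FORMAT_XRGB8888,
		.handles = { handle },
		.pitches = { pitch },
	};
	int ret = drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);

	if (!ret)
		*fb_id = cmd.fb_id;
	return ret;
}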
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@ -838,29 +718,10 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2| |R|
* |G| = |3 4 5| x |G|
* |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
struct drm_color_ctm_3x4 {
/*
* Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2 3 | | R |
* |G| = |4 5 6 7 | x | G |
* |B| |8 9 10 11| | B |
* |1.0|
*/
__u64 matrix[12];
};
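/*
 * Illustrative sketch (not part of this header): packing a double into the
 * S31.32 sign-magnitude representation used by the matrices above. Assumes
 * the magnitude fits in the 31 integer bits.
 */
#include <math.h>
#include <stdint.h>

static uint64_t ctm_s31_32(double v)
{
	uint64_t sign = v < 0 ? 1ULL << 63 : 0;

	return sign | (uint64_t)llround(fabs(v) * (double)(1ULL << 32));
}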
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@ -872,138 +733,6 @@ struct drm_color_lut {
__u16 reserved;
};
/*
* struct drm_color_lut32
*
* 32-bit per channel color LUT entry, similar to drm_color_lut.
*/
struct drm_color_lut32 {
__u32 red;
__u32 green;
__u32 blue;
__u32 reserved;
};
/**
* enum drm_colorop_type - Type of color operation
*
* drm_colorops can be of many different types. Each type behaves differently
* and defines a different set of properties. This enum defines all types and
* gives a high-level description.
*/
enum drm_colorop_type {
/**
* @DRM_COLOROP_1D_CURVE:
*
* enum string "1D Curve"
*
* A 1D curve that is being applied to all color channels. The
* curve is specified via the CURVE_1D_TYPE colorop property.
*/
DRM_COLOROP_1D_CURVE,
/**
* @DRM_COLOROP_1D_LUT:
*
* enum string "1D LUT"
*
* A simple 1D LUT of uniformly spaced &drm_color_lut32 entries,
* packed into a blob via the DATA property. The driver's
* expected LUT size is advertised via the SIZE property.
*
* The DATA blob is an array of struct drm_color_lut32 with size
* of "size".
*/
DRM_COLOROP_1D_LUT,
/**
* @DRM_COLOROP_CTM_3X4:
*
* enum string "3x4 Matrix"
*
* A 3x4 matrix. Its values are specified via the
* &drm_color_ctm_3x4 struct provided via the DATA property.
*
* The DATA blob is a float[12]:
* out matrix in
* | R | | 0 1 2 3 | | R |
* | G | = | 4 5 6 7 | x | G |
	 * | B |   | 8  9 10 11 |   | B |
*/
DRM_COLOROP_CTM_3X4,
/**
* @DRM_COLOROP_MULTIPLIER:
*
* enum string "Multiplier"
*
* A simple multiplier, applied to all color values. The
* multiplier is specified as a S31.32 via the MULTIPLIER
* property.
*/
DRM_COLOROP_MULTIPLIER,
/**
* @DRM_COLOROP_3D_LUT:
*
* enum string "3D LUT"
*
* A 3D LUT of &drm_color_lut32 entries,
* packed into a blob via the DATA property. The driver's expected
* LUT size is advertised via the SIZE property, i.e., a 3D LUT with
* 17x17x17 entries will have SIZE set to 17.
*
* The DATA blob is a 3D array of struct drm_color_lut32 with dimension
* length of "size".
* The LUT elements are traversed like so:
*
* for B in range 0..n
* for G in range 0..n
* for R in range 0..n
* index = R + n * (G + n * B)
* color = lut3d[index]
*/
DRM_COLOROP_3D_LUT,
};
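/*
 * Illustrative sketch (not part of this header): the traversal order described
 * for DRM_COLOROP_3D_LUT above, i.e. index = R + n * (G + n * B) for an
 * n x n x n LUT of struct drm_color_lut32 entries.
 */
static struct drm_color_lut32 lut3d_sample(const struct drm_color_lut32 *lut,
					   unsigned int n, unsigned int r,
					   unsigned int g, unsigned int b)
{
	return lut[r + n * (g + n * b)];
}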
/**
* enum drm_colorop_lut3d_interpolation_type - type of 3DLUT interpolation
*/
enum drm_colorop_lut3d_interpolation_type {
/**
* @DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL:
*
* Tetrahedral 3DLUT interpolation
*/
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
};
/**
* enum drm_colorop_lut1d_interpolation_type - type of interpolation for 1D LUTs
*/
enum drm_colorop_lut1d_interpolation_type {
/**
* @DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR:
*
* Linear interpolation. Values between points of the LUT will be
* linearly interpolated.
*/
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
};
/**
* struct drm_plane_size_hint - Plane size hints
* @width: The width of the plane in pixel
* @height: The height of the plane in pixel
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
*/
struct drm_plane_size_hint {
__u16 width;
__u16 height;
};
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
@ -1028,23 +757,23 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X coordinate of color primary.
* @display_primaries.y: Y coordinate of color primary.
	 * @display_primaries.x: X coordinate of color primary.
	 * @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X coordinate of whitepoint of color primary.
* @white_point.y: Y coordinate of whitepoint of color primary.
	 * @white_point.x: X coordinate of whitepoint of color primary.
	 * @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
@ -1090,48 +819,12 @@ struct hdr_output_metadata {
};
};
/**
* DRM_MODE_PAGE_FLIP_EVENT
*
* Request that the kernel sends back a vblank event (see
* struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
* page-flip is done.
*
* When used with atomic uAPI, one event will be delivered per CRTC included in
* the atomic commit. A CRTC is included in an atomic commit if one of its
* properties is set, or if a property is set on a connector or plane linked
* via the CRTC_ID property to the CRTC. At least one CRTC must be included,
* and all pulled in CRTCs must be either previously or newly powered on (in
* other words, a powered off CRTC which stays off cannot be included in the
* atomic commit).
*/
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
/**
* DRM_MODE_PAGE_FLIP_ASYNC
*
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
*
* When used with atomic uAPI, the driver will return an error if the hardware
* doesn't support performing an asynchronous page-flip for this update.
* User-space should handle this, e.g. by falling back to a regular page-flip.
*
* Note, some hardware might need to perform one last synchronous page-flip
* before being able to switch to asynchronous page-flips. As an exception,
* the driver will return success even though that first page-flip is not
* asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
/**
* DRM_MODE_PAGE_FLIP_FLAGS
*
* Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags.
*/
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
DRM_MODE_PAGE_FLIP_ASYNC | \
DRM_MODE_PAGE_FLIP_TARGET)
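/*
 * Illustrative user-space sketch (not part of this header): a legacy page-flip
 * requesting a DRM_EVENT_FLIP_COMPLETE event via DRM_MODE_PAGE_FLIP_EVENT and
 * waiting for it with the libdrm event helpers. crtc_id and fb_id are assumed
 * to come from earlier mode-setting.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void on_flip(int fd, unsigned int seq, unsigned int sec,
		    unsigned int usec, void *data)
{
	*(int *)data = 1;
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	int done = 0;
	drmEventContext ctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = on_flip,
	};
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT, &done);

	while (!ret && !done)
		ret = drmHandleEvent(fd, &ctx);
	return ret;
}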
@ -1196,73 +889,13 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
/**
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
* @bpp: color mode
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
* @size: size of the whole buffer in bytes
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
*
* The value of @bpp is a color-mode number describing a specific format
* or a variant thereof. The value often corresponds to the number of bits
* per pixel for most modes, although there are exceptions. Each color mode
* maps to a DRM format plus a number of modes with similar pixel layout.
* Framebuffer layout is always linear.
*
* Support for all modes and formats is optional. Even if dumb-buffer
* creation with a certain color mode succeeds, it is not guaranteed that
* the DRM driver supports any of the related formats. Most drivers support
* a color mode of 32 with a format of DRM_FORMAT_XRGB8888 on their primary
* plane.
*
* +------------+------------------------+------------------------+
* | Color mode | Framebuffer format | Compatible formats |
* +============+========================+========================+
* | 32 | * DRM_FORMAT_XRGB8888 | * DRM_FORMAT_BGRX8888 |
* | | | * DRM_FORMAT_RGBX8888 |
* | | | * DRM_FORMAT_XBGR8888 |
* +------------+------------------------+------------------------+
* | 24 | * DRM_FORMAT_RGB888 | * DRM_FORMAT_BGR888 |
* +------------+------------------------+------------------------+
* | 16 | * DRM_FORMAT_RGB565 | * DRM_FORMAT_BGR565 |
* +------------+------------------------+------------------------+
* | 15 | * DRM_FORMAT_XRGB1555 | * DRM_FORMAT_BGRX1555 |
* | | | * DRM_FORMAT_RGBX1555 |
* | | | * DRM_FORMAT_XBGR1555 |
* +------------+------------------------+------------------------+
* | 8 | * DRM_FORMAT_C8 | * DRM_FORMAT_D8 |
* | | | * DRM_FORMAT_R8 |
* +------------+------------------------+------------------------+
* | 4 | * DRM_FORMAT_C4 | * DRM_FORMAT_D4 |
* | | | * DRM_FORMAT_R4 |
* +------------+------------------------+------------------------+
* | 2 | * DRM_FORMAT_C2 | * DRM_FORMAT_D2 |
* | | | * DRM_FORMAT_R2 |
* +------------+------------------------+------------------------+
* | 1 | * DRM_FORMAT_C1 | * DRM_FORMAT_D1 |
* | | | * DRM_FORMAT_R1 |
* +------------+------------------------+------------------------+
*
* Color modes of 10, 12, 15, 30 and 64 are only supported for use by
* legacy user space. Please don't use them in new code. Other modes
 * are not supported.
*
* Do not attempt to allocate anything but linear framebuffer memory
* with single-plane RGB data. Allocation of other framebuffer
* layouts requires dedicated ioctls in the respective DRM driver.
*/
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
@ -1285,53 +918,11 @@ struct drm_mode_destroy_dumb {
__u32 handle;
};
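To illustrate the fill-in/fill-out contract described above, here is a minimal sketch of allocating and mapping a dumb buffer, with error handling trimmed (assumes a KMS-capable fd; the buffer is later released with DRM_IOCTL_MODE_DESTROY_DUMB using the same handle):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *create_and_map_dumb(int fd, uint32_t width, uint32_t height,
				 struct drm_mode_create_dumb *creq)
{
	struct drm_mode_map_dumb mreq;
	void *map;

	memset(creq, 0, sizeof(*creq));
	creq->width = width;
	creq->height = height;
	creq->bpp = 32;		/* color mode 32, i.e. an XRGB8888-style layout */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq))
		return NULL;	/* on success the kernel filled handle, pitch and size */

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq->handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return NULL;

	map = mmap(NULL, creq->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, mreq.offset);
	return map == MAP_FAILED ? NULL : map;
}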
/**
* DRM_MODE_ATOMIC_TEST_ONLY
*
 * Do not apply the atomic commit; instead, check whether the hardware supports
* this configuration.
*
* See &drm_mode_config_funcs.atomic_check for more details on test-only
* commits.
*/
/* page-flip flags are valid, plus: */
#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
/**
* DRM_MODE_ATOMIC_NONBLOCK
*
* Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC
* IOCTL returns immediately instead of waiting for the changes to be applied
* in hardware. Note, the driver will still check that the update can be
 * applied before returning.
*/
#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
/**
* DRM_MODE_ATOMIC_ALLOW_MODESET
*
* Allow the update to result in temporary or transient visible artifacts while
* the update is being applied. Applying the update may also take significantly
* more time than a page flip. All visual artifacts will disappear by the time
* the update is completed, as signalled through the vblank event's timestamp
* (see struct drm_event_vblank).
*
* This flag must be set when the KMS update might cause visible artifacts.
 * Without this flag, such a KMS update will return an EINVAL error. What kind of
* update may cause visible artifacts depends on the driver and the hardware.
* User-space that needs to know beforehand if an update might cause visible
* artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without
* &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails.
*
* To the best of the driver's knowledge, visual artifacts are guaranteed to
* not appear when this flag is not set. Some sinks might display visual
* artifacts outside of the driver's control.
*/
#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
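One plausible way to combine these flags is to probe with TEST_ONLY first and only allow a full modeset on the real commit when the cheap path is rejected; a hedged sketch using libdrm's atomic helpers (the property setup on req is elided):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Probe whether `req` can be applied without a modeset, then commit,
 * permitting transient artifacts only when the probe says they are needed. */
static int commit_with_probe(int fd, drmModeAtomicReq *req, void *user_data)
{
	uint32_t flags = DRM_MODE_PAGE_FLIP_EVENT;
	int ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);

	if (ret)
		flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;

	return drmModeAtomicCommit(fd, req, flags, user_data);
}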
/**
* DRM_MODE_ATOMIC_FLAGS
*
* Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in
* &drm_mode_atomic.flags.
*/
#define DRM_MODE_ATOMIC_FLAGS (\
DRM_MODE_PAGE_FLIP_EVENT |\
DRM_MODE_PAGE_FLIP_ASYNC |\
@ -1435,10 +1026,6 @@ struct drm_mode_destroy_blob {
* struct drm_mode_create_lease - Create lease
*
* Lease mode resources, creating another drm_master.
*
* The @object_ids array must reference at least one CRTC, one connector and
* one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
* the lease can be completely empty.
*/
struct drm_mode_create_lease {
/** @object_ids: Pointer to array of object ids (__u32) */
@ -1535,16 +1122,6 @@ struct drm_mode_rect {
__s32 y2;
};
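The lease creation documented above is wrapped by libdrm as drmModeCreateLease(); a minimal sketch leasing one CRTC, one connector and one plane (the object IDs are placeholders supplied by the caller):

#include <fcntl.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* On success the return value is a new DRM master fd restricted to the
 * listed objects, and *lessee_id identifies the lease on the lessor side. */
static int lease_outputs(int fd, uint32_t crtc_id, uint32_t connector_id,
			 uint32_t plane_id, uint32_t *lessee_id)
{
	uint32_t objects[] = { crtc_id, connector_id, plane_id };

	return drmModeCreateLease(fd, objects, 3, O_CLOEXEC, lessee_id);
}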
/**
* struct drm_mode_closefb
* @fb_id: Framebuffer ID.
* @pad: Must be zero.
*/
struct drm_mode_closefb {
__u32 fb_id;
__u32 pad;
};
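A hedged sketch of using this struct, assuming the matching DRM_IOCTL_MODE_CLOSEFB request is present in the installed headers:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Drop this file's reference to the framebuffer without forcing planes
 * that still scan it out to be disabled (unlike DRM_IOCTL_MODE_RMFB). */
static int close_fb(int fd, uint32_t fb_id)
{
	struct drm_mode_closefb arg;

	memset(&arg, 0, sizeof(arg));	/* pad must be zero */
	arg.fb_id = fb_id;

	return drmIoctl(fd, DRM_IOCTL_MODE_CLOSEFB, &arg);
}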
#if defined(__cplusplus)
}
#endif

File diff suppressed because it is too large

View file

@ -46,16 +46,12 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@ -64,17 +60,6 @@ struct drm_virtgpu_map {
__u32 pad;
};
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
0)
struct drm_virtgpu_execbuffer_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 size;
@ -82,22 +67,10 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
__u32 num_in_syncobjs;
__u32 num_out_syncobjs;
__u64 in_syncobjs;
__u64 out_syncobjs;
};
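A hedged sketch of a submission that waits on one syncobj and signals another through this interface (the handles, timeline points and command stream are placeholders; the header path depends on the build setup):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"

static int submit_with_syncobjs(int fd, void *cmds, uint32_t cmd_size,
				uint32_t wait_handle, uint64_t wait_point,
				uint32_t signal_handle, uint64_t signal_point)
{
	struct drm_virtgpu_execbuffer_syncobj in = {
		.handle = wait_handle, .point = wait_point,
	};
	struct drm_virtgpu_execbuffer_syncobj out = {
		.handle = signal_handle, .point = signal_point,
	};
	struct drm_virtgpu_execbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.command = (uintptr_t)cmds;
	eb.size = cmd_size;
	eb.syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj);
	eb.num_in_syncobjs = 1;
	eb.in_syncobjs = (uintptr_t)&in;
	eb.num_out_syncobjs = 1;
	eb.out_syncobjs = (uintptr_t)&out;

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
}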
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@ -127,7 +100,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
__u32 blob_mem;
__u32 stride;
};
struct drm_virtgpu_3d_box {
@ -144,8 +117,6 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@ -153,8 +124,6 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@ -163,12 +132,6 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
#define VIRTGPU_DRM_CAPSET_VIRGL 1
#define VIRTGPU_DRM_CAPSET_VIRGL2 2
#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTGPU_DRM_CAPSET_VENUS 4
#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
@ -177,55 +140,6 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST 0x0001
#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
/* zero is invalid blob_mem */
__u32 blob_mem;
__u32 blob_flags;
__u32 bo_handle;
__u32 res_handle;
__u64 size;
/*
* for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
*/
__u32 pad;
__u32 cmd_size;
__u64 cmd;
__u64 blob_id;
};
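A hedged sketch of allocating a mappable guest blob with the ioctl that pairs with this struct (size and flags are illustrative; the header path depends on the build setup):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"

/* Create a guest-memory blob resource that can later be mapped via
 * DRM_VIRTGPU_MAP; the kernel fills bo_handle and res_handle on success. */
static int create_guest_blob(int fd, uint64_t size, uint32_t *bo_handle)
{
	struct drm_virtgpu_resource_create_blob blob;

	memset(&blob, 0, sizeof(blob));
	blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
	blob.size = size;

	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
		return -1;

	*bo_handle = blob.bo_handle;
	return 0;
}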
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
};
struct drm_virtgpu_context_init {
__u32 num_params;
__u32 pad;
/* pointer to drm_virtgpu_context_set_param array */
__u64 ctx_set_params;
};
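A hedged sketch of binding a freshly opened virtio-gpu fd to a specific capability set with this struct (the capset choice is only an example; context init has to happen before the context is first used implicitly):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"

static int init_context_for_capset(int fd, uint64_t capset_id)
{
	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = capset_id },
	};
	struct drm_virtgpu_context_init init;

	memset(&init, 0, sizeof(init));
	init.num_params = 1;
	init.ctx_set_params = (uintptr_t)params;

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}

For instance, a Venus (Vulkan-over-virtio) user-space driver would pass VIRTGPU_DRM_CAPSET_VENUS as the capset here.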
/*
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
* effect. The event size is sizeof(drm_event), since there is no additional
* payload.
*/
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@ -261,14 +175,6 @@ struct drm_virtgpu_context_init {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
struct drm_virtgpu_context_init)
#if defined(__cplusplus)
}
#endif

View file

@ -1,36 +0,0 @@
//
// Copyright © 2011 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including the next
// paragraph) shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_intel",
defaults: [
"libdrm_defaults",
"libdrm_intel_sources",
],
vendor: true,
// Removed dependency on libpciaccess: not used on Android
shared_libs: ["libdrm"],
}

38
intel/Android.mk Normal file
View file

@ -0,0 +1,38 @@
#
# Copyright © 2011 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_INTEL_FILES, LIBDRM_INTEL_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_intel
LOCAL_SRC_FILES := $(LIBDRM_INTEL_FILES)
LOCAL_SHARED_LIBRARIES := \
libdrm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_intel_sources",
srcs: [
"intel_bufmgr.c",
"intel_bufmgr_fake.c",
"intel_bufmgr_gem.c",
"intel_decode.c",
"mm.c",
],
}

17
intel/Makefile.sources Normal file
View file

@ -0,0 +1,17 @@
LIBDRM_INTEL_FILES := \
i915_pciids.h \
intel_bufmgr.c \
intel_bufmgr_priv.h \
intel_bufmgr_fake.c \
intel_bufmgr_gem.c \
intel_decode.c \
intel_chipset.h \
intel_chipset.c \
mm.c \
mm.h \
uthash.h
LIBDRM_INTEL_H_FILES := \
intel_bufmgr.h \
intel_aub.h \
intel_debug.h

693
intel/i915_pciids.h Normal file
View file

@ -0,0 +1,693 @@
/*
* Copyright 2013 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _I915_PCIIDS_H
#define _I915_PCIIDS_H
/*
* A pci_device_id struct {
* __u32 vendor, device;
* __u32 subvendor, subdevice;
* __u32 class, class_mask;
* kernel_ulong_t driver_data;
* };
* Don't use C99 here because "class" is reserved and we want to
* give userspace flexibility.
*/
#define INTEL_VGA_DEVICE(id, info) { \
0x8086, id, \
~0, ~0, \
0x030000, 0xff0000, \
(unsigned long) info }
#define INTEL_QUANTA_VGA_DEVICE(info) { \
0x8086, 0x16a, \
0x152d, 0x8990, \
0x030000, 0xff0000, \
(unsigned long) info }
#define INTEL_I810_IDS(info) \
INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \
INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \
INTEL_VGA_DEVICE(0x7125, info) /* I810_E */
#define INTEL_I815_IDS(info) \
INTEL_VGA_DEVICE(0x1132, info) /* I815*/
#define INTEL_I830_IDS(info) \
INTEL_VGA_DEVICE(0x3577, info)
#define INTEL_I845G_IDS(info) \
INTEL_VGA_DEVICE(0x2562, info)
#define INTEL_I85X_IDS(info) \
INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
INTEL_VGA_DEVICE(0x358e, info)
#define INTEL_I865G_IDS(info) \
INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
#define INTEL_I915G_IDS(info) \
INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
#define INTEL_I915GM_IDS(info) \
INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
#define INTEL_I945G_IDS(info) \
INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
#define INTEL_I945GM_IDS(info) \
INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
#define INTEL_I965G_IDS(info) \
INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
#define INTEL_G33_IDS(info) \
INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
#define INTEL_I965GM_IDS(info) \
INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
#define INTEL_GM45_IDS(info) \
INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
#define INTEL_G45_IDS(info) \
INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
#define INTEL_PINEVIEW_G_IDS(info) \
INTEL_VGA_DEVICE(0xa001, info)
#define INTEL_PINEVIEW_M_IDS(info) \
INTEL_VGA_DEVICE(0xa011, info)
#define INTEL_IRONLAKE_D_IDS(info) \
INTEL_VGA_DEVICE(0x0042, info)
#define INTEL_IRONLAKE_M_IDS(info) \
INTEL_VGA_DEVICE(0x0046, info)
#define INTEL_SNB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0102, info), \
INTEL_VGA_DEVICE(0x010A, info)
#define INTEL_SNB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0112, info), \
INTEL_VGA_DEVICE(0x0122, info)
#define INTEL_SNB_D_IDS(info) \
INTEL_SNB_D_GT1_IDS(info), \
INTEL_SNB_D_GT2_IDS(info)
#define INTEL_SNB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0106, info)
#define INTEL_SNB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0116, info), \
INTEL_VGA_DEVICE(0x0126, info)
#define INTEL_SNB_M_IDS(info) \
INTEL_SNB_M_GT1_IDS(info), \
INTEL_SNB_M_GT2_IDS(info)
#define INTEL_IVB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
#define INTEL_IVB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
#define INTEL_IVB_M_IDS(info) \
INTEL_IVB_M_GT1_IDS(info), \
INTEL_IVB_M_GT2_IDS(info)
#define INTEL_IVB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
#define INTEL_IVB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
#define INTEL_IVB_D_IDS(info) \
INTEL_IVB_D_GT1_IDS(info), \
INTEL_IVB_D_GT2_IDS(info)
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
#define INTEL_HSW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */
#define INTEL_HSW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
#define INTEL_HSW_GT1_IDS(info) \
INTEL_HSW_ULT_GT1_IDS(info), \
INTEL_HSW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */
#define INTEL_HSW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \
#define INTEL_HSW_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
#define INTEL_HSW_GT2_IDS(info) \
INTEL_HSW_ULT_GT2_IDS(info), \
INTEL_HSW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */
#define INTEL_HSW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
#define INTEL_HSW_GT3_IDS(info) \
INTEL_HSW_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
#define INTEL_HSW_IDS(info) \
INTEL_HSW_GT1_IDS(info), \
INTEL_HSW_GT2_IDS(info), \
INTEL_HSW_GT3_IDS(info)
#define INTEL_VLV_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
INTEL_VGA_DEVICE(0x0f33, info)
#define INTEL_BDW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */
#define INTEL_BDW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */
#define INTEL_BDW_GT1_IDS(info) \
INTEL_BDW_ULT_GT1_IDS(info), \
INTEL_BDW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
#define INTEL_BDW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */
#define INTEL_BDW_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT2_IDS(info) \
INTEL_BDW_ULT_GT2_IDS(info), \
INTEL_BDW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \
#define INTEL_BDW_ULX_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3_IDS(info) \
INTEL_BDW_ULT_GT3_IDS(info), \
INTEL_BDW_ULX_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_ULT_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info) /* Iris */
#define INTEL_BDW_ULX_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVD_IDS(info) \
INTEL_BDW_ULT_RSVD_IDS(info), \
INTEL_BDW_ULX_RSVD_IDS(info), \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
INTEL_BDW_GT1_IDS(info), \
INTEL_BDW_GT2_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
INTEL_BDW_RSVD_IDS(info)
#define INTEL_CHV_IDS(info) \
INTEL_VGA_DEVICE(0x22b0, info), \
INTEL_VGA_DEVICE(0x22b1, info), \
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
#define INTEL_SKL_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */
#define INTEL_SKL_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */
#define INTEL_SKL_GT1_IDS(info) \
INTEL_SKL_ULT_GT1_IDS(info), \
INTEL_SKL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */
#define INTEL_SKL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */
#define INTEL_SKL_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */
#define INTEL_SKL_GT2_IDS(info) \
INTEL_SKL_ULT_GT2_IDS(info), \
INTEL_SKL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */
#define INTEL_SKL_GT3_IDS(info) \
INTEL_SKL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info), \
INTEL_SKL_GT4_IDS(info)
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x1A85, info), \
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
#define INTEL_GLK_IDS(info) \
INTEL_VGA_DEVICE(0x3184, info), \
INTEL_VGA_DEVICE(0x3185, info)
#define INTEL_KBL_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */
#define INTEL_KBL_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */
#define INTEL_KBL_GT1_IDS(info) \
INTEL_KBL_ULT_GT1_IDS(info), \
INTEL_KBL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */
#define INTEL_KBL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */
#define INTEL_KBL_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */
#define INTEL_KBL_GT2_IDS(info) \
INTEL_KBL_ULT_GT2_IDS(info), \
INTEL_KBL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */
#define INTEL_KBL_GT3_IDS(info) \
INTEL_KBL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
/* AML/CFL Y GT2 */
#define INTEL_AML_CFL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x87CA, info)
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9BA2, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
INTEL_VGA_DEVICE(0x9BA5, info), \
INTEL_VGA_DEVICE(0x9BA8, info)
#define INTEL_CML_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9B21, info), \
INTEL_VGA_DEVICE(0x9BAA, info), \
INTEL_VGA_DEVICE(0x9BAC, info)
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9BC2, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC6, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
#define INTEL_CML_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9B41, info), \
INTEL_VGA_DEVICE(0x9BCA, info), \
INTEL_VGA_DEVICE(0x9BCC, info)
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
#define INTEL_CFL_H_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E9C, info)
#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3EA9, info)
/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3EA1, info), \
INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3EA0, info), \
INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
INTEL_CFL_S_GT2_IDS(info), \
INTEL_CFL_H_GT1_IDS(info), \
INTEL_CFL_H_GT2_IDS(info), \
INTEL_CFL_U_GT2_IDS(info), \
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
INTEL_WHL_U_GT3_IDS(info), \
INTEL_AML_CFL_GT2_IDS(info), \
INTEL_CML_GT1_IDS(info), \
INTEL_CML_GT2_IDS(info), \
INTEL_CML_U_GT1_IDS(info), \
INTEL_CML_U_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x5A44, info), \
INTEL_VGA_DEVICE(0x5A4C, info), \
INTEL_VGA_DEVICE(0x5A54, info), \
INTEL_VGA_DEVICE(0x5A5C, info)
#define INTEL_CNL_IDS(info) \
INTEL_CNL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x5A40, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
INTEL_VGA_DEVICE(0x5A42, info), \
INTEL_VGA_DEVICE(0x5A49, info), \
INTEL_VGA_DEVICE(0x5A4A, info), \
INTEL_VGA_DEVICE(0x5A50, info), \
INTEL_VGA_DEVICE(0x5A51, info), \
INTEL_VGA_DEVICE(0x5A52, info), \
INTEL_VGA_DEVICE(0x5A59, info), \
INTEL_VGA_DEVICE(0x5A5A, info)
/* ICL */
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
INTEL_VGA_DEVICE(0x8A53, info), \
INTEL_VGA_DEVICE(0x8A54, info), \
INTEL_VGA_DEVICE(0x8A56, info), \
INTEL_VGA_DEVICE(0x8A57, info), \
INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A5A, info), \
INTEL_VGA_DEVICE(0x8A5B, info), \
INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A70, info), \
INTEL_VGA_DEVICE(0x8A71, info)
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
/* EHL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4541, info), \
INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4555, info), \
INTEL_VGA_DEVICE(0x4557, info), \
INTEL_VGA_DEVICE(0x4571, info)
/* JSL */
#define INTEL_JSL_IDS(info) \
INTEL_VGA_DEVICE(0x4E51, info), \
INTEL_VGA_DEVICE(0x4E55, info), \
INTEL_VGA_DEVICE(0x4E57, info), \
INTEL_VGA_DEVICE(0x4E61, info), \
INTEL_VGA_DEVICE(0x4E71, info)
/* TGL */
#define INTEL_TGL_12_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9A60, info), \
INTEL_VGA_DEVICE(0x9A68, info), \
INTEL_VGA_DEVICE(0x9A70, info)
#define INTEL_TGL_12_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9A40, info), \
INTEL_VGA_DEVICE(0x9A49, info), \
INTEL_VGA_DEVICE(0x9A59, info), \
INTEL_VGA_DEVICE(0x9A78, info), \
INTEL_VGA_DEVICE(0x9AC0, info), \
INTEL_VGA_DEVICE(0x9AC9, info), \
INTEL_VGA_DEVICE(0x9AD9, info), \
INTEL_VGA_DEVICE(0x9AF8, info)
#define INTEL_TGL_12_IDS(info) \
INTEL_TGL_12_GT1_IDS(info), \
INTEL_TGL_12_GT2_IDS(info)
/* RKL */
#define INTEL_RKL_IDS(info) \
INTEL_VGA_DEVICE(0x4C80, info), \
INTEL_VGA_DEVICE(0x4C8A, info), \
INTEL_VGA_DEVICE(0x4C8B, info), \
INTEL_VGA_DEVICE(0x4C8C, info), \
INTEL_VGA_DEVICE(0x4C90, info), \
INTEL_VGA_DEVICE(0x4C9A, info)
/* DG1 */
#define INTEL_DG1_IDS(info) \
INTEL_VGA_DEVICE(0x4905, info), \
INTEL_VGA_DEVICE(0x4906, info), \
INTEL_VGA_DEVICE(0x4907, info), \
INTEL_VGA_DEVICE(0x4908, info), \
INTEL_VGA_DEVICE(0x4909, info)
/* ADL-S */
#define INTEL_ADLS_IDS(info) \
INTEL_VGA_DEVICE(0x4680, info), \
INTEL_VGA_DEVICE(0x4682, info), \
INTEL_VGA_DEVICE(0x4688, info), \
INTEL_VGA_DEVICE(0x468A, info), \
INTEL_VGA_DEVICE(0x4690, info), \
INTEL_VGA_DEVICE(0x4692, info), \
INTEL_VGA_DEVICE(0x4693, info)
/* ADL-P */
#define INTEL_ADLP_IDS(info) \
INTEL_VGA_DEVICE(0x46A0, info), \
INTEL_VGA_DEVICE(0x46A1, info), \
INTEL_VGA_DEVICE(0x46A2, info), \
INTEL_VGA_DEVICE(0x46A3, info), \
INTEL_VGA_DEVICE(0x46A6, info), \
INTEL_VGA_DEVICE(0x46A8, info), \
INTEL_VGA_DEVICE(0x46AA, info), \
INTEL_VGA_DEVICE(0x462A, info), \
INTEL_VGA_DEVICE(0x4626, info), \
INTEL_VGA_DEVICE(0x4628, info), \
INTEL_VGA_DEVICE(0x46B0, info), \
INTEL_VGA_DEVICE(0x46B1, info), \
INTEL_VGA_DEVICE(0x46B2, info), \
INTEL_VGA_DEVICE(0x46B3, info), \
INTEL_VGA_DEVICE(0x46C0, info), \
INTEL_VGA_DEVICE(0x46C1, info), \
INTEL_VGA_DEVICE(0x46C2, info), \
INTEL_VGA_DEVICE(0x46C3, info)
/* RPL-P */
#define INTEL_RPLP_IDS(info) \
INTEL_VGA_DEVICE(0xA720, info), \
INTEL_VGA_DEVICE(0xA721, info), \
INTEL_VGA_DEVICE(0xA7A0, info), \
INTEL_VGA_DEVICE(0xA7A1, info), \
INTEL_VGA_DEVICE(0xA7A8, info), \
INTEL_VGA_DEVICE(0xA7A9, info)
/* ADL-N */
#define INTEL_ADLN_IDS(info) \
INTEL_VGA_DEVICE(0x46D0, info), \
INTEL_VGA_DEVICE(0x46D1, info), \
INTEL_VGA_DEVICE(0x46D2, info)
/* RPL-S */
#define INTEL_RPLS_IDS(info) \
INTEL_VGA_DEVICE(0xA780, info), \
INTEL_VGA_DEVICE(0xA781, info), \
INTEL_VGA_DEVICE(0xA782, info), \
INTEL_VGA_DEVICE(0xA783, info), \
INTEL_VGA_DEVICE(0xA788, info), \
INTEL_VGA_DEVICE(0xA789, info)
#endif /* _I915_PCIIDS_H */

View file

@ -1,7 +1,7 @@
/**************************************************************************
*
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007-2012 Intel Corporation
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007-2012 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
@ -28,7 +28,7 @@
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
@ -1379,26 +1379,25 @@ static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bufmgr_gem *bufmgr_gem;
struct timespec time;
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_add_unless(&bo_gem->refcount, -1, 1))
return;
if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
clock_gettime(CLOCK_MONOTONIC, &time);
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
pthread_mutex_lock(&bufmgr_gem->lock);
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
}
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
pthread_mutex_unlock(&bufmgr_gem->lock);
}
pthread_mutex_unlock(&bufmgr_gem->lock);
}
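The hunk above reshuffles a common unreference pattern: drop a reference without the manager lock when the object clearly stays alive, and only take the lock when this may be the final reference so teardown is serialized. A generic C11 sketch of the same idea (not libdrm's actual atomic helpers):

#include <pthread.h>
#include <stdatomic.h>

struct obj {
	atomic_uint refcount;
	pthread_mutex_t *mgr_lock;	/* protects caches/lists touched at teardown */
};

static void obj_destroy_locked(struct obj *o)
{
	(void)o;	/* free buffers, unlink from manager lists, etc. */
}

static void obj_unreference(struct obj *o)
{
	unsigned int cur = atomic_load(&o->refcount);

	/* Fast path: clearly not the last reference, decrement without locking. */
	while (cur > 1) {
		if (atomic_compare_exchange_weak(&o->refcount, &cur, cur - 1))
			return;
	}

	/* Possibly the last reference: re-check and tear down under the lock. */
	pthread_mutex_lock(o->mgr_lock);
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		obj_destroy_locked(o);
	pthread_mutex_unlock(o->mgr_lock);
}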
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
@ -3378,17 +3377,16 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1))
return;
if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
pthread_mutex_lock(&bufmgr_list_mutex);
pthread_mutex_lock(&bufmgr_list_mutex);
if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
DRMLISTDEL(&bufmgr_gem->managers);
drm_intel_bufmgr_gem_destroy(bufmgr);
}
if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
DRMLISTDEL(&bufmgr_gem->managers);
drm_intel_bufmgr_gem_destroy(bufmgr);
pthread_mutex_unlock(&bufmgr_list_mutex);
}
pthread_mutex_unlock(&bufmgr_list_mutex);
}
drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
@ -3590,9 +3588,13 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->gen = 6;
else if (IS_GEN7(bufmgr_gem->pci_device))
bufmgr_gem->gen = 7;
else
/* Treat all further unmatched platforms the same as gen8 */
else if (IS_GEN8(bufmgr_gem->pci_device))
bufmgr_gem->gen = 8;
else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
free(bufmgr_gem);
bufmgr_gem = NULL;
goto exit;
}
if (IS_GEN3(bufmgr_gem->pci_device) &&
bufmgr_gem->gtt_size > 256*1024*1024) {

95
intel/intel_chipset.c Normal file
View file

@ -0,0 +1,95 @@
/*
* Copyright (C) 2018 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "intel_chipset.h"
#include <inttypes.h>
#include <stdbool.h>
#include "i915_pciids.h"
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, gen) { id, gen }
static const struct pci_device {
uint16_t device;
uint16_t gen;
} pciids[] = {
/* Keep ids sorted by gen; latest gen first */
INTEL_ADLN_IDS(12),
INTEL_RPLP_IDS(12),
INTEL_ADLP_IDS(12),
INTEL_RPLS_IDS(12),
INTEL_ADLS_IDS(12),
INTEL_RKL_IDS(12),
INTEL_DG1_IDS(12),
INTEL_TGL_12_IDS(12),
INTEL_JSL_IDS(11),
INTEL_EHL_IDS(11),
INTEL_ICL_11_IDS(11),
INTEL_CNL_IDS(10),
INTEL_CFL_IDS(9),
INTEL_GLK_IDS(9),
INTEL_KBL_IDS(9),
INTEL_BXT_IDS(9),
INTEL_SKL_IDS(9),
};
drm_private bool intel_is_genx(unsigned int devid, int gen)
{
const struct pci_device *p,
*pend = pciids + sizeof(pciids) / sizeof(pciids[0]);
for (p = pciids; p < pend; p++) {
/* PCI IDs are sorted */
if (p->gen < gen)
break;
if (p->device != devid)
continue;
if (gen == p->gen)
return true;
break;
}
return false;
}
drm_private bool intel_get_genx(unsigned int devid, int *gen)
{
const struct pci_device *p,
*pend = pciids + sizeof(pciids) / sizeof(pciids[0]);
for (p = pciids; p < pend; p++) {
if (p->device != devid)
continue;
if (gen)
*gen = p->gen;
return true;
}
return false;
}

View file

@ -331,6 +331,20 @@
#include <stdbool.h>
#include <libdrm_macros.h>
#define IS_9XX(dev) (!IS_GEN2(dev))
drm_private bool intel_is_genx(unsigned int devid, int gen);
drm_private bool intel_get_genx(unsigned int devid, int *gen);
#define IS_GEN9(devid) intel_is_genx(devid, 9)
#define IS_GEN10(devid) intel_is_genx(devid, 10)
#define IS_GEN11(devid) intel_is_genx(devid, 11)
#define IS_GEN12(devid) intel_is_genx(devid, 12)
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
IS_GEN7(dev) || \
IS_GEN8(dev) || \
intel_get_genx(dev, NULL))
#endif /* _INTEL_CHIPSET_H */

View file

@ -3817,7 +3817,9 @@ drm_intel_decode_context_alloc(uint32_t devid)
struct drm_intel_decode *ctx;
int gen = 0;
if (IS_GEN8(devid))
if (intel_get_genx(devid, &gen))
;
else if (IS_GEN8(devid))
gen = 8;
else if (IS_GEN7(devid))
gen = 7;
@ -3827,13 +3829,10 @@ drm_intel_decode_context_alloc(uint32_t devid)
gen = 5;
else if (IS_GEN4(devid))
gen = 4;
else if (IS_GEN3(devid))
else if (IS_9XX(devid))
gen = 3;
else if (IS_GEN2(devid))
gen = 2;
else
/* Just assume future unknown platforms behave as gen8. */
gen = 8;
if (!gen)
return NULL;

View file

@ -23,16 +23,16 @@ libdrm_intel = library(
[
files(
'intel_bufmgr.c', 'intel_bufmgr_fake.c', 'intel_bufmgr_gem.c',
'intel_decode.c', 'mm.c',
'intel_decode.c', 'mm.c', 'intel_chipset.c',
),
config_file,
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_pciaccess, dep_threads, dep_rt, dep_valgrind, dep_atomic_ops],
dependencies : [dep_pciaccess, dep_pthread_stubs, dep_rt, dep_valgrind, dep_atomic_ops],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
version : '1.@0@.0'.format(patch_ver),
version : '1.0.0',
install : true,
)
@ -41,7 +41,9 @@ ext_libdrm_intel = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_intel', ext_libdrm_intel)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_intel', ext_libdrm_intel)
endif
install_headers(
'intel_bufmgr.h', 'intel_aub.h', 'intel_debug.h',
@ -102,6 +104,6 @@ test(
args : [
'--lib', libdrm_intel,
'--symbols-file', files('intel-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -86,8 +86,7 @@ static void
compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
{
FILE *out = NULL;
char *ptr;
void *ref_ptr, *batch_ptr;
void *ptr, *ref_ptr, *batch_ptr;
#if HAVE_OPEN_MEMSTREAM
size_t size;
#endif
@ -107,7 +106,7 @@ compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
* inside of an automake project's test infrastructure.
*/
#if HAVE_OPEN_MEMSTREAM
out = open_memstream(&ptr, &size);
out = open_memstream((char **)&ptr, &size);
#else
fprintf(stderr, "platform lacks open_memstream, skipping.\n");
exit(77);

View file

@ -648,11 +648,11 @@ do {
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
#ifdef HAVE_BIG_ENDIAN
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
#else /* little endian non-intel */
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))

View file

@ -33,7 +33,7 @@
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/poll.h>
#include <unistd.h>
#if defined(__cplusplus)

View file

@ -214,7 +214,7 @@ Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
https://gitlab.freedesktop.org/mesa/drm/-/issues
See Also
========

View file

@ -304,7 +304,7 @@ Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
https://gitlab.freedesktop.org/mesa/drm/-/issues
See Also
========

View file

@ -82,7 +82,7 @@ Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues.
https://gitlab.freedesktop.org/mesa/drm/-/issues.
See Also
========

View file

@ -33,7 +33,7 @@ Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
https://gitlab.freedesktop.org/mesa/drm/-/issues
See Also
========

View file

@ -53,7 +53,7 @@ Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
https://gitlab.freedesktop.org/mesa/drm/-/issues
See Also
========

View file

@ -81,7 +81,7 @@ Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
https://gitlab.freedesktop.org/mesa/drm/-/issues
See Also
========

View file

@ -18,26 +18,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The versioning should always stay at 2.4.x. If bumping away from this,
# you must ensure that all users of patch_ver are changed such that DSO versions
# continuously increment (e.g. blindly bumping from 2.4.122 to 2.5.0 would
# roll the libdrm DSO versioning from libdrm.so.2.122.0 back to libdrm.so.2.0.0
# which would be bad)
project(
'libdrm',
['c'],
version : '2.4.131',
version : '2.4.111',
license : 'MIT',
meson_version : '>= 0.59',
default_options : ['buildtype=debugoptimized', 'c_std=c11'],
meson_version : '>= 0.53',
default_options : ['buildtype=debugoptimized', 'c_std=c99'],
)
patch_ver = meson.project_version().split('.')[2]
if ['windows', 'darwin'].contains(host_machine.system())
error('unsupported OS: @0@'.format(host_machine.system()))
endif
pkg = import('pkgconfig')
config = configuration_data()
@ -45,22 +34,18 @@ config = configuration_data()
config.set10('UDEV', get_option('udev'))
with_freedreno_kgsl = get_option('freedreno-kgsl')
with_install_tests = get_option('install-test-programs')
with_tests = get_option('tests')
if ['freebsd', 'dragonfly', 'netbsd'].contains(host_machine.system())
dep_pthread_stubs = dependency('pthread-stubs', version : '>= 0.4')
else
dep_pthread_stubs = []
endif
dep_threads = dependency('threads')
cc = meson.get_compiler('c')
config.set10('HAVE_SECURE_GETENV', cc.has_function('secure_getenv'))
android = cc.compiles('''int func() { return __ANDROID__; }''')
# Solaris / Illumos
if host_machine.system() == 'sunos'
add_global_arguments('-D__EXTENSIONS__', language : 'c')
add_global_arguments('-D_POSIX_C_SOURCE=3', language : 'c')
endif
symbols_check = find_program('symbols-check.py')
prog_nm = find_program('nm')
@ -97,63 +82,104 @@ endif
config.set10('HAVE_LIBDRM_ATOMIC_PRIMITIVES', intel_atomics)
config.set10('HAVE_LIB_ATOMIC_OPS', lib_atomics)
dep_pciaccess = dependency('pciaccess', version : '>= 0.10', required : get_option('intel'))
with_intel = get_option('intel') \
.require(with_atomics, error_message : 'libdrm_intel requires atomics') \
.require(dep_pciaccess.found(), error_message : 'libdrm_intel requires libpciaccess') \
.disable_auto_if(not host_machine.cpu_family().startswith('x86')) \
.allowed()
with_intel = false
_intel = get_option('intel')
if _intel != 'false'
if _intel == 'true' and not with_atomics
error('libdrm_intel requires atomics.')
else
with_intel = _intel == 'true' or host_machine.cpu_family().startswith('x86')
endif
endif
summary('Intel', with_intel)
with_radeon = get_option('radeon') \
.require(with_atomics, error_message : 'libdrm_radeon requires atomics') \
.allowed()
with_radeon = false
_radeon = get_option('radeon')
if _radeon != 'false'
if _radeon == 'true' and not with_atomics
error('libdrm_radeon requires atomics.')
endif
with_radeon = true
endif
summary('Radeon', with_radeon)
with_amdgpu = get_option('amdgpu') \
.require(with_atomics, error_message : 'libdrm_amdgpu requires atomics') \
.allowed()
with_amdgpu = false
_amdgpu = get_option('amdgpu')
if _amdgpu != 'false'
if _amdgpu == 'true' and not with_atomics
error('libdrm_amdgpu requires atomics.')
endif
with_amdgpu = true
endif
summary('AMDGPU', with_amdgpu)
with_nouveau = get_option('nouveau') \
.require(with_atomics, error_message : 'libdrm_nouveau requires atomics') \
.allowed()
with_nouveau = false
_nouveau = get_option('nouveau')
if _nouveau != 'false'
if _nouveau == 'true' and not with_atomics
error('libdrm_nouveau requires atomics.')
endif
with_nouveau = true
endif
summary('Nouveau', with_nouveau)
with_vmwgfx = get_option('vmwgfx').allowed()
with_vmwgfx = false
_vmwgfx = get_option('vmwgfx')
if _vmwgfx != 'false'
with_vmwgfx = true
endif
summary('vmwgfx', with_vmwgfx)
with_omap = get_option('omap') \
.require(with_atomics, error_message : 'libdrm_omap requires atomics') \
.enabled()
with_omap = false
_omap = get_option('omap')
if _omap == 'true'
if not with_atomics
error('libdrm_omap requires atomics.')
endif
with_omap = true
endif
summary('OMAP', with_omap)
with_freedreno = get_option('freedreno') \
.require(with_atomics, error_message : 'libdrm_freedreno requires atomics') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.allowed()
with_freedreno = false
_freedreno = get_option('freedreno')
if _freedreno != 'false'
if _freedreno == 'true' and not with_atomics
error('libdrm_freedreno requires atomics.')
else
with_freedreno = _freedreno == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
endif
endif
summary('Freedreno', with_freedreno)
summary('Freedreno-kgsl', with_freedreno_kgsl)
with_tegra = get_option('tegra') \
.require(with_atomics, error_message : 'libdrm_tegra requires atomics') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.enabled()
with_tegra = false
_tegra = get_option('tegra')
if _tegra == 'true'
if not with_atomics
error('libdrm_tegra requires atomics.')
endif
with_tegra = true
endif
summary('Tegra', with_tegra)
with_etnaviv = get_option('etnaviv') \
.require(with_atomics, error_message : 'libdrm_etnaviv requires atomics') \
.disable_auto_if(not ['arm', 'aarch64', 'arc', 'mips', 'mips64', 'loongarch64'].contains(host_machine.cpu_family())) \
.allowed()
with_etnaviv = false
_etnaviv = get_option('etnaviv')
if _etnaviv == 'true'
if not with_atomics
error('libdrm_etnaviv requires atomics.')
endif
with_etnaviv = true
endif
summary('Etnaviv', with_etnaviv)
with_exynos = get_option('exynos').enabled()
with_exynos = get_option('exynos') == 'true'
summary('EXYNOS', with_exynos)
with_vc4 = get_option('vc4') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.allowed()
with_vc4 = false
_vc4 = get_option('vc4')
if _vc4 != 'false'
with_vc4 = _vc4 == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
endif
summary('VC4', with_vc4)
# Among others FreeBSD does not have a separate dl library.
@ -169,9 +195,10 @@ if not cc.has_function('clock_gettime', prefix : '#define _GNU_SOURCE\n#include
else
dep_rt = []
endif
dep_m = cc.find_library('m', required : false)
# The header is not required on Linux, and is in fact deprecated in glibc 2.30+
if host_machine.system() == 'linux'
if ['linux'].contains(host_machine.system())
config.set10('HAVE_SYS_SYSCTL_H', false)
else
# From Niclas Zeising:
@ -207,18 +234,32 @@ libdrm_c_args = cc.get_supported_arguments([
'-Wno-unused-parameter', '-Wno-attributes', '-Wno-long-long',
'-Wno-missing-field-initializers'])
dep_cairo = dependency('cairo', required : get_option('cairo-tests'))
with_cairo_tests = dep_cairo.found()
valgrind_version = []
if with_freedreno
valgrind_version = '>=3.10.0'
dep_pciaccess = dependency('pciaccess', version : '>= 0.10', required : with_intel)
dep_cunit = dependency('cunit', version : '>= 2.1', required : false)
_cairo_tests = get_option('cairo-tests')
if _cairo_tests != 'false'
dep_cairo = dependency('cairo', required : _cairo_tests == 'true')
with_cairo_tests = dep_cairo.found()
else
dep_cairo = []
with_cairo_tests = false
endif
_valgrind = get_option('valgrind')
if _valgrind != 'false'
if with_freedreno
dep_valgrind = dependency('valgrind', required : _valgrind == 'true', version : '>=3.10.0')
else
dep_valgrind = dependency('valgrind', required : _valgrind == 'true')
endif
with_valgrind = dep_valgrind.found()
else
dep_valgrind = []
with_valgrind = false
endif
dep_valgrind = dependency('valgrind', required : get_option('valgrind'), version : valgrind_version)
with_valgrind = dep_valgrind.found()
prog_rst2man = find_program('rst2man', 'rst2man.py', required: get_option('man-pages'))
with_man_pages = prog_rst2man.found()
with_man_pages = get_option('man-pages')
prog_rst2man = find_program('rst2man', 'rst2man.py', required: with_man_pages == 'true')
with_man_pages = with_man_pages != 'false' and prog_rst2man.found()
config.set10('HAVE_VISIBILITY', cc.has_function_attribute('visibility:hidden'))
@ -239,20 +280,11 @@ if with_freedreno_kgsl and not with_freedreno
error('cannot enable freedreno-kgsl without freedreno support')
endif
config.set10('_GNU_SOURCE', true)
if target_machine.endian() == 'big'
config.set('HAVE_BIG_ENDIAN', 1)
endif
if android
config.set('BIONIC_IOCTL_NO_SIGNEDNESS_OVERLOAD', 1)
endif
config_file = configure_file(
configuration : config,
output : 'config.h',
)
add_project_arguments('-include', meson.current_build_dir() / 'config.h', language : 'c')
add_project_arguments('-include', '@0@'.format(config_file), language : 'c')
inc_root = include_directories('.')
inc_drm = include_directories('include/drm')
@ -268,14 +300,14 @@ libdrm_files = [files(
if android
libdrm_kw = {}
else
libdrm_kw = { 'version' : '2.@0@.0'.format(patch_ver) }
libdrm_kw = {'version' : '2.4.0'}
endif
libdrm = library(
'drm',
libdrm_files,
c_args : libdrm_c_args,
dependencies : [dep_valgrind, dep_rt],
dependencies : [dep_valgrind, dep_rt, dep_m],
include_directories : inc_drm,
install : true,
kwargs : libdrm_kw,
@ -288,7 +320,7 @@ test(
args : [
'--lib', libdrm,
'--symbols-file', files('core-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)
@ -297,7 +329,9 @@ ext_libdrm = declare_dependency(
include_directories : [inc_root, inc_drm],
)
meson.override_dependency('libdrm', ext_libdrm)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm', ext_libdrm)
endif
install_headers('libsync.h', 'xf86drm.h', 'xf86drmMode.h')
install_headers(
@ -357,6 +391,4 @@ if with_man_pages
subdir('man')
endif
subdir('data')
if with_tests
subdir('tests')
endif
subdir('tests')

View file

@ -20,75 +20,100 @@
option(
'intel',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for Intel's KMS API.''',
)
option(
'radeon',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for radeon's KMS API.''',
)
option(
'amdgpu',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for amdgpu's KMS API.''',
)
option(
'nouveau',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for nouveau's KMS API.''',
)
option(
'vmwgfx',
type : 'feature',
type : 'combo',
value : 'true',
choices : ['true', 'false', 'auto'],
description : '''Enable support for vmwgfx's KMS API.''',
)
option(
'omap',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for OMAP's experimental KMS API.''',
)
option(
'exynos',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for EXYNOS's experimental KMS API.''',
)
option(
'freedreno',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for freedreno's KMS API.''',
)
option(
'tegra',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for Tegra's experimental KMS API.''',
)
option(
'vc4',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for vc4's KMS API.''',
)
option(
'etnaviv',
type : 'feature',
description : '''Enable support for etnaviv's KMS API.''',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for etnaviv's experimental KMS API.''',
)
option(
'cairo-tests',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Enable support for Cairo rendering in tests.',
)
option(
'man-pages',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Enable manpage generation and installation.',
)
option(
'valgrind',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Build libdrm with valgrind support.',
)
option(
@ -109,9 +134,3 @@ option(
value : false,
description : 'Enable support for using udev instead of mknod.',
)
option(
'tests',
type : 'boolean',
value : true,
description : 'Build test programs.',
)

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_nouveau",
defaults: [
"libdrm_defaults",
"libdrm_nouveau_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

14
nouveau/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_NOUVEAU_FILES, LIBDRM_NOUVEAU_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_nouveau
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_NOUVEAU_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,11 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_nouveau_sources",
srcs: [
"nouveau.c",
"pushbuf.c",
"bufctx.c",
"abi16.c",
],
}

9
nouveau/Makefile.sources Normal file
View file

@ -0,0 +1,9 @@
LIBDRM_NOUVEAU_FILES := \
nouveau.c \
pushbuf.c \
bufctx.c \
abi16.c \
private.h
LIBDRM_NOUVEAU_H_FILES := \
nouveau.h

View file

@ -27,7 +27,7 @@ libdrm_nouveau = library(
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops],
version : '2.@0@.0'.format(patch_ver),
version : '2.0.0',
install : true,
)
@ -36,7 +36,9 @@ ext_libdrm_nouveau = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_nouveau', ext_libdrm_nouveau)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_nouveau', ext_libdrm_nouveau)
endif
install_headers('nouveau.h', subdir : 'libdrm/nouveau')
install_headers(
@ -58,6 +60,6 @@ test(
args : [
'--lib', libdrm_nouveau,
'--symbols-file', files('nouveau-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -1,4 +1,3 @@
nouveau_bo_make_global
nouveau_bo_map
nouveau_bo_name_get
nouveau_bo_name_ref

View file

@ -711,7 +711,7 @@ nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
}
static void
nouveau_nvbo_make_global(struct nouveau_bo_priv *nvbo)
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
if (!nvbo->head.next) {
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
@ -722,14 +722,6 @@ nouveau_nvbo_make_global(struct nouveau_bo_priv *nvbo)
}
}
drm_public void
nouveau_bo_make_global(struct nouveau_bo *bo)
{
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
nouveau_nvbo_make_global(nvbo);
}
drm_public int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
struct nouveau_bo **pbo)
@ -788,7 +780,7 @@ nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
}
nvbo->name = *name = req.name;
nouveau_nvbo_make_global(nvbo);
nouveau_bo_make_global(nvbo);
}
return 0;
}
@ -838,7 +830,7 @@ nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
if (ret)
return ret;
nouveau_nvbo_make_global(nvbo);
nouveau_bo_make_global(nvbo);
return 0;
}

View file

@ -137,7 +137,6 @@ struct nouveau_bo {
int nouveau_bo_new(struct nouveau_device *, uint32_t flags, uint32_t align,
uint64_t size, union nouveau_bo_config *,
struct nouveau_bo **);
void nouveau_bo_make_global(struct nouveau_bo *);
int nouveau_bo_wrap(struct nouveau_device *, uint32_t handle,
struct nouveau_bo **);
int nouveau_bo_name_ref(struct nouveau_device *v, uint32_t name,

View file

@ -1,12 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_omap",
defaults: [
"libdrm_defaults",
"libdrm_omap_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

13
omap/Android.mk Normal file
View file

@ -0,0 +1,13 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm_omap
LOCAL_VENDOR_MODULE := true
LOCAL_SRC_FILES := omap_drm.c
LOCAL_SHARED_LIBRARIES := libdrm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,8 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_omap_sources",
srcs: [
"omap_drm.c",
],
}

View file

@ -25,8 +25,8 @@ libdrm_omap = library(
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_atomic_ops],
version : '1.0.0',
install : true,
)
@ -35,7 +35,9 @@ ext_libdrm_omap = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_omap', ext_libdrm_omap)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_omap', ext_libdrm_omap)
endif
install_headers('omap_drmif.h', subdir : 'libdrm')
install_headers('omap_drm.h', subdir : 'omap')
@ -54,6 +56,6 @@ test(
args : [
'--lib', libdrm_omap,
'--symbols-file', files('omap-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -27,6 +27,7 @@
*/
#include <stdlib.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
@ -41,6 +42,10 @@
#include "omap_drm.h"
#include "omap_drmif.h"
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void * dev_table;
@ -203,6 +208,12 @@ static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
bo = bo_from_handle(dev, req.handle);
pthread_mutex_unlock(&table_lock);
if (flags & OMAP_BO_TILED) {
bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
} else {
bo->size = size.bytes;
}
return bo;
fail:
@ -422,7 +433,7 @@ drm_public uint32_t omap_bo_size(struct omap_bo *bo)
drm_public void *omap_bo_map(struct omap_bo *bo)
{
if (!bo->map) {
if (!bo->size || !bo->offset) {
if (!bo->offset) {
get_buffer_info(bo);
}
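Note on the omap_drm.c hunks above: on the 2.4.111 side the size of a tiled BO is computed in userspace, rounding each row up to a hard-coded 4096-byte page with a power-of-two bit trick before multiplying by the height. The following stand-alone C sketch only illustrates that arithmetic; the width and height values are hypothetical, and the macros (copied from the hunk) rely on the GCC/Clang __typeof__ extension.

#include <stdio.h>

/* Same bit trick as in the hunk above; valid for power-of-two alignments only. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096

int main(void)
{
	/* Hypothetical tiled buffer: a 4097-byte row occupies two full pages. */
	unsigned int width = 4097, height = 32;
	unsigned int size = round_up(width, PAGE_SIZE) * height;

	printf("row %u -> %u, total %u bytes\n",
	       width, round_up(width, PAGE_SIZE), size);
	/* prints: row 4097 -> 8192, total 262144 bytes */
	return 0;
}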

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_radeon",
defaults: [
"libdrm_defaults",
"libdrm_radeon_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

14
radeon/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_RADEON_FILES, LIBDRM_RADEON_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_radeon
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_RADEON_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,13 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_radeon_sources",
srcs: [
"radeon_bo_gem.c",
"radeon_cs_gem.c",
"radeon_cs_space.c",
"radeon_bo.c",
"radeon_cs.c",
"radeon_surface.c",
],
}

21
radeon/Makefile.sources Normal file
View file

@ -0,0 +1,21 @@
LIBDRM_RADEON_FILES := \
radeon_bo_gem.c \
radeon_cs_gem.c \
radeon_cs_space.c \
radeon_bo.c \
radeon_cs.c \
radeon_surface.c
LIBDRM_RADEON_H_FILES := \
radeon_bo.h \
radeon_cs.h \
radeon_surface.h \
radeon_bo_gem.h \
radeon_cs_gem.h \
radeon_bo_int.h \
radeon_cs_int.h \
r600_pci_ids.h
LIBDRM_RADEON_BOF_FILES := \
bof.c \
bof.h

View file

@ -32,8 +32,8 @@ libdrm_radeon = library(
gnu_symbol_visibility : 'hidden',
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_atomic_ops],
version : '1.0.1',
install : true,
)
@ -42,7 +42,9 @@ ext_libdrm_radeon = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_radeon', ext_libdrm_radeon)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_radeon', ext_libdrm_radeon)
endif
install_headers(
'radeon_bo.h', 'radeon_cs.h', 'radeon_surface.h', 'radeon_bo_gem.h',
@ -63,6 +65,6 @@ test(
args : [
'--lib', libdrm_radeon,
'--symbols-file', files('radeon-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -2205,7 +2205,6 @@ static int cik_surface_sanity(struct radeon_surface_manager *surf_man,
break;
case RADEON_SURF_MODE_LINEAR_ALIGNED:
default:
*stencil_tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
*tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
}

View file

@ -7,7 +7,6 @@ import subprocess
# This list contains symbols that _might_ be exported for some platforms
PLATFORM_SYMBOLS = [
'_GLOBAL_OFFSET_TABLE_',
'__bss_end__',
'__bss_start__',
'__bss_start',
@ -17,9 +16,6 @@ PLATFORM_SYMBOLS = [
'_end',
'_fini',
'_init',
'_fbss',
'_fdata',
'_ftext',
]

View file

@ -1,14 +0,0 @@
cc_library_shared {
name: "libdrm_tegra",
vendor: true,
shared_libs: ["libdrm"],
srcs: ["tegra.c"],
cflags: [
"-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1",
"-Wall",
"-Werror",
"-Wno-unused-variable",
],
}

View file

@ -33,7 +33,7 @@
#include <unistd.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/poll.h>
#include "private.h"

View file

@ -28,10 +28,10 @@ libdrm_tegra = library(
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops],
dependencies : [dep_pthread_stubs, dep_atomic_ops],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
version : '0.@0@.0'.format(patch_ver),
version : '0.0.0',
install : true,
)
@ -40,7 +40,9 @@ ext_libdrm_tegra = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_tegra', ext_libdrm_tegra)
if meson.version().version_compare('>= 0.54.0')
meson.override_dependency('libdrm_tegra', ext_libdrm_tegra)
endif
install_headers('tegra.h', subdir : 'libdrm')
@ -57,6 +59,6 @@ test(
args : [
'--lib', libdrm_tegra,
'--symbols-file', files('tegra-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -1,7 +0,0 @@
subdirs = ["*"]
cc_library_headers {
name: "libdrm_test_headers",
export_include_dirs: ["."],
vendor_available: true,
}

1
tests/Android.mk Normal file
View file

@ -0,0 +1 @@
include $(call all-subdir-makefiles)

View file

@ -30,7 +30,6 @@
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <inttypes.h>
#include "drm.h"
#include "xf86drmMode.h"
@ -176,7 +175,7 @@ int alloc_bo(uint32_t domain, uint64_t size)
resources[num_buffers] = bo;
virtual[num_buffers] = addr;
fprintf(stdout, "Allocated BO number %u at 0x%" PRIx64 ", domain 0x%x, size %" PRIu64 "\n",
fprintf(stdout, "Allocated BO number %u at 0x%lx, domain 0x%x, size %lu\n",
num_buffers++, addr, domain, size);
return 0;
}
@ -274,7 +273,7 @@ int submit_ib(uint32_t from, uint32_t to, uint64_t size, uint32_t count)
delta = stop.tv_nsec + stop.tv_sec * 1000000000UL;
delta -= start.tv_nsec + start.tv_sec * 1000000000UL;
fprintf(stdout, "Submitted %u IBs to copy from %u(%" PRIx64 ") to %u(%" PRIx64 ") %" PRIu64 " bytes took %" PRIu64 " usec\n",
fprintf(stdout, "Submitted %u IBs to copy from %u(%lx) to %u(%lx) %lu bytes took %lu usec\n",
count, from, virtual[from], to, virtual[to], copied, delta / 1000);
return 0;
}
@ -294,7 +293,7 @@ uint64_t parse_size(void)
char ext[2];
ext[0] = 0;
if (sscanf(optarg, "%" PRIi64 "%1[kmgKMG]", &size, ext) < 1) {
if (sscanf(optarg, "%li%1[kmgKMG]", &size, ext) < 1) {
fprintf(stderr, "Can't parse size arg: %s\n", optarg);
exit(EXIT_FAILURE);
}
@ -376,7 +375,7 @@ int main(int argc, char **argv)
next_arg(argc, argv, "Missing buffer size");
size = parse_size();
if (size < getpagesize()) {
fprintf(stderr, "Buffer size to small %" PRIu64 "\n", size);
fprintf(stderr, "Buffer size to small %lu\n", size);
exit(EXIT_FAILURE);
}
r = alloc_bo(domain, size);
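A note on the format-string hunks above: the main branch prints 64-bit values with the <inttypes.h> PRI macros, while the 2.4.111 side uses plain "%lx"/"%lu", which only match a uint64_t argument where long is 64 bits wide (LP64 targets). A minimal stand-alone C sketch of the portable form, with made-up address and size values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values standing in for a BO address and size. */
	uint64_t addr = 0x100000000ULL;
	uint64_t size = 4096;

	/* PRIx64/PRIu64 expand to the right length modifier on every target
	 * ("lx"/"lu" on LP64, "llx"/"llu" on 32-bit), so this is portable;
	 * a plain "%lx"/"%lu" is only correct where long is 64 bits. */
	printf("Allocated BO at 0x%" PRIx64 ", size %" PRIu64 "\n", addr, size);
	return 0;
}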

822
tests/amdgpu/amdgpu_test.c Normal file
View file

@ -0,0 +1,822 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <stdarg.h>
#include <stdint.h>
#ifdef __linux__
#include <linux/limits.h>
#elif __FreeBSD__
/* SPECNAMELEN in FreeBSD is defined here: */
#include <sys/param.h>
#endif
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include "drm.h"
#include "xf86drmMode.h"
#include "xf86drm.h"
#include "CUnit/Basic.h"
#include "amdgpu_test.h"
#include "amdgpu_internal.h"
/* Test suite names */
#define BASIC_TESTS_STR "Basic Tests"
#define BO_TESTS_STR "BO Tests"
#define CS_TESTS_STR "CS Tests"
#define VCE_TESTS_STR "VCE Tests"
#define VCN_TESTS_STR "VCN Tests"
#define JPEG_TESTS_STR "JPEG Tests"
#define UVD_ENC_TESTS_STR "UVD ENC Tests"
#define DEADLOCK_TESTS_STR "Deadlock Tests"
#define VM_TESTS_STR "VM Tests"
#define RAS_TESTS_STR "RAS Tests"
#define SYNCOBJ_TIMELINE_TESTS_STR "SYNCOBJ TIMELINE Tests"
#define SECURITY_TESTS_STR "Security Tests"
#define HOTUNPLUG_TESTS_STR "Hotunplug Tests"
#define CP_DMA_TESTS_STR "CP DMA Tests"
/**
* Open handles for amdgpu devices
*
*/
int drm_amdgpu[MAX_CARDS_SUPPORTED];
/** Open render node to test */
int open_render_node = 0; /* By default run most tests on primary node */
/** The table of all known test suites to run */
static CU_SuiteInfo suites[] = {
{
.pName = BASIC_TESTS_STR,
.pInitFunc = suite_basic_tests_init,
.pCleanupFunc = suite_basic_tests_clean,
.pTests = basic_tests,
},
{
.pName = BO_TESTS_STR,
.pInitFunc = suite_bo_tests_init,
.pCleanupFunc = suite_bo_tests_clean,
.pTests = bo_tests,
},
{
.pName = CS_TESTS_STR,
.pInitFunc = suite_cs_tests_init,
.pCleanupFunc = suite_cs_tests_clean,
.pTests = cs_tests,
},
{
.pName = VCE_TESTS_STR,
.pInitFunc = suite_vce_tests_init,
.pCleanupFunc = suite_vce_tests_clean,
.pTests = vce_tests,
},
{
.pName = VCN_TESTS_STR,
.pInitFunc = suite_vcn_tests_init,
.pCleanupFunc = suite_vcn_tests_clean,
.pTests = vcn_tests,
},
{
.pName = JPEG_TESTS_STR,
.pInitFunc = suite_jpeg_tests_init,
.pCleanupFunc = suite_jpeg_tests_clean,
.pTests = jpeg_tests,
},
{
.pName = UVD_ENC_TESTS_STR,
.pInitFunc = suite_uvd_enc_tests_init,
.pCleanupFunc = suite_uvd_enc_tests_clean,
.pTests = uvd_enc_tests,
},
{
.pName = DEADLOCK_TESTS_STR,
.pInitFunc = suite_deadlock_tests_init,
.pCleanupFunc = suite_deadlock_tests_clean,
.pTests = deadlock_tests,
},
{
.pName = VM_TESTS_STR,
.pInitFunc = suite_vm_tests_init,
.pCleanupFunc = suite_vm_tests_clean,
.pTests = vm_tests,
},
{
.pName = RAS_TESTS_STR,
.pInitFunc = suite_ras_tests_init,
.pCleanupFunc = suite_ras_tests_clean,
.pTests = ras_tests,
},
{
.pName = SYNCOBJ_TIMELINE_TESTS_STR,
.pInitFunc = suite_syncobj_timeline_tests_init,
.pCleanupFunc = suite_syncobj_timeline_tests_clean,
.pTests = syncobj_timeline_tests,
},
{
.pName = SECURITY_TESTS_STR,
.pInitFunc = suite_security_tests_init,
.pCleanupFunc = suite_security_tests_clean,
.pTests = security_tests,
},
{
.pName = HOTUNPLUG_TESTS_STR,
.pInitFunc = suite_hotunplug_tests_init,
.pCleanupFunc = suite_hotunplug_tests_clean,
.pTests = hotunplug_tests,
},
{
.pName = CP_DMA_TESTS_STR,
.pInitFunc = suite_cp_dma_tests_init,
.pCleanupFunc = suite_cp_dma_tests_clean,
.pTests = cp_dma_tests,
},
CU_SUITE_INFO_NULL,
};
typedef CU_BOOL (*active__stat_func)(void);
typedef struct Suites_Active_Status {
char* pName;
active__stat_func pActive;
}Suites_Active_Status;
static CU_BOOL always_active()
{
return CU_TRUE;
}
static Suites_Active_Status suites_active_stat[] = {
{
.pName = BASIC_TESTS_STR,
.pActive = suite_basic_tests_enable,
},
{
.pName = BO_TESTS_STR,
.pActive = always_active,
},
{
.pName = CS_TESTS_STR,
.pActive = suite_cs_tests_enable,
},
{
.pName = VCE_TESTS_STR,
.pActive = suite_vce_tests_enable,
},
{
.pName = VCN_TESTS_STR,
.pActive = suite_vcn_tests_enable,
},
{
.pName = JPEG_TESTS_STR,
.pActive = suite_jpeg_tests_enable,
},
{
.pName = UVD_ENC_TESTS_STR,
.pActive = suite_uvd_enc_tests_enable,
},
{
.pName = DEADLOCK_TESTS_STR,
.pActive = suite_deadlock_tests_enable,
},
{
.pName = VM_TESTS_STR,
.pActive = suite_vm_tests_enable,
},
{
.pName = RAS_TESTS_STR,
.pActive = suite_ras_tests_enable,
},
{
.pName = SYNCOBJ_TIMELINE_TESTS_STR,
.pActive = suite_syncobj_timeline_tests_enable,
},
{
.pName = SECURITY_TESTS_STR,
.pActive = suite_security_tests_enable,
},
{
.pName = HOTUNPLUG_TESTS_STR,
.pActive = suite_hotunplug_tests_enable,
},
{
.pName = CP_DMA_TESTS_STR,
.pActive = suite_cp_dma_tests_enable,
},
};
/*
* Display information about all suites and their tests
*
* NOTE: Must be run after registry is initialized and suites registered.
*/
static void display_test_suites(void)
{
int iSuite;
int iTest;
CU_pSuite pSuite = NULL;
CU_pTest pTest = NULL;
printf("%5s: %2s: %8s: %s\n", "What", "ID", "Status", "Name");
for (iSuite = 0; suites[iSuite].pName != NULL; iSuite++) {
pSuite = CU_get_suite_by_index((unsigned int) iSuite + 1,
CU_get_registry());
if (!pSuite) {
fprintf(stderr, "Invalid suite id : %d\n", iSuite + 1);
continue;
}
printf("Suite: %2d: %8s: %s\n",
iSuite + 1,
pSuite->fActive ? "ENABLED" : "DISABLED",
suites[iSuite].pName);
if (!pSuite->fActive)
continue;
for (iTest = 0; suites[iSuite].pTests[iTest].pName != NULL;
iTest++) {
pTest = CU_get_test_by_index((unsigned int) iTest + 1,
pSuite);
if (!pTest) {
fprintf(stderr, "Invalid test id : %d\n", iTest + 1);
continue;
}
printf(" Test: %2d: %8s: %s\n",
iTest + 1,
pSuite->fActive && pTest->fActive ? "ENABLED" : "DISABLED",
suites[iSuite].pTests[iTest].pName);
}
}
}
/** Help string for command line parameters */
static const char usage[] =
"Usage: %s [-hlpr] [<-s <suite id>> [-t <test id>] [-f]] "
"[-b <pci_bus_id> [-d <pci_device_id>]]\n"
"where:\n"
" l - Display all suites and their tests\n"
" r - Run the tests on render node\n"
" b - Specify device's PCI bus id to run tests\n"
" d - Specify device's PCI device id to run tests (optional)\n"
" p - Display information of AMDGPU devices in system\n"
" f - Force executing inactive suite or test\n"
" h - Display this help\n";
/** Specified options strings for getopt */
static const char options[] = "hlrps:t:b:d:f";
/* Open AMD devices.
* Return the number of AMD device opened.
*/
static int amdgpu_open_devices(int open_render_node)
{
drmDevicePtr devices[MAX_CARDS_SUPPORTED];
int i;
int drm_node;
int amd_index = 0;
int drm_count;
int fd;
drmVersionPtr version;
for (i = 0; i < MAX_CARDS_SUPPORTED; i++) {
drm_amdgpu[i] = -1;
}
drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
if (drm_count < 0) {
fprintf(stderr,
"drmGetDevices2() returned an error %d\n",
drm_count);
return 0;
}
for (i = 0; i < drm_count; i++) {
/* If this is not PCI device, skip*/
if (devices[i]->bustype != DRM_BUS_PCI)
continue;
/* If this is not AMD GPU vendor ID, skip*/
if (devices[i]->deviceinfo.pci->vendor_id != 0x1002)
continue;
if (open_render_node)
drm_node = DRM_NODE_RENDER;
else
drm_node = DRM_NODE_PRIMARY;
fd = -1;
if (devices[i]->available_nodes & 1 << drm_node)
fd = open(
devices[i]->nodes[drm_node],
O_RDWR | O_CLOEXEC);
/* This node is not available. */
if (fd < 0) continue;
version = drmGetVersion(fd);
if (!version) {
fprintf(stderr,
"Warning: Cannot get version for %s."
"Error is %s\n",
devices[i]->nodes[drm_node],
strerror(errno));
close(fd);
continue;
}
if (strcmp(version->name, "amdgpu")) {
/* This is not AMDGPU driver, skip.*/
drmFreeVersion(version);
close(fd);
continue;
}
drmFreeVersion(version);
drm_amdgpu[amd_index] = fd;
amd_index++;
}
drmFreeDevices(devices, drm_count);
return amd_index;
}
/* Close AMD devices.
*/
void amdgpu_close_devices()
{
int i;
for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
if (drm_amdgpu[i] >=0) {
close(drm_amdgpu[i]);
}
}
/* Print AMD devices information */
static void amdgpu_print_devices()
{
int i;
drmDevicePtr device;
/* Open the first AMD device to print driver information. */
if (drm_amdgpu[0] >=0) {
/* Display AMD driver version information.*/
drmVersionPtr retval = drmGetVersion(drm_amdgpu[0]);
if (retval == NULL) {
perror("Cannot get version for AMDGPU device");
return;
}
printf("Driver name: %s, Date: %s, Description: %s.\n",
retval->name, retval->date, retval->desc);
drmFreeVersion(retval);
}
/* Display information of AMD devices */
printf("Devices:\n");
for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >=0; i++)
if (drmGetDevice2(drm_amdgpu[i],
DRM_DEVICE_GET_PCI_REVISION,
&device) == 0) {
if (device->bustype == DRM_BUS_PCI) {
printf("PCI ");
printf(" domain:%04x",
device->businfo.pci->domain);
printf(" bus:%02x",
device->businfo.pci->bus);
printf(" device:%02x",
device->businfo.pci->dev);
printf(" function:%01x",
device->businfo.pci->func);
printf(" vendor_id:%04x",
device->deviceinfo.pci->vendor_id);
printf(" device_id:%04x",
device->deviceinfo.pci->device_id);
printf(" subvendor_id:%04x",
device->deviceinfo.pci->subvendor_id);
printf(" subdevice_id:%04x",
device->deviceinfo.pci->subdevice_id);
printf(" revision_id:%02x",
device->deviceinfo.pci->revision_id);
printf("\n");
}
drmFreeDevice(&device);
}
}
/* Find a match AMD device in PCI bus
* Return the index of the device or -1 if not found
*/
static int amdgpu_find_device(uint8_t bus, uint16_t dev)
{
int i;
drmDevicePtr device;
for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >= 0; i++) {
if (drmGetDevice2(drm_amdgpu[i],
DRM_DEVICE_GET_PCI_REVISION,
&device) == 0) {
if (device->bustype == DRM_BUS_PCI)
if ((bus == 0xFF || device->businfo.pci->bus == bus) &&
device->deviceinfo.pci->device_id == dev) {
drmFreeDevice(&device);
return i;
}
drmFreeDevice(&device);
}
}
return -1;
}
static void amdgpu_disable_suites()
{
amdgpu_device_handle device_handle;
uint32_t major_version, minor_version, family_id;
drmDevicePtr devices[MAX_CARDS_SUPPORTED];
int i, drm_count;
int size = sizeof(suites_active_stat) / sizeof(suites_active_stat[0]);
if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
&minor_version, &device_handle))
return;
family_id = device_handle->info.family_id;
if (amdgpu_device_deinitialize(device_handle))
return;
drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
/* Set active status for suites based on their policies */
for (i = 0; i < size; ++i)
if (amdgpu_set_suite_active(suites_active_stat[i].pName,
suites_active_stat[i].pActive()))
fprintf(stderr, "suite deactivation failed - %s\n", CU_get_error_msg());
/* Explicitly disable specific tests due to known bugs or preferences */
/*
* BUG: Compute ring stalls and never recovers when the address is
* written after the command already submitted
*/
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"compute ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"sdma ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"gfx ring bad dispatch test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"compute ring bad dispatch test (set amdgpu.lockup_timeout=50,50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"gfx ring bad slow dispatch test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"compute ring bad slow dispatch test (set amdgpu.lockup_timeout=50,50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"gfx ring bad draw test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
"gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
if (amdgpu_set_test_active(BASIC_TESTS_STR, "bo eviction Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX8 and GFX9 only */
if (family_id < AMDGPU_FAMILY_VI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Sync dependency Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV) {
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Dispatch Test (GFX)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Dispatch Test (Compute)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
}
/* This test was ran on GFX9 only */
if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Draw Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was ran on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "GPU reset Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* You need at least 2 devices for this */
if (drm_count < 2)
if (amdgpu_set_test_active(HOTUNPLUG_TESTS_STR, "Unplug with exported fence", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
}
int test_device_index;
int amdgpu_open_device_on_test_index(int render_node)
{
int i;
if (amdgpu_open_devices(open_render_node) <= 0) {
perror("Cannot open AMDGPU device");
return -1;
}
if (test_device_index >= 0) {
/* Most tests run on device of drm_amdgpu[0].
* Swap the chosen device to drm_amdgpu[0].
*/
i = drm_amdgpu[0];
drm_amdgpu[0] = drm_amdgpu[test_device_index];
drm_amdgpu[test_device_index] = i;
}
return 0;
}
static bool amdgpu_node_is_drm(int maj, int min)
{
#ifdef __linux__
char path[64];
struct stat sbuf;
snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device/drm",
maj, min);
return stat(path, &sbuf) == 0;
#elif defined(__FreeBSD__)
char name[SPECNAMELEN];
if (!devname_r(makedev(maj, min), S_IFCHR, name, sizeof(name)))
return 0;
/* Handle drm/ and dri/ as both are present in different FreeBSD version
* FreeBSD on amd64/i386/powerpc external kernel modules create node in
* in /dev/drm/ and links in /dev/dri while a WIP in kernel driver creates
* only device nodes in /dev/dri/ */
return (!strncmp(name, "drm/", 4) || !strncmp(name, "dri/", 4));
#else
return maj == DRM_MAJOR;
#endif
}
char *amdgpu_get_device_from_fd(int fd)
{
#ifdef __linux__
struct stat sbuf;
char path[PATH_MAX + 1];
unsigned int maj, min;
if (fstat(fd, &sbuf))
return NULL;
maj = major(sbuf.st_rdev);
min = minor(sbuf.st_rdev);
if (!amdgpu_node_is_drm(maj, min) || !S_ISCHR(sbuf.st_mode))
return NULL;
snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
return strdup(path);
#else
return NULL;
#endif
}
/* The main() function for setting up and running the tests.
* Returns a CUE_SUCCESS on successful running, another
* CUnit error code on failure.
*/
int main(int argc, char **argv)
{
int c; /* Character received from getopt */
int i = 0;
int suite_id = -1; /* By default run everything */
int test_id = -1; /* By default run all tests in the suite */
int pci_bus_id = -1; /* By default PC bus ID is not specified */
int pci_device_id = 0; /* By default PC device ID is zero */
int display_devices = 0;/* By default not to display devices' info */
CU_pSuite pSuite = NULL;
CU_pTest pTest = NULL;
int display_list = 0;
int force_run = 0;
for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
drm_amdgpu[i] = -1;
/* Parse command line string */
opterr = 0; /* Do not print error messages from getopt */
while ((c = getopt(argc, argv, options)) != -1) {
switch (c) {
case 'l':
display_list = 1;
break;
case 's':
suite_id = atoi(optarg);
break;
case 't':
test_id = atoi(optarg);
break;
case 'b':
pci_bus_id = atoi(optarg);
break;
case 'd':
sscanf(optarg, "%x", &pci_device_id);
break;
case 'p':
display_devices = 1;
break;
case 'r':
open_render_node = 1;
break;
case 'f':
force_run = 1;
break;
case '?':
case 'h':
fprintf(stderr, usage, argv[0]);
exit(EXIT_SUCCESS);
default:
fprintf(stderr, usage, argv[0]);
exit(EXIT_FAILURE);
}
}
if (amdgpu_open_devices(open_render_node) <= 0) {
perror("Cannot open AMDGPU device");
exit(EXIT_FAILURE);
}
if (drm_amdgpu[0] < 0) {
perror("Cannot open AMDGPU device");
exit(EXIT_FAILURE);
}
if (display_devices) {
amdgpu_print_devices();
amdgpu_close_devices();
exit(EXIT_SUCCESS);
}
if (pci_bus_id > 0 || pci_device_id) {
/* A device was specified to run the test */
test_device_index = amdgpu_find_device(pci_bus_id,
pci_device_id);
if (test_device_index >= 0) {
/* Most tests run on device of drm_amdgpu[0].
* Swap the chosen device to drm_amdgpu[0].
*/
i = drm_amdgpu[0];
drm_amdgpu[0] = drm_amdgpu[test_device_index];
drm_amdgpu[test_device_index] = i;
} else {
fprintf(stderr,
"The specified GPU device does not exist.\n");
exit(EXIT_FAILURE);
}
}
/* Initialize test suites to run */
/* initialize the CUnit test registry */
if (CUE_SUCCESS != CU_initialize_registry()) {
amdgpu_close_devices();
return CU_get_error();
}
/* Register suites. */
if (CU_register_suites(suites) != CUE_SUCCESS) {
fprintf(stderr, "suite registration failed - %s\n",
CU_get_error_msg());
CU_cleanup_registry();
amdgpu_close_devices();
exit(EXIT_FAILURE);
}
/* Run tests using the CUnit Basic interface */
CU_basic_set_mode(CU_BRM_VERBOSE);
/* Disable suites and individual tests based on misc. conditions */
amdgpu_disable_suites();
if (display_list) {
display_test_suites();
goto end;
}
if (suite_id != -1) { /* If user specify particular suite? */
pSuite = CU_get_suite_by_index((unsigned int) suite_id,
CU_get_registry());
if (pSuite) {
if (force_run)
CU_set_suite_active(pSuite, CU_TRUE);
if (test_id != -1) { /* If user specify test id */
pTest = CU_get_test_by_index(
(unsigned int) test_id,
pSuite);
if (pTest) {
if (force_run)
CU_set_test_active(pTest, CU_TRUE);
CU_basic_run_test(pSuite, pTest);
}
else {
fprintf(stderr, "Invalid test id: %d\n",
test_id);
CU_cleanup_registry();
amdgpu_close_devices();
exit(EXIT_FAILURE);
}
} else
CU_basic_run_suite(pSuite);
} else {
fprintf(stderr, "Invalid suite id : %d\n",
suite_id);
CU_cleanup_registry();
amdgpu_close_devices();
exit(EXIT_FAILURE);
}
} else
CU_basic_run_tests();
end:
CU_cleanup_registry();
amdgpu_close_devices();
return CU_get_error();
}

547
tests/amdgpu/amdgpu_test.h Normal file
View file

@ -0,0 +1,547 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _AMDGPU_TEST_H_
#define _AMDGPU_TEST_H_
#include "amdgpu.h"
#include "amdgpu_drm.h"
/**
* Define max. number of card in system which we are able to handle
*/
#define MAX_CARDS_SUPPORTED 128
/* Forward reference for array to keep "drm" handles */
extern int drm_amdgpu[MAX_CARDS_SUPPORTED];
/* Global variables */
extern int open_render_node;
/************************* Basic test suite ********************************/
/*
* Define basic test suite to serve as the starting point for future testing
*/
/**
* Initialize basic test suite
*/
int suite_basic_tests_init();
/**
* Deinitialize basic test suite
*/
int suite_basic_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_basic_tests_enable(void);
/**
* Tests in basic test suite
*/
extern CU_TestInfo basic_tests[];
/**
* Initialize bo test suite
*/
int suite_bo_tests_init();
/**
* Deinitialize bo test suite
*/
int suite_bo_tests_clean();
/**
* Tests in bo test suite
*/
extern CU_TestInfo bo_tests[];
/**
* Initialize cs test suite
*/
int suite_cs_tests_init();
/**
* Deinitialize cs test suite
*/
int suite_cs_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_cs_tests_enable(void);
/**
* Tests in cs test suite
*/
extern CU_TestInfo cs_tests[];
/**
* Initialize vce test suite
*/
int suite_vce_tests_init();
/**
* Deinitialize vce test suite
*/
int suite_vce_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_vce_tests_enable(void);
/**
* Tests in vce test suite
*/
extern CU_TestInfo vce_tests[];
/**
 * Initialize vcn test suite
 */
int suite_vcn_tests_init();
/**
 * Deinitialize vcn test suite
 */
int suite_vcn_tests_clean();
/**
 * Decide if the suite is enabled by default or not.
 */
CU_BOOL suite_vcn_tests_enable(void);
/**
 * Tests in vcn test suite
 */
extern CU_TestInfo vcn_tests[];
/**
 * Initialize jpeg test suite
 */
int suite_jpeg_tests_init();
/**
 * Deinitialize jpeg test suite
 */
int suite_jpeg_tests_clean();
/**
 * Decide if the suite is enabled by default or not.
 */
CU_BOOL suite_jpeg_tests_enable(void);
/**
 * Tests in vcn test suite
 */
extern CU_TestInfo jpeg_tests[];
/**
* Initialize uvd enc test suite
*/
int suite_uvd_enc_tests_init();
/**
* Deinitialize uvd enc test suite
*/
int suite_uvd_enc_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_uvd_enc_tests_enable(void);
/**
* Tests in uvd enc test suite
*/
extern CU_TestInfo uvd_enc_tests[];
/**
* Initialize deadlock test suite
*/
int suite_deadlock_tests_init();
/**
* Deinitialize deadlock test suite
*/
int suite_deadlock_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_deadlock_tests_enable(void);
/**
* Tests in uvd enc test suite
*/
extern CU_TestInfo deadlock_tests[];
/**
* Initialize vm test suite
*/
int suite_vm_tests_init();
/**
* Deinitialize deadlock test suite
*/
int suite_vm_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_vm_tests_enable(void);
/**
* Tests in vm test suite
*/
extern CU_TestInfo vm_tests[];
/**
* Initialize ras test suite
*/
int suite_ras_tests_init();
/**
* Deinitialize deadlock test suite
*/
int suite_ras_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_ras_tests_enable(void);
/**
* Tests in ras test suite
*/
extern CU_TestInfo ras_tests[];
/**
* Initialize syncobj timeline test suite
*/
int suite_syncobj_timeline_tests_init();
/**
* Deinitialize syncobj timeline test suite
*/
int suite_syncobj_timeline_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_syncobj_timeline_tests_enable(void);
/**
* Tests in syncobj timeline test suite
*/
extern CU_TestInfo syncobj_timeline_tests[];
/**
* Initialize cp dma test suite
*/
int suite_cp_dma_tests_init();
/**
* Deinitialize cp dma test suite
*/
int suite_cp_dma_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_cp_dma_tests_enable(void);
/**
* Tests in cp dma test suite
*/
extern CU_TestInfo cp_dma_tests[];
void amdgpu_dispatch_hang_helper(amdgpu_device_handle device_handle, uint32_t ip_type);
void amdgpu_dispatch_hang_slow_helper(amdgpu_device_handle device_handle, uint32_t ip_type);
void amdgpu_memcpy_draw_test(amdgpu_device_handle device_handle, uint32_t ring,
int version, int hang);
void amdgpu_memcpy_draw_hang_slow_test(amdgpu_device_handle device_handle, uint32_t ring, int version);
/**
* Initialize security test suite
*/
int suite_security_tests_init();
/**
* Deinitialize security test suite
*/
int suite_security_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_security_tests_enable(void);
/**
* Tests in security test suite
*/
extern CU_TestInfo security_tests[];
extern void
amdgpu_command_submission_write_linear_helper_with_secure(amdgpu_device_handle
device,
unsigned ip_type,
bool secure);
/**
* Initialize hotunplug test suite
*/
int suite_hotunplug_tests_init();
/**
* Deinitialize hotunplug test suite
*/
int suite_hotunplug_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_hotunplug_tests_enable(void);
/**
* Tests in uvd enc test suite
*/
extern CU_TestInfo hotunplug_tests[];
/**
* Helper functions
*/
static inline amdgpu_bo_handle gpu_mem_alloc(
amdgpu_device_handle device_handle,
uint64_t size,
uint64_t alignment,
uint32_t type,
uint64_t flags,
uint64_t *vmc_addr,
amdgpu_va_handle *va_handle)
{
struct amdgpu_bo_alloc_request req = {0};
amdgpu_bo_handle buf_handle = NULL;
int r;
req.alloc_size = size;
req.phys_alignment = alignment;
req.preferred_heap = type;
req.flags = flags;
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, 0);
if (r)
return NULL;
if (vmc_addr && va_handle) {
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
size, alignment, 0, vmc_addr,
va_handle, 0);
CU_ASSERT_EQUAL(r, 0);
if (r)
goto error_free_bo;
r = amdgpu_bo_va_op(buf_handle, 0, size, *vmc_addr, 0,
AMDGPU_VA_OP_MAP);
CU_ASSERT_EQUAL(r, 0);
if (r)
goto error_free_va;
}
return buf_handle;
error_free_va:
r = amdgpu_va_range_free(*va_handle);
CU_ASSERT_EQUAL(r, 0);
error_free_bo:
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
return NULL;
}
static inline int gpu_mem_free(amdgpu_bo_handle bo,
amdgpu_va_handle va_handle,
uint64_t vmc_addr,
uint64_t size)
{
int r;
if (!bo)
return 0;
if (va_handle) {
r = amdgpu_bo_va_op(bo, 0, size, vmc_addr, 0,
AMDGPU_VA_OP_UNMAP);
CU_ASSERT_EQUAL(r, 0);
if (r)
return r;
r = amdgpu_va_range_free(va_handle);
CU_ASSERT_EQUAL(r, 0);
if (r)
return r;
}
r = amdgpu_bo_free(bo);
CU_ASSERT_EQUAL(r, 0);
return r;
}
static inline int
amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
unsigned alignment, unsigned heap, uint64_t flags,
amdgpu_bo_handle *bo)
{
struct amdgpu_bo_alloc_request request = {};
amdgpu_bo_handle buf_handle;
int r;
request.alloc_size = size;
request.phys_alignment = alignment;
request.preferred_heap = heap;
request.flags = flags;
r = amdgpu_bo_alloc(dev, &request, &buf_handle);
if (r)
return r;
*bo = buf_handle;
return 0;
}
int amdgpu_bo_alloc_and_map_raw(amdgpu_device_handle dev, unsigned size,
unsigned alignment, unsigned heap, uint64_t alloc_flags,
uint64_t mapping_flags, amdgpu_bo_handle *bo, void **cpu,
uint64_t *mc_address,
amdgpu_va_handle *va_handle);
static inline int
amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
unsigned alignment, unsigned heap, uint64_t alloc_flags,
amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address,
amdgpu_va_handle *va_handle)
{
return amdgpu_bo_alloc_and_map_raw(dev, size, alignment, heap,
alloc_flags, 0, bo, cpu, mc_address, va_handle);
}
static inline int
amdgpu_bo_unmap_and_free(amdgpu_bo_handle bo, amdgpu_va_handle va_handle,
uint64_t mc_addr, uint64_t size)
{
amdgpu_bo_cpu_unmap(bo);
amdgpu_bo_va_op(bo, 0, size, mc_addr, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(va_handle);
amdgpu_bo_free(bo);
return 0;
}
static inline int
amdgpu_get_bo_list(amdgpu_device_handle dev, amdgpu_bo_handle bo1,
amdgpu_bo_handle bo2, amdgpu_bo_list_handle *list)
{
amdgpu_bo_handle resources[] = {bo1, bo2};
return amdgpu_bo_list_create(dev, bo2 ? 2 : 1, resources, NULL, list);
}
static inline CU_ErrorCode amdgpu_set_suite_active(const char *suite_name,
CU_BOOL active)
{
CU_ErrorCode r = CU_set_suite_active(CU_get_suite(suite_name), active);
if (r != CUE_SUCCESS)
fprintf(stderr, "Failed to obtain suite %s\n", suite_name);
return r;
}
static inline CU_ErrorCode amdgpu_set_test_active(const char *suite_name,
const char *test_name, CU_BOOL active)
{
CU_ErrorCode r;
CU_pSuite pSuite = CU_get_suite(suite_name);
if (!pSuite) {
fprintf(stderr, "Failed to obtain suite %s\n",
suite_name);
return CUE_NOSUITE;
}
r = CU_set_test_active(CU_get_test(pSuite, test_name), active);
if (r != CUE_SUCCESS)
fprintf(stderr, "Failed to obtain test %s\n", test_name);
return r;
}
static inline bool asic_is_gfx_pipe_removed(uint32_t family_id, uint32_t chip_id, uint32_t chip_rev)
{
if (family_id != AMDGPU_FAMILY_AI)
return false;
switch (chip_id - chip_rev) {
/* Arcturus */
case 0x32:
/* Aldebaran */
case 0x3c:
return true;
default:
return false;
}
}
void amdgpu_test_exec_cs_helper_raw(amdgpu_device_handle device_handle,
amdgpu_context_handle context_handle,
unsigned ip_type, int instance, int pm4_dw,
uint32_t *pm4_src, int res_cnt,
amdgpu_bo_handle *resources,
struct amdgpu_cs_ib_info *ib_info,
struct amdgpu_cs_request *ibs_request,
bool secure);
void amdgpu_close_devices();
int amdgpu_open_device_on_test_index(int render_node);
char *amdgpu_get_device_from_fd(int fd);
#endif /* #ifdef _AMDGPU_TEST_H_ */

4329
tests/amdgpu/basic_tests.c Normal file

File diff suppressed because it is too large

317
tests/amdgpu/bo_tests.c Normal file
View file

@ -0,0 +1,317 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <stdio.h>
#include "CUnit/Basic.h"
#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#define BUFFER_SIZE (4*1024)
#define BUFFER_ALIGN (4*1024)
static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static amdgpu_bo_handle buffer_handle;
static uint64_t virtual_mc_base_address;
static amdgpu_va_handle va_handle;
static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
static void amdgpu_bo_map_unmap(void);
static void amdgpu_memory_alloc(void);
static void amdgpu_mem_fail_alloc(void);
static void amdgpu_bo_find_by_cpu_mapping(void);
CU_TestInfo bo_tests[] = {
{ "Export/Import", amdgpu_bo_export_import },
{ "Metadata", amdgpu_bo_metadata },
{ "CPU map/unmap", amdgpu_bo_map_unmap },
{ "Memory alloc Test", amdgpu_memory_alloc },
{ "Memory fail alloc Test", amdgpu_mem_fail_alloc },
{ "Find bo by CPU mapping", amdgpu_bo_find_by_cpu_mapping },
CU_TEST_INFO_NULL,
};
int suite_bo_tests_init(void)
{
struct amdgpu_bo_alloc_request req = {0};
amdgpu_bo_handle buf_handle;
uint64_t va;
int r;
r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
&minor_version, &device_handle);
if (r) {
if ((r == -EACCES) && (errno == EACCES))
printf("\n\nError:%s. "
"Hint:Try to run this test program as root.",
strerror(errno));
return CUE_SINIT_FAILED;
}
req.alloc_size = BUFFER_SIZE;
req.phys_alignment = BUFFER_ALIGN;
req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
if (r)
return CUE_SINIT_FAILED;
r = amdgpu_va_range_alloc(device_handle,
amdgpu_gpu_va_range_general,
BUFFER_SIZE, BUFFER_ALIGN, 0,
&va, &va_handle, 0);
if (r)
goto error_va_alloc;
r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, va, 0, AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
buffer_handle = buf_handle;
virtual_mc_base_address = va;
return CUE_SUCCESS;
error_va_map:
amdgpu_va_range_free(va_handle);
error_va_alloc:
amdgpu_bo_free(buf_handle);
return CUE_SINIT_FAILED;
}
int suite_bo_tests_clean(void)
{
int r;
r = amdgpu_bo_va_op(buffer_handle, 0, BUFFER_SIZE,
virtual_mc_base_address, 0,
AMDGPU_VA_OP_UNMAP);
if (r)
return CUE_SCLEAN_FAILED;
r = amdgpu_va_range_free(va_handle);
if (r)
return CUE_SCLEAN_FAILED;
r = amdgpu_bo_free(buffer_handle);
if (r)
return CUE_SCLEAN_FAILED;
r = amdgpu_device_deinitialize(device_handle);
if (r)
return CUE_SCLEAN_FAILED;
return CUE_SUCCESS;
}
static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type)
{
struct amdgpu_bo_import_result res = {0};
uint32_t shared_handle;
int r;
r = amdgpu_bo_export(buffer_handle, type, &shared_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_import(device_handle, type, shared_handle, &res);
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_EQUAL(res.buf_handle, buffer_handle);
CU_ASSERT_EQUAL(res.alloc_size, BUFFER_SIZE);
r = amdgpu_bo_free(res.buf_handle);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_bo_export_import(void)
{
if (open_render_node) {
printf("(DRM render node is used. Skip export/Import test) ");
return;
}
amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
}
static void amdgpu_bo_metadata(void)
{
struct amdgpu_bo_metadata meta = {0};
struct amdgpu_bo_info info = {0};
int r;
meta.size_metadata = 4;
meta.umd_metadata[0] = 0xdeadbeef;
r = amdgpu_bo_set_metadata(buffer_handle, &meta);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_query_info(buffer_handle, &info);
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_EQUAL(info.metadata.size_metadata, 4);
CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
}
static void amdgpu_bo_map_unmap(void)
{
uint32_t *ptr;
int i, r;
r = amdgpu_bo_cpu_map(buffer_handle, (void **)&ptr);
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_NOT_EQUAL(ptr, NULL);
for (i = 0; i < (BUFFER_SIZE / 4); ++i)
ptr[i] = 0xdeadbeef;
r = amdgpu_bo_cpu_unmap(buffer_handle);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_memory_alloc(void)
{
amdgpu_bo_handle bo;
amdgpu_va_handle va_handle;
uint64_t bo_mc;
int r;
/* Test visible VRAM */
bo = gpu_mem_alloc(device_handle,
4096, 4096,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
&bo_mc, &va_handle);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test invisible VRAM */
bo = gpu_mem_alloc(device_handle,
4096, 4096,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
&bo_mc, &va_handle);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GART Cacheable */
bo = gpu_mem_alloc(device_handle,
4096, 4096,
AMDGPU_GEM_DOMAIN_GTT,
0, &bo_mc, &va_handle);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GART USWC */
bo = gpu_mem_alloc(device_handle,
4096, 4096,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
&bo_mc, &va_handle);
r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GDS */
bo = gpu_mem_alloc(device_handle, 1024, 0,
AMDGPU_GEM_DOMAIN_GDS, 0,
NULL, NULL);
r = gpu_mem_free(bo, NULL, 0, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test GWS */
bo = gpu_mem_alloc(device_handle, 1, 0,
AMDGPU_GEM_DOMAIN_GWS, 0,
NULL, NULL);
r = gpu_mem_free(bo, NULL, 0, 4096);
CU_ASSERT_EQUAL(r, 0);
/* Test OA */
bo = gpu_mem_alloc(device_handle, 1, 0,
AMDGPU_GEM_DOMAIN_OA, 0,
NULL, NULL);
r = gpu_mem_free(bo, NULL, 0, 4096);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_mem_fail_alloc(void)
{
int r;
struct amdgpu_bo_alloc_request req = {0};
amdgpu_bo_handle buf_handle;
/* Test impossible mem allocation, 1TB */
req.alloc_size = 0xE8D4A51000;
req.phys_alignment = 4096;
req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
req.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
CU_ASSERT_EQUAL(r, -ENOMEM);
if (!r) {
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
}
}
static void amdgpu_bo_find_by_cpu_mapping(void)
{
amdgpu_bo_handle bo_handle, find_bo_handle;
amdgpu_va_handle va_handle;
void *bo_cpu;
uint64_t bo_mc_address;
uint64_t offset;
int r;
r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
&bo_handle, &bo_cpu,
&bo_mc_address, &va_handle);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_find_bo_by_cpu_mapping(device_handle,
bo_cpu,
4096,
&find_bo_handle,
&offset);
CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_EQUAL(offset, 0);
CU_ASSERT_EQUAL(bo_handle->handle, find_bo_handle->handle);
atomic_dec(&find_bo_handle->refcount, 1);
r = amdgpu_bo_unmap_and_free(bo_handle, va_handle,
bo_mc_address, 4096);
CU_ASSERT_EQUAL(r, 0);
}

Some files were not shown because too many files have changed in this diff