Compare commits


No commits in common. "main" and "libdrm-2.4.103" have entirely different histories.

224 changed files with 22040 additions and 18766 deletions


@ -12,187 +12,69 @@
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
.templates_sha: &template_sha c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb # see https://docs.gitlab.com/ee/ci/yaml/#includefile
variables:
UPSTREAM_REPO: mesa/drm
DEBIAN_TAG: "2019-11-16"
DEBIAN_VERSION: buster-slim
DEBIAN_IMAGE: "$CI_REGISTRY_IMAGE/debian/$DEBIAN_VERSION:$DEBIAN_TAG"
include:
- project: 'freedesktop/ci-templates'
ref: *template_sha
file:
- '/templates/debian.yml'
- '/templates/freebsd.yml'
- '/templates/ci-fairy.yml'
variables:
FDO_UPSTREAM_REPO: mesa/libdrm
FDO_REPO_SUFFIX: "$BUILD_OS/$BUILD_ARCH"
- project: 'wayland/ci-templates'
ref: 0a9bdd33a98f05af6761ab118b5074952242aab0
file: '/templates/debian.yml'
stages:
- "Base container"
- "Build"
- containers
- build
.ci-rules:
rules:
- when: on_success
# When & how to run the CI
.ci-run-policy:
except:
- schedules
retry:
max: 2
when:
- runner_system_failure
# CONTAINERS
.os-debian:
variables:
BUILD_OS: debian
FDO_DISTRIBUTION_VERSION: bookworm
FDO_DISTRIBUTION_PACKAGES: 'build-essential docbook-xsl libatomic-ops-dev libcairo2-dev libcunit1-dev libpciaccess-dev meson ninja-build pkg-config python3 python3-pip python3-wheel python3-setuptools python3-docutils valgrind'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2024-06-25.0"
.debian-x86_64:
debian:
stage: containers
extends:
- .os-debian
- .ci-run-policy
- .debian@container-ifnot-exists
variables:
BUILD_ARCH: "x86-64"
GIT_STRATEGY: none # no need to pull the whole tree for rebuilding the image
DEBIAN_EXEC: 'bash .gitlab-ci/debian-install.sh'
.debian-aarch64:
extends:
- .os-debian
variables:
BUILD_ARCH: "aarch64"
.debian-armv7:
extends:
- .os-debian
variables:
BUILD_ARCH: "armv7"
FDO_DISTRIBUTION_PLATFORM: linux/arm/v7
.os-freebsd:
variables:
BUILD_OS: freebsd
FDO_DISTRIBUTION_VERSION: "14.2"
FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess textproc/py-docutils cairo'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2025-05-22.0"
.freebsd-x86_64:
extends:
- .os-freebsd
variables:
BUILD_ARCH: "x86_64"
# Build our base container image, which contains the core distribution, the
# toolchain, and all our build dependencies. This will be reused in the build
# stage.
x86_64-debian-container_prep:
extends:
- .ci-rules
- .debian-x86_64
- .fdo.container-build@debian
stage: "Base container"
variables:
GIT_STRATEGY: none
aarch64-debian-container_prep:
extends:
- .ci-rules
- .debian-aarch64
- .fdo.container-build@debian
tags:
- aarch64
stage: "Base container"
variables:
GIT_STRATEGY: none
armv7-debian-container_prep:
extends:
- .ci-rules
- .debian-armv7
- .fdo.container-build@debian
tags:
- aarch64
stage: "Base container"
variables:
GIT_STRATEGY: none
FDO_BASE_IMAGE: "arm32v7/debian:$FDO_DISTRIBUTION_VERSION"
x86_64-freebsd-container_prep:
extends:
- .ci-rules
- .freebsd-x86_64
- .fdo.qemu-build@freebsd@x86_64
stage: "Base container"
variables:
GIT_STRATEGY: none
# Core build environment.
.build-env:
variables:
MESON_BUILD_TYPE: "-Dbuildtype=debug -Doptimization=0 -Db_sanitize=address,undefined"
# OS/architecture-specific variants
.build-env-debian-x86_64:
extends:
- .fdo.suffixed-image@debian
- .debian-x86_64
- .build-env
needs:
- job: x86_64-debian-container_prep
artifacts: false
.build-env-debian-aarch64:
extends:
- .fdo.suffixed-image@debian
- .debian-aarch64
- .build-env
variables:
# At least with the versions we have, the LSan runtime makes fork unusably
# slow on AArch64, which is bad news since the test suite decides to fork
# for every single subtest. For now, in order to get AArch64 builds and
# tests into CI, just assume that we're not going to leak any more on
# AArch64 than we would on ARMv7 or x86-64.
ASAN_OPTIONS: "detect_leaks=0"
tags:
- aarch64
needs:
- job: aarch64-debian-container_prep
artifacts: false
.build-env-debian-armv7:
extends:
- .fdo.suffixed-image@debian
- .debian-armv7
- .build-env
tags:
- aarch64
needs:
- job: armv7-debian-container_prep
artifacts: false
.build-env-freebsd-x86_64:
variables:
# Compiling with ASan+UBSan appears to trigger an infinite loop in the
# compiler shipped with FreeBSD 13.0, so we only use UBSan here.
# Additionally, sanitizers can't be used with b_lundef on FreeBSD.
MESON_BUILD_TYPE: "-Dbuildtype=debug -Db_sanitize=undefined -Db_lundef=false"
extends:
- .fdo.suffixed-image@freebsd
- .freebsd-x86_64
- .build-env
needs:
- job: x86_64-freebsd-container_prep
artifacts: false
# BUILD
.do-build:
extends:
- .ci-rules
stage: "Build"
.meson-build:
stage: build
variables:
GIT_DEPTH: 10
script:
- meson setup build
--fatal-meson-warnings --auto-features=enabled
- meson build
-D amdgpu=true
-D cairo-tests=true
-D etnaviv=true
-D exynos=true
-D freedreno=true
-D freedreno-kgsl=true
-D intel=true
-D libkms=true
-D man-pages=true
-D nouveau=true
-D omap=true
-D radeon=true
-D tegra=true
-D udev=true
-D valgrind=auto
-D vc4=true
-D vmwgfx=true
${CROSS+--cross /cross_file-$CROSS.txt}
- ninja -C build
- ninja -C build test
- DESTDIR=$PWD/install ninja -C build install
@ -201,65 +83,49 @@ x86_64-freebsd-container_prep:
paths:
- build/meson-logs/*
.do-build-qemu:
meson-x86_64:
extends:
- .ci-rules
stage: "Build"
script:
# Start the VM and copy our workspace to the VM
- /app/vmctl start
- scp -r $PWD "vm:"
# The `set +e` is needed to ensure that we always copy the meson logs back to
# the workspace to see details about the failed tests.
- |
set +e
/app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson setup build --fatal-meson-warnings --auto-features=enabled -D etnaviv=disabled -D nouveau=disabled -D valgrind=disabled && ninja -C build"
set -ex
scp -r vm:$CI_PROJECT_NAME/build/meson-logs .
/app/vmctl exec "ninja -C $CI_PROJECT_NAME/build install"
mkdir -p $PREFIX && scp -r vm:$PREFIX/ $PREFIX/
# Finally, shut down the VM.
- /app/vmctl stop
artifacts:
when: on_failure
paths:
- build/meson-logs/*
- .ci-run-policy
- .meson-build
image: $DEBIAN_IMAGE
needs:
- debian
# Full build and test.
x86_64-debian-build:
extends:
- .build-env-debian-x86_64
- .do-build
meson-i386:
extends: meson-x86_64
variables:
CROSS: i386
aarch64-debian-build:
extends:
- .build-env-debian-aarch64
- .do-build
meson-aarch64:
extends: meson-x86_64
variables:
CROSS: arm64
armv7-debian-build:
extends:
- .build-env-debian-armv7
- .do-build
meson-armhf:
extends: meson-x86_64
variables:
CROSS: armhf
meson-ppc64el:
extends: meson-x86_64
variables:
CROSS: ppc64el
# Daily build
meson-arch-daily:
rules:
- if: '$SCHEDULE == "arch-daily"'
when: on_success
- when: never
image: archlinux/archlinux:base-devel
image: archlinux/base
before_script:
- pacman -Syu --noconfirm --needed
base-devel
cairo
cunit
docbook-xsl
libatomic_ops
libpciaccess
libxslt
meson
valgrind
python-docutils
extends: .do-build
x86_64-freebsd-build:
extends:
- .build-env-freebsd-x86_64
- .do-build-qemu
extends: .meson-build


@ -0,0 +1,67 @@
#!/usr/bin/env bash
set -o errexit
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
CROSS_ARCHITECTURES=(i386 armhf arm64 ppc64el)
for arch in ${CROSS_ARCHITECTURES[@]}; do
dpkg --add-architecture $arch
done
apt-get install -y \
ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster-backports main' >/etc/apt/sources.list.d/backports.list
apt-get update
# Use newer packages from backports by default
cat >/etc/apt/preferences <<EOF
Package: *
Pin: release a=buster-backports
Pin-Priority: 500
EOF
apt-get dist-upgrade -y
apt-get install -y --no-remove \
build-essential \
docbook-xsl \
libatomic-ops-dev \
libcairo2-dev \
libcunit1-dev \
libpciaccess-dev \
libxslt1-dev \
meson \
ninja-build \
pkg-config \
python3 \
python3-pip \
python3-wheel \
python3-setuptools \
valgrind \
xsltproc
for arch in ${CROSS_ARCHITECTURES[@]}; do
cross_file=/cross_file-$arch.txt
# Cross-build libdrm deps
apt-get install -y --no-remove \
libcairo2-dev:$arch \
libpciaccess-dev:$arch \
crossbuild-essential-$arch
# Generate cross build files for Meson
/usr/share/meson/debcrossgen --arch $arch -o $cross_file
# Work around a bug in debcrossgen that should be fixed in the next release
if [ $arch = i386 ]; then
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" $cross_file
fi
done
# Test that the oldest Meson version we claim to support is still supported
pip3 install meson==0.43


@ -1,97 +0,0 @@
//
// Copyright © 2011-2012 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including the next
// paragraph) shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
subdirs = ["*"]
build = ["Android.sources.bp"]
cc_defaults {
name: "libdrm_defaults",
cflags: [
// XXX: Consider moving these to config.h analogous to autoconf.
"-DMAJOR_IN_SYSMACROS=1",
"-DHAVE_VISIBILITY=1",
"-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1",
"-Wall",
"-Werror",
"-Wno-deprecated-declarations",
"-Wno-format",
"-Wno-gnu-variable-sized-type-not-at-end",
"-Wno-implicit-function-declaration",
"-Wno-int-conversion",
"-Wno-missing-field-initializers",
"-Wno-pointer-arith",
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
export_system_include_dirs: ["."],
}
cc_library_headers {
name: "libdrm_headers",
vendor_available: true,
host_supported: true,
defaults: ["libdrm_defaults"],
export_include_dirs: ["include/drm", "android"],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}
genrule {
name: "generated_static_table_fourcc_h",
out: ["generated_static_table_fourcc.h"],
srcs: ["include/drm/drm_fourcc.h"],
tool_files: ["gen_table_fourcc.py"],
cmd: "python3 $(location gen_table_fourcc.py) $(in) $(out)",
}
// Library for the device
cc_library {
name: "libdrm",
recovery_available: true,
vendor_available: true,
host_supported: true,
defaults: [
"libdrm_defaults",
"libdrm_sources",
],
generated_headers: [
"generated_static_table_fourcc_h",
],
export_include_dirs: ["include/drm", "android"],
cflags: [
"-Wno-enum-conversion",
"-Wno-pointer-arith",
"-Wno-sign-compare",
"-Wno-tautological-compare",
],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}

Android.common.mk (new file, 22 lines)

@ -0,0 +1,22 @@
# XXX: Consider moving these to config.h analogous to autoconf.
LOCAL_CFLAGS += \
-DMAJOR_IN_SYSMACROS=1 \
-DHAVE_ALLOCA_H=0 \
-DHAVE_SYS_SELECT_H=0 \
-DHAVE_SYS_SYSCTL_H=0 \
-DHAVE_VISIBILITY=1 \
-fvisibility=hidden \
-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
LOCAL_CFLAGS += \
-Wno-error \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-Wno-pointer-arith \
-Wno-enum-conversion
# Quiet down the build system and remove any .h files from the sources
LOCAL_SRC_FILES := $(patsubst %.h, , $(LOCAL_SRC_FILES))
LOCAL_EXPORT_C_INCLUDE_DIRS += $(LOCAL_PATH)
LOCAL_PROPRIETARY_MODULE := true

Android.mk (new file, 74 lines)

@ -0,0 +1,74 @@
#
# Copyright © 2011-2012 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
LIBDRM_ANDROID_MAJOR_VERSION := $(word 1, $(subst ., , $(PLATFORM_VERSION)))
ifneq ($(filter 2 4, $(LIBDRM_ANDROID_MAJOR_VERSION)),)
$(error "Android 4.4 and earlier not supported")
endif
LIBDRM_COMMON_MK := $(call my-dir)/Android.common.mk
LOCAL_PATH := $(call my-dir)
LIBDRM_TOP := $(LOCAL_PATH)
include $(CLEAR_VARS)
# Import variables LIBDRM_{,H,INCLUDE_H,INCLUDE_ANDROID_H,INCLUDE_VMWGFX_H}_FILES
include $(LOCAL_PATH)/Makefile.sources
#static library for the device (recovery)
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
# Shared library for the device
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_SHARED_LIBRARIES := \
libcutils
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)
include $(call all-makefiles-under,$(LOCAL_PATH))


@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_sources",
srcs: [
"xf86drm.c",
"xf86drmHash.c",
"xf86drmRandom.c",
"xf86drmSL.c",
"xf86drmMode.c",
],
}


@ -1,25 +0,0 @@
# Usage: make -f path/to/Android.sources.bp.mk NAMES=<> >Android.sources.bp
#
# It will read the Makefile.sources in the current directory, and
# write <NAME>_FILES to stdout as an Android.bp cc_defaults module.
.PHONY: all
all:
@# Do nothing
include Makefile.sources
empty :=
indent := $(empty) $(empty)
$(info // Autogenerated with Android.sources.bp.mk)
$(foreach NAME,$(NAMES), \
$(eval lower_name := $(shell echo $(PREFIX)$(NAME) | tr 'A-Z' 'a-z')) \
$(info ) \
$(info cc_defaults {) \
$(info $(indent)name: "$(lower_name)_sources",) \
$(info $(indent)srcs: [) \
$(foreach f,$(filter %.c,$($(NAME)_FILES)), \
$(info $(indent)$(indent)"$(f)",)) \
$(info $(indent)],) \
$(info }))

Makefile.sources (new file, 45 lines)

@ -0,0 +1,45 @@
LIBDRM_FILES := \
xf86drm.c \
xf86drmHash.c \
xf86drmHash.h \
xf86drmRandom.c \
xf86drmRandom.h \
xf86drmSL.c \
xf86drmMode.c \
xf86atomic.h \
libdrm_macros.h \
libdrm_lists.h \
util_double_list.h \
util_math.h
LIBDRM_H_FILES := \
libsync.h \
xf86drm.h \
xf86drmMode.h
LIBDRM_INCLUDE_H_FILES := \
include/drm/drm.h \
include/drm/drm_fourcc.h \
include/drm/drm_mode.h \
include/drm/drm_sarea.h \
include/drm/i915_drm.h \
include/drm/mach64_drm.h \
include/drm/mga_drm.h \
include/drm/msm_drm.h \
include/drm/nouveau_drm.h \
include/drm/qxl_drm.h \
include/drm/r128_drm.h \
include/drm/radeon_drm.h \
include/drm/amdgpu_drm.h \
include/drm/savage_drm.h \
include/drm/sis_drm.h \
include/drm/tegra_drm.h \
include/drm/vc4_drm.h \
include/drm/via_drm.h \
include/drm/virtgpu_drm.h
LIBDRM_INCLUDE_ANDROID_H_FILES := \
android/gralloc_handle.h
LIBDRM_INCLUDE_VMWGFX_H_FILES := \
include/drm/vmwgfx_drm.h


@ -13,24 +13,6 @@ but a new libdrm will always work with an older kernel.
libdrm is a low-level library, typically used by graphics drivers such as
the Mesa drivers, the X drivers, libva and similar projects.
Syncing with the Linux kernel headers
-------------------------------------
The library should be regularly updated to match the recent changes in the
`include/uapi/drm/`.
libdrm maintains human-readable names for format modifier tokens; the simpler
ones are extracted automatically from the `drm_fourcc.h` header file with the
help of a python script. This might not always be possible, as some vendors
require decoding/extracting them programmatically. For that reason one can
enhance the existing vendor functions to provide the newly added token
formats, or, in case there is no such decoding function, add one that
performs the task of extracting them.
For the simpler format modifier tokens there is a script (gen_table_fourcc.py)
that creates a static table by going over the `drm_fourcc.h` header file. If
the script cannot handle a newly added (simpler) format modifier token, the
script should be adjusted, rather than hand-editing the generated static table.
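A minimal sketch of how the resulting names are consumed, assuming the
drmGetFormatModifierName() helper from xf86drm.h and the I915_FORMAT_MOD_X_TILED
constant from drm_fourcc.h (both assumptions for this example, not part of the
diff above):

/* Sketch: look up the human-readable name of a format modifier.  The string
 * returned by libdrm is owned by the caller and must be freed. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <drm_fourcc.h>
#include <xf86drm.h>

int main(void)
{
    uint64_t modifier = I915_FORMAT_MOD_X_TILED;   /* example token only */
    char *name = drmGetFormatModifierName(modifier);

    printf("0x%" PRIx64 " -> %s\n", modifier, name ? name : "(unknown)");
    free(name);
    return 0;
}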
Compiling
---------
@ -49,15 +31,3 @@ Then use ninja to build and install:
If you are installing into a system location you will need to run install
separately, and as root.
AMDGPU ASIC table file
----------------------
The AMDGPU driver requires the `amdgpu.ids` file. It is usually located at
`$PREFIX/share/libdrm`, but a set of alternative paths can be specified at
runtime by setting the `AMDGPU_ASIC_ID_TABLE_PATHS` environment variable to
one or more colon-separated directories in which to search for the
`amdgpu.ids` file.
For this option to be available, the C library must provide the
secure_getenv() function. On systems without it (such as NetBSD), this option
is not available.
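A hedged sketch of using that variable from a client; the directory list and
the render-node path are made-up examples, and the lookup only takes effect
when the C library provides secure_getenv():

/* Sketch: search alternative directories for amdgpu.ids before device init. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <amdgpu.h>

int main(void)
{
    uint32_t major, minor;
    amdgpu_device_handle dev;
    const char *name;

    setenv("AMDGPU_ASIC_ID_TABLE_PATHS", "/opt/hwdata:/usr/local/share/libdrm", 1);

    int fd = open("/dev/dri/renderD128", O_RDWR);   /* node path is illustrative */
    if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
        return 1;

    name = amdgpu_get_marketing_name(dev);
    printf("marketing name: %s\n", name ? name : "(unknown)");
    return amdgpu_device_deinitialize(dev);
}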


@ -18,9 +18,9 @@ Follow these steps to release a new version of libdrm:
builddir/meson-dist/ matches the number you bumped to. Move that
tarball to the libdrm repo root for the release script to pick up.
3) Push the updated main branch with the bumped version number:
3) Push the updated master branch with the bumped version number:
git push origin main
git push origin master
assuming the remote for the upstream libdrm repo is called origin.


@ -1,16 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_amdgpu",
cflags: [
"-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\""
],
defaults: [
"libdrm_defaults",
"libdrm_amdgpu_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

amdgpu/Android.mk (new file, 19 lines)

@ -0,0 +1,19 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_AMDGPU_FILES, LIBDRM_AMDGPU_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_amdgpu
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_AMDGPU_FILES)
LOCAL_CFLAGS := \
-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\"
LOCAL_REQUIRED_MODULES := amdgpu.ids
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)


@ -1,15 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_amdgpu_sources",
srcs: [
"amdgpu_asic_id.c",
"amdgpu_bo.c",
"amdgpu_cs.c",
"amdgpu_device.c",
"amdgpu_gpu_info.c",
"amdgpu_vamgr.c",
"amdgpu_vm.c",
"handle_table.c",
],
}

amdgpu/Makefile.sources (new file, 14 lines)

@ -0,0 +1,14 @@
LIBDRM_AMDGPU_FILES := \
amdgpu_asic_id.c \
amdgpu_bo.c \
amdgpu_cs.c \
amdgpu_device.c \
amdgpu_gpu_info.c \
amdgpu_internal.h \
amdgpu_vamgr.c \
amdgpu_vm.c \
handle_table.c \
handle_table.h
LIBDRM_AMDGPU_H_FILES := \
amdgpu.h


@ -14,7 +14,6 @@ amdgpu_bo_query_info
amdgpu_bo_set_metadata
amdgpu_bo_va_op
amdgpu_bo_va_op_raw
amdgpu_bo_va_op_raw2
amdgpu_bo_wait_for_idle
amdgpu_create_bo_from_user_mem
amdgpu_cs_chunk_fence_info_to_data
@ -26,7 +25,6 @@ amdgpu_cs_ctx_create
amdgpu_cs_ctx_create2
amdgpu_cs_ctx_free
amdgpu_cs_ctx_override_priority
amdgpu_cs_ctx_stable_pstate
amdgpu_cs_destroy_semaphore
amdgpu_cs_destroy_syncobj
amdgpu_cs_export_syncobj
@ -55,9 +53,7 @@ amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore
amdgpu_device_deinitialize
amdgpu_device_get_fd
amdgpu_device_initialize
amdgpu_device_initialize2
amdgpu_find_bo_by_cpu_mapping
amdgpu_get_marketing_name
amdgpu_query_buffer_size_alignment
@ -65,26 +61,14 @@ amdgpu_query_crtc_from_id
amdgpu_query_firmware_version
amdgpu_query_gds_info
amdgpu_query_gpu_info
amdgpu_query_gpuvm_fault_info
amdgpu_query_heap_info
amdgpu_query_hw_ip_count
amdgpu_query_hw_ip_info
amdgpu_query_info
amdgpu_query_sensor_info
amdgpu_query_uq_fw_area_info
amdgpu_query_video_caps_info
amdgpu_read_mm_registers
amdgpu_va_manager_alloc
amdgpu_va_manager_init
amdgpu_va_manager_deinit
amdgpu_va_range_alloc
amdgpu_va_range_alloc2
amdgpu_va_range_free
amdgpu_va_get_start_addr
amdgpu_va_range_query
amdgpu_vm_reserve_vmid
amdgpu_vm_unreserve_vmid
amdgpu_create_userqueue
amdgpu_free_userqueue
amdgpu_userq_signal
amdgpu_userq_wait


@ -42,10 +42,7 @@ extern "C" {
#endif
struct drm_amdgpu_info_hw_ip;
struct drm_amdgpu_info_uq_fw_areas;
struct drm_amdgpu_bo_list_entry;
struct drm_amdgpu_userq_signal;
struct drm_amdgpu_userq_wait;
/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
@ -141,12 +138,6 @@ typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
*/
typedef struct amdgpu_va *amdgpu_va_handle;
/**
* Define handle dealing with VA allocation. An amdgpu_device
* owns one of these, but they can also be used without a device.
*/
typedef struct amdgpu_va_manager *amdgpu_va_manager_handle;
/**
* Define handle for semaphore
*/
@ -536,20 +527,6 @@ int amdgpu_device_initialize(int fd,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
/**
* Same as amdgpu_device_initialize() except when deduplicate_device
* is false *and* fd points to a device that was already initialized.
* In this case, amdgpu_device_initialize would return the same
* amdgpu_device_handle while here amdgpu_device_initialize2 would
* return a new handle.
* amdgpu_device_initialize() should be preferred in most situations;
* the only use-case where not-deduplicating devices make sense is
* when one wants to have isolated device handles in the same process.
*/
int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
/**
*
* When access to the library is no longer needed, the special
@ -569,19 +546,6 @@ int amdgpu_device_initialize2(int fd, bool deduplicate_device,
*/
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
/**
*
* \param device_handle - \c [in] Device handle.
* See #amdgpu_device_initialize()
*
* \return Returns the drm fd used for operations on this
* device. This is still owned by the library and hence
* should not be closed. Guaranteed to be valid until
* #amdgpu_device_deinitialize gets called.
*
*/
int amdgpu_device_get_fd(amdgpu_device_handle device_handle);
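A hedged usage sketch for the two entry points above; the device-node path and
the choice not to deduplicate are illustrative, not taken from the diff:

/* Sketch: create an isolated (non-deduplicated) handle for an fd and query
 * the fd the library uses internally. */
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <amdgpu.h>

static int open_isolated(const char *path, amdgpu_device_handle *out)
{
    uint32_t major, minor;
    int fd = open(path, O_RDWR);

    if (fd < 0)
        return -1;

    /* deduplicate_device = false: always returns a fresh handle for this fd */
    if (amdgpu_device_initialize2(fd, false, &major, &minor, out))
        return -1;

    /* library-owned fd; valid until amdgpu_device_deinitialize() */
    printf("drm %u.%u, internal fd %d\n", major, minor, amdgpu_device_get_fd(*out));
    return 0;
}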
/*
* Memory Management
*
@ -962,21 +926,6 @@ int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
int master_fd,
unsigned priority);
/**
* Set or query the stable power state for GPU profiling.
*
* \param dev - \c [in] device handle
* \param op - \c [in] AMDGPU_CTX_OP_{GET,SET}_STABLE_PSTATE
* \param flags - \c [in] AMDGPU_CTX_STABLE_PSTATE_*
* \param out_flags - \c [out] output current stable pstate
*
* \return 0 on success otherwise POSIX Error code.
*/
int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
uint32_t op,
uint32_t flags,
uint32_t *out_flags);
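A sketch of the intended save/force/restore pattern around a profiling run;
the AMDGPU_CTX_OP_* and AMDGPU_CTX_STABLE_PSTATE_* constants are assumed to
come from amdgpu_drm.h, and the choice of the "peak" state is only an example:

/* Sketch: pin clocks to the peak stable pstate while measuring, then restore. */
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int profile_with_stable_clocks(amdgpu_context_handle ctx)
{
    uint32_t saved;
    int r;

    r = amdgpu_cs_ctx_stable_pstate(ctx, AMDGPU_CTX_OP_GET_STABLE_PSTATE, 0, &saved);
    if (r)
        return r;

    r = amdgpu_cs_ctx_stable_pstate(ctx, AMDGPU_CTX_OP_SET_STABLE_PSTATE,
                                    AMDGPU_CTX_STABLE_PSTATE_PEAK, NULL);
    if (r)
        return r;

    /* ... submit and time the workload here ... */

    return amdgpu_cs_ctx_stable_pstate(ctx, AMDGPU_CTX_OP_SET_STABLE_PSTATE,
                                       saved, NULL);
}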
/**
* Query reset state for the specific GPU Context
*
@ -1175,26 +1124,6 @@ int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info);
/**
* Query FW area related information.
*
* The return size is query-specific and depends on the "type" parameter.
* No more than "size" bytes is returned.
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param type - \c [in] AMDGPU_HW_IP_*
* \param ip_instance - \c [in] HW IP index.
* \param info - \c [out] The pointer to return value
*
* \return 0 on success\n
* <0 - Negative POSIX error code
*
*/
int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info);
/**
* Query heap information
*
@ -1308,39 +1237,6 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
unsigned size, void *value);
/**
* Query information about video capabilities
*
* The returned data has size sizeof(struct drm_amdgpu_info_video_caps)
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param caps_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
* \param size - \c [in] Size of the returned value.
* \param value - \c [out] Pointer to the return value.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
unsigned size, void *value);
/**
* Query information about VM faults
*
* The returned data has size sizeof(struct drm_amdgpu_info_gpuvm_fault)
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param size - \c [in] Size of the returned value.
* \param value - \c [out] Pointer to the return value.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev, unsigned size,
void *value);
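A sketch of the video-caps query; struct drm_amdgpu_info_video_caps, the
AMDGPU_INFO_VIDEO_CAPS_DECODE selector and the HEVC codec index are assumed to
match the kernel's amdgpu_drm.h, and the codec printed is only an example:

/* Sketch: print the maximum HEVC decode resolution advertised by the device. */
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void print_decode_caps(amdgpu_device_handle dev)
{
    struct drm_amdgpu_info_video_caps caps;

    if (amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_DECODE,
                                     sizeof(caps), &caps))
        return;

    /* each codec_info[] entry reports limits such as max_width/max_height */
    printf("HEVC decode up to %ux%u\n",
           caps.codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC].max_width,
           caps.codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC].max_height);
}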
/**
* Read a set of consecutive memory-mapped registers.
* Not all registers are allowed to be read by userspace.
@ -1367,7 +1263,6 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
*/
#define AMDGPU_VA_RANGE_32_BIT 0x1
#define AMDGPU_VA_RANGE_HIGH 0x2
#define AMDGPU_VA_RANGE_REPLAYABLE 0x4
/**
* Allocate virtual address range
@ -1427,11 +1322,6 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
*/
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
/**
* Return the starting address of the allocated virtual address range.
*/
uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle);
/**
* Query virtual address range
*
@ -1453,37 +1343,6 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
uint64_t *start,
uint64_t *end);
/**
* Allocate a amdgpu_va_manager object.
* The returned object has to be initialized with the amdgpu_va_manager_init
* before use.
* On release, amdgpu_va_manager_deinit needs to be called, then the memory
* can be released using free().
*/
amdgpu_va_manager_handle amdgpu_va_manager_alloc(void);
void amdgpu_va_manager_init(amdgpu_va_manager_handle va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment);
void amdgpu_va_manager_deinit(amdgpu_va_manager_handle va_mgr);
/**
* Similar to #amdgpu_va_range_alloc() but allocates VA
* directly from an amdgpu_va_manager_handle instead of using
* the manager from an amdgpu_device.
*/
int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags);
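A sketch of the standalone VA-manager flow; the address-space bounds passed to
amdgpu_va_manager_init() are placeholders that real code would take from
struct drm_amdgpu_info_device:

/* Sketch: carve a 1 MiB GPU VA hole out of a device-less VA manager. */
#include <stdint.h>
#include <stdlib.h>
#include <amdgpu.h>

static void va_manager_demo(void)
{
    amdgpu_va_manager_handle mgr = amdgpu_va_manager_alloc();
    amdgpu_va_handle range;
    uint64_t va = 0;

    if (!mgr)
        return;

    /* placeholder ranges: low 2 MiB..4 GiB, high 1 TiB..1 TiB+4 GiB, 64 KiB alignment */
    amdgpu_va_manager_init(mgr, 0x200000, 1ULL << 32,
                           1ULL << 40, (1ULL << 40) + (1ULL << 32), 0x10000);

    if (!amdgpu_va_range_alloc2(mgr, amdgpu_gpu_va_range_general, 1 << 20,
                                0, 0, &va, &range, 0)) {
        /* va is also available through amdgpu_va_get_start_addr(range) */
        amdgpu_va_range_free(range);
    }

    amdgpu_va_manager_deinit(mgr);
    free(mgr);   /* per the header doc: deinit first, then plain free() */
}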
/**
* VA mapping/unmapping for the buffer object
*
@ -1534,42 +1393,6 @@ int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
uint64_t flags,
uint32_t ops);
/**
* VA mapping/unmapping of buffer object for usermode queue.
*
* This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
* parameters are treated "raw2", i.e. size is not automatically aligned, and
* all flags must be specified explicitly.
*
* \param dev - \c [in] device handle
* \param bo - \c [in] BO handle (may be NULL)
* \param offset - \c [in] Start offset to map
* \param size - \c [in] Size to map
* \param addr - \c [in] Start virtual address.
* \param flags - \c [in] Supported flags for mapping/unmapping
* \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
* \param vm_timeline_syncobj_out - \c [out] syncobj handle for PT update fence
* \param vm_timeline_point - \c [in] input timeline point
* \param input_fence_syncobj_handles - \c [in] Array of syncobj handles for bo unmap,
* clear and replace
* \param num_syncobj_handles - \c [in] Number of syncobj handles
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_array_in,
uint32_t num_syncobj_handles_in);
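A sketch of a raw2 mapping for a user queue; the flag combination and the
single timeline syncobj are illustrative, with AMDGPU_VM_PAGE_* and
AMDGPU_VA_OP_MAP assumed from amdgpu_drm.h:

/* Sketch: map "size" bytes of "bo" at "gpu_va" and have the kernel signal a
 * timeline syncobj point once the page-table update completes. */
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int map_for_userq(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                         uint64_t size, uint64_t gpu_va,
                         uint32_t timeline_syncobj, uint64_t point)
{
    return amdgpu_bo_va_op_raw2(dev, bo,
                                0,            /* offset inside the BO */
                                size,         /* caller must pre-align */
                                gpu_va,
                                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE,
                                AMDGPU_VA_OP_MAP,
                                timeline_syncobj, point,
                                0,            /* no input fence syncobj array */
                                0);
}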
/**
* create semaphore
*
@ -2001,65 +1824,6 @@ int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
*/
int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
/**
* Create USERQUEUE
* \param dev - \c [in] device handle
* \param ip_type - \c [in] ip type
* \param doorbell_handle - \c [in] doorbell handle
* \param doorbell_offset - \c [in] doorbell index
* \param mqd_in - \c [in] MQD data
* \param queue_va - \c [in] Virtual address of queue
* \param queue_size - \c [in] userqueue size
* \param wptr_va - \c [in] Virtual address of wptr
* \param rptr_va - \c [in] Virtual address of rptr
* \param queue_id - \c [out] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id);
/**
* Free USERQUEUE
* \param dev - \c [in] device handle
* \param queue_id - \c [in] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id);
/**
* Signal USERQUEUE
* \param dev - \c [in] device handle
* \param signal_data - \c [in] pointer to struct drm_amdgpu_userq_signal
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data);
/**
* Wait USERQUEUE
* \param dev - \c [in] device handle
* \param wait_data - \c [in/out] pointer to struct drm_amdgpu_userq_wait
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data);
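A sketch of the user-queue lifecycle built on the calls above; every VA, the
doorbell handle and the MQD contents are assumed to have been prepared
elsewhere (BO allocation and VA mapping are not shown), and the ring size and
flags are placeholders rather than a working configuration:

/* Sketch: create a GFX user queue, use it, and free it again. */
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int userq_roundtrip(amdgpu_device_handle dev,
                           uint32_t doorbell_handle, uint32_t doorbell_offset,
                           uint64_t queue_va, uint64_t wptr_va, uint64_t rptr_va,
                           void *mqd)
{
    uint32_t queue_id;
    int r;

    r = amdgpu_create_userqueue(dev, AMDGPU_HW_IP_GFX,
                                doorbell_handle, doorbell_offset,
                                queue_va, 256 * 1024,   /* ring size: placeholder */
                                wptr_va, rptr_va, mqd,
                                0 /* flags */, &queue_id);
    if (r)
        return r;

    /* ... write packets to the ring and ring the doorbell here ... */

    return amdgpu_free_userqueue(dev, queue_id);
}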
#ifdef __cplusplus
}
#endif


@ -22,11 +22,6 @@
*
*/
// secure_getenv requires _GNU_SOURCE
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
@ -109,168 +104,6 @@ out:
return r;
}
static void amdgpu_parse_proc_cpuinfo(struct amdgpu_device *dev)
{
const char *search_key = "model name";
const char *radeon_key = "Radeon";
char *line = NULL;
size_t len = 0;
FILE *fp;
fp = fopen("/proc/cpuinfo", "r");
if (fp == NULL) {
fprintf(stderr, "%s\n", strerror(errno));
return;
}
while (getline(&line, &len, fp) != -1) {
char *saveptr;
char *value;
if (strncmp(line, search_key, strlen(search_key)))
continue;
/* check for parts that have both CPU and GPU information */
value = strstr(line, radeon_key);
/* get content after the first colon */
if (value == NULL) {
value = strstr(line, ":");
if (value == NULL)
continue;
value++;
}
/* strip whitespace */
while (*value == ' ' || *value == '\t')
value++;
saveptr = strchr(value, '\n');
if (saveptr)
*saveptr = '\0';
/* Add AMD to the new string if it's missing from slicing/dicing */
if (strncmp(value, "AMD", 3) != 0) {
char *tmp = malloc(strlen(value) + 5);
if (!tmp)
break;
sprintf(tmp, "AMD %s", value);
dev->marketing_name = tmp;
} else
dev->marketing_name = strdup(value);
break;
}
free(line);
fclose(fp);
}
#if HAVE_SECURE_GETENV
static char *join_path(const char *dir, const char *file) {
size_t dir_len = strlen(dir);
size_t file_len = strlen(file);
char *full_path = NULL;
int need_slash = ((dir_len > 0) && (dir[dir_len - 1] != '/'));
size_t total_len = dir_len + (need_slash ? 1 : 0) + file_len + 1; // +1 for null terminator
if (dir_len == 0) {
return strdup(file);
}
full_path = malloc(total_len);
if (!full_path) {
return NULL; // Memory allocation failed
}
strcpy(full_path, dir);
if (need_slash) {
full_path[dir_len] = '/';
dir_len++;
}
strcpy(full_path + dir_len, file);
return full_path;
}
static char **split_env_var(const char *env_var_content)
{
char **ret = NULL;
char *dup_env_val;
int elements = 1;
int index = 1;
if (!env_var_content || env_var_content[0] == '\0')
return NULL;
for(char *p = (char *)env_var_content; *p; p++) {
if (*p == ':')
elements++;
}
dup_env_val = strdup(env_var_content);
if (!dup_env_val) {
return NULL;
}
ret = malloc(sizeof(char *) * (elements + 1));
ret[0] = dup_env_val;
for(char *p = (char *)dup_env_val; *p; p++) {
if (*p == ':') {
*p = 0;
ret[index++] = p + 1;
}
}
ret[index] = NULL; // ensure that the last element in the array is NULL
return ret;
}
static void split_env_var_free(char **split_var)
{
if (split_var) {
// remember that the first element also points to the whole duplicated string,
// which was modified in place by replacing ':' with '\0' characters
free(split_var[0]);
free(split_var);
}
}
static char *find_asic_id_table(void)
{
// first check the paths in AMDGPU_ASIC_ID_TABLE_PATHS environment variable
const char *amdgpu_asic_id_table_paths = secure_getenv("AMDGPU_ASIC_ID_TABLE_PATHS");
char *file_name = NULL;
char *found_path = NULL;
char **paths = NULL;
if (!amdgpu_asic_id_table_paths)
return NULL;
// extract the file name from AMDGPU_ASIC_ID_TABLE
file_name = strrchr(AMDGPU_ASIC_ID_TABLE, '/');
if (!file_name)
return NULL;
file_name++; // skip the '/'
paths = split_env_var(amdgpu_asic_id_table_paths);
if (!paths)
return NULL;
// for each path, join with file_name and check if it exists
for (int i = 0; paths[i] != NULL; i++) {
char *full_path = join_path(paths[i], file_name);
if (!full_path) {
continue;
}
if (access(full_path, R_OK) == 0) {
found_path = full_path;
break;
}
}
split_env_var_free(paths);
return found_path;
}
#endif
void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
{
FILE *fp;
@ -280,21 +113,11 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
int line_num = 1;
int r = 0;
char *amdgpu_asic_id_table_path = NULL;
#if HAVE_SECURE_GETENV
// if this system lacks secure_getenv(), don't allow extra paths
// for security reasons.
amdgpu_asic_id_table_path = find_asic_id_table();
#endif
// if not found, use the default AMDGPU_ASIC_ID_TABLE path
if (!amdgpu_asic_id_table_path)
amdgpu_asic_id_table_path = strdup(AMDGPU_ASIC_ID_TABLE);
fp = fopen(amdgpu_asic_id_table_path, "r");
fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
if (!fp) {
fprintf(stderr, "%s: %s\n", amdgpu_asic_id_table_path,
fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
strerror(errno));
goto get_cpu;
return;
}
/* 1st valid line is file version */
@ -309,7 +132,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
continue;
}
drmMsg("%s version: %s\n", amdgpu_asic_id_table_path, line);
drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
break;
}
@ -327,7 +150,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
if (r == -EINVAL) {
fprintf(stderr, "Invalid format: %s: line %d: %s\n",
amdgpu_asic_id_table_path, line_num, line);
AMDGPU_ASIC_ID_TABLE, line_num, line);
} else if (r && r != -EAGAIN) {
fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
__func__, strerror(-r));
@ -335,11 +158,4 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
free(line);
fclose(fp);
get_cpu:
free(amdgpu_asic_id_table_path);
if (dev->info.ids_flags & AMDGPU_IDS_FLAGS_FUSION &&
dev->marketing_name == NULL) {
amdgpu_parse_proc_cpuinfo(dev);
}
}


@ -39,6 +39,14 @@
#include "amdgpu_internal.h"
#include "util_math.h"
static int amdgpu_close_kms_handle(int fd, uint32_t handle)
{
struct drm_gem_close args = {};
args.handle = handle;
return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
}
static int amdgpu_bo_create(amdgpu_device_handle dev,
uint64_t size,
uint32_t handle,
@ -74,9 +82,6 @@ drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
union drm_amdgpu_gem_create args;
int r;
if (!alloc_buffer || !buf_handle)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.bo_size = alloc_buffer->alloc_size;
args.in.alignment = alloc_buffer->phys_alignment;
@ -96,7 +101,7 @@ drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
drmCloseBufferHandle(dev->fd, args.out.handle);
amdgpu_close_kms_handle(dev->fd, args.out.handle);
}
out:
@ -108,9 +113,6 @@ drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
{
struct drm_amdgpu_gem_metadata args = {};
if (!info)
return -EINVAL;
args.handle = bo->handle;
args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
args.data.flags = info->flags;
@ -138,7 +140,7 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
int r;
/* Validate the BO passed in */
if (!bo->handle || !info)
if (!bo->handle)
return -EINVAL;
/* Query metadata. */
@ -214,7 +216,7 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
bo->flink_name = flink.name;
if (bo->dev->flink_fd != bo->dev->fd)
drmCloseBufferHandle(bo->dev->flink_fd, handle);
amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
pthread_mutex_lock(&bo->dev->bo_table_mutex);
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
@ -340,8 +342,8 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
close(dma_fd);
if (r)
goto free_bo_handle;
r = drmCloseBufferHandle(dev->flink_fd,
open_arg.handle);
r = amdgpu_close_kms_handle(dev->flink_fd,
open_arg.handle);
if (r)
goto free_bo_handle;
}
@ -379,12 +381,12 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
free_bo_handle:
if (flink_name && open_arg.handle)
drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
if (bo)
amdgpu_bo_free(bo);
else
drmCloseBufferHandle(dev->fd, handle);
amdgpu_close_kms_handle(dev->fd, handle);
unlock:
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
@ -413,7 +415,7 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
amdgpu_bo_cpu_unmap(bo);
}
drmCloseBufferHandle(dev->fd, bo->handle);
amdgpu_close_kms_handle(dev->fd, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
}
@ -539,7 +541,7 @@ drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
amdgpu_bo_handle *buf_handle,
uint64_t *offset_in_bo)
{
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo *bo;
uint32_t i;
int r = 0;
@ -557,7 +559,7 @@ drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
continue;
if (cpu >= bo->cpu_ptr &&
cpu < (void*)((uintptr_t)bo->cpu_ptr + (size_t)bo->alloc_size))
cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
break;
}
@ -596,7 +598,7 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
drmCloseBufferHandle(dev->fd, args.handle);
amdgpu_close_kms_handle(dev->fd, args.handle);
}
out:
@ -648,7 +650,7 @@ drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
unsigned i;
int r;
if (!number_of_resources || !resources)
if (!number_of_resources)
return -EINVAL;
/* overflow check for multiplication */
@ -795,39 +797,3 @@ drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
return r;
}
drm_public int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_handles,
uint32_t num_syncobj_handles)
{
struct drm_amdgpu_gem_va va;
int r;
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
return -EINVAL;
memset(&va, 0, sizeof(va));
va.handle = bo ? bo->handle : 0;
va.operation = ops;
va.flags = flags;
va.va_address = addr;
va.offset_in_bo = offset;
va.map_size = size;
va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
va.vm_timeline_point = vm_timeline_point;
va.input_fence_syncobj_handles = input_fence_syncobj_handles;
va.num_syncobj_handles = num_syncobj_handles;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
return r;
}


@ -56,22 +56,10 @@ drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
union drm_amdgpu_ctx args;
int i, j, k;
int r;
char *override_priority;
if (!dev || !context)
return -EINVAL;
override_priority = getenv("AMD_PRIORITY");
if (override_priority) {
/* The priority is a signed integer. The variable type is
* wrong. If parsing fails, priority is unchanged.
*/
if (sscanf(override_priority, "%i", &priority) == 1) {
printf("amdgpu: context priority changed to %i\n",
priority);
}
}
gpu_context = calloc(1, sizeof(struct amdgpu_context));
if (!gpu_context)
return -ENOMEM;
@ -140,8 +128,8 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
amdgpu_semaphore_handle sem, tmp;
LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
amdgpu_semaphore_handle sem;
LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
list_del(&sem->list);
amdgpu_cs_reset_sem(sem);
amdgpu_cs_unreference_sem(sem);
@ -179,28 +167,6 @@ drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
uint32_t op,
uint32_t flags,
uint32_t *out_flags)
{
union drm_amdgpu_ctx args;
int r;
if (!context)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = op;
args.in.ctx_id = context->id;
args.in.flags = flags;
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
&args, sizeof(args));
if (!r && out_flags)
*out_flags = args.out.pstate.flags;
return r;
}
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs)
{
@ -598,31 +564,24 @@ drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
uint32_t ring,
amdgpu_semaphore_handle sem)
{
int ret;
if (!ctx || !sem)
return -EINVAL;
if (ip_type >= AMDGPU_HW_IP_NUM)
return -EINVAL;
if (ring >= AMDGPU_CS_MAX_RINGS)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
/* sem has been signaled */
if (sem->signal_fence.context) {
ret = -EINVAL;
goto unlock;
}
if (sem->signal_fence.context)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
sem->signal_fence.context = ctx;
sem->signal_fence.ip_type = ip_type;
sem->signal_fence.ip_instance = ip_instance;
sem->signal_fence.ring = ring;
sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
update_references(NULL, &sem->refcount);
ret = 0;
unlock:
pthread_mutex_unlock(&ctx->sequence_mutex);
return ret;
return 0;
}
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,


@ -95,26 +95,22 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
/* Remove dev from dev_list, if it was added there. */
if (dev == dev_list) {
dev_list = dev->next;
} else {
for (amdgpu_device_handle node = dev_list; node; node = node->next) {
if (node->next == dev) {
node->next = dev->next;
break;
}
}
}
amdgpu_device_handle *node = &dev_list;
pthread_mutex_lock(&dev_mutex);
while (*node != dev && (*node)->next)
node = &(*node)->next;
*node = (*node)->next;
pthread_mutex_unlock(&dev_mutex);
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
close(dev->flink_fd);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_low);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high);
amdgpu_vamgr_deinit(&dev->vamgr_32);
amdgpu_vamgr_deinit(&dev->vamgr);
amdgpu_vamgr_deinit(&dev->vamgr_high_32);
amdgpu_vamgr_deinit(&dev->vamgr_high);
handle_table_fini(&dev->bo_handles);
handle_table_fini(&dev->bo_flink_names);
pthread_mutex_destroy(&dev->bo_table_mutex);
@ -144,23 +140,22 @@ static void amdgpu_device_reference(struct amdgpu_device **dst,
*dst = src;
}
static int _amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle,
bool deduplicate_device)
drm_public int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
struct amdgpu_device *dev = NULL;
struct amdgpu_device *dev;
drmVersionPtr version;
int r;
int flag_auth = 0;
int flag_authexist=0;
uint32_t accel_working = 0;
uint64_t start, max;
*device_handle = NULL;
pthread_mutex_lock(&dev_mutex);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
@ -169,10 +164,9 @@ static int _amdgpu_device_initialize(int fd,
return r;
}
if (deduplicate_device)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
if (dev) {
r = amdgpu_get_auth(dev->fd, &flag_authexist);
@ -244,22 +238,35 @@ static int _amdgpu_device_initialize(int fd,
goto cleanup;
}
amdgpu_va_manager_init(&dev->va_mgr,
dev->dev_info.virtual_address_offset,
dev->dev_info.virtual_address_max,
dev->dev_info.high_va_offset,
dev->dev_info.high_va_max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.virtual_address_offset;
max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr, start, max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.high_va_offset;
max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high, start, max,
dev->dev_info.virtual_address_alignment);
amdgpu_parse_asic_ids(dev);
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
if (deduplicate_device) {
dev->next = dev_list;
dev_list = dev;
}
dev->next = dev_list;
dev_list = dev;
pthread_mutex_unlock(&dev_mutex);
return 0;
@ -272,41 +279,15 @@ cleanup:
return r;
}
drm_public int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, true);
}
drm_public int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, deduplicate_device);
}
drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
pthread_mutex_lock(&dev_mutex);
amdgpu_device_reference(&dev, NULL);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
{
return device_handle->fd;
}
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
if (dev->marketing_name)
return dev->marketing_name;
else
return "AMD Radeon Graphics";
return dev->marketing_name;
}
drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
@ -317,10 +298,10 @@ drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
switch (info) {
case amdgpu_sw_info_address32_hi:
if (dev->va_mgr.vamgr_high_32.va_max)
*val32 = (dev->va_mgr.vamgr_high_32.va_max - 1) >> 32;
if (dev->vamgr_high_32.va_max)
*val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
else
*val32 = (dev->va_mgr.vamgr_32.va_max - 1) >> 32;
*val32 = (dev->vamgr_32.va_max - 1) >> 32;
return 0;
}
return -EINVAL;


@ -137,24 +137,6 @@ drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)info;
request.return_size = sizeof(*info);
request.query = AMDGPU_INFO_UQ_FW_AREAS;
request.query_hw_ip.type = type;
request.query_hw_ip.ip_instance = ip_instance;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
int r, i;
@ -349,32 +331,3 @@ drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned senso
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)value;
request.return_size = size;
request.query = AMDGPU_INFO_VIDEO_CAPS;
request.sensor_info.type = cap_type;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)value;
request.return_size = size;
request.query = AMDGPU_INFO_GPUVM_FAULT;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}


@ -57,23 +57,13 @@ struct amdgpu_bo_va_mgr {
};
struct amdgpu_va {
amdgpu_device_handle dev;
uint64_t address;
uint64_t size;
enum amdgpu_gpu_va_range range;
struct amdgpu_bo_va_mgr *vamgr;
};
struct amdgpu_va_manager {
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr_low;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_device {
atomic_t refcount;
struct amdgpu_device *next;
@ -91,8 +81,14 @@ struct amdgpu_device {
pthread_mutex_t bo_table_mutex;
struct drm_amdgpu_info_device dev_info;
struct amdgpu_gpu_info info;
struct amdgpu_va_manager va_mgr;
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_bo {


@ -1,123 +0,0 @@
/*
* Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <string.h>
#include <errno.h>
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
drm_public int
amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id)
{
int ret;
union drm_amdgpu_userq userq;
uint64_t mqd_size;
if (!dev)
return -EINVAL;
switch (ip_type) {
case AMDGPU_HW_IP_GFX:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
break;
case AMDGPU_HW_IP_DMA:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
break;
case AMDGPU_HW_IP_COMPUTE:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
break;
default:
return -EINVAL;
}
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_CREATE;
userq.in.ip_type = ip_type;
userq.in.doorbell_handle = doorbell_handle;
userq.in.doorbell_offset = doorbell_offset;
userq.in.queue_va = queue_va;
userq.in.queue_size = queue_size;
userq.in.wptr_va = wptr_va;
userq.in.rptr_va = rptr_va;
userq.in.mqd = (uint64_t)mqd_in;
userq.in.mqd_size = mqd_size;
userq.in.flags = flags;
ret = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
*queue_id = userq.out.queue_id;
return ret;
}
drm_public int
amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id)
{
union drm_amdgpu_userq userq;
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_FREE;
userq.in.queue_id = queue_id;
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
}
drm_public int
amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
signal_data, sizeof(struct drm_amdgpu_userq_signal));
return r;
}
drm_public int
amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_WAIT,
wait_data, sizeof(struct drm_amdgpu_userq_wait));
return r;
}


@ -69,99 +69,65 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
pthread_mutex_destroy(&mgr->bo_va_mutex);
}
static drm_private int
amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
uint64_t end_va)
{
if (start_va > hole->offset && end_va - hole->offset < hole->size) {
struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
if (!n)
return -ENOMEM;
n->size = start_va - hole->offset;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
hole->size -= (end_va - hole->offset);
hole->offset = end_va;
} else if (start_va > hole->offset) {
hole->size = start_va - hole->offset;
} else if (end_va - hole->offset < hole->size) {
hole->size -= (end_va - hole->offset);
hole->offset = end_va;
} else {
list_del(&hole->list);
free(hole);
}
return 0;
}
static drm_private int
static drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
uint64_t alignment, uint64_t base_required,
bool search_from_top, uint64_t *va_out)
uint64_t alignment, uint64_t base_required)
{
struct amdgpu_bo_va_hole *hole, *n;
uint64_t offset = 0;
int ret;
uint64_t offset = 0, waste = 0;
alignment = MAX2(alignment, mgr->va_alignment);
size = ALIGN(size, mgr->va_alignment);
if (base_required % alignment)
return -EINVAL;
return AMDGPU_INVALID_VA_ADDRESS;
pthread_mutex_lock(&mgr->bo_va_mutex);
if (!search_from_top) {
LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
offset = base_required;
} else {
uint64_t waste = hole->offset % alignment;
waste = waste ? alignment - waste : 0;
offset = hole->offset + waste;
if (offset >= (hole->offset + hole->size) ||
size > (hole->offset + hole->size) - offset) {
continue;
}
LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
waste = base_required - hole->offset;
offset = base_required;
} else {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
pthread_mutex_unlock(&mgr->bo_va_mutex);
*va_out = offset;
return ret;
}
} else {
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
offset = base_required;
} else {
if (size > hole->size)
continue;
offset = hole->offset + hole->size - size;
offset -= offset % alignment;
if (offset < hole->offset) {
continue;
}
}
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
if (!waste && hole->size == size) {
offset = hole->offset;
list_del(&hole->list);
free(hole);
pthread_mutex_unlock(&mgr->bo_va_mutex);
*va_out = offset;
return ret;
return offset;
}
if ((hole->size - waste) > size) {
if (waste) {
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
n->size = waste;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
}
hole->size -= (size + waste);
hole->offset += size + waste;
pthread_mutex_unlock(&mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
pthread_mutex_unlock(&mgr->bo_va_mutex);
return offset;
}
}
pthread_mutex_unlock(&mgr->bo_va_mutex);
return -ENOMEM;
return AMDGPU_INVALID_VA_ADDRESS;
}
static drm_private void
@ -228,75 +194,60 @@ drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
return amdgpu_va_range_alloc2(&dev->va_mgr, va_range_type, size,
va_base_alignment, va_base_required,
va_base_allocated, va_range_handle,
flags);
}
drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
struct amdgpu_bo_va_mgr *vamgr;
bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
int ret;
/* Clear the flag when the high VA manager is not initialized */
if (flags & AMDGPU_VA_RANGE_HIGH && !va_mgr->vamgr_high_32.va_max)
if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
flags &= ~AMDGPU_VA_RANGE_HIGH;
if (flags & AMDGPU_VA_RANGE_HIGH) {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_high;
vamgr = &dev->vamgr_high;
} else {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_32;
vamgr = &dev->vamgr_32;
else
vamgr = &va_mgr->vamgr_low;
vamgr = &dev->vamgr;
}
va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
size = ALIGN(size, vamgr->va_alignment);
ret = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required,
search_from_top, va_base_allocated);
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required);
if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
(*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
/* fallback to 32bit address */
if (flags & AMDGPU_VA_RANGE_HIGH)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_32;
ret = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required,
search_from_top, va_base_allocated);
vamgr = &dev->vamgr_32;
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required);
}
if (!ret) {
if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
struct amdgpu_va* va;
va = calloc(1, sizeof(struct amdgpu_va));
if(!va){
amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
return -ENOMEM;
}
va->dev = dev;
va->address = *va_base_allocated;
va->size = size;
va->range = va_range_type;
va->vamgr = vamgr;
*va_range_handle = va;
} else {
return -EINVAL;
}
return ret;
return 0;
}
drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
@ -310,50 +261,3 @@ drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
free(va_range_handle);
return 0;
}
drm_public uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle)
{
return va_handle->address;
}
drm_public amdgpu_va_manager_handle amdgpu_va_manager_alloc(void)
{
amdgpu_va_manager_handle r = calloc(1, sizeof(struct amdgpu_va_manager));
return r;
}
drm_public void amdgpu_va_manager_init(struct amdgpu_va_manager *va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment)
{
uint64_t start, max;
start = low_va_offset;
max = MIN2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_low, start, max,
virtual_address_alignment);
start = high_va_offset;
max = MIN2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high, start, max,
virtual_address_alignment);
}
drm_public void amdgpu_va_manager_deinit(struct amdgpu_va_manager *va_mgr)
{
amdgpu_vamgr_deinit(&va_mgr->vamgr_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_low);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high);
}
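Putting the public entry points together, a minimal, hypothetical VA reservation round-trip could look like the sketch below; the 1 MiB size and the AMDGPU_VA_RANGE_HIGH flag are made-up example values, and the device handle is assumed to be initialized already (needs amdgpu.h):
/* Hypothetical sketch: reserve and release a 1 MiB GPU VA range. */
static int example_reserve_va(amdgpu_device_handle dev)
{
	amdgpu_va_handle handle;
	uint64_t va = 0;
	int r;

	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
				  1024 * 1024,	/* size */
				  0,		/* default alignment */
				  0,		/* no fixed base address */
				  &va, &handle, AMDGPU_VA_RANGE_HIGH);
	if (r)
		return r;

	/* ... map buffer objects into [va, va + 1 MiB) ... */

	return amdgpu_va_range_free(handle);
}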

View file

@ -21,13 +21,12 @@
datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
libdrm_amdgpu = library(
libdrm_amdgpu = shared_library(
'drm_amdgpu',
[
files(
'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'handle_table.c',
'amdgpu_userq.c',
),
config_file,
],
@ -37,17 +36,19 @@ libdrm_amdgpu = library(
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops, dep_rt],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_atomic_ops],
version : '1.0.0',
install : true,
)
install_headers('amdgpu.h', subdir : 'libdrm')
pkg.generate(
libdrm_amdgpu,
name : 'libdrm_amdgpu',
libraries : libdrm_amdgpu,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to kernel DRM services for amdgpu',
)
@ -56,14 +57,12 @@ ext_libdrm_amdgpu = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
test(
'amdgpu-symbols-check',
symbols_check,
args : [
'--lib', libdrm_amdgpu,
'--symbols-file', files('amdgpu-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -22,7 +22,6 @@ drmAuthMagic
drmAvailable
drmCheckModesettingSupported
drmClose
drmCloseBufferHandle
drmCloseOnce
drmCommandNone
drmCommandRead
@ -58,7 +57,6 @@ drmGetContextPrivateMapping
drmGetContextTag
drmGetDevice
drmGetDevice2
drmGetDeviceFromDevId
drmGetDeviceNameFromFd
drmGetDeviceNameFromFd2
drmGetDevices
@ -70,7 +68,6 @@ drmGetLibVersion
drmGetLock
drmGetMagic
drmGetMap
drmGetNodeTypeFromDevId
drmGetNodeTypeFromFd
drmGetPrimaryDeviceNameFromFd
drmGetRenderDeviceNameFromFd
@ -86,7 +83,6 @@ drmHashInsert
drmHashLookup
drmHashNext
drmIoctl
drmIsKMS
drmIsMaster
drmMalloc
drmMap
@ -104,19 +100,14 @@ drmModeAtomicGetCursor
drmModeAtomicMerge
drmModeAtomicSetCursor
drmModeAttachMode
drmModeCloseFB
drmModeConnectorGetPossibleCrtcs
drmModeConnectorSetProperty
drmModeCreateDumbBuffer
drmModeCreateLease
drmModeCreatePropertyBlob
drmModeCrtcGetGamma
drmModeCrtcSetGamma
drmModeDestroyDumbBuffer
drmModeDestroyPropertyBlob
drmModeDetachMode
drmModeDirtyFB
drmModeFormatModifierBlobIterNext
drmModeFreeConnector
drmModeFreeCrtc
drmModeFreeEncoder
@ -131,7 +122,6 @@ drmModeFreePropertyBlob
drmModeFreeResources
drmModeGetConnector
drmModeGetConnectorCurrent
drmModeGetConnectorTypeName
drmModeGetCrtc
drmModeGetEncoder
drmModeGetFB
@ -143,7 +133,6 @@ drmModeGetProperty
drmModeGetPropertyBlob
drmModeGetResources
drmModeListLessees
drmModeMapDumbBuffer
drmModeMoveCursor
drmModeObjectGetProperties
drmModeObjectSetProperty
@ -189,7 +178,6 @@ drmSLNext
drmSwitchToContext
drmSyncobjCreate
drmSyncobjDestroy
drmSyncobjEventfd
drmSyncobjExportSyncFile
drmSyncobjFDToHandle
drmSyncobjHandleToFD
@ -207,6 +195,3 @@ drmUnmap
drmUnmapBufs
drmUpdateDrawableInfo
drmWaitVBlank
drmGetFormatModifierName
drmGetFormatModifierVendor
drmGetFormatName

View file

@ -1,6 +0,0 @@
prebuilt_etc {
name: "amdgpu.ids",
proprietary: true,
sub_dir: "hwdata",
src: "amdgpu.ids",
}

data/Android.mk Normal file
View file

@ -0,0 +1,10 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := amdgpu.ids
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hwdata
LOCAL_SRC_FILES := $(LOCAL_MODULE)
include $(BUILD_PREBUILT)

View file

@ -4,484 +4,256 @@
# device_id, revision_id, product_name <-- single tab after comma
1.0.0
1114, C2, AMD Radeon 860M Graphics
1114, C3, AMD Radeon 840M Graphics
1114, D2, AMD Radeon 860M Graphics
1114, D3, AMD Radeon 840M Graphics
1309, 00, AMD Radeon R7 Graphics
130A, 00, AMD Radeon R6 Graphics
130B, 00, AMD Radeon R4 Graphics
130C, 00, AMD Radeon R7 Graphics
130D, 00, AMD Radeon R6 Graphics
130E, 00, AMD Radeon R5 Graphics
130F, 00, AMD Radeon R7 Graphics
130F, D4, AMD Radeon R7 Graphics
130F, D5, AMD Radeon R7 Graphics
130F, D6, AMD Radeon R7 Graphics
130F, D7, AMD Radeon R7 Graphics
1313, 00, AMD Radeon R7 Graphics
1313, D4, AMD Radeon R7 Graphics
1313, D5, AMD Radeon R7 Graphics
1313, D6, AMD Radeon R7 Graphics
1315, 00, AMD Radeon R5 Graphics
1315, D4, AMD Radeon R5 Graphics
1315, D5, AMD Radeon R5 Graphics
1315, D6, AMD Radeon R5 Graphics
1315, D7, AMD Radeon R5 Graphics
1316, 00, AMD Radeon R5 Graphics
1318, 00, AMD Radeon R5 Graphics
131B, 00, AMD Radeon R4 Graphics
131C, 00, AMD Radeon R7 Graphics
131D, 00, AMD Radeon R6 Graphics
1435, AE, AMD Custom GPU 0932
1506, C1, AMD Radeon 610M
1506, C2, AMD Radeon 610M
1506, C3, AMD Radeon 610M
1506, C4, AMD Radeon 610M
150E, C1, AMD Radeon 890M Graphics
150E, C4, AMD Radeon 880M Graphics
150E, C5, AMD Radeon 890M Graphics
150E, C6, AMD Radeon 890M Graphics
150E, D1, AMD Radeon 890M Graphics
150E, D2, AMD Radeon 880M Graphics
150E, D3, AMD Radeon 890M Graphics
1586, C1, Radeon 8060S Graphics
1586, C2, Radeon 8050S Graphics
1586, C4, Radeon 8050S Graphics
1586, D1, Radeon 8060S Graphics
1586, D2, Radeon 8050S Graphics
1586, D4, Radeon 8050S Graphics
1586, D5, Radeon 8040S Graphics
15BF, 00, AMD Radeon 780M Graphics
15BF, 01, AMD Radeon 760M Graphics
15BF, 02, AMD Radeon 780M Graphics
15BF, 03, AMD Radeon 760M Graphics
15BF, C1, AMD Radeon 780M Graphics
15BF, C2, AMD Radeon 780M Graphics
15BF, C3, AMD Radeon 760M Graphics
15BF, C4, AMD Radeon 780M Graphics
15BF, C5, AMD Radeon 740M Graphics
15BF, C6, AMD Radeon 780M Graphics
15BF, C7, AMD Radeon 780M Graphics
15BF, C8, AMD Radeon 760M Graphics
15BF, C9, AMD Radeon 780M Graphics
15BF, CA, AMD Radeon 740M Graphics
15BF, CB, AMD Radeon 760M Graphics
15BF, CC, AMD Radeon 740M Graphics
15BF, CD, AMD Radeon 760M Graphics
15BF, CF, AMD Radeon 780M Graphics
15BF, D0, AMD Radeon 780M Graphics
15BF, D1, AMD Radeon 780M Graphics
15BF, D2, AMD Radeon 780M Graphics
15BF, D3, AMD Radeon 780M Graphics
15BF, D4, AMD Radeon 780M Graphics
15BF, D5, AMD Radeon 760M Graphics
15BF, D6, AMD Radeon 760M Graphics
15BF, D7, AMD Radeon 780M Graphics
15BF, D8, AMD Radeon 740M Graphics
15BF, D9, AMD Radeon 780M Graphics
15BF, DA, AMD Radeon 780M Graphics
15BF, DB, AMD Radeon 760M Graphics
15BF, DC, AMD Radeon 760M Graphics
15BF, DD, AMD Radeon 780M Graphics
15BF, DE, AMD Radeon 740M Graphics
15BF, DF, AMD Radeon 760M Graphics
15BF, F0, AMD Radeon 760M Graphics
15C8, C1, AMD Radeon 740M Graphics
15C8, C2, AMD Radeon 740M Graphics
15C8, C3, AMD Radeon 740M Graphics
15C8, C4, AMD Radeon 740M Graphics
15C8, D1, AMD Radeon 740M Graphics
15C8, D2, AMD Radeon 740M Graphics
15C8, D3, AMD Radeon 740M Graphics
15C8, D4, AMD Radeon 740M Graphics
15D8, 00, AMD Radeon RX Vega 8 Graphics WS
15D8, 91, AMD Radeon Vega 3 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Radeon Vega 3 Graphics
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15D8, 93, AMD Radeon Vega 1 Graphics
15D8, A1, AMD Radeon Vega 10 Graphics
15D8, A2, AMD Radeon Vega 8 Graphics
15D8, A3, AMD Radeon Vega 6 Graphics
15D8, A4, AMD Radeon Vega 3 Graphics
15D8, B1, AMD Radeon Vega 10 Graphics
15D8, B2, AMD Radeon Vega 8 Graphics
15D8, B3, AMD Radeon Vega 6 Graphics
15D8, B4, AMD Radeon Vega 3 Graphics
15D8, C1, AMD Radeon Vega 10 Graphics
15D8, C2, AMD Radeon Vega 8 Graphics
15D8, C3, AMD Radeon Vega 6 Graphics
15D8, C4, AMD Radeon Vega 3 Graphics
15D8, C5, AMD Radeon Vega 3 Graphics
15D8, C8, AMD Radeon Vega 11 Graphics
15D8, C9, AMD Radeon Vega 8 Graphics
15D8, CA, AMD Radeon Vega 11 Graphics
15D8, CB, AMD Radeon Vega 8 Graphics
15D8, CC, AMD Radeon Vega 3 Graphics
15D8, CE, AMD Radeon Vega 3 Graphics
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, D1, AMD Radeon Vega 10 Graphics
15D8, D2, AMD Radeon Vega 8 Graphics
15D8, D3, AMD Radeon Vega 6 Graphics
15D8, D4, AMD Radeon Vega 3 Graphics
15D8, D8, AMD Radeon Vega 11 Graphics
15D8, D9, AMD Radeon Vega 8 Graphics
15D8, DA, AMD Radeon Vega 11 Graphics
15D8, DB, AMD Radeon Vega 3 Graphics
15D8, DB, AMD Radeon Vega 8 Graphics
15D8, DC, AMD Radeon Vega 3 Graphics
15D8, DD, AMD Radeon Vega 3 Graphics
15D8, DE, AMD Radeon Vega 3 Graphics
15D8, DF, AMD Radeon Vega 3 Graphics
15D8, E3, AMD Radeon Vega 3 Graphics
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
15DD, C3, AMD Radeon(TM) Vega 3 Graphics
15DD, CB, AMD Radeon(TM) Vega 3 Graphics
15DD, CE, AMD Radeon(TM) Vega 3 Graphics
15DD, D8, AMD Radeon(TM) Vega 3 Graphics
15DD, CC, AMD Radeon(TM) Vega 6 Graphics
15DD, D9, AMD Radeon(TM) Vega 6 Graphics
15DD, C2, AMD Radeon(TM) Vega 8 Graphics
15DD, C4, AMD Radeon(TM) Vega 8 Graphics
15DD, C8, AMD Radeon(TM) Vega 8 Graphics
15DD, CA, AMD Radeon(TM) Vega 8 Graphics
15DD, D1, AMD Radeon(TM) Vega 8 Graphics
15DD, D5, AMD Radeon(TM) Vega 8 Graphics
15DD, D7, AMD Radeon(TM) Vega 8 Graphics
15DD, C3, AMD Radeon(TM) Vega 10 Graphics
15DD, D0, AMD Radeon(TM) Vega 10 Graphics
15DD, C1, AMD Radeon(TM) Vega 11 Graphics
15DD, C6, AMD Radeon(TM) Vega 11 Graphics
15DD, C9, AMD Radeon(TM) Vega 11 Graphics
15DD, D3, AMD Radeon(TM) Vega 11 Graphics
15DD, D6, AMD Radeon(TM) Vega 11 Graphics
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 84, AMD Radeon Vega 6 Graphics
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
15DD, 86, AMD Radeon Vega 11 Graphics
15DD, 88, AMD Radeon Vega 8 Graphics
15DD, C1, AMD Radeon Vega 11 Graphics
15DD, C2, AMD Radeon Vega 8 Graphics
15DD, C3, AMD Radeon Vega 3 / 10 Graphics
15DD, C4, AMD Radeon Vega 8 Graphics
15DD, C5, AMD Radeon Vega 3 Graphics
15DD, C6, AMD Radeon Vega 11 Graphics
15DD, C8, AMD Radeon Vega 8 Graphics
15DD, C9, AMD Radeon Vega 11 Graphics
15DD, CA, AMD Radeon Vega 8 Graphics
15DD, CB, AMD Radeon Vega 3 Graphics
15DD, CC, AMD Radeon Vega 6 Graphics
15DD, CE, AMD Radeon Vega 3 Graphics
15DD, CF, AMD Radeon Vega 3 Graphics
15DD, D0, AMD Radeon Vega 10 Graphics
15DD, D1, AMD Radeon Vega 8 Graphics
15DD, D3, AMD Radeon Vega 11 Graphics
15DD, D5, AMD Radeon Vega 8 Graphics
15DD, D6, AMD Radeon Vega 11 Graphics
15DD, D7, AMD Radeon Vega 8 Graphics
15DD, D8, AMD Radeon Vega 3 Graphics
15DD, D9, AMD Radeon Vega 6 Graphics
15DD, E1, AMD Radeon Vega 3 Graphics
15DD, E2, AMD Radeon Vega 3 Graphics
163F, AE, AMD Custom GPU 0405
163F, E1, AMD Custom GPU 0405
164E, D8, AMD Radeon 610M
164E, D9, AMD Radeon 610M
164E, DA, AMD Radeon 610M
164E, DB, AMD Radeon 610M
164E, DC, AMD Radeon 610M
1681, 06, AMD Radeon 680M
1681, 07, AMD Radeon 660M
1681, 0A, AMD Radeon 680M
1681, 0B, AMD Radeon 660M
1681, C7, AMD Radeon 680M
1681, C8, AMD Radeon 680M
1681, C9, AMD Radeon 660M
1900, 01, AMD Radeon 780M Graphics
1900, 02, AMD Radeon 760M Graphics
1900, 03, AMD Radeon 780M Graphics
1900, 04, AMD Radeon 760M Graphics
1900, 05, AMD Radeon 780M Graphics
1900, 06, AMD Radeon 780M Graphics
1900, 07, AMD Radeon 760M Graphics
1900, B0, AMD Radeon 780M Graphics
1900, B1, AMD Radeon 780M Graphics
1900, B2, AMD Radeon 780M Graphics
1900, B3, AMD Radeon 780M Graphics
1900, B4, AMD Radeon 780M Graphics
1900, B5, AMD Radeon 780M Graphics
1900, B6, AMD Radeon 780M Graphics
1900, B7, AMD Radeon 760M Graphics
1900, B8, AMD Radeon 760M Graphics
1900, B9, AMD Radeon 780M Graphics
1900, BA, AMD Radeon 780M Graphics
1900, BB, AMD Radeon 780M Graphics
1900, C0, AMD Radeon 780M Graphics
1900, C1, AMD Radeon 760M Graphics
1900, C2, AMD Radeon 780M Graphics
1900, C3, AMD Radeon 760M Graphics
1900, C4, AMD Radeon 780M Graphics
1900, C5, AMD Radeon 780M Graphics
1900, C6, AMD Radeon 760M Graphics
1900, C7, AMD Radeon 780M Graphics
1900, C8, AMD Radeon 760M Graphics
1900, C9, AMD Radeon 780M Graphics
1900, CA, AMD Radeon 760M Graphics
1900, CB, AMD Radeon 780M Graphics
1900, CC, AMD Radeon 780M Graphics
1900, CD, AMD Radeon 760M Graphics
1900, CE, AMD Radeon 780M Graphics
1900, CF, AMD Radeon 760M Graphics
1900, D0, AMD Radeon 780M Graphics
1900, D1, AMD Radeon 760M Graphics
1900, D2, AMD Radeon 780M Graphics
1900, D3, AMD Radeon 760M Graphics
1900, D4, AMD Radeon 780M Graphics
1900, D5, AMD Radeon 780M Graphics
1900, D6, AMD Radeon 760M Graphics
1900, D7, AMD Radeon 780M Graphics
1900, D8, AMD Radeon 760M Graphics
1900, D9, AMD Radeon 780M Graphics
1900, DA, AMD Radeon 760M Graphics
1900, DB, AMD Radeon 780M Graphics
1900, DC, AMD Radeon 780M Graphics
1900, DD, AMD Radeon 760M Graphics
1900, DE, AMD Radeon 780M Graphics
1900, DF, AMD Radeon 760M Graphics
1900, F0, AMD Radeon 780M Graphics
1900, F1, AMD Radeon 780M Graphics
1900, F2, AMD Radeon 780M Graphics
1901, C1, AMD Radeon 740M Graphics
1901, C2, AMD Radeon 740M Graphics
1901, C3, AMD Radeon 740M Graphics
1901, C6, AMD Radeon 740M Graphics
1901, C7, AMD Radeon 740M Graphics
1901, C8, AMD Radeon 740M Graphics
1901, C9, AMD Radeon 740M Graphics
1901, CA, AMD Radeon 740M Graphics
1901, D1, AMD Radeon 740M Graphics
1901, D2, AMD Radeon 740M Graphics
1901, D3, AMD Radeon 740M Graphics
1901, D4, AMD Radeon 740M Graphics
1901, D5, AMD Radeon 740M Graphics
1901, D6, AMD Radeon 740M Graphics
1901, D7, AMD Radeon 740M Graphics
1901, D8, AMD Radeon 740M Graphics
6600, 00, AMD Radeon HD 8600 / 8700M
6600, 81, AMD Radeon R7 M370
6601, 00, AMD Radeon HD 8500M / 8700M
6604, 00, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon R7 M350
6605, 00, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon R7 M340
6606, 00, AMD Radeon HD 8790M
6607, 00, AMD Radeon R5 M240
6608, 00, AMD FirePro W2100
6610, 00, AMD Radeon R7 200 Series
6610, 81, AMD Radeon R7 350
6610, 83, AMD Radeon R5 340
6610, 87, AMD Radeon R7 200 Series
6611, 00, AMD Radeon R7 200 Series
6611, 87, AMD Radeon R7 200 Series
6613, 00, AMD Radeon R7 200 Series
6617, 00, AMD Radeon R7 240 Series
6617, 87, AMD Radeon R7 200 Series
15D8, 93, AMD Radeon(TM) Vega 1 Graphics
15D8, C4, AMD Radeon(TM) Vega 3 Graphics
15D8, C5, AMD Radeon(TM) Vega 3 Graphics
15D8, CC, AMD Radeon(TM) Vega 3 Graphics
15D8, CE, AMD Radeon(TM) Vega 3 Graphics
15D8, CF, AMD Radeon(TM) Vega 3 Graphics
15D8, D4, AMD Radeon(TM) Vega 3 Graphics
15D8, DC, AMD Radeon(TM) Vega 3 Graphics
15D8, DD, AMD Radeon(TM) Vega 3 Graphics
15D8, DE, AMD Radeon(TM) Vega 3 Graphics
15D8, DF, AMD Radeon(TM) Vega 3 Graphics
15D8, E3, AMD Radeon(TM) Vega 3 Graphics
15D8, E4, AMD Radeon(TM) Vega 3 Graphics
15D8, A3, AMD Radeon(TM) Vega 6 Graphics
15D8, B3, AMD Radeon(TM) Vega 6 Graphics
15D8, C3, AMD Radeon(TM) Vega 6 Graphics
15D8, D3, AMD Radeon(TM) Vega 6 Graphics
15D8, A2, AMD Radeon(TM) Vega 8 Graphics
15D8, B2, AMD Radeon(TM) Vega 8 Graphics
15D8, C2, AMD Radeon(TM) Vega 8 Graphics
15D8, C9, AMD Radeon(TM) Vega 8 Graphics
15D8, CB, AMD Radeon(TM) Vega 8 Graphics
15D8, D2, AMD Radeon(TM) Vega 8 Graphics
15D8, D9, AMD Radeon(TM) Vega 8 Graphics
15D8, DB, AMD Radeon(TM) Vega 8 Graphics
15D8, A1, AMD Radeon(TM) Vega 10 Graphics
15D8, B1, AMD Radeon(TM) Vega 10 Graphics
15D8, C1, AMD Radeon(TM) Vega 10 Graphics
15D8, D1, AMD Radeon(TM) Vega 10 Graphics
15D8, C8, AMD Radeon(TM) Vega 11 Graphics
15D8, CA, AMD Radeon(TM) Vega 11 Graphics
15D8, D8, AMD Radeon(TM) Vega 11 Graphics
15D8, DA, AMD Radeon(TM) Vega 11 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
6600, 0, AMD Radeon HD 8600/8700M
6600, 81, AMD Radeon (TM) R7 M370
6601, 0, AMD Radeon (TM) HD 8500M/8700M
6604, 0, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon (TM) R7 M350
6605, 0, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon (TM) R7 M340
6606, 0, AMD Radeon HD 8790M
6607, 0, AMD Radeon (TM) HD8530M
6608, 0, AMD FirePro W2100
6610, 0, AMD Radeon HD 8600 Series
6610, 81, AMD Radeon (TM) R7 350
6610, 83, AMD Radeon (TM) R5 340
6611, 0, AMD Radeon HD 8500 Series
6613, 0, AMD Radeon HD 8500 series
6617, C7, AMD Radeon R7 240 Series
6640, 00, AMD Radeon HD 8950
6640, 80, AMD Radeon R9 M380
6646, 00, AMD Radeon R9 M280X
6646, 80, AMD Radeon R9 M385
6646, 80, AMD Radeon R9 M470X
6647, 00, AMD Radeon R9 M200X Series
6647, 80, AMD Radeon R9 M380
6649, 00, AMD FirePro W5100
6658, 00, AMD Radeon R7 200 Series
665C, 00, AMD Radeon HD 7700 Series
665D, 00, AMD Radeon R7 200 Series
665F, 81, AMD Radeon R7 360 Series
6660, 00, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon R5 M335
6660, 83, AMD Radeon R5 M330
6663, 00, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon R5 M320
6664, 00, AMD Radeon R5 M200 Series
6665, 00, AMD Radeon R5 M230 Series
6665, 83, AMD Radeon R5 M320
6665, C3, AMD Radeon R5 M435
6666, 00, AMD Radeon R5 M200 Series
6667, 00, AMD Radeon R5 M200 Series
666F, 00, AMD Radeon HD 8500M
66A1, 02, AMD Instinct MI60 / MI50
66A1, 06, AMD Radeon Pro VII
6640, 0, AMD Radeon HD 8950
6640, 80, AMD Radeon (TM) R9 M380
6646, 0, AMD Radeon R9 M280X
6646, 80, AMD Radeon (TM) R9 M470X
6647, 0, AMD Radeon R9 M270X
6647, 80, AMD Radeon (TM) R9 M380
6649, 0, AMD FirePro W5100
6658, 0, AMD Radeon R7 200 Series
665C, 0, AMD Radeon HD 7700 Series
665D, 0, AMD Radeon R7 200 Series
665F, 81, AMD Radeon (TM) R7 300 Series
6660, 0, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon (TM) R5 M335
6660, 83, AMD Radeon (TM) R5 M330
6663, 0, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon (TM) R5 M320
6664, 0, AMD Radeon R5 M200 Series
6665, 0, AMD Radeon R5 M200 Series
6665, 83, AMD Radeon (TM) R5 M320
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
66A1, 06, AMD Radeon (TM) Pro VII
66AF, C1, AMD Radeon VII
6780, 00, AMD FirePro W9000
6784, 00, ATI FirePro V (FireGL V) Graphics Adapter
6788, 00, ATI FirePro V (FireGL V) Graphics Adapter
678A, 00, AMD FirePro W8000
6798, 00, AMD Radeon R9 200 / HD 7900 Series
6799, 00, AMD Radeon HD 7900 Series
679A, 00, AMD Radeon HD 7900 Series
679B, 00, AMD Radeon HD 7900 Series
679E, 00, AMD Radeon HD 7800 Series
67A0, 00, AMD Radeon FirePro W9100
67A1, 00, AMD Radeon FirePro W8100
67B0, 00, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon R9 390 Series
67B1, 00, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon R9 390 Series
67B9, 00, AMD Radeon R9 200 Series
67C0, 00, AMD Radeon Pro WX 7100 Graphics
67C0, 80, AMD Radeon E9550
67C2, 01, AMD Radeon Pro V7350x2
67C2, 02, AMD Radeon Pro V7300X
67C4, 00, AMD Radeon Pro WX 7100 Graphics
67C4, 80, AMD Radeon E9560 / E9565 Graphics
67C7, 00, AMD Radeon Pro WX 5100 Graphics
67C7, 80, AMD Radeon E9390 Graphics
67D0, 01, AMD Radeon Pro V7350x2
67D0, 02, AMD Radeon Pro V7300X
67DF, C0, AMD Radeon Pro 580X
67DF, C1, AMD Radeon RX 580 Series
67DF, C2, AMD Radeon RX 570 Series
67DF, C3, AMD Radeon RX 580 Series
67DF, C4, AMD Radeon RX 480 Graphics
67DF, C5, AMD Radeon RX 470 Graphics
67DF, C6, AMD Radeon RX 570 Series
67DF, C7, AMD Radeon RX 480 Graphics
67DF, CF, AMD Radeon RX 470 Graphics
67DF, D7, AMD Radeon RX 470 Graphics
67DF, E0, AMD Radeon RX 470 Series
67DF, E1, AMD Radeon RX 590 Series
67DF, E3, AMD Radeon RX Series
67DF, E7, AMD Radeon RX 580 Series
67DF, EB, AMD Radeon Pro 580X
67DF, EF, AMD Radeon RX 570 Series
67DF, F7, AMD Radeon RX P30PH
67DF, FF, AMD Radeon RX 470 Series
67E0, 00, AMD Radeon Pro WX Series
67E3, 00, AMD Radeon Pro WX 4100
67E8, 00, AMD Radeon Pro WX Series
67E8, 01, AMD Radeon Pro WX Series
67E8, 80, AMD Radeon E9260 Graphics
67EB, 00, AMD Radeon Pro V5300X
67EF, C0, AMD Radeon RX Graphics
67EF, C1, AMD Radeon RX 460 Graphics
67EF, C2, AMD Radeon Pro Series
67EF, C3, AMD Radeon RX Series
67EF, C5, AMD Radeon RX 460 Graphics
67EF, C7, AMD Radeon RX Graphics
67EF, CF, AMD Radeon RX 460 Graphics
67EF, E0, AMD Radeon RX 560 Series
67EF, E1, AMD Radeon RX Series
67EF, E2, AMD Radeon RX 560X
67EF, E3, AMD Radeon RX Series
67EF, E5, AMD Radeon RX 560 Series
67EF, E7, AMD Radeon RX 560 Series
67EF, EF, AMD Radeon 550 Series
67EF, FF, AMD Radeon RX 460 Graphics
67FF, C0, AMD Radeon Pro 465
67FF, C1, AMD Radeon RX 560 Series
67FF, CF, AMD Radeon RX 560 Series
67FF, EF, AMD Radeon RX 560 Series
67FF, FF, AMD Radeon RX 550 Series
6800, 00, AMD Radeon HD 7970M
6801, 00, AMD Radeon HD 8970M
6806, 00, AMD Radeon R9 M290X
6808, 00, AMD FirePro W7000
6808, 00, ATI FirePro V (FireGL V) Graphics Adapter
6809, 00, ATI FirePro W5000
6810, 00, AMD Radeon R9 200 Series
6810, 81, AMD Radeon R9 370 Series
6811, 00, AMD Radeon R9 200 Series
6811, 81, AMD Radeon R7 370 Series
6818, 00, AMD Radeon HD 7800 Series
6819, 00, AMD Radeon HD 7800 Series
6820, 00, AMD Radeon R9 M275X
6820, 81, AMD Radeon R9 M375
6820, 83, AMD Radeon R9 M375X
6821, 00, AMD Radeon R9 M200X Series
6821, 83, AMD Radeon R9 M370X
6821, 87, AMD Radeon R7 M380
6822, 00, AMD Radeon E8860
6823, 00, AMD Radeon R9 M200X Series
6825, 00, AMD Radeon HD 7800M Series
6826, 00, AMD Radeon HD 7700M Series
6827, 00, AMD Radeon HD 7800M Series
6828, 00, AMD FirePro W600
682B, 00, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon R9 M360
682C, 00, AMD FirePro W4100
682D, 00, AMD Radeon HD 7700M Series
682F, 00, AMD Radeon HD 7700M Series
6830, 00, AMD Radeon 7800M Series
6831, 00, AMD Radeon 7700M Series
6835, 00, AMD Radeon R7 Series / HD 9000 Series
6837, 00, AMD Radeon HD 7700 Series
683D, 00, AMD Radeon HD 7700 Series
683F, 00, AMD Radeon HD 7700 Series
684C, 00, ATI FirePro V (FireGL V) Graphics Adapter
6860, 00, AMD Radeon Instinct MI25
6860, 01, AMD Radeon Instinct MI25
6860, 02, AMD Radeon Instinct MI25
6860, 03, AMD Radeon Pro V340
6860, 04, AMD Radeon Instinct MI25x2
6860, 07, AMD Radeon Pro V320
6861, 00, AMD Radeon Pro WX 9100
6862, 00, AMD Radeon Pro SSG
6863, 00, AMD Radeon Vega Frontier Edition
6864, 03, AMD Radeon Pro V340
6864, 04, AMD Radeon Instinct MI25x2
6864, 05, AMD Radeon Pro V340
6868, 00, AMD Radeon Pro WX 8200
686C, 00, AMD Radeon Instinct MI25 MxGPU
686C, 01, AMD Radeon Instinct MI25 MxGPU
686C, 02, AMD Radeon Instinct MI25 MxGPU
686C, 03, AMD Radeon Pro V340 MxGPU
686C, 04, AMD Radeon Instinct MI25x2 MxGPU
686C, 05, AMD Radeon Pro V340L MxGPU
686C, 06, AMD Radeon Instinct MI25 MxGPU
687F, 01, AMD Radeon RX Vega
687F, C0, AMD Radeon RX Vega
687F, C1, AMD Radeon RX Vega
687F, C3, AMD Radeon RX Vega
687F, C7, AMD Radeon RX Vega
6900, 00, AMD Radeon R7 M260
6900, 81, AMD Radeon R7 M360
6900, 83, AMD Radeon R7 M340
6900, C1, AMD Radeon R5 M465 Series
6900, C3, AMD Radeon R5 M445 Series
6900, D1, AMD Radeon 530 Series
6900, D3, AMD Radeon 530 Series
6901, 00, AMD Radeon R5 M255
6902, 00, AMD Radeon Series
6907, 00, AMD Radeon R5 M255
6907, 87, AMD Radeon R5 M315
6920, 00, AMD Radeon R9 M395X
6920, 01, AMD Radeon R9 M390X
6921, 00, AMD Radeon R9 M390X
6929, 00, AMD FirePro S7150
6929, 01, AMD FirePro S7100X
692B, 00, AMD FirePro W7100
6938, 00, AMD Radeon R9 200 Series
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
6798, 0, AMD Radeon HD 7900 Series
679A, 0, AMD Radeon HD 7900 Series
679B, 0, AMD Radeon HD 7900 Series
679E, 0, AMD Radeon HD 7800 Series
67A0, 0, AMD Radeon FirePro W9100
67A1, 0, AMD Radeon FirePro W8100
67B0, 0, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon (TM) R9 390 Series
67B1, 0, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon (TM) R9 390 Series
67B9, 0, AMD Radeon R9 200 Series
67DF, C1, Radeon RX 580 Series
67DF, C2, Radeon RX 570 Series
67DF, C3, Radeon RX 580 Series
67DF, C4, AMD Radeon (TM) RX 480 Graphics
67DF, C5, AMD Radeon (TM) RX 470 Graphics
67DF, C6, Radeon RX 570 Series
67DF, C7, AMD Radeon (TM) RX 480 Graphics
67DF, CF, AMD Radeon (TM) RX 470 Graphics
67DF, D7, Radeon(TM) RX 470 Graphics
67DF, E0, Radeon RX 470 Series
67DF, E1, Radeon RX 590 Series
67DF, E3, Radeon RX Series
67DF, E7, Radeon RX 580 Series
67DF, EF, Radeon RX 570 Series
67DF, F7, Radeon RX P30PH
67C2, 01, AMD Radeon (TM) Pro V7350x2
67C2, 02, AMD Radeon (TM) Pro V7300X
67C4, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67C7, 00, AMD Radeon (TM) Pro WX 5100 Graphics
67C0, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67D0, 01, AMD Radeon (TM) Pro V7350x2
67D0, 02, AMD Radeon (TM) Pro V7300X
67E0, 00, AMD Radeon (TM) Pro WX Series
67E3, 00, AMD Radeon (TM) Pro WX 4100
67E8, 00, AMD Radeon (TM) Pro WX Series
67E8, 01, AMD Radeon (TM) Pro WX Series
67E8, 80, AMD Radeon (TM) E9260 Graphics
67EB, 00, AMD Radeon (TM) Pro V5300X
67EF, C0, AMD Radeon (TM) RX Graphics
67EF, C1, AMD Radeon (TM) RX 460 Graphics
67EF, C3, Radeon RX Series
67EF, C5, AMD Radeon (TM) RX 460 Graphics
67EF, C7, AMD Radeon (TM) RX Graphics
67EF, CF, AMD Radeon (TM) RX 460 Graphics
67EF, E2, RX 560X
67EF, E0, Radeon RX 560 Series
67EF, E1, Radeon RX Series
67EF, E3, Radeon RX Series
67EF, E5, Radeon RX 560 Series
67EF, EF, AMD Radeon (TM) RX Graphics
67EF, FF, Radeon(TM) RX 460 Graphics
67FF, C0, AMD Radeon (TM) RX Graphics
67FF, C1, AMD Radeon (TM) RX Graphics
67FF, CF, Radeon RX 560 Series
67FF, EF, Radeon RX 560 Series
67FF, FF, Radeon RX 550 Series
6800, 0, AMD Radeon HD 7970M
6801, 0, AMD Radeon(TM) HD8970M
6808, 0, ATI FirePro V(FireGL V) Graphics Adapter
6809, 0, ATI FirePro V(FireGL V) Graphics Adapter
6810, 0, AMD Radeon(TM) HD 8800 Series
6810, 81, AMD Radeon (TM) R7 370 Series
6811, 0, AMD Radeon(TM) HD8800 Series
6811, 81, AMD Radeon (TM) R7 300 Series
6818, 0, AMD Radeon HD 7800 Series
6819, 0, AMD Radeon HD 7800 Series
6820, 0, AMD Radeon HD 8800M Series
6820, 81, AMD Radeon (TM) R9 M375
6820, 83, AMD Radeon (TM) R9 M375X
6821, 0, AMD Radeon HD 8800M Series
6821, 87, AMD Radeon (TM) R7 M380
6821, 83, AMD Radeon R9 (TM) M370X
6822, 0, AMD Radeon E8860
6823, 0, AMD Radeon HD 8800M Series
6825, 0, AMD Radeon HD 7800M Series
6827, 0, AMD Radeon HD 7800M Series
6828, 0, ATI FirePro V(FireGL V) Graphics Adapter
682B, 0, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon (TM) R9 M360
682C, 0, AMD FirePro W4100
682D, 0, AMD Radeon HD 7700M Series
682F, 0, AMD Radeon HD 7700M Series
6835, 0, AMD Radeon R7 Series / HD 9000 Series
6837, 0, AMD Radeon HD7700 Series
683D, 0, AMD Radeon HD 7700 Series
683F, 0, AMD Radeon HD 7700 Series
6860, 00, Radeon Instinct MI25
6860, 01, Radeon Instinct MI25
6860, 02, Radeon Instinct MI25
6860, 03, Radeon Pro V340
6860, 04, Radeon Instinct MI25x2
6860, 07, Radeon (TM) Pro V320
6861, 00, Radeon Pro WX 9100
6862, 00, Radeon Pro SSG
6863, 00, Radeon Vega Frontier Edition
6864, 03, Radeon Pro V340
6864, 04, Instinct MI25x2
6868, 00, Radeon (TM) PRO WX 8200
686C, 00, Radeon Instinct MI25 MxGPU
686C, 01, Radeon Instinct MI25 MxGPU
686C, 02, Radeon Instinct MI25 MxGPU
686C, 03, Radeon Pro V340 MxGPU
686C, 04, Radeon Instinct MI25x2 MxGPU
686C, 05, Radeon Pro V340L MxGPU
686C, 06, Radeon Instinct MI25 MxGPU
687F, C0, Radeon RX Vega
687F, C1, Radeon RX Vega
687F, C3, Radeon RX Vega
6900, 0, AMD Radeon R7 M260
6900, 81, AMD Radeon (TM) R7 M360
6900, 83, AMD Radeon (TM) R7 M340
6901, 0, AMD Radeon R5 M255
6907, 0, AMD Radeon R5 M255
6907, 87, AMD Radeon (TM) R5 M315
6920, 0, AMD RADEON R9 M395X
6920, 1, AMD RADEON R9 M390X
6921, 0, AMD Radeon R9 M295X
6929, 0, AMD FirePro S7150
692B, 0, AMD FirePro W7100
6938, 0, AMD Radeon R9 200 Series
6938, F0, AMD Radeon R9 200 Series
6938, F1, AMD Radeon R9 380 Series
6939, 00, AMD Radeon R9 200 Series
6938, F1, AMD Radeon (TM) R9 380 Series
6939, F0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon R9 380 Series
694C, C0, AMD Radeon RX Vega M GH Graphics
694E, C0, AMD Radeon RX Vega M GL Graphics
6980, 00, AMD Radeon Pro WX 3100
6981, 00, AMD Radeon Pro WX 3200 Series
6981, 01, AMD Radeon Pro WX 3200 Series
6981, 10, AMD Radeon Pro WX 3200 Series
6985, 00, AMD Radeon Pro WX 3100
6986, 00, AMD Radeon Pro WX 2100
6939, 0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon (TM) R9 380 Series
6980, 00, Radeon Pro WX3100
6981, 00, AMD Radeon (TM) Pro WX 3200 Series
6981, 01, AMD Radeon (TM) Pro WX 3200 Series
6981, 10, AMD Radeon (TM) Pro WX 3200 Series
6985, 00, AMD Radeon Pro WX3100
6987, 80, AMD Embedded Radeon E9171
6987, C0, AMD Radeon 550X Series
6987, C0, Radeon 550X Series
6987, C1, AMD Radeon RX 640
6987, C3, AMD Radeon 540X Series
6987, C7, AMD Radeon 540
6995, 00, AMD Radeon Pro WX 2100
6997, 00, AMD Radeon Pro WX 2100
6987, C3, Radeon 540X Series
6995, 00, AMD Radeon Pro WX2100
6997, 00, Radeon Pro WX2100
699F, 81, AMD Embedded Radeon E9170 Series
699F, C0, AMD Radeon 500 Series
699F, C1, AMD Radeon 540 Series
699F, C3, AMD Radeon 500 Series
699F, C7, AMD Radeon RX 550 / 550 Series
699F, C9, AMD Radeon 540
6FDF, E7, AMD Radeon RX 590 GME
6FDF, EF, AMD Radeon RX 580 2048SP
7300, C1, AMD FirePro S9300 x2
7300, C8, AMD Radeon R9 Fury Series
7300, C9, AMD Radeon Pro Duo
7300, CA, AMD Radeon R9 Fury Series
7300, CB, AMD Radeon R9 Fury Series
699F, C0, Radeon 500 Series
699F, C1, Radeon 540 Series
699F, C3, Radeon 500 Series
699F, C7, Radeon RX550/550 Series
7300, C1, AMD FirePro (TM) S9300 x2
7300, C8, AMD Radeon (TM) R9 Fury Series
7300, C9, Radeon (TM) Pro Duo
7300, CB, AMD Radeon (TM) R9 Fury Series
7300, CA, AMD Radeon (TM) R9 Fury Series
7312, 00, AMD Radeon Pro W5700
731E, C6, AMD Radeon RX 5700XTB
731E, C7, AMD Radeon RX 5700B
731F, C0, AMD Radeon RX 5700 XT 50th Anniversary
731F, C1, AMD Radeon RX 5700 XT
731F, C2, AMD Radeon RX 5600M
@ -490,211 +262,20 @@
731F, C5, AMD Radeon RX 5700 XT
731F, CA, AMD Radeon RX 5600 XT
731F, CB, AMD Radeon RX 5600 OEM
7340, C1, AMD Radeon RX 5500M
7340, C3, AMD Radeon RX 5300M
7340, C5, AMD Radeon RX 5500 XT
7340, C7, AMD Radeon RX 5500
7340, C9, AMD Radeon RX 5500XTB
7340, CF, AMD Radeon RX 5300
7340, C1, Radeon RX 5500M
7340, C5, Radeon RX 5500 XT
7340, C7, Radeon RX 5500
7340, CF, Radeon RX 5300
7341, 00, AMD Radeon Pro W5500
7347, 00, AMD Radeon Pro W5500M
7360, 41, AMD Radeon Pro 5600M
7360, C3, AMD Radeon Pro V520
7362, C1, AMD Radeon Pro V540
7362, C3, AMD Radeon Pro V520
738C, 01, AMD Instinct MI100
73A1, 00, AMD Radeon Pro V620
73A3, 00, AMD Radeon Pro W6800
73A5, C0, AMD Radeon RX 6950 XT
73AE, 00, AMD Radeon Pro V620 MxGPU
73AF, C0, AMD Radeon RX 6900 XT
73BF, C0, AMD Radeon RX 6900 XT
73BF, C1, AMD Radeon RX 6800 XT
73BF, C3, AMD Radeon RX 6800
73DF, C0, AMD Radeon RX 6750 XT
73DF, C1, AMD Radeon RX 6700 XT
73DF, C2, AMD Radeon RX 6800M
73DF, C3, AMD Radeon RX 6800M
73DF, C5, AMD Radeon RX 6700 XT
73DF, CF, AMD Radeon RX 6700M
73DF, D5, AMD Radeon RX 6750 GRE 12GB
73DF, D7, AMD TDC-235
73DF, DF, AMD Radeon RX 6700
73DF, E5, AMD Radeon RX 6750 GRE 12GB
73DF, FF, AMD Radeon RX 6700
73E0, 00, AMD Radeon RX 6600M
73E1, 00, AMD Radeon Pro W6600M
73E3, 00, AMD Radeon Pro W6600
73EF, C0, AMD Radeon RX 6800S
73EF, C1, AMD Radeon RX 6650 XT
73EF, C2, AMD Radeon RX 6700S
73EF, C3, AMD Radeon RX 6650M
73EF, C4, AMD Radeon RX 6650M XT
73FF, C1, AMD Radeon RX 6600 XT
73FF, C3, AMD Radeon RX 6600M
73FF, C7, AMD Radeon RX 6600
73FF, CB, AMD Radeon RX 6600S
73FF, CF, AMD Radeon RX 6600 LE
73FF, DF, AMD Radeon RX 6750 GRE 10GB
7408, 00, AMD Instinct MI250X
740C, 01, AMD Instinct MI250X / MI250
740F, 02, AMD Instinct MI210
7421, 00, AMD Radeon Pro W6500M
7422, 00, AMD Radeon Pro W6400
7423, 00, AMD Radeon Pro W6300M
7423, 01, AMD Radeon Pro W6300
7424, 00, AMD Radeon RX 6300
743F, C1, AMD Radeon RX 6500 XT
743F, C3, AMD Radeon RX 6500
743F, C3, AMD Radeon RX 6500M
743F, C7, AMD Radeon RX 6400
743F, C8, AMD Radeon RX 6500M
743F, CC, AMD Radeon 6550S
743F, CE, AMD Radeon RX 6450M
743F, CF, AMD Radeon RX 6300M
743F, D3, AMD Radeon RX 6550M
743F, D7, AMD Radeon RX 6400
7448, 00, AMD Radeon Pro W7900
7449, 00, AMD Radeon Pro W7800 48GB
744A, 00, AMD Radeon Pro W7900 Dual Slot
744B, 00, AMD Radeon Pro W7900D
744C, C8, AMD Radeon RX 7900 XTX
744C, CC, AMD Radeon RX 7900 XT
744C, CE, AMD Radeon RX 7900 GRE
744C, CF, AMD Radeon RX 7900M
745E, CC, AMD Radeon Pro W7800
7460, 00, AMD Radeon Pro V710
7461, 00, AMD Radeon Pro V710 MxGPU
7470, 00, AMD Radeon Pro W7700
747E, C8, AMD Radeon RX 7800 XT
747E, D8, AMD Radeon RX 7800M
747E, DB, AMD Radeon RX 7700
747E, FF, AMD Radeon RX 7700 XT
7480, 00, AMD Radeon Pro W7600
7480, C0, AMD Radeon RX 7600 XT
7480, C1, AMD Radeon RX 7700S
7480, C2, AMD Radeon RX 7650 GRE
7480, C3, AMD Radeon RX 7600S
7480, C7, AMD Radeon RX 7600M XT
7480, CF, AMD Radeon RX 7600
7481, C7, AMD Steam Machine
7483, CF, AMD Radeon RX 7600M
7489, 00, AMD Radeon Pro W7500
7499, 00, AMD Radeon Pro W7400
7499, C0, AMD Radeon RX 7400
7499, C1, AMD Radeon RX 7300
74A0, 00, AMD Instinct MI300A
74A1, 00, AMD Instinct MI300X
74A2, 00, AMD Instinct MI308X
74A5, 00, AMD Instinct MI325X
74A8, 00, AMD Instinct MI308X HF
74A9, 00, AMD Instinct MI300X HF
74B5, 00, AMD Instinct MI300X VF
74B6, 00, AMD Instinct MI308X
74BD, 00, AMD Instinct MI300X HF
7550, C0, AMD Radeon RX 9070 XT
7550, C2, AMD Radeon RX 9070 GRE
7550, C3, AMD Radeon RX 9070
7551, C0, AMD Radeon AI PRO R9700
7590, C0, AMD Radeon RX 9060 XT
7590, C7, AMD Radeon RX 9060
75A0, C0, AMD Instinct MI350X
75A3, C0, AMD Instinct MI355X
75B0, C0, AMD Instinct MI350X VF
75B3, C0, AMD Instinct MI355X VF
9830, 00, AMD Radeon HD 8400 / R3 Series
9831, 00, AMD Radeon HD 8400E
9832, 00, AMD Radeon HD 8330
9833, 00, AMD Radeon HD 8330E
9834, 00, AMD Radeon HD 8210
9835, 00, AMD Radeon HD 8210E
9836, 00, AMD Radeon HD 8200 / R3 Series
9837, 00, AMD Radeon HD 8280E
9838, 00, AMD Radeon HD 8200 / R3 series
9839, 00, AMD Radeon HD 8180
983D, 00, AMD Radeon HD 8250
9850, 00, AMD Radeon R3 Graphics
9850, 03, AMD Radeon R3 Graphics
9850, 40, AMD Radeon R2 Graphics
9850, 45, AMD Radeon R3 Graphics
9851, 00, AMD Radeon R4 Graphics
9851, 01, AMD Radeon R5E Graphics
9851, 05, AMD Radeon R5 Graphics
9851, 06, AMD Radeon R5E Graphics
9851, 40, AMD Radeon R4 Graphics
9851, 45, AMD Radeon R5 Graphics
9852, 00, AMD Radeon R2 Graphics
9852, 40, AMD Radeon E1 Graphics
9853, 00, AMD Radeon R2 Graphics
9853, 01, AMD Radeon R4E Graphics
9853, 03, AMD Radeon R2 Graphics
9853, 05, AMD Radeon R1E Graphics
9853, 06, AMD Radeon R1E Graphics
9853, 07, AMD Radeon R1E Graphics
9853, 08, AMD Radeon R1E Graphics
9853, 40, AMD Radeon R2 Graphics
9854, 00, AMD Radeon R3 Graphics
9854, 01, AMD Radeon R3E Graphics
9854, 02, AMD Radeon R3 Graphics
9854, 05, AMD Radeon R2 Graphics
9854, 06, AMD Radeon R4 Graphics
9854, 07, AMD Radeon R3 Graphics
9855, 02, AMD Radeon R6 Graphics
9855, 05, AMD Radeon R4 Graphics
9856, 00, AMD Radeon R2 Graphics
9856, 01, AMD Radeon R2E Graphics
9856, 02, AMD Radeon R2 Graphics
9856, 05, AMD Radeon R1E Graphics
9856, 06, AMD Radeon R2 Graphics
9856, 07, AMD Radeon R1E Graphics
9856, 08, AMD Radeon R1E Graphics
9856, 13, AMD Radeon R1E Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 88, AMD Radeon R7E Graphics
9874, 89, AMD Radeon R6E Graphics
9874, C4, AMD Radeon R7 Graphics
9874, C5, AMD Radeon R6 Graphics
9874, C6, AMD Radeon R6 Graphics
9874, C7, AMD Radeon R5 Graphics
9874, C8, AMD Radeon R7 Graphics
9874, C9, AMD Radeon R7 Graphics
9874, CA, AMD Radeon R5 Graphics
9874, CB, AMD Radeon R5 Graphics
9874, CC, AMD Radeon R7 Graphics
9874, CD, AMD Radeon R7 Graphics
9874, CE, AMD Radeon R5 Graphics
9874, E1, AMD Radeon R7 Graphics
9874, E2, AMD Radeon R7 Graphics
9874, E3, AMD Radeon R7 Graphics
9874, E4, AMD Radeon R7 Graphics
9874, E5, AMD Radeon R5 Graphics
9874, E6, AMD Radeon R5 Graphics
98E4, 80, AMD Radeon R5E Graphics
98E4, 81, AMD Radeon R4E Graphics
98E4, 83, AMD Radeon R2E Graphics
98E4, 84, AMD Radeon R2E Graphics
98E4, 86, AMD Radeon R1E Graphics
98E4, C0, AMD Radeon R4 Graphics
98E4, C1, AMD Radeon R5 Graphics
98E4, C2, AMD Radeon R4 Graphics
98E4, C4, AMD Radeon R5 Graphics
98E4, C6, AMD Radeon R5 Graphics
98E4, C8, AMD Radeon R4 Graphics
98E4, C9, AMD Radeon R4 Graphics
98E4, CA, AMD Radeon R5 Graphics
98E4, D0, AMD Radeon R2 Graphics
98E4, D1, AMD Radeon R2 Graphics
98E4, D2, AMD Radeon R2 Graphics
98E4, D4, AMD Radeon R2 Graphics
98E4, D9, AMD Radeon R5 Graphics
98E4, DA, AMD Radeon R5 Graphics
98E4, DB, AMD Radeon R3 Graphics
98E4, E1, AMD Radeon R3 Graphics
98E4, E2, AMD Radeon R3 Graphics
98E4, E9, AMD Radeon R4 Graphics
98E4, EA, AMD Radeon R4 Graphics
98E4, EB, AMD Radeon R3 Graphics
98E4, EB, AMD Radeon R4 Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
6FDF, E7, AMD Radeon RX 590 GME
6FDF, EF, AMD Radeon RX 580 2048SP
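The listing above is the amdgpu.ids database installed under data/; per the header comment, every row is a hexadecimal device ID and revision ID followed by the marketing name, with a single tab after each comma. A throwaway parser for one row (purely illustrative; the real lookup lives in amdgpu_asic_id.c) might be:
#include <stdio.h>

/* Illustrative only: parse a row such as "67DF,<tab>C7,<tab>AMD Radeon RX 480 Graphics".
 * 'name' must point to a buffer of at least 128 bytes. */
static int parse_ids_row(const char *row, unsigned *did, unsigned *rid,
			 char *name)
{
	return sscanf(row, "%x, %x, %127[^\n]", did, rid, name) == 3 ? 0 : -1;
}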

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_etnaviv",
defaults: [
"libdrm_defaults",
"libdrm_etnaviv_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

etnaviv/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_ETNAVIV_FILES, LIBDRM_ETNAVIV_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_etnaviv
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_ETNAVIV_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,13 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_etnaviv_sources",
srcs: [
"etnaviv_device.c",
"etnaviv_gpu.c",
"etnaviv_bo.c",
"etnaviv_bo_cache.c",
"etnaviv_pipe.c",
"etnaviv_cmd_stream.c",
],
}

etnaviv/Makefile.sources Normal file
View file

@ -0,0 +1,13 @@
LIBDRM_ETNAVIV_FILES := \
etnaviv_device.c \
etnaviv_gpu.c \
etnaviv_bo.c \
etnaviv_bo_cache.c \
etnaviv_perfmon.c \
etnaviv_pipe.c \
etnaviv_cmd_stream.c \
etnaviv_drm.h \
etnaviv_priv.h
LIBDRM_ETNAVIV_H_FILES := \
etnaviv_drmif.h

View file

@ -48,8 +48,12 @@ drm_private void bo_del(struct etna_bo *bo)
drmHashDelete(bo->dev->name_table, bo->name);
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
drmHashDelete(bo->dev->handle_table, bo->handle);
drmCloseBufferHandle(bo->dev->fd, bo->handle);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
free(bo);
@ -78,7 +82,12 @@ static struct etna_bo *bo_from_handle(struct etna_device *dev,
struct etna_bo *bo = calloc(sizeof(*bo), 1);
if (!bo) {
drmCloseBufferHandle(dev->fd, handle);
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}

View file

@ -25,7 +25,8 @@
*/
#include <stdlib.h>
#include <sys/types.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>

View file

@ -73,10 +73,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
#define ETNA_MAX_PIPES 4
@ -152,11 +148,6 @@ struct drm_etnaviv_gem_submit_reloc {
* then patching the cmdstream for this entry is skipped. This can
* avoid kernel needing to map/access the cmdstream bo in the common
* case.
* If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
* field is interpreted as the fixed location to map the bo into the gpu
* virtual address space. If the kernel is unable to map the buffer at
* this location the submit will fail. This means userspace is responsible
* for the whole gpu virtual address management.
*/
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
@ -186,11 +177,9 @@ struct drm_etnaviv_gem_submit_pmr {
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
ETNA_SUBMIT_FENCE_FD_IN | \
ETNA_SUBMIT_FENCE_FD_OUT| \
ETNA_SUBMIT_SOFTPIN)
ETNA_SUBMIT_FENCE_FD_OUT)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02

View file

@ -19,7 +19,7 @@
# SOFTWARE.
libdrm_etnaviv = library(
libdrm_etnaviv = shared_library(
'drm_etnaviv',
[
files(
@ -31,18 +31,19 @@ libdrm_etnaviv = library(
include_directories : [inc_root, inc_drm],
link_with : libdrm,
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
dependencies : [dep_threads, dep_rt, dep_atomic_ops],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_rt, dep_atomic_ops],
version : '1.0.0',
install : true,
)
install_headers('etnaviv_drmif.h', subdir : 'libdrm')
pkg.generate(
libdrm_etnaviv,
name : 'libdrm_etnaviv',
libraries : libdrm_etnaviv,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to Tegra kernel DRM services',
)
@ -51,14 +52,12 @@ ext_libdrm_etnaviv = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_etnaviv', ext_libdrm_etnaviv)
test(
'etnaviv-symbols-check',
symbols_check,
args : [
'--lib', libdrm_etnaviv,
'--symbols-file', files('etnaviv-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -31,6 +31,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>
@ -175,7 +176,11 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
munmap(bo->vaddr, bo->size);
if (bo->handle) {
drmCloseBufferHandle(bo->dev->fd, bo->handle);
struct drm_gem_close req = {
.handle = bo->handle,
};
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
free(bo);

View file

@ -30,6 +30,7 @@
#include <assert.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>

View file

@ -18,15 +18,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
libdrm_exynos = library(
libdrm_exynos = shared_library(
'drm_exynos',
[files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs],
version : '1.0.0',
install : true,
)
@ -38,13 +37,12 @@ ext_libdrm_exynos = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_exynos', ext_libdrm_exynos)
pkg.generate(
libdrm_exynos,
name : 'libdrm_exynos',
libraries : libdrm_exynos,
subdirs : ['.', 'libdrm', 'exynos'],
version : '0.7',
requires_private : 'libdrm',
description : 'Userspace interface to exynos kernel DRM services',
)
@ -54,6 +52,6 @@ test(
args : [
'--lib', libdrm_exynos,
'--symbols-file', files('exynos-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

freedreno/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_FREEDRENO_FILES, LIBDRM_FREEDRENO_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_freedreno
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FREEDRENO_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -0,0 +1,25 @@
LIBDRM_FREEDRENO_FILES := \
freedreno_device.c \
freedreno_pipe.c \
freedreno_priv.h \
freedreno_ringbuffer.c \
freedreno_bo.c \
freedreno_bo_cache.c \
msm/msm_bo.c \
msm/msm_device.c \
msm/msm_pipe.c \
msm/msm_priv.h \
msm/msm_ringbuffer.c
LIBDRM_FREEDRENO_KGSL_FILES := \
kgsl/kgsl_bo.c \
kgsl/kgsl_device.c \
kgsl/kgsl_drm.h \
kgsl/kgsl_pipe.c \
kgsl/kgsl_priv.h \
kgsl/kgsl_ringbuffer.c \
kgsl/msm_kgsl.h
LIBDRM_FREEDRENO_H_FILES := \
freedreno_drmif.h \
freedreno_ringbuffer.h

View file

@ -62,7 +62,10 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
bo = dev->funcs->bo_from_handle(dev, size, handle);
if (!bo) {
drmCloseBufferHandle(dev->fd, handle);
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}
bo->dev = fd_device_ref(dev);
@ -260,10 +263,13 @@ drm_private void bo_del(struct fd_bo *bo)
*/
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
drmHashDelete(bo->dev->handle_table, bo->handle);
if (bo->name)
drmHashDelete(bo->dev->name_table, bo->name);
drmCloseBufferHandle(bo->dev->fd, bo->handle);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
bo->funcs->destroy(bo);

View file

@ -28,6 +28,8 @@
#include "kgsl_priv.h"
#include <linux/fb.h>
static int set_memtype(struct fd_device *dev, uint32_t handle, uint32_t flags)
{
struct drm_kgsl_gem_memtype req = {

View file

@ -39,14 +39,14 @@ if with_freedreno_kgsl
)
endif
libdrm_freedreno = library(
libdrm_freedreno = shared_library(
'drm_freedreno',
[files_freedreno, config_file],
c_args : libdrm_c_args,
include_directories : [inc_root, inc_drm],
dependencies : [dep_valgrind, dep_threads, dep_rt, dep_atomic_ops],
dependencies : [dep_valgrind, dep_pthread_stubs, dep_rt, dep_atomic_ops],
link_with : libdrm,
version : '1.@0@.0'.format(patch_ver),
version : '1.0.0',
install : true,
)
@ -55,17 +55,17 @@ ext_libdrm_freedreno = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_freedreno', ext_libdrm_freedreno)
install_headers(
'freedreno_drmif.h', 'freedreno_ringbuffer.h',
subdir : 'freedreno'
)
pkg.generate(
libdrm_freedreno,
name : 'libdrm_freedreno',
libraries : libdrm_freedreno,
subdirs : ['.', 'libdrm', 'freedreno'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to freedreno kernel DRM services',
)
@ -75,6 +75,6 @@ test(
args : [
'--lib', libdrm_freedreno,
'--symbols-file', files('freedreno-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -1,84 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Collabora, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Helper script that reads drm_fourcc.h and writes a static table with the
# simpler format token modifiers
import sys
import re
filename = sys.argv[1]
towrite = sys.argv[2]
fm_re = {
    'intel': r'^#define I915_FORMAT_MOD_(\w+)',
    'others': r'^#define DRM_FORMAT_MOD_((?:ARM|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
    'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
}

def print_fm_intel(f, f_mod):
    f.write(' {{ DRM_MODIFIER_INTEL({}, {}) }},\n'.format(f_mod, f_mod))

# generic write func
def print_fm(f, vendor, mod, f_name):
    f.write(' {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name))

with open(filename, "r") as f:
    data = f.read()
    for k, v in fm_re.items():
        fm_re[k] = re.findall(v, data, flags=re.M)

with open(towrite, "w") as f:
    f.write('''\
/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
that script instead of adding here entries manually! */
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
''')
    f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID) },\n')
    f.write(' { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')

    for entry in fm_re['intel']:
        print_fm_intel(f, entry)

    for entry in fm_re['others']:
        (vendor, mod) = entry.split('_', 1)
        if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
            continue
        print_fm(f, vendor, mod, mod)

    f.write('''\
};
''')

    f.write('''\
static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
''')
    for entry in fm_re['vendors']:
        f.write(" {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
    f.write('''\
};
''')

View file

@ -71,7 +71,7 @@ Note: One should not do _any_ changes to the files apart from the steps below.
In order to update the files do the following:
- Switch to a Linux kernel tree/branch which is not rebased.
For example: drm-next (https://gitlab.freedesktop.org/drm/kernel/)
For example: drm-next (https://cgit.freedesktop.org/drm/drm)
- Install the headers via `make headers_install' to a separate location.
- Copy the drm header[s] + git add + git commit.
- Note: Your commit message must include:

View file

@ -54,9 +54,6 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@ -74,9 +71,6 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
@ -86,7 +80,7 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
* pages of system memory, allows GPU access system memory in a linearized
* pages of system memory, allows GPU access system memory in a linezrized
* fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
@ -100,9 +94,6 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@ -110,14 +101,12 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
AMDGPU_GEM_DOMAIN_OA)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@ -127,6 +116,8 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag that create shadow bo(GTT) while allocating vram bo */
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
@ -147,36 +138,6 @@ extern "C" {
* accessing it with various hw blocks
*/
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
/* Flag that BO will be used only in preemptible context, which does
* not require GTT memory accounting
*/
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
/* Flag that BO can be discarded under memory pressure without keeping the
* content.
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
* May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_COHERENT (1 << 13)
/* Flag that BO should not be cached by GPU. Coherent without having to flush
* GPU caches explicitly
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
* atomics. May depend on GPU instructions to flush caches to device scope
* explicitly, promoting them to system scope automatically.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -243,8 +204,6 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4
#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
@ -255,17 +214,15 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occurred after ctx created */
/* indicate gpu reset occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occurred after ctx created */
/* indicate vram lost occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@ -279,18 +236,10 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
/* select a stable profiling pstate for perfmon tools */
#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
/** Flags */
/** For future use, no flags defined so far */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
@ -311,11 +260,6 @@ union drm_amdgpu_ctx_out {
/** Reset status since the last call of the ioctl. */
__u32 reset_status;
} state;
struct {
__u32 flags;
__u32 _pad;
} pstate;
};
union drm_amdgpu_ctx {
@ -323,261 +267,6 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
/* user queue IOCTL operations */
#define AMDGPU_USERQ_OP_CREATE 1
#define AMDGPU_USERQ_OP_FREE 2
/* queue priority levels */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
/* for queues that need access to protected content */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
/*
* This structure is a container to pass input configuration
* info for all supported userqueue related operations.
* For operation AMDGPU_USERQ_OP_CREATE: user is expected
 * to set all fields, except the parameter 'queue_id'.
* For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
 * to be set is 'queue_id', everything else is ignored.
*/
struct drm_amdgpu_userq_in {
/** AMDGPU_USERQ_OP_* */
__u32 op;
/** Queue id passed for operation USERQ_OP_FREE */
__u32 queue_id;
/** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
__u32 ip_type;
/**
* @doorbell_handle: the handle of doorbell GEM object
* associated to this userqueue client.
*/
__u32 doorbell_handle;
/**
* @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
* Kernel will generate absolute doorbell offset using doorbell_handle
* and doorbell_offset in the doorbell bo.
*/
__u32 doorbell_offset;
/**
* @flags: flags used for queue parameters
*/
__u32 flags;
/**
* @queue_va: Virtual address of the GPU memory which holds the queue
* object. The queue holds the workload packets.
*/
__u64 queue_va;
/**
* @queue_size: Size of the queue in bytes, this needs to be 256-byte
* aligned.
*/
__u64 queue_size;
/**
* @rptr_va : Virtual address of the GPU memory which holds the ring RPTR.
 * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*/
__u64 rptr_va;
/**
* @wptr_va : Virtual address of the GPU memory which holds the ring WPTR.
 * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
*
* Queue, RPTR and WPTR can come from the same object, as long as the size
* and alignment related requirements are met.
*/
__u64 wptr_va;
/**
* @mqd: MQD (memory queue descriptor) is a set of parameters which allow
* the GPU to uniquely define and identify a usermode queue.
*
* MQD data can be of different size for different GPU IP/engine and
* their respective versions/revisions, so this points to a __u64 *
* which holds IP specific MQD of this usermode queue.
*/
__u64 mqd;
/**
 * @mqd_size: size of the MQD data in bytes; it must match the MQD structure
 * size of the respective engine/revision defined in UAPI, e.g. for
 * gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11).
*/
__u64 mqd_size;
};
/* The structure to carry output of userqueue ops */
struct drm_amdgpu_userq_out {
/**
* For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
* queue ID to represent the newly created userqueue in the system, otherwise
* it should be ignored.
*/
__u32 queue_id;
__u32 _pad;
};
union drm_amdgpu_userq {
struct drm_amdgpu_userq_in in;
struct drm_amdgpu_userq_out out;
};
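
For illustration only, here is a minimal sketch of driving the user-queue ioctl above. It assumes an already-open render-node fd, GPU virtual addresses the caller has already mapped for the ring, RPTR and WPTR, and an MQD filled according to drm_amdgpu_userq_mqd_gfx11 below; the helper name is invented here and error handling is reduced to the bare minimum.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

/* Sketch: create a GFX user queue and return its queue_id (or -1). */
static int create_gfx_userq(int fd, __u32 doorbell_handle,
                            __u64 queue_va, __u64 rptr_va, __u64 wptr_va,
                            const struct drm_amdgpu_userq_mqd_gfx11 *mqd)
{
    union drm_amdgpu_userq args;

    memset(&args, 0, sizeof(args));
    args.in.op = AMDGPU_USERQ_OP_CREATE;
    args.in.ip_type = AMDGPU_HW_IP_GFX;
    args.in.doorbell_handle = doorbell_handle;
    args.in.doorbell_offset = 0;
    args.in.queue_va = queue_va;     /* ring buffer, 256-byte aligned */
    args.in.queue_size = 64 * 1024;
    args.in.rptr_va = rptr_va;       /* 8 bytes, 8-byte aligned */
    args.in.wptr_va = wptr_va;
    args.in.mqd = (__u64)(uintptr_t)mqd;
    args.in.mqd_size = sizeof(*mqd);

    if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args) != 0)
        return -1;
    return args.out.queue_id;        /* later released with AMDGPU_USERQ_OP_FREE */
}
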
/* GFX V11 IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_gfx11 {
/**
* @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 shadow_va;
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 csa_va;
};
/* GFX V11 SDMA IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_sdma_gfx11 {
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
 * This must be from a separate GPU object, and use the AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 csa_va;
};
/* GFX V11 Compute IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_compute_gfx11 {
/**
* @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
 * This must be from a separate GPU object, and use the AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 eop_va;
};
/* userq signal/wait ioctl */
struct drm_amdgpu_userq_signal {
/**
* @queue_id: Queue handle used by the userq fence creation function
* to retrieve the WPTR.
*/
__u32 queue_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to be signaled.
*/
__u64 syncobj_handles;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u64 num_syncobj_handles;
/**
* @bo_read_handles: The list of BO handles that the submitted user queue job
* is using for read only. This will update BO fences in the kernel.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of BO handles that the submitted user queue job
* is using for write only. This will update BO fences in the kernel.
*/
__u64 bo_write_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
};
struct drm_amdgpu_userq_fence_info {
/**
* @va: A gpu address allocated for each queue which stores the
* read pointer (RPTR) value.
*/
__u64 va;
/**
* @value: A 64 bit value represents the write pointer (WPTR) of the
* queue commands which compared with the RPTR value to signal the
* fences.
*/
__u64 value;
};
struct drm_amdgpu_userq_wait {
/**
* @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
* wait queue and maintain the fence driver references in it.
*/
__u32 waitq_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 syncobj_handles;
/**
* @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
* the user queue job to get the va/value pairs at given @syncobj_timeline_points.
*/
__u64 syncobj_timeline_handles;
/**
* @syncobj_timeline_points: The list of timeline syncobj points submitted by the
* user queue job for the corresponding @syncobj_timeline_handles.
*/
__u64 syncobj_timeline_points;
/**
* @bo_read_handles: The list of read BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of write BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_write_handles;
/**
* @num_syncobj_timeline_handles: A count that represents the number of timeline
* syncobj handles in @syncobj_timeline_handles.
*/
__u16 num_syncobj_timeline_handles;
/**
* @num_fences: This field can be used both as input and output. As input it defines
* the maximum number of fences that can be returned and as output it will specify
* how many fences were actually returned from the ioctl.
*/
__u16 num_fences;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u32 num_syncobj_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
/**
* @out_fences: The field is a return value from the ioctl containing the list of
* address/value pairs to wait for.
*/
__u64 out_fences;
};
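
A hedged sketch of the wait side follows; it only exercises the fields shown above (one read BO and a fixed-capacity fence array) and leaves syncobj handling out. The helper name and the capacity of 16 are arbitrary choices, not part of the UAPI.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

/* Sketch: ask which va/value pairs a read of bo_handle must wait on. */
static int collect_wait_fences(int fd, __u32 waitq_id, __u32 bo_handle)
{
    struct drm_amdgpu_userq_fence_info fences[16];
    struct drm_amdgpu_userq_wait args;

    memset(&args, 0, sizeof(args));
    args.waitq_id = waitq_id;
    args.bo_read_handles = (uintptr_t)&bo_handle;
    args.num_bo_read_handles = 1;
    args.out_fences = (uintptr_t)fences;
    args.num_fences = 16;                 /* in: capacity of fences[] */

    if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &args) != 0)
        return -1;

    /* out: args.num_fences now says how many entries were filled; each
     * entry gives the address to poll and the value it must reach. */
    return args.num_fences;
}
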
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@ -653,7 +342,7 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
/* GFX9 - GFX11: */
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
@ -667,17 +356,6 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* GFX12 and later: */
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
/* These are DCC recompression setting for memory management: */
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
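
As a small illustration, a GFX9+ scanout surface could compose its tiling word with the helper above; the swizzle mode value 9 is only a placeholder, and the matching AMDGPU_TILING_GET helper (not visible in this excerpt) reverses the operation.

/* Sketch: pack a swizzle mode and the scanout bit into one tiling word. */
__u64 tiling_flags = AMDGPU_TILING_SET(SWIZZLE_MODE, 9) |
                     AMDGPU_TILING_SET(SCANOUT, 1);
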
@ -824,18 +502,16 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
/* Use Non Coherent MTYPE instead of default MTYPE */
/* Use NC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
/* Use Write Combine MTYPE instead of default MTYPE */
/* Use WC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
/* Use Cache Coherent MTYPE instead of default MTYPE */
/* Use CC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UnCached MTYPE instead of default MTYPE */
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
/* Use RW MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
/* don't allocate MALL */
#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@ -851,19 +527,6 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
/**
* vm_timeline_point is a sequence number used to add new timeline point.
*/
__u64 vm_timeline_point;
/**
* The vm page table update fence is installed in given vm_timeline_syncobj_out
* at vm_timeline_point.
*/
__u32 vm_timeline_syncobj_out;
/** the number of syncobj handles in @input_fence_syncobj_handles */
__u32 num_syncobj_handles;
/** Array of sync object handle to wait for given input fences */
__u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
@ -873,14 +536,9 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
/*
* From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
* both encoding and decoding jobs.
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@ -893,7 +551,6 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -1010,33 +667,12 @@ struct drm_amdgpu_cs_chunk_data {
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
__u64 flags;
};
/*
/**
 * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
/*
* Query h/w info: Flag identifying VF/PF/PT mode
*
*/
#define AMDGPU_IDS_FLAGS_MODE_MASK 0x300
#define AMDGPU_IDS_FLAGS_MODE_SHIFT 0x8
#define AMDGPU_IDS_FLAGS_MODE_PF 0x0
#define AMDGPU_IDS_FLAGS_MODE_VF 0x1
#define AMDGPU_IDS_FLAGS_MODE_PT 0x2
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@ -1087,22 +723,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
/* Subquery id: Query TOC firmware version */
#define AMDGPU_INFO_FW_TOC 0x15
/* Subquery id: Query CAP firmware version */
#define AMDGPU_INFO_FW_CAP 0x16
/* Subquery id: Query GFX RLCP firmware version */
#define AMDGPU_INFO_FW_GFX_RLCP 0x17
/* Subquery id: Query GFX RLCV firmware version */
#define AMDGPU_INFO_FW_GFX_RLCV 0x18
/* Subquery id: Query MES_KIQ firmware version */
#define AMDGPU_INFO_FW_MES_KIQ 0x19
/* Subquery id: Query MES firmware version */
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* Subquery id: Query VPE firmware version */
#define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@ -1132,8 +752,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
/* Subquery id: Query vbios info */
#define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@ -1156,17 +774,12 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Subquery id: Query GPU peak pstate shader clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
/* Subquery id: Query GPU peak pstate memory clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Subquery id: Query input GPU power */
#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
@ -1195,18 +808,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS 0x21
/* Subquery id: Decode */
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
/* query FW object size and alignment */
#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@ -1274,10 +875,6 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
struct {
__u32 type;
} video_cap;
};
};
@ -1338,15 +935,6 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
struct drm_amdgpu_info_vbios {
__u8 name[64];
__u8 vbios_pn[64];
__u32 version;
__u32 pad;
__u8 vbios_ver_str[32];
__u8 date[32];
};
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@ -1357,9 +945,6 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
@ -1385,8 +970,7 @@ struct drm_amdgpu_info_device {
__u32 enabled_rb_pipes_mask;
__u32 num_rb_pipes;
__u32 num_hw_gfx_contexts;
/* PCIe version (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_gen;
__u32 _pad;
__u64 ids_flags;
/** Starting virtual address for UMDs. */
__u64 virtual_address_offset;
@ -1433,8 +1017,7 @@ struct drm_amdgpu_info_device {
__u32 gs_prim_buffer_depth;
/* max gs wavefront per vgt*/
__u32 max_gs_waves_per_vgt;
/* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_num_lanes;
__u32 _pad1;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
/** Starting high virtual address for UMDs. */
@ -1445,29 +1028,6 @@ struct drm_amdgpu_info_device {
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
__u64 min_engine_clock;
__u64 min_memory_clock;
/* The following fields are only set on gfx11+, older chips set 0. */
__u32 tcp_cache_size; /* AKA GL0, VMEM cache */
__u32 num_sqc_per_wgp;
__u32 sqc_data_cache_size; /* AKA SMEM cache */
__u32 sqc_inst_cache_size;
__u32 gl1c_cache_size;
__u32 gl2c_cache_size;
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
/* shadow area size for gfx11 */
__u32 shadow_size;
/* shadow area base virtual alignment for gfx11 */
__u32 shadow_alignment;
/* context save area size for gfx11 */
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
/* Userq IP mask (1 << AMDGPU_HW_IP_*) */
__u32 userq_ip_mask;
__u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@ -1482,29 +1042,7 @@ struct drm_amdgpu_info_hw_ip {
__u32 ib_size_alignment;
/** Bitmask of available rings. Bit 0 means ring 0, etc. */
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
};
/* GFX metadata BO sizes and alignment info (in bytes) */
struct drm_amdgpu_info_uq_fw_areas_gfx {
/* shadow area size */
__u32 shadow_size;
/* shadow area base virtual mem alignment */
__u32 shadow_alignment;
/* context save area size */
__u32 csa_size;
/* context save area base virtual mem alignment */
__u32 csa_alignment;
};
/* IP specific metadata related information used in the
* subquery AMDGPU_INFO_UQ_FW_AREAS
*/
struct drm_amdgpu_info_uq_fw_areas {
union {
struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
};
__u32 _pad;
};
struct drm_amdgpu_info_num_handles {
@ -1532,44 +1070,6 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
struct drm_amdgpu_info_video_codec_info {
__u32 valid;
__u32 max_width;
__u32 max_height;
__u32 max_pixels_per_frame;
__u32 max_level;
__u32 pad;
};
struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
#define AMDGPU_VMHUB_TYPE_MASK 0xff
#define AMDGPU_VMHUB_TYPE_SHIFT 0
#define AMDGPU_VMHUB_TYPE_GFX 0
#define AMDGPU_VMHUB_TYPE_MM0 1
#define AMDGPU_VMHUB_TYPE_MM1 2
#define AMDGPU_VMHUB_IDX_MASK 0xff00
#define AMDGPU_VMHUB_IDX_SHIFT 8
struct drm_amdgpu_info_gpuvm_fault {
__u64 addr;
__u32 status;
__u32 vmhub;
};
/*
* Supported GPU families
*/
@ -1582,14 +1082,6 @@ struct drm_amdgpu_info_gpuvm_fault {
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */
#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
#if defined(__cplusplus)
}

View file

@ -1,10 +1,11 @@
/*
/**
* \file drm.h
* Header for the Direct Rendering Manager
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*
* Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
* \par Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
/*
@ -78,7 +79,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/*
/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@ -94,7 +95,7 @@ struct drm_clip_rect {
unsigned short y2;
};
/*
/**
* Drawable information.
*/
struct drm_drawable_info {
@ -102,7 +103,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
/*
/**
* Texture region,
*/
struct drm_tex_region {
@ -113,7 +114,7 @@ struct drm_tex_region {
unsigned int age;
};
/*
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@ -125,7 +126,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
/*
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@ -142,7 +143,7 @@ struct drm_version {
char *desc; /**< User-space buffer to hold desc */
};
/*
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@ -161,7 +162,7 @@ struct drm_block {
int unused;
};
/*
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@ -176,7 +177,7 @@ struct drm_control {
int irq;
};
/*
/**
* Type of memory to map.
*/
enum drm_map_type {
@ -188,7 +189,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
/*
/**
* Memory mapping flags.
*/
enum drm_map_flags {
@ -207,7 +208,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
/*
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@ -224,7 +225,7 @@ struct drm_map {
/* Private data */
};
/*
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@ -256,7 +257,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
/*
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@ -267,7 +268,7 @@ struct drm_stats {
} data[15];
};
/*
/**
* Hardware locking flags.
*/
enum drm_lock_flags {
@ -282,7 +283,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
/*
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@ -292,7 +293,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
/*
/**
* DMA flags
*
* \warning
@ -321,7 +322,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
/*
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@ -344,7 +345,7 @@ struct drm_buf_desc {
*/
};
/*
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@ -352,7 +353,7 @@ struct drm_buf_info {
struct drm_buf_desc *list;
};
/*
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@ -360,7 +361,7 @@ struct drm_buf_free {
int *list;
};
/*
/**
* Buffer information
*
* \sa drm_buf_map.
@ -372,7 +373,7 @@ struct drm_buf_pub {
void *address; /**< Address of buffer */
};
/*
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@ -385,7 +386,7 @@ struct drm_buf_map {
struct drm_buf_pub *list; /**< Buffer information */
};
/*
/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@ -410,7 +411,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
/*
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@ -420,7 +421,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
/*
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@ -428,14 +429,14 @@ struct drm_ctx_res {
struct drm_ctx *contexts;
};
/*
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
/*
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@ -449,14 +450,14 @@ struct drm_update_draw {
unsigned long long data;
};
/*
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
/*
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@ -498,7 +499,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
/*
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@ -511,7 +512,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/*
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@ -521,7 +522,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
/*
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@ -530,7 +531,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
/*
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@ -542,7 +543,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
/*
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@ -552,7 +553,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
/*
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@ -573,7 +574,7 @@ struct drm_agp_info {
unsigned short id_device;
};
/*
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@ -581,7 +582,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
/*
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@ -591,223 +592,61 @@ struct drm_set_version {
int drm_dd_minor;
};
/**
* struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
* @handle: Handle of the object to be closed.
* @pad: Padding.
*
* Releases the handle to an mm object.
*/
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
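
A minimal sketch of using this structure, assuming an already-open DRM fd:

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: release a GEM handle once user-space no longer needs it. */
static void gem_close(int fd, __u32 handle)
{
    struct drm_gem_close args = { .handle = handle };

    ioctl(fd, DRM_IOCTL_GEM_CLOSE, &args);   /* error handling omitted */
}
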
/**
* struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
* @handle: Handle for the object being named.
* @name: Returned global name.
*
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/**
* struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
* @name: Name of object being opened.
* @handle: Returned handle for the object.
* @size: Returned size of the object
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
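
The two structures above pair up for legacy name-based sharing. A hedged sketch (helper names invented here) could look like the following, keeping in mind that a flink name holds no reference and that dma-buf/PRIME is the preferred sharing path today.

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: publish a handle as a global name in one process... */
static int gem_flink(int fd, __u32 handle, __u32 *name_out)
{
    struct drm_gem_flink flink = { .handle = handle };

    if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
        return -1;
    *name_out = flink.name;
    return 0;
}

/* ...and re-open it by name elsewhere, which does take a reference. */
static int gem_open_by_name(int fd, __u32 name, __u32 *handle_out, __u64 *size_out)
{
    struct drm_gem_open args = { .name = name };

    if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &args) != 0)
        return -1;
    *handle_out = args.handle;
    *size_out = args.size;
    return 0;
}
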
/**
* struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
* @handle: The handle of a gem object.
* @new_handle: An available gem handle.
*
* This ioctl changes the handle of a GEM object to the specified one.
* The new handle must be unused. On success the old handle is closed
* and all further IOCTL should refer to the new handle only.
* Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
*/
struct drm_gem_change_handle {
__u32 handle;
__u32 new_handle;
};
/**
* DRM_CAP_DUMB_BUFFER
*
* If set to 1, the driver supports creating dumb buffers via the
* &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
*/
#define DRM_CAP_DUMB_BUFFER 0x1
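
Capabilities like this one are read back through DRM_IOCTL_GET_CAP with the struct drm_get_cap defined further down; a small sketch follows (libdrm's drmGetCap() wraps the same call).

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: probe whether the device can allocate dumb buffers. */
static int supports_dumb_buffers(int fd)
{
    struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

    if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) != 0)
        return 0;
    return cap.value != 0;
}
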
/**
* DRM_CAP_VBLANK_HIGH_CRTC
*
* If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
* in the high bits of &drm_wait_vblank_request.type.
*
* Starting kernel version 2.6.39, this capability is always set to 1.
*/
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
/**
* DRM_CAP_DUMB_PREFERRED_DEPTH
*
* The preferred bit depth for dumb buffers.
*
* The bit depth is the number of bits used to indicate the color of a single
* pixel excluding any padding. This is different from the number of bits per
* pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
* pixel.
*
* Note that this preference only applies to dumb buffers, it's irrelevant for
* other types of buffers.
*/
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
/**
* DRM_CAP_DUMB_PREFER_SHADOW
*
* If set to 1, the driver prefers userspace to render to a shadow buffer
* instead of directly rendering to a dumb buffer. For best speed, userspace
* should do streaming ordered memory copies into the dumb buffer and never
* read from it.
*
* Note that this preference only applies to dumb buffers, it's irrelevant for
* other types of buffers.
*/
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
/**
* DRM_CAP_PRIME
*
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
* Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
* &DRM_PRIME_CAP_EXPORT are always advertised.
*
* PRIME buffers are exposed as dma-buf file descriptors.
* See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
* DRM_PRIME_CAP_IMPORT
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
* DRM_PRIME_CAP_EXPORT
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
* DRM_CAP_TIMESTAMP_MONOTONIC
*
* If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
* struct drm_event_vblank. If set to 1, the kernel will report timestamps with
* ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
* clocks.
*
* Starting from kernel version 2.6.39, the default value for this capability
* is 1. Starting kernel version 4.15, this capability is always set to 1.
*/
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
* page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
* DRM_CAP_CURSOR_WIDTH
*
* The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
* width x height combination for the hardware cursor. The intention is that a
* hardware agnostic userspace can query a cursor plane size to use.
/*
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
* combination for the hardware cursor. The intention is that a hardware
* agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
/**
* DRM_CAP_CURSOR_HEIGHT
*
* See &DRM_CAP_CURSOR_WIDTH.
*/
#define DRM_CAP_CURSOR_HEIGHT 0x9
/**
* DRM_CAP_ADDFB2_MODIFIERS
*
* If set to 1, the driver supports supplying modifiers in the
* &DRM_IOCTL_MODE_ADDFB2 ioctl.
*/
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/**
* DRM_CAP_PAGE_FLIP_TARGET
*
* If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
* &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
* &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
* ioctl.
*/
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
/**
* DRM_CAP_CRTC_IN_VBLANK_EVENT
*
* If set to 1, the kernel supports reporting the CRTC ID in
* &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
* &DRM_EVENT_FLIP_COMPLETE events.
*
* Starting kernel version 4.12, this capability is always set to 1.
*/
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/**
* DRM_CAP_SYNCOBJ
*
* If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
* DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
* commits.
*/
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
@ -816,12 +655,9 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
* If set to 1, the DRM core will expose the stereo 3D capabilities of the
* if set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
* drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
*
* This capability is always supported for all drivers starting from kernel
* version 3.13.
* drm_mode_modeinfo.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@ -830,25 +666,13 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
*
* This capability has been introduced in kernel version 3.15. Starting from
* kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
* If set to 1, the DRM core will expose atomic properties to userspace. This
* implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
* &DRM_CLIENT_CAP_ASPECT_RATIO.
*
* If the driver doesn't support atomic mode-setting, enabling this capability
* will fail with -EOPNOTSUPP.
*
* This capability has been introduced in kernel version 4.0. Starting from
* kernel version 4.2, this capability is always supported for atomic-capable
* drivers.
* If set to 1, the DRM core will expose atomic properties to userspace
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@ -856,10 +680,6 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
* See ``DRM_MODE_FLAG_PIC_AR_*``.
*
* This capability is always supported for all drivers starting from kernel
* version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@ -867,55 +687,12 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
* writing back to memory the scene setup in the commit. The client must enable
* &DRM_CLIENT_CAP_ATOMIC first.
*
* This capability is always supported for atomic-capable drivers starting from
* kernel version 4.19.
* writing back to memory the scene setup in the commit. Depends on client
* also supporting DRM_CLIENT_CAP_ATOMIC
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
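
Client capabilities are opted into with DRM_IOCTL_SET_CLIENT_CAP and the struct drm_set_client_cap defined below. A sketch of the ordering required by the comment above (atomic first, then writeback connectors):

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: enable atomic, then expose writeback connectors. */
static int enable_writeback_connectors(int fd)
{
    struct drm_set_client_cap cap;

    cap.capability = DRM_CLIENT_CAP_ATOMIC;
    cap.value = 1;
    if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) != 0)
        return -1;

    cap.capability = DRM_CLIENT_CAP_WRITEBACK_CONNECTORS;
    cap.value = 1;
    return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}
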
/**
* DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
*
* Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
* virtualbox) have additional restrictions for cursor planes (thus
* making cursor planes on those drivers not truly universal,) e.g.
* they need cursor planes to act like one would expect from a mouse
* cursor and have correctly set hotspot properties.
* If this client cap is not set the DRM core will hide cursor plane on
* those virtualized drivers because not setting it implies that the
 * client is not capable of dealing with those extra restrictions.
* Clients which do set cursor hotspot and treat the cursor plane
* like a mouse cursor should set this property.
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*
* Setting this property on drivers which do not special case
* cursor planes (i.e. non-virtualized drivers) will return
* EOPNOTSUPP, which can be used by userspace to gauge
* requirements of the hardware/drivers they're running on.
*
* This capability is always supported for atomic-capable virtualized
* drivers starting from kernel version 6.6.
*/
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
/**
* DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
*
* If set to 1 the DRM core will allow setting the COLOR_PIPELINE
* property on a &drm_plane, as well as drm_colorop properties.
*
* Setting of these plane properties will be rejected when this client
* cap is set:
* - COLOR_ENCODING
* - COLOR_RANGE
*
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*/
#define DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE 7
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@ -945,17 +722,13 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
__u64 point;
};
struct drm_syncobj_transfer {
@ -970,7 +743,6 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@ -979,14 +751,6 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@ -999,35 +763,6 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
/**
* struct drm_syncobj_eventfd
* @handle: syncobj handle.
* @flags: Zero to wait for the point to be signalled, or
* &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
* available for the point.
* @point: syncobj timeline point (set to zero for binary syncobjs).
* @fd: Existing eventfd to sent events to.
* @pad: Must be zero.
*
* Register an eventfd to be signalled by a syncobj. The eventfd counter will
* be incremented by one.
*/
struct drm_syncobj_eventfd {
__u32 handle;
__u32 flags;
__u64 point;
__s32 fd;
__u32 pad;
};
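
A sketch of registering an eventfd against a timeline point, assuming a valid syncobj handle; the helper name is made up here and error handling is simplified.

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: get an eventfd that fires when the given point signals. */
static int watch_syncobj_point(int fd, __u32 syncobj, __u64 point)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    struct drm_syncobj_eventfd args = {
        .handle = syncobj,
        .flags = 0,        /* wait for the point to be signalled */
        .point = point,
        .fd = efd,
    };

    if (efd < 0 || ioctl(fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) != 0)
        return -1;
    return efd;            /* poll()/read() this fd to observe the event */
}
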
@ -1068,13 +803,6 @@ struct drm_crtc_queue_sequence {
__u64 user_data; /* user data passed to event */
};
#define DRM_CLIENT_NAME_MAX_LEN 64
struct drm_set_client_name {
__u64 name_len;
__u64 name;
};
#if defined(__cplusplus)
}
#endif
@ -1100,19 +828,6 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/**
* DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
*
* GEM handles are not reference-counted by the kernel. User-space is
* responsible for managing their lifetime. For example, if user-space imports
* the same memory object twice on the same DRM file description, the same GEM
* handle is returned by both imports, and user-space needs to ensure
* &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
* when a memory object is allocated, then exported and imported again on the
* same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
* and always returns fresh new GEM handles even if an existing GEM handle
* already refers to the same memory object before the IOCTL is performed.
*/
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
@ -1153,37 +868,7 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
/**
* DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
*
* User-space sets &drm_prime_handle.handle with the GEM handle to export and
* &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
* &drm_prime_handle.fd.
*
* The export can fail for any driver-specific reason, e.g. because export is
* not supported for this specific GEM handle (but might be for others).
*
* Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
*/
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
/**
* DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
*
* User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
* import, and gets back a GEM handle in &drm_prime_handle.handle.
* &drm_prime_handle.flags is unused.
*
* If an existing GEM handle refers to the memory object backing the DMA-BUF,
* that GEM handle is returned. Therefore user-space which needs to handle
* arbitrary DMA-BUFs must have a user-space lookup data structure to manually
* reference-count duplicated GEM handles. For more information see
* &DRM_IOCTL_GEM_CLOSE.
*
* The import can fail for any driver-specific reason, e.g. because import is
* only supported for DMA-BUFs allocated on this DRM device.
*
* Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
*/
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
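
A hedged sketch of both directions, with the export flags left to the caller (DRM_CLOEXEC and DRM_RDWR are the usual values):

#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: export a GEM handle as a dma-buf file descriptor. */
static int export_dmabuf(int fd, __u32 handle, __u32 flags, int *dmabuf_out)
{
    struct drm_prime_handle args = { .handle = handle, .flags = flags };

    if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0)
        return -1;
    *dmabuf_out = args.fd;
    return 0;
}

/* Sketch: import a dma-buf fd, getting back a (possibly existing) handle. */
static int import_dmabuf(int fd, int dmabuf, __u32 *handle_out)
{
    struct drm_prime_handle args = { .fd = dmabuf };

    if (ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) != 0)
        return -1;
    *handle_out = args.handle;
    return 0;
}
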
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
@ -1221,40 +906,10 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
* DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
*
* This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* Warning: removing a framebuffer currently in-use on an enabled plane will
* disable that plane. The CRTC the plane is linked to may also be disabled
* (depending on driver capabilities).
*/
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
*
* KMS dumb buffers provide a very primitive way to allocate a buffer object
* suitable for scanout and map it for software rendering. KMS dumb buffers are
* not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
* buffers are not suitable to be displayed on any other device than the KMS
* device where they were allocated from. Also see
* :ref:`kms_dumb_buffer_objects`.
*
* The IOCTL argument is a struct drm_mode_create_dumb.
*
* User-space is expected to create a KMS dumb buffer via this IOCTL, then add
* it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
* &DRM_IOCTL_MODE_MAP_DUMB.
*
* &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
* &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
* driver preferences for dumb buffers.
*/
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
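
A sketch of the create-then-map flow described above. The struct drm_mode_create_dumb and struct drm_mode_map_dumb field names used here (width, height, bpp, handle, pitch, size, offset) are assumed from the usual drm_mode.h definitions, which this excerpt does not show.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "drm.h"
#include "drm_mode.h"

/* Sketch: allocate a 32 bpp dumb buffer and map it for CPU rendering.
 * The caller must check the result against MAP_FAILED. */
static void *create_and_map_dumb(int fd, __u32 width, __u32 height,
                                 __u32 *handle_out, __u32 *pitch_out)
{
    struct drm_mode_create_dumb create;
    struct drm_mode_map_dumb map;

    memset(&create, 0, sizeof(create));
    create.width = width;
    create.height = height;
    create.bpp = 32;
    if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) != 0)
        return MAP_FAILED;

    memset(&map, 0, sizeof(map));
    map.handle = create.handle;
    if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) != 0)
        return MAP_FAILED;

    *handle_out = create.handle;   /* usable with DRM_IOCTL_MODE_ADDFB */
    *pitch_out = create.pitch;
    return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, map.offset);
}
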
@ -1287,77 +942,9 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
/**
* DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
*
* This queries metadata about a framebuffer. User-space fills
* &drm_mode_fb_cmd2.fb_id as the input, and the kernels fills the rest of the
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
* will be filled with GEM buffer handles. Fresh new GEM handles are always
* returned, even if another GEM handle referring to the same memory object
* already exists on the DRM file description. The caller is responsible for
* removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
* new handle will be returned for multiple planes in case they use the same
* memory object. Planes are valid until one has a zero handle -- this can be
* used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
*
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
*
* To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
* can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
* close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
* double-close handles which are specified multiple times in the array.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
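
Tying the comment above together, here is a sketch that exports every plane of a framebuffer as a dma-buf and closes each freshly returned GEM handle exactly once; struct drm_mode_fb_cmd2 comes from drm_mode.h as referenced above.

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "drm_mode.h"

/* Sketch: export the planes of fb_id; duplicate handles are skipped so
 * that no handle is closed twice. dmabuf[] entries default to -1. */
static int export_fb_planes(int fd, __u32 fb_id, int dmabuf[4])
{
    struct drm_mode_fb_cmd2 fb;
    int i, j;

    memset(&fb, 0, sizeof(fb));
    fb.fb_id = fb_id;
    if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb) != 0)
        return -1;

    for (i = 0; i < 4; i++)
        dmabuf[i] = -1;

    for (i = 0; i < 4 && fb.handles[i]; i++) {
        struct drm_prime_handle prime = { .handle = fb.handles[i] };
        struct drm_gem_close close_args = { .handle = fb.handles[i] };
        int duplicate = 0;

        for (j = 0; j < i; j++)
            duplicate |= (fb.handles[j] == fb.handles[i]);
        if (duplicate)
            continue;

        if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) == 0)
            dmabuf[i] = prime.fd;
        ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
    }
    return 0;
}
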
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
/**
* DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
*
* This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
* planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
* alive. When the plane no longer uses the framebuffer (because the
* framebuffer is replaced with another one, or the plane is disabled), the
* framebuffer is cleaned up.
*
* This is useful to implement flicker-free transitions between two processes.
*
* Depending on the threat model, user-space may want to ensure that the
* framebuffer doesn't expose any sensitive user information: closed
* framebuffers attached to a plane can be read back by the next DRM master.
*/
#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
/**
* DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file
*
* Having a name allows for easier tracking and debugging.
 * The length of the name (without the terminating NUL) must be
 * <= DRM_CLIENT_NAME_MAX_LEN.
 * The call will fail if the name contains whitespace or non-printable characters.
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
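
A sketch of setting a client name, using only the fields of struct drm_set_client_name shown earlier:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: label this DRM file description for debugging/tracking. */
static int set_client_name(int fd, const char *name)
{
    struct drm_set_client_name args = {
        .name_len = strlen(name),          /* excludes the terminating NUL */
        .name = (uintptr_t)name,
    };

    return ioctl(fd, DRM_IOCTL_SET_CLIENT_NAME, &args);
}
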
/**
* DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
*
* Some applications (notably CRIU) need objects to have specific gem handles.
* This ioctl changes the object at one gem handle to use a new gem handle.
*/
#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@ -1369,49 +956,24 @@ extern "C" {
#define DRM_COMMAND_END 0xA0
/**
* struct drm_event - Header for DRM events
* @type: event type.
* @length: total number of payload bytes (including header).
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* This struct is a header for events written back to user-space on the DRM FD.
* A read on the DRM FD will always only return complete events: e.g. if the
* read buffer is 100 bytes large and there are two 64 byte events pending,
* only one will be returned.
*
* Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
* up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
* &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
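
The read loop implied by the comment above might look like the following sketch; only the generic event types are handled and the buffer size is arbitrary.

#include <string.h>
#include <unistd.h>
#include "drm.h"

/* Sketch: drain pending events; read() only ever returns whole events. */
static void drain_drm_events(int fd)
{
    char buf[1024];
    ssize_t len = read(fd, buf, sizeof(buf));
    ssize_t off = 0;

    while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
        struct drm_event ev;

        memcpy(&ev, buf + off, sizeof(ev));
        switch (ev.type) {
        case DRM_EVENT_VBLANK:
        case DRM_EVENT_FLIP_COMPLETE:
            /* the payload at buf + off is a struct drm_event_vblank */
            break;
        default:
            break;
        }
        off += ev.length;      /* length includes this header */
    }
}
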
/**
* DRM_EVENT_VBLANK - vertical blanking event
*
* This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
* &_DRM_VBLANK_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_VBLANK 0x01
/**
* DRM_EVENT_FLIP_COMPLETE - page-flip completion event
*
* This event is sent in response to an atomic commit or legacy page-flip with
* the &DRM_MODE_PAGE_FLIP_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
* DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
*
* This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
*
* The event payload is a struct drm_event_crtc_sequence.
*/
#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {

File diff suppressed because it is too large

View file

@ -33,15 +33,6 @@
extern "C" {
#endif
/**
* DOC: overview
*
* DRM exposes many UAPI and structure definitions to have a consistent
* and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
* to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
@ -218,27 +209,6 @@ extern "C" {
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
/**
* struct drm_mode_modeinfo - Display mode information.
* @clock: pixel clock in kHz
* @hdisplay: horizontal display size
* @hsync_start: horizontal sync start
* @hsync_end: horizontal sync end
* @htotal: horizontal total size
* @hskew: horizontal skew
* @vdisplay: vertical display size
* @vsync_start: vertical sync start
* @vsync_end: vertical sync end
* @vtotal: vertical total size
* @vscan: vertical scan
* @vrefresh: approximate vertical refresh rate in Hz
* @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
* @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
* @name: string describing the mode resolution
*
* This is the user-space API display mode information structure. For the
* kernel version see struct drm_display_mode.
*/
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
@ -312,48 +282,16 @@ struct drm_mode_set_plane {
__u32 src_w;
};
/**
* struct drm_mode_get_plane - Get plane metadata.
*
* Userspace can perform a GETPLANE ioctl to retrieve information about a
* plane.
*
* To retrieve the number of formats supported, set @count_format_types to zero
* and call the ioctl. @count_format_types will be updated with the value.
*
* To retrieve these formats, allocate an array with the memory needed to store
* @count_format_types formats. Point @format_type_ptr to this array and call
* the ioctl again (with @count_format_types still set to the value returned in
* the first ioctl call).
*/
struct drm_mode_get_plane {
/**
* @plane_id: Object ID of the plane whose information should be
* retrieved. Set by caller.
*/
__u32 plane_id;
/** @crtc_id: Object ID of the current CRTC. */
__u32 crtc_id;
/** @fb_id: Object ID of the current fb. */
__u32 fb_id;
/**
* @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's
* are created and they receive an index, which corresponds to their
* position in the bitmask. Bit N corresponds to
* :ref:`CRTC index<crtc_index>` N.
*/
__u32 possible_crtcs;
/** @gamma_size: Never used. */
__u32 gamma_size;
/** @count_format_types: Number of formats. */
__u32 count_format_types;
/**
* @format_type_ptr: Pointer to ``__u32`` array of formats that are
* supported by the plane. These formats do not require modifiers.
*/
__u64 format_type_ptr;
};
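
A sketch of the two-call pattern described above, using the GETPLANE ioctl that this structure belongs to; memory handling is kept minimal.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "drm_mode.h"

/* Sketch: first call retrieves the format count, second call the data. */
static __u32 *get_plane_formats(int fd, __u32 plane_id, __u32 *count_out)
{
    struct drm_mode_get_plane plane;
    __u32 *formats;

    memset(&plane, 0, sizeof(plane));
    plane.plane_id = plane_id;
    if (ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &plane) != 0)
        return NULL;

    *count_out = plane.count_format_types;
    if (*count_out == 0)
        return NULL;

    formats = calloc(*count_out, sizeof(*formats));
    if (!formats)
        return NULL;
    plane.format_type_ptr = (uintptr_t)formats;
    if (ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &plane) != 0) {
        free(formats);
        return NULL;
    }
    return formats;
}
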
@ -385,19 +323,14 @@ struct drm_mode_get_encoder {
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
enum drm_mode_subconnector {
DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */
DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
DRM_MODE_SUBCONNECTOR_Automatic = 0,
DRM_MODE_SUBCONNECTOR_Unknown = 0,
DRM_MODE_SUBCONNECTOR_DVID = 3,
DRM_MODE_SUBCONNECTOR_DVIA = 4,
DRM_MODE_SUBCONNECTOR_Composite = 5,
DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
DRM_MODE_SUBCONNECTOR_Component = 8,
DRM_MODE_SUBCONNECTOR_SCART = 9,
};
#define DRM_MODE_CONNECTOR_Unknown 0
@ -419,99 +352,28 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
#define DRM_MODE_CONNECTOR_USB 20
/**
* struct drm_mode_get_connector - Get connector metadata.
*
* User-space can perform a GETCONNECTOR ioctl to retrieve information about a
* connector. User-space is expected to retrieve encoders, modes and properties
* by performing this ioctl at least twice: the first time to retrieve the
* number of elements, the second time to retrieve the elements themselves.
*
* To retrieve the number of elements, set @count_props and @count_encoders to
* zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
* drm_mode_modeinfo element.
*
* To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
* @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
* @count_encoders to their capacity.
*
* Performing the ioctl only twice may be racy: the number of elements may have
* changed with a hotplug event in-between the two ioctls. User-space is
* expected to retry the last ioctl until the number of elements stabilizes.
* The kernel won't fill any array which doesn't have the expected length.
*
* **Force-probing a connector**
*
* If the @count_modes field is set to zero and the DRM client is the current
* DRM master, the kernel will perform a forced probe on the connector to
* refresh the connector status, modes and EDID. A forced-probe can be slow,
* might cause flickering and the ioctl will block.
*
* User-space needs to force-probe connectors to ensure their metadata is
* up-to-date at startup and after receiving a hot-plug event. User-space
* may perform a forced-probe when the user explicitly requests it. User-space
* shouldn't perform a forced-probe in other situations.
*/
struct drm_mode_get_connector {
/** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
__u64 encoders_ptr;
/** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
__u64 modes_ptr;
/** @props_ptr: Pointer to ``__u32`` array of property IDs. */
__u64 props_ptr;
/** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
__u64 prop_values_ptr;
/** @count_modes: Number of modes. */
__u32 count_modes;
/** @count_props: Number of properties. */
__u32 count_props;
/** @count_encoders: Number of encoders. */
__u32 count_encoders;
/** @encoder_id: Object ID of the current encoder. */
__u32 encoder_id;
/** @connector_id: Object ID of the connector. */
__u32 connector_id;
/**
* @connector_type: Type of the connector.
*
* See DRM_MODE_CONNECTOR_* defines.
*/
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
/**
* @connector_type_id: Type-specific connector number.
*
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
*
* The (type, type_id) combination is not a stable identifier: the
* type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
/**
* @connection: Status of the connector.
*
* See enum drm_connector_status.
*/
__u32 connection;
/** @mm_width: Width of the connected sink in millimeters. */
__u32 mm_width;
/** @mm_height: Height of the connected sink in millimeters. */
__u32 mm_height;
/**
* @subpixel: Subpixel order of the connected sink.
*
* See enum subpixel_order.
*/
__u32 mm_width; /**< width in millimeters */
__u32 mm_height; /**< height in millimeters */
__u32 subpixel;
/** @pad: Padding, must be zero. */
__u32 pad;
};
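/*
 * Illustrative userspace sketch (not part of this header): libdrm wraps the
 * count/fetch/retry dance described above. drmModeGetConnector() triggers a
 * forced probe, while drmModeGetConnectorCurrent() only reads the cached
 * state, so the expensive call can be reserved for startup and hot-plug.
 */
#include <stdint.h>
#include <xf86drmMode.h>

static drmModeConnector *probe_connector(int fd, uint32_t connector_id,
					 int after_hotplug)
{
	if (after_hotplug)
		return drmModeGetConnector(fd, connector_id);    /* forced probe */
	return drmModeGetConnectorCurrent(fd, connector_id);     /* cached state */
}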
@ -544,74 +406,22 @@ struct drm_mode_get_connector {
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
/**
* struct drm_mode_property_enum - Description for an enum/bitfield entry.
* @value: numeric value for this enum entry.
* @name: symbolic name for this enum entry.
*
* See struct drm_property_enum for details.
*/
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
/**
* struct drm_mode_get_property - Get property metadata.
*
* User-space can perform a GETPROPERTY ioctl to retrieve information about a
* property. The same property may be attached to multiple objects, see
* "Modeset Base Object Abstraction".
*
* The meaning of the @values_ptr field changes depending on the property type.
* See &drm_property.flags for more details.
*
* The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the
* property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For
* backwards compatibility, the kernel will always set @count_enum_blobs to
* zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must
* ignore these two fields if the property has a different type.
*
* User-space is expected to retrieve values and enums by performing this ioctl
* at least twice: the first time to retrieve the number of elements, the
* second time to retrieve the elements themselves.
*
* To retrieve the number of elements, set @count_values and @count_enum_blobs
* to zero, then call the ioctl. @count_values will be updated with the number
* of elements. If the property has the type &DRM_MODE_PROP_ENUM or
* &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well.
*
* To retrieve the elements themselves, allocate an array for @values_ptr and
* set @count_values to its capacity. If the property has the type
* &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for
* @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl
* again will fill the arrays.
*/
struct drm_mode_get_property {
/** @values_ptr: Pointer to a ``__u64`` array. */
__u64 values_ptr;
/** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */
__u64 enum_blob_ptr;
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
/**
* @prop_id: Object ID of the property which should be retrieved. Set
* by the caller.
*/
__u32 prop_id;
/**
* @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for
* a definition of the flags.
*/
__u32 flags;
/**
* @name: Symbolic property name. User-space should use this field to
* recognize properties.
*/
char name[DRM_PROP_NAME_LEN];
/** @count_values: Number of elements in @values_ptr. */
__u32 count_values;
/** @count_enum_blobs: Number of elements in @enum_blob_ptr. */
/* This is only used to count enum values, not blobs. The _blobs is
* simply because of a historical reason, i.e. backwards compat. */
__u32 count_enum_blobs;
};
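/*
 * Illustrative userspace sketch (not part of this header): fetching the enum
 * entries of an enum/bitmask property with the two-call pattern described
 * above. Error handling trimmed.
 */
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static int get_property_enums(int fd, uint32_t prop_id,
			      struct drm_mode_property_enum **enums,
			      uint32_t *count)
{
	struct drm_mode_get_property prop = { .prop_id = prop_id };

	/* First call: learn how many values and enum entries exist. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
		return -1;
	if (!(prop.flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
		return -1;

	*count = prop.count_enum_blobs;
	*enums = calloc(*count, sizeof(**enums));
	prop.enum_blob_ptr = (uintptr_t)*enums;
	prop.count_values = 0; /* only the enum entries are fetched here */

	/* Second call fills the array allocated above. */
	return drmIoctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);
}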
@ -629,7 +439,6 @@ struct drm_mode_connector_set_property {
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_COLOROP 0xfafafafa
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_obj_get_properties {
@ -665,75 +474,43 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
*
* This struct holds frame-buffer metadata. There are two ways to use it:
*
* - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2
* ioctl to register a new frame-buffer. The new frame-buffer object ID will
* be set by the kernel in @fb_id.
* - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to
* fetch metadata about an existing frame-buffer.
*
* In case of planar formats, this struct allows up to 4 buffer objects with
* offsets and pitches per plane. The pitch and offset order are dictated by
* the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
*
* YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
* interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
* samples.
*
* So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
* ``offsets[1]``.
*
* To accommodate tiled, compressed, etc formats, a modifier can be specified.
* For more information see the "Format Modifiers" section. Note that even
* though it looks like we have a modifier per-plane, we in fact do not. The
* modifier for each plane must be identical. Thus all combinations of
* different data layouts for multi-plane formats must be enumerated as
* separate modifiers.
*
* All of the entries in @handles, @pitches, @offsets and @modifier must be
* zero when unused. Warning, for @offsets and @modifier zero can't be used to
* figure out whether the entry is used or not since it's a valid value (a zero
* offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR).
*/
struct drm_mode_fb_cmd2 {
/** @fb_id: Object ID of the frame-buffer. */
__u32 fb_id;
/** @width: Width of the frame-buffer. */
__u32 width;
/** @height: Height of the frame-buffer. */
__u32 height;
/**
* @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in
* ``drm_fourcc.h``.
*/
__u32 pixel_format;
/**
* @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and
* &DRM_MODE_FB_MODIFIERS).
*/
__u32 flags;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
/**
* @handles: GEM buffer handle, one per plane. Set to 0 if the plane is
* unused. The same handle can be used for multiple planes.
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
* To accommodate tiled, compressed, etc formats, a
* modifier can be specified. The default value of zero
* indicates "native" format as specified by the fourcc.
* Vendor specific modifier token. Note that even though
* it looks like we have a modifier per-plane, we in fact
* do not. The modifier for each plane must be identical.
* Thus all combinations of different data layouts for
* multi plane formats must be enumerated as separate
* modifiers.
*/
__u32 handles[4];
/** @pitches: Pitch (aka. stride) in bytes, one per plane. */
__u32 pitches[4];
/** @offsets: Offset into the buffer in bytes, one per plane. */
__u32 offsets[4];
/**
* @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*``
* constants in ``drm_fourcc.h``. All planes must use the same
* modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags.
*/
__u64 modifier[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
__u64 modifier[4]; /* ie, tiling, compress */
};
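/*
 * Illustrative userspace sketch (not part of this header): registering an
 * NV12 frame-buffer whose Y and UV planes live in one tightly packed GEM
 * buffer, following the planar-format rules described above. Buffer
 * allocation and error handling are omitted.
 */
#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drm.h>

static int add_nv12_fb(int fd, uint32_t handle, uint32_t width,
		       uint32_t height, uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd = {
		.width        = width,
		.height       = height,
		.pixel_format = DRM_FORMAT_NV12,
		.handles      = { handle, handle },    /* Y plane, UV plane */
		.pitches      = { pitch, pitch },
		.offsets      = { 0, pitch * height }, /* UV follows Y */
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return -1;
	*fb_id = cmd.fb_id;
	return 0;
}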
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@ -838,29 +615,10 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2| |R|
* |G| = |3 4 5| x |G|
* |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
struct drm_color_ctm_3x4 {
/*
* Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2 3 | | R |
* |G| = |4 5 6 7 | x | G |
* |B| |8 9 10 11| | B |
* |1.0|
*/
__u64 matrix[12];
};
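/*
 * Illustrative sketch (not part of this header): encoding a value in the
 * S31.32 sign-magnitude format used by struct drm_color_ctm and
 * drm_color_ctm_3x4. Range checking is omitted. An identity CTM carries
 * 1.0 (0x100000000) on the diagonal and 0 elsewhere.
 */
#include <stdint.h>
#include <math.h>

static uint64_t s31_32_from_double(double v)
{
	uint64_t sign = v < 0.0 ? 1ULL << 63 : 0;

	return sign | (uint64_t)llround(fabs(v) * 4294967296.0 /* 2^32 */);
}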
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@ -872,266 +630,12 @@ struct drm_color_lut {
__u16 reserved;
};
/*
* struct drm_color_lut32
*
* 32-bit per channel color LUT entry, similar to drm_color_lut.
*/
struct drm_color_lut32 {
__u32 red;
__u32 green;
__u32 blue;
__u32 reserved;
};
/**
* enum drm_colorop_type - Type of color operation
*
* drm_colorops can be of many different types. Each type behaves differently
* and defines a different set of properties. This enum defines all types and
* gives a high-level description.
*/
enum drm_colorop_type {
/**
* @DRM_COLOROP_1D_CURVE:
*
* enum string "1D Curve"
*
* A 1D curve that is being applied to all color channels. The
* curve is specified via the CURVE_1D_TYPE colorop property.
*/
DRM_COLOROP_1D_CURVE,
/**
* @DRM_COLOROP_1D_LUT:
*
* enum string "1D LUT"
*
* A simple 1D LUT of uniformly spaced &drm_color_lut32 entries,
* packed into a blob via the DATA property. The driver's
* expected LUT size is advertised via the SIZE property.
*
* The DATA blob is an array of struct drm_color_lut32 with size
* of "size".
*/
DRM_COLOROP_1D_LUT,
/**
* @DRM_COLOROP_CTM_3X4:
*
* enum string "3x4 Matrix"
*
* A 3x4 matrix. Its values are specified via the
* &drm_color_ctm_3x4 struct provided via the DATA property.
*
 * The DATA blob is a struct drm_color_ctm_3x4 (12 S31.32 values):
* out matrix in
* | R | | 0 1 2 3 | | R |
* | G | = | 4 5 6 7 | x | G |
 * | B |   | 8 9 10 11 |   | B |
*/
DRM_COLOROP_CTM_3X4,
/**
* @DRM_COLOROP_MULTIPLIER:
*
* enum string "Multiplier"
*
* A simple multiplier, applied to all color values. The
* multiplier is specified as a S31.32 via the MULTIPLIER
* property.
*/
DRM_COLOROP_MULTIPLIER,
/**
* @DRM_COLOROP_3D_LUT:
*
* enum string "3D LUT"
*
* A 3D LUT of &drm_color_lut32 entries,
* packed into a blob via the DATA property. The driver's expected
* LUT size is advertised via the SIZE property, i.e., a 3D LUT with
* 17x17x17 entries will have SIZE set to 17.
*
* The DATA blob is a 3D array of struct drm_color_lut32 with dimension
* length of "size".
* The LUT elements are traversed like so:
*
* for B in range 0..n
* for G in range 0..n
* for R in range 0..n
* index = R + n * (G + n * B)
* color = lut3d[index]
*/
DRM_COLOROP_3D_LUT,
};
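/*
 * Illustrative sketch (not part of this header): indexing the 3D LUT DATA
 * blob according to the R-fastest traversal described above, where n is the
 * per-dimension size advertised via the SIZE property.
 */
static inline unsigned int lut3d_index(unsigned int r, unsigned int g,
				       unsigned int b, unsigned int n)
{
	return r + n * (g + n * b);
}
/* e.g. for n == 17 the blob holds 17 * 17 * 17 = 4913 drm_color_lut32 entries. */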
/**
* enum drm_colorop_lut3d_interpolation_type - type of 3DLUT interpolation
*/
enum drm_colorop_lut3d_interpolation_type {
/**
* @DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL:
*
* Tetrahedral 3DLUT interpolation
*/
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
};
/**
* enum drm_colorop_lut1d_interpolation_type - type of interpolation for 1D LUTs
*/
enum drm_colorop_lut1d_interpolation_type {
/**
* @DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR:
*
* Linear interpolation. Values between points of the LUT will be
* linearly interpolated.
*/
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
};
/**
* struct drm_plane_size_hint - Plane size hints
* @width: The width of the plane in pixel
* @height: The height of the plane in pixel
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
*/
struct drm_plane_size_hint {
__u16 width;
__u16 height;
};
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
* HDR Metadata Infoframe as per CTA 861.G spec. This is expected
* to match exactly with the spec.
*
* Userspace is expected to pass the metadata information as per
* the format described in this structure.
*/
struct hdr_metadata_infoframe {
/**
* @eotf: Electro-Optical Transfer Function (EOTF)
* used in the stream.
*/
__u8 eotf;
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u8 metadata_type;
/**
* @display_primaries: Color Primaries of the Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X coordinate of color primary.
* @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X coordinate of whitepoint of color primary.
* @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_display_mastering_luminance;
/**
* @min_display_mastering_luminance: Min Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of
* 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
* represents 6.5535 cd/m2.
*/
__u16 min_display_mastering_luminance;
/**
* @max_cll: Max Content Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_cll;
/**
* @max_fall: Max Frame Average Light Level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
__u16 max_fall;
};
/**
* struct hdr_output_metadata - HDR output metadata
*
* Metadata Information to be passed from userspace
*/
struct hdr_output_metadata {
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u32 metadata_type;
/**
* @hdmi_metadata_type1: HDR Metadata Infoframe.
*/
union {
struct hdr_metadata_infoframe hdmi_metadata_type1;
};
};
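/*
 * Illustrative userspace sketch (not part of this header): the chromaticity
 * fields are coded in units of 0.00002 (0xC350 == 50000 represents 1.0).
 * The EOTF and metadata-type codes follow CTA-861-G (2 == SMPTE ST 2084,
 * 0 == Static Metadata Type 1); the luminance numbers below are placeholder
 * values, not recommendations.
 */
#include <stdint.h>

static uint16_t cta861_coordinate(double v)
{
	return (uint16_t)(v * 50000.0 + 0.5);
}

static void fill_hdr_metadata_pq(struct hdr_output_metadata *m)
{
	m->metadata_type = 0;
	m->hdmi_metadata_type1.eotf = 2;           /* SMPTE ST 2084 (PQ) */
	m->hdmi_metadata_type1.metadata_type = 0;
	m->hdmi_metadata_type1.white_point.x = cta861_coordinate(0.3127); /* D65 */
	m->hdmi_metadata_type1.white_point.y = cta861_coordinate(0.3290);
	m->hdmi_metadata_type1.max_display_mastering_luminance = 1000; /* cd/m2 */
	m->hdmi_metadata_type1.min_display_mastering_luminance = 50;   /* 0.005 cd/m2 */
	m->hdmi_metadata_type1.max_cll = 1000;
	m->hdmi_metadata_type1.max_fall = 400;
}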
/**
* DRM_MODE_PAGE_FLIP_EVENT
*
* Request that the kernel sends back a vblank event (see
* struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
* page-flip is done.
*
* When used with atomic uAPI, one event will be delivered per CRTC included in
* the atomic commit. A CRTC is included in an atomic commit if one of its
* properties is set, or if a property is set on a connector or plane linked
* via the CRTC_ID property to the CRTC. At least one CRTC must be included,
* and all pulled in CRTCs must be either previously or newly powered on (in
* other words, a powered off CRTC which stays off cannot be included in the
* atomic commit).
*/
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
/**
* DRM_MODE_PAGE_FLIP_ASYNC
*
 * Request that the page-flip is performed as soon as possible, i.e. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
*
* When used with atomic uAPI, the driver will return an error if the hardware
* doesn't support performing an asynchronous page-flip for this update.
* User-space should handle this, e.g. by falling back to a regular page-flip.
*
* Note, some hardware might need to perform one last synchronous page-flip
* before being able to switch to asynchronous page-flips. As an exception,
* the driver will return success even though that first page-flip is not
* asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
/**
* DRM_MODE_PAGE_FLIP_FLAGS
*
* Bitmask of flags suitable for &drm_mode_crtc_page_flip_target.flags.
*/
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
DRM_MODE_PAGE_FLIP_ASYNC | \
DRM_MODE_PAGE_FLIP_TARGET)
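/*
 * Illustrative userspace sketch (not part of this header): requesting a
 * page-flip with a completion event and dispatching it through libdrm's
 * event helpers. Assumes fd, crtc_id and fb_id are already set up.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int sequence, unsigned int tv_sec,
		      unsigned int tv_usec, void *user_data)
{
	*(int *)user_data = 1;
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	int done = 0;
	drmEventContext evctx = {
		.version = 2,
		.page_flip_handler = flip_done,
	};

	if (drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, &done))
		return -1;
	while (!done)
		drmHandleEvent(fd, &evctx); /* reads the DRM_EVENT_FLIP_COMPLETE event */
	return 0;
}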
@ -1196,73 +700,13 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
/**
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
* @bpp: color mode
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
* @size: size of the whole buffer in bytes
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
*
* The value of @bpp is a color-mode number describing a specific format
 * or a variant thereof. For most modes the value corresponds to the number
 * of bits per pixel, although there are exceptions. Each color mode
* maps to a DRM format plus a number of modes with similar pixel layout.
* Framebuffer layout is always linear.
*
* Support for all modes and formats is optional. Even if dumb-buffer
* creation with a certain color mode succeeds, it is not guaranteed that
* the DRM driver supports any of the related formats. Most drivers support
* a color mode of 32 with a format of DRM_FORMAT_XRGB8888 on their primary
* plane.
*
* +------------+------------------------+------------------------+
* | Color mode | Framebuffer format | Compatible formats |
* +============+========================+========================+
* | 32 | * DRM_FORMAT_XRGB8888 | * DRM_FORMAT_BGRX8888 |
* | | | * DRM_FORMAT_RGBX8888 |
* | | | * DRM_FORMAT_XBGR8888 |
* +------------+------------------------+------------------------+
* | 24 | * DRM_FORMAT_RGB888 | * DRM_FORMAT_BGR888 |
* +------------+------------------------+------------------------+
* | 16 | * DRM_FORMAT_RGB565 | * DRM_FORMAT_BGR565 |
* +------------+------------------------+------------------------+
* | 15 | * DRM_FORMAT_XRGB1555 | * DRM_FORMAT_BGRX1555 |
* | | | * DRM_FORMAT_RGBX1555 |
* | | | * DRM_FORMAT_XBGR1555 |
* +------------+------------------------+------------------------+
* | 8 | * DRM_FORMAT_C8 | * DRM_FORMAT_D8 |
* | | | * DRM_FORMAT_R8 |
* +------------+------------------------+------------------------+
* | 4 | * DRM_FORMAT_C4 | * DRM_FORMAT_D4 |
* | | | * DRM_FORMAT_R4 |
* +------------+------------------------+------------------------+
* | 2 | * DRM_FORMAT_C2 | * DRM_FORMAT_D2 |
* | | | * DRM_FORMAT_R2 |
* +------------+------------------------+------------------------+
* | 1 | * DRM_FORMAT_C1 | * DRM_FORMAT_D1 |
* | | | * DRM_FORMAT_R1 |
* +------------+------------------------+------------------------+
*
* Color modes of 10, 12, 15, 30 and 64 are only supported for use by
* legacy user space. Please don't use them in new code. Other modes
 * are not supported.
*
* Do not attempt to allocate anything but linear framebuffer memory
* with single-plane RGB data. Allocation of other framebuffer
* layouts requires dedicated ioctls in the respective DRM driver.
*/
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
@ -1285,53 +729,11 @@ struct drm_mode_destroy_dumb {
__u32 handle;
};
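/*
 * Illustrative userspace sketch (not part of this header), following up on
 * the drm_mode_create_dumb documentation above: allocate a 32-bpp dumb
 * buffer and map it for CPU access. Error handling trimmed.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *create_dumb(int fd, uint32_t width, uint32_t height,
			 uint32_t *handle, uint32_t *pitch, uint64_t *size)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;
	*handle = create.handle;
	*pitch = create.pitch;
	*size = create.size;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;
	/* map.offset is a fake mmap offset to be used on the DRM fd. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}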
/**
* DRM_MODE_ATOMIC_TEST_ONLY
*
* Do not apply the atomic commit, instead check whether the hardware supports
* this configuration.
*
* See &drm_mode_config_funcs.atomic_check for more details on test-only
* commits.
*/
/* page-flip flags are valid, plus: */
#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
/**
* DRM_MODE_ATOMIC_NONBLOCK
*
* Do not block while applying the atomic commit. The &DRM_IOCTL_MODE_ATOMIC
* IOCTL returns immediately instead of waiting for the changes to be applied
* in hardware. Note, the driver will still check that the update can be
 * applied before returning.
*/
#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
/**
* DRM_MODE_ATOMIC_ALLOW_MODESET
*
* Allow the update to result in temporary or transient visible artifacts while
* the update is being applied. Applying the update may also take significantly
* more time than a page flip. All visual artifacts will disappear by the time
* the update is completed, as signalled through the vblank event's timestamp
* (see struct drm_event_vblank).
*
* This flag must be set when the KMS update might cause visible artifacts.
 * Without this flag, such a KMS update will return an EINVAL error. What kind of
* update may cause visible artifacts depends on the driver and the hardware.
* User-space that needs to know beforehand if an update might cause visible
* artifacts can use &DRM_MODE_ATOMIC_TEST_ONLY without
* &DRM_MODE_ATOMIC_ALLOW_MODESET to see if it fails.
*
* To the best of the driver's knowledge, visual artifacts are guaranteed to
* not appear when this flag is not set. Some sinks might display visual
* artifacts outside of the driver's control.
*/
#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
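/*
 * Illustrative userspace sketch (not part of this header): probing an atomic
 * update with TEST_ONLY before committing for real. Note that the kernel
 * rejects TEST_ONLY combined with DRM_MODE_PAGE_FLIP_EVENT, so the event is
 * only requested on the final commit. Request construction is omitted.
 */
#include <stdint.h>
#include <xf86drmMode.h>

static int try_commit(int fd, drmModeAtomicReq *req, void *user_data)
{
	uint32_t flags = 0;

	if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL)) {
		/* Retest while allowing visible artifacts / a full modeset. */
		flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
		if (drmModeAtomicCommit(fd, req,
					flags | DRM_MODE_ATOMIC_TEST_ONLY, NULL))
			return -1;
	}
	return drmModeAtomicCommit(fd, req, flags | DRM_MODE_PAGE_FLIP_EVENT,
				   user_data);
}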
/**
* DRM_MODE_ATOMIC_FLAGS
*
* Bitfield of flags accepted by the &DRM_IOCTL_MODE_ATOMIC IOCTL in
* &drm_mode_atomic.flags.
*/
#define DRM_MODE_ATOMIC_FLAGS (\
DRM_MODE_PAGE_FLIP_EVENT |\
DRM_MODE_PAGE_FLIP_ASYNC |\
@ -1401,68 +803,47 @@ struct drm_format_modifier {
};
/**
* struct drm_mode_create_blob - Create New blob property
*
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
/** @data: Pointer to data to copy. */
/** Pointer to data to copy. */
__u64 data;
/** @length: Length of data to copy. */
/** Length of data to copy. */
__u32 length;
/** @blob_id: Return: new property ID. */
/** Return: new property ID. */
__u32 blob_id;
};
/**
* struct drm_mode_destroy_blob - Destroy user blob
* @blob_id: blob_id to destroy
*
* Destroy a user-created blob property.
*
* User-space can release blobs as soon as they do not need to refer to them by
* their blob object ID. For instance, if you are using a MODE_ID blob in an
* atomic commit and you will not make another commit re-using the same ID, you
* can destroy the blob as soon as the commit has been issued, without waiting
* for it to complete.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
};
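/*
 * Illustrative userspace sketch (not part of this header): uploading a mode
 * as a blob for use with the CRTC MODE_ID property, then releasing it once
 * the commit that references it has been issued, per the note above.
 */
#include <stdint.h>
#include <xf86drm.h>

static int create_mode_blob(int fd, const struct drm_mode_modeinfo *mode,
			    uint32_t *blob_id)
{
	struct drm_mode_create_blob create = {
		.data   = (uintptr_t)mode,
		.length = sizeof(*mode),
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create))
		return -1;
	*blob_id = create.blob_id;
	return 0;
}

static void destroy_blob(int fd, uint32_t blob_id)
{
	struct drm_mode_destroy_blob destroy = { .blob_id = blob_id };

	drmIoctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
}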
/**
* struct drm_mode_create_lease - Create lease
*
* Lease mode resources, creating another drm_master.
*
* The @object_ids array must reference at least one CRTC, one connector and
* one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
* the lease can be completely empty.
*/
struct drm_mode_create_lease {
/** @object_ids: Pointer to array of object ids (__u32) */
/** Pointer to array of object ids (__u32) */
__u64 object_ids;
/** @object_count: Number of object ids */
/** Number of object ids */
__u32 object_count;
/** @flags: flags for new FD (O_CLOEXEC, etc) */
/** flags for new FD (O_CLOEXEC, etc) */
__u32 flags;
/** @lessee_id: Return: unique identifier for lessee. */
/** Return: unique identifier for lessee. */
__u32 lessee_id;
/** @fd: Return: file descriptor to new drm_master file */
/** Return: file descriptor to new drm_master file */
__u32 fd;
};
/**
* struct drm_mode_list_lessees - List lessees
*
 * List lessees from a drm_master.
 * List lessees from a drm_master
*/
struct drm_mode_list_lessees {
/**
* @count_lessees: Number of lessees.
*
/** Number of lessees.
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@ -1470,26 +851,19 @@ struct drm_mode_list_lessees {
* the size and then the data.
*/
__u32 count_lessees;
/** @pad: Padding. */
__u32 pad;
/**
* @lessees_ptr: Pointer to lessees.
*
* Pointer to __u64 array of lessee ids
/** Pointer to lessees.
* pointer to __u64 array of lessee ids
*/
__u64 lessees_ptr;
};
/**
* struct drm_mode_get_lease - Get Lease
*
* Get leased objects.
* Get leased objects
*/
struct drm_mode_get_lease {
/**
* @count_objects: Number of leased objects.
*
/** Number of leased objects.
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@ -1497,22 +871,20 @@ struct drm_mode_get_lease {
* the size and then the data.
*/
__u32 count_objects;
/** @pad: Padding. */
__u32 pad;
/**
* @objects_ptr: Pointer to objects.
*
* Pointer to __u32 array of object ids.
/** Pointer to objects.
* pointer to __u32 array of object ids
*/
__u64 objects_ptr;
};
/**
* struct drm_mode_revoke_lease - Revoke lease
* Revoke lease
*/
struct drm_mode_revoke_lease {
/** @lessee_id: Unique ID of lessee */
/** Unique ID of lessee
*/
__u32 lessee_id;
};
@ -1535,16 +907,6 @@ struct drm_mode_rect {
__s32 y2;
};
/**
* struct drm_mode_closefb
* @fb_id: Framebuffer ID.
* @pad: Must be zero.
*/
struct drm_mode_closefb {
__u32 fb_id;
__u32 pad;
};
#if defined(__cplusplus)
}
#endif

File diff suppressed because it is too large

View file

@ -1,8 +1,27 @@
/* SPDX-License-Identifier: MIT */
/* Copyright (c) 2012-2020 NVIDIA Corporation */
/*
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_TEGRA_DRM_H_
#define _UAPI_TEGRA_DRM_H_
#ifndef _TEGRA_DRM_H_
#define _TEGRA_DRM_H_
#include "drm.h"
@ -10,8 +29,6 @@
extern "C" {
#endif
/* Tegra DRM legacy UAPI. Only enabled with STAGING */
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@ -632,8 +649,8 @@ struct drm_tegra_gem_get_flags {
#define DRM_TEGRA_SYNCPT_READ 0x02
#define DRM_TEGRA_SYNCPT_INCR 0x03
#define DRM_TEGRA_SYNCPT_WAIT 0x04
#define DRM_TEGRA_OPEN_CHANNEL 0x05
#define DRM_TEGRA_CLOSE_CHANNEL 0x06
#define DRM_TEGRA_OPEN_CHANNEL 0x05
#define DRM_TEGRA_CLOSE_CHANNEL 0x06
#define DRM_TEGRA_GET_SYNCPT 0x07
#define DRM_TEGRA_SUBMIT 0x08
#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
@ -657,402 +674,6 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
/* New Tegra DRM UAPI */
/*
* Reported by the driver in the `capabilities` field.
*
* DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent
* with regard to the system memory.
*/
#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0)
struct drm_tegra_channel_open {
/**
* @host1x_class: [in]
*
* Host1x class of the engine that will be programmed using this
* channel.
*/
__u32 host1x_class;
/**
* @flags: [in]
*
* Flags.
*/
__u32 flags;
/**
* @context: [out]
*
* Opaque identifier corresponding to the opened channel.
*/
__u32 context;
/**
* @version: [out]
*
* Version of the engine hardware. This can be used by userspace
* to determine how the engine needs to be programmed.
*/
__u32 version;
/**
* @capabilities: [out]
*
* Flags describing the hardware capabilities.
*/
__u32 capabilities;
__u32 padding;
};
struct drm_tegra_channel_close {
/**
* @context: [in]
*
* Identifier of the channel to close.
*/
__u32 context;
__u32 padding;
};
/*
* Mapping flags that can be used to influence how the mapping is created.
*
* DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access
* DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access
*/
#define DRM_TEGRA_CHANNEL_MAP_READ (1 << 0)
#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1)
#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \
DRM_TEGRA_CHANNEL_MAP_WRITE)
struct drm_tegra_channel_map {
/**
* @context: [in]
*
 * Identifier of the channel to make the memory available to.
*/
__u32 context;
/**
* @handle: [in]
*
* GEM handle of the memory to map.
*/
__u32 handle;
/**
* @flags: [in]
*
* Flags.
*/
__u32 flags;
/**
* @mapping: [out]
*
* Identifier corresponding to the mapping, to be used for
* relocations or unmapping later.
*/
__u32 mapping;
};
struct drm_tegra_channel_unmap {
/**
* @context: [in]
*
* Channel identifier of the channel to unmap memory from.
*/
__u32 context;
/**
* @mapping: [in]
*
* Mapping identifier of the memory mapping to unmap.
*/
__u32 mapping;
};
/* Submission */
/**
* Specify that bit 39 of the patched-in address should be set to switch
* swizzling between Tegra and non-Tegra sector layout on systems that store
* surfaces in system memory in non-Tegra sector layout.
*/
#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0)
struct drm_tegra_submit_buf {
/**
* @mapping: [in]
*
* Identifier of the mapping to use in the submission.
*/
__u32 mapping;
/**
* @flags: [in]
*
* Flags.
*/
__u32 flags;
/**
* Information for relocation patching.
*/
struct {
/**
* @target_offset: [in]
*
* Offset from the start of the mapping of the data whose
* address is to be patched into the gather.
*/
__u64 target_offset;
/**
* @gather_offset_words: [in]
*
* Offset in words from the start of the gather data to
* where the address should be patched into.
*/
__u32 gather_offset_words;
/**
* @shift: [in]
*
* Number of bits the address should be shifted right before
* patching in.
*/
__u32 shift;
} reloc;
};
/**
* Execute `words` words of Host1x opcodes specified in the `gather_data_ptr`
* buffer. Each GATHER_UPTR command uses successive words from the buffer.
*/
#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR 0
/**
* Wait for a syncpoint to reach a value before continuing with further
* commands.
*/
#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT 1
/**
* Wait for a syncpoint to reach a value before continuing with further
* commands. The threshold is calculated relative to the start of the job.
*/
#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE 2
struct drm_tegra_submit_cmd_gather_uptr {
__u32 words;
__u32 reserved[3];
};
struct drm_tegra_submit_cmd_wait_syncpt {
__u32 id;
__u32 value;
__u32 reserved[2];
};
struct drm_tegra_submit_cmd {
/**
* @type: [in]
*
* Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD*
* defines.
*/
__u32 type;
/**
* @flags: [in]
*
* Flags.
*/
__u32 flags;
union {
struct drm_tegra_submit_cmd_gather_uptr gather_uptr;
struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt;
__u32 reserved[4];
};
};
struct drm_tegra_submit_syncpt {
/**
* @id: [in]
*
* ID of the syncpoint that the job will increment.
*/
__u32 id;
/**
* @flags: [in]
*
* Flags.
*/
__u32 flags;
/**
* @increments: [in]
*
* Number of times the job will increment this syncpoint.
*/
__u32 increments;
/**
* @value: [out]
*
* Value the syncpoint will have once the job has completed all
* its specified syncpoint increments.
*
* Note that the kernel may increment the syncpoint before or after
* the job. These increments are not reflected in this field.
*
* If the job hangs or times out, not all of the increments may
* get executed.
*/
__u32 value;
};
struct drm_tegra_channel_submit {
/**
* @context: [in]
*
* Identifier of the channel to submit this job to.
*/
__u32 context;
/**
* @num_bufs: [in]
*
* Number of elements in the `bufs_ptr` array.
*/
__u32 num_bufs;
/**
* @num_cmds: [in]
*
* Number of elements in the `cmds_ptr` array.
*/
__u32 num_cmds;
/**
* @gather_data_words: [in]
*
* Number of 32-bit words in the `gather_data_ptr` array.
*/
__u32 gather_data_words;
/**
* @bufs_ptr: [in]
*
* Pointer to an array of drm_tegra_submit_buf structures.
*/
__u64 bufs_ptr;
/**
* @cmds_ptr: [in]
*
* Pointer to an array of drm_tegra_submit_cmd structures.
*/
__u64 cmds_ptr;
/**
* @gather_data_ptr: [in]
*
* Pointer to an array of Host1x opcodes to be used by GATHER_UPTR
* commands.
*/
__u64 gather_data_ptr;
/**
* @syncobj_in: [in]
*
* Handle for DRM syncobj that will be waited before submission.
* Ignored if zero.
*/
__u32 syncobj_in;
/**
* @syncobj_out: [in]
*
* Handle for DRM syncobj that will have its fence replaced with
* the job's completion fence. Ignored if zero.
*/
__u32 syncobj_out;
/**
 * @syncpt: [in,out]
*
* Information about the syncpoint the job will increment.
*/
struct drm_tegra_submit_syncpt syncpt;
};
struct drm_tegra_syncpoint_allocate {
/**
* @id: [out]
*
* ID of allocated syncpoint.
*/
__u32 id;
__u32 padding;
};
struct drm_tegra_syncpoint_free {
/**
* @id: [in]
*
* ID of syncpoint to free.
*/
__u32 id;
__u32 padding;
};
struct drm_tegra_syncpoint_wait {
/**
 * @timeout_ns: [in]
*
* Absolute timestamp at which the wait will time out.
*/
__s64 timeout_ns;
/**
* @id: [in]
*
* ID of syncpoint to wait on.
*/
__u32 id;
/**
* @threshold: [in]
*
* Threshold to wait for.
*/
__u32 threshold;
/**
* @value: [out]
*
* Value of the syncpoint upon wait completion.
*/
__u32 value;
__u32 padding;
};
#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open)
#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close)
#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map)
#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap)
#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit)
#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate)
#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free)
#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait)
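/*
 * Illustrative userspace sketch (not part of this header): the basic flow of
 * the new UAPI is to open a channel for an engine, map GEM buffers into it,
 * submit work and close it again. The Host1x class value below (VIC) is an
 * assumption for illustration only; real code takes the value from the
 * host1x headers. Error handling trimmed.
 */
#include <stdint.h>
#include <xf86drm.h>

static int tegra_open_channel(int fd, uint32_t *context, uint32_t *version)
{
	struct drm_tegra_channel_open open = {
		.host1x_class = 0x5d, /* assumed: HOST1X_CLASS_VIC */
	};

	if (drmIoctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &open))
		return -1;
	*context = open.context;
	*version = open.version;
	return 0;
}

static int tegra_map_buffer(int fd, uint32_t context, uint32_t handle,
			    uint32_t *mapping)
{
	struct drm_tegra_channel_map map = {
		.context = context,
		.handle  = handle,
		.flags   = DRM_TEGRA_CHANNEL_MAP_READ_WRITE,
	};

	if (drmIoctl(fd, DRM_IOCTL_TEGRA_CHANNEL_MAP, &map))
		return -1;
	*mapping = map.mapping;
	return 0;
}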
#if defined(__cplusplus)
}
#endif

View file

@ -46,16 +46,12 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@ -64,17 +60,6 @@ struct drm_virtgpu_map {
__u32 pad;
};
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
0)
struct drm_virtgpu_execbuffer_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 size;
@ -82,22 +67,10 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
__u32 num_in_syncobjs;
__u32 num_out_syncobjs;
__u64 in_syncobjs;
__u64 out_syncobjs;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@ -127,7 +100,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
__u32 blob_mem;
__u32 stride;
};
struct drm_virtgpu_3d_box {
@ -144,8 +117,6 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@ -153,8 +124,6 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@ -163,12 +132,6 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
#define VIRTGPU_DRM_CAPSET_VIRGL 1
#define VIRTGPU_DRM_CAPSET_VIRGL2 2
#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTGPU_DRM_CAPSET_VENUS 4
#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
@ -177,55 +140,6 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST 0x0001
#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
/* zero is invalid blob_mem */
__u32 blob_mem;
__u32 blob_flags;
__u32 bo_handle;
__u32 res_handle;
__u64 size;
/*
 * Used by 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
*/
__u32 pad;
__u32 cmd_size;
__u64 cmd;
__u64 blob_id;
};
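/*
 * Illustrative userspace sketch (not part of this header): creating a
 * mappable guest-backed blob resource. Error handling trimmed.
 */
#include <stdint.h>
#include <xf86drm.h>

static int virtgpu_create_guest_blob(int fd, uint64_t size, uint32_t *bo_handle)
{
	struct drm_virtgpu_resource_create_blob blob = {
		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size       = size,
	};

	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
		return -1;
	*bo_handle = blob.bo_handle;
	return 0;
}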
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
};
struct drm_virtgpu_context_init {
__u32 num_params;
__u32 pad;
/* pointer to drm_virtgpu_context_set_param array */
__u64 ctx_set_params;
};
/*
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
* effect. The event size is sizeof(drm_event), since there is no additional
* payload.
*/
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@ -261,14 +175,6 @@ struct drm_virtgpu_context_init {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
struct drm_virtgpu_context_init)
#if defined(__cplusplus)
}
#endif

View file

@ -1,36 +0,0 @@
//
// Copyright © 2011 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including the next
// paragraph) shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_intel",
defaults: [
"libdrm_defaults",
"libdrm_intel_sources",
],
vendor: true,
// Removed dependency to libpciaccess: not used on Android
shared_libs: ["libdrm"],
}

38
intel/Android.mk Normal file
View file

@ -0,0 +1,38 @@
#
# Copyright © 2011 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_INTEL_FILES, LIBDRM_INTEL_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_intel
LOCAL_SRC_FILES := $(LIBDRM_INTEL_FILES)
LOCAL_SHARED_LIBRARIES := \
libdrm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_intel_sources",
srcs: [
"intel_bufmgr.c",
"intel_bufmgr_fake.c",
"intel_bufmgr_gem.c",
"intel_decode.c",
"mm.c",
],
}

17
intel/Makefile.sources Normal file
View file

@ -0,0 +1,17 @@
LIBDRM_INTEL_FILES := \
i915_pciids.h \
intel_bufmgr.c \
intel_bufmgr_priv.h \
intel_bufmgr_fake.c \
intel_bufmgr_gem.c \
intel_decode.c \
intel_chipset.h \
intel_chipset.c \
mm.c \
mm.h \
uthash.h
LIBDRM_INTEL_H_FILES := \
intel_bufmgr.h \
intel_aub.h \
intel_debug.h

623
intel/i915_pciids.h Normal file
View file

@ -0,0 +1,623 @@
/*
* Copyright 2013 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _I915_PCIIDS_H
#define _I915_PCIIDS_H
/*
* A pci_device_id struct {
* __u32 vendor, device;
* __u32 subvendor, subdevice;
* __u32 class, class_mask;
* kernel_ulong_t driver_data;
* };
* Don't use C99 here because "class" is reserved and we want to
* give userspace flexibility.
*/
#define INTEL_VGA_DEVICE(id, info) { \
0x8086, id, \
~0, ~0, \
0x030000, 0xff0000, \
(unsigned long) info }
#define INTEL_QUANTA_VGA_DEVICE(info) { \
0x8086, 0x16a, \
0x152d, 0x8990, \
0x030000, 0xff0000, \
(unsigned long) info }
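/*
 * Illustrative sketch (not part of this header): the macros above expand to
 * positional pci_device_id initializers, so a driver builds its match table
 * like the hypothetical example below. The pci_device_id definition here is
 * just the layout described in the comment above (in the kernel it comes
 * from <linux/mod_devicetable.h>), and the info objects are placeholders.
 */
#include <linux/types.h>

struct pci_device_id {
	__u32 vendor, device;
	__u32 subvendor, subdevice;
	__u32 class, class_mask;
	unsigned long driver_data;
};

static const int example_i915g_info, example_snb_info;

static const struct pci_device_id example_ids[] = {
	INTEL_VGA_DEVICE(0x2582, &example_i915g_info), /* I915_G */
	INTEL_VGA_DEVICE(0x0102, &example_snb_info),   /* SNB GT1 desktop */
	{ 0 }                                          /* terminator */
};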
#define INTEL_I810_IDS(info) \
INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \
INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \
INTEL_VGA_DEVICE(0x7125, info) /* I810_E */
#define INTEL_I815_IDS(info) \
INTEL_VGA_DEVICE(0x1132, info) /* I815*/
#define INTEL_I830_IDS(info) \
INTEL_VGA_DEVICE(0x3577, info)
#define INTEL_I845G_IDS(info) \
INTEL_VGA_DEVICE(0x2562, info)
#define INTEL_I85X_IDS(info) \
INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
INTEL_VGA_DEVICE(0x358e, info)
#define INTEL_I865G_IDS(info) \
INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
#define INTEL_I915G_IDS(info) \
INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
#define INTEL_I915GM_IDS(info) \
INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
#define INTEL_I945G_IDS(info) \
INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
#define INTEL_I945GM_IDS(info) \
INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
#define INTEL_I965G_IDS(info) \
INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
#define INTEL_G33_IDS(info) \
INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
#define INTEL_I965GM_IDS(info) \
INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
#define INTEL_GM45_IDS(info) \
INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
#define INTEL_G45_IDS(info) \
INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
#define INTEL_PINEVIEW_G_IDS(info) \
INTEL_VGA_DEVICE(0xa001, info)
#define INTEL_PINEVIEW_M_IDS(info) \
INTEL_VGA_DEVICE(0xa011, info)
#define INTEL_IRONLAKE_D_IDS(info) \
INTEL_VGA_DEVICE(0x0042, info)
#define INTEL_IRONLAKE_M_IDS(info) \
INTEL_VGA_DEVICE(0x0046, info)
#define INTEL_SNB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0102, info), \
INTEL_VGA_DEVICE(0x010A, info)
#define INTEL_SNB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0112, info), \
INTEL_VGA_DEVICE(0x0122, info)
#define INTEL_SNB_D_IDS(info) \
INTEL_SNB_D_GT1_IDS(info), \
INTEL_SNB_D_GT2_IDS(info)
#define INTEL_SNB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0106, info)
#define INTEL_SNB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0116, info), \
INTEL_VGA_DEVICE(0x0126, info)
#define INTEL_SNB_M_IDS(info) \
INTEL_SNB_M_GT1_IDS(info), \
INTEL_SNB_M_GT2_IDS(info)
#define INTEL_IVB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
#define INTEL_IVB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
#define INTEL_IVB_M_IDS(info) \
INTEL_IVB_M_GT1_IDS(info), \
INTEL_IVB_M_GT2_IDS(info)
#define INTEL_IVB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
#define INTEL_IVB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
#define INTEL_IVB_D_IDS(info) \
INTEL_IVB_D_GT1_IDS(info), \
INTEL_IVB_D_GT2_IDS(info)
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
#define INTEL_HSW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */
#define INTEL_HSW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
#define INTEL_HSW_GT1_IDS(info) \
INTEL_HSW_ULT_GT1_IDS(info), \
INTEL_HSW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
#define INTEL_HSW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */
#define INTEL_HSW_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
#define INTEL_HSW_GT2_IDS(info) \
INTEL_HSW_ULT_GT2_IDS(info), \
INTEL_HSW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
#define INTEL_HSW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
#define INTEL_HSW_GT3_IDS(info) \
INTEL_HSW_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
#define INTEL_HSW_IDS(info) \
INTEL_HSW_GT1_IDS(info), \
INTEL_HSW_GT2_IDS(info), \
INTEL_HSW_GT3_IDS(info)
#define INTEL_VLV_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
INTEL_VGA_DEVICE(0x0f33, info)
#define INTEL_BDW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */
#define INTEL_BDW_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */
#define INTEL_BDW_GT1_IDS(info) \
INTEL_BDW_ULT_GT1_IDS(info), \
INTEL_BDW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
#define INTEL_BDW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */
#define INTEL_BDW_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT2_IDS(info) \
INTEL_BDW_ULT_GT2_IDS(info), \
INTEL_BDW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \
#define INTEL_BDW_ULX_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3_IDS(info) \
INTEL_BDW_ULT_GT3_IDS(info), \
INTEL_BDW_ULX_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_ULT_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info) /* Iris */
#define INTEL_BDW_ULX_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVD_IDS(info) \
INTEL_BDW_ULT_RSVD_IDS(info), \
INTEL_BDW_ULX_RSVD_IDS(info), \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
INTEL_BDW_GT1_IDS(info), \
INTEL_BDW_GT2_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
INTEL_BDW_RSVD_IDS(info)
#define INTEL_CHV_IDS(info) \
INTEL_VGA_DEVICE(0x22b0, info), \
INTEL_VGA_DEVICE(0x22b1, info), \
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
#define INTEL_SKL_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */
#define INTEL_SKL_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */
#define INTEL_SKL_GT1_IDS(info) \
INTEL_SKL_ULT_GT1_IDS(info), \
INTEL_SKL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
#define INTEL_SKL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */
#define INTEL_SKL_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */
#define INTEL_SKL_GT2_IDS(info) \
INTEL_SKL_ULT_GT2_IDS(info), \
INTEL_SKL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
#define INTEL_SKL_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */
#define INTEL_SKL_GT3_IDS(info) \
INTEL_SKL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info), \
INTEL_SKL_GT4_IDS(info)
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x1A85, info), \
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
#define INTEL_GLK_IDS(info) \
INTEL_VGA_DEVICE(0x3184, info), \
INTEL_VGA_DEVICE(0x3185, info)
#define INTEL_KBL_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */
#define INTEL_KBL_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */
#define INTEL_KBL_GT1_IDS(info) \
INTEL_KBL_ULT_GT1_IDS(info), \
INTEL_KBL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
#define INTEL_KBL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */
#define INTEL_KBL_ULX_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */
#define INTEL_KBL_GT2_IDS(info) \
INTEL_KBL_ULT_GT2_IDS(info), \
INTEL_KBL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
#define INTEL_KBL_ULT_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */
#define INTEL_KBL_GT3_IDS(info) \
INTEL_KBL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
/* AML/CFL Y GT2 */
#define INTEL_AML_CFL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x87CA, info)
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9BA5, info), \
INTEL_VGA_DEVICE(0x9BA8, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
INTEL_VGA_DEVICE(0x9BA2, info)
#define INTEL_CML_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x9B21, info), \
INTEL_VGA_DEVICE(0x9BAA, info), \
INTEL_VGA_DEVICE(0x9BAC, info)
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
INTEL_VGA_DEVICE(0x9BC2, info), \
INTEL_VGA_DEVICE(0x9BC6, info), \
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
#define INTEL_CML_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x9B41, info), \
INTEL_VGA_DEVICE(0x9BCA, info), \
INTEL_VGA_DEVICE(0x9BCC, info)
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
#define INTEL_CFL_H_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E9C, info)
#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3EA9, info)
/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3EA1, info), \
INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3EA0, info), \
INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
INTEL_CFL_S_GT2_IDS(info), \
INTEL_CFL_H_GT1_IDS(info), \
INTEL_CFL_H_GT2_IDS(info), \
INTEL_CFL_U_GT2_IDS(info), \
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
INTEL_WHL_U_GT3_IDS(info), \
INTEL_AML_CFL_GT2_IDS(info), \
INTEL_CML_GT1_IDS(info), \
INTEL_CML_GT2_IDS(info), \
INTEL_CML_U_GT1_IDS(info), \
INTEL_CML_U_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x5A54, info), \
INTEL_VGA_DEVICE(0x5A5C, info), \
INTEL_VGA_DEVICE(0x5A44, info), \
INTEL_VGA_DEVICE(0x5A4C, info)
#define INTEL_CNL_IDS(info) \
INTEL_CNL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x5A51, info), \
INTEL_VGA_DEVICE(0x5A59, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
INTEL_VGA_DEVICE(0x5A49, info), \
INTEL_VGA_DEVICE(0x5A52, info), \
INTEL_VGA_DEVICE(0x5A5A, info), \
INTEL_VGA_DEVICE(0x5A42, info), \
INTEL_VGA_DEVICE(0x5A4A, info), \
INTEL_VGA_DEVICE(0x5A50, info), \
INTEL_VGA_DEVICE(0x5A40, info)
/* ICL */
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
INTEL_VGA_DEVICE(0x8A5A, info), \
INTEL_VGA_DEVICE(0x8A5B, info), \
INTEL_VGA_DEVICE(0x8A57, info), \
INTEL_VGA_DEVICE(0x8A56, info), \
INTEL_VGA_DEVICE(0x8A71, info), \
INTEL_VGA_DEVICE(0x8A70, info), \
INTEL_VGA_DEVICE(0x8A53, info), \
INTEL_VGA_DEVICE(0x8A54, info)
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
/* EHL/JSL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4500, info), \
INTEL_VGA_DEVICE(0x4571, info), \
INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info), \
INTEL_VGA_DEVICE(0x4E71, info), \
INTEL_VGA_DEVICE(0x4557, info), \
INTEL_VGA_DEVICE(0x4555, info), \
INTEL_VGA_DEVICE(0x4E61, info), \
INTEL_VGA_DEVICE(0x4E57, info), \
INTEL_VGA_DEVICE(0x4E55, info), \
INTEL_VGA_DEVICE(0x4E51, info)
/* TGL */
#define INTEL_TGL_12_IDS(info) \
INTEL_VGA_DEVICE(0x9A40, info), \
INTEL_VGA_DEVICE(0x9A49, info), \
INTEL_VGA_DEVICE(0x9A59, info), \
INTEL_VGA_DEVICE(0x9A60, info), \
INTEL_VGA_DEVICE(0x9A68, info), \
INTEL_VGA_DEVICE(0x9A70, info), \
INTEL_VGA_DEVICE(0x9A78, info), \
INTEL_VGA_DEVICE(0x9AC0, info), \
INTEL_VGA_DEVICE(0x9AC9, info), \
INTEL_VGA_DEVICE(0x9AD9, info), \
INTEL_VGA_DEVICE(0x9AF8, info)
/* RKL */
#define INTEL_RKL_IDS(info) \
INTEL_VGA_DEVICE(0x4C80, info), \
INTEL_VGA_DEVICE(0x4C8A, info), \
INTEL_VGA_DEVICE(0x4C8B, info), \
INTEL_VGA_DEVICE(0x4C8C, info), \
INTEL_VGA_DEVICE(0x4C90, info), \
INTEL_VGA_DEVICE(0x4C9A, info)
/* DG1 */
#define INTEL_DG1_IDS(info) \
INTEL_VGA_DEVICE(0x4905, info)
#endif /* _I915_PCIIDS_H */
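The *_IDS() macros above only expand to comma-separated INTEL_VGA_DEVICE() entries; the header leaves INTEL_VGA_DEVICE itself to the consumer, which defines it to match whatever table it wants to build (intel_chipset.c further down does exactly this). A minimal standalone sketch, with the struct layout and main() purely illustrative:

#include <stdio.h>
#include <stdint.h>
#include "i915_pciids.h"

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, gen) { id, gen }

static const struct { uint16_t device; uint16_t gen; } skl_ids[] = {
	INTEL_SKL_IDS(9),	/* one { id, 9 } entry per Skylake PCI ID */
};

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(skl_ids) / sizeof(skl_ids[0]); i++)
		printf("0x%04x -> gen%u\n", skl_ids[i].device, skl_ids[i].gen);
	return 0;
}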

View file

@ -1,7 +1,7 @@
/**************************************************************************
*
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007-2012 Intel Corporation
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007-2012 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
@ -28,7 +28,7 @@
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
@ -114,6 +114,7 @@ typedef struct _drm_intel_bufmgr_gem {
pthread_mutex_t lock;
struct drm_i915_gem_exec_object *exec_objects;
struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
int exec_size;
@ -478,6 +479,44 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
* with the intersection of the memory type flags and the union of the
* access flags.
*/
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int index;
if (bo_gem->validate_index != -1)
return;
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
if (new_size == 0)
new_size = 5;
bufmgr_gem->exec_objects =
realloc(bufmgr_gem->exec_objects,
sizeof(*bufmgr_gem->exec_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
}
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
bufmgr_gem->exec_objects[index].alignment = bo->align;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec_count++;
}
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
@ -1152,6 +1191,7 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_gem_close close;
int ret;
DRMLISTDEL(&bo_gem->vma_list);
@ -1175,9 +1215,11 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
/* Close this object */
ret = drmCloseBufferHandle(bufmgr_gem->fd, bo_gem->gem_handle);
memclear(close);
close.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("drmCloseBufferHandle %d failed (%s): %s\n",
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
free(bo);
@ -1379,26 +1421,25 @@ static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bufmgr_gem *bufmgr_gem;
struct timespec time;
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_add_unless(&bo_gem->refcount, -1, 1))
return;
if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
clock_gettime(CLOCK_MONOTONIC, &time);
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
pthread_mutex_lock(&bufmgr_gem->lock);
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
}
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
pthread_mutex_unlock(&bufmgr_gem->lock);
}
pthread_mutex_unlock(&bufmgr_gem->lock);
}
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
@ -1691,82 +1732,6 @@ drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
return drm_intel_gem_bo_unmap(bo);
}
static bool is_cache_coherent(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_caching arg = {};
arg.handle = bo_gem->gem_handle;
if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
assert(false);
return arg.caching != I915_CACHING_NONE;
}
static void set_domain(drm_intel_bo *bo, uint32_t read, uint32_t write)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain arg = {};
arg.handle = bo_gem->gem_handle;
arg.read_domains = read;
arg.write_domain = write;
if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg))
assert(false);
}
static int mmap_write(drm_intel_bo *bo, unsigned long offset,
unsigned long length, const void *buf)
{
void *map = NULL;
if (!length)
return 0;
if (is_cache_coherent(bo)) {
map = drm_intel_gem_bo_map__cpu(bo);
if (map)
set_domain(bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
}
if (!map) {
map = drm_intel_gem_bo_map__wc(bo);
if (map)
set_domain(bo, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
}
assert(map);
memcpy((char *)map + offset, buf, length);
drm_intel_gem_bo_unmap(bo);
return 0;
}
static int mmap_read(drm_intel_bo *bo, unsigned long offset,
unsigned long length, void *buf)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
void *map = NULL;
if (!length)
return 0;
if (bufmgr_gem->has_llc || is_cache_coherent(bo)) {
map = drm_intel_gem_bo_map__cpu(bo);
if (map)
set_domain(bo, I915_GEM_DOMAIN_CPU, 0);
}
if (!map) {
map = drm_intel_gem_bo_map__wc(bo);
if (map)
set_domain(bo, I915_GEM_DOMAIN_WC, 0);
}
assert(map);
memcpy(buf, (char *)map + offset, length);
drm_intel_gem_bo_unmap(bo);
return 0;
}
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
@ -1787,20 +1752,14 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PWRITE,
&pwrite);
if (ret)
if (ret != 0) {
ret = -errno;
if (ret != 0 && ret != -EOPNOTSUPP) {
DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
return ret;
}
if (ret == -EOPNOTSUPP)
mmap_write(bo, offset, size, data);
return 0;
return ret;
}
static int
@ -1848,20 +1807,14 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PREAD,
&pread);
if (ret)
if (ret != 0) {
ret = -errno;
if (ret != 0 && ret != -EOPNOTSUPP) {
DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
return ret;
}
if (ret == -EOPNOTSUPP)
mmap_read(bo, offset, size, data);
return 0;
return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
@ -1961,9 +1914,11 @@ static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
struct drm_gem_close close_bo;
int i, ret;
free(bufmgr_gem->exec2_objects);
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
pthread_mutex_destroy(&bufmgr_gem->lock);
@ -1985,8 +1940,9 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
/* Release userptr bo kept hanging around for optimisation. */
if (bufmgr_gem->userptr_active.ptr) {
ret = drmCloseBufferHandle(bufmgr_gem->fd,
bufmgr_gem->userptr_active.handle);
memclear(close_bo);
close_bo.handle = bufmgr_gem->userptr_active.handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
free(bufmgr_gem->userptr_active.ptr);
if (ret)
fprintf(stderr,
@ -2221,6 +2177,31 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
* validations to be performed and update the relocation buffers with
* index values into the validation list.
*/
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
if (bo_gem->relocs == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
if (target_bo == bo)
continue;
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc(target_bo);
/* Add the target to the validate list */
drm_intel_add_validate_buffer(target_bo);
}
}
static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
@ -2261,6 +2242,30 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
}
}
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
int i;
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
bo_gem->gem_handle, bo_gem->name,
upper_32_bits(bo->offset64),
lower_32_bits(bo->offset64),
upper_32_bits(bufmgr_gem->exec_objects[i].offset),
lower_32_bits(bufmgr_gem->exec_objects[i].offset));
bo->offset64 = bufmgr_gem->exec_objects[i].offset;
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
}
static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
@ -2296,6 +2301,73 @@ drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
{
}
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
if (to_bo_gem(bo)->has_error)
return -ENOMEM;
pthread_mutex_lock(&bufmgr_gem->lock);
/* Update indices and set up the validate list. */
drm_intel_gem_bo_process_reloc(bo);
/* Add the batch buffer to the validation list. There are no
* relocations pointing to it.
*/
drm_intel_add_validate_buffer(bo);
memclear(execbuf);
execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
execbuf.batch_len = used;
execbuf.cliprects_ptr = (uintptr_t) cliprects;
execbuf.num_cliprects = num_cliprects;
execbuf.DR1 = 0;
execbuf.DR4 = DR4;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_EXECBUFFER,
&execbuf);
if (ret != 0) {
ret = -errno;
if (errno == ENOSPC) {
DBG("Execbuffer fails to pin. "
"Estimate: %u. Actual: %u. Available: %u\n",
drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
bufmgr_gem->
exec_count),
drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
bufmgr_gem->
exec_count),
(unsigned int)bufmgr_gem->gtt_size);
}
}
drm_intel_update_buffer_offsets(bufmgr_gem);
if (bufmgr_gem->bufmgr.debug)
drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
bo_gem->idle = false;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
@ -2773,7 +2845,9 @@ drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
bufmgr_gem->fenced_relocs = true;
if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
bufmgr_gem->fenced_relocs = true;
}
/**
@ -3378,17 +3452,16 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1))
return;
if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
pthread_mutex_lock(&bufmgr_list_mutex);
pthread_mutex_lock(&bufmgr_list_mutex);
if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
DRMLISTDEL(&bufmgr_gem->managers);
drm_intel_bufmgr_gem_destroy(bufmgr);
}
if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
DRMLISTDEL(&bufmgr_gem->managers);
drm_intel_bufmgr_gem_destroy(bufmgr);
pthread_mutex_unlock(&bufmgr_list_mutex);
}
pthread_mutex_unlock(&bufmgr_list_mutex);
}
drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
@ -3539,6 +3612,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
int ret, tmp;
bool exec2 = false;
pthread_mutex_lock(&bufmgr_list_mutex);
@ -3590,9 +3664,13 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->gen = 6;
else if (IS_GEN7(bufmgr_gem->pci_device))
bufmgr_gem->gen = 7;
else
/* Treat all further unmatched platforms the same as gen8 */
else if (IS_GEN8(bufmgr_gem->pci_device))
bufmgr_gem->gen = 8;
else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
free(bufmgr_gem);
bufmgr_gem = NULL;
goto exit;
}
if (IS_GEN3(bufmgr_gem->pci_device) &&
bufmgr_gem->gtt_size > 256*1024*1024) {
@ -3608,12 +3686,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
gp.param = I915_PARAM_HAS_EXECBUF2;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "i915 does not support EXECBUFER2\n");
free(bufmgr_gem);
bufmgr_gem = NULL;
goto exit;
}
if (!ret)
exec2 = true;
gp.param = I915_PARAM_HAS_BSD;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@ -3716,8 +3790,12 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
/* Use the new one if available */
if (exec2) {
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
} else
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;


intel/intel_chipset.c Normal file
View file

@ -0,0 +1,89 @@
/*
* Copyright (C) 2018 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "intel_chipset.h"
#include <inttypes.h>
#include <stdbool.h>
#include "i915_pciids.h"
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, gen) { id, gen }
static const struct pci_device {
uint16_t device;
uint16_t gen;
} pciids[] = {
/* Keep ids sorted by gen; latest gen first */
INTEL_RKL_IDS(12),
INTEL_DG1_IDS(12),
INTEL_TGL_12_IDS(12),
INTEL_EHL_IDS(11),
INTEL_ICL_11_IDS(11),
INTEL_CNL_IDS(10),
INTEL_CFL_IDS(9),
INTEL_GLK_IDS(9),
INTEL_KBL_IDS(9),
INTEL_BXT_IDS(9),
INTEL_SKL_IDS(9),
};
drm_private bool intel_is_genx(unsigned int devid, int gen)
{
const struct pci_device *p,
*pend = pciids + sizeof(pciids) / sizeof(pciids[0]);
for (p = pciids; p < pend; p++) {
/* PCI IDs are sorted */
if (p->gen < gen)
break;
if (p->device != devid)
continue;
if (gen == p->gen)
return true;
break;
}
return false;
}
drm_private bool intel_get_genx(unsigned int devid, int *gen)
{
const struct pci_device *p,
*pend = pciids + sizeof(pciids) / sizeof(pciids[0]);
for (p = pciids; p < pend; p++) {
if (p->device != devid)
continue;
if (gen)
*gen = p->gen;
return true;
}
return false;
}

View file

@ -331,6 +331,20 @@
#include <stdbool.h>
#include <libdrm_macros.h>
#define IS_9XX(dev) (!IS_GEN2(dev))
drm_private bool intel_is_genx(unsigned int devid, int gen);
drm_private bool intel_get_genx(unsigned int devid, int *gen);
#define IS_GEN9(devid) intel_is_genx(devid, 9)
#define IS_GEN10(devid) intel_is_genx(devid, 10)
#define IS_GEN11(devid) intel_is_genx(devid, 11)
#define IS_GEN12(devid) intel_is_genx(devid, 12)
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
IS_GEN7(dev) || \
IS_GEN8(dev) || \
intel_get_genx(dev, NULL))
#endif /* _INTEL_CHIPSET_H */
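A small caller-side sketch of how these helpers fit together: the IS_GEN9()..IS_GEN12() macros resolve through intel_is_genx(), while intel_get_genx() also reports which generation a listed device belongs to. The describe() wrapper and its output are illustrative only; since both helpers are drm_private, the real callers are the libdrm_intel sources themselves.

#include <stdio.h>
#include "intel_chipset.h"

static void describe(unsigned int devid)
{
	int gen;

	if (IS_GEN12(devid))
		printf("0x%04x: gen12 part\n", devid);
	else if (intel_get_genx(devid, &gen))
		printf("0x%04x: gen%d part\n", devid, gen);
	else
		printf("0x%04x: not in the gen9+ ID table\n", devid);
}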

View file

@ -3815,37 +3815,33 @@ drm_public struct drm_intel_decode *
drm_intel_decode_context_alloc(uint32_t devid)
{
struct drm_intel_decode *ctx;
int gen = 0;
if (IS_GEN8(devid))
gen = 8;
else if (IS_GEN7(devid))
gen = 7;
else if (IS_GEN6(devid))
gen = 6;
else if (IS_GEN5(devid))
gen = 5;
else if (IS_GEN4(devid))
gen = 4;
else if (IS_GEN3(devid))
gen = 3;
else if (IS_GEN2(devid))
gen = 2;
else
/* Just assume future unknown platforms behave as gen8. */
gen = 8;
if (!gen)
return NULL;
ctx = calloc(1, sizeof(struct drm_intel_decode));
if (!ctx)
return NULL;
ctx->devid = devid;
ctx->gen = gen;
ctx->out = stdout;
if (intel_get_genx(devid, &ctx->gen))
;
else if (IS_GEN8(devid))
ctx->gen = 8;
else if (IS_GEN7(devid))
ctx->gen = 7;
else if (IS_GEN6(devid))
ctx->gen = 6;
else if (IS_GEN5(devid))
ctx->gen = 5;
else if (IS_GEN4(devid))
ctx->gen = 4;
else if (IS_9XX(devid))
ctx->gen = 3;
else {
assert(IS_GEN2(devid));
ctx->gen = 2;
}
return ctx;
}
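For context, a hedged sketch of how this decode context is normally driven. drm_intel_decode_context_alloc() is the function changed above; drm_intel_decode_set_batch_pointer(), drm_intel_decode() and drm_intel_decode_context_free() are assumed to be the usual companion entry points of the libdrm_intel decoder and are not part of this hunk.

#include <stdint.h>
#include "intel_bufmgr.h"

/* devid identifies the GPU, data/count describe a captured batch buffer. */
static void dump_batch(uint32_t devid, uint32_t *data, int count)
{
	struct drm_intel_decode *ctx;

	ctx = drm_intel_decode_context_alloc(devid);	/* picks ctx->gen as above */
	if (!ctx)
		return;

	drm_intel_decode_set_batch_pointer(ctx, data, 0 /* hw_offset */, count);
	drm_intel_decode(ctx);				/* prints to ctx->out (stdout) */
	drm_intel_decode_context_free(ctx);
}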

View file

@ -18,21 +18,20 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
libdrm_intel = library(
libdrm_intel = shared_library(
'drm_intel',
[
files(
'intel_bufmgr.c', 'intel_bufmgr_fake.c', 'intel_bufmgr_gem.c',
'intel_decode.c', 'mm.c',
'intel_decode.c', 'mm.c', 'intel_chipset.c',
),
config_file,
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_pciaccess, dep_threads, dep_rt, dep_valgrind, dep_atomic_ops],
dependencies : [dep_pciaccess, dep_pthread_stubs, dep_rt, dep_valgrind, dep_atomic_ops],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
version : '1.@0@.0'.format(patch_ver),
version : '1.0.0',
install : true,
)
@ -41,17 +40,16 @@ ext_libdrm_intel = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_intel', ext_libdrm_intel)
install_headers(
'intel_bufmgr.h', 'intel_aub.h', 'intel_debug.h',
subdir : 'libdrm',
)
pkg.generate(
libdrm_intel,
name : 'libdrm_intel',
libraries : libdrm_intel,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
requires : 'libdrm',
description : 'Userspace interface to intel kernel DRM services',
)
@ -62,7 +60,6 @@ test_decode = executable(
include_directories : [inc_root, inc_drm],
link_with : [libdrm, libdrm_intel],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
)
test(
@ -102,6 +99,6 @@ test(
args : [
'--lib', libdrm_intel,
'--symbols-file', files('intel-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)

View file

@ -86,8 +86,7 @@ static void
compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
{
FILE *out = NULL;
char *ptr;
void *ref_ptr, *batch_ptr;
void *ptr, *ref_ptr, *batch_ptr;
#if HAVE_OPEN_MEMSTREAM
size_t size;
#endif
@ -107,7 +106,7 @@ compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
* inside of an automake project's test infrastructure.
*/
#if HAVE_OPEN_MEMSTREAM
out = open_memstream(&ptr, &size);
out = open_memstream((char **)&ptr, &size);
#else
fprintf(stderr, "platform lacks open_memstream, skipping.\n");
exit(77);

View file

@ -648,11 +648,11 @@ do {
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
#ifdef HAVE_BIG_ENDIAN
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
#else /* little endian non-intel */
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))

View file

@ -96,19 +96,19 @@ typedef struct _drmMMListHead
(__item) = (__temp), (__temp) = (__item)->prev)
#define DRMLISTFOREACHENTRY(__item, __list, __head) \
for ((__item) = DRMLISTENTRY(__typeof__(*__item), (__list)->next, __head); \
&(__item)->__head != (__list); \
(__item) = DRMLISTENTRY(__typeof__(*__item), \
(__item)->__head.next, __head))
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head); \
&(__item)->__head != (__list); \
(__item) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head))
#define DRMLISTFOREACHENTRYSAFE(__item, __temp, __list, __head) \
for ((__item) = DRMLISTENTRY(__typeof__(*__item), (__list)->next, __head), \
(__temp) = DRMLISTENTRY(__typeof__(*__item), \
(__item)->__head.next, __head); \
&(__item)->__head != (__list); \
(__item) = (__temp), \
(__temp) = DRMLISTENTRY(__typeof__(*__item), \
(__temp)->__head.next, __head))
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head); \
&(__item)->__head != (__list); \
(__item) = (__temp), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__temp)->__head.next, __head))
#define DRMLISTJOIN(__list, __join) if (!DRMLISTEMPTY(__list)) { \
(__list)->next->prev = (__join); \

libkms/Android.mk Normal file
View file

@ -0,0 +1,51 @@
DRM_GPU_DRIVERS := $(strip $(filter-out swrast, $(BOARD_GPU_DRIVERS)))
intel_drivers := i915 i965 i915g iris
radeon_drivers := r300g r600g radeonsi
nouveau_drivers := nouveau
virgl_drivers := virgl
vmwgfx_drivers := vmwgfx
valid_drivers := \
$(intel_drivers) \
$(radeon_drivers) \
$(nouveau_drivers) \
$(virgl_drivers) \
$(vmwgfx_drivers)
# warn about invalid drivers
invalid_drivers := $(filter-out $(valid_drivers), $(DRM_GPU_DRIVERS))
ifneq ($(invalid_drivers),)
$(warning invalid GPU drivers: $(invalid_drivers))
# tidy up
DRM_GPU_DRIVERS := $(filter-out $(invalid_drivers), $(DRM_GPU_DRIVERS))
endif
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
include $(LOCAL_PATH)/Makefile.sources
LOCAL_SRC_FILES := $(LIBKMS_FILES)
ifneq ($(filter $(vmwgfx_drivers), $(DRM_GPU_DRIVERS)),)
LOCAL_SRC_FILES += $(LIBKMS_VMWGFX_FILES)
endif
ifneq ($(filter $(intel_drivers), $(DRM_GPU_DRIVERS)),)
LOCAL_SRC_FILES += $(LIBKMS_INTEL_FILES)
endif
ifneq ($(filter $(nouveau_drivers), $(DRM_GPU_DRIVERS)),)
LOCAL_SRC_FILES += $(LIBKMS_NOUVEAU_FILES)
endif
ifneq ($(filter $(radeon_drivers), $(DRM_GPU_DRIVERS)),)
LOCAL_SRC_FILES += $(LIBKMS_RADEON_FILES)
endif
LOCAL_MODULE := libkms
LOCAL_SHARED_LIBRARIES := libdrm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

libkms/Makefile.sources Normal file
View file

@ -0,0 +1,23 @@
LIBKMS_FILES := \
internal.h \
linux.c \
dumb.c \
api.c
LIBKMS_VMWGFX_FILES := \
vmwgfx.c
LIBKMS_INTEL_FILES := \
intel.c
LIBKMS_NOUVEAU_FILES := \
nouveau.c
LIBKMS_RADEON_FILES := \
radeon.c
LIBKMS_EXYNOS_FILES := \
exynos.c
LIBKMS_H_FILES := \
libkms.h

libkms/api.c Normal file
View file

@ -0,0 +1,139 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "libdrm_macros.h"
#include "internal.h"
drm_public int kms_create(int fd, struct kms_driver **out)
{
return linux_create(fd, out);
}
drm_public int kms_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
return kms->get_prop(kms, key, out);
}
drm_public int kms_destroy(struct kms_driver **kms)
{
if (!(*kms))
return 0;
free(*kms);
*kms = NULL;
return 0;
}
drm_public int kms_bo_create(struct kms_driver *kms, const unsigned *attr, struct kms_bo **out)
{
unsigned width = 0;
unsigned height = 0;
enum kms_bo_type type = KMS_BO_TYPE_SCANOUT_X8R8G8B8;
int i;
for (i = 0; attr[i];) {
unsigned key = attr[i++];
unsigned value = attr[i++];
switch (key) {
case KMS_WIDTH:
width = value;
break;
case KMS_HEIGHT:
height = value;
break;
case KMS_BO_TYPE:
type = value;
break;
default:
return -EINVAL;
}
}
if (width == 0 || height == 0)
return -EINVAL;
/* XXX sanity check type */
if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 &&
(width != 64 || height != 64))
return -EINVAL;
return kms->bo_create(kms, width, height, type, attr, out);
}
drm_public int kms_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
case KMS_PITCH:
*out = bo->pitch;
break;
case KMS_HANDLE:
*out = bo->handle;
break;
default:
return -EINVAL;
}
return 0;
}
drm_public int kms_bo_map(struct kms_bo *bo, void **out)
{
return bo->kms->bo_map(bo, out);
}
drm_public int kms_bo_unmap(struct kms_bo *bo)
{
return bo->kms->bo_unmap(bo);
}
drm_public int kms_bo_destroy(struct kms_bo **bo)
{
int ret;
if (!(*bo))
return 0;
ret = (*bo)->kms->bo_destroy(*bo);
if (ret)
return ret;
*bo = NULL;
return 0;
}

libkms/dumb.c Normal file
View file

@ -0,0 +1,216 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "libdrm_macros.h"
struct dumb_bo
{
struct kms_bo base;
unsigned map_count;
};
static int
dumb_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
dumb_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
dumb_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct drm_mode_create_dumb arg;
struct dumb_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
break;
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
memset(&arg, 0, sizeof(arg));
/* All BO_TYPE currently are 32bpp formats */
arg.bpp = 32;
arg.width = width;
arg.height = height;
ret = drmIoctl(kms->fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
if (ret)
goto err_free;
bo->base.kms = kms;
bo->base.handle = arg.handle;
bo->base.size = arg.size;
bo->base.pitch = arg.pitch;
*out = &bo->base;
return 0;
err_free:
free(bo);
return ret;
}
static int
dumb_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
dumb_bo_map(struct kms_bo *_bo, void **out)
{
struct dumb_bo *bo = (struct dumb_bo *)_bo;
struct drm_mode_map_dumb arg;
void *map = NULL;
int ret;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
if (ret)
return ret;
map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
dumb_bo_unmap(struct kms_bo *_bo)
{
struct dumb_bo *bo = (struct dumb_bo *)_bo;
bo->map_count--;
return 0;
}
static int
dumb_bo_destroy(struct kms_bo *_bo)
{
struct dumb_bo *bo = (struct dumb_bo *)_bo;
struct drm_mode_destroy_dumb arg;
int ret;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &arg);
if (ret)
return -errno;
free(bo);
return 0;
}
drm_private int
dumb_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
int ret;
uint64_t cap = 0;
ret = drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &cap);
if (ret || cap == 0)
return -EINVAL;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = dumb_bo_create;
kms->bo_map = dumb_bo_map;
kms->bo_unmap = dumb_bo_unmap;
kms->bo_get_prop = dumb_bo_get_prop;
kms->bo_destroy = dumb_bo_destroy;
kms->get_prop = dumb_get_prop;
kms->destroy = dumb_destroy;
*out = kms;
return 0;
}

libkms/exynos.c Normal file
View file

@ -0,0 +1,220 @@
/* exynos.c
*
* Copyright 2009 Samsung Electronics Co., Ltd.
* Authors:
* SooChan Lim <sc1.lim@samsung.com>
* Sangjin LEE <lsj119@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "libdrm_macros.h"
#include "exynos_drm.h"
struct exynos_bo
{
struct kms_bo base;
unsigned map_count;
};
static int
exynos_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
exynos_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
exynos_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct drm_exynos_gem_create arg;
unsigned size, pitch;
struct exynos_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
pitch = 64 * 4;
size = 64 * 64 * 4;
} else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
pitch = width * 4;
pitch = (pitch + 512 - 1) & ~(512 - 1);
size = pitch * ((height + 4 - 1) & ~(4 - 1));
} else {
ret = -EINVAL;
goto err_free;
}
memset(&arg, 0, sizeof(arg));
arg.size = size;
ret = drmCommandWriteRead(kms->fd, DRM_EXYNOS_GEM_CREATE, &arg, sizeof(arg));
if (ret)
goto err_free;
bo->base.kms = kms;
bo->base.handle = arg.handle;
bo->base.size = size;
bo->base.pitch = pitch;
*out = &bo->base;
return 0;
err_free:
free(bo);
return ret;
}
static int
exynos_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
exynos_bo_map(struct kms_bo *_bo, void **out)
{
struct exynos_bo *bo = (struct exynos_bo *)_bo;
struct drm_mode_map_dumb arg;
void *map = NULL;
int ret;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
if (ret)
return ret;
map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
exynos_bo_unmap(struct kms_bo *_bo)
{
struct exynos_bo *bo = (struct exynos_bo *)_bo;
bo->map_count--;
return 0;
}
static int
exynos_bo_destroy(struct kms_bo *_bo)
{
struct exynos_bo *bo = (struct exynos_bo *)_bo;
struct drm_gem_close arg;
int ret;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
if (ret)
return -errno;
free(bo);
return 0;
}
drm_private int
exynos_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = exynos_bo_create;
kms->bo_map = exynos_bo_map;
kms->bo_unmap = exynos_bo_unmap;
kms->bo_get_prop = exynos_bo_get_prop;
kms->bo_destroy = exynos_bo_destroy;
kms->get_prop = exynos_get_prop;
kms->destroy = exynos_destroy;
*out = kms;
return 0;
}

libkms/intel.c Normal file
View file

@ -0,0 +1,236 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "libdrm_macros.h"
#include "i915_drm.h"
struct intel_bo
{
struct kms_bo base;
unsigned map_count;
};
static int
intel_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
intel_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
intel_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct drm_i915_gem_create arg;
unsigned size, pitch;
struct intel_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
pitch = 64 * 4;
size = 64 * 64 * 4;
} else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
pitch = width * 4;
pitch = (pitch + 512 - 1) & ~(512 - 1);
size = pitch * ((height + 4 - 1) & ~(4 - 1));
} else {
free(bo);
return -EINVAL;
}
memset(&arg, 0, sizeof(arg));
arg.size = size;
ret = drmCommandWriteRead(kms->fd, DRM_I915_GEM_CREATE, &arg, sizeof(arg));
if (ret)
goto err_free;
bo->base.kms = kms;
bo->base.handle = arg.handle;
bo->base.size = size;
bo->base.pitch = pitch;
*out = &bo->base;
if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8 && pitch > 512) {
struct drm_i915_gem_set_tiling tile;
memset(&tile, 0, sizeof(tile));
tile.handle = bo->base.handle;
tile.tiling_mode = I915_TILING_X;
tile.stride = bo->base.pitch;
ret = drmCommandWriteRead(kms->fd, DRM_I915_GEM_SET_TILING, &tile, sizeof(tile));
#if 0
if (ret) {
kms_bo_destroy(out);
return ret;
}
#endif
}
return 0;
err_free:
free(bo);
return ret;
}
static int
intel_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
intel_bo_map(struct kms_bo *_bo, void **out)
{
struct intel_bo *bo = (struct intel_bo *)_bo;
struct drm_i915_gem_mmap_gtt arg;
void *map = NULL;
int ret;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmCommandWriteRead(bo->base.kms->fd, DRM_I915_GEM_MMAP_GTT, &arg, sizeof(arg));
if (ret)
return ret;
map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
intel_bo_unmap(struct kms_bo *_bo)
{
struct intel_bo *bo = (struct intel_bo *)_bo;
bo->map_count--;
return 0;
}
static int
intel_bo_destroy(struct kms_bo *_bo)
{
struct intel_bo *bo = (struct intel_bo *)_bo;
struct drm_gem_close arg;
int ret;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
if (ret)
return -errno;
free(bo);
return 0;
}
drm_private int
intel_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = intel_bo_create;
kms->bo_map = intel_bo_map;
kms->bo_unmap = intel_bo_unmap;
kms->bo_get_prop = intel_bo_get_prop;
kms->bo_destroy = intel_bo_destroy;
kms->get_prop = intel_get_prop;
kms->destroy = intel_destroy;
*out = kms;
return 0;
}
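The pitch and size rounding used by the scanout path in intel.c and exynos.c above reads most easily with concrete numbers; a small sketch with hypothetical dimensions:

static unsigned scanout_size(unsigned width, unsigned height)
{
	unsigned pitch = width * 4;			/* 32bpp formats only */

	pitch = (pitch + 512 - 1) & ~(512 - 1);		/* pitch rounded up to 512 bytes */
	return pitch * ((height + 4 - 1) & ~(4 - 1));	/* height rounded up to 4 rows */
}
/* e.g. scanout_size(1000, 601) == 4096 * 604 == 2473984 bytes */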

libkms/internal.h Normal file
View file

@ -0,0 +1,80 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTERNAL_H_
#define INTERNAL_H_
#include "libdrm_macros.h"
#include "libkms.h"
struct kms_driver
{
int (*get_prop)(struct kms_driver *kms, const unsigned key,
unsigned *out);
int (*destroy)(struct kms_driver *kms);
int (*bo_create)(struct kms_driver *kms,
unsigned width,
unsigned height,
enum kms_bo_type type,
const unsigned *attr,
struct kms_bo **out);
int (*bo_get_prop)(struct kms_bo *bo, const unsigned key,
unsigned *out);
int (*bo_map)(struct kms_bo *bo, void **out);
int (*bo_unmap)(struct kms_bo *bo);
int (*bo_destroy)(struct kms_bo *bo);
int fd;
};
struct kms_bo
{
struct kms_driver *kms;
void *ptr;
size_t size;
size_t offset;
size_t pitch;
unsigned handle;
};
drm_private int linux_create(int fd, struct kms_driver **out);
drm_private int vmwgfx_create(int fd, struct kms_driver **out);
drm_private int intel_create(int fd, struct kms_driver **out);
drm_private int dumb_create(int fd, struct kms_driver **out);
drm_private int nouveau_create(int fd, struct kms_driver **out);
drm_private int radeon_create(int fd, struct kms_driver **out);
drm_private int exynos_create(int fd, struct kms_driver **out);
#endif

libkms/kms-symbols.txt Normal file
View file

@ -0,0 +1,8 @@
kms_bo_create
kms_bo_destroy
kms_bo_get_prop
kms_bo_map
kms_bo_unmap
kms_create
kms_destroy
kms_get_prop

libkms/libkms.h Normal file
View file

@ -0,0 +1,82 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _LIBKMS_H_
#define _LIBKMS_H_
#if defined(__cplusplus)
extern "C" {
#endif
/**
* \file
*
*/
struct kms_driver;
struct kms_bo;
enum kms_attrib
{
KMS_TERMINATE_PROP_LIST,
#define KMS_TERMINATE_PROP_LIST KMS_TERMINATE_PROP_LIST
KMS_BO_TYPE,
#define KMS_BO_TYPE KMS_BO_TYPE
KMS_WIDTH,
#define KMS_WIDTH KMS_WIDTH
KMS_HEIGHT,
#define KMS_HEIGHT KMS_HEIGHT
KMS_PITCH,
#define KMS_PITCH KMS_PITCH
KMS_HANDLE,
#define KMS_HANDLE KMS_HANDLE
};
enum kms_bo_type
{
KMS_BO_TYPE_SCANOUT_X8R8G8B8 = (1 << 0),
#define KMS_BO_TYPE_SCANOUT_X8R8G8B8 KMS_BO_TYPE_SCANOUT_X8R8G8B8
KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 = (1 << 1),
#define KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8 KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8
};
int kms_create(int fd, struct kms_driver **out);
int kms_get_prop(struct kms_driver *kms, unsigned key, unsigned *out);
int kms_destroy(struct kms_driver **kms);
int kms_bo_create(struct kms_driver *kms, const unsigned *attr, struct kms_bo **out);
int kms_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out);
int kms_bo_map(struct kms_bo *bo, void **out);
int kms_bo_unmap(struct kms_bo *bo);
int kms_bo_destroy(struct kms_bo **bo);
#if defined(__cplusplus)
};
#endif
#endif
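A minimal caller sketch of the API declared above, assuming an already-open DRM file descriptor. The attribute list is a key/value array terminated by KMS_TERMINATE_PROP_LIST, exactly as api.c parses it, and with the pkg-config file below the header is included as <libkms.h>. The function name and error handling style are illustrative.

#include <libkms.h>

int make_scanout_bo(int fd, unsigned width, unsigned height)
{
	struct kms_driver *kms;
	struct kms_bo *bo;
	unsigned attr[] = {
		KMS_WIDTH,   width,
		KMS_HEIGHT,  height,
		KMS_BO_TYPE, KMS_BO_TYPE_SCANOUT_X8R8G8B8,
		KMS_TERMINATE_PROP_LIST
	};
	unsigned pitch, handle;
	void *map;
	int ret;

	ret = kms_create(fd, &kms);
	if (ret)
		return ret;

	ret = kms_bo_create(kms, attr, &bo);
	if (ret)
		goto out_driver;

	kms_bo_get_prop(bo, KMS_PITCH, &pitch);
	kms_bo_get_prop(bo, KMS_HANDLE, &handle);

	ret = kms_bo_map(bo, &map);
	if (ret == 0) {
		/* fill the buffer; handle and pitch feed e.g. drmModeAddFB() */
		kms_bo_unmap(bo);
	}

	kms_bo_destroy(&bo);
out_driver:
	kms_destroy(&kms);
	return ret;
}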

libkms/libkms.pc.in Normal file
View file

@ -0,0 +1,11 @@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libkms
Description: Library that abstracts away the different mm interface for kernel drivers
Version: 1.0.0
Libs: -L${libdir} -lkms
Cflags: -I${includedir}/libkms
Requires.private: libdrm

libkms/linux.c Normal file
View file

@ -0,0 +1,147 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Thanks to krh and jcristau for the tips on
* going from fd to pci id via fstat and udev.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include "libdrm_macros.h"
#include "internal.h"
#define PATH_SIZE 512
static int
linux_name_from_sysfs(int fd, char **out)
{
char path[PATH_SIZE+1] = ""; /* initialize to please valgrind */
char link[PATH_SIZE+1] = "";
struct stat buffer;
unsigned maj, min;
char* slash_name;
int ret;
/*
* Inside the sysfs directory for the device there is a symlink
* to the directory representing the driver module, that path
* happens to hold the name of the driver.
*
* So lets get the symlink for the drm device. Then read the link
* and filter out the last directory which happens to be the name
* of the driver, which we can use to load the correct interface.
*
* Thanks to Ray Strode of Plymouth for the code.
*/
ret = fstat(fd, &buffer);
if (ret)
return -EINVAL;
if (!S_ISCHR(buffer.st_mode))
return -EINVAL;
maj = major(buffer.st_rdev);
min = minor(buffer.st_rdev);
snprintf(path, PATH_SIZE, "/sys/dev/char/%d:%d/device/driver", maj, min);
if (readlink(path, link, PATH_SIZE) < 0)
return -EINVAL;
/* link looks something like this: ../../../bus/pci/drivers/intel */
slash_name = strrchr(link, '/');
if (!slash_name)
return -EINVAL;
/* copy name and at the same time remove the slash */
*out = strdup(slash_name + 1);
return 0;
}
static int
linux_from_sysfs(int fd, struct kms_driver **out)
{
char *name;
int ret;
ret = linux_name_from_sysfs(fd, &name);
if (ret)
return ret;
#if HAVE_INTEL
if (!strcmp(name, "intel"))
ret = intel_create(fd, out);
else
#endif
#if HAVE_VMWGFX
if (!strcmp(name, "vmwgfx"))
ret = vmwgfx_create(fd, out);
else
#endif
#if HAVE_NOUVEAU
if (!strcmp(name, "nouveau"))
ret = nouveau_create(fd, out);
else
#endif
#if HAVE_RADEON
if (!strcmp(name, "radeon"))
ret = radeon_create(fd, out);
else
#endif
#if HAVE_EXYNOS
if (!strcmp(name, "exynos"))
ret = exynos_create(fd, out);
else
#endif
ret = -ENOSYS;
free(name);
return ret;
}
drm_private int
linux_create(int fd, struct kms_driver **out)
{
if (!dumb_create(fd, out))
return 0;
return linux_from_sysfs(fd, out);
}

78
libkms/meson.build Normal file
View file

@ -0,0 +1,78 @@
# Copyright © 2017-2018 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
libkms_include = [inc_root, inc_drm]
files_libkms = files(
'linux.c',
'dumb.c',
'api.c',
)
if with_vmwgfx
files_libkms += files('vmwgfx.c')
endif
if with_intel
files_libkms += files('intel.c')
endif
if with_nouveau
files_libkms += files('nouveau.c')
endif
if with_radeon
files_libkms += files('radeon.c')
endif
if with_exynos
files_libkms += files('exynos.c')
libkms_include += include_directories('../exynos')
endif
libkms = shared_library(
'kms',
[files_libkms, config_file],
c_args : libdrm_c_args,
include_directories : libkms_include,
link_with : libdrm,
version : '1.0.0',
install : true,
)
ext_libkms = declare_dependency(
link_with : [libdrm, libkms],
include_directories : [libkms_include],
)
install_headers('libkms.h', subdir : 'libkms')
pkg.generate(
name : 'libkms',
libraries : libkms,
subdirs : ['libkms'],
version : '1.0.0',
requires_private : 'libdrm',
description : 'Library that abstracts away the different mm interfaces for kernel drivers',
)
test(
'kms-symbols-check',
symbols_check,
args : [
'--lib', libkms,
'--symbols-file', files('kms-symbols.txt'),
'--nm', prog_nm.path(),
],
)

218
libkms/nouveau.c Normal file
View file

@ -0,0 +1,218 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "libdrm_macros.h"
#include "nouveau_drm.h"
struct nouveau_bo
{
struct kms_bo base;
uint64_t map_handle;
unsigned map_count;
};
static int
nouveau_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
nouveau_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
nouveau_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct drm_nouveau_gem_new arg;
unsigned size, pitch;
struct nouveau_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
pitch = 64 * 4;
size = 64 * 64 * 4;
} else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
pitch = width * 4;
pitch = (pitch + 512 - 1) & ~(512 - 1);
size = pitch * height;
} else {
free(bo);
return -EINVAL;
}
memset(&arg, 0, sizeof(arg));
arg.info.size = size;
arg.info.domain = NOUVEAU_GEM_DOMAIN_MAPPABLE | NOUVEAU_GEM_DOMAIN_VRAM;
arg.info.tile_mode = 0;
arg.info.tile_flags = 0;
arg.align = 512;
arg.channel_hint = 0;
ret = drmCommandWriteRead(kms->fd, DRM_NOUVEAU_GEM_NEW, &arg, sizeof(arg));
if (ret)
goto err_free;
bo->base.kms = kms;
bo->base.handle = arg.info.handle;
bo->base.size = size;
bo->base.pitch = pitch;
bo->map_handle = arg.info.map_handle;
*out = &bo->base;
return 0;
err_free:
free(bo);
return ret;
}
static int
nouveau_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
nouveau_bo_map(struct kms_bo *_bo, void **out)
{
struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
void *map = NULL;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
map = drm_mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, bo->map_handle);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
nouveau_bo_unmap(struct kms_bo *_bo)
{
struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
bo->map_count--;
return 0;
}
static int
nouveau_bo_destroy(struct kms_bo *_bo)
{
struct nouveau_bo *bo = (struct nouveau_bo *)_bo;
struct drm_gem_close arg;
int ret;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
if (ret)
return -errno;
free(bo);
return 0;
}
drm_private int
nouveau_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = nouveau_bo_create;
kms->bo_map = nouveau_bo_map;
kms->bo_unmap = nouveau_bo_unmap;
kms->bo_get_prop = nouveau_bo_get_prop;
kms->bo_destroy = nouveau_bo_destroy;
kms->get_prop = nouveau_get_prop;
kms->destroy = nouveau_destroy;
*out = kms;
return 0;
}

239
libkms/radeon.c Normal file
View file

@ -0,0 +1,239 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "libdrm_macros.h"
#include "radeon_drm.h"
#define ALIGNMENT 512
struct radeon_bo
{
struct kms_bo base;
unsigned map_count;
};
static int
radeon_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
radeon_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
radeon_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct drm_radeon_gem_create arg;
unsigned size, pitch;
struct radeon_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
switch (type) {
case KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8:
pitch = 4 * 64;
size = 4 * 64 * 64;
break;
case KMS_BO_TYPE_SCANOUT_X8R8G8B8:
pitch = width * 4;
pitch = (pitch + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
size = pitch * height;
break;
default:
return -EINVAL;
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
memset(&arg, 0, sizeof(arg));
arg.size = size;
arg.alignment = ALIGNMENT;
arg.initial_domain = RADEON_GEM_DOMAIN_CPU;
arg.flags = 0;
arg.handle = 0;
ret = drmCommandWriteRead(kms->fd, DRM_RADEON_GEM_CREATE,
&arg, sizeof(arg));
if (ret)
goto err_free;
bo->base.kms = kms;
bo->base.handle = arg.handle;
bo->base.size = size;
bo->base.pitch = pitch;
bo->base.offset = 0;
bo->map_count = 0;
*out = &bo->base;
return 0;
err_free:
free(bo);
return ret;
}
static int
radeon_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
radeon_bo_map(struct kms_bo *_bo, void **out)
{
struct radeon_bo *bo = (struct radeon_bo *)_bo;
struct drm_radeon_gem_mmap arg;
void *map = NULL;
int ret;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
arg.offset = bo->base.offset;
arg.size = (uint64_t)bo->base.size;
ret = drmCommandWriteRead(bo->base.kms->fd, DRM_RADEON_GEM_MMAP,
&arg, sizeof(arg));
if (ret)
return -errno;
map = drm_mmap(0, arg.size, PROT_READ | PROT_WRITE, MAP_SHARED,
bo->base.kms->fd, arg.addr_ptr);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
radeon_bo_unmap(struct kms_bo *_bo)
{
struct radeon_bo *bo = (struct radeon_bo *)_bo;
if (--bo->map_count == 0) {
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
return 0;
}
static int
radeon_bo_destroy(struct kms_bo *_bo)
{
struct radeon_bo *bo = (struct radeon_bo *)_bo;
struct drm_gem_close arg;
int ret;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
if (ret)
return -errno;
free(bo);
return 0;
}
drm_private int
radeon_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = radeon_bo_create;
kms->bo_map = radeon_bo_map;
kms->bo_unmap = radeon_bo_unmap;
kms->bo_get_prop = radeon_bo_get_prop;
kms->bo_destroy = radeon_bo_destroy;
kms->get_prop = radeon_get_prop;
kms->destroy = radeon_destroy;
*out = kms;
return 0;
}

204
libkms/vmwgfx.c Normal file
View file

@ -0,0 +1,204 @@
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "internal.h"
#include "xf86drm.h"
#include "libdrm_macros.h"
#include "vmwgfx_drm.h"
struct vmwgfx_bo
{
struct kms_bo base;
uint64_t map_handle;
unsigned map_count;
};
static int
vmwgfx_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
{
switch (key) {
case KMS_BO_TYPE:
*out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
break;
default:
return -EINVAL;
}
return 0;
}
static int
vmwgfx_destroy(struct kms_driver *kms)
{
free(kms);
return 0;
}
static int
vmwgfx_bo_create(struct kms_driver *kms,
const unsigned width, const unsigned height,
const enum kms_bo_type type, const unsigned *attr,
struct kms_bo **out)
{
struct vmwgfx_bo *bo;
int i, ret;
for (i = 0; attr[i]; i += 2) {
switch (attr[i]) {
case KMS_WIDTH:
case KMS_HEIGHT:
case KMS_BO_TYPE:
break;
default:
return -EINVAL;
}
}
bo = calloc(1, sizeof(*bo));
if (!bo)
return -ENOMEM;
{
union drm_vmw_alloc_dmabuf_arg arg;
struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
struct drm_vmw_dmabuf_rep *rep = &arg.rep;
memset(&arg, 0, sizeof(arg));
req->size = width * height * 4;
bo->base.size = req->size;
bo->base.pitch = width * 4;
bo->base.kms = kms;
do {
ret = drmCommandWriteRead(bo->base.kms->fd,
DRM_VMW_ALLOC_DMABUF,
&arg, sizeof(arg));
} while (ret == -ERESTART);
if (ret)
goto err_free;
bo->base.handle = rep->handle;
bo->map_handle = rep->map_handle;
bo->base.handle = rep->cur_gmr_id;
bo->base.offset = rep->cur_gmr_offset;
}
*out = &bo->base;
return 0;
err_free:
free(bo);
return ret;
}
static int
vmwgfx_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
{
switch (key) {
default:
return -EINVAL;
}
}
static int
vmwgfx_bo_map(struct kms_bo *_bo, void **out)
{
struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
void *map;
if (bo->base.ptr) {
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
map = drm_mmap(NULL, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, bo->map_handle);
if (map == MAP_FAILED)
return -errno;
bo->base.ptr = map;
bo->map_count++;
*out = bo->base.ptr;
return 0;
}
static int
vmwgfx_bo_unmap(struct kms_bo *_bo)
{
struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
bo->map_count--;
return 0;
}
static int
vmwgfx_bo_destroy(struct kms_bo *_bo)
{
struct vmwgfx_bo *bo = (struct vmwgfx_bo *)_bo;
struct drm_vmw_unref_dmabuf_arg arg;
if (bo->base.ptr) {
/* XXX Sanity check map_count */
drm_munmap(bo->base.ptr, bo->base.size);
bo->base.ptr = NULL;
}
memset(&arg, 0, sizeof(arg));
arg.handle = bo->base.handle;
drmCommandWrite(bo->base.kms->fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));
free(bo);
return 0;
}
drm_private int
vmwgfx_create(int fd, struct kms_driver **out)
{
struct kms_driver *kms;
kms = calloc(1, sizeof(*kms));
if (!kms)
return -ENOMEM;
kms->fd = fd;
kms->bo_create = vmwgfx_bo_create;
kms->bo_map = vmwgfx_bo_map;
kms->bo_unmap = vmwgfx_bo_unmap;
kms->bo_get_prop = vmwgfx_bo_get_prop;
kms->bo_destroy = vmwgfx_bo_destroy;
kms->get_prop = vmwgfx_get_prop;
kms->destroy = vmwgfx_destroy;
*out = kms;
return 0;
}

View file

@ -33,7 +33,7 @@
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/poll.h>
#include <unistd.h>
#if defined(__cplusplus)

View file

@ -1,229 +0,0 @@
=======
drm-kms
=======
-------------------
Kernel Mode-Setting
-------------------
:Date: September 2012
:Manual section: 7
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
``#include <xf86drmMode.h>``
Description
===========
Each DRM device provides access to manage which monitors and displays are
currently used and which frames are displayed on them. This task is called *Kernel
Mode-Setting* (KMS). Historically, this was done in user-space and called
*User-space Mode-Setting* (UMS). Almost all open-source drivers now provide the
KMS kernel API to do this in the kernel, however, many non-open-source binary
drivers from different vendors still do not support this. You can use
**drmCheckModesettingSupported**\ (3) to check whether your driver supports this. To
understand how KMS works, we need to introduce 5 objects: *CRTCs*, *Planes*,
*Encoders*, *Connectors* and *Framebuffers*.
CRTCs
A *CRTC*, short for *CRT Controller*, is an abstraction representing a part of
the chip that contains a pointer to a scanout buffer. Therefore, the number
of CRTCs available determines how many independent scanout buffers can be
active at any given time. The CRTC structure contains several fields to
support this: a pointer to some video memory (abstracted as a frame-buffer
object), a list of driven connectors, a display mode and an (x, y) offset
into the video memory to support panning or configurations where one piece
of video memory spans multiple CRTCs. A CRTC is the central point where
configuration of displays happens. You select which objects to use, which
modes and which parameters and then configure each CRTC via
**drmModeCrtcSet**\ (3) to drive the display devices.
Planes
A *plane* represents an image source that can be blended with or overlaid
on top of a CRTC during the scanout process. Planes are associated with a
frame-buffer to crop a portion of the image memory (source) and optionally
scale it to a destination size. The result is then blended with or overlaid
on top of a CRTC. Planes are not provided by all hardware and the number of
available planes is limited. If planes are not available or if not enough
planes are available, the user should fall back to normal software blending
(via GPU or CPU).
Encoders
An *encoder* takes pixel data from a CRTC and converts it to a format
suitable for any attached connectors. On some devices, it may be possible to
have a CRTC send data to more than one encoder. In that case, both encoders
would receive data from the same scanout buffer, resulting in a *cloned*
display configuration across the connectors attached to each encoder.
Connectors
A *connector* is the final destination of pixel-data on a device, and
usually connects directly to an external display device like a monitor or
laptop panel. A connector can only be attached to one encoder at a time. The
connector is also the structure where information about the attached display
is kept, so it contains fields for display data, *EDID* data, *DPMS* and
*connection status*, and information about modes supported on the attached
displays.
Framebuffers
*Framebuffers* are abstract memory objects that provide a source of pixel
data to scanout to a CRTC. Applications explicitly request the creation of
framebuffers and can control their behavior. Framebuffers rely on the
underneath memory manager for low-level memory operations. When creating a
framebuffer, applications pass a memory handle through the API which is used
as backing storage. The framebuffer itself is only an abstract object with
no data. It just refers to memory buffers that must be created with the
**drm-memory**\ (7) API.
Mode-Setting
------------
Before mode-setting can be performed, an application needs to call
**drmSetMaster**\ (3) to become *DRM-Master*. It then has exclusive access to
the KMS API. A call to **drmModeGetResources**\ (3) returns a list of *CRTCs*,
*Connectors*, *Encoders* and *Planes*.
Normal procedure now includes: First, you select which connectors you want to
use. Users are mostly interested in which monitor or display-panel is active so
you need to make sure to arrange them in the correct logical order and select
the correct ones to use. For each connector, you need to find a CRTC to drive
this connector. If you want to clone output to two or more connectors, you may
use a single CRTC for all cloned connectors (if the hardware supports this). To
find a suitable CRTC, you need to iterate over the list of encoders that are
available for each connector. Each encoder contains a list of CRTCs that it can
work with and you simply select one of these CRTCs. If you later program the
CRTC to control a connector, it automatically selects the best encoder.
However, this procedure is needed so your CRTC has at least one working encoder
for the selected connector. See the *Examples* section below for more
information.
All valid modes for a connector can be retrieved with a call to
**drmModeGetConnector**\ (3). You need to select the mode you want to use and save it.
The first mode in the list is the default mode with the highest resolution
possible and often a suitable choice.
After you have a working connector+CRTC+mode combination, you need to create a
framebuffer that is used for scanout. Memory buffer allocation is
driver-dependent and described in **drm-memory**\ (7). You need to create a
buffer big enough for your selected mode. Now you can create a framebuffer
object that uses your memory-buffer as scanout buffer. You can do this with
**drmModeAddFB**\ (3) and **drmModeAddFB2**\ (3).
As a last step, you want to program your CRTC to drive your selected connector.
You can do this with a call to **drmModeSetCrtc**\ (3).
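A condensed sketch of this last step, assuming the connector ``conn`` (from
**drmModeGetConnector**\ (3)), a CRTC id ``crtc_id``, the saved mode ``mode`` and a
framebuffer id ``fb_id`` have already been obtained as described above:

::

    int ret;

    /* program crtc_id to scan out fb_id at offset (0, 0) and drive the
     * single connector conn with the selected mode */
    ret = drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0,
                         &conn->connector_id, 1, &mode);
    if (ret) {
        /* mode-setting failed; see "errno" */
        ...
    }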
Page-Flipping
-------------
A call to **drmModeSetCrtc**\ (3) is executed immediately and forces the CRTC
to use the new scanout buffer. If you want smooth-transitions without tearing,
you probably use double-buffering. You need to create one framebuffer object
for each buffer you use. You can then call **drmModeSetCrtc**\ (3) on the next
buffer to flip. If you want to synchronize your flips with *vertical-blanks*,
you can use **drmModePageFlip**\ (3) which schedules your page-flip for the
next *vblank*.
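A minimal page-flip sketch, assuming ``crtc_id``, a second framebuffer
``next_fb_id`` and a blocking DRM file-descriptor ``fd`` (in real code you would
normally **poll**\ (2) on ``fd`` before reading events):

::

    static void page_flip_handler(int fd, unsigned int sequence,
                                  unsigned int tv_sec, unsigned int tv_usec,
                                  void *user_data)
    {
        /* the flip completed; the previous buffer can be rendered into again */
    }

    drmEventContext ev = {
        .version = 2,
        .page_flip_handler = page_flip_handler,
    };

    /* schedule the flip to next_fb_id for the next vblank and request an event */
    drmModePageFlip(fd, crtc_id, next_fb_id, DRM_MODE_PAGE_FLIP_EVENT, NULL);

    /* read and dispatch the completion event */
    drmHandleEvent(fd, &ev);

With two framebuffers you then simply alternate which one is passed to
**drmModePageFlip**\ (3) after each completed flip.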
Planes
------
Planes are controlled independently from CRTCs. That is, a call to
**drmModeSetCrtc**\ (3) does not affect planes. Instead, you need to call
**drmModeSetPlane**\ (3) to configure a plane. This requires the plane ID, a
CRTC, a framebuffer and offsets into the plane-framebuffer and the
CRTC-framebuffer. The CRTC then blends the content from the plane over the CRTC
framebuffer buffer during scanout. As this does not involve any
software-blending, it is way faster than traditional blending. However, plane
resources are limited. See **drmModeGetPlaneResources**\ (3) for more
information.
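A sketch of a single plane update, assuming ``plane_id``, ``crtc_id`` and
``fb_id`` are already known and that the framebuffer is ``src_w`` x ``src_h``
pixels; note that the source rectangle is given in 16.16 fixed-point:

::

    /* show the whole framebuffer at (0, 0) on the CRTC, scaled to dst_w x dst_h */
    drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0 /* flags */,
                    0, 0, dst_w, dst_h,              /* destination on the CRTC */
                    0, 0, src_w << 16, src_h << 16); /* source rectangle, 16.16 */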
Cursors
-------
Similar to planes, many devices also support hardware cursors. A cursor is a very
small buffer with an image that is blended over the CRTC framebuffer. You can
set a different cursor for each CRTC with **drmModeSetCursor**\ (3) and move it
on the screen with **drmModeMoveCursor**\ (3). This lets you move the cursor
on the screen without rerendering. If no hardware cursors are supported, you
need to rerender for each frame the cursor is moved.
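A sketch of cursor usage, assuming ``cursor_handle`` is the GEM handle of a
64x64 ARGB buffer (see **drm-memory**\ (7) for ways to allocate one):

::

    /* attach the 64x64 cursor image to the CRTC */
    drmModeSetCursor(fd, crtc_id, cursor_handle, 64, 64);

    /* place the cursor at (x, y) on the screen; no rerendering required */
    drmModeMoveCursor(fd, crtc_id, x, y);

    /* hide the cursor again by passing a zero handle */
    drmModeSetCursor(fd, crtc_id, 0, 0, 0);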
Examples
========
Some examples of how basic mode-setting can be done. See the man-page of each
DRM function for more information.
CRTC/Encoder Selection
----------------------
If you retrieved all display configuration information via
**drmModeGetResources**\ (3) as ``drmModeRes *res``, selected a connector from
the list in ``res->connectors`` and retrieved the connector-information as
``drmModeConnector *conn`` via **drmModeGetConnector**\ (3), then this example
shows how you can find a suitable CRTC id to drive this connector. This
function takes a file-descriptor to the DRM device (see **drmOpen**\ (3)) as
``fd``, a pointer to the retrieved resources as ``res`` and a pointer to the
selected connector as ``conn``. It returns an integer smaller than 0 on
failure, otherwise, a valid CRTC id is returned.
::
static int modeset_find_crtc(int fd, drmModeRes *res, drmModeConnector *conn)
{
drmModeEncoder *enc;
unsigned int i, j;
/* iterate all encoders of this connector */
for (i = 0; i < conn->count_encoders; ++i) {
enc = drmModeGetEncoder(fd, conn->encoders[i]);
if (!enc) {
/* cannot retrieve encoder, ignoring... */
continue;
}
/* iterate all global CRTCs */
for (j = 0; j < res->count_crtcs; ++j) {
/* check whether this CRTC works with the encoder */
if (!(enc->possible_crtcs & (1 << j)))
continue;
/* Here you need to check that no other connector
* currently uses the CRTC with id "crtc". If you intend
* to drive one connector only, then you can skip this
* step. Otherwise, simply scan your list of configured
* connectors and CRTCs whether this CRTC is already
* used. If it is, then simply continue the search here. */
if (res->crtcs[j] "is unused") {
drmModeFreeEncoder(enc);
return res->crtcs[j];
}
}
drmModeFreeEncoder(enc);
}
/* cannot find a suitable CRTC */
return -ENOENT;
}
Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
See Also
========
**drm**\ (7), **drm-memory**\ (7), **drmModeGetResources**\ (3),
**drmModeGetConnector**\ (3), **drmModeGetEncoder**\ (3),
**drmModeGetCrtc**\ (3), **drmModeSetCrtc**\ (3), **drmModeGetFB**\ (3),
**drmModeAddFB**\ (3), **drmModeAddFB2**\ (3), **drmModeRmFB**\ (3),
**drmModePageFlip**\ (3), **drmModeGetPlaneResources**\ (3),
**drmModeGetPlane**\ (3), **drmModeSetPlane**\ (3), **drmModeSetCursor**\ (3),
**drmModeMoveCursor**\ (3), **drmSetMaster**\ (3), **drmAvailable**\ (3),
**drmCheckModesettingSupported**\ (3), **drmOpen**\ (3)

341
man/drm-kms.xml Normal file
View file

@ -0,0 +1,341 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drm-kms">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drm-kms</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>drm-kms</refname>
<refpurpose>Kernel Mode-Setting</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
<funcsynopsisinfo>#include &lt;xf86drmMode.h&gt;</funcsynopsisinfo>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para>Each DRM device provides access to manage which monitors and displays
are currently used and which frames are displayed on them. This task is
called <emphasis>Kernel Mode-Setting</emphasis> (KMS). Historically,
this was done in user-space and called
<emphasis>User-space Mode-Setting</emphasis> (UMS). Almost all
open-source drivers now provide the KMS kernel API to do this in the
kernel, however, many non-open-source binary drivers from different
vendors still do not support this. You can use
<citerefentry><refentrytitle>drmCheckModesettingSupported</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to check whether your driver supports this. To understand how KMS
works, we need to introduce 5 objects: <emphasis>CRTCs</emphasis>,
<emphasis>Planes</emphasis>, <emphasis>Encoders</emphasis>,
<emphasis>Connectors</emphasis> and
<emphasis>Framebuffers</emphasis>.
<variablelist>
<varlistentry>
<term>CRTCs</term>
<listitem>
<para>A <emphasis>CRTC</emphasis>, short for
<emphasis>CRT Controller</emphasis>, is an abstraction
representing a part of the chip that contains a pointer to a
scanout buffer. Therefore, the number of CRTCs available
determines how many independent scanout buffers can be active
at any given time. The CRTC structure contains several fields
to support this: a pointer to some video memory (abstracted as
a frame-buffer object), a list of driven connectors, a display
mode and an (x, y) offset into the video memory to support
panning or configurations where one piece of video memory
spans multiple CRTCs. A CRTC is the central point where
configuration of displays happens. You select which objects to
use, which modes and which parameters and then configure each
CRTC via
<citerefentry><refentrytitle>drmModeCrtcSet</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to drive the display devices.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>Planes</term>
<listitem>
<para>A <emphasis>plane</emphasis> represents an image source that
can be blended with or overlaid on top of a CRTC during the
scanout process. Planes are associated with a frame-buffer to
crop a portion of the image memory (source) and optionally
scale it to a destination size. The result is then blended
with or overlaid on top of a CRTC. Planes are not provided by
all hardware and the number of available planes is limited. If
planes are not available or if not enough planes are
available, the user should fall back to normal software
blending (via GPU or CPU).</para>
</listitem>
</varlistentry>
<varlistentry>
<term>Encoders</term>
<listitem>
<para>An <emphasis>encoder</emphasis> takes pixel data from a CRTC
and converts it to a format suitable for any attached
connectors. On some devices, it may be possible to have a CRTC
send data to more than one encoder. In that case, both
encoders would receive data from the same scanout buffer,
resulting in a <emphasis>cloned</emphasis> display
configuration across the connectors attached to each
encoder.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>Connectors</term>
<listitem>
<para>A <emphasis>connector</emphasis> is the final destination of
pixel-data on a device, and usually connects directly to an
external display device like a monitor or laptop panel. A
connector can only be attached to one encoder at a time. The
connector is also the structure where information about the
attached display is kept, so it contains fields for display
data, <emphasis>EDID</emphasis> data,
<emphasis>DPMS</emphasis> and
<emphasis>connection status</emphasis>, and information about
modes supported on the attached displays.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>Framebuffers</term>
<listitem>
<para><emphasis>Framebuffers</emphasis> are abstract memory objects
that provide a source of pixel data to scanout to a CRTC.
Applications explicitly request the creation of framebuffers
and can control their behavior. Framebuffers rely on the
underneath memory manager for low-level memory operations.
When creating a framebuffer, applications pass a memory handle
through the API which is used as backing storage. The
framebuffer itself is only an abstract object with no data. It
just refers to memory buffers that must be created with the
<citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>
API.</para>
</listitem>
</varlistentry>
</variablelist>
</para>
<refsect2>
<title>Mode-Setting</title>
<para>Before mode-setting can be performed, an application needs to call
<citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to become <emphasis>DRM-Master</emphasis>. It then has exclusive
access to the KMS API. A call to
<citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
returns a list of <emphasis>CRTCs</emphasis>,
<emphasis>Connectors</emphasis>, <emphasis>Encoders</emphasis> and
<emphasis>Planes</emphasis>.</para>
<para>Normal procedure now includes: First, you select which connectors
you want to use. Users are mostly interested in which monitor or
display-panel is active so you need to make sure to arrange them in
the correct logical order and select the correct ones to use. For
each connector, you need to find a CRTC to drive this connector. If
you want to clone output to two or more connectors, you may use a
single CRTC for all cloned connectors (if the hardware supports
this). To find a suitable CRTC, you need to iterate over the list of
encoders that are available for each connector. Each encoder
contains a list of CRTCs that it can work with and you simply select
one of these CRTCs. If you later program the CRTC to control a
connector, it automatically selects the best encoder. However, this
procedure is needed so your CRTC has at least one working encoder
for the selected connector. See the <emphasis>Examples</emphasis>
section below for more information.</para>
<para>All valid modes for a connector can be retrieved with a call to
<citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
You need to select the mode you want to use and save it. The first
mode in the list is the default mode with the highest resolution
possible and often a suitable choice.</para>
<para>After you have a working connector+CRTC+mode combination, you need
to create a framebuffer that is used for scanout. Memory buffer
allocation is driver-dependent and described in
<citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>.
You need to create a buffer big enough for your selected mode. Now
you can create a framebuffer object that uses your memory-buffer as
scanout buffer. You can do this with
<citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>
and
<citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
<para>As a last step, you want to program your CRTC to drive your selected
connector. You can do this with a call to
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
</refsect2>
<refsect2>
<title>Page-Flipping</title>
<para>A call to
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
is executed immediately and forces the CRTC to use the new scanout
buffer. If you want smooth-transitions without tearing, you probably
use double-buffering. You need to create one framebuffer object for
each buffer you use. You can then call
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
on the next buffer to flip. If you want to synchronize your flips
with <emphasis>vertical-blanks</emphasis>, you can use
<citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>
which schedules your page-flip for the next
<emphasis>vblank</emphasis>.</para>
</refsect2>
<refsect2>
<title>Planes</title>
<para>Planes are controlled independently from CRTCs. That is, a call to
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>
does not affect planes. Instead, you need to call
<citerefentry><refentrytitle>drmModeSetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to configure a plane. This requires the plane ID, a CRTC, a
framebuffer and offsets into the plane-framebuffer and the
CRTC-framebuffer. The CRTC then blends the content from the plane
over the CRTC framebuffer buffer during scanout. As this does not
involve any software-blending, it is way faster than traditional
blending. However, plane resources are limited. See
<citerefentry><refentrytitle>drmModeGetPlaneResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
for more information.</para>
</refsect2>
<refsect2>
<title>Cursors</title>
<para>Similar to planes, many devices also support hardware cursors. A cursor is
a very small buffer with an image that is blended over the CRTC
framebuffer. You can set a different cursor for each CRTC with
<citerefentry><refentrytitle>drmModeSetCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>
and move it on the screen with
<citerefentry><refentrytitle>drmModeMoveCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
This lets you move the cursor on the screen without rerendering. If
no hardware cursors are supported, you need to rerender for each
frame the cursor is moved.</para>
</refsect2>
</refsect1>
<refsect1>
<title>Examples</title>
<para>Some examples of how basic mode-setting can be done. See the man-page
of each DRM function for more information.</para>
<refsect2>
<title>CRTC/Encoder Selection</title>
<para>If you retrieved all display configuration information via
<citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>
as <structname>drmModeRes</structname> *<varname>res</varname>,
selected a connector from the list in
<varname>res</varname>-><structfield>connectors</structfield>
and retrieved the connector-information as
<structname>drmModeConnector</structname> *<varname>conn</varname>
via
<citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>
then this example shows how you can find a suitable CRTC id to
drive this connector. This function takes a file-descriptor to the
DRM device (see
<citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>)
as <varname>fd</varname>, a pointer to the retrieved resources as
<varname>res</varname> and a pointer to the selected connector as
<varname>conn</varname>. It returns an integer smaller than 0 on
failure, otherwise, a valid CRTC id is returned.</para>
<programlisting>
static int modeset_find_crtc(int fd, drmModeRes *res, drmModeConnector *conn)
{
drmModeEncoder *enc;
unsigned int i, j;
/* iterate all encoders of this connector */
for (i = 0; i &lt; conn->count_encoders; ++i) {
enc = drmModeGetEncoder(fd, conn->encoders[i]);
if (!enc) {
/* cannot retrieve encoder, ignoring... */
continue;
}
/* iterate all global CRTCs */
for (j = 0; j &lt; res->count_crtcs; ++j) {
/* check whether this CRTC works with the encoder */
if (!(enc->possible_crtcs &amp; (1 &lt;&lt; j)))
continue;
/* Here you need to check that no other connector
* currently uses the CRTC with id "crtc". If you intend
* to drive one connector only, then you can skip this
* step. Otherwise, simply scan your list of configured
* connectors and CRTCs whether this CRTC is already
* used. If it is, then simply continue the search here. */
if (res->crtcs[j] "is unused") {
drmModeFreeEncoder(enc);
return res->crtcs[j];
}
}
drmModeFreeEncoder(enc);
}
/* cannot find a suitable CRTC */
return -ENOENT;
}
</programlisting>
</refsect2>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetEncoder</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeRmFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetPlaneResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeSetPlane</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeSetCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeMoveCursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmCheckModesettingSupported</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -1,313 +0,0 @@
==========
drm-memory
==========
---------------------
DRM Memory Management
---------------------
:Date: September 2012
:Manual section: 7
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
Description
===========
Many modern high-end GPUs come with their own memory managers. They even
include several different caches that need to be synchronized during access.
Textures, framebuffers, command buffers and more need to be stored in memory
that can be accessed quickly by the GPU. Therefore, memory management on GPUs
is highly driver- and hardware-dependent.
However, there are several frameworks in the kernel that are used by more than
one driver. These can be used for trivial mode-setting without requiring
driver-dependent code. But for hardware-accelerated rendering you need to read
the manual pages for the driver you want to work with.
Dumb-Buffers
------------
Almost all in-kernel DRM hardware drivers support an API called *Dumb-Buffers*.
This API allows you to create buffers of arbitrary size that can be used for
scanout. These buffers can be memory mapped via **mmap**\ (2) so you can render
into them on the CPU. However, GPU access to these buffers is often not
possible. Therefore, they are fine for simple tasks but not suitable for
complex compositions and renderings.
The ``DRM_IOCTL_MODE_CREATE_DUMB`` ioctl can be used to create a dumb buffer.
The kernel will return a 32-bit handle that can be used to manage the buffer
with the DRM API. You can create framebuffers with **drmModeAddFB**\ (3) and
use it for mode-setting and scanout. To access the buffer, you first need to
retrieve the offset of the buffer. The ``DRM_IOCTL_MODE_MAP_DUMB`` ioctl
requests the DRM subsystem to prepare the buffer for memory-mapping and returns
a fake-offset that can be used with **mmap**\ (2).
The ``DRM_IOCTL_MODE_CREATE_DUMB`` ioctl takes as argument a structure of type
``struct drm_mode_create_dumb``:
::
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
__u32 handle;
__u32 pitch;
__u64 size;
};
The fields *height*, *width*, *bpp* and *flags* have to be provided by the
caller. The other fields are filled by the kernel with the return values.
*height* and *width* are the dimensions of the rectangular buffer that is
created. *bpp* is the number of bits-per-pixel and must be a multiple of 8. You
most commonly want to pass 32 here. The flags field is currently unused and
must be zeroed. Different flags to modify the behavior may be added in the
future. After calling the ioctl, the handle, pitch and size fields are filled
by the kernel. *handle* is a 32-bit gem handle that identifies the buffer. This
is used by several other calls that take a gem-handle or memory-buffer as
argument. The *pitch* field is the pitch (or stride) of the new buffer. Most
drivers use 32-bit or 64-bit aligned stride-values. The *size* field contains the
absolute size in bytes of the buffer, which is normally ``pitch * height`` bytes.
To prepare the buffer for **mmap**\ (2) you need to use the
``DRM_IOCTL_MODE_MAP_DUMB`` ioctl. It takes as argument a structure of type
``struct drm_mode_map_dumb``:
::
struct drm_mode_map_dumb {
__u32 handle;
__u32 pad;
__u64 offset;
};
You need to put the gem-handle that was previously retrieved via
``DRM_IOCTL_MODE_CREATE_DUMB`` into the *handle* field. The *pad* field is
unused padding and must be zeroed. After completion, the *offset* field will
contain an offset that can be used with **mmap**\ (2) on the DRM
file-descriptor.
If you don't need your dumb-buffer anymore, you have to destroy it with
``DRM_IOCTL_MODE_DESTROY_DUMB``. If you close the DRM file-descriptor, all open
dumb-buffers are automatically destroyed. This ioctl takes as argument a
structure of type ``struct drm_mode_destroy_dumb``:
::
struct drm_mode_destroy_dumb {
__u32 handle;
};
You only need to put your handle into the *handle* field. After this call, the
handle is invalid and may be reused for new buffers by the dumb-API.
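For illustration, a short sketch of destroying a dumb-buffer whose *handle* was
returned by ``DRM_IOCTL_MODE_CREATE_DUMB``:

::

    struct drm_mode_destroy_dumb dreq;

    memset(&dreq, 0, sizeof(dreq));
    dreq.handle = handle;  /* handle from DRM_IOCTL_MODE_CREATE_DUMB */
    drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);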
TTM
---
*TTM* stands for *Translation Table Manager* and is a generic memory-manager
provided by the kernel. It does not provide a common user-space API so you need
to look at each driver interface if you want to use it. See for instance the
radeon man pages for more information on memory-management with radeon and TTM.
GEM
---
*GEM* stands for *Graphics Execution Manager* and is a generic DRM
memory-management framework in the kernel, that is used by many different
drivers. GEM is designed to manage graphics memory, control access to the
graphics device execution context and handle the essentially NUMA environment
unique to modern graphics hardware. GEM allows multiple applications to share
graphics device resources without the need to constantly reload the entire
graphics card. Data may be shared between multiple applications with gem
ensuring that the correct memory synchronization occurs.
GEM provides simple mechanisms to manage graphics data and control execution
flow within the linux DRM subsystem. However, GEM is not a complete framework
that is fully driver independent. Instead, it provides many functions that are
shared between many drivers, but each driver has to implement most of
memory-management with driver-dependent ioctls. This manpage tries to describe
the semantics (and if it applies, the syntax) that is shared between all
drivers that use GEM.
All GEM APIs are defined as **ioctl**\ (2) on the DRM file descriptor. An
application must be authorized via **drmAuthMagic**\ (3) to the current
DRM-Master to access the GEM subsystem. A driver that does not support GEM will
return ``ENODEV`` for all these ioctls. Invalid object handles return
``EINVAL`` and invalid object names return ``ENOENT``.
Gem provides explicit memory management primitives. System pages are allocated
when the object is created, either as the fundamental storage for hardware
where system memory is used by the graphics processor directly, or as backing
store for graphics-processor resident memory.
Objects are referenced from user-space using handles. These are, for all
intents and purposes, equivalent to file descriptors but avoid the overhead.
Newer kernel drivers also support the **drm-prime**\ (7) infrastructure which
can return a real file-descriptor for GEM-handles using the Linux DMA-BUF API.
Objects may be published with a name so that other applications and processes
can access them. The name remains valid as long as the object exists.
GEM-objects are reference counted in the kernel. The object is only destroyed
when all handles from user-space were closed.
GEM-buffers cannot be created with a generic API. Each driver provides its own
API to create GEM-buffers. See for example ``DRM_I915_GEM_CREATE``,
``DRM_NOUVEAU_GEM_NEW`` or ``DRM_RADEON_GEM_CREATE``. Each of these ioctls
returns a GEM-handle that can be passed to different generic ioctls. The
*libgbm* library from the *mesa3D* distribution tries to provide a
driver-independent API to create GBM buffers and retrieve a GBM-handle to them.
It allows you to create buffers for different use-cases including scanout,
rendering, cursors and CPU-access. See the libgbm library for more information
or look at the driver-dependent man-pages (for example **drm-intel**\ (7) or
**drm-radeon**\ (7)).
GEM-buffers can be closed with **drmCloseBufferHandle**\ (3). It takes as
argument the GEM-handle to be closed. After this call the GEM handle cannot be
used by this process anymore and may be reused for new GEM objects by the GEM
API.
If you want to share GEM-objects between different processes, you can create a
name for them and pass this name to other processes which can then open this
GEM-object. Names are currently 32-bit integer IDs and have no special
protection. That is, if you put a name on your GEM-object, every other client
that has access to the DRM device and is authenticated via
**drmAuthMagic**\ (3) to the current DRM-Master, can *guess* the name and open
or access the GEM-object. If you want more fine-grained access control, you can
use the new **drm-prime**\ (7) API to retrieve file-descriptors for
GEM-handles. To create a name for a GEM-handle, you use the
``DRM_IOCTL_GEM_FLINK`` ioctl. It takes as argument a structure of type
``struct drm_gem_flink``:
::
struct drm_gem_flink {
__u32 handle;
__u32 name;
};
You have to put your handle into the *handle* field. After completion, the
kernel has put the new unique name into the name field. You can now pass
this name to other processes which can then import the name with the
``DRM_IOCTL_GEM_OPEN`` ioctl. It takes as argument a structure of type
``struct drm_gem_open``:
::
struct drm_gem_open {
__u32 name;
__u32 handle;
__u64 size;
};
You have to fill in the *name* field with the name of the GEM-object that you
want to open. The kernel will fill in the *handle* and *size* fields with the
new handle and size of the GEM-object. You can now access the GEM-object via
the handle as if you created it with the GEM API.
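A short sketch of publishing a name in one process and opening it in another;
``handle`` and ``name_from_process_a`` are illustrative, and error handling is
omitted:

::

    /* process A: publish a name for an existing GEM handle */
    struct drm_gem_flink flink;

    memset(&flink, 0, sizeof(flink));
    flink.handle = handle;
    drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
    /* flink.name can now be passed to other processes */

    /* process B: open the named object and obtain a local handle */
    struct drm_gem_open oreq;

    memset(&oreq, 0, sizeof(oreq));
    oreq.name = name_from_process_a;
    drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &oreq);
    /* oreq.handle and oreq.size are now valid; release the handle with
     * drmCloseBufferHandle(3) when it is no longer needed */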
Besides generic buffer management, the GEM API does not provide any generic
access. Each driver implements its own functionality on top of this API. This
includes execution-buffers, GTT management, context creation, CPU access, GPU
I/O and more. The next higher-level API is *OpenGL*. So if you want to use more
GPU features, you should use the *mesa3D* library to create OpenGL contexts on
DRM devices. This does *not* require any windowing-system like X11, but can
also be done on raw DRM devices. However, this is beyond the scope of this
man-page. You may have a look at other mesa3D man pages, including libgbm and
libEGL. 2D software-rendering (rendering with the CPU) can be achieved with the
dumb-buffer-API in a driver-independent fashion, however, for
hardware-accelerated 2D or 3D rendering you must use OpenGL. Any other API that
tries to abstract the driver-internals to access GEM-execution-buffers and
other GPU internals would simply reinvent OpenGL, so it is not provided. But if
you need more detailed information for a specific driver, you may have a look
into the driver-manpages, including **drm-intel**\ (7), **drm-radeon**\ (7) and
**drm-nouveau**\ (7). However, the **drm-prime**\ (7) infrastructure and the
generic GEM API as described here allow display-managers to handle
graphics-buffers and render-clients without any deeper knowledge of the GPU
that is used. Moreover, it allows moving objects between GPUs and implementing
complex display-servers that don't do any rendering on their own. See its
man-page for more information.
Examples
========
This section includes examples for basic memory-management tasks.
Dumb-Buffers
------------
This example shows how to create a dumb-buffer via the generic DRM API.
This is driver-independent (as long as the driver supports dumb-buffers)
and provides memory-mapped buffers that can be used for scanout. This
example creates a full-HD 1920x1080 buffer with 32 bits-per-pixel and a
color-depth of 24 bits. The buffer is then bound to a framebuffer which
can be used for scanout with the KMS API (see **drm-kms**\ (7)).
::
struct drm_mode_create_dumb creq;
struct drm_mode_destroy_dumb dreq;
struct drm_mode_map_dumb mreq;
uint32_t fb;
int ret;
void *map;
/* create dumb buffer */
memset(&creq, 0, sizeof(creq));
creq.width = 1920;
creq.height = 1080;
creq.bpp = 32;
ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
if (ret < 0) {
/* buffer creation failed; see "errno" for more error codes */
...
}
/* creq.pitch, creq.handle and creq.size are filled by this ioctl with
* the requested values and can be used now. */
/* create framebuffer object for the dumb-buffer */
ret = drmModeAddFB(fd, 1920, 1080, 24, 32, creq.pitch, creq.handle, &fb);
if (ret) {
/* frame buffer creation failed; see "errno" */
...
}
/* the framebuffer "fb" can now used for scanout with KMS */
/* prepare buffer for memory mapping */
memset(&mreq, 0, sizeof(mreq));
mreq.handle = creq.handle;
ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
if (ret) {
/* DRM buffer preparation failed; see "errno" */
...
}
/* mreq.offset now contains the new offset that can be used with mmap() */
/* perform actual memory mapping */
map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);
if (map == MAP_FAILED) {
/* memory-mapping failed; see "errno" */
...
}
/* clear the framebuffer to 0 */
memset(map, 0, creq.size);
Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
See Also
========
**drm**\ (7), **drm-kms**\ (7), **drm-prime**\ (7), **drmAvailable**\ (3),
**drmOpen**\ (3), **drm-intel**\ (7), **drm-radeon**\ (7), **drm-nouveau**\ (7)

429
man/drm-memory.xml Normal file
View file

@ -0,0 +1,429 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drm-memory">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drm-memory</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>drm-memory</refname>
<refname>drm-mm</refname>
<refname>drm-gem</refname>
<refname>drm-ttm</refname>
<refpurpose>DRM Memory Management</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para>Many modern high-end GPUs come with their own memory managers. They
even include several different caches that need to be synchronized
during access. Textures, framebuffers, command buffers and more need
to be stored in memory that can be accessed quickly by the GPU.
Therefore, memory management on GPUs is highly driver- and
hardware-dependent.</para>
<para>However, there are several frameworks in the kernel that are used by
more than one driver. These can be used for trivial mode-setting
without requiring driver-dependent code. But for
hardware-accelerated rendering you need to read the manual pages for
the driver you want to work with.</para>
<refsect2>
<title>Dumb-Buffers</title>
<para>Almost all in-kernel DRM hardware drivers support an API called
<emphasis>Dumb-Buffers</emphasis>. This API allows creating buffers
of arbitrary size that can be used for scanout. These buffers can be
memory mapped via
<citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
so you can render into them on the CPU. However, GPU access to these
buffers is often not possible. Therefore, they are fine for simple
tasks but not suitable for complex compositions and
renderings.</para>
<para>The <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> ioctl can be
used to create a dumb buffer. The kernel will return a 32bit handle
that can be used to manage the buffer with the DRM API. You can
create framebuffers with
<citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>
and use it for mode-setting and scanout. To access the buffer, you
first need to retrieve the offset of the buffer. The
<constant>DRM_IOCTL_MODE_MAP_DUMB</constant> ioctl requests the DRM
subsystem to prepare the buffer for memory-mapping and returns a
fake-offset that can be used with
<citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>.</para>
<para>The <constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> ioctl takes as
argument a structure of type
<structname>struct drm_mode_create_dumb</structname>:
<programlisting>
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
__u32 handle;
__u32 pitch;
__u64 size;
};
</programlisting>
The fields <structfield>height</structfield>,
<structfield>width</structfield>, <structfield>bpp</structfield> and
<structfield>flags</structfield> have to be provided by the caller.
The other fields are filled by the kernel with the return values.
<structfield>height</structfield> and
<structfield>width</structfield> are the dimensions of the
rectangular buffer that is created. <structfield>bpp</structfield>
is the number of bits-per-pixel and must be a multiple of
<literal>8</literal>. You most commonly want to pass
<literal>32</literal> here. The <structfield>flags</structfield>
field is currently unused and must be zeroed. Different flags to
modify the behavior may be added in the future. After calling the
ioctl, the <structfield>handle</structfield>,
<structfield>pitch</structfield> and <structfield>size</structfield>
fields are filled by the kernel. <structfield>handle</structfield>
is a 32bit gem handle that identifies the buffer. This is used by
several other calls that take a gem-handle or memory-buffer as
argument. The <structfield>pitch</structfield> field is the
pitch (or stride) of the new buffer. Most drivers use 32bit or 64bit
aligned stride-values. The <structfield>size</structfield> field
contains the absolute size in bytes of the buffer. This is normally
at least <emphasis>height * pitch</emphasis>.</para>
<para>To prepare the buffer for
<citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
you need to use the <constant>DRM_IOCTL_MODE_MAP_DUMB</constant>
ioctl. It takes as argument a structure of type
<structname>struct drm_mode_map_dumb</structname>:
<programlisting>
struct drm_mode_map_dumb {
__u32 handle;
__u32 pad;
__u64 offset;
};
</programlisting>
You need to put the gem-handle that was previously retrieved via
<constant>DRM_IOCTL_MODE_CREATE_DUMB</constant> into the
<structfield>handle</structfield> field. The
<structfield>pad</structfield> field is unused padding and must be
zeroed. After completion, the <structfield>offset</structfield>
field will contain an offset that can be used with
<citerefentry><refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum></citerefentry>
on the DRM file-descriptor.</para>
<para>If you no longer need your dumb-buffer, you can destroy it
with <constant>DRM_IOCTL_MODE_DESTROY_DUMB</constant>. If you close
the DRM file-descriptor, all open dumb-buffers are automatically
destroyed. This ioctl takes as argument a structure of type
<structname>struct drm_mode_destroy_dumb</structname>:
<programlisting>
struct drm_mode_destroy_dumb {
__u32 handle;
};
</programlisting>
You only need to put your handle into the
<structfield>handle</structfield> field. After this call, the handle
is invalid and may be reused for new buffers by the dumb-API.</para>
</refsect2>
<refsect2>
<title>TTM</title>
<para><emphasis>TTM</emphasis> stands for
<emphasis>Translation Table Manager</emphasis> and is a generic
memory-manager provided by the kernel. It does not provide a common
user-space API so you need to look at each driver interface if you
want to use it. See for instance the radeon manpages for more
information on memory-management with radeon and TTM.</para>
</refsect2>
<refsect2>
<title>GEM</title>
<para><emphasis>GEM</emphasis> stands for
<emphasis>Graphics Execution Manager</emphasis> and is a generic DRM
memory-management framework in the kernel, that is used by many
different drivers. Gem is designed to manage graphics memory,
control access to the graphics device execution context and handle
the essentially NUMA environment unique to modern graphics hardware. Gem
allows multiple applications to share graphics device resources
without the need to constantly reload the entire graphics card. Data
may be shared between multiple applications with gem ensuring that
the correct memory synchronization occurs.</para>
<para>Gem provides simple mechanisms to manage graphics data and control
execution flow within the linux DRM subsystem. However, gem is not a
complete framework that is fully driver-independent. Instead, it
provides many functions that are shared between many drivers, but
each driver has to implement most of memory-management with
driver-dependent ioctls. This manpage tries to describe the
semantics (and if it applies, the syntax) that is shared between all
drivers that use gem.</para>
<para>All GEM APIs are defined as
<citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry>
on the DRM file descriptor. An application must be authorized via
<citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to the current DRM-Master to access the GEM subsystem. A driver that
does not support gem will return <constant>ENODEV</constant> for all
these ioctls. Invalid object handles return
<constant>EINVAL</constant> and invalid object names return
<constant>ENOENT</constant>.</para>
<para>Gem provides explicit memory management primitives. System pages are
allocated when the object is created, either as the fundamental
storage for hardware where system memory is used by the graphics
processor directly, or as backing store for graphics-processor
resident memory.</para>
<para>Objects are referenced from user-space using handles. These are, for
all intents and purposes, equivalent to file descriptors but avoid
the overhead. Newer kernel drivers also support the
<citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
infrastructure which can return real file-descriptors for gem-handles
using the linux dma-buf API. Objects may be published with a name so
that other applications and processes can access them. The name
remains valid as long as the object exists. Gem-objects are
reference counted in the kernel. The object is only destroyed when
all handles from user-space have been closed.</para>
<para>Gem-buffers cannot be created with a generic API. Each driver
provides its own API to create gem-buffers. See for example
<constant>DRM_I915_GEM_CREATE</constant>,
<constant>DRM_NOUVEAU_GEM_NEW</constant> or
<constant>DRM_RADEON_GEM_CREATE</constant>. Each of these ioctls
returns a gem-handle that can be passed to different generic ioctls.
The <emphasis>libgbm</emphasis> library from the
<emphasis>mesa3D</emphasis> distribution tries to provide a
driver-independent API to create gbm buffers and retrieve a
gbm-handle to them. It allows creating buffers for different
use-cases including scanout, rendering, cursors and CPU-access. See
the libgbm library for more information or look at the
driver-dependent man-pages (for example
<citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>
or
<citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>).</para>
<para>Gem-buffers can be closed with the
<constant>DRM_IOCTL_GEM_CLOSE</constant> ioctl. It takes as argument
a structure of type <structname>struct drm_gem_close</structname>:
<programlisting>
struct drm_gem_close {
__u32 handle;
__u32 pad;
};
</programlisting>
The <structfield>handle</structfield> field is the gem-handle to be
closed. The <structfield>pad</structfield> field is unused padding.
It must be zeroed. After this call the gem handle cannot be used by
this process anymore and may be reused for new gem objects by the
gem API.</para>
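<para>A minimal sketch of closing a gem-handle, assuming
<emphasis>fd</emphasis> is the DRM file-descriptor and
<emphasis>handle</emphasis> a valid gem-handle:
<programlisting>
struct drm_gem_close req;
memset(&amp;req, 0, sizeof(req));
req.handle = handle;
if (drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &amp;req) &lt; 0) {
    /* closing the handle failed; see "errno" */
}
</programlisting></para>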
<para>If you want to share gem-objects between different processes, you
can create a name for them and pass this name to other processes
which can then open this gem-object. Names are currently 32bit
integer IDs and have no special protection. That is, if you put a
name on your gem-object, every other client that has access to the
DRM device and is authenticated via
<citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
to the current DRM-Master, can <emphasis>guess</emphasis> the name
and open or access the gem-object. If you want more fine-grained
access control, you can use the new
<citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
API to retrieve file-descriptors for gem-handles. To create a name
for a gem-handle, you use the
<constant>DRM_IOCTL_GEM_FLINK</constant> ioctl. It takes as argument
a structure of type <structname>struct drm_gem_flink</structname>:
<programlisting>
struct drm_gem_flink {
__u32 handle;
__u32 name;
};
</programlisting>
You have to put your handle into the
<structfield>handle</structfield> field. After completion, the
kernel has put the new unique name into the
<structfield>name</structfield> field. You can now pass this name to
other processes which can then import the name with the
<constant>DRM_IOCTL_GEM_OPEN</constant> ioctl. It takes as argument
a structure of type <structname>struct drm_gem_open</structname>:
<programlisting>
struct drm_gem_open {
__u32 name;
__u32 handle;
__u32 size;
};
</programlisting>
You have to fill in the <structfield>name</structfield> field with
the name of the gem-object that you want to open. The kernel will
fill in the <structfield>handle</structfield> and
<structfield>size</structfield> fields with the new handle and size
of the gem-object. You can now access the gem-object via the handle
as if you created it with the gem API.</para>
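<para>The following is a minimal sketch of this name-based sharing. It
assumes both processes have an authenticated DRM file-descriptor
<emphasis>fd</emphasis> for the same device and that
<emphasis>handle</emphasis> is an existing gem-handle in the exporting
process:
<programlisting>
/* exporting process: create a global name for the gem-handle */
struct drm_gem_flink flink;
memset(&amp;flink, 0, sizeof(flink));
flink.handle = handle;
if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &amp;flink) &lt; 0) {
    /* flink failed; see "errno" */
}
/* pass flink.name to the importing process, e.g. over a socket */

/* importing process: open the gem-object by its name */
struct drm_gem_open oreq;
memset(&amp;oreq, 0, sizeof(oreq));
oreq.name = name; /* the name received from the exporting process */
if (drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &amp;oreq) &lt; 0) {
    /* open failed; see "errno" */
}
/* oreq.handle and oreq.size now describe the shared gem-object */
</programlisting></para>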
<para>Besides generic buffer management, the GEM API does not provide any
generic access. Each driver implements its own functionality on top
of this API. This includes execution-buffers, GTT management,
context creation, CPU access, GPU I/O and more. The next
higher-level API is <emphasis>OpenGL</emphasis>. So if you want to
use more GPU features, you should use the
<emphasis>mesa3D</emphasis> library to create OpenGL contexts on DRM
devices. This does <emphasis>not</emphasis> require any
windowing-system like X11, but can also be done on raw DRM devices.
However, this is beyond the scope of this man-page. You may have a
look at other mesa3D manpages, including libgbm and libEGL. 2D
software-rendering (rendering with the CPU) can be achieved with the
dumb-buffer-API in a driver-independent fashion, however, for
hardware-accelerated 2D or 3D rendering you must use OpenGL. Any
other API that tries to abstract the driver-internals to access
GEM-execution-buffers and other GPU internals would simply reinvent
OpenGL, so it is not provided. But if you need more detailed
information for a specific driver, you may have a look into the
driver-manpages, including
<citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>
and
<citerefentry><refentrytitle>drm-nouveau</refentrytitle><manvolnum>7</manvolnum></citerefentry>.
However, the
<citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>
infrastructure and the generic gem API as described here allow
display-managers to handle graphics-buffers and render-clients
without any deeper knowledge of the GPU that is used. Moreover, it
allows moving objects between GPUs and implementing complex
display-servers that don't do any rendering on their own. See its
man-page for more information.</para>
</refsect2>
</refsect1>
<refsect1>
<title>Examples</title>
<para>This section includes examples for basic memory-management
tasks.</para>
<refsect2>
<title>Dumb-Buffers</title>
<para>This example shows how to create a dumb-buffer via the generic
DRM API. This is driver-independent (as long as the driver
supports dumb-buffers) and provides memory-mapped buffers that can
be used for scanout. This example creates a full-HD 1920x1080
buffer with 32 bits-per-pixel and a color-depth of 24 bits. The
buffer is then bound to a framebuffer which can be used for
scanout with the KMS API (see
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>).</para>
<programlisting>
struct drm_mode_create_dumb creq;
struct drm_mode_destroy_dumb dreq;
struct drm_mode_map_dumb mreq;
uint32_t fb;
int ret;
void *map;
/* create dumb buffer */
memset(&amp;creq, 0, sizeof(creq));
creq.width = 1920;
creq.height = 1080;
creq.bpp = 32;
ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &amp;creq);
if (ret &lt; 0) {
/* buffer creation failed; see "errno" for more error codes */
...
}
/* creq.pitch, creq.handle and creq.size are filled by this ioctl with
* the requested values and can be used now. */
/* create framebuffer object for the dumb-buffer */
ret = drmModeAddFB(fd, 1920, 1080, 24, 32, creq.pitch, creq.handle, &amp;fb);
if (ret) {
/* frame buffer creation failed; see "errno" */
...
}
/* the framebuffer "fb" can now be used for scanout with KMS */
/* prepare buffer for memory mapping */
memset(&amp;mreq, 0, sizeof(mreq));
mreq.handle = creq.handle;
ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &amp;mreq);
if (ret) {
/* DRM buffer preparation failed; see "errno" */
...
}
/* mreq.offset now contains the new offset that can be used with mmap() */
/* perform actual memory mapping */
map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mreq.offset);
if (map == MAP_FAILED) {
/* memory-mapping failed; see "errno" */
...
}
/* clear the framebuffer to 0 */
memset(map, 0, creq.size);
</programlisting>
</refsect2>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-prime</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-intel</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-radeon</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-nouveau</refentrytitle><manvolnum>7</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -1,91 +0,0 @@
===
drm
===
------------------------
Direct Rendering Manager
------------------------
:Date: September 2012
:Manual section: 7
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
Description
===========
The *Direct Rendering Manager* (DRM) is a framework to manage *Graphics
Processing Units* (GPUs). It is designed to support the needs of complex
graphics devices, usually containing programmable pipelines well suited
to 3D graphics acceleration. Furthermore, it is responsible for memory
management, interrupt handling and DMA to provide a uniform interface to
applications.
In earlier days, the kernel framework was solely used to provide raw
hardware access to privileged user-space processes which implement all
the hardware abstraction layers. But more and more tasks were moved into
the kernel. All these interfaces are based on **ioctl**\ (2) commands on
the DRM character device. The *libdrm* library provides wrappers for these
system-calls and many helpers to simplify the API.
When a GPU is detected, the DRM system loads a driver for the detected
hardware type. Each connected GPU is then presented to user-space via a
character-device that is usually available as ``/dev/dri/card0`` and can
be accessed with **open**\ (2) and **close**\ (2). However, it still
depends on the graphics driver which interfaces are available on these
devices. If an interface is not available, the syscalls will fail with
``EINVAL``.
Authentication
--------------
All DRM devices provide authentication mechanisms. Only a DRM master is
allowed to perform mode-setting or modify core state and only one user
can be DRM master at a time. See **drmSetMaster**\ (3) for information
on how to become DRM master and what the limitations are. Other DRM users
can be authenticated to the DRM-Master via **drmAuthMagic**\ (3) so they
can perform buffer allocations and rendering.
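The usual flow is that a client retrieves a magic token for its own
file-descriptor and passes it to the DRM master, which then authenticates it.
A minimal sketch, where ``client_fd`` and ``master_fd`` are hypothetical,
already-open file-descriptors of the same DRM device and error handling is
omitted:
::
/* client side */
drm_magic_t magic;
drmGetMagic(client_fd, &magic);
/* hand "magic" to the DRM master, e.g. over a socket */
/* DRM-master side */
drmAuthMagic(master_fd, magic);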
Mode-Setting
------------
Managing connected monitors and displays and changing the current modes
is called *Mode-Setting*. This is restricted to the current DRM master.
Historically, this was implemented in user-space, but new DRM drivers
implement a kernel interface to perform mode-setting called *Kernel Mode
Setting* (KMS). If your hardware-driver supports it, you can use the KMS
API provided by DRM. This includes allocating framebuffers, selecting
modes and managing CRTCs and encoders. See **drm-kms**\ (7) for more.
Memory Management
-----------------
The most sophisticated task for GPUs today is managing memory objects.
Textures, framebuffers, command-buffers and all other kinds of commands
for the GPU have to be stored in memory. The DRM driver takes care of
managing all memory objects, flushing caches, synchronizing access and
providing CPU access to GPU memory. All memory management is hardware
driver dependent. However, two generic frameworks are available that are
used by most DRM drivers. These are the *Translation Table Manager*
(TTM) and the *Graphics Execution Manager* (GEM). They provide generic
APIs to create, destroy and access buffers from user-space. However,
there are still many differences between the drivers so driver-dependent
code is still needed. Many helpers are provided in *libgbm* (Graphics
Buffer Manager) from the *Mesa* project. For more information on DRM
memory management, see **drm-memory**\ (7).
Reporting Bugs
==============
Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues.
See Also
========
**drm-kms**\ (7), **drm-memory**\ (7), **drmSetMaster**\ (3),
**drmAuthMagic**\ (3), **drmAvailable**\ (3), **drmOpen**\ (3)

136
man/drm.xml Normal file
View file

@ -0,0 +1,136 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drm">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drm</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>drm</refname>
<refpurpose>Direct Rendering Manager</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para>The <emphasis>Direct Rendering Manager</emphasis> (DRM) is a framework
to manage <emphasis>Graphics Processing Units</emphasis> (GPUs). It is
designed to support the needs of complex graphics devices, usually
containing programmable pipelines well suited to 3D graphics
acceleration. Furthermore, it is responsible for memory management,
interrupt handling and DMA to provide a uniform interface to
applications.</para>
<para>In earlier days, the kernel framework was solely used to provide raw
hardware access to privileged user-space processes which implement
all the hardware abstraction layers. But more and more tasks were
moved into the kernel. All these interfaces are based on
<citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry>
commands on the DRM character device. The <emphasis>libdrm</emphasis>
library provides wrappers for these system-calls and many helpers to
simplify the API.</para>
<para>When a GPU is detected, the DRM system loads a driver for the detected
hardware type. Each connected GPU is then presented to user-space via
a character-device that is usually available as
<filename>/dev/dri/card0</filename> and can be accessed with
<citerefentry><refentrytitle>open</refentrytitle><manvolnum>2</manvolnum></citerefentry>
and
<citerefentry><refentrytitle>close</refentrytitle><manvolnum>2</manvolnum></citerefentry>.
However, it still depends on the graphics driver which interfaces are
available on these devices. If an interface is not available, the
syscalls will fail with <literal>EINVAL</literal>.</para>
<refsect2>
<title>Authentication</title>
<para>All DRM devices provide authentication mechanisms. Only a DRM-Master
is allowed to perform mode-setting or modify core state and only one
user can be DRM-Master at a time. See
<citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>
for information on how to become DRM-Master and what the limitations
are. Other DRM users can be authenticated to the DRM-Master via
<citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>
so they can perform buffer allocations and rendering.</para>
</refsect2>
<refsect2>
<title>Mode-Setting</title>
<para>Managing connected monitors and displays and changing the current
modes is called <emphasis>Mode-Setting</emphasis>. This is
restricted to the current DRM-Master. Historically, this was
implemented in user-space, but new DRM drivers implement a kernel
interface to perform mode-setting called
<emphasis>Kernel Mode Setting</emphasis> (KMS). If your
hardware-driver supports it, you can use the KMS API provided by
DRM. This includes allocating framebuffers, selecting modes and
managing CRTCs and encoders. See
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>
for more.</para>
</refsect2>
<refsect2>
<title>Memory Management</title>
<para>The most sophisticated task for GPUs today is managing memory
objects. Textures, framebuffers, command-buffers and all other kinds
of commands for the GPU have to be stored in memory. The DRM driver
takes care of managing all memory objects, flushing caches,
synchronizing access and providing CPU access to GPU memory. All
memory management is hardware driver dependent. However, two generic
frameworks are available that are used by most DRM drivers. These
are the <emphasis>Translation Table Manager</emphasis> (TTM) and the
<emphasis>Graphics Execution Manager</emphasis> (GEM). They provide
generic APIs to create, destroy and access buffers from user-space.
However, there are still many differences between the drivers so
driver-dependent code is still needed. Many helpers are provided in
<emphasis>libgbm</emphasis> (Graphics Buffer Manager) from the
<emphasis>mesa-project</emphasis>. For more information on DRM
memory-management, see
<citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>.</para>
</refsect2>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this manual should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-memory</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmSetMaster</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmAuthMagic</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmAvailable</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -1,41 +0,0 @@
============
drmAvailable
============
-----------------------------------------------------
determine whether a DRM kernel driver has been loaded
-----------------------------------------------------
:Date: September 2012
:Manual section: 3
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
``int drmAvailable(void);``
Description
===========
``drmAvailable`` allows the caller to determine whether a kernel DRM
driver is loaded.
Return Value
============
``drmAvailable`` returns 1 if a DRM driver is currently loaded.
Otherwise 0 is returned.
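For example, a minimal probe program (linked against ``libdrm``):
::
#include <stdio.h>
#include <xf86drm.h>
int main(void)
{
    if (drmAvailable())
        printf("a DRM kernel driver is loaded\n");
    else
        printf("no DRM kernel driver is loaded\n");
    return 0;
}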
Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
See Also
========
**drm**\ (7), **drmOpen**\ (3)

74
man/drmAvailable.xml Normal file
View file

@ -0,0 +1,74 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drmAvailable">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drmAvailable</refentrytitle>
<manvolnum>3</manvolnum>
</refmeta>
<refnamediv>
<refname>drmAvailable</refname>
<refpurpose>determine whether a DRM kernel driver has been
loaded</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
<funcprototype>
<funcdef>int <function>drmAvailable</function></funcdef>
<paramdef>void</paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para><function>drmAvailable</function> allows the caller to determine
whether a kernel DRM driver is loaded.</para>
</refsect1>
<refsect1>
<title>Return Value</title>
<para><function>drmAvailable</function> returns 1 if a DRM driver is
currently loaded. Otherwise 0 is returned.</para>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmOpen</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -1,62 +0,0 @@
==============
drmHandleEvent
==============
-----------------------------------
read and process pending DRM events
-----------------------------------
:Date: September 2012
:Manual section: 3
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
``int drmHandleEvent(int fd, drmEventContextPtr evctx);``
Description
===========
``drmHandleEvent`` processes outstanding DRM events on the DRM
file-descriptor passed as ``fd``. This function should be called after
the DRM file-descriptor has polled readable; it will read the events and
use the passed-in ``evctx`` structure to call function pointers with the
parameters noted below:
::
typedef struct _drmEventContext {
int version;
void (*vblank_handler) (int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
void (*page_flip_handler) (int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
} drmEventContext, *drmEventContextPtr;
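A typical caller waits for the DRM file-descriptor to become readable with
**poll**\ (2) and then lets ``drmHandleEvent`` dispatch the events. A minimal
sketch, assuming ``fd`` is an already-open DRM file-descriptor and a page-flip
has been queued with **drmModePageFlip**\ (3):
::
static void page_flip_handler(int fd, unsigned int sequence,
                              unsigned int tv_sec, unsigned int tv_usec,
                              void *user_data)
{
    /* the queued page-flip completed; schedule the next flip here */
}
drmEventContext evctx = {
    .version = 2, /* version 2 covers the two handlers shown above */
    .page_flip_handler = page_flip_handler,
};
struct pollfd pfd = { .fd = fd, .events = POLLIN };
while (poll(&pfd, 1, -1) > 0) {
    if (pfd.revents & POLLIN)
        drmHandleEvent(fd, &evctx);
}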
Return Value
============
``drmHandleEvent`` returns 0 on success, or if there is no data to
read from the file-descriptor. Returns -1 if the read on the
file-descriptor fails or returns less than a full event record.
Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
See Also
========
**drm**\ (7), **drm-kms**\ (7), **drmModePageFlip**\ (3),
**drmWaitVBlank**\ (3)

101
man/drmHandleEvent.xml Normal file
View file

@ -0,0 +1,101 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drmHandleEvent">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drmHandleEvent</refentrytitle>
<manvolnum>3</manvolnum>
</refmeta>
<refnamediv>
<refname>drmHandleEvent</refname>
<refpurpose>read and process pending DRM events</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
<funcprototype>
<funcdef>int <function>drmHandleEvent</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>drmEventContextPtr <parameter>evctx</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para><function>drmHandleEvent</function> processes outstanding DRM events
on the DRM file-descriptor passed as <parameter>fd</parameter>. This
function should be called after the DRM file-descriptor has polled
readable; it will read the events and use the passed-in
<parameter>evctx</parameter> structure to call function pointers
with the parameters noted below:
<programlisting>
typedef struct _drmEventContext {
int version;
void (*vblank_handler) (int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
void (*page_flip_handler) (int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
} drmEventContext, *drmEventContextPtr;
</programlisting>
</para>
</refsect1>
<refsect1>
<title>Return Value</title>
<para><function>drmHandleEvent</function> returns <literal>0</literal> on
success, or if there is no data to read from the file-descriptor.
Returns <literal>-1</literal> if the read on the file-descriptor fails
or returns less than a full event record.</para>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModePageFlip</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmWaitVBlank</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -1,92 +0,0 @@
===================
drmModeGetResources
===================
--------------------------------------------------
retrieve current display configuration information
--------------------------------------------------
:Date: September 2012
:Manual section: 3
:Manual group: Direct Rendering Manager
Synopsis
========
``#include <xf86drm.h>``
``#include <xf86drmMode.h>``
``drmModeResPtr drmModeGetResources(int fd);``
Description
===========
``drmModeGetResources`` allocates, populates, and returns a drmModeRes
structure containing information about the current display
configuration. The structure contains the following fields:
::
typedef struct _drmModeRes {
int count_fbs;
uint32_t *fbs;
int count_crtcs;
uint32_t *crtcs;
int count_connectors;
uint32_t *connectors;
int count_encoders;
uint32_t *encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
The *count_fbs* and *fbs* fields indicate the number of currently allocated
framebuffer objects (i.e., objects that can be attached to a given CRTC
or sprite for display).
The *count_crtcs* and *crtcs* fields list the available CRTCs in the
configuration. A CRTC is simply an object that can scan out a
framebuffer to a display sink, and contains mode timing and relative
position information. CRTCs drive encoders, which are responsible for
converting the pixel stream into a specific display protocol (e.g., MIPI
or HDMI).
The *count_connectors* and *connectors* fields list the available physical
connectors on the system. Note that some of these may not be exposed
from the chassis (e.g., LVDS or eDP). Connectors are attached to
encoders and contain information about the attached display sink (e.g.,
width and height in mm, subpixel ordering, and various other
properties).
The *count_encoders* and *encoders* fields list the available encoders on
the device. Each encoder may be associated with a CRTC, and may be used
to drive a particular connector.
The *min_\** and *max_\** fields indicate the minimum and maximum dimensions
of a framebuffer for this device (i.e., the scanout size limits).
Return Value
============
``drmModeGetResources`` returns a drmModeRes structure pointer on
success, NULL on failure. The returned structure must be freed with
**drmModeFreeResources**\ (3).
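For example, a minimal sketch that lists the connector IDs of a device, where
``fd`` is an already-open DRM file-descriptor:
::
drmModeResPtr res = drmModeGetResources(fd);
if (res) {
    for (int i = 0; i < res->count_connectors; i++)
        printf("connector %u\n", res->connectors[i]);
    drmModeFreeResources(res);
} else {
    /* the device probably has no KMS support; see "errno" */
}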
Reporting Bugs
==============
Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/libdrm/-/issues
See Also
========
**drm**\ (7), **drm-kms**\ (7), **drmModeGetFB**\ (3), **drmModeAddFB**\ (3),
**drmModeAddFB2**\ (3), **drmModeRmFB**\ (3), **drmModeDirtyFB**\ (3),
**drmModeGetCrtc**\ (3), **drmModeSetCrtc** (3), **drmModeGetEncoder** (3),
**drmModeGetConnector**\ (3)

138
man/drmModeGetResources.xml Normal file
View file

@ -0,0 +1,138 @@
<?xml version='1.0'?> <!--*-nxml-*-->
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<!--
Written 2012 by David Herrmann <dh.herrmann@googlemail.com>
Dedicated to the Public Domain
-->
<refentry id="drmModeGetResources">
<refentryinfo>
<title>Direct Rendering Manager</title>
<productname>libdrm</productname>
<date>September 2012</date>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>David</firstname>
<surname>Herrmann</surname>
<email>dh.herrmann@googlemail.com</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>drmModeGetResources</refentrytitle>
<manvolnum>3</manvolnum>
</refmeta>
<refnamediv>
<refname>drmModeGetResources</refname>
<refpurpose>retrieve current display configuration information</refpurpose>
</refnamediv>
<refsynopsisdiv>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;xf86drm.h&gt;</funcsynopsisinfo>
<funcsynopsisinfo>#include &lt;xf86drmMode.h&gt;</funcsynopsisinfo>
<funcprototype>
<funcdef>drmModeResPtr <function>drmModeGetResources</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
<para><function>drmModeGetResources</function> allocates, populates, and
returns a <structname>drmModeRes</structname> structure containing
information about the current display configuration. The structure
contains the following fields:
<programlisting>
typedef struct _drmModeRes {
int count_fbs;
uint32_t *fbs;
int count_crtcs;
uint32_t *crtcs;
int count_connectors;
uint32_t *connectors;
int count_encoders;
uint32_t *encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
</programlisting>
</para>
<para>The <structfield>count_fbs</structfield> and
<structfield>fbs</structfield> fields indicate the number of currently
allocated framebuffer objects (i.e., objects that can be attached to
a given CRTC or sprite for display).</para>
<para>The <structfield>count_crtcs</structfield> and
<structfield>crtcs</structfield> fields list the available CRTCs in
the configuration. A CRTC is simply an object that can scan out a
framebuffer to a display sink, and contains mode timing and relative
position information. CRTCs drive encoders, which are responsible for
converting the pixel stream into a specific display protocol (e.g.,
MIPI or HDMI).</para>
<para>The <structfield>count_connectors</structfield> and
<structfield>connectors</structfield> fields list the available
physical connectors on the system. Note that some of these may not be
exposed from the chassis (e.g., LVDS or eDP). Connectors are attached
to encoders and contain information about the attached display sink
(e.g., width and height in mm, subpixel ordering, and various other
properties).</para>
<para>The <structfield>count_encoders</structfield> and
<structfield>encoders</structfield> fields list the available encoders
on the device. Each encoder may be associated with a CRTC, and may be
used to drive a particular connector.</para>
<para>The <structfield>min*</structfield> and
<structfield>max*</structfield> fields indicate the minimum and maximum
dimensions of a framebuffer for this device (i.e., the scanout size
limits).</para>
</refsect1>
<refsect1>
<title>Return Value</title>
<para><function>drmModeGetResources</function> returns a drmModeRes
structure pointer on success, <literal>NULL</literal> on failure. The
returned structure must be freed with
<citerefentry><refentrytitle>drmModeFreeResources</refentrytitle><manvolnum>3</manvolnum></citerefentry>.</para>
</refsect1>
<refsect1>
<title>Reporting Bugs</title>
<para>Bugs in this function should be reported to
https://gitlab.freedesktop.org/mesa/drm/-/issues</para>
</refsect1>
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>drm</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drm-kms</refentrytitle><manvolnum>7</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeAddFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeAddFB2</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeRmFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeDirtyFB</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeSetCrtc</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetEncoder</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>drmModeGetConnector</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
</refentry>

View file

@ -18,23 +18,50 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
rst_pages = [
['drm', '7'],
['drm-kms', '7'],
['drm-memory', '7'],
['drmAvailable', '3'],
['drmHandleEvent', '3'],
['drmModeGetResources', '3'],
xsltproc_args = [
'--stringparam', 'man.authors.section.enabled', '0',
'--stringparam', 'man.copyright.section.enabled', '0',
'--stringparam', 'funcsynopsis.style', 'ansi',
'--stringparam', 'man.output.quietly', '1',
'--nonet', manpage_style,
]
foreach page : rst_pages
name = page[0] + '.' + page[1]
rst = files(name + '.rst')
xmls = [
['drm', '7'], ['drm-kms', '7'], ['drm-memory', '7'], ['drmAvailable', '3'],
['drmHandleEvent', '3'], ['drmModeGetResources', '3']
]
foreach x : xmls
m = x[0]
s = x[1]
custom_target(
name,
input : rst,
output : name,
command : [prog_rst2man, '@INPUT@', '@OUTPUT@'],
m,
input : files('@0@.xml'.format(m)),
output : '@0@.@1@'.format(m, s),
command : [prog_xslt, '-o', '@OUTPUT@', xsltproc_args, '@INPUT0@'],
install : true,
install_dir : join_paths(get_option('mandir'), 'man' + page[1]),
install_dir : join_paths(get_option('mandir'), 'man@0@'.format(s)),
build_by_default : true,
)
endforeach
foreach x : ['drm-mm', 'drm-gem', 'drm-ttm']
gen = custom_target(
'gen-@0@'.format(x),
input : 'drm-memory.xml',
output : '@0@.xml'.format(x),
command : [
prog_sed, '-e', 's@^\.so \([a-z_]\+\)\.\([0-9]\)$$@\.so man\2\/\1\.\2@',
'@INPUT@',
],
capture : true,
)
custom_target(
'@0@.7'.format(x),
input : gen,
output : '@0@.7'.format(x, '7'),
command : [prog_xslt, '-o', '@OUTPUT@', xsltproc_args, '@INPUT@'],
install : true,
install_dir : join_paths(get_option('mandir'), 'man7'),
build_by_default : true,
)
endforeach

View file

@ -18,26 +18,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The versioning should always stay at 2.4.x. If bumping away from this,
# you must ensure that all users of patch_ver are changed such that DSO versions
# continuously increment (e.g. blindly bumping from 2.4.122 to 2.5.0 would
# roll the libdrm DSO versioning from libdrm.so.2.122.0 back to libdrm.so.2.0.0
# which would be bad)
project(
'libdrm',
['c'],
version : '2.4.131',
version : '2.4.103',
license : 'MIT',
meson_version : '>= 0.59',
default_options : ['buildtype=debugoptimized', 'c_std=c11'],
meson_version : '>= 0.43',
default_options : ['buildtype=debugoptimized', 'c_std=gnu99'],
)
patch_ver = meson.project_version().split('.')[2]
if ['windows', 'darwin'].contains(host_machine.system())
error('unsupported OS: @0@'.format(host_machine.system()))
endif
pkg = import('pkgconfig')
config = configuration_data()
@ -45,22 +34,16 @@ config = configuration_data()
config.set10('UDEV', get_option('udev'))
with_freedreno_kgsl = get_option('freedreno-kgsl')
with_install_tests = get_option('install-test-programs')
with_tests = get_option('tests')
if ['freebsd', 'dragonfly', 'netbsd'].contains(host_machine.system())
dep_pthread_stubs = dependency('pthread-stubs', version : '>= 0.4')
else
dep_pthread_stubs = []
endif
dep_threads = dependency('threads')
cc = meson.get_compiler('c')
config.set10('HAVE_SECURE_GETENV', cc.has_function('secure_getenv'))
android = cc.compiles('''int func() { return __ANDROID__; }''')
# Solaris / Illumos
if host_machine.system() == 'sunos'
add_global_arguments('-D__EXTENSIONS__', language : 'c')
add_global_arguments('-D_POSIX_C_SOURCE=3', language : 'c')
endif
symbols_check = find_program('symbols-check.py')
prog_nm = find_program('nm')
@ -68,13 +51,6 @@ prog_nm = find_program('nm')
intel_atomics = false
lib_atomics = false
python3 = import('python').find_installation()
format_mod_static_table = custom_target(
'format_mod_static_table',
output : 'generated_static_table_fourcc.h',
input : 'include/drm/drm_fourcc.h',
command : [python3, files('gen_table_fourcc.py'), '@INPUT@', '@OUTPUT@'])
dep_atomic_ops = dependency('atomic_ops', required : false)
if cc.links('''
int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
@ -97,64 +73,101 @@ endif
config.set10('HAVE_LIBDRM_ATOMIC_PRIMITIVES', intel_atomics)
config.set10('HAVE_LIB_ATOMIC_OPS', lib_atomics)
dep_pciaccess = dependency('pciaccess', version : '>= 0.10', required : get_option('intel'))
with_intel = false
_intel = get_option('intel')
if _intel != 'false'
if _intel == 'true' and not with_atomics
error('libdrm_intel requires atomics.')
else
with_intel = _intel == 'true' or host_machine.cpu_family().startswith('x86')
endif
endif
with_intel = get_option('intel') \
.require(with_atomics, error_message : 'libdrm_intel requires atomics') \
.require(dep_pciaccess.found(), error_message : 'libdrm_intel requires libpciaccess') \
.disable_auto_if(not host_machine.cpu_family().startswith('x86')) \
.allowed()
summary('Intel', with_intel)
with_radeon = false
_radeon = get_option('radeon')
if _radeon != 'false'
if _radeon == 'true' and not with_atomics
error('libdrm_radeon requires atomics.')
endif
with_radeon = true
endif
with_radeon = get_option('radeon') \
.require(with_atomics, error_message : 'libdrm_radeon requires atomics') \
.allowed()
summary('Radeon', with_radeon)
with_amdgpu = false
_amdgpu = get_option('amdgpu')
if _amdgpu != 'false'
if _amdgpu == 'true' and not with_atomics
error('libdrm_amdgpu requires atomics.')
endif
with_amdgpu = true
endif
with_amdgpu = get_option('amdgpu') \
.require(with_atomics, error_message : 'libdrm_amdgpu requires atomics') \
.allowed()
summary('AMDGPU', with_amdgpu)
with_nouveau = false
_nouveau = get_option('nouveau')
if _nouveau != 'false'
if _nouveau == 'true' and not with_atomics
error('libdrm_nouveau requires atomics.')
endif
with_nouveau = true
endif
with_nouveau = get_option('nouveau') \
.require(with_atomics, error_message : 'libdrm_nouveau requires atomics') \
.allowed()
summary('Nouveau', with_nouveau)
with_vmwgfx = false
_vmwgfx = get_option('vmwgfx')
if _vmwgfx != 'false'
with_vmwgfx = true
endif
with_vmwgfx = get_option('vmwgfx').allowed()
summary('vmwgfx', with_vmwgfx)
with_omap = false
_omap = get_option('omap')
if _omap == 'true'
if not with_atomics
error('libdrm_omap requires atomics.')
endif
with_omap = true
endif
with_omap = get_option('omap') \
.require(with_atomics, error_message : 'libdrm_omap requires atomics') \
.enabled()
summary('OMAP', with_omap)
with_freedreno = false
_freedreno = get_option('freedreno')
if _freedreno != 'false'
if _freedreno == 'true' and not with_atomics
error('libdrm_freedreno requires atomics.')
else
with_freedreno = _freedreno == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
endif
endif
with_freedreno = get_option('freedreno') \
.require(with_atomics, error_message : 'libdrm_freedreno requires atomics') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.allowed()
summary('Freedreno', with_freedreno)
summary('Freedreno-kgsl', with_freedreno_kgsl)
with_tegra = false
_tegra = get_option('tegra')
if _tegra == 'true'
if not with_atomics
error('libdrm_tegra requires atomics.')
endif
with_tegra = true
endif
with_tegra = get_option('tegra') \
.require(with_atomics, error_message : 'libdrm_tegra requires atomics') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.enabled()
summary('Tegra', with_tegra)
with_etnaviv = false
_etnaviv = get_option('etnaviv')
if _etnaviv == 'true'
if not with_atomics
error('libdrm_etnaviv requires atomics.')
endif
with_etnaviv = true
endif
with_etnaviv = get_option('etnaviv') \
.require(with_atomics, error_message : 'libdrm_etnaviv requires atomics') \
.disable_auto_if(not ['arm', 'aarch64', 'arc', 'mips', 'mips64', 'loongarch64'].contains(host_machine.cpu_family())) \
.allowed()
summary('Etnaviv', with_etnaviv)
with_exynos = get_option('exynos') == 'true'
with_exynos = get_option('exynos').enabled()
summary('EXYNOS', with_exynos)
with_vc4 = false
_vc4 = get_option('vc4')
if _vc4 != 'false'
with_vc4 = _vc4 == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
endif
with_vc4 = get_option('vc4') \
.disable_auto_if(not ['arm', 'aarch64'].contains(host_machine.cpu_family())) \
.allowed()
summary('VC4', with_vc4)
# XXX: Apparently only freebsd and dragonfly bsd actually need this (and
# gnu/kfreebsd), not openbsd and netbsd
with_libkms = false
_libkms = get_option('libkms')
if _libkms != 'false'
with_libkms = _libkms == 'true' or ['linux', 'freebsd', 'dragonfly'].contains(host_machine.system())
endif
# Among others FreeBSD does not have a separate dl library.
if not cc.has_function('dlsym')
@ -169,9 +182,10 @@ if not cc.has_function('clock_gettime', prefix : '#define _GNU_SOURCE\n#include
else
dep_rt = []
endif
dep_m = cc.find_library('m', required : false)
# The header is not required on Linux, and is in fact deprecated in glibc 2.30+
if host_machine.system() == 'linux'
if ['linux'].contains(host_machine.system())
config.set10('HAVE_SYS_SYSCTL_H', false)
else
# From Niclas Zeising:
@ -182,7 +196,8 @@ else
endif
foreach header : ['sys/select.h', 'alloca.h']
config.set10('HAVE_' + header.underscorify().to_upper(), cc.check_header(header))
config.set10('HAVE_' + header.underscorify().to_upper(),
cc.compiles('#include <@0@>'.format(header), name : '@0@ works'.format(header)))
endforeach
if (cc.has_header_symbol('sys/sysmacros.h', 'major') and
@ -197,30 +212,71 @@ if (cc.has_header_symbol('sys/mkdev.h', 'major') and
endif
config.set10('HAVE_OPEN_MEMSTREAM', cc.has_function('open_memstream'))
libdrm_c_args = cc.get_supported_arguments([
'-Wsign-compare', '-Werror=undef', '-Werror=implicit-function-declaration',
'-Wpointer-arith', '-Wwrite-strings', '-Wstrict-prototypes',
'-Wmissing-prototypes', '-Wmissing-declarations', '-Wnested-externs',
'-Wpacked', '-Wswitch-enum', '-Wmissing-format-attribute',
'-Wstrict-aliasing=2', '-Winit-self', '-Winline', '-Wshadow',
'-Wdeclaration-after-statement', '-Wold-style-definition',
'-Wno-unused-parameter', '-Wno-attributes', '-Wno-long-long',
'-Wno-missing-field-initializers'])
warn_c_args = []
foreach a : ['-Wall', '-Wextra', '-Wsign-compare', '-Werror=undef',
'-Werror=implicit-function-declaration', '-Wpointer-arith',
'-Wwrite-strings', '-Wstrict-prototypes', '-Wmissing-prototypes',
'-Wmissing-declarations', '-Wnested-externs', '-Wpacked',
'-Wswitch-enum', '-Wmissing-format-attribute',
'-Wstrict-aliasing=2', '-Winit-self', '-Winline', '-Wshadow',
'-Wdeclaration-after-statement', '-Wold-style-definition']
if cc.has_argument(a)
warn_c_args += a
endif
endforeach
# GCC will never error for -Wno-*, so check for -W* then add -Wno-* to the list
# of options
foreach a : ['unused-parameter', 'attributes', 'long-long',
'missing-field-initializers']
if cc.has_argument('-W@0@'.format(a))
warn_c_args += '-Wno-@0@'.format(a)
endif
endforeach
dep_cairo = dependency('cairo', required : get_option('cairo-tests'))
with_cairo_tests = dep_cairo.found()
# all c args:
libdrm_c_args = warn_c_args + ['-fvisibility=hidden']
valgrind_version = []
if with_freedreno
valgrind_version = '>=3.10.0'
dep_pciaccess = dependency('pciaccess', version : '>= 0.10', required : with_intel)
dep_cunit = dependency('cunit', version : '>= 2.1', required : false)
_cairo_tests = get_option('cairo-tests')
if _cairo_tests != 'false'
dep_cairo = dependency('cairo', required : _cairo_tests == 'true')
with_cairo_tests = dep_cairo.found()
else
dep_cairo = []
with_cairo_tests = false
endif
_valgrind = get_option('valgrind')
if _valgrind != 'false'
if with_freedreno
dep_valgrind = dependency('valgrind', required : _valgrind == 'true', version : '>=3.10.0')
else
dep_valgrind = dependency('valgrind', required : _valgrind == 'true')
endif
with_valgrind = dep_valgrind.found()
else
dep_valgrind = []
with_valgrind = false
endif
dep_valgrind = dependency('valgrind', required : get_option('valgrind'), version : valgrind_version)
with_valgrind = dep_valgrind.found()
prog_rst2man = find_program('rst2man', 'rst2man.py', required: get_option('man-pages'))
with_man_pages = prog_rst2man.found()
with_man_pages = get_option('man-pages')
prog_xslt = find_program('xsltproc', required : with_man_pages == 'true')
prog_sed = find_program('sed', required : with_man_pages == 'true')
manpage_style = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
if prog_xslt.found()
if run_command(prog_xslt, '--nonet', manpage_style).returncode() != 0
if with_man_pages == 'true'
error('Manpage style sheet cannot be found')
endif
with_man_pages = 'false'
endif
endif
with_man_pages = with_man_pages != 'false' and prog_xslt.found() and prog_sed.found()
config.set10('HAVE_VISIBILITY', cc.has_function_attribute('visibility:hidden'))
config.set10('HAVE_VISIBILITY',
cc.compiles('''int foo_hidden(void) __attribute__((visibility(("hidden"))));''',
name : 'compiler supports __attribute__(("hidden"))'))
foreach t : [
[with_exynos, 'EXYNOS'],
@ -239,47 +295,28 @@ if with_freedreno_kgsl and not with_freedreno
error('cannot enable freedreno-kgsl without freedreno support')
endif
config.set10('_GNU_SOURCE', true)
if target_machine.endian() == 'big'
config.set('HAVE_BIG_ENDIAN', 1)
endif
if android
config.set('BIONIC_IOCTL_NO_SIGNEDNESS_OVERLOAD', 1)
endif
config_file = configure_file(
configuration : config,
output : 'config.h',
)
add_project_arguments('-include', meson.current_build_dir() / 'config.h', language : 'c')
add_project_arguments('-include', '@0@'.format(config_file), language : 'c')
inc_root = include_directories('.')
inc_drm = include_directories('include/drm')
libdrm_files = [files(
'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
'xf86drmMode.c'
),
config_file, format_mod_static_table
]
# Build an unversioned so on android
if android
libdrm_kw = {}
else
libdrm_kw = { 'version' : '2.@0@.0'.format(patch_ver) }
endif
libdrm = library(
libdrm = shared_library(
'drm',
libdrm_files,
[files(
'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
'xf86drmMode.c'
),
config_file,
],
c_args : libdrm_c_args,
dependencies : [dep_valgrind, dep_rt],
dependencies : [dep_valgrind, dep_rt, dep_m],
include_directories : inc_drm,
version : '2.4.0',
install : true,
kwargs : libdrm_kw,
gnu_symbol_visibility : 'hidden',
)
test(
@ -288,7 +325,7 @@ test(
args : [
'--lib', libdrm,
'--symbols-file', files('core-symbols.txt'),
'--nm', prog_nm.full_path(),
'--nm', prog_nm.path(),
],
)
@ -297,8 +334,6 @@ ext_libdrm = declare_dependency(
include_directories : [inc_root, inc_drm],
)
meson.override_dependency('libdrm', ext_libdrm)
install_headers('libsync.h', 'xf86drm.h', 'xf86drmMode.h')
install_headers(
'include/drm/drm.h', 'include/drm/drm_fourcc.h', 'include/drm/drm_mode.h',
@ -317,12 +352,16 @@ if with_vmwgfx
endif
pkg.generate(
libdrm,
name : 'libdrm',
libraries : libdrm,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
description : 'Userspace interface to kernel DRM services',
)
if with_libkms
subdir('libkms')
endif
if with_intel
subdir('intel')
endif
@ -357,6 +396,21 @@ if with_man_pages
subdir('man')
endif
subdir('data')
if with_tests
subdir('tests')
endif
subdir('tests')
message('')
message('@0@ will be compiled with:'.format(meson.project_name()))
message('')
message(' libkms @0@'.format(with_libkms))
message(' Intel API @0@'.format(with_intel))
message(' vmwgfx API @0@'.format(with_vmwgfx))
message(' Radeon API @0@'.format(with_radeon))
message(' AMDGPU API @0@'.format(with_amdgpu))
message(' Nouveau API @0@'.format(with_nouveau))
message(' OMAP API @0@'.format(with_omap))
message(' EXYNOS API @0@'.format(with_exynos))
message(' Freedreno API @0@ (kgsl: @1@)'.format(with_freedreno, with_freedreno_kgsl))
message(' Tegra API @0@'.format(with_tegra))
message(' VC4 API @0@'.format(with_vc4))
message(' Etnaviv API @0@'.format(with_etnaviv))
message('')

View file

@ -18,77 +18,109 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
option(
'libkms',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Build libkms mm abstraction library.',
)
option(
'intel',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for Intel's KMS API.''',
)
option(
'radeon',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for radeons's KMS API.''',
)
option(
'amdgpu',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for amdgpu's KMS API.''',
)
option(
'nouveau',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for nouveau's KMS API.''',
)
option(
'vmwgfx',
type : 'feature',
type : 'combo',
value : 'true',
choices : ['true', 'false', 'auto'],
description : '''Enable support for vmgfx's KMS API.''',
)
option(
'omap',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for OMAP's experimental KMS API.''',
)
option(
'exynos',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for EXYNOS's experimental KMS API.''',
)
option(
'freedreno',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for freedreno's KMS API.''',
)
option(
'tegra',
type : 'feature',
value : 'disabled',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for Tegra's experimental KMS API.''',
)
option(
'vc4',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : '''Enable support for vc4's KMS API.''',
)
option(
'etnaviv',
type : 'feature',
description : '''Enable support for etnaviv's KMS API.''',
type : 'combo',
value : 'false',
choices : ['true', 'false', 'auto'],
description : '''Enable support for etnaviv's experimental KMS API.''',
)
option(
'cairo-tests',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Enable support for Cairo rendering in tests.',
)
option(
'man-pages',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Enable manpage generation and installation.',
)
option(
'valgrind',
type : 'feature',
type : 'combo',
value : 'auto',
choices : ['true', 'false', 'auto'],
description : 'Build libdrm with valgrind support.',
)
option(
@ -109,9 +141,3 @@ option(
value : false,
description : 'Enable support for using udev instead of mknod.',
)
option(
'tests',
type : 'boolean',
value : true,
description : 'Build test programs.',
)

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_nouveau",
defaults: [
"libdrm_defaults",
"libdrm_nouveau_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

14
nouveau/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_NOUVEAU_FILES, LIBDRM_NOUVEAU_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_nouveau
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_NOUVEAU_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

Some files were not shown because too many files have changed in this diff.