Compare commits

main..libdrm-2.4.93

No commits in common. "main" and "libdrm-2.4.93" have entirely different histories.

317 changed files with 21645 additions and 24575 deletions

.gitignore (109 changed lines)

@@ -1 +1,108 @@
/build*
bsd-core/*/@
bsd-core/*/machine
*~
*.1
*.3
*.5
*.7
*.flags
*.ko
*.ko.cmd
*.la
*.lo
*.log
*.mod.c
*.mod.o
*.o
*.o.cmd
*.sw?
*.trs
.depend
.deps
.libs
.tmp_versions
.*check*
.*install*
Makefile
Makefile.in
TAGS
aclocal.m4
autom4te.cache
build-aux
bus_if.h
compile
config.guess
config.h
config.h.in
config.log
config.status
config.sub
configure
configure.lineno
cscope.*
depcomp
device_if.h
drm.kld
drm_pciids.h
export_syms
i915.kld
install-sh
libdrm/config.h.in
libdrm.pc
libdrm_intel.pc
libdrm_nouveau.pc
libdrm_radeon.pc
libdrm_omap.pc
libdrm_exynos.pc
libdrm_freedreno.pc
libdrm_amdgpu.pc
libdrm_vc4.pc
libdrm_etnaviv.pc
libkms.pc
libtool
ltmain.sh
mach64.kld
man/.man_fixup
mga.kld
missing
mkinstalldirs
opt_drm.h
pci_if.h
r128.kld
radeon.kld
savage.kld
sis.kld
stamp-h1
tdfx.kld
via.kld
tests/auth
tests/amdgpu/amdgpu_test
tests/dristat
tests/drmdevice
tests/drmsl
tests/drmstat
tests/getclient
tests/getstats
tests/getversion
tests/hash
tests/lock
tests/openclose
tests/random
tests/setversion
tests/updatedraw
tests/modeprint/modeprint
tests/modetest/modetest
tests/name_from_fd
tests/proptest/proptest
tests/kms/kms-steal-crtc
tests/kms/kms-universal-planes
tests/kmstest/kmstest
tests/vbltest/vbltest
tests/radeon/radeon_ttm
tests/exynos/exynos_fimg2d_event
tests/exynos/exynos_fimg2d_perf
tests/exynos/exynos_fimg2d_test
tests/etnaviv/etnaviv_2d_test
tests/etnaviv/etnaviv_cmd_stream_test
tests/etnaviv/etnaviv_bo_cache_test
man/*.3

@@ -1,265 +0,0 @@
# This is the tag of the docker image used for the build jobs. If the
# image doesn't exist yet, the containers stage generates it.
#
# In order to generate a new image, one should generally change the tag.
# While removing the image from the registry would also work, that's not
# recommended except for ephemeral images during development: Replacing
# an image after a significant amount of time might pull in newer
# versions of gcc/clang or other packages, which might break the build
# with older commits using the same tag.
#
# After merging a change resulting in generating a new image to the
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
.templates_sha: &template_sha c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb # see https://docs.gitlab.com/ee/ci/yaml/#includefile
include:
- project: 'freedesktop/ci-templates'
ref: *template_sha
file:
- '/templates/debian.yml'
- '/templates/freebsd.yml'
- '/templates/ci-fairy.yml'
variables:
FDO_UPSTREAM_REPO: mesa/libdrm
FDO_REPO_SUFFIX: "$BUILD_OS/$BUILD_ARCH"
stages:
- "Base container"
- "Build"
.ci-rules:
rules:
- when: on_success
# CONTAINERS
.os-debian:
variables:
BUILD_OS: debian
FDO_DISTRIBUTION_VERSION: bookworm
FDO_DISTRIBUTION_PACKAGES: 'build-essential docbook-xsl libatomic-ops-dev libcairo2-dev libcunit1-dev libpciaccess-dev meson ninja-build pkg-config python3 python3-pip python3-wheel python3-setuptools python3-docutils valgrind'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2024-06-25.0"
.debian-x86_64:
extends:
- .os-debian
variables:
BUILD_ARCH: "x86-64"
.debian-aarch64:
extends:
- .os-debian
variables:
BUILD_ARCH: "aarch64"
.debian-armv7:
extends:
- .os-debian
variables:
BUILD_ARCH: "armv7"
FDO_DISTRIBUTION_PLATFORM: linux/arm/v7
.os-freebsd:
variables:
BUILD_OS: freebsd
FDO_DISTRIBUTION_VERSION: "14.2"
FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess textproc/py-docutils cairo'
# bump this tag every time you change something which requires rebuilding the
# base image
FDO_DISTRIBUTION_TAG: "2025-05-22.0"
.freebsd-x86_64:
extends:
- .os-freebsd
variables:
BUILD_ARCH: "x86_64"
# Build our base container image, which contains the core distribution, the
# toolchain, and all our build dependencies. This will be reused in the build
# stage.
x86_64-debian-container_prep:
extends:
- .ci-rules
- .debian-x86_64
- .fdo.container-build@debian
stage: "Base container"
variables:
GIT_STRATEGY: none
aarch64-debian-container_prep:
extends:
- .ci-rules
- .debian-aarch64
- .fdo.container-build@debian
tags:
- aarch64
stage: "Base container"
variables:
GIT_STRATEGY: none
armv7-debian-container_prep:
extends:
- .ci-rules
- .debian-armv7
- .fdo.container-build@debian
tags:
- aarch64
stage: "Base container"
variables:
GIT_STRATEGY: none
FDO_BASE_IMAGE: "arm32v7/debian:$FDO_DISTRIBUTION_VERSION"
x86_64-freebsd-container_prep:
extends:
- .ci-rules
- .freebsd-x86_64
- .fdo.qemu-build@freebsd@x86_64
stage: "Base container"
variables:
GIT_STRATEGY: none
# Core build environment.
.build-env:
variables:
MESON_BUILD_TYPE: "-Dbuildtype=debug -Doptimization=0 -Db_sanitize=address,undefined"
# OS/architecture-specific variants
.build-env-debian-x86_64:
extends:
- .fdo.suffixed-image@debian
- .debian-x86_64
- .build-env
needs:
- job: x86_64-debian-container_prep
artifacts: false
.build-env-debian-aarch64:
extends:
- .fdo.suffixed-image@debian
- .debian-aarch64
- .build-env
variables:
# At least with the versions we have, the LSan runtime makes fork unusably
# slow on AArch64, which is bad news since the test suite decides to fork
# for every single subtest. For now, in order to get AArch64 builds and
# tests into CI, just assume that we're not going to leak any more on
# AArch64 than we would on ARMv7 or x86-64.
ASAN_OPTIONS: "detect_leaks=0"
tags:
- aarch64
needs:
- job: aarch64-debian-container_prep
artifacts: false
.build-env-debian-armv7:
extends:
- .fdo.suffixed-image@debian
- .debian-armv7
- .build-env
tags:
- aarch64
needs:
- job: armv7-debian-container_prep
artifacts: false
.build-env-freebsd-x86_64:
variables:
# Compiling with ASan+UBSan appears to trigger an infinite loop in the
# compiler shipped with FreeBSD 13.0, so we only use UBSan here.
# Additionally, sanitizers can't be used with b_lundef on FreeBSD.
MESON_BUILD_TYPE: "-Dbuildtype=debug -Db_sanitize=undefined -Db_lundef=false"
extends:
- .fdo.suffixed-image@freebsd
- .freebsd-x86_64
- .build-env
needs:
- job: x86_64-freebsd-container_prep
artifacts: false
# BUILD
.do-build:
extends:
- .ci-rules
stage: "Build"
variables:
GIT_DEPTH: 10
script:
- meson setup build
--fatal-meson-warnings --auto-features=enabled
-D udev=true
- ninja -C build
- ninja -C build test
- DESTDIR=$PWD/install ninja -C build install
artifacts:
when: on_failure
paths:
- build/meson-logs/*
.do-build-qemu:
extends:
- .ci-rules
stage: "Build"
script:
# Start the VM and copy our workspace to the VM
- /app/vmctl start
- scp -r $PWD "vm:"
# The `set +e is needed to ensure that we always copy the meson logs back to
# the workspace to see details about the failed tests.
- |
set +e
/app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson setup build --fatal-meson-warnings --auto-features=enabled -D etnaviv=disabled -D nouveau=disabled -D valgrind=disabled && ninja -C build"
set -ex
scp -r vm:$CI_PROJECT_NAME/build/meson-logs .
/app/vmctl exec "ninja -C $CI_PROJECT_NAME/build install"
mkdir -p $PREFIX && scp -r vm:$PREFIX/ $PREFIX/
# Finally, shut down the VM.
- /app/vmctl stop
artifacts:
when: on_failure
paths:
- build/meson-logs/*
# Full build and test.
x86_64-debian-build:
extends:
- .build-env-debian-x86_64
- .do-build
aarch64-debian-build:
extends:
- .build-env-debian-aarch64
- .do-build
armv7-debian-build:
extends:
- .build-env-debian-armv7
- .do-build
# Daily build
meson-arch-daily:
rules:
- if: '$SCHEDULE == "arch-daily"'
when: on_success
- when: never
image: archlinux/archlinux:base-devel
before_script:
- pacman -Syu --noconfirm --needed
cairo
cunit
libatomic_ops
libpciaccess
meson
valgrind
python-docutils
extends: .do-build
x86_64-freebsd-build:
extends:
- .build-env-freebsd-x86_64
- .do-build-qemu

@@ -1,97 +0,0 @@
//
// Copyright © 2011-2012 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice (including the next
// paragraph) shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
subdirs = ["*"]
build = ["Android.sources.bp"]
cc_defaults {
name: "libdrm_defaults",
cflags: [
// XXX: Consider moving these to config.h analogous to autoconf.
"-DMAJOR_IN_SYSMACROS=1",
"-DHAVE_VISIBILITY=1",
"-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1",
"-Wall",
"-Werror",
"-Wno-deprecated-declarations",
"-Wno-format",
"-Wno-gnu-variable-sized-type-not-at-end",
"-Wno-implicit-function-declaration",
"-Wno-int-conversion",
"-Wno-missing-field-initializers",
"-Wno-pointer-arith",
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
export_system_include_dirs: ["."],
}
cc_library_headers {
name: "libdrm_headers",
vendor_available: true,
host_supported: true,
defaults: ["libdrm_defaults"],
export_include_dirs: ["include/drm", "android"],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}
genrule {
name: "generated_static_table_fourcc_h",
out: ["generated_static_table_fourcc.h"],
srcs: ["include/drm/drm_fourcc.h"],
tool_files: ["gen_table_fourcc.py"],
cmd: "python3 $(location gen_table_fourcc.py) $(in) $(out)",
}
// Library for the device
cc_library {
name: "libdrm",
recovery_available: true,
vendor_available: true,
host_supported: true,
defaults: [
"libdrm_defaults",
"libdrm_sources",
],
generated_headers: [
"generated_static_table_fourcc_h",
],
export_include_dirs: ["include/drm", "android"],
cflags: [
"-Wno-enum-conversion",
"-Wno-pointer-arith",
"-Wno-sign-compare",
"-Wno-tautological-compare",
],
apex_available: [
"//apex_available:platform",
"com.android.virt",
],
}

Android.common.mk (new file, 18 lines)

@@ -0,0 +1,18 @@
# XXX: Consider moving these to config.h analogous to autoconf.
LOCAL_CFLAGS += \
-DMAJOR_IN_SYSMACROS=1 \
-DHAVE_VISIBILITY=1 \
-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
LOCAL_CFLAGS += \
-Wno-error \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-Wno-pointer-arith \
-Wno-enum-conversion
# Quiet down the build system and remove any .h files from the sources
LOCAL_SRC_FILES := $(patsubst %.h, , $(LOCAL_SRC_FILES))
LOCAL_EXPORT_C_INCLUDE_DIRS += $(LOCAL_PATH)
LOCAL_PROPRIETARY_MODULE := true

Android.mk (new file, 69 lines)

@@ -0,0 +1,69 @@
#
# Copyright © 2011-2012 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
LIBDRM_COMMON_MK := $(call my-dir)/Android.common.mk
LOCAL_PATH := $(call my-dir)
LIBDRM_TOP := $(LOCAL_PATH)
include $(CLEAR_VARS)
# Import variables LIBDRM_{,H,INCLUDE_H,INCLUDE_ANDROID_H,INCLUDE_VMWGFX_H}_FILES
include $(LOCAL_PATH)/Makefile.sources
#static library for the device (recovery)
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
# Shared library for the device
include $(CLEAR_VARS)
LOCAL_MODULE := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FILES)
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH) \
$(LOCAL_PATH)/include/drm \
$(LOCAL_PATH)/android
LOCAL_SHARED_LIBRARIES := \
libcutils
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include/drm
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)
include $(call all-makefiles-under,$(LOCAL_PATH))

@@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_sources",
srcs: [
"xf86drm.c",
"xf86drmHash.c",
"xf86drmRandom.c",
"xf86drmSL.c",
"xf86drmMode.c",
],
}

@@ -1,25 +0,0 @@
# Usage: make -f path/to/Android.sources.bp.mk NAMES=<> >Android.sources.bp
#
# It will read the Makefile.sources in the current directory, and
# write <NAME>_FILES to stdout as an Android.bp cc_defaults module.
.PHONY: all
all:
@# Do nothing
include Makefile.sources
empty :=
indent := $(empty) $(empty)
$(info // Autogenerated with Android.sources.bp.mk)
$(foreach NAME,$(NAMES), \
$(eval lower_name := $(shell echo $(PREFIX)$(NAME) | tr 'A-Z' 'a-z')) \
$(info ) \
$(info cc_defaults {) \
$(info $(indent)name: "$(lower_name)_sources",) \
$(info $(indent)srcs: [) \
$(foreach f,$(filter %.c,$($(NAME)_FILES)), \
$(info $(indent)$(indent)"$(f)",)) \
$(info $(indent)],) \
$(info }))

@@ -1,105 +0,0 @@
Contributing to libdrm
======================
Submitting Patches
------------------
Patches should be sent to dri-devel@lists.freedesktop.org, using git
send-email. For patches only touching driver specific code one of the driver
mailing lists (like amd-gfx@lists.freedesktop.org) is also appropriate. See git
documentation for help:
http://git-scm.com/documentation
Since dri-devel is a very busy mailing list please use --subject-prefix="PATCH
libdrm" to make it easier to find libdrm patches. This is best done by running
git config --local format.subjectprefix "PATCH libdrm"
The first line of a commit message should contain a prefix indicating what part
is affected by the patch followed by one sentence that describes the change. For
examples:
amdgpu: Use uint32_t i in amdgpu_find_bo_by_cpu_mapping
The body of the commit message should describe what the patch changes and why,
and also note any particular side effects. For a recommended reading on
writing commit messages, see:
http://who-t.blogspot.de/2009/12/on-commit-messages.html
Your patches should also include a Signed-off-by line with your name and email
address. If you're not the patch's original author, you should also gather
S-o-b's by them (and/or whomever gave the patch to you.) The significance of
this is that it certifies that you created the patch, that it was created under
an appropriate open source license, or provided to you under those terms. This
lets us indicate a chain of responsibility for the copyright status of the code.
For more details:
https://developercertificate.org/
We won't reject patches that lack S-o-b, but it is strongly recommended.
Review and Merging
------------------
Patches should have at least one positive review (Reviewed-by: tag) or
indication of approval (Acked-by: tag) before merging. For any code shared
between drivers this is mandatory.
Please note that kernel/userspace API header files have special rules, see
include/drm/README.
Coding style in the project loosely follows the CodingStyle of the linux kernel:
https://www.kernel.org/doc/html/latest/process/coding-style.html?highlight=coding%20style
Commit Rights
-------------
Commit rights will be granted to anyone who requests them and fulfills the
below criteria:
- Submitted a few (5-10 as a rule of thumb) non-trivial (not just simple
spelling fixes and whitespace adjustment) patches that have been merged
already. Since libdrm is just a glue library between the kernel and userspace
drivers, merged patches to those components also count towards the commit
criteria.
- Are actively participating on discussions about their work (on the mailing
list or IRC). This should not be interpreted as a requirement to review other
peoples patches but just make sure that patch submission isn't one-way
communication. Cross-review is still highly encouraged.
- Will be regularly contributing further patches. This includes regular
contributors to other parts of the open source graphics stack who only
do the oddball rare patch within libdrm itself.
- Agrees to use their commit rights in accordance with the documented merge
criteria, tools, and processes.
To apply for commit rights ("Developer" role in gitlab) send a mail to
dri-devel@lists.freedesktop.org and please ping the maintainers if your request
is stuck.
Committers are encouraged to request their commit rights get removed when they
no longer contribute to the project. Commit rights will be reinstated when they
come back to the project.
Maintainers and committers should encourage contributors to request commit
rights, as especially junior contributors tend to underestimate their skills.
Code of Conduct
---------------
Please be aware the fd.o Code of Conduct also applies to libdrm:
https://www.freedesktop.org/wiki/CodeOfConduct/
See the gitlab project owners for contact details of the libdrm maintainers.
Abuse of commit rights, like engaging in commit fights or willfully pushing
patches that violate the documented merge criteria, will also be handled through
the Code of Conduct enforcement process.
Happy hacking!

Makefile.am (new file, 175 lines)

@@ -0,0 +1,175 @@
# Copyright 2005 Adam Jackson.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
include Makefile.sources
ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
AM_MAKEFLAGS = -s
AM_DISTCHECK_CONFIGURE_FLAGS = \
--enable-udev \
--enable-libkms \
--enable-intel \
--enable-radeon \
--enable-amdgpu \
--enable-nouveau \
--enable-vc4 \
--enable-vmwgfx \
--enable-omap-experimental-api \
--enable-exynos-experimental-api \
--enable-freedreno \
--enable-freedreno-kgsl\
--enable-tegra-experimental-api \
--enable-etnaviv-experimental-api \
--enable-install-test-programs \
--enable-cairo-tests \
--enable-manpages \
--enable-valgrind
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm.pc
if HAVE_LIBKMS
LIBKMS_SUBDIR = libkms
endif
if HAVE_INTEL
INTEL_SUBDIR = intel
endif
if HAVE_NOUVEAU
NOUVEAU_SUBDIR = nouveau
endif
if HAVE_RADEON
RADEON_SUBDIR = radeon
endif
if HAVE_AMDGPU
AMDGPU_SUBDIR = amdgpu
endif
if HAVE_OMAP
OMAP_SUBDIR = omap
endif
if HAVE_EXYNOS
EXYNOS_SUBDIR = exynos
endif
if HAVE_FREEDRENO
FREEDRENO_SUBDIR = freedreno
endif
if HAVE_TEGRA
TEGRA_SUBDIR = tegra
endif
if HAVE_VC4
VC4_SUBDIR = vc4
endif
if HAVE_ETNAVIV
ETNAVIV_SUBDIR = etnaviv
endif
if BUILD_MANPAGES
if HAVE_MANPAGES_STYLESHEET
MAN_SUBDIR = man
endif
endif
SUBDIRS = \
. \
$(LIBKMS_SUBDIR) \
$(INTEL_SUBDIR) \
$(NOUVEAU_SUBDIR) \
$(RADEON_SUBDIR) \
$(AMDGPU_SUBDIR) \
$(OMAP_SUBDIR) \
$(EXYNOS_SUBDIR) \
$(FREEDRENO_SUBDIR) \
$(TEGRA_SUBDIR) \
$(VC4_SUBDIR) \
$(ETNAVIV_SUBDIR) \
data \
tests \
$(MAN_SUBDIR)
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:4:0 -no-undefined
libdrm_la_LIBADD = @CLOCK_LIB@ -lm
libdrm_la_CPPFLAGS = -I$(top_srcdir)/include/drm
AM_CFLAGS = \
$(WARN_CFLAGS) \
$(VALGRIND_CFLAGS)
libdrm_la_SOURCES = $(LIBDRM_FILES)
libdrmincludedir = ${includedir}
libdrminclude_HEADERS = $(LIBDRM_H_FILES)
klibdrmincludedir = ${includedir}/libdrm
klibdrminclude_HEADERS = $(LIBDRM_INCLUDE_H_FILES)
if HAVE_VMWGFX
klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
endif
EXTRA_DIST = \
include/drm/README \
amdgpu/meson.build \
data/meson.build \
etnaviv/meson.build \
exynos/meson.build \
freedreno/meson.build \
intel/meson.build \
libkms/meson.build \
man/meson.build \
nouveau/meson.build \
omap/meson.build \
radeon/meson.build \
tegra/meson.build \
tests/amdgpu/meson.build \
tests/etnaviv/meson.build \
tests/exynos/meson.build \
tests/kms/meson.build \
tests/kmstest/meson.build \
tests/meson.build \
tests/modeprint/meson.build \
tests/modetest/meson.build \
tests/nouveau/meson.build \
tests/proptest/meson.build \
tests/radeon/meson.build \
tests/tegra/meson.build \
tests/util/meson.build \
tests/vbltest/meson.build \
vc4/meson.build \
meson.build \
meson_options.txt
copy-headers :
cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
commit-headers : copy-headers
git add include/drm/*.h
git commit -am "Copy headers from kernel $$(GIT_DIR=$(kernel_source)/.git git describe)"

Makefile.sources (new file, 44 lines)

@@ -0,0 +1,44 @@
LIBDRM_FILES := \
xf86drm.c \
xf86drmHash.c \
xf86drmHash.h \
xf86drmRandom.c \
xf86drmRandom.h \
xf86drmSL.c \
xf86drmMode.c \
xf86atomic.h \
libdrm_macros.h \
libdrm_lists.h \
util_double_list.h \
util_math.h
LIBDRM_H_FILES := \
libsync.h \
xf86drm.h \
xf86drmMode.h
LIBDRM_INCLUDE_H_FILES := \
include/drm/drm.h \
include/drm/drm_fourcc.h \
include/drm/drm_mode.h \
include/drm/drm_sarea.h \
include/drm/i915_drm.h \
include/drm/mach64_drm.h \
include/drm/mga_drm.h \
include/drm/nouveau_drm.h \
include/drm/qxl_drm.h \
include/drm/r128_drm.h \
include/drm/radeon_drm.h \
include/drm/amdgpu_drm.h \
include/drm/savage_drm.h \
include/drm/sis_drm.h \
include/drm/tegra_drm.h \
include/drm/vc4_drm.h \
include/drm/via_drm.h \
include/drm/virtgpu_drm.h
LIBDRM_INCLUDE_ANDROID_H_FILES := \
android/gralloc_handle.h
LIBDRM_INCLUDE_VMWGFX_H_FILES := \
include/drm/vmwgfx_drm.h

README (new file, 59 lines)

@@ -0,0 +1,59 @@
libdrm - userspace library for drm
This is libdrm, a userspace library for accessing the DRM, direct
rendering manager, on Linux, BSD and other operating systems that
support the ioctl interface. The library provides wrapper functions
for the ioctls to avoid exposing the kernel interface directly, and
for chipsets with drm memory manager, support for tracking relocations
and buffers. libdrm is a low-level library, typically used by
graphics drivers such as the Mesa DRI drivers, the X drivers, libva
and similar projects. New functionality in the kernel DRM drivers
typically requires a new libdrm, but a new libdrm will always work
with an older kernel.
Compiling
---------
libdrm has two build systems, a legacy autotools build system, and a newer
meson build system. The meson build system is much faster, and offers a
slightly different interface, but otherwise provides an equivalent feature set.
To use it:
meson builddir/
By default this will install into /usr/local, you can change your prefix
with --prefix=/usr (or `meson configure builddir/ -Dprefix=/usr` after
the initial meson setup).
Then use ninja to build and install:
ninja -C builddir/ install
If you are installing into a system location you will need to run install
separately, and as root.
Alternatively you can invoke autotools configure:
./configure
By default, libdrm will install into the /usr/local/ prefix. If you
want to install this DRM to replace your system copy, pass
--prefix=/usr and --exec-prefix=/ to configure. If you are building
libdrm from a git checkout, you first need to run the autogen.sh
script. You can pass any options to autogen.sh that you would other
wise pass to configure, or you can just re-run configure with the
options you need once autogen.sh finishes.
Next step is to build libdrm:
make
and once make finishes successfully, install the package using
make install
If you are installing into a system location, you will need to be root
to perform the install step.

@@ -1,63 +0,0 @@
libdrm - userspace library for drm
----------------------------------
This is libdrm, a userspace library for accessing the DRM, direct rendering
manager, on Linux, BSD and other operating systems that support the ioctl
interface.
The library provides wrapper functions for the ioctls to avoid exposing the
kernel interface directly, and for chipsets with drm memory manager, support
for tracking relocations and buffers.
New functionality in the kernel DRM drivers typically requires a new libdrm,
but a new libdrm will always work with an older kernel.
libdrm is a low-level library, typically used by graphics drivers such as
the Mesa drivers, the X drivers, libva and similar projects.
Syncing with the Linux kernel headers
-------------------------------------
The library should be regularly updated to match the recent changes in the
`include/uapi/drm/`.
libdrm maintains a human-readable version for the token format modifier, with
the simpler ones being extracted automatically from `drm_fourcc.h` header file
with the help of a python script. This might not always possible, as some of
the vendors require decoding/extracting them programmatically. For that
reason one can enhance the current vendor functions to include/provide the
newly added token formats, or, in case there's no such decoding
function, to add one that performs the tasks of extracting them.
For simpler format modifier tokens there's a script (gen_table_fourcc.py) that
creates a static table, by going over `drm_fourcc.h` header file. The script
could be further modified if it can't handle new (simpler) token format
modifiers instead of the generated static table.
Compiling
---------
To set up meson:
meson builddir/
By default this will install into /usr/local, you can change your prefix
with --prefix=/usr (or `meson configure builddir/ -Dprefix=/usr` after
the initial meson setup).
Then use ninja to build and install:
ninja -C builddir/ install
If you are installing into a system location you will need to run install
separately, and as root.
AMDGPU ASIC table file
----------------------
The AMDGPU driver requires the `amdgpu.ids` file. It is usually located at
`$PREFIX/share/libdrm`, but it is possible to specify a set of alternative
paths at runtime by setting the `AMDGPU_ASIC_ID_TABLE_PATHS` environment
variable with one or more colon-separated paths where to search for the
`amdgpu.ids` file.
For this option to be available, the C library must support secure_getenv()
function. In systems without it (like NetBSD), this option won't be available.

@@ -9,22 +9,33 @@ However, this is up to whoever is driving the feature in question.
Follow these steps to release a new version of libdrm:
1) Bump the version number in meson.build. We seem to have settled for
2.4.x as the versioning scheme for libdrm, so just bump the micro
version.
1) Bump the version number in configure.ac and meson.build. We seem
to have settled for 2.4.x as the versioning scheme for libdrm, so
just bump the micro version.
2) Run `ninja -C builddir/ dist` to generate the tarballs.
Make sure that the version number of the tarball name in
builddir/meson-dist/ matches the number you bumped to. Move that
tarball to the libdrm repo root for the release script to pick up.
2) Run autoconf and then re-run ./configure so the build system
picks up the new version number.
3) Push the updated main branch with the bumped version number:
3) Verify that the code passes "make distcheck". Running "make
distcheck" should result in no warnings or errors and end with a
message of the form:
git push origin main
=============================================
libdrm-X.Y.Z archives ready for distribution:
libdrm-X.Y.Z.tar.gz
libdrm-X.Y.Z.tar.bz2
=============================================
Make sure that the version number reported by distcheck and in
the tarball names matches the number you bumped to in configure.ac.
4) Push the updated master branch with the bumped version number:
git push origin master
assuming the remote for the upstream libdrm repo is called origin.
4) Use the release.sh script from the xorg/util/modular repo to
5) Use the release.sh script from the xorg/util/modular repo to
upload the tarballs to the freedesktop.org download area and
create an announce email template. The script takes one argument:
the path to the libdrm checkout. So, if a checkout of modular is

@@ -1,16 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_amdgpu",
cflags: [
"-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\""
],
defaults: [
"libdrm_defaults",
"libdrm_amdgpu_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

amdgpu/Android.mk (new file, 19 lines)

@@ -0,0 +1,19 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_AMDGPU_FILES, LIBDRM_AMDGPU_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_amdgpu
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_AMDGPU_FILES)
LOCAL_CFLAGS := \
-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\"
LOCAL_REQUIRED_MODULES := amdgpu.ids
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

@@ -1,15 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_amdgpu_sources",
srcs: [
"amdgpu_asic_id.c",
"amdgpu_bo.c",
"amdgpu_cs.c",
"amdgpu_device.c",
"amdgpu_gpu_info.c",
"amdgpu_vamgr.c",
"amdgpu_vm.c",
"handle_table.c",
],
}

amdgpu/Makefile.am (new file, 51 lines)

@@ -0,0 +1,51 @@
# Copyright © 2008 Jérôme Glisse
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jérôme Glisse <glisse@freedesktop.org>
include Makefile.sources
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir) \
$(PTHREADSTUBS_CFLAGS) \
-I$(top_srcdir)/include/drm
libdrmdatadir = @libdrmdatadir@
AM_CPPFLAGS = -DAMDGPU_ASIC_ID_TABLE=\"${libdrmdatadir}/amdgpu.ids\"
libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
libdrm_amdgpu_ladir = $(libdir)
libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES)
amdgpu_asic_id.lo: $(top_srcdir)/data/amdgpu.ids
libdrm_amdgpuincludedir = ${includedir}/libdrm
libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES)
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm_amdgpu.pc
TESTS = amdgpu-symbol-check
EXTRA_DIST = $(TESTS)

amdgpu/Makefile.sources (new file, 16 lines)

@@ -0,0 +1,16 @@
LIBDRM_AMDGPU_FILES := \
amdgpu_asic_id.c \
amdgpu_bo.c \
amdgpu_cs.c \
amdgpu_device.c \
amdgpu_gpu_info.c \
amdgpu_internal.h \
amdgpu_vamgr.c \
amdgpu_vm.c \
util_hash.c \
util_hash.h \
util_hash_table.c \
util_hash_table.h
LIBDRM_AMDGPU_H_FILES := \
amdgpu.h

@@ -1,12 +1,21 @@
#!/bin/bash
# The following symbols (past the first five) are taken from the public headers.
# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS
FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do
( grep -q "^$func$" || echo $func ) <<EOF
__bss_start
_edata
_end
_fini
_init
amdgpu_bo_alloc
amdgpu_bo_cpu_map
amdgpu_bo_cpu_unmap
amdgpu_bo_export
amdgpu_bo_free
amdgpu_bo_import
amdgpu_bo_inc_ref
amdgpu_bo_list_create_raw
amdgpu_bo_list_destroy_raw
amdgpu_bo_list_create
amdgpu_bo_list_destroy
amdgpu_bo_list_update
@@ -14,7 +23,6 @@ amdgpu_bo_query_info
amdgpu_bo_set_metadata
amdgpu_bo_va_op
amdgpu_bo_va_op_raw
amdgpu_bo_va_op_raw2
amdgpu_bo_wait_for_idle
amdgpu_create_bo_from_user_mem
amdgpu_cs_chunk_fence_info_to_data
@@ -25,8 +33,6 @@ amdgpu_cs_create_syncobj2
amdgpu_cs_ctx_create
amdgpu_cs_ctx_create2
amdgpu_cs_ctx_free
amdgpu_cs_ctx_override_priority
amdgpu_cs_ctx_stable_pstate
amdgpu_cs_destroy_semaphore
amdgpu_cs_destroy_syncobj
amdgpu_cs_export_syncobj
@@ -34,57 +40,38 @@ amdgpu_cs_fence_to_handle
amdgpu_cs_import_syncobj
amdgpu_cs_query_fence_status
amdgpu_cs_query_reset_state
amdgpu_cs_query_reset_state2
amdgpu_query_sw_info
amdgpu_cs_signal_semaphore
amdgpu_cs_submit
amdgpu_cs_submit_raw
amdgpu_cs_submit_raw2
amdgpu_cs_syncobj_export_sync_file
amdgpu_cs_syncobj_export_sync_file2
amdgpu_cs_syncobj_import_sync_file
amdgpu_cs_syncobj_import_sync_file2
amdgpu_cs_syncobj_query
amdgpu_cs_syncobj_query2
amdgpu_cs_syncobj_reset
amdgpu_cs_syncobj_signal
amdgpu_cs_syncobj_timeline_signal
amdgpu_cs_syncobj_timeline_wait
amdgpu_cs_syncobj_transfer
amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore
amdgpu_device_deinitialize
amdgpu_device_get_fd
amdgpu_device_initialize
amdgpu_device_initialize2
amdgpu_find_bo_by_cpu_mapping
amdgpu_get_marketing_name
amdgpu_query_buffer_size_alignment
amdgpu_query_crtc_from_id
amdgpu_query_firmware_version
amdgpu_query_gds_info
amdgpu_query_gpu_info
amdgpu_query_gpuvm_fault_info
amdgpu_query_heap_info
amdgpu_query_hw_ip_count
amdgpu_query_hw_ip_info
amdgpu_query_info
amdgpu_query_sensor_info
amdgpu_query_uq_fw_area_info
amdgpu_query_video_caps_info
amdgpu_read_mm_registers
amdgpu_va_manager_alloc
amdgpu_va_manager_init
amdgpu_va_manager_deinit
amdgpu_va_range_alloc
amdgpu_va_range_alloc2
amdgpu_va_range_free
amdgpu_va_get_start_addr
amdgpu_va_range_query
amdgpu_vm_reserve_vmid
amdgpu_vm_unreserve_vmid
amdgpu_create_userqueue
amdgpu_free_userqueue
amdgpu_userq_signal
amdgpu_userq_wait
EOF
done)
test ! -n "$FUNCS" || echo $FUNCS
test ! -n "$FUNCS"

@@ -42,10 +42,6 @@ extern "C" {
#endif
struct drm_amdgpu_info_hw_ip;
struct drm_amdgpu_info_uq_fw_areas;
struct drm_amdgpu_bo_list_entry;
struct drm_amdgpu_userq_signal;
struct drm_amdgpu_userq_wait;
/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
@@ -90,8 +86,8 @@ enum amdgpu_bo_handle_type {
/** DMA-buf fd handle */
amdgpu_bo_handle_type_dma_buf_fd = 2,
/** Deprecated in favour of and same behaviour as
* amdgpu_bo_handle_type_kms, use that instead of this
/** KMS handle, but re-importing as a DMABUF handle through
* drmPrimeHandleToFD is forbidden. (Glamor does that)
*/
amdgpu_bo_handle_type_kms_noimport = 3,
};
@@ -141,12 +137,6 @@ typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
*/
typedef struct amdgpu_va *amdgpu_va_handle;
/**
* Define handle dealing with VA allocation. An amdgpu_device
* owns one of these, but they can also be used without a device.
*/
typedef struct amdgpu_va_manager *amdgpu_va_manager_handle;
/**
* Define handle for semaphore
*/
@@ -536,20 +526,6 @@ int amdgpu_device_initialize(int fd,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
/**
* Same as amdgpu_device_initialize() except when deduplicate_device
* is false *and* fd points to a device that was already initialized.
* In this case, amdgpu_device_initialize would return the same
* amdgpu_device_handle while here amdgpu_device_initialize2 would
* return a new handle.
* amdgpu_device_initialize() should be preferred in most situations;
* the only use-case where not-deduplicating devices make sense is
* when one wants to have isolated device handles in the same process.
*/
int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle);
/**
*
* When access to such library does not needed any more the special
@@ -569,19 +545,6 @@ int amdgpu_device_initialize2(int fd, bool deduplicate_device,
*/
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
/**
*
* /param device_handle - \c [in] Device handle.
* See #amdgpu_device_initialize()
*
* \return Returns the drm fd used for operations on this
* device. This is still owned by the library and hence
* should not be closed. Guaranteed to be valid until
* #amdgpu_device_deinitialize gets called.
*
*/
int amdgpu_device_get_fd(amdgpu_device_handle device_handle);
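/*
 * Usage sketch (illustrative, not a declaration from this header): the
 * typical open/initialize/deinitialize flow for the functions documented
 * above. The render-node path and the minimal error handling are
 * assumptions made for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "amdgpu.h"

int main(void)
{
    uint32_t major, minor;
    amdgpu_device_handle dev;
    int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed render node */

    if (fd < 0)
        return 1;

    /* Negotiate the interface version and obtain a device handle. */
    if (amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
        printf("libdrm_amdgpu interface %u.%u\n", major, minor);
        amdgpu_device_deinitialize(dev);
    }
    close(fd);
    return 0;
}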
/*
* Memory Management
*
@@ -715,30 +678,7 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
amdgpu_bo_handle *buf_handle);
/**
* Validate if the user memory comes from BO
*
* \param dev - [in] Device handle. See #amdgpu_device_initialize()
* \param cpu - [in] CPU address of user allocated memory which we
* want to map to GPU address space (make GPU accessible)
* (This address must be correctly aligned).
* \param size - [in] Size of allocation (must be correctly aligned)
* \param buf_handle - [out] Buffer handle for the userptr memory
* if the user memory is not from BO, the buf_handle will be NULL.
* \param offset_in_bo - [out] offset in this BO for this user memory
*
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
amdgpu_bo_handle *buf_handle,
uint64_t *offset_in_bo);
/**
* Free previously allocated memory
* Free previosuly allocated memory
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param buf_handle - \c [in] Buffer handle to free
@@ -757,16 +697,6 @@ int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
*/
int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
/**
* Increase the reference count of a buffer object
*
* \param bo - \c [in] Buffer object handle to increase the reference count
*
* \sa amdgpu_bo_alloc(), amdgpu_bo_free()
*
*/
void amdgpu_bo_inc_ref(amdgpu_bo_handle bo);
/**
* Request CPU access to GPU accessible memory
*
@@ -811,37 +741,6 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
uint64_t timeout_ns,
bool *buffer_busy);
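/*
 * Usage sketch (illustrative): allocating a small CPU-visible buffer,
 * mapping it, and freeing it with the calls described above. The device
 * handle `dev`, the page-sized allocation and the GTT heap choice are
 * assumptions; AMDGPU_GEM_DOMAIN_GTT comes from amdgpu_drm.h.
 */
static int example_bo_cpu_access(amdgpu_device_handle dev)
{
    struct amdgpu_bo_alloc_request req = {
        .alloc_size = 4096,
        .phys_alignment = 4096,
        .preferred_heap = AMDGPU_GEM_DOMAIN_GTT, /* CPU-visible heap */
    };
    amdgpu_bo_handle bo;
    void *cpu;
    int r;

    r = amdgpu_bo_alloc(dev, &req, &bo);
    if (r)
        return r;

    if (amdgpu_bo_cpu_map(bo, &cpu) == 0) {
        *(uint32_t *)cpu = 0xdeadbeef; /* CPU write through the mapping */
        amdgpu_bo_cpu_unmap(bo);
    }
    return amdgpu_bo_free(bo);
}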
/**
* Creates a BO list handle for command submission.
*
* \param dev - \c [in] Device handle.
* See #amdgpu_device_initialize()
* \param number_of_buffers - \c [in] Number of BOs in the list
* \param buffers - \c [in] List of BO handles
* \param result - \c [out] Created BO list handle
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_bo_list_destroy_raw(), amdgpu_cs_submit_raw2()
*/
int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
uint32_t number_of_buffers,
struct drm_amdgpu_bo_list_entry *buffers,
uint32_t *result);
/**
* Destroys a BO list handle.
*
* \param bo_list - \c [in] BO list handle.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
*/
int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
/**
* Creates a BO list handle for command submission.
*
@@ -947,36 +846,6 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
*/
int amdgpu_cs_ctx_free(amdgpu_context_handle context);
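/*
 * Usage sketch (illustrative): a GPU context is created once, used for
 * command submission, and freed when no longer needed. `dev` is assumed
 * to come from amdgpu_device_initialize().
 */
static int example_ctx(amdgpu_device_handle dev)
{
    amdgpu_context_handle ctx;
    int r;

    r = amdgpu_cs_ctx_create(dev, &ctx);
    if (r)
        return r;
    /* ... build and submit command buffers against ctx here ... */
    return amdgpu_cs_ctx_free(ctx);
}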
/**
* Override the submission priority for the given context using a master fd.
*
* \param dev - \c [in] device handle
* \param context - \c [in] context handle for context id
* \param master_fd - \c [in] The master fd to authorize the override.
* \param priority - \c [in] The priority to assign to the context.
*
* \return 0 on success or a a negative Posix error code on failure.
*/
int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
amdgpu_context_handle context,
int master_fd,
unsigned priority);
/**
* Set or query the stable power state for GPU profiling.
*
* \param dev - \c [in] device handle
* \param op - \c [in] AMDGPU_CTX_OP_{GET,SET}_STABLE_PSTATE
* \param flags - \c [in] AMDGPU_CTX_STABLE_PSTATE_*
* \param out_flags - \c [out] output current stable pstate
*
* \return 0 on success otherwise POSIX Error code.
*/
int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
uint32_t op,
uint32_t flags,
uint32_t *out_flags);
/**
* Query reset state for the specific GPU Context
*
@@ -993,21 +862,6 @@ int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs);
/**
* Query reset state for the specific GPU Context.
*
* \param context - \c [in] GPU Context handle
* \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_cs_ctx_create()
*
*/
int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags);
/*
* Command Buffers Management
*
@@ -1175,26 +1029,6 @@ int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info);
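/*
 * Usage sketch (illustrative): querying the GFX IP block and the aggregate
 * GPU information with the query helpers above. `dev` is assumed from the
 * earlier sketch; AMDGPU_HW_IP_GFX and struct drm_amdgpu_info_hw_ip come
 * from amdgpu_drm.h, and the printed fields are examples of what the
 * returned structures carry.
 */
static void example_queries(amdgpu_device_handle dev)
{
    struct drm_amdgpu_info_hw_ip ip_info = {0};
    struct amdgpu_gpu_info gpu_info = {0};

    if (amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &ip_info) == 0)
        printf("GFX rings: 0x%x\n", ip_info.available_rings);

    if (amdgpu_query_gpu_info(dev, &gpu_info) == 0)
        printf("ASIC family: %u\n", gpu_info.family_id);
}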
/**
* Query FW area related information.
*
* The return size is query-specific and depends on the "type" parameter.
* No more than "size" bytes is returned.
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param type - \c [in] AMDGPU_HW_IP_*
* \param ip_instance - \c [in] HW IP index.
* \param info - \c [out] The pointer to return value
*
* \return 0 on success\n
* <0 - Negative POSIX error code
*
*/
int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info);
/**
* Query heap information
*
@@ -1308,39 +1142,6 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
unsigned size, void *value);
/**
* Query information about video capabilities
*
* The return sizeof(struct drm_amdgpu_info_video_caps)
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param caps_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
* \param size - \c [in] Size of the returned value.
* \param value - \c [out] Pointer to the return value.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
unsigned size, void *value);
/**
* Query information about VM faults
*
* The return sizeof(struct drm_amdgpu_info_gpuvm_fault)
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param size - \c [in] Size of the returned value.
* \param value - \c [out] Pointer to the return value.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev, unsigned size,
void *value);
/**
* Read a set of consecutive memory-mapped registers.
* Not all registers are allowed to be read by userspace.
@@ -1367,7 +1168,6 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
*/
#define AMDGPU_VA_RANGE_32_BIT 0x1
#define AMDGPU_VA_RANGE_HIGH 0x2
#define AMDGPU_VA_RANGE_REPLAYABLE 0x4
/**
* Allocate virtual address range
@@ -1398,7 +1198,7 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
* \notes \n
* It is client responsibility to correctly handle VA assignments and usage.
* Neither kernel driver nor libdrm_amdpgu are able to prevent and
* detect wrong va assignment.
* detect wrong va assignemnt.
*
* It is client responsibility to correctly handle multi-GPU cases and to pass
* the corresponding arrays of all devices handles where corresponding VA will
@@ -1427,11 +1227,6 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
*/
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
/**
* Return the starting address of the allocated virtual address range.
*/
uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle);
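/*
 * Usage sketch (illustrative): reserving a GPU virtual address range and
 * mapping a buffer object into it with amdgpu_bo_va_op(). `dev` and `bo`
 * are assumed from the earlier sketches; the 4 KiB size and alignment are
 * arbitrary example values, and AMDGPU_VA_OP_MAP/UNMAP come from
 * amdgpu_drm.h.
 */
static int example_va_map(amdgpu_device_handle dev, amdgpu_bo_handle bo)
{
    uint64_t va_addr;
    amdgpu_va_handle va;
    int r;

    r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                              4096, 4096, 0, &va_addr, &va, 0);
    if (r)
        return r;

    r = amdgpu_bo_va_op(bo, 0, 4096, va_addr, 0, AMDGPU_VA_OP_MAP);
    if (r == 0)
        amdgpu_bo_va_op(bo, 0, 4096, va_addr, 0, AMDGPU_VA_OP_UNMAP);

    amdgpu_va_range_free(va);
    return r;
}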
/**
* Query virtual address range
*
@@ -1453,37 +1248,6 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
uint64_t *start,
uint64_t *end);
/**
* Allocate a amdgpu_va_manager object.
* The returned object has be initialized with the amdgpu_va_manager_init
* before use.
* On release, amdgpu_va_manager_deinit needs to be called, then the memory
* can be released using free().
*/
amdgpu_va_manager_handle amdgpu_va_manager_alloc(void);
void amdgpu_va_manager_init(amdgpu_va_manager_handle va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment);
void amdgpu_va_manager_deinit(amdgpu_va_manager_handle va_mgr);
/**
* Similar to #amdgpu_va_range_alloc() but allocates VA
* directly from an amdgpu_va_manager_handle instead of using
* the manager from an amdgpu_device.
*/
int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags);
/**
* VA mapping/unmapping for the buffer object
*
@@ -1534,42 +1298,6 @@ int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
uint64_t flags,
uint32_t ops);
/**
* VA mapping/unmapping of buffer object for usermode queue.
*
* This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
* parameters are treated "raw2", i.e. size is not automatically aligned, and
* all flags must be specified explicitly.
*
* \param dev - \c [in] device handle
* \param bo - \c [in] BO handle (may be NULL)
* \param offset - \c [in] Start offset to map
* \param size - \c [in] Size to map
* \param addr - \c [in] Start virtual address.
* \param flags - \c [in] Supported flags for mapping/unmapping
* \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
* \param vm_timeline_syncobj_out - \c [out] syncobj handle for PT update fence
* \param vm_timeline_point - \c [in] input timeline point
* \param input_fence_syncobj_handles - \c [in] Array of syncobj handles for bo unmap,
* clear and replace
* \param num_syncobj_handles - \c [in] Number of syncobj handles
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_array_in,
uint32_t num_syncobj_handles_in);
/**
* create semaphore
*
@@ -1708,23 +1436,6 @@ int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count);
/**
* Signal kernel timeline sync objects.
*
* \param dev - \c [in] device handle
* \param syncobjs - \c [in] array of sync object handles
* \param points - \c [in] array of timeline points
* \param syncobj_count - \c [in] number of handles in syncobjs
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count);
/**
* Wait for one or all sync objects to signal.
*
@@ -1745,63 +1456,6 @@ int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
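/*
 * Usage sketch (illustrative): creating a binary sync object that starts
 * signaled, polling it with amdgpu_cs_syncobj_wait(), and destroying it.
 * `dev` is assumed from earlier; the DRM_SYNCOBJ_* flags come from the
 * core drm headers, and the zero timeout simply polls.
 */
static int example_syncobj(amdgpu_device_handle dev)
{
    uint32_t syncobj, first = 0;
    int r;

    r = amdgpu_cs_create_syncobj2(dev, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
    if (r)
        return r;

    /* Succeeds immediately because the object was created signaled. */
    r = amdgpu_cs_syncobj_wait(dev, &syncobj, 1, 0,
                               DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, &first);

    amdgpu_cs_destroy_syncobj(dev, syncobj);
    return r;
}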
/**
* Wait for one or all sync objects on their points to signal.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [in] array of sync points to wait
* \param num_handles - \c [in] self-explanatory
* \param timeout_nsec - \c [in] self-explanatory
* \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
* \param first_signaled - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Query sync objects payloads.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [out] array of sync points returned, which presents
* syncobj payload.
* \param num_handles - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles);
/**
* Query sync objects last signaled or submitted point.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [out] array of sync points returned, which presents
* syncobj payload.
* \param num_handles - \c [in] self-explanatory
* \param flags - \c [in] a bitmask of DRM_SYNCOBJ_QUERY_FLAGS_*
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles, uint32_t flags);
/**
* Export kernel sync object to shareable fd.
*
@@ -1860,62 +1514,6 @@ int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd);
/**
* Export kernel timeline sync object to a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param flags - \c [in] flags
* \param sync_file_fd - \c [out] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd);
/**
* Import kernel timeline sync object from a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param sync_file_fd - \c [in] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd);
/**
* transfer between syncbojs.
*
* \param dev - \c [in] device handle
* \param dst_handle - \c [in] sync object handle
* \param dst_point - \c [in] timeline point, 0 presents dst is binary
* \param src_handle - \c [in] sync object handle
* \param src_point - \c [in] timeline point, 0 presents src is binary
* \param flags - \c [in] flags
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags);
/**
* Export an amdgpu fence as a handle (syncobj or fd).
@ -1956,28 +1554,6 @@ int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no);
/**
* Submit raw command submission to the kernel with a raw BO list handle.
*
* \param dev - \c [in] device handle
* \param context - \c [in] context handle for context id
* \param bo_list_handle - \c [in] raw bo list handle (0 for none)
* \param num_chunks - \c [in] number of CS chunks to submit
* \param chunks - \c [in] array of CS chunks
* \param seq_no - \c [out] output sequence number for submission.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
*/
int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
amdgpu_context_handle context,
uint32_t bo_list_handle,
int num_chunks,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no);
void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
struct drm_amdgpu_cs_chunk_dep *dep);
void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
@@ -2001,65 +1577,6 @@ int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
*/
int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
/**
* Create USERQUEUE
* \param dev - \c [in] device handle
* \param ip_type - \c [in] ip type
* \param doorbell_handle - \c [in] doorbell handle
* \param doorbell_offset - \c [in] doorbell index
* \param mqd_in - \c [in] MQD data
* \param queue_va - \c [in] Virtual address of queue
* \param queue_size - \c [in] userqueue size
* \param wptr_va - \c [in] Virtual address of wptr
* \param rptr_va - \c [in] Virtual address of rptr
* \param queue_id - \c [out] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id);
/**
* Free USERQUEUE
* \param dev - \c [in] device handle
* \param queue_id - \c [in] queue id
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id);
/**
* Signal USERQUEUE
* \param dev - \c [in] device handle
* \param signal_data - \c [in] pointer to struct drm_amdgpu_userq_signal
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data);
/**
* Wait USERQUEUE
* \param dev - \c [in] device handle
* \param wait_data - \c [in/out] pointer to struct drm_amdgpu_userq_wait
* to be filled by the caller
*
* \return 0 on success otherwise POSIX Error code
*/
int amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data);
#ifdef __cplusplus
}
#endif

@@ -22,11 +22,6 @@
*
*/
// secure_getenv requires _GNU_SOURCE
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
@@ -109,168 +104,6 @@ out:
return r;
}
static void amdgpu_parse_proc_cpuinfo(struct amdgpu_device *dev)
{
const char *search_key = "model name";
const char *radeon_key = "Radeon";
char *line = NULL;
size_t len = 0;
FILE *fp;
fp = fopen("/proc/cpuinfo", "r");
if (fp == NULL) {
fprintf(stderr, "%s\n", strerror(errno));
return;
}
while (getline(&line, &len, fp) != -1) {
char *saveptr;
char *value;
if (strncmp(line, search_key, strlen(search_key)))
continue;
/* check for parts that have both CPU and GPU information */
value = strstr(line, radeon_key);
/* get content after the first colon */
if (value == NULL) {
value = strstr(line, ":");
if (value == NULL)
continue;
value++;
}
/* strip whitespace */
while (*value == ' ' || *value == '\t')
value++;
saveptr = strchr(value, '\n');
if (saveptr)
*saveptr = '\0';
/* Add AMD to the new string if it's missing from slicing/dicing */
if (strncmp(value, "AMD", 3) != 0) {
char *tmp = malloc(strlen(value) + 5);
if (!tmp)
break;
sprintf(tmp, "AMD %s", value);
dev->marketing_name = tmp;
} else
dev->marketing_name = strdup(value);
break;
}
free(line);
fclose(fp);
}
#if HAVE_SECURE_GETENV
static char *join_path(const char *dir, const char *file) {
size_t dir_len = strlen(dir);
size_t file_len = strlen(file);
char *full_path = NULL;
int need_slash = ((dir_len > 0) && (dir[dir_len - 1] != '/'));
size_t total_len = dir_len + (need_slash ? 1 : 0) + file_len + 1; // +1 for null terminator
if (dir_len == 0) {
return strdup(file);
}
full_path = malloc(total_len);
if (!full_path) {
return NULL; // Memory allocation failed
}
strcpy(full_path, dir);
if (need_slash) {
full_path[dir_len] = '/';
dir_len++;
}
strcpy(full_path + dir_len, file);
return full_path;
}
static char **split_env_var(const char *env_var_content)
{
char **ret = NULL;
char *dup_env_val;
int elements = 1;
int index = 1;
if (!env_var_content || env_var_content[0] == '\0')
return NULL;
for(char *p = (char *)env_var_content; *p; p++) {
if (*p == ':')
elements++;
}
dup_env_val = strdup(env_var_content);
if (!dup_env_val) {
return NULL;
}
ret = malloc(sizeof(char *) * (elements + 1));
if (!ret) {
free(dup_env_val);
return NULL;
}
ret[0] = dup_env_val;
for(char *p = (char *)dup_env_val; *p; p++) {
if (*p == ':') {
*p = 0;
ret[index++] = p + 1;
}
}
ret[index] = NULL; // ensure that the last element in the array is NULL
return ret;
}
static void split_env_var_free(char **split_var)
{
if (split_var) {
// remember that the first element also points to the whole duplicated string,
// which was modified in place by replacing ':' with '\0' characters
free(split_var[0]);
free(split_var);
}
}
static char *find_asic_id_table(void)
{
// first check the paths in AMDGPU_ASIC_ID_TABLE_PATHS environment variable
const char *amdgpu_asic_id_table_paths = secure_getenv("AMDGPU_ASIC_ID_TABLE_PATHS");
char *file_name = NULL;
char *found_path = NULL;
char **paths = NULL;
if (!amdgpu_asic_id_table_paths)
return NULL;
// extract the file name from AMDGPU_ASIC_ID_TABLE
file_name = strrchr(AMDGPU_ASIC_ID_TABLE, '/');
if (!file_name)
return NULL;
file_name++; // skip the '/'
paths = split_env_var(amdgpu_asic_id_table_paths);
if (!paths)
return NULL;
// for each path, join with file_name and check if it exists
for (int i = 0; paths[i] != NULL; i++) {
char *full_path = join_path(paths[i], file_name);
if (!full_path) {
continue;
}
if (access(full_path, R_OK) == 0) {
found_path = full_path;
break;
}
}
split_env_var_free(paths);
return found_path;
}
#endif
void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
{
FILE *fp;
@ -280,21 +113,11 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
int line_num = 1;
int r = 0;
char *amdgpu_asic_id_table_path = NULL;
#if HAVE_SECURE_GETENV
// if this system lacks secure_getenv(), don't allow extra paths
// for security reasons.
amdgpu_asic_id_table_path = find_asic_id_table();
#endif
// if not found, use the default AMDGPU_ASIC_ID_TABLE path
if (!amdgpu_asic_id_table_path)
amdgpu_asic_id_table_path = strdup(AMDGPU_ASIC_ID_TABLE);
fp = fopen(amdgpu_asic_id_table_path, "r");
fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
if (!fp) {
fprintf(stderr, "%s: %s\n", amdgpu_asic_id_table_path,
fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
strerror(errno));
goto get_cpu;
return;
}
/* 1st valid line is file version */
@ -309,7 +132,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
continue;
}
drmMsg("%s version: %s\n", amdgpu_asic_id_table_path, line);
drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
break;
}
@ -327,7 +150,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
if (r == -EINVAL) {
fprintf(stderr, "Invalid format: %s: line %d: %s\n",
amdgpu_asic_id_table_path, line_num, line);
AMDGPU_ASIC_ID_TABLE, line_num, line);
} else if (r && r != -EAGAIN) {
fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
__func__, strerror(-r));
@ -335,11 +158,4 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
free(line);
fclose(fp);
get_cpu:
free(amdgpu_asic_id_table_path);
if (dev->info.ids_flags & AMDGPU_IDS_FLAGS_FUSION &&
dev->marketing_name == NULL) {
amdgpu_parse_proc_cpuinfo(dev);
}
}

View file

@ -37,80 +37,68 @@
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"
static int amdgpu_bo_create(amdgpu_device_handle dev,
uint64_t size,
uint32_t handle,
amdgpu_bo_handle *buf_handle)
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
uint32_t handle)
{
struct drm_gem_close args = {};
args.handle = handle;
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
amdgpu_bo_handle *buf_handle)
{
struct amdgpu_bo *bo;
int r;
union drm_amdgpu_gem_create args;
unsigned heap = alloc_buffer->preferred_heap;
int r = 0;
/* It's an error if the heap is not specified */
if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
return -EINVAL;
bo = calloc(1, sizeof(struct amdgpu_bo));
if (!bo)
return -ENOMEM;
r = handle_table_insert(&dev->bo_handles, handle, bo);
if (r) {
free(bo);
return r;
}
atomic_set(&bo->refcount, 1);
bo->dev = dev;
bo->alloc_size = size;
bo->handle = handle;
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
*buf_handle = bo;
return 0;
}
drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
amdgpu_bo_handle *buf_handle)
{
union drm_amdgpu_gem_create args;
int r;
if (!alloc_buffer || !buf_handle)
return -EINVAL;
bo->alloc_size = alloc_buffer->alloc_size;
memset(&args, 0, sizeof(args));
args.in.bo_size = alloc_buffer->alloc_size;
args.in.alignment = alloc_buffer->phys_alignment;
/* Set the placement. */
args.in.domains = alloc_buffer->preferred_heap;
args.in.domains = heap;
args.in.domain_flags = alloc_buffer->flags;
/* Allocate the buffer with the preferred heap. */
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
&args, sizeof(args));
if (r)
goto out;
pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
drmCloseBufferHandle(dev->fd, args.out.handle);
free(bo);
return r;
}
out:
return r;
bo->handle = args.out.handle;
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
*buf_handle = bo;
return 0;
}
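A short, illustrative use of amdgpu_bo_alloc() as implemented above: allocate a small GTT buffer, map it for CPU access, then release it. The 4 KiB size and the GTT placement are arbitrary example values.
#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
static int bo_alloc_example(amdgpu_device_handle dev)
{
    struct amdgpu_bo_alloc_request req = {0};
    amdgpu_bo_handle bo;
    void *cpu;
    int r;
    req.alloc_size = 4096;
    req.phys_alignment = 4096;
    req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
    req.flags = 0;
    r = amdgpu_bo_alloc(dev, &req, &bo);
    if (r)
        return r;
    r = amdgpu_bo_cpu_map(bo, &cpu);
    if (!r) {
        memset(cpu, 0, 4096);  /* touch the mapping */
        amdgpu_bo_cpu_unmap(bo);
    }
    amdgpu_bo_free(bo);
    return r;
}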
drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
struct amdgpu_bo_metadata *info)
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
struct amdgpu_bo_metadata *info)
{
struct drm_amdgpu_gem_metadata args = {};
if (!info)
return -EINVAL;
args.handle = bo->handle;
args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
args.data.flags = info->flags;
@ -129,8 +117,8 @@ drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
&args, sizeof(args));
}
drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
struct amdgpu_bo_info *info)
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
struct amdgpu_bo_info *info)
{
struct drm_amdgpu_gem_metadata metadata = {};
struct drm_amdgpu_gem_create_in bo_info = {};
@ -138,7 +126,7 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
int r;
/* Validate the BO passed in */
if (!bo->handle || !info)
if (!bo->handle)
return -EINVAL;
/* Query metadata. */
@ -180,6 +168,14 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
return 0;
}
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
pthread_mutex_lock(&bo->dev->bo_table_mutex);
util_hash_table_set(bo->dev->bo_handles,
(void*)(uintptr_t)bo->handle, bo);
pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
struct drm_gem_flink flink;
@ -213,19 +209,24 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
bo->flink_name = flink.name;
if (bo->dev->flink_fd != bo->dev->fd)
drmCloseBufferHandle(bo->dev->flink_fd, handle);
if (bo->dev->flink_fd != bo->dev->fd) {
struct drm_gem_close args = {};
args.handle = handle;
drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
}
pthread_mutex_lock(&bo->dev->bo_table_mutex);
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
util_hash_table_set(bo->dev->bo_flink_names,
(void*)(uintptr_t)bo->flink_name,
bo);
pthread_mutex_unlock(&bo->dev->bo_table_mutex);
return r;
return 0;
}
drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
enum amdgpu_bo_handle_type type,
uint32_t *shared_handle)
int amdgpu_bo_export(amdgpu_bo_handle bo,
enum amdgpu_bo_handle_type type,
uint32_t *shared_handle)
{
int r;
@ -239,11 +240,14 @@ drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
return 0;
case amdgpu_bo_handle_type_kms:
amdgpu_add_handle_to_table(bo);
/* fall through */
case amdgpu_bo_handle_type_kms_noimport:
*shared_handle = bo->handle;
return 0;
case amdgpu_bo_handle_type_dma_buf_fd:
amdgpu_add_handle_to_table(bo);
return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
DRM_CLOEXEC | DRM_RDWR,
(int*)shared_handle);
@ -251,16 +255,14 @@ drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
return -EINVAL;
}
drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
enum amdgpu_bo_handle_type type,
uint32_t shared_handle,
int amdgpu_bo_import(amdgpu_device_handle dev,
enum amdgpu_bo_handle_type type,
uint32_t shared_handle,
struct amdgpu_bo_import_result *output)
{
struct drm_gem_open open_arg = {};
struct amdgpu_bo *bo = NULL;
uint32_t handle = 0, flink_name = 0;
uint64_t alloc_size = 0;
int r = 0;
int r;
int dma_fd;
uint64_t dma_buf_size = 0;
@ -270,18 +272,22 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
/* Convert a DMA buf handle to a KMS handle now. */
if (type == amdgpu_bo_handle_type_dma_buf_fd) {
uint32_t handle;
off_t size;
/* Get a KMS handle. */
r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
if (r)
goto unlock;
if (r) {
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
/* Query the buffer size. */
size = lseek(shared_handle, 0, SEEK_END);
if (size == (off_t)-1) {
r = -errno;
goto free_bo_handle;
pthread_mutex_unlock(&dev->bo_table_mutex);
amdgpu_close_kms_handle(dev, handle);
return -errno;
}
lseek(shared_handle, 0, SEEK_SET);
@ -292,22 +298,24 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
/* If we have already created a buffer with this handle, find it. */
switch (type) {
case amdgpu_bo_handle_type_gem_flink_name:
bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
bo = util_hash_table_get(dev->bo_flink_names,
(void*)(uintptr_t)shared_handle);
break;
case amdgpu_bo_handle_type_dma_buf_fd:
bo = handle_table_lookup(&dev->bo_handles, shared_handle);
bo = util_hash_table_get(dev->bo_handles,
(void*)(uintptr_t)shared_handle);
break;
case amdgpu_bo_handle_type_kms:
case amdgpu_bo_handle_type_kms_noimport:
/* Importing a KMS handle is not allowed. */
r = -EPERM;
goto unlock;
pthread_mutex_unlock(&dev->bo_table_mutex);
return -EPERM;
default:
r = -EINVAL;
goto unlock;
pthread_mutex_unlock(&dev->bo_table_mutex);
return -EINVAL;
}
if (bo) {
@ -320,37 +328,53 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
return 0;
}
bo = calloc(1, sizeof(struct amdgpu_bo));
if (!bo) {
pthread_mutex_unlock(&dev->bo_table_mutex);
if (type == amdgpu_bo_handle_type_dma_buf_fd) {
amdgpu_close_kms_handle(dev, shared_handle);
}
return -ENOMEM;
}
/* Open the handle. */
switch (type) {
case amdgpu_bo_handle_type_gem_flink_name:
open_arg.name = shared_handle;
r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (r)
goto unlock;
flink_name = shared_handle;
handle = open_arg.handle;
alloc_size = open_arg.size;
if (dev->flink_fd != dev->fd) {
r = drmPrimeHandleToFD(dev->flink_fd, handle,
DRM_CLOEXEC, &dma_fd);
if (r)
goto free_bo_handle;
r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
close(dma_fd);
if (r)
goto free_bo_handle;
r = drmCloseBufferHandle(dev->flink_fd,
open_arg.handle);
if (r)
goto free_bo_handle;
if (r) {
free(bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
open_arg.handle = 0;
bo->handle = open_arg.handle;
if (dev->flink_fd != dev->fd) {
r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
if (r) {
free(bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);
close(dma_fd);
if (r) {
free(bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
}
bo->flink_name = shared_handle;
bo->alloc_size = open_arg.size;
util_hash_table_set(dev->bo_flink_names,
(void*)(uintptr_t)bo->flink_name, bo);
break;
case amdgpu_bo_handle_type_dma_buf_fd:
handle = shared_handle;
alloc_size = dma_buf_size;
bo->handle = shared_handle;
bo->alloc_size = dma_buf_size;
break;
case amdgpu_bo_handle_type_kms:
@ -359,38 +383,19 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
}
/* Initialize it. */
r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
if (r)
goto free_bo_handle;
atomic_set(&bo->refcount, 1);
bo->dev = dev;
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
if (flink_name) {
bo->flink_name = flink_name;
r = handle_table_insert(&dev->bo_flink_names, flink_name,
bo);
if (r)
goto free_bo_handle;
}
util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
free_bo_handle:
if (flink_name && open_arg.handle)
drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
if (bo)
amdgpu_bo_free(bo);
else
drmCloseBufferHandle(dev->fd, handle);
unlock:
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
struct amdgpu_device *dev;
struct amdgpu_bo *bo = buf_handle;
@ -401,11 +406,13 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
if (update_references(&bo->refcount, NULL)) {
/* Remove the buffer from the hash tables. */
handle_table_remove(&dev->bo_handles, bo->handle);
util_hash_table_remove(dev->bo_handles,
(void*)(uintptr_t)bo->handle);
if (bo->flink_name)
handle_table_remove(&dev->bo_flink_names,
bo->flink_name);
if (bo->flink_name) {
util_hash_table_remove(dev->bo_flink_names,
(void*)(uintptr_t)bo->flink_name);
}
/* Release CPU access. */
if (bo->cpu_map_count > 0) {
@ -413,22 +420,16 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
amdgpu_bo_cpu_unmap(bo);
}
drmCloseBufferHandle(dev->fd, bo->handle);
amdgpu_close_kms_handle(dev, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
}
pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
}
drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
atomic_inc(&bo->refcount);
}
drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
union drm_amdgpu_gem_mmap args;
void *ptr;
@ -476,7 +477,7 @@ drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
return 0;
}
drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
int r;
@ -502,7 +503,7 @@ drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
return r;
}
drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
struct amdgpu_buffer_size_alignments *info)
{
info->size_local = dev->dev_info.pte_fragment_size;
@ -510,8 +511,8 @@ drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
uint64_t timeout_ns,
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
uint64_t timeout_ns,
bool *busy)
{
union drm_amdgpu_gem_wait_idle args;
@ -533,54 +534,13 @@ drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
}
}
drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
amdgpu_bo_handle *buf_handle,
uint64_t *offset_in_bo)
{
struct amdgpu_bo *bo = NULL;
uint32_t i;
int r = 0;
if (cpu == NULL || size == 0)
return -EINVAL;
/*
* Workaround for a buggy application which tries to import previously
* exposed CPU pointers. If we find a real world use case we should
* improve that by asking the kernel for the right handle.
*/
pthread_mutex_lock(&dev->bo_table_mutex);
for (i = 0; i < dev->bo_handles.max_key; i++) {
bo = handle_table_lookup(&dev->bo_handles, i);
if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
continue;
if (cpu >= bo->cpu_ptr &&
cpu < (void*)((uintptr_t)bo->cpu_ptr + (size_t)bo->alloc_size))
break;
}
if (i < dev->bo_handles.max_key) {
atomic_inc(&bo->refcount);
*buf_handle = bo;
*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
} else {
*buf_handle = NULL;
*offset_in_bo = 0;
r = -ENXIO;
}
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
}
drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
amdgpu_bo_handle *buf_handle)
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
amdgpu_bo_handle *buf_handle)
{
int r;
struct amdgpu_bo *bo;
struct drm_amdgpu_gem_userptr args;
args.addr = (uintptr_t)cpu;
@ -590,65 +550,34 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
&args, sizeof(args));
if (r)
goto out;
return r;
pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
drmCloseBufferHandle(dev->fd, args.handle);
}
bo = calloc(1, sizeof(struct amdgpu_bo));
if (!bo)
return -ENOMEM;
atomic_set(&bo->refcount, 1);
bo->dev = dev;
bo->alloc_size = size;
bo->handle = args.handle;
*buf_handle = bo;
out:
return r;
}
drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
uint32_t number_of_buffers,
struct drm_amdgpu_bo_list_entry *buffers,
uint32_t *result)
{
union drm_amdgpu_bo_list args;
int r;
memset(&args, 0, sizeof(args));
args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
args.in.bo_number = number_of_buffers;
args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
&args, sizeof(args));
if (!r)
*result = args.out.list_handle;
return r;
}
drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
uint32_t bo_list)
{
union drm_amdgpu_bo_list args;
memset(&args, 0, sizeof(args));
args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
args.in.list_handle = bo_list;
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
&args, sizeof(args));
}
drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
uint32_t number_of_resources,
amdgpu_bo_handle *resources,
uint8_t *resource_prios,
amdgpu_bo_list_handle *result)
int amdgpu_bo_list_create(amdgpu_device_handle dev,
uint32_t number_of_resources,
amdgpu_bo_handle *resources,
uint8_t *resource_prios,
amdgpu_bo_list_handle *result)
{
struct drm_amdgpu_bo_list_entry *list;
union drm_amdgpu_bo_list args;
unsigned i;
int r;
if (!number_of_resources || !resources)
if (!number_of_resources)
return -EINVAL;
/* overflow check for multiplication */
@ -692,7 +621,7 @@ drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
union drm_amdgpu_bo_list args;
int r;
@ -710,10 +639,10 @@ drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
return r;
}
drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
uint32_t number_of_resources,
amdgpu_bo_handle *resources,
uint8_t *resource_prios)
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
uint32_t number_of_resources,
amdgpu_bo_handle *resources,
uint8_t *resource_prios)
{
struct drm_amdgpu_bo_list_entry *list;
union drm_amdgpu_bo_list args;
@ -751,12 +680,12 @@ drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
return r;
}
drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops)
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops)
{
amdgpu_device_handle dev = bo->dev;
@ -768,13 +697,13 @@ drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
AMDGPU_VM_PAGE_EXECUTABLE, ops);
}
drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops)
int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops)
{
struct drm_amdgpu_gem_va va;
int r;
@ -795,39 +724,3 @@ drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
return r;
}
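A hedged sketch of the VA helpers above: map a buffer at a GPU virtual address and unmap it again. The address is assumed to come from a VA range allocator that is not part of this excerpt, and the flags argument is left at zero because amdgpu_bo_va_op() applies the default readable/writeable/executable page flags itself, as shown in its implementation.
static int va_map_example(amdgpu_bo_handle bo, uint64_t size, uint64_t va_addr)
{
    int r;
    r = amdgpu_bo_va_op(bo, 0 /* offset in BO */, size, va_addr,
                        0 /* flags */, AMDGPU_VA_OP_MAP);
    if (r)
        return r;
    /* ... GPU work referencing va_addr ... */
    return amdgpu_bo_va_op(bo, 0, size, va_addr, 0, AMDGPU_VA_OP_UNMAP);
}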
drm_public int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
amdgpu_bo_handle bo,
uint64_t offset,
uint64_t size,
uint64_t addr,
uint64_t flags,
uint32_t ops,
uint32_t vm_timeline_syncobj_out,
uint64_t vm_timeline_point,
uint64_t input_fence_syncobj_handles,
uint32_t num_syncobj_handles)
{
struct drm_amdgpu_gem_va va;
int r;
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
return -EINVAL;
memset(&va, 0, sizeof(va));
va.handle = bo ? bo->handle : 0;
va.operation = ops;
va.flags = flags;
va.va_address = addr;
va.offset_in_bo = offset;
va.map_size = size;
va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
va.vm_timeline_point = vm_timeline_point;
va.input_fence_syncobj_handles = input_fence_syncobj_handles;
va.num_syncobj_handles = num_syncobj_handles;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
return r;
}

View file

@ -28,7 +28,7 @@
#include <pthread.h>
#include <sched.h>
#include <sys/ioctl.h>
#if HAVE_ALLOCA_H
#ifdef HAVE_ALLOCA_H
# include <alloca.h>
#endif
@ -48,30 +48,17 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
*
* \return 0 on success otherwise POSIX Error code
*/
drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
uint32_t priority,
amdgpu_context_handle *context)
int amdgpu_cs_ctx_create2(amdgpu_device_handle dev, uint32_t priority,
amdgpu_context_handle *context)
{
struct amdgpu_context *gpu_context;
union drm_amdgpu_ctx args;
int i, j, k;
int r;
char *override_priority;
if (!dev || !context)
return -EINVAL;
override_priority = getenv("AMD_PRIORITY");
if (override_priority) {
/* The priority is a signed integer. The variable type is
* wrong. If parsing fails, priority is unchanged.
*/
if (sscanf(override_priority, "%i", &priority) == 1) {
printf("amdgpu: context priority changed to %i\n",
priority);
}
}
gpu_context = calloc(1, sizeof(struct amdgpu_context));
if (!gpu_context)
return -ENOMEM;
@ -106,8 +93,8 @@ error:
return r;
}
drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
amdgpu_context_handle *context)
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
amdgpu_context_handle *context)
{
return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
}
@ -120,7 +107,7 @@ drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
*
* \return 0 on success otherwise POSIX Error code
*/
drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
union drm_amdgpu_ctx args;
int i, j, k;
@ -140,8 +127,8 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
amdgpu_semaphore_handle sem, tmp;
LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
amdgpu_semaphore_handle sem;
LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
list_del(&sem->list);
amdgpu_cs_reset_sem(sem);
amdgpu_cs_unreference_sem(sem);
@ -154,55 +141,8 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
return r;
}
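As a quick illustration of the context lifetime managed by amdgpu_cs_ctx_create2()/amdgpu_cs_ctx_free() above, a minimal caller might look like this; the submission step in the middle is only indicated, not shown.
static int ctx_example(amdgpu_device_handle dev)
{
    amdgpu_context_handle ctx;
    int r;
    /* Equivalent to amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, &ctx). */
    r = amdgpu_cs_ctx_create(dev, &ctx);
    if (r)
        return r;
    /* ... build IBs and hand them in with amdgpu_cs_submit() ... */
    return amdgpu_cs_ctx_free(ctx);
}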
drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
amdgpu_context_handle context,
int master_fd,
unsigned priority)
{
union drm_amdgpu_sched args;
int r;
if (!dev || !context || master_fd < 0)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
args.in.fd = dev->fd;
args.in.priority = priority;
args.in.ctx_id = context->id;
r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
if (r)
return r;
return 0;
}
drm_public int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
uint32_t op,
uint32_t flags,
uint32_t *out_flags)
{
union drm_amdgpu_ctx args;
int r;
if (!context)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = op;
args.in.ctx_id = context->id;
args.in.flags = flags;
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
&args, sizeof(args));
if (!r && out_flags)
*out_flags = args.out.pstate.flags;
return r;
}
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs)
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs)
{
union drm_amdgpu_ctx args;
int r;
@ -222,25 +162,6 @@ drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
return r;
}
drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags)
{
union drm_amdgpu_ctx args;
int r;
if (!context)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
args.in.ctx_id = context->id;
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
&args, sizeof(args));
if (!r)
*flags = args.out.state.flags;
return r;
}
/**
* Submit command to kernel DRM
* \param dev - \c [in] Device handle
@ -254,15 +175,15 @@ drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
struct amdgpu_cs_request *ibs_request)
{
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
struct drm_amdgpu_cs_chunk *chunks;
struct drm_amdgpu_cs_chunk_data *chunk_data;
struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
amdgpu_device_handle dev = context->dev;
struct list_head *sem_list;
amdgpu_semaphore_handle sem, tmp;
uint32_t i, size, num_chunks, bo_list_handle = 0, sem_count = 0;
uint64_t seq_no;
uint32_t i, size, sem_count = 0;
bool user_fence;
int r = 0;
@ -278,18 +199,23 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
chunk_array = alloca(sizeof(uint64_t) * size);
chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
memset(&cs, 0, sizeof(cs));
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
cs.in.ctx_id = context->id;
if (ibs_request->resources)
bo_list_handle = ibs_request->resources->handle;
num_chunks = ibs_request->number_of_ibs;
cs.in.bo_list_handle = ibs_request->resources->handle;
cs.in.num_chunks = ibs_request->number_of_ibs;
/* IB chunks */
for (i = 0; i < ibs_request->number_of_ibs; i++) {
struct amdgpu_cs_ib_info *ib;
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
@ -308,9 +234,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
pthread_mutex_lock(&context->sequence_mutex);
if (user_fence) {
i = num_chunks++;
i = cs.in.num_chunks++;
/* fence chunk */
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
@ -323,7 +250,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
}
if (ibs_request->number_of_dependencies) {
dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) *
dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
ibs_request->number_of_dependencies);
if (!dependencies) {
r = -ENOMEM;
@ -340,9 +267,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
dep->handle = info->fence;
}
i = num_chunks++;
i = cs.in.num_chunks++;
/* dependencies chunk */
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
* ibs_request->number_of_dependencies;
@ -353,7 +281,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
LIST_FOR_EACH_ENTRY(sem, sem_list, list)
sem_count++;
if (sem_count) {
sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
if (!sem_dependencies) {
r = -ENOMEM;
goto error_unlock;
@ -372,30 +300,33 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
amdgpu_cs_reset_sem(sem);
amdgpu_cs_unreference_sem(sem);
}
i = num_chunks++;
i = cs.in.num_chunks++;
/* dependencies chunk */
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
}
r = amdgpu_cs_submit_raw2(dev, context, bo_list_handle, num_chunks,
chunks, &seq_no);
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
&cs, sizeof(cs));
if (r)
goto error_unlock;
ibs_request->seq_no = seq_no;
ibs_request->seq_no = cs.out.handle;
context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
error_unlock:
pthread_mutex_unlock(&context->sequence_mutex);
free(dependencies);
free(sem_dependencies);
return r;
}
drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
uint64_t flags,
struct amdgpu_cs_request *ibs_request,
uint32_t number_of_requests)
int amdgpu_cs_submit(amdgpu_context_handle context,
uint64_t flags,
struct amdgpu_cs_request *ibs_request,
uint32_t number_of_requests)
{
uint32_t i;
int r;
@ -476,10 +407,10 @@ static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
return 0;
}
drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
uint64_t timeout_ns,
uint64_t flags,
uint32_t *expired)
int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
uint64_t timeout_ns,
uint64_t flags,
uint32_t *expired)
{
bool busy = true;
int r;
@ -547,12 +478,12 @@ static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
return 0;
}
drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
uint32_t fence_count,
bool wait_all,
uint64_t timeout_ns,
uint32_t *status,
uint32_t *first)
int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
uint32_t fence_count,
bool wait_all,
uint64_t timeout_ns,
uint32_t *status,
uint32_t *first)
{
uint32_t i;
@ -575,7 +506,7 @@ drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
timeout_ns, status, first);
}
drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
struct amdgpu_semaphore *gpu_semaphore;
@ -592,41 +523,34 @@ drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
return 0;
}
drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
uint32_t ip_type,
int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
uint32_t ip_type,
uint32_t ip_instance,
uint32_t ring,
amdgpu_semaphore_handle sem)
{
int ret;
if (!ctx || !sem)
return -EINVAL;
if (ip_type >= AMDGPU_HW_IP_NUM)
return -EINVAL;
if (ring >= AMDGPU_CS_MAX_RINGS)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
/* sem has been signaled */
if (sem->signal_fence.context) {
ret = -EINVAL;
goto unlock;
}
if (sem->signal_fence.context)
return -EINVAL;
pthread_mutex_lock(&ctx->sequence_mutex);
sem->signal_fence.context = ctx;
sem->signal_fence.ip_type = ip_type;
sem->signal_fence.ip_instance = ip_instance;
sem->signal_fence.ring = ring;
sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
update_references(NULL, &sem->refcount);
ret = 0;
unlock:
pthread_mutex_unlock(&ctx->sequence_mutex);
return ret;
return 0;
}
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
uint32_t ip_type,
int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
uint32_t ip_type,
uint32_t ip_instance,
uint32_t ring,
amdgpu_semaphore_handle sem)
@ -671,14 +595,14 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
return 0;
}
drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
return amdgpu_cs_unreference_sem(sem);
}
drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
uint32_t flags,
uint32_t *handle)
int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
uint32_t flags,
uint32_t *handle)
{
if (NULL == dev)
return -EINVAL;
@ -686,8 +610,8 @@ drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
return drmSyncobjCreate(dev->fd, flags, handle);
}
drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
uint32_t *handle)
int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
uint32_t *handle)
{
if (NULL == dev)
return -EINVAL;
@ -695,8 +619,8 @@ drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
return drmSyncobjCreate(dev->fd, 0, handle);
}
drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
uint32_t handle)
int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
uint32_t handle)
{
if (NULL == dev)
return -EINVAL;
@ -704,9 +628,8 @@ drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
return drmSyncobjDestroy(dev->fd, handle);
}
drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint32_t syncobj_count)
int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count)
{
if (NULL == dev)
return -EINVAL;
@ -714,9 +637,8 @@ drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint32_t syncobj_count)
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count)
{
if (NULL == dev)
return -EINVAL;
@ -724,22 +646,10 @@ drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineSignal(dev->fd, syncobjs,
points, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
uint32_t *handles, unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled)
int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
uint32_t *handles, unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled)
{
if (NULL == dev)
return -EINVAL;
@ -748,42 +658,9 @@ drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
timeout_nsec, flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}
drm_public int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles, uint32_t flags)
{
if (!dev)
return -EINVAL;
return drmSyncobjQuery2(dev->fd, handles, points, num_handles, flags);
}
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
uint32_t handle,
int *shared_fd)
int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
uint32_t handle,
int *shared_fd)
{
if (NULL == dev)
return -EINVAL;
@ -791,9 +668,9 @@ drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
}
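The syncobj wrappers above are thin shims over the drmSyncobj* ioctls; a hedged sketch of sharing a syncobj between processes via a file descriptor:
static int syncobj_export_example(amdgpu_device_handle dev)
{
    uint32_t handle;
    int shared_fd = -1;
    int r;
    r = amdgpu_cs_create_syncobj2(dev, 0 /* flags */, &handle);
    if (r)
        return r;
    r = amdgpu_cs_export_syncobj(dev, handle, &shared_fd);
    /* On success, shared_fd can be sent to another process and turned back
     * into a handle there with amdgpu_cs_import_syncobj(). */
    amdgpu_cs_destroy_syncobj(dev, handle);
    return r;
}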
drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
int shared_fd,
uint32_t *handle)
int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
int shared_fd,
uint32_t *handle)
{
if (NULL == dev)
return -EINVAL;
@ -801,9 +678,9 @@ drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
}
drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int *sync_file_fd)
int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int *sync_file_fd)
{
if (NULL == dev)
return -EINVAL;
@ -811,9 +688,9 @@ drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
}
drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd)
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd)
{
if (NULL == dev)
return -EINVAL;
@ -821,92 +698,19 @@ drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd)
int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
amdgpu_context_handle context,
amdgpu_bo_list_handle bo_list_handle,
int num_chunks,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
syncobj, point, flags);
if (ret)
goto out;
ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
if (ret)
goto out;
ret = drmSyncobjTransfer(dev->fd, syncobj, point,
binary_handle, 0, 0);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTransfer(dev->fd,
dst_handle, dst_point,
src_handle, src_point,
flags);
}
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
amdgpu_context_handle context,
amdgpu_bo_list_handle bo_list_handle,
int num_chunks,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs;
union drm_amdgpu_cs cs = {0};
uint64_t *chunk_array;
int i, r;
if (num_chunks == 0)
return -EINVAL;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
@ -924,41 +728,15 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
amdgpu_context_handle context,
uint32_t bo_list_handle,
int num_chunks,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
cs.in.ctx_id = context->id;
cs.in.bo_list_handle = bo_list_handle;
cs.in.num_chunks = num_chunks;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
&cs, sizeof(cs));
if (!r && seq_no)
*seq_no = cs.out.handle;
return r;
}
drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
struct drm_amdgpu_cs_chunk_data *data)
{
data->fence_data.handle = fence_info->handle->handle;
data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
}
drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
struct drm_amdgpu_cs_chunk_dep *dep)
void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
struct drm_amdgpu_cs_chunk_dep *dep)
{
dep->ip_type = fence->ip_type;
dep->ip_instance = fence->ip_instance;
@ -967,15 +745,14 @@ drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
dep->handle = fence->fence;
}
drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
struct amdgpu_cs_fence *fence,
uint32_t what,
uint32_t *out_handle)
int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
struct amdgpu_cs_fence *fence,
uint32_t what,
uint32_t *out_handle)
{
union drm_amdgpu_fence_to_handle fth;
union drm_amdgpu_fence_to_handle fth = {0};
int r;
memset(&fth, 0, sizeof(fth));
fth.in.fence.ctx_id = fence->context->id;
fth.in.fence.ip_type = fence->ip_type;
fth.in.fence.ip_instance = fence->ip_instance;

View file

@ -39,15 +39,47 @@
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct util_hash_table *fd_tab;
static int fd_compare(int fd1, int fd2)
static unsigned handle_hash(void *key)
{
return PTR_TO_UINT(key);
}
static int handle_compare(void *key1, void *key2)
{
return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
static unsigned fd_hash(void *key)
{
int fd = PTR_TO_UINT(key);
char *name = drmGetPrimaryDeviceNameFromFd(fd);
unsigned result = 0;
char *c;
if (name == NULL)
return 0;
for (c = name; *c; ++c)
result += *c;
free(name);
return result;
}
static int fd_compare(void *key1, void *key2)
{
int fd1 = PTR_TO_UINT(key1);
int fd2 = PTR_TO_UINT(key2);
char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
int result;
@ -95,28 +127,23 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
/* Remove dev from dev_list, if it was added there. */
if (dev == dev_list) {
dev_list = dev->next;
} else {
for (amdgpu_device_handle node = dev_list; node; node = node->next) {
if (node->next == dev) {
node->next = dev->next;
break;
}
}
pthread_mutex_lock(&fd_mutex);
util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
if (util_hash_table_count(fd_tab) == 0) {
util_hash_table_destroy(fd_tab);
fd_tab = NULL;
}
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
close(dev->flink_fd);
pthread_mutex_unlock(&fd_mutex);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_low);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high_32);
amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high);
handle_table_fini(&dev->bo_handles);
handle_table_fini(&dev->bo_flink_names);
amdgpu_vamgr_deinit(&dev->vamgr_32);
amdgpu_vamgr_deinit(&dev->vamgr);
amdgpu_vamgr_deinit(&dev->vamgr_high_32);
amdgpu_vamgr_deinit(&dev->vamgr_high);
util_hash_table_destroy(dev->bo_flink_names);
util_hash_table_destroy(dev->bo_handles);
pthread_mutex_destroy(&dev->bo_table_mutex);
free(dev->marketing_name);
free(dev);
@ -137,49 +164,45 @@ static void amdgpu_device_free_internal(amdgpu_device_handle dev)
* // incremented. dst is freed if its reference counter is 0.
*/
static void amdgpu_device_reference(struct amdgpu_device **dst,
struct amdgpu_device *src)
struct amdgpu_device *src)
{
if (update_references(&(*dst)->refcount, &src->refcount))
amdgpu_device_free_internal(*dst);
*dst = src;
}
static int _amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle,
bool deduplicate_device)
int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
struct amdgpu_device *dev = NULL;
struct amdgpu_device *dev;
drmVersionPtr version;
int r;
int flag_auth = 0;
int flag_authexist=0;
uint32_t accel_working = 0;
uint64_t start, max;
*device_handle = NULL;
pthread_mutex_lock(&dev_mutex);
pthread_mutex_lock(&fd_mutex);
if (!fd_tab)
fd_tab = util_hash_table_create(fd_hash, fd_compare);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&dev_mutex);
pthread_mutex_unlock(&fd_mutex);
return r;
}
if (deduplicate_device)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
if (dev) {
r = amdgpu_get_auth(dev->fd, &flag_authexist);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&dev_mutex);
pthread_mutex_unlock(&fd_mutex);
return r;
}
if ((flag_auth) && (!flag_authexist)) {
@ -188,14 +211,14 @@ static int _amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
amdgpu_device_reference(device_handle, dev);
pthread_mutex_unlock(&dev_mutex);
pthread_mutex_unlock(&fd_mutex);
return 0;
}
dev = calloc(1, sizeof(struct amdgpu_device));
if (!dev) {
fprintf(stderr, "%s: calloc failed\n", __func__);
pthread_mutex_unlock(&dev_mutex);
pthread_mutex_unlock(&fd_mutex);
return -ENOMEM;
}
@ -223,6 +246,9 @@ static int _amdgpu_device_initialize(int fd,
dev->minor_version = version->version_minor;
drmFreeVersion(version);
dev->bo_flink_names = util_hash_table_create(handle_hash,
handle_compare);
dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
pthread_mutex_init(&dev->bo_table_mutex, NULL);
/* Check if acceleration is working. */
@ -244,23 +270,35 @@ static int _amdgpu_device_initialize(int fd,
goto cleanup;
}
amdgpu_va_manager_init(&dev->va_mgr,
dev->dev_info.virtual_address_offset,
dev->dev_info.virtual_address_max,
dev->dev_info.high_va_offset,
dev->dev_info.high_va_max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.virtual_address_offset;
max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr, start, max,
dev->dev_info.virtual_address_alignment);
start = dev->dev_info.high_va_offset;
max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
dev->dev_info.virtual_address_alignment);
start = max;
max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
0x100000000ULL);
amdgpu_vamgr_init(&dev->vamgr_high, start, max,
dev->dev_info.virtual_address_alignment);
amdgpu_parse_asic_ids(dev);
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
if (deduplicate_device) {
dev->next = dev_list;
dev_list = dev;
}
pthread_mutex_unlock(&dev_mutex);
util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
pthread_mutex_unlock(&fd_mutex);
return 0;
@ -268,59 +306,32 @@ cleanup:
if (dev->fd >= 0)
close(dev->fd);
free(dev);
pthread_mutex_unlock(&dev_mutex);
pthread_mutex_unlock(&fd_mutex);
return r;
}
drm_public int amdgpu_device_initialize(int fd,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, true);
}
drm_public int amdgpu_device_initialize2(int fd, bool deduplicate_device,
uint32_t *major_version,
uint32_t *minor_version,
amdgpu_device_handle *device_handle)
{
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, deduplicate_device);
}
drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
pthread_mutex_lock(&dev_mutex);
amdgpu_device_reference(&dev, NULL);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
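A minimal sketch of bringing a device up through the initialization path above. The render-node path is only an example, and the final close of the caller's fd is omitted because fd ownership rules differ between libdrm versions.
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "amdgpu.h"
int device_init_example(void)
{
    uint32_t major, minor;
    amdgpu_device_handle dev;
    int fd, r;
    fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);  /* example node */
    if (fd < 0)
        return -1;
    r = amdgpu_device_initialize(fd, &major, &minor, &dev);
    if (r) {
        close(fd);
        return r;
    }
    printf("DRM %u.%u, GPU: %s\n", major, minor,
           amdgpu_get_marketing_name(dev));
    return amdgpu_device_deinitialize(dev);
}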
drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
return device_handle->fd;
return dev->marketing_name;
}
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
if (dev->marketing_name)
return dev->marketing_name;
else
return "AMD Radeon Graphics";
}
drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
enum amdgpu_sw_info info,
void *value)
int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
void *value)
{
uint32_t *val32 = (uint32_t*)value;
switch (info) {
case amdgpu_sw_info_address32_hi:
if (dev->va_mgr.vamgr_high_32.va_max)
*val32 = (dev->va_mgr.vamgr_high_32.va_max - 1) >> 32;
if (dev->vamgr_high_32.va_max)
*val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
else
*val32 = (dev->va_mgr.vamgr_32.va_max - 1) >> 32;
*val32 = (dev->vamgr_32.va_max - 1) >> 32;
return 0;
}
return -EINVAL;

View file

@ -30,8 +30,8 @@
#include "amdgpu_internal.h"
#include "xf86drm.h"
drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
unsigned size, void *value)
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
@ -44,8 +44,8 @@ drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
int32_t *result)
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
int32_t *result)
{
struct drm_amdgpu_info request;
@ -59,9 +59,9 @@ drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
unsigned dword_offset, unsigned count, uint32_t instance,
uint32_t flags, uint32_t *values)
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
unsigned count, uint32_t instance, uint32_t flags,
uint32_t *values)
{
struct drm_amdgpu_info request;
@ -78,9 +78,8 @@ drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
unsigned type,
uint32_t *count)
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
uint32_t *count)
{
struct drm_amdgpu_info request;
@ -94,9 +93,9 @@ drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info)
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info)
{
struct drm_amdgpu_info request;
@ -111,9 +110,9 @@ drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
unsigned fw_type, unsigned ip_instance, unsigned index,
uint32_t *version, uint32_t *feature)
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
unsigned ip_instance, unsigned index,
uint32_t *version, uint32_t *feature)
{
struct drm_amdgpu_info request;
struct drm_amdgpu_info_firmware firmware = {};
@ -137,24 +136,6 @@ drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
unsigned type,
unsigned ip_instance,
struct drm_amdgpu_info_uq_fw_areas *info)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)info;
request.return_size = sizeof(*info);
request.query = AMDGPU_INFO_UQ_FW_AREAS;
request.query_hw_ip.type = type;
request.query_hw_ip.ip_instance = ip_instance;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
int r, i;
@ -246,8 +227,8 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
return 0;
}
drm_public int amdgpu_query_gpu_info(amdgpu_device_handle dev,
struct amdgpu_gpu_info *info)
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
struct amdgpu_gpu_info *info)
{
if (!dev || !info)
return -EINVAL;
@ -258,10 +239,10 @@ drm_public int amdgpu_query_gpu_info(amdgpu_device_handle dev,
return 0;
}
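To show how the query helpers above fit together, a brief sketch that reads the cached GPU info and the VRAM heap state; the field names are taken from the amdgpu_gpu_info and amdgpu_heap_info structures in amdgpu.h.
static int query_example(amdgpu_device_handle dev)
{
    struct amdgpu_gpu_info gpu;
    struct amdgpu_heap_info vram;
    int r;
    r = amdgpu_query_gpu_info(dev, &gpu);
    if (r)
        return r;
    r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0 /* flags */, &vram);
    if (r)
        return r;
    /* gpu.family_id, vram.heap_size and vram.heap_usage are now valid. */
    return 0;
}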
drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
uint32_t heap,
uint32_t flags,
struct amdgpu_heap_info *info)
int amdgpu_query_heap_info(amdgpu_device_handle dev,
uint32_t heap,
uint32_t flags,
struct amdgpu_heap_info *info)
{
struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
int r;
@ -310,8 +291,8 @@ drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
struct amdgpu_gds_resource_info *gds_info)
int amdgpu_query_gds_info(amdgpu_device_handle dev,
struct amdgpu_gds_resource_info *gds_info)
{
struct drm_amdgpu_info_gds gds_config = {};
int r;
@ -335,8 +316,8 @@ drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
return 0;
}
drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
unsigned size, void *value)
int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
@ -349,32 +330,3 @@ drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned senso
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)value;
request.return_size = size;
request.query = AMDGPU_INFO_VIDEO_CAPS;
request.sensor_info.type = cap_type;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
drm_public int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev,
unsigned size, void *value)
{
struct drm_amdgpu_info request;
memset(&request, 0, sizeof(request));
request.return_pointer = (uintptr_t)value;
request.return_size = size;
request.query = AMDGPU_INFO_GPUVM_FAULT;
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}

View file

@ -32,7 +32,6 @@
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"
#include "handle_table.h"
#define AMDGPU_CS_MAX_RINGS 8
/* do not use the macro below unless b is a power-of-two aligned value */
@ -57,26 +56,15 @@ struct amdgpu_bo_va_mgr {
};
struct amdgpu_va {
amdgpu_device_handle dev;
uint64_t address;
uint64_t size;
enum amdgpu_gpu_va_range range;
struct amdgpu_bo_va_mgr *vamgr;
};
struct amdgpu_va_manager {
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr_low;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_device {
atomic_t refcount;
struct amdgpu_device *next;
int fd;
int flink_fd;
unsigned major_version;
@ -84,15 +72,21 @@ struct amdgpu_device {
char *marketing_name;
/** List of buffer handles. Protected by bo_table_mutex. */
struct handle_table bo_handles;
struct util_hash_table *bo_handles;
/** List of buffer GEM flink names. Protected by bo_table_mutex. */
struct handle_table bo_flink_names;
struct util_hash_table *bo_flink_names;
/** This protects all hash tables. */
pthread_mutex_t bo_table_mutex;
struct drm_amdgpu_info_device dev_info;
struct amdgpu_gpu_info info;
struct amdgpu_va_manager va_mgr;
/** The VA manager for the lower virtual address space */
struct amdgpu_bo_va_mgr vamgr;
/** The VA manager for the 32bit address space */
struct amdgpu_bo_va_mgr vamgr_32;
/** The VA manager for the high virtual address space */
struct amdgpu_bo_va_mgr vamgr_high;
/** The VA manager for the 32bit high address space */
struct amdgpu_bo_va_mgr vamgr_high_32;
};
struct amdgpu_bo {
@ -106,7 +100,7 @@ struct amdgpu_bo {
pthread_mutex_t cpu_access_mutex;
void *cpu_ptr;
int64_t cpu_map_count;
int cpu_map_count;
};
struct amdgpu_bo_list {

View file

@ -1,123 +0,0 @@
/*
* Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <string.h>
#include <errno.h>
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
drm_public int
amdgpu_create_userqueue(amdgpu_device_handle dev,
uint32_t ip_type,
uint32_t doorbell_handle,
uint32_t doorbell_offset,
uint64_t queue_va,
uint64_t queue_size,
uint64_t wptr_va,
uint64_t rptr_va,
void *mqd_in,
uint32_t flags,
uint32_t *queue_id)
{
int ret;
union drm_amdgpu_userq userq;
uint64_t mqd_size;
if (!dev)
return -EINVAL;
switch (ip_type) {
case AMDGPU_HW_IP_GFX:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
break;
case AMDGPU_HW_IP_DMA:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
break;
case AMDGPU_HW_IP_COMPUTE:
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
break;
default:
return -EINVAL;
}
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_CREATE;
userq.in.ip_type = ip_type;
userq.in.doorbell_handle = doorbell_handle;
userq.in.doorbell_offset = doorbell_offset;
userq.in.queue_va = queue_va;
userq.in.queue_size = queue_size;
userq.in.wptr_va = wptr_va;
userq.in.rptr_va = rptr_va;
userq.in.mqd = (uint64_t)mqd_in;
userq.in.mqd_size = mqd_size;
userq.in.flags = flags;
ret = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
*queue_id = userq.out.queue_id;
return ret;
}
drm_public int
amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id)
{
union drm_amdgpu_userq userq;
memset(&userq, 0, sizeof(userq));
userq.in.op = AMDGPU_USERQ_OP_FREE;
userq.in.queue_id = queue_id;
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
&userq, sizeof(userq));
}
drm_public int
amdgpu_userq_signal(amdgpu_device_handle dev,
struct drm_amdgpu_userq_signal *signal_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
signal_data, sizeof(struct drm_amdgpu_userq_signal));
return r;
}
drm_public int
amdgpu_userq_wait(amdgpu_device_handle dev,
struct drm_amdgpu_userq_wait *wait_data)
{
int r;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_WAIT,
wait_data, sizeof(struct drm_amdgpu_userq_wait));
return r;
}
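
For reference, a hedged calling sketch for the user-queue helpers removed above (they exist only on the main branch). Every GPU virtual address, the doorbell BO handle and the MQD buffer are placeholder parameters the caller is assumed to have allocated and mapped beforehand; none of them are defined in this diff.

#include "amdgpu.h"
#include "amdgpu_drm.h"

/* Sketch only: create and then free a GFX user queue. */
static int userq_example(amdgpu_device_handle dev, uint32_t doorbell_handle,
			 uint64_t queue_va, uint64_t queue_size,
			 uint64_t wptr_va, uint64_t rptr_va, void *mqd)
{
	uint32_t queue_id;
	int r;

	r = amdgpu_create_userqueue(dev, AMDGPU_HW_IP_GFX,
				    doorbell_handle, 0 /* doorbell offset */,
				    queue_va, queue_size, wptr_va, rptr_va,
				    mqd /* struct drm_amdgpu_userq_mqd_gfx11 */,
				    0 /* flags */, &queue_id);
	if (r)
		return r;

	return amdgpu_free_userqueue(dev, queue_id);
}
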

View file

@@ -29,9 +29,9 @@
#include "amdgpu_internal.h"
#include "util_math.h"
drm_public int amdgpu_va_range_query(amdgpu_device_handle dev,
enum amdgpu_gpu_va_range type,
uint64_t *start, uint64_t *end)
int amdgpu_va_range_query(amdgpu_device_handle dev,
enum amdgpu_gpu_va_range type,
uint64_t *start, uint64_t *end)
{
if (type != amdgpu_gpu_va_range_general)
return -EINVAL;
@@ -69,99 +69,65 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
pthread_mutex_destroy(&mgr->bo_va_mutex);
}
static drm_private int
amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
uint64_t end_va)
{
if (start_va > hole->offset && end_va - hole->offset < hole->size) {
struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
if (!n)
return -ENOMEM;
n->size = start_va - hole->offset;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
hole->size -= (end_va - hole->offset);
hole->offset = end_va;
} else if (start_va > hole->offset) {
hole->size = start_va - hole->offset;
} else if (end_va - hole->offset < hole->size) {
hole->size -= (end_va - hole->offset);
hole->offset = end_va;
} else {
list_del(&hole->list);
free(hole);
}
return 0;
}
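
A worked example may help here (the values are hypothetical, not from the source): for a free hole covering [0x1000, 0x5000), the four branches above behave as follows.

/* Carving [start_va, end_va) out of a free hole [0x1000, 0x5000):
 *
 *   carve [0x2000, 0x3000): middle -> split into [0x1000, 0x2000) and
 *                           [0x3000, 0x5000); a new hole node is inserted
 *   carve [0x3000, 0x5000): tail   -> hole shrinks to [0x1000, 0x3000)
 *   carve [0x1000, 0x2000): head   -> hole shrinks to [0x2000, 0x5000)
 *   carve [0x1000, 0x5000): exact  -> the hole node is unlinked and freed
 */
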
static drm_private int
static drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
uint64_t alignment, uint64_t base_required,
bool search_from_top, uint64_t *va_out)
uint64_t alignment, uint64_t base_required)
{
struct amdgpu_bo_va_hole *hole, *n;
uint64_t offset = 0;
int ret;
uint64_t offset = 0, waste = 0;
alignment = MAX2(alignment, mgr->va_alignment);
size = ALIGN(size, mgr->va_alignment);
if (base_required % alignment)
return -EINVAL;
return AMDGPU_INVALID_VA_ADDRESS;
pthread_mutex_lock(&mgr->bo_va_mutex);
if (!search_from_top) {
LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
offset = base_required;
} else {
uint64_t waste = hole->offset % alignment;
waste = waste ? alignment - waste : 0;
offset = hole->offset + waste;
if (offset >= (hole->offset + hole->size) ||
size > (hole->offset + hole->size) - offset) {
continue;
}
LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
waste = base_required - hole->offset;
offset = base_required;
} else {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
pthread_mutex_unlock(&mgr->bo_va_mutex);
*va_out = offset;
return ret;
}
} else {
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
if (base_required) {
if (hole->offset > base_required ||
(hole->offset + hole->size) < (base_required + size))
continue;
offset = base_required;
} else {
if (size > hole->size)
continue;
offset = hole->offset + hole->size - size;
offset -= offset % alignment;
if (offset < hole->offset) {
continue;
}
}
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
if (!waste && hole->size == size) {
offset = hole->offset;
list_del(&hole->list);
free(hole);
pthread_mutex_unlock(&mgr->bo_va_mutex);
*va_out = offset;
return ret;
return offset;
}
if ((hole->size - waste) > size) {
if (waste) {
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
n->size = waste;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
}
hole->size -= (size + waste);
hole->offset += size + waste;
pthread_mutex_unlock(&mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
pthread_mutex_unlock(&mgr->bo_va_mutex);
return offset;
}
}
pthread_mutex_unlock(&mgr->bo_va_mutex);
return -ENOMEM;
return AMDGPU_INVALID_VA_ADDRESS;
}
static drm_private void
@@ -220,86 +186,71 @@ out:
pthread_mutex_unlock(&mgr->bo_va_mutex);
}
drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
return amdgpu_va_range_alloc2(&dev->va_mgr, va_range_type, size,
va_base_alignment, va_base_required,
va_base_allocated, va_range_handle,
flags);
}
drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
enum amdgpu_gpu_va_range va_range_type,
uint64_t size,
uint64_t va_base_alignment,
uint64_t va_base_required,
uint64_t *va_base_allocated,
amdgpu_va_handle *va_range_handle,
uint64_t flags)
{
struct amdgpu_bo_va_mgr *vamgr;
bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
int ret;
/* Clear the flag when the high VA manager is not initialized */
if (flags & AMDGPU_VA_RANGE_HIGH && !va_mgr->vamgr_high_32.va_max)
if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
flags &= ~AMDGPU_VA_RANGE_HIGH;
if (flags & AMDGPU_VA_RANGE_HIGH) {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_high;
vamgr = &dev->vamgr_high;
} else {
if (flags & AMDGPU_VA_RANGE_32_BIT)
vamgr = &va_mgr->vamgr_32;
vamgr = &dev->vamgr_32;
else
vamgr = &va_mgr->vamgr_low;
vamgr = &dev->vamgr;
}
va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
size = ALIGN(size, vamgr->va_alignment);
ret = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required,
search_from_top, va_base_allocated);
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required);
if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
(*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
/* fallback to 32bit address */
if (flags & AMDGPU_VA_RANGE_HIGH)
vamgr = &va_mgr->vamgr_high_32;
vamgr = &dev->vamgr_high_32;
else
vamgr = &va_mgr->vamgr_32;
ret = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required,
search_from_top, va_base_allocated);
vamgr = &dev->vamgr_32;
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
va_base_alignment, va_base_required);
}
if (!ret) {
if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
struct amdgpu_va* va;
va = calloc(1, sizeof(struct amdgpu_va));
if(!va){
amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
return -ENOMEM;
}
va->dev = dev;
va->address = *va_base_allocated;
va->size = size;
va->range = va_range_type;
va->vamgr = vamgr;
*va_range_handle = va;
} else {
return -EINVAL;
}
return ret;
return 0;
}
drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
if(!va_range_handle || !va_range_handle->address)
return 0;
@@ -310,50 +261,3 @@ drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
free(va_range_handle);
return 0;
}
drm_public uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle)
{
return va_handle->address;
}
drm_public amdgpu_va_manager_handle amdgpu_va_manager_alloc(void)
{
amdgpu_va_manager_handle r = calloc(1, sizeof(struct amdgpu_va_manager));
return r;
}
drm_public void amdgpu_va_manager_init(struct amdgpu_va_manager *va_mgr,
uint64_t low_va_offset, uint64_t low_va_max,
uint64_t high_va_offset, uint64_t high_va_max,
uint32_t virtual_address_alignment)
{
uint64_t start, max;
start = low_va_offset;
max = MIN2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(low_va_max, 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_low, start, max,
virtual_address_alignment);
start = high_va_offset;
max = MIN2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high_32, start, max,
virtual_address_alignment);
start = max;
max = MAX2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
amdgpu_vamgr_init(&va_mgr->vamgr_high, start, max,
virtual_address_alignment);
}
drm_public void amdgpu_va_manager_deinit(struct amdgpu_va_manager *va_mgr)
{
amdgpu_vamgr_deinit(&va_mgr->vamgr_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_low);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high_32);
amdgpu_vamgr_deinit(&va_mgr->vamgr_high);
}
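
For reference, a hedged usage sketch of the allocator above (error handling trimmed): amdgpu_va_range_alloc() reserves a GPU virtual address range, which the caller then binds to a buffer with amdgpu_bo_va_op() (declared in amdgpu.h, not shown in this diff). The device and buffer handles are assumed to exist already.

#include "amdgpu.h"
#include "amdgpu_drm.h"

/* Sketch: reserve a 64 KiB VA range from the general manager and map a BO. */
static void va_example(amdgpu_device_handle dev, amdgpu_bo_handle bo)
{
	const uint64_t size = 64 * 1024;
	uint64_t va_base;
	amdgpu_va_handle va_handle;

	if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
				  0 /* alignment: manager default */,
				  0 /* no fixed base required */,
				  &va_base, &va_handle, 0 /* flags */))
		return;

	amdgpu_bo_va_op(bo, 0, size, va_base, 0, AMDGPU_VA_OP_MAP);
	/* ... use the mapping ... */
	amdgpu_bo_va_op(bo, 0, size, va_base, 0, AMDGPU_VA_OP_UNMAP);
	amdgpu_va_range_free(va_handle);
}
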

View file

@@ -26,7 +26,7 @@
#include "xf86drm.h"
#include "amdgpu_internal.h"
drm_public int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
{
union drm_amdgpu_vm vm;
@@ -37,8 +37,7 @@ drm_public int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
&vm, sizeof(vm));
}
drm_public int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev,
uint32_t flags)
int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags)
{
union drm_amdgpu_vm vm;

View file

@@ -1,72 +0,0 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "handle_table.h"
#include "util_math.h"
drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
void *value)
{
if (key >= table->max_key) {
uint32_t alignment = sysconf(_SC_PAGESIZE) / sizeof(void*);
uint32_t max_key = ALIGN(key + 1, alignment);
void **values;
values = realloc(table->values, max_key * sizeof(void *));
if (!values)
return -ENOMEM;
memset(values + table->max_key, 0, (max_key - table->max_key) *
sizeof(void *));
table->max_key = max_key;
table->values = values;
}
table->values[key] = value;
return 0;
}
drm_private void handle_table_remove(struct handle_table *table, uint32_t key)
{
if (key < table->max_key)
table->values[key] = NULL;
}
drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key)
{
if (key < table->max_key)
return table->values[key];
else
return NULL;
}
drm_private void handle_table_fini(struct handle_table *table)
{
free(table->values);
table->max_key = 0;
table->values = NULL;
}
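
The table above is just a growable array indexed by handle; a short usage sketch follows (internal to the library, since the helpers are drm_private; the object pointer and the key value 42 are placeholders):

#include <assert.h>
#include "handle_table.h"

/* Sketch: zero-initialise, insert, look up, remove, free. */
static void handle_table_example(void *some_object)
{
	struct handle_table table = {0};

	if (handle_table_insert(&table, 42 /* e.g. a GEM handle */, some_object) == 0) {
		assert(handle_table_lookup(&table, 42) == some_object);
		handle_table_remove(&table, 42);
		assert(handle_table_lookup(&table, 42) == NULL);
	}
	handle_table_fini(&table);
}
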

View file

@@ -1,41 +0,0 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _HANDLE_TABLE_H_
#define _HANDLE_TABLE_H_
#include <stdint.h>
#include "libdrm_macros.h"
struct handle_table {
uint32_t max_key;
void **values;
};
drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
void *value);
drm_private void handle_table_remove(struct handle_table *table, uint32_t key);
drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key);
drm_private void handle_table_fini(struct handle_table *table);
#endif /* _HANDLE_TABLE_H_ */

View file

@@ -21,33 +21,35 @@
datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
libdrm_amdgpu = library(
libdrm_amdgpu = shared_library(
'drm_amdgpu',
[
files(
'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'handle_table.c',
'amdgpu_userq.c',
'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'util_hash.c',
'util_hash_table.c',
),
config_file,
],
c_args : [
libdrm_c_args,
warn_c_args,
'-DAMDGPU_ASIC_ID_TABLE="@0@"'.format(join_paths(datadir_amdgpu, 'amdgpu.ids')),
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads, dep_atomic_ops, dep_rt],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs, dep_atomic_ops],
version : '1.0.0',
install : true,
)
install_headers('amdgpu.h', subdir : 'libdrm')
pkg.generate(
libdrm_amdgpu,
name : 'libdrm_amdgpu',
libraries : libdrm_amdgpu,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to kernel DRM services for amdgpu',
)
@@ -56,14 +58,9 @@ ext_libdrm_amdgpu = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
test(
'amdgpu-symbols-check',
symbols_check,
args : [
'--lib', libdrm_amdgpu,
'--symbols-file', files('amdgpu-symbols.txt'),
'--nm', prog_nm.full_path(),
],
'amdgpu-symbol-check',
prog_bash,
env : env_test,
args : [files('amdgpu-symbol-check'), libdrm_amdgpu]
)

383
amdgpu/util_hash.c Normal file
View file

@@ -0,0 +1,383 @@
/**************************************************************************
*
* Copyright 2007 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Zack Rusin <zackr@vmware.com>
*/
#include "util_hash.h"
#include <stdlib.h>
#include <assert.h>
#define MAX(a, b) ((a > b) ? (a) : (b))
static const int MinNumBits = 4;
static const unsigned char prime_deltas[] = {
0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
};
static int primeForNumBits(int numBits)
{
return (1 << numBits) + prime_deltas[numBits];
}
/* Returns the smallest integer n such that
primeForNumBits(n) >= hint.
*/
static int countBits(int hint)
{
int numBits = 0;
int bits = hint;
while (bits > 1) {
bits >>= 1;
numBits++;
}
if (numBits >= (int)sizeof(prime_deltas)) {
numBits = sizeof(prime_deltas) - 1;
} else if (primeForNumBits(numBits) < hint) {
++numBits;
}
return numBits;
}
struct util_node {
struct util_node *next;
unsigned key;
void *value;
};
struct util_hash_data {
struct util_node *fakeNext;
struct util_node **buckets;
int size;
int nodeSize;
short userNumBits;
short numBits;
int numBuckets;
};
struct util_hash {
union {
struct util_hash_data *d;
struct util_node *e;
} data;
};
static void *util_data_allocate_node(struct util_hash_data *hash)
{
return malloc(hash->nodeSize);
}
static void util_free_node(struct util_node *node)
{
free(node);
}
static struct util_node *
util_hash_create_node(struct util_hash *hash,
unsigned akey, void *avalue,
struct util_node **anextNode)
{
struct util_node *node = util_data_allocate_node(hash->data.d);
if (!node)
return NULL;
node->key = akey;
node->value = avalue;
node->next = (struct util_node*)(*anextNode);
*anextNode = node;
++hash->data.d->size;
return node;
}
static void util_data_rehash(struct util_hash_data *hash, int hint)
{
if (hint < 0) {
hint = countBits(-hint);
if (hint < MinNumBits)
hint = MinNumBits;
hash->userNumBits = (short)hint;
while (primeForNumBits(hint) < (hash->size >> 1))
++hint;
} else if (hint < MinNumBits) {
hint = MinNumBits;
}
if (hash->numBits != hint) {
struct util_node *e = (struct util_node *)(hash);
struct util_node **oldBuckets = hash->buckets;
int oldNumBuckets = hash->numBuckets;
int i = 0;
hash->numBits = (short)hint;
hash->numBuckets = primeForNumBits(hint);
hash->buckets = malloc(sizeof(struct util_node*) * hash->numBuckets);
for (i = 0; i < hash->numBuckets; ++i)
hash->buckets[i] = e;
for (i = 0; i < oldNumBuckets; ++i) {
struct util_node *firstNode = oldBuckets[i];
while (firstNode != e) {
unsigned h = firstNode->key;
struct util_node *lastNode = firstNode;
struct util_node *afterLastNode;
struct util_node **beforeFirstNode;
while (lastNode->next != e && lastNode->next->key == h)
lastNode = lastNode->next;
afterLastNode = lastNode->next;
beforeFirstNode = &hash->buckets[h % hash->numBuckets];
while (*beforeFirstNode != e)
beforeFirstNode = &(*beforeFirstNode)->next;
lastNode->next = *beforeFirstNode;
*beforeFirstNode = firstNode;
firstNode = afterLastNode;
}
}
free(oldBuckets);
}
}
static void util_data_might_grow(struct util_hash_data *hash)
{
if (hash->size >= hash->numBuckets)
util_data_rehash(hash, hash->numBits + 1);
}
static void util_data_has_shrunk(struct util_hash_data *hash)
{
if (hash->size <= (hash->numBuckets >> 3) &&
hash->numBits > hash->userNumBits) {
int max = MAX(hash->numBits-2, hash->userNumBits);
util_data_rehash(hash, max);
}
}
static struct util_node *util_data_first_node(struct util_hash_data *hash)
{
struct util_node *e = (struct util_node *)(hash);
struct util_node **bucket = hash->buckets;
int n = hash->numBuckets;
while (n--) {
if (*bucket != e)
return *bucket;
++bucket;
}
return e;
}
static struct util_node **util_hash_find_node(struct util_hash *hash, unsigned akey)
{
struct util_node **node;
if (hash->data.d->numBuckets) {
node = (struct util_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
assert(*node == hash->data.e || (*node)->next);
while (*node != hash->data.e && (*node)->key != akey)
node = &(*node)->next;
} else {
node = (struct util_node **)((const struct util_node * const *)(&hash->data.e));
}
return node;
}
drm_private struct util_hash_iter
util_hash_insert(struct util_hash *hash, unsigned key, void *data)
{
util_data_might_grow(hash->data.d);
{
struct util_node **nextNode = util_hash_find_node(hash, key);
struct util_node *node = util_hash_create_node(hash, key, data, nextNode);
if (!node) {
struct util_hash_iter null_iter = {hash, 0};
return null_iter;
}
{
struct util_hash_iter iter = {hash, node};
return iter;
}
}
}
drm_private struct util_hash *util_hash_create(void)
{
struct util_hash *hash = malloc(sizeof(struct util_hash));
if (!hash)
return NULL;
hash->data.d = malloc(sizeof(struct util_hash_data));
if (!hash->data.d) {
free(hash);
return NULL;
}
hash->data.d->fakeNext = 0;
hash->data.d->buckets = 0;
hash->data.d->size = 0;
hash->data.d->nodeSize = sizeof(struct util_node);
hash->data.d->userNumBits = (short)MinNumBits;
hash->data.d->numBits = 0;
hash->data.d->numBuckets = 0;
return hash;
}
drm_private void util_hash_delete(struct util_hash *hash)
{
struct util_node *e_for_x = (struct util_node *)(hash->data.d);
struct util_node **bucket = (struct util_node **)(hash->data.d->buckets);
int n = hash->data.d->numBuckets;
while (n--) {
struct util_node *cur = *bucket++;
while (cur != e_for_x) {
struct util_node *next = cur->next;
util_free_node(cur);
cur = next;
}
}
free(hash->data.d->buckets);
free(hash->data.d);
free(hash);
}
drm_private struct util_hash_iter
util_hash_find(struct util_hash *hash, unsigned key)
{
struct util_node **nextNode = util_hash_find_node(hash, key);
struct util_hash_iter iter = {hash, *nextNode};
return iter;
}
drm_private unsigned util_hash_iter_key(struct util_hash_iter iter)
{
if (!iter.node || iter.hash->data.e == iter.node)
return 0;
return iter.node->key;
}
drm_private void *util_hash_iter_data(struct util_hash_iter iter)
{
if (!iter.node || iter.hash->data.e == iter.node)
return 0;
return iter.node->value;
}
static struct util_node *util_hash_data_next(struct util_node *node)
{
union {
struct util_node *next;
struct util_node *e;
struct util_hash_data *d;
} a;
int start;
struct util_node **bucket;
int n;
a.next = node->next;
if (!a.next) {
/* iterating beyond the last element */
return 0;
}
if (a.next->next)
return a.next;
start = (node->key % a.d->numBuckets) + 1;
bucket = a.d->buckets + start;
n = a.d->numBuckets - start;
while (n--) {
if (*bucket != a.e)
return *bucket;
++bucket;
}
return a.e;
}
drm_private struct util_hash_iter
util_hash_iter_next(struct util_hash_iter iter)
{
struct util_hash_iter next = {iter.hash, util_hash_data_next(iter.node)};
return next;
}
drm_private int util_hash_iter_is_null(struct util_hash_iter iter)
{
if (!iter.node || iter.node == iter.hash->data.e)
return 1;
return 0;
}
drm_private void *util_hash_take(struct util_hash *hash, unsigned akey)
{
struct util_node **node = util_hash_find_node(hash, akey);
if (*node != hash->data.e) {
void *t = (*node)->value;
struct util_node *next = (*node)->next;
util_free_node(*node);
*node = next;
--hash->data.d->size;
util_data_has_shrunk(hash->data.d);
return t;
}
return 0;
}
drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash)
{
struct util_hash_iter iter = {hash, util_data_first_node(hash->data.d)};
return iter;
}
drm_private struct util_hash_iter
util_hash_erase(struct util_hash *hash, struct util_hash_iter iter)
{
struct util_hash_iter ret = iter;
struct util_node *node = iter.node;
struct util_node **node_ptr;
if (node == hash->data.e)
return iter;
ret = util_hash_iter_next(ret);
node_ptr = (struct util_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
while (*node_ptr != node)
node_ptr = &(*node_ptr)->next;
*node_ptr = node->next;
util_free_node(node);
--hash->data.d->size;
return ret;
}
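
A hedged sketch of the iterator-based API above (the value pointer and the key 1234 are placeholders): every lookup returns a util_hash_iter pointing into the collision chain for the key.

#include <assert.h>
#include "util_hash.h"

/* Sketch: insert, find, and remove a single entry. */
static void util_hash_example(void *value)
{
	struct util_hash *hash = util_hash_create();
	struct util_hash_iter iter;

	if (!hash)
		return;

	util_hash_insert(hash, 1234 /* key */, value);

	iter = util_hash_find(hash, 1234);
	if (!util_hash_iter_is_null(iter))
		assert(util_hash_iter_data(iter) == value);

	util_hash_take(hash, 1234);	/* unlinks the node; the value itself is not freed */
	util_hash_delete(hash);
}
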

103
amdgpu/util_hash.h Normal file
View file

@@ -0,0 +1,103 @@
/**************************************************************************
*
* Copyright 2007 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Hash implementation.
*
* This file provides a hash implementation that is capable of dealing
* with collisions. It stores colliding entries in a linked list. All
* functions operating on the hash return an iterator. The iterator
* itself points to the collision list. If there wasn't any collision
* the list will have just one entry; otherwise, client code should
* iterate over the entries to find the exact entry among the ones that
* had the same key (e.g. memcmp could be used on the data to check
* that)
*
* @author Zack Rusin <zackr@vmware.com>
*/
#ifndef UTIL_HASH_H
#define UTIL_HASH_H
#include <stdbool.h>
#include "libdrm_macros.h"
struct util_hash;
struct util_node;
struct util_hash_iter {
struct util_hash *hash;
struct util_node *node;
};
drm_private struct util_hash *util_hash_create(void);
drm_private void util_hash_delete(struct util_hash *hash);
/**
* Adds data with the given key to the hash. If an entry with the given
* key is already in the hash, the new entry is inserted before it
* in the collision list.
* Function returns iterator pointing to the inserted item in the hash.
*/
drm_private struct util_hash_iter
util_hash_insert(struct util_hash *hash, unsigned key, void *data);
/**
* Removes the item pointed to by the current iterator from the hash.
* Note that the data itself is not erased; if it was a malloc'ed pointer
* it will have to be freed by the caller after calling this function.
* Function returns iterator pointing to the item after the removed one in
* the hash.
*/
drm_private struct util_hash_iter
util_hash_erase(struct util_hash *hash, struct util_hash_iter iter);
drm_private void *util_hash_take(struct util_hash *hash, unsigned key);
drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash);
/**
* Return an iterator pointing to the first entry in the collision list.
*/
drm_private struct util_hash_iter
util_hash_find(struct util_hash *hash, unsigned key);
drm_private int util_hash_iter_is_null(struct util_hash_iter iter);
drm_private unsigned util_hash_iter_key(struct util_hash_iter iter);
drm_private void *util_hash_iter_data(struct util_hash_iter iter);
drm_private struct util_hash_iter
util_hash_iter_next(struct util_hash_iter iter);
#endif

270
amdgpu/util_hash_table.c Normal file
View file

@@ -0,0 +1,270 @@
/**************************************************************************
*
* Copyright 2008 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* General purpose hash table implementation.
*
* Just uses util_hash for now, but it might be better to switch to a linear
* probing hash table implementation at some point -- they are said to have
* better lookup and cache performance, and it appears to be possible to write
* a lock-free implementation of such hash tables.
*
* @author José Fonseca <jfonseca@vmware.com>
*/
#include "util_hash_table.h"
#include "util_hash.h"
#include <stdlib.h>
#include <assert.h>
struct util_hash_table
{
struct util_hash *head;
/** Hash function */
unsigned (*make_hash)(void *key);
/** Compare two keys */
int (*compare)(void *key1, void *key2);
};
struct util_hash_table_item
{
void *key;
void *value;
};
static struct util_hash_table_item *
util_hash_table_item(struct util_hash_iter iter)
{
return (struct util_hash_table_item *)util_hash_iter_data(iter);
}
drm_private struct util_hash_table *
util_hash_table_create(unsigned (*hash)(void *key),
int (*compare)(void *key1, void *key2))
{
struct util_hash_table *ht;
ht = malloc(sizeof(struct util_hash_table));
if(!ht)
return NULL;
ht->head = util_hash_create();
if(!ht->head) {
free(ht);
return NULL;
}
ht->make_hash = hash;
ht->compare = compare;
return ht;
}
static struct util_hash_iter
util_hash_table_find_iter(struct util_hash_table *ht,
void *key, unsigned key_hash)
{
struct util_hash_iter iter;
struct util_hash_table_item *item;
iter = util_hash_find(ht->head, key_hash);
while (!util_hash_iter_is_null(iter)) {
item = (struct util_hash_table_item *)util_hash_iter_data(iter);
if (!ht->compare(item->key, key))
break;
iter = util_hash_iter_next(iter);
}
return iter;
}
static struct util_hash_table_item *
util_hash_table_find_item(struct util_hash_table *ht,
void *key, unsigned key_hash)
{
struct util_hash_iter iter;
struct util_hash_table_item *item;
iter = util_hash_find(ht->head, key_hash);
while (!util_hash_iter_is_null(iter)) {
item = (struct util_hash_table_item *)util_hash_iter_data(iter);
if (!ht->compare(item->key, key))
return item;
iter = util_hash_iter_next(iter);
}
return NULL;
}
drm_private void
util_hash_table_set(struct util_hash_table *ht, void *key, void *value)
{
unsigned key_hash;
struct util_hash_table_item *item;
struct util_hash_iter iter;
assert(ht);
if (!ht)
return;
key_hash = ht->make_hash(key);
item = util_hash_table_find_item(ht, key, key_hash);
if(item) {
/* TODO: key/value destruction? */
item->value = value;
return;
}
item = malloc(sizeof(struct util_hash_table_item));
if(!item)
return;
item->key = key;
item->value = value;
iter = util_hash_insert(ht->head, key_hash, item);
if(util_hash_iter_is_null(iter)) {
free(item);
return;
}
}
drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key)
{
unsigned key_hash;
struct util_hash_table_item *item;
assert(ht);
if (!ht)
return NULL;
key_hash = ht->make_hash(key);
item = util_hash_table_find_item(ht, key, key_hash);
if(!item)
return NULL;
return item->value;
}
drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key)
{
unsigned key_hash;
struct util_hash_iter iter;
struct util_hash_table_item *item;
assert(ht);
if (!ht)
return;
key_hash = ht->make_hash(key);
iter = util_hash_table_find_iter(ht, key, key_hash);
if(util_hash_iter_is_null(iter))
return;
item = util_hash_table_item(iter);
assert(item);
free(item);
util_hash_erase(ht->head, iter);
}
drm_private void util_hash_table_clear(struct util_hash_table *ht)
{
struct util_hash_iter iter;
struct util_hash_table_item *item;
assert(ht);
if (!ht)
return;
iter = util_hash_first_node(ht->head);
while (!util_hash_iter_is_null(iter)) {
item = (struct util_hash_table_item *)util_hash_take(ht->head, util_hash_iter_key(iter));
free(item);
iter = util_hash_first_node(ht->head);
}
}
drm_private void util_hash_table_foreach(struct util_hash_table *ht,
void (*callback)(void *key, void *value, void *data),
void *data)
{
struct util_hash_iter iter;
struct util_hash_table_item *item;
assert(ht);
if (!ht)
return;
iter = util_hash_first_node(ht->head);
while (!util_hash_iter_is_null(iter)) {
item = (struct util_hash_table_item *)util_hash_iter_data(iter);
callback(item->key, item->value, data);
iter = util_hash_iter_next(iter);
}
}
static void util_hash_table_inc(void *k, void *v, void *d)
{
++*(size_t *)d;
}
drm_private size_t util_hash_table_count(struct util_hash_table *ht)
{
size_t count = 0;
util_hash_table_foreach(ht, util_hash_table_inc, &count);
return count;
}
drm_private void util_hash_table_destroy(struct util_hash_table *ht)
{
struct util_hash_iter iter;
struct util_hash_table_item *item;
assert(ht);
if (!ht)
return;
iter = util_hash_first_node(ht->head);
while (!util_hash_iter_is_null(iter)) {
item = (struct util_hash_table_item *)util_hash_iter_data(iter);
free(item);
iter = util_hash_iter_next(iter);
}
util_hash_delete(ht->head);
free(ht);
}

71
amdgpu/util_hash_table.h Normal file
View file

@@ -0,0 +1,71 @@
/**************************************************************************
*
* Copyright 2008 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* General purpose hash table.
*
* @author José Fonseca <jfonseca@vmware.com>
*/
#ifndef U_HASH_TABLE_H_
#define U_HASH_TABLE_H_
#include "libdrm_macros.h"
/**
* Generic purpose hash table.
*/
struct util_hash_table;
/**
* Create a hash table.
*
* @param hash hash function
* @param compare should return 0 for two equal keys.
*/
drm_private struct util_hash_table *
util_hash_table_create(unsigned (*hash)(void *key),
int (*compare)(void *key1, void *key2));
drm_private void
util_hash_table_set(struct util_hash_table *ht, void *key, void *value);
drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key);
drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key);
drm_private void util_hash_table_clear(struct util_hash_table *ht);
drm_private void util_hash_table_foreach(struct util_hash_table *ht,
void (*callback)(void *key, void *value, void *data),
void *data);
drm_private size_t util_hash_table_count(struct util_hash_table *ht);
drm_private void util_hash_table_destroy(struct util_hash_table *ht);
#endif /* U_HASH_TABLE_H_ */
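
A hedged usage sketch of this interface: hash and compare callbacks are supplied at creation time, and small integer handles can be stuffed into the key pointers (similar to how the amdgpu device code keys its buffer-handle tables in this release). The function names and the handle value 42 are placeholders.

#include <stdint.h>
#include "util_hash_table.h"

static unsigned handle_hash(void *key)
{
	return (unsigned)(uintptr_t)key;	/* key is a small integer handle */
}

static int handle_compare(void *key1, void *key2)
{
	return key1 != key2;			/* 0 means "equal" */
}

/* Sketch: create a table keyed by handle, add and remove one entry. */
static void hash_table_example(void *bo)
{
	struct util_hash_table *table;

	table = util_hash_table_create(handle_hash, handle_compare);
	if (!table)
		return;

	util_hash_table_set(table, (void *)(uintptr_t)42, bo);
	/* ... */
	util_hash_table_remove(table, (void *)(uintptr_t)42);
	util_hash_table_destroy(table);
}
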

View file

@@ -60,16 +60,16 @@ struct gralloc_handle_t {
uint32_t usage; /* android libhardware usage flags */
uint32_t stride; /* the stride in bytes */
int data_owner; /* owner of data (for validation) */
uint64_t modifier __attribute__((aligned(8))); /* buffer modifiers */
uint64_t modifier; /* buffer modifiers */
int data_owner; /* owner of data (for validation) */
union {
void *data; /* pointer to struct gralloc_gbm_bo_t */
uint64_t reserved;
} __attribute__((aligned(8)));
};
#define GRALLOC_HANDLE_VERSION 4
#define GRALLOC_HANDLE_VERSION 3
#define GRALLOC_HANDLE_MAGIC 0x60585350
#define GRALLOC_HANDLE_NUM_FDS 1
#define GRALLOC_HANDLE_NUM_INTS ( \

20
autogen.sh Executable file
View file

@@ -0,0 +1,20 @@
#! /bin/sh
srcdir=`dirname "$0"`
test -z "$srcdir" && srcdir=.
ORIGDIR=`pwd`
cd "$srcdir"
git config --local --get format.subjectPrefix >/dev/null ||
git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null
git config --local --get sendemail.to >/dev/null ||
git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null
autoreconf --force --verbose --install || exit 1
cd "$ORIGDIR" || exit $?
if test -z "$NOCONFIGURE"; then
"$srcdir"/configure "$@"
fi

605
configure.ac Normal file
View file

@@ -0,0 +1,605 @@
# Copyright 2005 Adam Jackson.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ([2.63])
AC_INIT([libdrm],
[2.4.93],
[https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
[libdrm])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_SRCDIR([Makefile.am])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])
PKG_PROG_PKG_CONFIG
# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC
m4_ifndef([XORG_MACROS_VERSION],
[m4_fatal([must install xorg-macros 1.12 or later before running autoconf/autogen])])
XORG_MACROS_VERSION(1.12)
XORG_WITH_XSLTPROC
XORG_MANPAGE_SECTIONS
AM_INIT_AUTOMAKE([1.10 foreign dist-bzip2])
# Enable quiet compiles on automake 1.11.
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
# Check for programs
AC_PROG_CC
AC_PROG_CC_C99
AC_PROG_NM
if test "x$ac_cv_prog_cc_c99" = xno; then
AC_MSG_ERROR([Building libdrm requires C99 enabled compiler])
fi
AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_FUNC_ALLOCA
save_CFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS -Werror"
AC_HEADER_MAJOR
CFLAGS="$save_CFLAGS"
AC_CHECK_HEADERS([sys/sysctl.h sys/select.h])
# Initialize libtool
LT_PREREQ([2.2])
LT_INIT([disable-static])
dnl pthread-stubs is mandatory on some BSD platforms, due to the nature of the
dnl project. Even then there's a notable issue as described in the project README
case "$host_os" in
linux* | cygwin* | darwin* | solaris* | *-gnu* | gnu* | openbsd*)
pthread_stubs_possible="no"
;;
* )
pthread_stubs_possible="yes"
;;
esac
if test "x$pthread_stubs_possible" = xyes; then
PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs >= 0.4)
AC_SUBST(PTHREADSTUBS_CFLAGS)
AC_SUBST(PTHREADSTUBS_LIBS)
fi
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
libdrmdatadir=${datadir}/libdrm
AC_SUBST(libdrmdatadir)
AC_ARG_ENABLE([udev],
[AS_HELP_STRING([--enable-udev],
[Enable support for using udev instead of mknod (default: disabled)])],
[UDEV=$enableval], [UDEV=no])
AC_ARG_ENABLE(libkms,
AS_HELP_STRING([--disable-libkms],
[Disable KMS mm abstraction library (default: auto, enabled on supported platforms)]),
[LIBKMS=$enableval], [LIBKMS=auto])
AC_ARG_ENABLE(intel,
AS_HELP_STRING([--disable-intel],
[Enable support for intel's KMS API (default: auto, enabled on x86)]),
[INTEL=$enableval], [INTEL=auto])
AC_ARG_ENABLE(radeon,
AS_HELP_STRING([--disable-radeon],
[Enable support for radeon's KMS API (default: auto)]),
[RADEON=$enableval], [RADEON=auto])
AC_ARG_ENABLE(amdgpu,
AS_HELP_STRING([--disable-amdgpu],
[Enable support for amdgpu's KMS API (default: auto)]),
[AMDGPU=$enableval], [AMDGPU=auto])
AC_ARG_ENABLE(nouveau,
AS_HELP_STRING([--disable-nouveau],
[Enable support for nouveau's KMS API (default: auto)]),
[NOUVEAU=$enableval], [NOUVEAU=auto])
AC_ARG_ENABLE(vmwgfx,
AS_HELP_STRING([--disable-vmwgfx],
[Enable support for vmwgfx's KMS API (default: yes)]),
[VMWGFX=$enableval], [VMWGFX=yes])
AC_ARG_ENABLE(omap-experimental-api,
AS_HELP_STRING([--enable-omap-experimental-api],
[Enable support for OMAP's experimental API (default: disabled)]),
[OMAP=$enableval], [OMAP=no])
AC_ARG_ENABLE(exynos-experimental-api,
AS_HELP_STRING([--enable-exynos-experimental-api],
[Enable support for EXYNOS's experimental API (default: disabled)]),
[EXYNOS=$enableval], [EXYNOS=no])
AC_ARG_ENABLE(freedreno,
AS_HELP_STRING([--disable-freedreno],
[Enable support for freedreno's KMS API (default: auto, enabled on arm)]),
[FREEDRENO=$enableval], [FREEDRENO=auto])
AC_ARG_ENABLE(freedreno-kgsl,
AS_HELP_STRING([--enable-freedreno-kgsl],
[Enable support for freedreno's to use downstream android kernel API (default: disabled)]),
[FREEDRENO_KGSL=$enableval], [FREEDRENO_KGSL=no])
AC_ARG_ENABLE(tegra-experimental-api,
AS_HELP_STRING([--enable-tegra-experimental-api],
[Enable support for Tegra's experimental API (default: disabled)]),
[TEGRA=$enableval], [TEGRA=no])
AC_ARG_ENABLE(vc4,
AS_HELP_STRING([--disable-vc4],
[Enable support for vc4's API (default: auto, enabled on arm)]),
[VC4=$enableval], [VC4=auto])
AC_ARG_ENABLE(etnaviv-experimental-api,
AS_HELP_STRING([--enable-etnaviv-experimental-api],
[Enable support for etnaviv's experimental API (default: disabled)]),
[ETNAVIV=$enableval], [ETNAVIV=no])
AC_ARG_ENABLE(install-test-programs,
AS_HELP_STRING([--enable-install-test-programs],
[Install test programs (default: no)]),
[INSTALL_TESTS=$enableval], [INSTALL_TESTS=no])
dnl ===========================================================================
dnl check compiler flags
AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
AC_MSG_CHECKING([whether $CC supports $1])
libdrm_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $1"
AC_COMPILE_IFELSE([AC_LANG_SOURCE([ ])], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
CFLAGS="$libdrm_save_CFLAGS"
if test "x$libdrm_cc_flag" = "xyes"; then
ifelse([$2], , :, [$2])
else
ifelse([$3], , :, [$3])
fi
AC_MSG_RESULT([$libdrm_cc_flag])
])
dnl We use clock_gettime to check for timeouts in drmWaitVBlank
AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
[AC_CHECK_LIB([rt], [clock_gettime], [CLOCK_LIB=-lrt],
[AC_MSG_ERROR([Couldn't find clock_gettime])])])
AC_SUBST([CLOCK_LIB])
AC_CHECK_FUNCS([open_memstream],
[AC_DEFINE([HAVE_OPEN_MEMSTREAM], 1, [Have open_memstream()])],
[AC_DEFINE([HAVE_OPEN_MEMSTREAM], 0)])
dnl Use lots of warning flags with gcc and compatible compilers
dnl Note: if you change the following variable, the cache is automatically
dnl skipped and all flags rechecked. So there's no need to do anything
dnl else. If for any reason you need to force a recheck, just change
dnl MAYBE_WARN in an ignorable way (like adding whitespace)
MAYBE_WARN="-Wall -Wextra -Werror=undef \
-Wsign-compare -Werror-implicit-function-declaration \
-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
-Wpacked -Wswitch-enum -Wmissing-format-attribute \
-Wstrict-aliasing=2 -Winit-self \
-Wdeclaration-after-statement -Wold-style-definition \
-Wno-unused-parameter \
-Wno-attributes -Wno-long-long -Winline -Wshadow \
-Wno-missing-field-initializers"
# invalidate cached value if MAYBE_WARN has changed
if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
unset libdrm_cv_warn_cflags
fi
AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
echo
WARN_CFLAGS=""
# Some warning options are not supported by all versions of
# gcc, so test all desired options against the current
# compiler.
#
# Note that there are some order dependencies
# here. Specifically, an option that disables a warning will
# have no net effect if a later option then enables that
# warnings, (perhaps implicitly). So we put some grouped
# options (-Wall and -Wextra) up front and the -Wno options
# last.
for W in $MAYBE_WARN; do
LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
done
libdrm_cv_warn_cflags=$WARN_CFLAGS
libdrm_cv_warn_maybe=$MAYBE_WARN
AC_MSG_CHECKING([which warning flags were supported])])
WARN_CFLAGS="$libdrm_cv_warn_cflags"
# Check for atomic intrinsics
AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives, [
drm_cv_atomic_primitives="none"
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
int atomic_cmpxchg(int *i, int j, int k) { return __sync_val_compare_and_swap (i, j, k); }
]],[[]])], [drm_cv_atomic_primitives="Intel"],[])
if test "x$drm_cv_atomic_primitives" = "xnone"; then
AC_CHECK_HEADER([atomic_ops.h], drm_cv_atomic_primitives="libatomic-ops")
fi
# atomic functions defined in <atomic.h> & libc on Solaris
if test "x$drm_cv_atomic_primitives" = "xnone"; then
AC_CHECK_FUNC([atomic_cas_uint], drm_cv_atomic_primitives="Solaris")
fi
])
if test "x$drm_cv_atomic_primitives" = xIntel; then
AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1,
[Enable if your compiler supports the Intel __sync_* atomic primitives])
else
AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 0)
fi
if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then
AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed])
else
AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 0)
fi
dnl Print out the appropriate message considering the value set by the
dnl respective variable in $1.
dnl $1 - value to be evaluated. Eg. $INTEL, $NOUVEAU, ...
dnl $2 - libdrm shortname. Eg. intel, freedreno, ...
dnl $3 - GPU name/brand. Eg. Intel, NVIDIA Tegra, ...
dnl $4 - Configure switch. Eg. intel, omap-experimental-api, ...
AC_DEFUN([LIBDRM_ATOMICS_NOT_FOUND_MSG], [
case "x$1" in
xyes) AC_MSG_ERROR([libdrm_$2 depends upon atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomic-ops-dev package, or, failing both of those, disable support for $3 GPUs by passing --disable-$4 to ./configure]) ;;
xauto) AC_MSG_WARN([Disabling $2. It depends on atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomic-ops-dev package.]) ;;
*) ;;
esac
])
if test "x$drm_cv_atomic_primitives" = "xnone"; then
LIBDRM_ATOMICS_NOT_FOUND_MSG($INTEL, intel, Intel, intel)
INTEL=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($RADEON, radeon, Radeon, radeon)
RADEON=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($AMDGPU, amdgpu, AMD, amdgpu)
AMDGPU=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($NOUVEAU, nouveau, NVIDIA, nouveau)
NOUVEAU=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($OMAP, omap, OMAP, omap-experimental-api)
OMAP=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($FREEDRENO, freedreno, Qualcomm Adreno, freedreno)
FREEDRENO=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($TEGRA, tegra, NVIDIA Tegra, tegra-experimental-api)
TEGRA=no
LIBDRM_ATOMICS_NOT_FOUND_MSG($ETNAVIV, etnaviv, Vivante, etnaviv-experimental-api)
ETNAVIV=no
else
if test "x$INTEL" = xauto; then
case $host_cpu in
i?86|x86_64) INTEL=yes ;;
*) INTEL=no ;;
esac
fi
if test "x$RADEON" = xauto; then
RADEON=yes
fi
if test "x$AMDGPU" = xauto; then
AMDGPU=yes
fi
if test "x$NOUVEAU" = xauto; then
NOUVEAU=yes
fi
if test "x$FREEDRENO" = xauto; then
case $host_cpu in
arm*|aarch64) FREEDRENO=yes ;;
*) FREEDRENO=no ;;
esac
fi
if test "x$VC4" = xauto; then
case $host_cpu in
arm*|aarch64) VC4=yes ;;
*) VC4=no ;;
esac
fi
fi
if test "x$INTEL" != "xno"; then
PKG_CHECK_MODULES(PCIACCESS, [pciaccess >= 0.10])
fi
AC_SUBST(PCIACCESS_CFLAGS)
AC_SUBST(PCIACCESS_LIBS)
if test "x$UDEV" = xyes; then
AC_DEFINE(UDEV, 1, [Have UDEV support])
else
AC_DEFINE(UDEV, 0)
fi
AC_CANONICAL_HOST
if test "x$LIBKMS" = xauto ; then
case $host_os in
linux*) LIBKMS="yes" ;;
freebsd* | kfreebsd*-gnu)
LIBKMS="yes" ;;
dragonfly*) LIBKMS="yes" ;;
*) LIBKMS="no" ;;
esac
fi
AM_CONDITIONAL(HAVE_LIBKMS, [test "x$LIBKMS" = xyes])
AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes])
if test "x$INTEL" = xyes; then
AC_DEFINE(HAVE_INTEL, 1, [Have intel support])
else
AC_DEFINE(HAVE_INTEL, 0)
fi
AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes])
if test "x$VMWGFX" = xyes; then
AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers])
else
AC_DEFINE(HAVE_VMWGFX, 0)
fi
AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
if test "x$NOUVEAU" = xyes; then
AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support])
else
AC_DEFINE(HAVE_NOUVEAU, 0)
fi
AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes])
AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes])
if test "x$EXYNOS" = xyes; then
AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support])
else
AC_DEFINE(HAVE_EXYNOS, 0)
fi
AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes])
if test "x$FREEDRENO_KGSL" = xyes; then
if test "x$FREEDRENO" != xyes; then
AC_MSG_ERROR([Cannot enable freedreno KGSL interface if freedreno is disabled])
fi
fi
AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes])
if test "x$FREEDRENO_KGSL" = xyes; then
AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface])
else
AC_DEFINE(HAVE_FREEDRENO_KGSL, 0)
fi
AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes])
if test "x$RADEON" = xyes; then
AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
else
AC_DEFINE(HAVE_RADEON, 0)
fi
if test "x$AMDGPU" != xno; then
# Detect cunit library
PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no])
# If pkg-config does not find cunit, check it using AC_CHECK_LIB. We
# do this because Debian (Ubuntu) lacks pkg-config file for cunit.
# fixed in 2.1-2.dfsg-3: http://anonscm.debian.org/cgit/collab-maint/cunit.git/commit/?h=debian
if test "x${have_cunit}" = "xno"; then
AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no])
if test "x${have_cunit}" = "xyes"; then
CUNIT_LIBS="-lcunit"
CUNIT_CFLAGS=""
AC_SUBST([CUNIT_LIBS])
AC_SUBST([CUNIT_CFLAGS])
fi
fi
else
have_cunit=no
fi
AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"])
AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
if test "x$AMDGPU" = xyes; then
AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
if test "x$have_cunit" = "xno"; then
AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
fi
else
AC_DEFINE(HAVE_AMDGPU, 0)
fi
AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes])
if test "x$VC4" = xyes; then
AC_DEFINE(HAVE_VC4, 1, [Have VC4 support])
else
AC_DEFINE(HAVE_VC4, 0)
fi
AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes])
AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
AC_ARG_ENABLE([cairo-tests],
[AS_HELP_STRING([--enable-cairo-tests],
[Enable support for Cairo rendering in tests (default: auto)])],
[CAIRO=$enableval], [CAIRO=auto])
if test "x$CAIRO" != xno; then
PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no])
fi
AC_MSG_CHECKING([whether to enable Cairo tests])
if test "x$CAIRO" = xauto; then
CAIRO="$HAVE_CAIRO"
fi
if test "x$CAIRO" = xyes; then
if ! test "x$HAVE_CAIRO" = xyes; then
AC_MSG_ERROR([Cairo support required but not present])
fi
AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support])
else
AC_DEFINE(HAVE_CAIRO, 0)
fi
AC_MSG_RESULT([$CAIRO])
AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])
# xsltproc for docbook manpages
AC_ARG_ENABLE([manpages],
AS_HELP_STRING([--enable-manpages], [enable manpages @<:@default=auto@:>@]),
[MANS=$enableval], [MANS=auto])
AM_CONDITIONAL([BUILD_MANPAGES], [test "x$XSLTPROC" != "x" -a "x$MANS" != "xno"])
# check for offline man-pages stylesheet
AC_MSG_CHECKING([for docbook manpages stylesheet])
MANPAGES_STYLESHEET="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"
AC_PATH_PROGS_FEATURE_CHECK([XSLTPROC_TMP], [xsltproc],
AS_IF([`"$ac_path_XSLTPROC_TMP" --nonet "$MANPAGES_STYLESHEET" > /dev/null 2>&1`],
[HAVE_MANPAGES_STYLESHEET=yes]))
if test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"; then
AC_SUBST(MANPAGES_STYLESHEET)
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
fi
AM_CONDITIONAL([HAVE_MANPAGES_STYLESHEET], [test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"])
AC_ARG_ENABLE(valgrind,
[AS_HELP_STRING([--enable-valgrind],
[Build libdrm with valgrind support (default: auto)])],
[VALGRIND=$enableval], [VALGRIND=auto])
if test "x$VALGRIND" != xno; then
PKG_CHECK_MODULES(VALGRIND, [valgrind], [have_valgrind=yes], [have_valgrind=no])
fi
AC_MSG_CHECKING([whether to enable Valgrind support])
if test "x$VALGRIND" = xauto; then
VALGRIND="$have_valgrind"
fi
if test "x$VALGRIND" = "xyes"; then
if ! test "x$have_valgrind" = xyes; then
AC_MSG_ERROR([Valgrind support required but not present])
fi
AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
else
AC_DEFINE([HAVE_VALGRIND], 0)
fi
AC_MSG_RESULT([$VALGRIND])
AC_ARG_WITH([kernel-source],
[AS_HELP_STRING([--with-kernel-source],
[specify path to linux kernel source])],
[kernel_source="$with_kernel_source"])
AC_SUBST(kernel_source)
AC_MSG_CHECKING([whether $CC supports __attribute__(("hidden"))])
AC_LINK_IFELSE([AC_LANG_PROGRAM([
int foo_hidden( void ) __attribute__((visibility("hidden")));
])], HAVE_ATTRIBUTE_VISIBILITY="yes"; AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]));
if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then
AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__(("hidden"))])
else
AC_DEFINE(HAVE_VISIBILITY, 0)
fi
CFLAGS="$CFLAGS -include config.h"
AC_SUBST(WARN_CFLAGS)
AC_CONFIG_FILES([
Makefile
data/Makefile
libkms/Makefile
libkms/libkms.pc
intel/Makefile
intel/libdrm_intel.pc
radeon/Makefile
radeon/libdrm_radeon.pc
amdgpu/Makefile
amdgpu/libdrm_amdgpu.pc
nouveau/Makefile
nouveau/libdrm_nouveau.pc
omap/Makefile
omap/libdrm_omap.pc
exynos/Makefile
exynos/libdrm_exynos.pc
freedreno/Makefile
freedreno/libdrm_freedreno.pc
tegra/Makefile
tegra/libdrm_tegra.pc
vc4/Makefile
vc4/libdrm_vc4.pc
etnaviv/Makefile
etnaviv/libdrm_etnaviv.pc
tests/Makefile
tests/modeprint/Makefile
tests/modetest/Makefile
tests/kms/Makefile
tests/kmstest/Makefile
tests/proptest/Makefile
tests/radeon/Makefile
tests/amdgpu/Makefile
tests/vbltest/Makefile
tests/exynos/Makefile
tests/tegra/Makefile
tests/nouveau/Makefile
tests/etnaviv/Makefile
tests/util/Makefile
man/Makefile
libdrm.pc])
AC_OUTPUT
echo ""
echo "$PACKAGE_STRING will be compiled with:"
echo ""
echo " libkms $LIBKMS"
echo " Intel API $INTEL"
echo " vmwgfx API $VMWGFX"
echo " Radeon API $RADEON"
echo " AMDGPU API $AMDGPU"
echo " Nouveau API $NOUVEAU"
echo " OMAP API $OMAP"
echo " EXYNOS API $EXYNOS"
echo " Freedreno API $FREEDRENO (kgsl: $FREEDRENO_KGSL)"
echo " Tegra API $TEGRA"
echo " VC4 API $VC4"
echo " Etnaviv API $ETNAVIV"
echo ""

View file

@ -1,212 +0,0 @@
drmAddBufs
drmAddContextPrivateMapping
drmAddContextTag
drmAddMap
drmAgpAcquire
drmAgpAlloc
drmAgpBase
drmAgpBind
drmAgpDeviceId
drmAgpEnable
drmAgpFree
drmAgpGetMode
drmAgpMemoryAvail
drmAgpMemoryUsed
drmAgpRelease
drmAgpSize
drmAgpUnbind
drmAgpVendorId
drmAgpVersionMajor
drmAgpVersionMinor
drmAuthMagic
drmAvailable
drmCheckModesettingSupported
drmClose
drmCloseBufferHandle
drmCloseOnce
drmCommandNone
drmCommandRead
drmCommandWrite
drmCommandWriteRead
drmCreateContext
drmCreateDrawable
drmCrtcGetSequence
drmCrtcQueueSequence
drmCtlInstHandler
drmCtlUninstHandler
drmDelContextTag
drmDestroyContext
drmDestroyDrawable
drmDevicesEqual
drmDMA
drmDropMaster
drmError
drmFinish
drmFree
drmFreeBufs
drmFreeBusid
drmFreeDevice
drmFreeDevices
drmFreeReservedContextList
drmFreeVersion
drmGetBufInfo
drmGetBusid
drmGetCap
drmGetClient
drmGetContextFlags
drmGetContextPrivateMapping
drmGetContextTag
drmGetDevice
drmGetDevice2
drmGetDeviceFromDevId
drmGetDeviceNameFromFd
drmGetDeviceNameFromFd2
drmGetDevices
drmGetDevices2
drmGetEntry
drmGetHashTable
drmGetInterruptFromBusID
drmGetLibVersion
drmGetLock
drmGetMagic
drmGetMap
drmGetNodeTypeFromDevId
drmGetNodeTypeFromFd
drmGetPrimaryDeviceNameFromFd
drmGetRenderDeviceNameFromFd
drmGetReservedContextList
drmGetStats
drmGetVersion
drmHandleEvent
drmHashCreate
drmHashDelete
drmHashDestroy
drmHashFirst
drmHashInsert
drmHashLookup
drmHashNext
drmIoctl
drmIsKMS
drmIsMaster
drmMalloc
drmMap
drmMapBufs
drmMarkBufs
drmModeAddFB
drmModeAddFB2
drmModeAddFB2WithModifiers
drmModeAtomicAddProperty
drmModeAtomicAlloc
drmModeAtomicCommit
drmModeAtomicDuplicate
drmModeAtomicFree
drmModeAtomicGetCursor
drmModeAtomicMerge
drmModeAtomicSetCursor
drmModeAttachMode
drmModeCloseFB
drmModeConnectorGetPossibleCrtcs
drmModeConnectorSetProperty
drmModeCreateDumbBuffer
drmModeCreateLease
drmModeCreatePropertyBlob
drmModeCrtcGetGamma
drmModeCrtcSetGamma
drmModeDestroyDumbBuffer
drmModeDestroyPropertyBlob
drmModeDetachMode
drmModeDirtyFB
drmModeFormatModifierBlobIterNext
drmModeFreeConnector
drmModeFreeCrtc
drmModeFreeEncoder
drmModeFreeFB
drmModeFreeFB2
drmModeFreeModeInfo
drmModeFreeObjectProperties
drmModeFreePlane
drmModeFreePlaneResources
drmModeFreeProperty
drmModeFreePropertyBlob
drmModeFreeResources
drmModeGetConnector
drmModeGetConnectorCurrent
drmModeGetConnectorTypeName
drmModeGetCrtc
drmModeGetEncoder
drmModeGetFB
drmModeGetFB2
drmModeGetLease
drmModeGetPlane
drmModeGetPlaneResources
drmModeGetProperty
drmModeGetPropertyBlob
drmModeGetResources
drmModeListLessees
drmModeMapDumbBuffer
drmModeMoveCursor
drmModeObjectGetProperties
drmModeObjectSetProperty
drmModePageFlip
drmModePageFlipTarget
drmModeRevokeLease
drmModeRmFB
drmModeSetCrtc
drmModeSetCursor
drmModeSetCursor2
drmModeSetPlane
drmMsg
drmOpen
drmOpenControl
drmOpenOnce
drmOpenOnceWithType
drmOpenRender
drmOpenWithType
drmPrimeFDToHandle
drmPrimeHandleToFD
drmRandom
drmRandomCreate
drmRandomDestroy
drmRandomDouble
drmRmMap
drmScatterGatherAlloc
drmScatterGatherFree
drmSetBusid
drmSetClientCap
drmSetContextFlags
drmSetInterfaceVersion
drmSetMaster
drmSetServerInfo
drmSLCreate
drmSLDelete
drmSLDestroy
drmSLDump
drmSLFirst
drmSLInsert
drmSLLookup
drmSLLookupNeighbors
drmSLNext
drmSwitchToContext
drmSyncobjCreate
drmSyncobjDestroy
drmSyncobjEventfd
drmSyncobjExportSyncFile
drmSyncobjFDToHandle
drmSyncobjHandleToFD
drmSyncobjImportSyncFile
drmSyncobjQuery
drmSyncobjQuery2
drmSyncobjReset
drmSyncobjSignal
drmSyncobjTimelineSignal
drmSyncobjTimelineWait
drmSyncobjTransfer
drmSyncobjWait
drmUnlock
drmUnmap
drmUnmapBufs
drmUpdateDrawableInfo
drmWaitVBlank
drmGetFormatModifierName
drmGetFormatModifierVendor
drmGetFormatName
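Editor's note: the list above is the exported core libdrm symbol set. A small, hedged usage sketch of a few of these entry points; the device path is an assumption, and the program would be built with `pkg-config --cflags --libs libdrm`:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);   /* device path is an assumption */
    if (fd < 0)
        return 1;

    drmVersionPtr ver = drmGetVersion(fd);     /* driver name and version */
    if (ver) {
        printf("driver: %s (%d.%d.%d)\n", ver->name,
               ver->version_major, ver->version_minor, ver->version_patchlevel);
        drmFreeVersion(ver);
    }

    uint64_t has_dumb = 0;
    if (drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &has_dumb) == 0)
        printf("dumb buffers: %s\n", has_dumb ? "yes" : "no");

    close(fd);
    return 0;
}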

View file

@ -1,6 +0,0 @@
prebuilt_etc {
name: "amdgpu.ids",
proprietary: true,
sub_dir: "hwdata",
src: "amdgpu.ids",
}

10
data/Android.mk Normal file
View file

@ -0,0 +1,10 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := amdgpu.ids
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hwdata
LOCAL_SRC_FILES := $(LOCAL_MODULE)
include $(BUILD_PREBUILT)

25
data/Makefile.am Normal file
View file

@ -0,0 +1,25 @@
# Copyright © 2017 Advanced Micro Devices, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
libdrmdatadir = @libdrmdatadir@
if HAVE_AMDGPU
dist_libdrmdata_DATA = amdgpu.ids
endif

View file

@ -4,697 +4,184 @@
# device_id, revision_id, product_name <-- single tab after comma
1.0.0
1114, C2, AMD Radeon 860M Graphics
1114, C3, AMD Radeon 840M Graphics
1114, D2, AMD Radeon 860M Graphics
1114, D3, AMD Radeon 840M Graphics
1309, 00, AMD Radeon R7 Graphics
130A, 00, AMD Radeon R6 Graphics
130B, 00, AMD Radeon R4 Graphics
130C, 00, AMD Radeon R7 Graphics
130D, 00, AMD Radeon R6 Graphics
130E, 00, AMD Radeon R5 Graphics
130F, 00, AMD Radeon R7 Graphics
130F, D4, AMD Radeon R7 Graphics
130F, D5, AMD Radeon R7 Graphics
130F, D6, AMD Radeon R7 Graphics
130F, D7, AMD Radeon R7 Graphics
1313, 00, AMD Radeon R7 Graphics
1313, D4, AMD Radeon R7 Graphics
1313, D5, AMD Radeon R7 Graphics
1313, D6, AMD Radeon R7 Graphics
1315, 00, AMD Radeon R5 Graphics
1315, D4, AMD Radeon R5 Graphics
1315, D5, AMD Radeon R5 Graphics
1315, D6, AMD Radeon R5 Graphics
1315, D7, AMD Radeon R5 Graphics
1316, 00, AMD Radeon R5 Graphics
1318, 00, AMD Radeon R5 Graphics
131B, 00, AMD Radeon R4 Graphics
131C, 00, AMD Radeon R7 Graphics
131D, 00, AMD Radeon R6 Graphics
1435, AE, AMD Custom GPU 0932
1506, C1, AMD Radeon 610M
1506, C2, AMD Radeon 610M
1506, C3, AMD Radeon 610M
1506, C4, AMD Radeon 610M
150E, C1, AMD Radeon 890M Graphics
150E, C4, AMD Radeon 880M Graphics
150E, C5, AMD Radeon 890M Graphics
150E, C6, AMD Radeon 890M Graphics
150E, D1, AMD Radeon 890M Graphics
150E, D2, AMD Radeon 880M Graphics
150E, D3, AMD Radeon 890M Graphics
1586, C1, Radeon 8060S Graphics
1586, C2, Radeon 8050S Graphics
1586, C4, Radeon 8050S Graphics
1586, D1, Radeon 8060S Graphics
1586, D2, Radeon 8050S Graphics
1586, D4, Radeon 8050S Graphics
1586, D5, Radeon 8040S Graphics
15BF, 00, AMD Radeon 780M Graphics
15BF, 01, AMD Radeon 760M Graphics
15BF, 02, AMD Radeon 780M Graphics
15BF, 03, AMD Radeon 760M Graphics
15BF, C1, AMD Radeon 780M Graphics
15BF, C2, AMD Radeon 780M Graphics
15BF, C3, AMD Radeon 760M Graphics
15BF, C4, AMD Radeon 780M Graphics
15BF, C5, AMD Radeon 740M Graphics
15BF, C6, AMD Radeon 780M Graphics
15BF, C7, AMD Radeon 780M Graphics
15BF, C8, AMD Radeon 760M Graphics
15BF, C9, AMD Radeon 780M Graphics
15BF, CA, AMD Radeon 740M Graphics
15BF, CB, AMD Radeon 760M Graphics
15BF, CC, AMD Radeon 740M Graphics
15BF, CD, AMD Radeon 760M Graphics
15BF, CF, AMD Radeon 780M Graphics
15BF, D0, AMD Radeon 780M Graphics
15BF, D1, AMD Radeon 780M Graphics
15BF, D2, AMD Radeon 780M Graphics
15BF, D3, AMD Radeon 780M Graphics
15BF, D4, AMD Radeon 780M Graphics
15BF, D5, AMD Radeon 760M Graphics
15BF, D6, AMD Radeon 760M Graphics
15BF, D7, AMD Radeon 780M Graphics
15BF, D8, AMD Radeon 740M Graphics
15BF, D9, AMD Radeon 780M Graphics
15BF, DA, AMD Radeon 780M Graphics
15BF, DB, AMD Radeon 760M Graphics
15BF, DC, AMD Radeon 760M Graphics
15BF, DD, AMD Radeon 780M Graphics
15BF, DE, AMD Radeon 740M Graphics
15BF, DF, AMD Radeon 760M Graphics
15BF, F0, AMD Radeon 760M Graphics
15C8, C1, AMD Radeon 740M Graphics
15C8, C2, AMD Radeon 740M Graphics
15C8, C3, AMD Radeon 740M Graphics
15C8, C4, AMD Radeon 740M Graphics
15C8, D1, AMD Radeon 740M Graphics
15C8, D2, AMD Radeon 740M Graphics
15C8, D3, AMD Radeon 740M Graphics
15C8, D4, AMD Radeon 740M Graphics
15D8, 00, AMD Radeon RX Vega 8 Graphics WS
15D8, 91, AMD Radeon Vega 3 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Radeon Vega 3 Graphics
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15D8, 93, AMD Radeon Vega 1 Graphics
15D8, A1, AMD Radeon Vega 10 Graphics
15D8, A2, AMD Radeon Vega 8 Graphics
15D8, A3, AMD Radeon Vega 6 Graphics
15D8, A4, AMD Radeon Vega 3 Graphics
15D8, B1, AMD Radeon Vega 10 Graphics
15D8, B2, AMD Radeon Vega 8 Graphics
15D8, B3, AMD Radeon Vega 6 Graphics
15D8, B4, AMD Radeon Vega 3 Graphics
15D8, C1, AMD Radeon Vega 10 Graphics
15D8, C2, AMD Radeon Vega 8 Graphics
15D8, C3, AMD Radeon Vega 6 Graphics
15D8, C4, AMD Radeon Vega 3 Graphics
15D8, C5, AMD Radeon Vega 3 Graphics
15D8, C8, AMD Radeon Vega 11 Graphics
15D8, C9, AMD Radeon Vega 8 Graphics
15D8, CA, AMD Radeon Vega 11 Graphics
15D8, CB, AMD Radeon Vega 8 Graphics
15D8, CC, AMD Radeon Vega 3 Graphics
15D8, CE, AMD Radeon Vega 3 Graphics
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, D1, AMD Radeon Vega 10 Graphics
15D8, D2, AMD Radeon Vega 8 Graphics
15D8, D3, AMD Radeon Vega 6 Graphics
15D8, D4, AMD Radeon Vega 3 Graphics
15D8, D8, AMD Radeon Vega 11 Graphics
15D8, D9, AMD Radeon Vega 8 Graphics
15D8, DA, AMD Radeon Vega 11 Graphics
15D8, DB, AMD Radeon Vega 3 Graphics
15D8, DB, AMD Radeon Vega 8 Graphics
15D8, DC, AMD Radeon Vega 3 Graphics
15D8, DD, AMD Radeon Vega 3 Graphics
15D8, DE, AMD Radeon Vega 3 Graphics
15D8, DF, AMD Radeon Vega 3 Graphics
15D8, E3, AMD Radeon Vega 3 Graphics
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 84, AMD Radeon Vega 6 Graphics
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
15DD, 86, AMD Radeon Vega 11 Graphics
15DD, 88, AMD Radeon Vega 8 Graphics
15DD, C1, AMD Radeon Vega 11 Graphics
15DD, C2, AMD Radeon Vega 8 Graphics
15DD, C3, AMD Radeon Vega 3 / 10 Graphics
15DD, C4, AMD Radeon Vega 8 Graphics
15DD, C5, AMD Radeon Vega 3 Graphics
15DD, C6, AMD Radeon Vega 11 Graphics
15DD, C8, AMD Radeon Vega 8 Graphics
15DD, C9, AMD Radeon Vega 11 Graphics
15DD, CA, AMD Radeon Vega 8 Graphics
15DD, CB, AMD Radeon Vega 3 Graphics
15DD, CC, AMD Radeon Vega 6 Graphics
15DD, CE, AMD Radeon Vega 3 Graphics
15DD, CF, AMD Radeon Vega 3 Graphics
15DD, D0, AMD Radeon Vega 10 Graphics
15DD, D1, AMD Radeon Vega 8 Graphics
15DD, D3, AMD Radeon Vega 11 Graphics
15DD, D5, AMD Radeon Vega 8 Graphics
15DD, D6, AMD Radeon Vega 11 Graphics
15DD, D7, AMD Radeon Vega 8 Graphics
15DD, D8, AMD Radeon Vega 3 Graphics
15DD, D9, AMD Radeon Vega 6 Graphics
15DD, E1, AMD Radeon Vega 3 Graphics
15DD, E2, AMD Radeon Vega 3 Graphics
163F, AE, AMD Custom GPU 0405
163F, E1, AMD Custom GPU 0405
164E, D8, AMD Radeon 610M
164E, D9, AMD Radeon 610M
164E, DA, AMD Radeon 610M
164E, DB, AMD Radeon 610M
164E, DC, AMD Radeon 610M
1681, 06, AMD Radeon 680M
1681, 07, AMD Radeon 660M
1681, 0A, AMD Radeon 680M
1681, 0B, AMD Radeon 660M
1681, C7, AMD Radeon 680M
1681, C8, AMD Radeon 680M
1681, C9, AMD Radeon 660M
1900, 01, AMD Radeon 780M Graphics
1900, 02, AMD Radeon 760M Graphics
1900, 03, AMD Radeon 780M Graphics
1900, 04, AMD Radeon 760M Graphics
1900, 05, AMD Radeon 780M Graphics
1900, 06, AMD Radeon 780M Graphics
1900, 07, AMD Radeon 760M Graphics
1900, B0, AMD Radeon 780M Graphics
1900, B1, AMD Radeon 780M Graphics
1900, B2, AMD Radeon 780M Graphics
1900, B3, AMD Radeon 780M Graphics
1900, B4, AMD Radeon 780M Graphics
1900, B5, AMD Radeon 780M Graphics
1900, B6, AMD Radeon 780M Graphics
1900, B7, AMD Radeon 760M Graphics
1900, B8, AMD Radeon 760M Graphics
1900, B9, AMD Radeon 780M Graphics
1900, BA, AMD Radeon 780M Graphics
1900, BB, AMD Radeon 780M Graphics
1900, C0, AMD Radeon 780M Graphics
1900, C1, AMD Radeon 760M Graphics
1900, C2, AMD Radeon 780M Graphics
1900, C3, AMD Radeon 760M Graphics
1900, C4, AMD Radeon 780M Graphics
1900, C5, AMD Radeon 780M Graphics
1900, C6, AMD Radeon 760M Graphics
1900, C7, AMD Radeon 780M Graphics
1900, C8, AMD Radeon 760M Graphics
1900, C9, AMD Radeon 780M Graphics
1900, CA, AMD Radeon 760M Graphics
1900, CB, AMD Radeon 780M Graphics
1900, CC, AMD Radeon 780M Graphics
1900, CD, AMD Radeon 760M Graphics
1900, CE, AMD Radeon 780M Graphics
1900, CF, AMD Radeon 760M Graphics
1900, D0, AMD Radeon 780M Graphics
1900, D1, AMD Radeon 760M Graphics
1900, D2, AMD Radeon 780M Graphics
1900, D3, AMD Radeon 760M Graphics
1900, D4, AMD Radeon 780M Graphics
1900, D5, AMD Radeon 780M Graphics
1900, D6, AMD Radeon 760M Graphics
1900, D7, AMD Radeon 780M Graphics
1900, D8, AMD Radeon 760M Graphics
1900, D9, AMD Radeon 780M Graphics
1900, DA, AMD Radeon 760M Graphics
1900, DB, AMD Radeon 780M Graphics
1900, DC, AMD Radeon 780M Graphics
1900, DD, AMD Radeon 760M Graphics
1900, DE, AMD Radeon 780M Graphics
1900, DF, AMD Radeon 760M Graphics
1900, F0, AMD Radeon 780M Graphics
1900, F1, AMD Radeon 780M Graphics
1900, F2, AMD Radeon 780M Graphics
1901, C1, AMD Radeon 740M Graphics
1901, C2, AMD Radeon 740M Graphics
1901, C3, AMD Radeon 740M Graphics
1901, C6, AMD Radeon 740M Graphics
1901, C7, AMD Radeon 740M Graphics
1901, C8, AMD Radeon 740M Graphics
1901, C9, AMD Radeon 740M Graphics
1901, CA, AMD Radeon 740M Graphics
1901, D1, AMD Radeon 740M Graphics
1901, D2, AMD Radeon 740M Graphics
1901, D3, AMD Radeon 740M Graphics
1901, D4, AMD Radeon 740M Graphics
1901, D5, AMD Radeon 740M Graphics
1901, D6, AMD Radeon 740M Graphics
1901, D7, AMD Radeon 740M Graphics
1901, D8, AMD Radeon 740M Graphics
6600, 00, AMD Radeon HD 8600 / 8700M
6600, 81, AMD Radeon R7 M370
6601, 00, AMD Radeon HD 8500M / 8700M
6604, 00, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon R7 M350
6605, 00, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon R7 M340
6606, 00, AMD Radeon HD 8790M
6607, 00, AMD Radeon R5 M240
6608, 00, AMD FirePro W2100
6610, 00, AMD Radeon R7 200 Series
6610, 81, AMD Radeon R7 350
6610, 83, AMD Radeon R5 340
6610, 87, AMD Radeon R7 200 Series
6611, 00, AMD Radeon R7 200 Series
6611, 87, AMD Radeon R7 200 Series
6613, 00, AMD Radeon R7 200 Series
6617, 00, AMD Radeon R7 240 Series
6617, 87, AMD Radeon R7 200 Series
6600, 0, AMD Radeon HD 8600/8700M
6600, 81, AMD Radeon (TM) R7 M370
6601, 0, AMD Radeon (TM) HD 8500M/8700M
6604, 0, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon (TM) R7 M350
6605, 0, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon (TM) R7 M340
6606, 0, AMD Radeon HD 8790M
6607, 0, AMD Radeon (TM) HD8530M
6608, 0, AMD FirePro W2100
6610, 0, AMD Radeon HD 8600 Series
6610, 81, AMD Radeon (TM) R7 350
6610, 83, AMD Radeon (TM) R5 340
6611, 0, AMD Radeon HD 8500 Series
6613, 0, AMD Radeon HD 8500 series
6617, C7, AMD Radeon R7 240 Series
6640, 00, AMD Radeon HD 8950
6640, 80, AMD Radeon R9 M380
6646, 00, AMD Radeon R9 M280X
6646, 80, AMD Radeon R9 M385
6646, 80, AMD Radeon R9 M470X
6647, 00, AMD Radeon R9 M200X Series
6647, 80, AMD Radeon R9 M380
6649, 00, AMD FirePro W5100
6658, 00, AMD Radeon R7 200 Series
665C, 00, AMD Radeon HD 7700 Series
665D, 00, AMD Radeon R7 200 Series
665F, 81, AMD Radeon R7 360 Series
6660, 00, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon R5 M335
6660, 83, AMD Radeon R5 M330
6663, 00, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon R5 M320
6664, 00, AMD Radeon R5 M200 Series
6665, 00, AMD Radeon R5 M230 Series
6665, 83, AMD Radeon R5 M320
6665, C3, AMD Radeon R5 M435
6666, 00, AMD Radeon R5 M200 Series
6667, 00, AMD Radeon R5 M200 Series
666F, 00, AMD Radeon HD 8500M
66A1, 02, AMD Instinct MI60 / MI50
66A1, 06, AMD Radeon Pro VII
66AF, C1, AMD Radeon VII
6780, 00, AMD FirePro W9000
6784, 00, ATI FirePro V (FireGL V) Graphics Adapter
6788, 00, ATI FirePro V (FireGL V) Graphics Adapter
678A, 00, AMD FirePro W8000
6798, 00, AMD Radeon R9 200 / HD 7900 Series
6799, 00, AMD Radeon HD 7900 Series
679A, 00, AMD Radeon HD 7900 Series
679B, 00, AMD Radeon HD 7900 Series
679E, 00, AMD Radeon HD 7800 Series
67A0, 00, AMD Radeon FirePro W9100
67A1, 00, AMD Radeon FirePro W8100
67B0, 00, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon R9 390 Series
67B1, 00, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon R9 390 Series
67B9, 00, AMD Radeon R9 200 Series
67C0, 00, AMD Radeon Pro WX 7100 Graphics
67C0, 80, AMD Radeon E9550
67C2, 01, AMD Radeon Pro V7350x2
67C2, 02, AMD Radeon Pro V7300X
67C4, 00, AMD Radeon Pro WX 7100 Graphics
67C4, 80, AMD Radeon E9560 / E9565 Graphics
67C7, 00, AMD Radeon Pro WX 5100 Graphics
67C7, 80, AMD Radeon E9390 Graphics
67D0, 01, AMD Radeon Pro V7350x2
67D0, 02, AMD Radeon Pro V7300X
67DF, C0, AMD Radeon Pro 580X
67DF, C1, AMD Radeon RX 580 Series
67DF, C2, AMD Radeon RX 570 Series
67DF, C3, AMD Radeon RX 580 Series
67DF, C4, AMD Radeon RX 480 Graphics
67DF, C5, AMD Radeon RX 470 Graphics
67DF, C6, AMD Radeon RX 570 Series
67DF, C7, AMD Radeon RX 480 Graphics
67DF, CF, AMD Radeon RX 470 Graphics
67DF, D7, AMD Radeon RX 470 Graphics
67DF, E0, AMD Radeon RX 470 Series
67DF, E1, AMD Radeon RX 590 Series
67DF, E3, AMD Radeon RX Series
67DF, E7, AMD Radeon RX 580 Series
67DF, EB, AMD Radeon Pro 580X
67DF, EF, AMD Radeon RX 570 Series
67DF, F7, AMD Radeon RX P30PH
67DF, FF, AMD Radeon RX 470 Series
67E0, 00, AMD Radeon Pro WX Series
67E3, 00, AMD Radeon Pro WX 4100
67E8, 00, AMD Radeon Pro WX Series
67E8, 01, AMD Radeon Pro WX Series
67E8, 80, AMD Radeon E9260 Graphics
67EB, 00, AMD Radeon Pro V5300X
67EF, C0, AMD Radeon RX Graphics
67EF, C1, AMD Radeon RX 460 Graphics
67EF, C2, AMD Radeon Pro Series
67EF, C3, AMD Radeon RX Series
67EF, C5, AMD Radeon RX 460 Graphics
67EF, C7, AMD Radeon RX Graphics
67EF, CF, AMD Radeon RX 460 Graphics
67EF, E0, AMD Radeon RX 560 Series
67EF, E1, AMD Radeon RX Series
67EF, E2, AMD Radeon RX 560X
67EF, E3, AMD Radeon RX Series
67EF, E5, AMD Radeon RX 560 Series
67EF, E7, AMD Radeon RX 560 Series
67EF, EF, AMD Radeon 550 Series
67EF, FF, AMD Radeon RX 460 Graphics
67FF, C0, AMD Radeon Pro 465
67FF, C1, AMD Radeon RX 560 Series
67FF, CF, AMD Radeon RX 560 Series
67FF, EF, AMD Radeon RX 560 Series
67FF, FF, AMD Radeon RX 550 Series
6800, 00, AMD Radeon HD 7970M
6801, 00, AMD Radeon HD 8970M
6806, 00, AMD Radeon R9 M290X
6808, 00, AMD FirePro W7000
6808, 00, ATI FirePro V (FireGL V) Graphics Adapter
6809, 00, ATI FirePro W5000
6810, 00, AMD Radeon R9 200 Series
6810, 81, AMD Radeon R9 370 Series
6811, 00, AMD Radeon R9 200 Series
6811, 81, AMD Radeon R7 370 Series
6818, 00, AMD Radeon HD 7800 Series
6819, 00, AMD Radeon HD 7800 Series
6820, 00, AMD Radeon R9 M275X
6820, 81, AMD Radeon R9 M375
6820, 83, AMD Radeon R9 M375X
6821, 00, AMD Radeon R9 M200X Series
6821, 83, AMD Radeon R9 M370X
6821, 87, AMD Radeon R7 M380
6822, 00, AMD Radeon E8860
6823, 00, AMD Radeon R9 M200X Series
6825, 00, AMD Radeon HD 7800M Series
6826, 00, AMD Radeon HD 7700M Series
6827, 00, AMD Radeon HD 7800M Series
6828, 00, AMD FirePro W600
682B, 00, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon R9 M360
682C, 00, AMD FirePro W4100
682D, 00, AMD Radeon HD 7700M Series
682F, 00, AMD Radeon HD 7700M Series
6830, 00, AMD Radeon 7800M Series
6831, 00, AMD Radeon 7700M Series
6835, 00, AMD Radeon R7 Series / HD 9000 Series
6837, 00, AMD Radeon HD 7700 Series
683D, 00, AMD Radeon HD 7700 Series
683F, 00, AMD Radeon HD 7700 Series
684C, 00, ATI FirePro V (FireGL V) Graphics Adapter
6860, 00, AMD Radeon Instinct MI25
6860, 01, AMD Radeon Instinct MI25
6860, 02, AMD Radeon Instinct MI25
6860, 03, AMD Radeon Pro V340
6860, 04, AMD Radeon Instinct MI25x2
6860, 07, AMD Radeon Pro V320
6861, 00, AMD Radeon Pro WX 9100
6862, 00, AMD Radeon Pro SSG
6863, 00, AMD Radeon Vega Frontier Edition
6864, 03, AMD Radeon Pro V340
6864, 04, AMD Radeon Instinct MI25x2
6864, 05, AMD Radeon Pro V340
6868, 00, AMD Radeon Pro WX 8200
686C, 00, AMD Radeon Instinct MI25 MxGPU
686C, 01, AMD Radeon Instinct MI25 MxGPU
686C, 02, AMD Radeon Instinct MI25 MxGPU
686C, 03, AMD Radeon Pro V340 MxGPU
686C, 04, AMD Radeon Instinct MI25x2 MxGPU
686C, 05, AMD Radeon Pro V340L MxGPU
686C, 06, AMD Radeon Instinct MI25 MxGPU
687F, 01, AMD Radeon RX Vega
687F, C0, AMD Radeon RX Vega
687F, C1, AMD Radeon RX Vega
687F, C3, AMD Radeon RX Vega
687F, C7, AMD Radeon RX Vega
6900, 00, AMD Radeon R7 M260
6900, 81, AMD Radeon R7 M360
6900, 83, AMD Radeon R7 M340
6900, C1, AMD Radeon R5 M465 Series
6900, C3, AMD Radeon R5 M445 Series
6900, D1, AMD Radeon 530 Series
6900, D3, AMD Radeon 530 Series
6901, 00, AMD Radeon R5 M255
6902, 00, AMD Radeon Series
6907, 00, AMD Radeon R5 M255
6907, 87, AMD Radeon R5 M315
6920, 00, AMD Radeon R9 M395X
6920, 01, AMD Radeon R9 M390X
6921, 00, AMD Radeon R9 M390X
6929, 00, AMD FirePro S7150
6929, 01, AMD FirePro S7100X
692B, 00, AMD FirePro W7100
6938, 00, AMD Radeon R9 200 Series
6640, 0, AMD Radeon HD 8950
6640, 80, AMD Radeon (TM) R9 M380
6646, 0, AMD Radeon R9 M280X
6646, 80, AMD Radeon (TM) R9 M470X
6647, 0, AMD Radeon R9 M270X
6647, 80, AMD Radeon (TM) R9 M380
6649, 0, AMD FirePro W5100
6658, 0, AMD Radeon R7 200 Series
665C, 0, AMD Radeon HD 7700 Series
665D, 0, AMD Radeon R7 200 Series
665F, 81, AMD Radeon (TM) R7 300 Series
6660, 0, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon (TM) R5 M335
6660, 83, AMD Radeon (TM) R5 M330
6663, 0, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon (TM) R5 M320
6664, 0, AMD Radeon R5 M200 Series
6665, 0, AMD Radeon R5 M200 Series
6665, 83, AMD Radeon (TM) R5 M320
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
6798, 0, AMD Radeon HD 7900 Series
679A, 0, AMD Radeon HD 7900 Series
679B, 0, AMD Radeon HD 7900 Series
679E, 0, AMD Radeon HD 7800 Series
67A0, 0, AMD Radeon FirePro W9100
67A1, 0, AMD Radeon FirePro W8100
67B0, 0, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon (TM) R9 390 Series
67B1, 0, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon (TM) R9 390 Series
67B9, 0, AMD Radeon R9 200 Series
67DF, C1, Radeon RX 580 Series
67DF, C2, Radeon RX 570 Series
67DF, C3, Radeon RX 580 Series
67DF, C4, AMD Radeon (TM) RX 480 Graphics
67DF, C5, AMD Radeon (TM) RX 470 Graphics
67DF, C6, Radeon RX 570 Series
67DF, C7, AMD Radeon (TM) RX 480 Graphics
67DF, CF, AMD Radeon (TM) RX 470 Graphics
67DF, D7, Radeon(TM) RX 470 Graphics
67DF, E3, Radeon RX Series
67DF, E7, Radeon RX 580 Series
67DF, EF, Radeon RX 570 Series
67C2, 01, AMD Radeon (TM) Pro V7350x2
67C2, 02, AMD Radeon (TM) Pro V7300X
67C4, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67C7, 00, AMD Radeon (TM) Pro WX 5100 Graphics
67C0, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67D0, 01, AMD Radeon (TM) Pro V7350x2
67D0, 02, AMD Radeon (TM) Pro V7300X
67E0, 00, AMD Radeon (TM) Pro WX Series
67E3, 00, AMD Radeon (TM) Pro WX 4100
67E8, 00, AMD Radeon (TM) Pro WX Series
67E8, 01, AMD Radeon (TM) Pro WX Series
67E8, 80, AMD Radeon (TM) E9260 Graphics
67EB, 00, AMD Radeon (TM) Pro V5300X
67EF, C0, AMD Radeon (TM) RX Graphics
67EF, C1, AMD Radeon (TM) RX 460 Graphics
67EF, C3, Radeon RX Series
67EF, C5, AMD Radeon (TM) RX 460 Graphics
67EF, C7, AMD Radeon (TM) RX Graphics
67EF, CF, AMD Radeon (TM) RX 460 Graphics
67EF, E0, Radeon RX 560 Series
67EF, E1, Radeon RX Series
67EF, E3, Radeon RX Series
67EF, E5, Radeon RX 560 Series
67EF, EF, AMD Radeon (TM) RX Graphics
67EF, FF, Radeon(TM) RX 460 Graphics
67FF, C0, AMD Radeon (TM) RX Graphics
67FF, C1, AMD Radeon (TM) RX Graphics
67FF, CF, Radeon RX 560 Series
67FF, EF, Radeon RX 560 Series
67FF, FF, Radeon RX 550 Series
6800, 0, AMD Radeon HD 7970M
6801, 0, AMD Radeon(TM) HD8970M
6808, 0, ATI FirePro V(FireGL V) Graphics Adapter
6809, 0, ATI FirePro V(FireGL V) Graphics Adapter
6810, 0, AMD Radeon(TM) HD 8800 Series
6810, 81, AMD Radeon (TM) R7 370 Series
6811, 0, AMD Radeon(TM) HD8800 Series
6811, 81, AMD Radeon (TM) R7 300 Series
6818, 0, AMD Radeon HD 7800 Series
6819, 0, AMD Radeon HD 7800 Series
6820, 0, AMD Radeon HD 8800M Series
6820, 81, AMD Radeon (TM) R9 M375
6820, 83, AMD Radeon (TM) R9 M375X
6821, 0, AMD Radeon HD 8800M Series
6821, 87, AMD Radeon (TM) R7 M380
6821, 83, AMD Radeon R9 (TM) M370X
6822, 0, AMD Radeon E8860
6823, 0, AMD Radeon HD 8800M Series
6825, 0, AMD Radeon HD 7800M Series
6827, 0, AMD Radeon HD 7800M Series
6828, 0, ATI FirePro V(FireGL V) Graphics Adapter
682B, 0, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon (TM) R9 M360
682C, 0, AMD FirePro W4100
682D, 0, AMD Radeon HD 7700M Series
682F, 0, AMD Radeon HD 7700M Series
6835, 0, AMD Radeon R7 Series / HD 9000 Series
6837, 0, AMD Radeon HD7700 Series
683D, 0, AMD Radeon HD 7700 Series
683F, 0, AMD Radeon HD 7700 Series
6860, 00, Radeon Instinct MI25
6860, 01, Radeon Pro V320
6860, 02, Radeon Instinct MI25
6860, 03, Radeon Pro V340
6860, 04, Radeon Instinct MI25x2
6861, 00, Radeon(TM) Pro WX9100
6862, 00, Radeon Pro SSG
6863, 00, Radeon Vega Frontier Edition
6864, 03, Radeon Pro V340
6864, 04, Instinct MI25x2
6868, 00, Radeon(TM) Pro WX8100
686C, 00, GLXT (Radeon Instinct MI25) MxGPU VFID
686C, 01, GLXT (Radeon Pro V320) MxGPU
686C, 02, GLXT (Radeon Instinct MI25) MxGPU
686C, 03, GLXT (Radeon Pro V340) MxGPU
686C, 04, GLXT (Radeon Instinct MI25x2) MxGPU
687F, C0, Radeon RX Vega
687F, C1, Radeon RX Vega
687F, C3, Radeon RX Vega
6900, 0, AMD Radeon R7 M260
6900, 81, AMD Radeon (TM) R7 M360
6900, 83, AMD Radeon (TM) R7 M340
6901, 0, AMD Radeon R5 M255
6907, 0, AMD Radeon R5 M255
6907, 87, AMD Radeon (TM) R5 M315
6920, 0, AMD RADEON R9 M395X
6920, 1, AMD RADEON R9 M390X
6921, 0, AMD Radeon R9 M295X
6929, 0, AMD FirePro S7150
692B, 0, AMD FirePro W7100
6938, 0, AMD Radeon R9 200 Series
6938, F0, AMD Radeon R9 200 Series
6938, F1, AMD Radeon R9 380 Series
6939, 00, AMD Radeon R9 200 Series
6938, F1, AMD Radeon (TM) R9 380 Series
6939, F0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon R9 380 Series
694C, C0, AMD Radeon RX Vega M GH Graphics
694E, C0, AMD Radeon RX Vega M GL Graphics
6980, 00, AMD Radeon Pro WX 3100
6981, 00, AMD Radeon Pro WX 3200 Series
6981, 01, AMD Radeon Pro WX 3200 Series
6981, 10, AMD Radeon Pro WX 3200 Series
6985, 00, AMD Radeon Pro WX 3100
6986, 00, AMD Radeon Pro WX 2100
6939, 0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon (TM) R9 380 Series
6980, 00, Radeon Pro WX3100
6985, 00, AMD Radeon Pro WX3100
6987, 80, AMD Embedded Radeon E9171
6987, C0, AMD Radeon 550X Series
6987, C1, AMD Radeon RX 640
6987, C3, AMD Radeon 540X Series
6987, C7, AMD Radeon 540
6995, 00, AMD Radeon Pro WX 2100
6997, 00, AMD Radeon Pro WX 2100
6995, 00, AMD Radeon Pro WX2100
6997, 00, Radeon Pro WX2100
699F, 81, AMD Embedded Radeon E9170 Series
699F, C0, AMD Radeon 500 Series
699F, C1, AMD Radeon 540 Series
699F, C3, AMD Radeon 500 Series
699F, C7, AMD Radeon RX 550 / 550 Series
699F, C9, AMD Radeon 540
6FDF, E7, AMD Radeon RX 590 GME
6FDF, EF, AMD Radeon RX 580 2048SP
7300, C1, AMD FirePro S9300 x2
7300, C8, AMD Radeon R9 Fury Series
7300, C9, AMD Radeon Pro Duo
7300, CA, AMD Radeon R9 Fury Series
7300, CB, AMD Radeon R9 Fury Series
7312, 00, AMD Radeon Pro W5700
731E, C6, AMD Radeon RX 5700XTB
731E, C7, AMD Radeon RX 5700B
731F, C0, AMD Radeon RX 5700 XT 50th Anniversary
731F, C1, AMD Radeon RX 5700 XT
731F, C2, AMD Radeon RX 5600M
731F, C3, AMD Radeon RX 5700M
731F, C4, AMD Radeon RX 5700
731F, C5, AMD Radeon RX 5700 XT
731F, CA, AMD Radeon RX 5600 XT
731F, CB, AMD Radeon RX 5600 OEM
7340, C1, AMD Radeon RX 5500M
7340, C3, AMD Radeon RX 5300M
7340, C5, AMD Radeon RX 5500 XT
7340, C7, AMD Radeon RX 5500
7340, C9, AMD Radeon RX 5500XTB
7340, CF, AMD Radeon RX 5300
7341, 00, AMD Radeon Pro W5500
7347, 00, AMD Radeon Pro W5500M
7360, 41, AMD Radeon Pro 5600M
7360, C3, AMD Radeon Pro V520
7362, C1, AMD Radeon Pro V540
7362, C3, AMD Radeon Pro V520
738C, 01, AMD Instinct MI100
73A1, 00, AMD Radeon Pro V620
73A3, 00, AMD Radeon Pro W6800
73A5, C0, AMD Radeon RX 6950 XT
73AE, 00, AMD Radeon Pro V620 MxGPU
73AF, C0, AMD Radeon RX 6900 XT
73BF, C0, AMD Radeon RX 6900 XT
73BF, C1, AMD Radeon RX 6800 XT
73BF, C3, AMD Radeon RX 6800
73DF, C0, AMD Radeon RX 6750 XT
73DF, C1, AMD Radeon RX 6700 XT
73DF, C2, AMD Radeon RX 6800M
73DF, C3, AMD Radeon RX 6800M
73DF, C5, AMD Radeon RX 6700 XT
73DF, CF, AMD Radeon RX 6700M
73DF, D5, AMD Radeon RX 6750 GRE 12GB
73DF, D7, AMD TDC-235
73DF, DF, AMD Radeon RX 6700
73DF, E5, AMD Radeon RX 6750 GRE 12GB
73DF, FF, AMD Radeon RX 6700
73E0, 00, AMD Radeon RX 6600M
73E1, 00, AMD Radeon Pro W6600M
73E3, 00, AMD Radeon Pro W6600
73EF, C0, AMD Radeon RX 6800S
73EF, C1, AMD Radeon RX 6650 XT
73EF, C2, AMD Radeon RX 6700S
73EF, C3, AMD Radeon RX 6650M
73EF, C4, AMD Radeon RX 6650M XT
73FF, C1, AMD Radeon RX 6600 XT
73FF, C3, AMD Radeon RX 6600M
73FF, C7, AMD Radeon RX 6600
73FF, CB, AMD Radeon RX 6600S
73FF, CF, AMD Radeon RX 6600 LE
73FF, DF, AMD Radeon RX 6750 GRE 10GB
7408, 00, AMD Instinct MI250X
740C, 01, AMD Instinct MI250X / MI250
740F, 02, AMD Instinct MI210
7421, 00, AMD Radeon Pro W6500M
7422, 00, AMD Radeon Pro W6400
7423, 00, AMD Radeon Pro W6300M
7423, 01, AMD Radeon Pro W6300
7424, 00, AMD Radeon RX 6300
743F, C1, AMD Radeon RX 6500 XT
743F, C3, AMD Radeon RX 6500
743F, C3, AMD Radeon RX 6500M
743F, C7, AMD Radeon RX 6400
743F, C8, AMD Radeon RX 6500M
743F, CC, AMD Radeon 6550S
743F, CE, AMD Radeon RX 6450M
743F, CF, AMD Radeon RX 6300M
743F, D3, AMD Radeon RX 6550M
743F, D7, AMD Radeon RX 6400
7448, 00, AMD Radeon Pro W7900
7449, 00, AMD Radeon Pro W7800 48GB
744A, 00, AMD Radeon Pro W7900 Dual Slot
744B, 00, AMD Radeon Pro W7900D
744C, C8, AMD Radeon RX 7900 XTX
744C, CC, AMD Radeon RX 7900 XT
744C, CE, AMD Radeon RX 7900 GRE
744C, CF, AMD Radeon RX 7900M
745E, CC, AMD Radeon Pro W7800
7460, 00, AMD Radeon Pro V710
7461, 00, AMD Radeon Pro V710 MxGPU
7470, 00, AMD Radeon Pro W7700
747E, C8, AMD Radeon RX 7800 XT
747E, D8, AMD Radeon RX 7800M
747E, DB, AMD Radeon RX 7700
747E, FF, AMD Radeon RX 7700 XT
7480, 00, AMD Radeon Pro W7600
7480, C0, AMD Radeon RX 7600 XT
7480, C1, AMD Radeon RX 7700S
7480, C2, AMD Radeon RX 7650 GRE
7480, C3, AMD Radeon RX 7600S
7480, C7, AMD Radeon RX 7600M XT
7480, CF, AMD Radeon RX 7600
7481, C7, AMD Steam Machine
7483, CF, AMD Radeon RX 7600M
7489, 00, AMD Radeon Pro W7500
7499, 00, AMD Radeon Pro W7400
7499, C0, AMD Radeon RX 7400
7499, C1, AMD Radeon RX 7300
74A0, 00, AMD Instinct MI300A
74A1, 00, AMD Instinct MI300X
74A2, 00, AMD Instinct MI308X
74A5, 00, AMD Instinct MI325X
74A8, 00, AMD Instinct MI308X HF
74A9, 00, AMD Instinct MI300X HF
74B5, 00, AMD Instinct MI300X VF
74B6, 00, AMD Instinct MI308X
74BD, 00, AMD Instinct MI300X HF
7550, C0, AMD Radeon RX 9070 XT
7550, C2, AMD Radeon RX 9070 GRE
7550, C3, AMD Radeon RX 9070
7551, C0, AMD Radeon AI PRO R9700
7590, C0, AMD Radeon RX 9060 XT
7590, C7, AMD Radeon RX 9060
75A0, C0, AMD Instinct MI350X
75A3, C0, AMD Instinct MI355X
75B0, C0, AMD Instinct MI350X VF
75B3, C0, AMD Instinct MI355X VF
9830, 00, AMD Radeon HD 8400 / R3 Series
9831, 00, AMD Radeon HD 8400E
9832, 00, AMD Radeon HD 8330
9833, 00, AMD Radeon HD 8330E
9834, 00, AMD Radeon HD 8210
9835, 00, AMD Radeon HD 8210E
9836, 00, AMD Radeon HD 8200 / R3 Series
9837, 00, AMD Radeon HD 8280E
9838, 00, AMD Radeon HD 8200 / R3 series
9839, 00, AMD Radeon HD 8180
983D, 00, AMD Radeon HD 8250
9850, 00, AMD Radeon R3 Graphics
9850, 03, AMD Radeon R3 Graphics
9850, 40, AMD Radeon R2 Graphics
9850, 45, AMD Radeon R3 Graphics
9851, 00, AMD Radeon R4 Graphics
9851, 01, AMD Radeon R5E Graphics
9851, 05, AMD Radeon R5 Graphics
9851, 06, AMD Radeon R5E Graphics
9851, 40, AMD Radeon R4 Graphics
9851, 45, AMD Radeon R5 Graphics
9852, 00, AMD Radeon R2 Graphics
9852, 40, AMD Radeon E1 Graphics
9853, 00, AMD Radeon R2 Graphics
9853, 01, AMD Radeon R4E Graphics
9853, 03, AMD Radeon R2 Graphics
9853, 05, AMD Radeon R1E Graphics
9853, 06, AMD Radeon R1E Graphics
9853, 07, AMD Radeon R1E Graphics
9853, 08, AMD Radeon R1E Graphics
9853, 40, AMD Radeon R2 Graphics
9854, 00, AMD Radeon R3 Graphics
9854, 01, AMD Radeon R3E Graphics
9854, 02, AMD Radeon R3 Graphics
9854, 05, AMD Radeon R2 Graphics
9854, 06, AMD Radeon R4 Graphics
9854, 07, AMD Radeon R3 Graphics
9855, 02, AMD Radeon R6 Graphics
9855, 05, AMD Radeon R4 Graphics
9856, 00, AMD Radeon R2 Graphics
9856, 01, AMD Radeon R2E Graphics
9856, 02, AMD Radeon R2 Graphics
9856, 05, AMD Radeon R1E Graphics
9856, 06, AMD Radeon R2 Graphics
9856, 07, AMD Radeon R1E Graphics
9856, 08, AMD Radeon R1E Graphics
9856, 13, AMD Radeon R1E Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 88, AMD Radeon R7E Graphics
9874, 89, AMD Radeon R6E Graphics
699F, C0, Radeon 500 Series
699F, C3, Radeon 500 Series
699F, C7, Radeon RX 550 Series
7300, C1, AMD FirePro (TM) S9300 x2
7300, C8, AMD Radeon (TM) R9 Fury Series
7300, C9, Radeon (TM) Pro Duo
7300, CB, AMD Radeon (TM) R9 Fury Series
7300, CA, AMD Radeon (TM) R9 Fury Series
9874, C4, AMD Radeon R7 Graphics
9874, C5, AMD Radeon R6 Graphics
9874, C6, AMD Radeon R6 Graphics
9874, C7, AMD Radeon R5 Graphics
9874, C8, AMD Radeon R7 Graphics
9874, C9, AMD Radeon R7 Graphics
9874, CA, AMD Radeon R5 Graphics
9874, CB, AMD Radeon R5 Graphics
9874, CC, AMD Radeon R7 Graphics
9874, CD, AMD Radeon R7 Graphics
9874, CE, AMD Radeon R5 Graphics
9874, E1, AMD Radeon R7 Graphics
9874, E2, AMD Radeon R7 Graphics
9874, E3, AMD Radeon R7 Graphics
9874, E4, AMD Radeon R7 Graphics
9874, E5, AMD Radeon R5 Graphics
9874, E6, AMD Radeon R5 Graphics
98E4, 80, AMD Radeon R5E Graphics
98E4, 81, AMD Radeon R4E Graphics
98E4, 83, AMD Radeon R2E Graphics
98E4, 84, AMD Radeon R2E Graphics
98E4, 86, AMD Radeon R1E Graphics
98E4, C0, AMD Radeon R4 Graphics
98E4, C1, AMD Radeon R5 Graphics
98E4, C2, AMD Radeon R4 Graphics
98E4, C4, AMD Radeon R5 Graphics
98E4, C6, AMD Radeon R5 Graphics
98E4, C8, AMD Radeon R4 Graphics
98E4, C9, AMD Radeon R4 Graphics
98E4, CA, AMD Radeon R5 Graphics
98E4, D0, AMD Radeon R2 Graphics
98E4, D1, AMD Radeon R2 Graphics
98E4, D2, AMD Radeon R2 Graphics
98E4, D4, AMD Radeon R2 Graphics
98E4, D9, AMD Radeon R5 Graphics
98E4, DA, AMD Radeon R5 Graphics
98E4, DB, AMD Radeon R3 Graphics
98E4, E1, AMD Radeon R3 Graphics
98E4, E2, AMD Radeon R3 Graphics
98E4, E9, AMD Radeon R4 Graphics
98E4, EA, AMD Radeon R4 Graphics
98E4, EB, AMD Radeon R3 Graphics
98E4, EB, AMD Radeon R4 Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
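Editor's note: the header of amdgpu.ids describes the line format as "device_id, revision_id, product_name" with a single tab after each comma. A minimal, hypothetical parsing sketch (not the libdrm_amdgpu implementation; the install path is an assumption):

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/usr/share/libdrm/amdgpu.ids", "r");  /* assumed install path */
    char line[256];

    if (!f)
        return 1;

    while (fgets(line, sizeof(line), f)) {
        unsigned int dev, rev;
        char name[200];

        if (line[0] == '#')   /* comment lines */
            continue;
        /* hex ids separated by ",<tab>"; the version line fails the match and is skipped */
        if (sscanf(line, "%x, %x, %199[^\n]", &dev, &rev, name) == 3)
            printf("0x%04X rev 0x%02X -> %s\n", dev, rev, name);
    }

    fclose(f);
    return 0;
}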

View file

@ -1,11 +0,0 @@
build = ["Android.sources.bp"]
cc_library_shared {
name: "libdrm_etnaviv",
defaults: [
"libdrm_defaults",
"libdrm_etnaviv_sources",
],
vendor: true,
shared_libs: ["libdrm"],
}

14
etnaviv/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_ETNAVIV_FILES, LIBDRM_ETNAVIV_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_etnaviv
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_ETNAVIV_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

View file

@ -1,13 +0,0 @@
// Autogenerated with Android.sources.bp.mk
cc_defaults {
name: "libdrm_etnaviv_sources",
srcs: [
"etnaviv_device.c",
"etnaviv_gpu.c",
"etnaviv_bo.c",
"etnaviv_bo_cache.c",
"etnaviv_pipe.c",
"etnaviv_cmd_stream.c",
],
}

26
etnaviv/Makefile.am Normal file
View file

@ -0,0 +1,26 @@
include Makefile.sources
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir) \
$(PTHREADSTUBS_CFLAGS) \
-I$(top_srcdir)/include/drm
libdrm_etnaviv_ladir = $(libdir)
libdrm_etnaviv_la_LTLIBRARIES = libdrm_etnaviv.la
libdrm_etnaviv_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_etnaviv_la_LIBADD = \
../libdrm.la \
@PTHREADSTUBS_LIBS@ \
@CLOCK_LIB@
libdrm_etnaviv_la_SOURCES = $(LIBDRM_ETNAVIV_FILES)
libdrm_etnavivincludedir = ${includedir}/libdrm
libdrm_etnavivinclude_HEADERS = $(LIBDRM_ETNAVIV_H_FILES)
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm_etnaviv.pc
TESTS = etnaviv-symbol-check
EXTRA_DIST = $(TESTS)

13
etnaviv/Makefile.sources Normal file
View file

@ -0,0 +1,13 @@
LIBDRM_ETNAVIV_FILES := \
etnaviv_device.c \
etnaviv_gpu.c \
etnaviv_bo.c \
etnaviv_bo_cache.c \
etnaviv_perfmon.c \
etnaviv_pipe.c \
etnaviv_cmd_stream.c \
etnaviv_drm.h \
etnaviv_priv.h
LIBDRM_ETNAVIV_H_FILES := \
etnaviv_drmif.h

View file

@ -1,3 +1,15 @@
#!/bin/bash
# The following symbols (past the first five) are taken from the public headers.
# A list of the latter should be available in Makefile.sources/LIBDRM_ETNAVIV_H_FILES
FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_etnaviv.so} | awk '{print $3}'| while read func; do
( grep -q "^$func$" || echo $func ) <<EOF
__bss_start
_edata
_end
_fini
_init
etna_device_new
etna_device_new_dup
etna_device_ref
@ -11,6 +23,7 @@ etna_pipe_del
etna_pipe_wait
etna_pipe_wait_ns
etna_bo_new
etna_bo_from_handle
etna_bo_from_name
etna_bo_from_dmabuf
etna_bo_ref
@ -34,3 +47,8 @@ etna_perfmon_create
etna_perfmon_del
etna_perfmon_get_dom_by_name
etna_perfmon_get_sig_by_name
EOF
done)
test ! -n "$FUNCS" || echo $FUNCS
test ! -n "$FUNCS"

View file

@ -48,8 +48,12 @@ drm_private void bo_del(struct etna_bo *bo)
drmHashDelete(bo->dev->name_table, bo->name);
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
drmHashDelete(bo->dev->handle_table, bo->handle);
drmCloseBufferHandle(bo->dev->fd, bo->handle);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
free(bo);
@ -78,7 +82,12 @@ static struct etna_bo *bo_from_handle(struct etna_device *dev,
struct etna_bo *bo = calloc(sizeof(*bo), 1);
if (!bo) {
drmCloseBufferHandle(dev->fd, handle);
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}
@ -95,7 +104,7 @@ static struct etna_bo *bo_from_handle(struct etna_device *dev,
}
/* allocate a new (un-tiled) buffer object */
drm_public struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
uint32_t flags)
{
struct etna_bo *bo;
@ -122,7 +131,7 @@ drm_public struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
return bo;
}
drm_public struct etna_bo *etna_bo_ref(struct etna_bo *bo)
struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
atomic_inc(&bo->refcnt);
@ -150,8 +159,7 @@ static int get_buffer_info(struct etna_bo *bo)
}
/* import a buffer object from DRI2 name */
drm_public struct etna_bo *etna_bo_from_name(struct etna_device *dev,
uint32_t name)
struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name)
{
struct etna_bo *bo;
struct drm_gem_open req = {
@ -188,7 +196,7 @@ out_unlock:
* fd so caller should close() the fd when it is otherwise done
* with it (even if it is still using the 'struct etna_bo *')
*/
drm_public struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
struct etna_bo *bo;
int ret, size;
@ -223,7 +231,7 @@ out_unlock:
}
/* destroy a buffer object */
drm_public void etna_bo_del(struct etna_bo *bo)
void etna_bo_del(struct etna_bo *bo)
{
struct etna_device *dev = bo->dev;
@ -245,7 +253,7 @@ out:
}
/* get the global flink/DRI2 buffer name */
drm_public int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
if (!bo->name) {
struct drm_gem_flink req = {
@ -269,7 +277,7 @@ drm_public int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
return 0;
}
drm_public uint32_t etna_bo_handle(struct etna_bo *bo)
uint32_t etna_bo_handle(struct etna_bo *bo)
{
return bo->handle;
}
@ -277,7 +285,7 @@ drm_public uint32_t etna_bo_handle(struct etna_bo *bo)
/* caller owns the dmabuf fd that is returned and is responsible
* to close() it when done
*/
drm_public int etna_bo_dmabuf(struct etna_bo *bo)
int etna_bo_dmabuf(struct etna_bo *bo)
{
int ret, prime_fd;
@ -293,12 +301,12 @@ drm_public int etna_bo_dmabuf(struct etna_bo *bo)
return prime_fd;
}
drm_public uint32_t etna_bo_size(struct etna_bo *bo)
uint32_t etna_bo_size(struct etna_bo *bo)
{
return bo->size;
}
drm_public void *etna_bo_map(struct etna_bo *bo)
void *etna_bo_map(struct etna_bo *bo)
{
if (!bo->map) {
if (!bo->offset) {
@ -316,7 +324,7 @@ drm_public void *etna_bo_map(struct etna_bo *bo)
return bo->map;
}
drm_public int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
struct drm_etnaviv_gem_cpu_prep req = {
.handle = bo->handle,
@ -329,7 +337,7 @@ drm_public int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
&req, sizeof(req));
}
drm_public void etna_bo_cpu_fini(struct etna_bo *bo)
void etna_bo_cpu_fini(struct etna_bo *bo)
{
struct drm_etnaviv_gem_cpu_fini req = {
.handle = bo->handle,
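Editor's note: the comments in etnaviv_bo.c above describe the buffer-object lifecycle. A hedged usage sketch of that flow, assuming the DRM_ETNA_GEM_CACHE_WC and DRM_ETNA_PREP_WRITE flags from etnaviv_drmif.h and a render-node path; error handling is trimmed to the essentials:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <etnaviv_drmif.h>

int main(void)
{
    int fd = open("/dev/dri/renderD128", O_RDWR);   /* render node path is an assumption */
    struct etna_device *dev = fd >= 0 ? etna_device_new(fd) : NULL;
    struct etna_bo *bo = dev ? etna_bo_new(dev, 4096, DRM_ETNA_GEM_CACHE_WC) : NULL;

    if (bo) {
        void *ptr = etna_bo_map(bo);                 /* mmap is created on demand */
        etna_bo_cpu_prep(bo, DRM_ETNA_PREP_WRITE);   /* fence CPU access against the GPU */
        if (ptr)
            memset(ptr, 0, etna_bo_size(bo));
        etna_bo_cpu_fini(bo);
        etna_bo_del(bo);                             /* releases the underlying GEM handle */
    }
    if (dev)
        etna_device_del(dev);
    if (fd >= 0)
        close(fd);
    return 0;
}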

View file

@ -55,8 +55,7 @@ etna_cmd_stream_priv(struct etna_cmd_stream *stream)
return (struct etna_cmd_stream_priv *)stream;
}
drm_public struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe,
uint32_t size,
struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe, uint32_t size,
void (*reset_notify)(struct etna_cmd_stream *stream, void *priv),
void *priv)
{
@ -96,7 +95,7 @@ fail:
return NULL;
}
drm_public void etna_cmd_stream_del(struct etna_cmd_stream *stream)
void etna_cmd_stream_del(struct etna_cmd_stream *stream)
{
struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
@ -120,7 +119,7 @@ static void reset_buffer(struct etna_cmd_stream *stream)
priv->reset_notify(stream, priv->reset_notify_priv);
}
drm_public uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream)
uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream)
{
return etna_cmd_stream_priv(stream)->last_timestamp;
}
@ -150,7 +149,11 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
pthread_mutex_lock(&idx_lock);
if (bo->current_stream == stream) {
if (!bo->current_stream) {
idx = append_bo(stream, bo);
bo->current_stream = stream;
bo->idx = idx;
} else if (bo->current_stream == stream) {
idx = bo->idx;
} else {
/* slow-path: */
@ -161,8 +164,6 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
/* not found */
idx = append_bo(stream, bo);
}
bo->current_stream = stream;
bo->idx = idx;
}
pthread_mutex_unlock(&idx_lock);
@ -221,21 +222,20 @@ static void flush(struct etna_cmd_stream *stream, int in_fence_fd,
*out_fence_fd = req.fence_fd;
}
drm_public void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
{
flush(stream, -1, NULL);
reset_buffer(stream);
}
drm_public void etna_cmd_stream_flush2(struct etna_cmd_stream *stream,
int in_fence_fd,
int *out_fence_fd)
void etna_cmd_stream_flush2(struct etna_cmd_stream *stream, int in_fence_fd,
int *out_fence_fd)
{
flush(stream, in_fence_fd, out_fence_fd);
reset_buffer(stream);
}
drm_public void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
{
struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
@ -244,8 +244,7 @@ drm_public void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
reset_buffer(stream);
}
drm_public void etna_cmd_stream_reloc(struct etna_cmd_stream *stream,
const struct etna_reloc *r)
void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r)
{
struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
struct drm_etnaviv_gem_submit_reloc *reloc;
@ -262,7 +261,7 @@ drm_public void etna_cmd_stream_reloc(struct etna_cmd_stream *stream,
etna_cmd_stream_emit(stream, addr);
}
drm_public void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p)
void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p)
{
struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
struct drm_etnaviv_gem_submit_pmr *pmr;
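Editor's note: a hedged sketch of the command-stream flow shown in etnaviv_cmd_stream.c: words are emitted into the stream, buffer references are recorded through relocs, and flush()/finish() hand the batch to the kernel. The emitted word below is a placeholder, not a valid GPU command:

#include <stdint.h>
#include <etnaviv_drmif.h>

static void demo_submit(struct etna_pipe *pipe, struct etna_bo *target)
{
    struct etna_cmd_stream *stream = etna_cmd_stream_new(pipe, 256, NULL, NULL);
    if (!stream)
        return;

    etna_cmd_stream_emit(stream, 0x0);              /* placeholder command word */
    etna_cmd_stream_reloc(stream, &(struct etna_reloc){
        .bo = target,
        .flags = ETNA_RELOC_WRITE,
        .offset = 0,
    });

    etna_cmd_stream_finish(stream);                 /* flush and wait for completion */
    etna_cmd_stream_del(stream);
}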

View file

@ -25,7 +25,8 @@
*/
#include <stdlib.h>
#include <sys/types.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
@ -40,7 +41,7 @@
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_public struct etna_device *etna_device_new(int fd)
struct etna_device *etna_device_new(int fd)
{
struct etna_device *dev = calloc(sizeof(*dev), 1);
@ -58,7 +59,7 @@ drm_public struct etna_device *etna_device_new(int fd)
/* like etna_device_new() but creates its own private dup() of the fd
* which is close()d when the device is finalized. */
drm_public struct etna_device *etna_device_new_dup(int fd)
struct etna_device *etna_device_new_dup(int fd)
{
int dup_fd = dup(fd);
struct etna_device *dev = etna_device_new(dup_fd);
@ -71,7 +72,7 @@ drm_public struct etna_device *etna_device_new_dup(int fd)
return dev;
}
drm_public struct etna_device *etna_device_ref(struct etna_device *dev)
struct etna_device *etna_device_ref(struct etna_device *dev)
{
atomic_inc(&dev->refcnt);
@ -98,7 +99,7 @@ drm_private void etna_device_del_locked(struct etna_device *dev)
etna_device_del_impl(dev);
}
drm_public void etna_device_del(struct etna_device *dev)
void etna_device_del(struct etna_device *dev)
{
if (!atomic_dec_and_test(&dev->refcnt))
return;
@ -108,7 +109,7 @@ drm_public void etna_device_del(struct etna_device *dev)
pthread_mutex_unlock(&table_lock);
}
drm_public int etna_device_fd(struct etna_device *dev)
int etna_device_fd(struct etna_device *dev)
{
return dev->fd;
}
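Editor's note: a small sketch of the ownership rule documented above for etna_device_new_dup(): the device keeps a private dup() of the fd, so the caller may close its descriptor immediately. The device path is an assumption:

#include <fcntl.h>
#include <unistd.h>
#include <etnaviv_drmif.h>

int main(void)
{
    int fd = open("/dev/dri/renderD128", O_RDWR);
    if (fd < 0)
        return 1;

    struct etna_device *dev = etna_device_new_dup(fd);
    close(fd);                  /* safe: dev owns a duplicated descriptor */

    if (dev)
        etna_device_del(dev);   /* closes the private dup as well */
    return 0;
}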

View file

@ -73,10 +73,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
#define ETNA_MAX_PIPES 4
@ -152,11 +148,6 @@ struct drm_etnaviv_gem_submit_reloc {
* then patching the cmdstream for this entry is skipped. This can
* avoid kernel needing to map/access the cmdstream bo in the common
* case.
* If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
* field is interpreted as the fixed location to map the bo into the gpu
* virtual address space. If the kernel is unable to map the buffer at
* this location the submit will fail. This means userspace is responsible
* for the whole gpu virtual address management.
*/
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
@ -186,11 +177,9 @@ struct drm_etnaviv_gem_submit_pmr {
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
ETNA_SUBMIT_FENCE_FD_IN | \
ETNA_SUBMIT_FENCE_FD_OUT| \
ETNA_SUBMIT_SOFTPIN)
ETNA_SUBMIT_FENCE_FD_OUT)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
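Editor's note: the comment removed above describes softpin submits, where the 'presumed' field of a bo entry is the fixed GPU virtual address the buffer must be mapped at. A hedged sketch of filling such an entry; the address value is an illustration only and the helper name is hypothetical:

#include <stdint.h>
#include "etnaviv_drm.h"   /* the UAPI header shown above */

static struct drm_etnaviv_gem_submit_bo softpin_entry(uint32_t handle)
{
    struct drm_etnaviv_gem_submit_bo bo = {
        .flags    = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE,
        .handle   = handle,
        .presumed = 0x40000000,   /* userspace-managed GPU VA (assumption) */
    };
    return bo;
}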

View file

@ -115,6 +115,8 @@ int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns);
struct etna_bo *etna_bo_new(struct etna_device *dev,
uint32_t size, uint32_t flags);
struct etna_bo *etna_bo_from_handle(struct etna_device *dev,
uint32_t handle, uint32_t size);
struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name);
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd);
struct etna_bo *etna_bo_ref(struct etna_bo *bo);

View file

@ -44,7 +44,7 @@ static uint64_t get_param(struct etna_device *dev, uint32_t core, uint32_t param
return req.value;
}
drm_public struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core)
struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core)
{
struct etna_gpu *gpu;
@ -73,12 +73,12 @@ fail:
return NULL;
}
drm_public void etna_gpu_del(struct etna_gpu *gpu)
void etna_gpu_del(struct etna_gpu *gpu)
{
free(gpu);
}
drm_public int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
uint64_t *value)
{
struct etna_device *dev = gpu->dev;
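Editor's note: a hedged sketch of etna_gpu_new()/etna_gpu_get_param() as declared in etnaviv_drmif.h; core index 0 and the device path are assumptions:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>
#include <etnaviv_drmif.h>

int main(void)
{
    int fd = open("/dev/dri/renderD128", O_RDWR);
    struct etna_device *dev = fd >= 0 ? etna_device_new(fd) : NULL;
    struct etna_gpu *gpu = dev ? etna_gpu_new(dev, 0) : NULL;   /* first core */
    uint64_t model = 0, revision = 0;

    if (gpu &&
        etna_gpu_get_param(gpu, ETNA_GPU_MODEL, &model) == 0 &&
        etna_gpu_get_param(gpu, ETNA_GPU_REVISION, &revision) == 0)
        printf("GPU model 0x%" PRIx64 " rev 0x%" PRIx64 "\n", model, revision);

    if (gpu)
        etna_gpu_del(gpu);
    if (dev)
        etna_device_del(dev);
    if (fd >= 0)
        close(fd);
    return 0;
}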

View file

@ -121,7 +121,7 @@ static void etna_perfmon_free_domains(struct etna_perfmon *pm)
}
}
drm_public struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe)
struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe)
{
struct etna_perfmon *pm;
int ret;
@ -147,7 +147,7 @@ fail:
return NULL;
}
drm_public void etna_perfmon_del(struct etna_perfmon *pm)
void etna_perfmon_del(struct etna_perfmon *pm)
{
if (!pm)
return;
@ -156,7 +156,7 @@ drm_public void etna_perfmon_del(struct etna_perfmon *pm)
free(pm);
}
drm_public struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name)
struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name)
{
struct etna_perfmon_domain *dom;
@ -170,7 +170,7 @@ drm_public struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_
return NULL;
}
drm_public struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name)
struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name)
{
struct etna_perfmon_signal *signal;
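Editor's note: a hedged sketch of the perfmon lookup API shown above. The domain and signal names ("HI", "TOTAL_CYCLES") are assumptions chosen for illustration; the available names depend on the GPU and kernel:

#include <stdio.h>
#include <etnaviv_drmif.h>

static void lookup_signal(struct etna_pipe *pipe)
{
    struct etna_perfmon *pm = etna_perfmon_create(pipe);
    struct etna_perfmon_domain *dom;
    struct etna_perfmon_signal *sig;

    if (!pm)
        return;

    dom = etna_perfmon_get_dom_by_name(pm, "HI");                     /* assumed name */
    sig = dom ? etna_perfmon_get_sig_by_name(dom, "TOTAL_CYCLES") : NULL;
    printf("signal %sfound\n", sig ? "" : "not ");

    etna_perfmon_del(pm);
}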

View file

@ -26,12 +26,12 @@
#include "etnaviv_priv.h"
drm_public int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms)
int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms)
{
return etna_pipe_wait_ns(pipe, timestamp, ms * 1000000);
}
drm_public int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns)
int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns)
{
struct etna_device *dev = pipe->gpu->dev;
int ret;
@ -55,12 +55,12 @@ drm_public int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uin
return 0;
}
drm_public void etna_pipe_del(struct etna_pipe *pipe)
void etna_pipe_del(struct etna_pipe *pipe)
{
free(pipe);
}
drm_public struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id)
struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id)
{
struct etna_pipe *pipe;
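Editor's note: etna_pipe_wait() above is a millisecond wrapper around etna_pipe_wait_ns(). A short sketch of the fence/timestamp pairing it is meant for, assuming a pipe and stream created elsewhere:

#include <stdint.h>
#include <etnaviv_drmif.h>

static void flush_and_wait(struct etna_pipe *pipe, struct etna_cmd_stream *stream)
{
    uint32_t ts;

    etna_cmd_stream_flush(stream);             /* submits and records a fence */
    ts = etna_cmd_stream_timestamp(stream);    /* fence of the last submit */
    etna_pipe_wait(pipe, ts, 3000);            /* up to 3000 ms, i.e. 3e9 ns internally */
}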

View file

@ -150,7 +150,7 @@ struct etna_cmd_stream_priv {
struct etna_bo **bos;
uint32_t nr_bos, max_bos;
/* notify callback if buffer reset happened */
/* notify callback if buffer reset happend */
void (*reset_notify)(struct etna_cmd_stream *stream, void *priv);
void *reset_notify_priv;
};

View file

@ -19,7 +19,7 @@
# SOFTWARE.
libdrm_etnaviv = library(
libdrm_etnaviv = shared_library(
'drm_etnaviv',
[
files(
@ -30,19 +30,20 @@ libdrm_etnaviv = library(
],
include_directories : [inc_root, inc_drm],
link_with : libdrm,
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
dependencies : [dep_threads, dep_rt, dep_atomic_ops],
version : '1.@0@.0'.format(patch_ver),
c_args : warn_c_args,
dependencies : [dep_pthread_stubs, dep_rt, dep_atomic_ops],
version : '1.0.0',
install : true,
)
install_headers('etnaviv_drmif.h', subdir : 'libdrm')
pkg.generate(
libdrm_etnaviv,
name : 'libdrm_etnaviv',
libraries : libdrm_etnaviv,
subdirs : ['.', 'libdrm'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to etnaviv kernel DRM services'
)
@ -51,14 +52,8 @@ ext_libdrm_etnaviv = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_etnaviv', ext_libdrm_etnaviv)
test(
'etnaviv-symbols-check',
symbols_check,
args : [
'--lib', libdrm_etnaviv,
'--symbols-file', files('etnaviv-symbols.txt'),
'--nm', prog_nm.full_path(),
],
'etnaviv-symbol-check',
prog_bash,
args : [files('etnaviv-symbol-check'), libdrm_etnaviv]
)

27
exynos/Makefile.am Normal file
View file

@ -0,0 +1,27 @@
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir) \
$(PTHREADSTUBS_CFLAGS) \
-I$(top_srcdir)/include/drm
libdrm_exynos_la_LTLIBRARIES = libdrm_exynos.la
libdrm_exynos_ladir = $(libdir)
libdrm_exynos_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_exynos_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
libdrm_exynos_la_SOURCES = \
exynos_drm.c \
exynos_fimg2d.c \
fimg2d_reg.h
libdrm_exynoscommonincludedir = ${includedir}/exynos
libdrm_exynoscommoninclude_HEADERS = exynos_drm.h exynos_fimg2d.h
libdrm_exynosincludedir = ${includedir}/libdrm
libdrm_exynosinclude_HEADERS = exynos_drmif.h
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm_exynos.pc
TESTS = exynos-symbol-check
EXTRA_DIST = $(TESTS)

40
exynos/exynos-symbol-check Executable file
View file

@ -0,0 +1,40 @@
#!/bin/bash
# The following symbols (past the first five) are taken from the public headers.
# A list of the latter should be available in Makefile.am/libdrm_exynos*_HEADERS
FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_exynos.so} | awk '{print $3}'| while read func; do
( grep -q "^$func$" || echo $func ) <<EOF
__bss_start
_edata
_end
_fini
_init
exynos_bo_create
exynos_bo_destroy
exynos_bo_from_name
exynos_bo_get_info
exynos_bo_get_name
exynos_bo_handle
exynos_bo_map
exynos_device_create
exynos_device_destroy
exynos_prime_fd_to_handle
exynos_prime_handle_to_fd
exynos_vidi_connection
exynos_handle_event
g2d_blend
g2d_copy
g2d_copy_with_scale
g2d_exec
g2d_config_event
g2d_fini
g2d_init
g2d_move
g2d_scale_and_blend
g2d_solid_fill
EOF
done)
test ! -n "$FUNCS" || echo $FUNCS
test ! -n "$FUNCS"

View file

@ -1,23 +0,0 @@
exynos_bo_create
exynos_bo_destroy
exynos_bo_from_name
exynos_bo_get_info
exynos_bo_get_name
exynos_bo_handle
exynos_bo_map
exynos_device_create
exynos_device_destroy
exynos_prime_fd_to_handle
exynos_prime_handle_to_fd
exynos_vidi_connection
exynos_handle_event
g2d_blend
g2d_copy
g2d_copy_with_scale
g2d_exec
g2d_config_event
g2d_fini
g2d_init
g2d_move
g2d_scale_and_blend
g2d_solid_fill

View file

@ -31,6 +31,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>
@ -47,7 +48,7 @@
*
* if true, return the device object else NULL.
*/
drm_public struct exynos_device * exynos_device_create(int fd)
struct exynos_device * exynos_device_create(int fd)
{
struct exynos_device *dev;
@ -68,7 +69,7 @@ drm_public struct exynos_device * exynos_device_create(int fd)
*
* @dev: exynos drm device object.
*/
drm_public void exynos_device_destroy(struct exynos_device *dev)
void exynos_device_destroy(struct exynos_device *dev)
{
free(dev);
}
@ -86,8 +87,8 @@ drm_public void exynos_device_destroy(struct exynos_device *dev)
*
* if true, return a exynos buffer object else NULL.
*/
drm_public struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
size_t size, uint32_t flags)
struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
size_t size, uint32_t flags)
{
struct exynos_bo *bo;
struct drm_exynos_gem_create req = {
@ -140,8 +141,8 @@ fail:
*
* if true, return 0 else negative.
*/
drm_public int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
size_t *size, uint32_t *flags)
int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
size_t *size, uint32_t *flags)
{
int ret;
struct drm_exynos_gem_info req = {
@ -166,7 +167,7 @@ drm_public int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
*
* @bo: a exynos buffer object to be destroyed.
*/
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
void exynos_bo_destroy(struct exynos_bo *bo)
{
if (!bo)
return;
@ -175,7 +176,11 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
munmap(bo->vaddr, bo->size);
if (bo->handle) {
drmCloseBufferHandle(bo->dev->fd, bo->handle);
struct drm_gem_close req = {
.handle = bo->handle,
};
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
free(bo);
@ -194,7 +199,7 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
* if true, return a exynos buffer object else NULL.
*
*/
drm_public struct exynos_bo *
struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
struct exynos_bo *bo;
@ -237,7 +242,7 @@ err_free_bo:
*
* if true, return 0 else negative.
*/
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
if (!bo->name) {
struct drm_gem_flink req = {
@ -260,7 +265,7 @@ drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
return 0;
}
drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
return bo->handle;
}
@ -271,9 +276,9 @@ drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
* @bo: a exynos buffer object including a gem object handle to be mmapped
* to user space.
*
* if true, user pointer mmapped else NULL.
* if true, user pointer mmaped else NULL.
*/
drm_public void *exynos_bo_map(struct exynos_bo *bo)
void *exynos_bo_map(struct exynos_bo *bo)
{
if (!bo->vaddr) {
struct exynos_device *dev = bo->dev;
@ -310,7 +315,7 @@ drm_public void *exynos_bo_map(struct exynos_bo *bo)
*
* @return: 0 on success, -1 on error, and errno will be set
*/
drm_public int
int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
return drmPrimeHandleToFD(dev->fd, handle, 0, fd);
@ -325,7 +330,7 @@ exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
*
* @return: 0 on success, -1 on error, and errno will be set
*/
drm_public int
int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
return drmPrimeFDToHandle(dev->fd, fd, handle);
@ -348,7 +353,7 @@ exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
*
* if true, return 0 else negative.
*/
drm_public int
int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
uint32_t ext, void *edid)
{
@ -389,7 +394,7 @@ exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
}
}
drm_public int
int
exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
{
char buffer[1024];

View file

@ -64,7 +64,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
* @connection: indicate whether doing connection or not by user.
* @connection: indicate whether doing connetion or not by user.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.

View file

@ -46,7 +46,7 @@ struct exynos_device {
* @handle: a gem handle to gem object created.
* @flags: indicate memory allocation and cache attribute types.
* @size: size to the buffer created.
* @vaddr: user space address to a gem buffer mmapped.
* @vaddr: user space address to a gem buffer mmaped.
* @name: a gem global handle from flink request.
*/
struct exynos_bo {

View file

@ -30,6 +30,7 @@
#include <assert.h>
#include <sys/mman.h>
#include <linux/stddef.h>
#include <xf86drm.h>
@ -355,7 +356,7 @@ static int g2d_flush(struct g2d_context *ctx)
*
* fd: a file descriptor to an opened drm device.
*/
drm_public struct g2d_context *g2d_init(int fd)
struct g2d_context *g2d_init(int fd)
{
struct drm_exynos_g2d_get_ver ver;
struct g2d_context *ctx;
@ -383,7 +384,7 @@ drm_public struct g2d_context *g2d_init(int fd)
return ctx;
}
drm_public void g2d_fini(struct g2d_context *ctx)
void g2d_fini(struct g2d_context *ctx)
{
free(ctx);
}
@ -399,7 +400,7 @@ drm_public void g2d_fini(struct g2d_context *ctx)
* @ctx: a pointer to g2d_context structure.
* @userdata: a pointer to the user data
*/
drm_public void g2d_config_event(struct g2d_context *ctx, void *userdata)
void g2d_config_event(struct g2d_context *ctx, void *userdata)
{
ctx->event_userdata = userdata;
}
@ -409,7 +410,7 @@ drm_public void g2d_config_event(struct g2d_context *ctx, void *userdata)
*
* @ctx: a pointer to g2d_context structure.
*/
drm_public int g2d_exec(struct g2d_context *ctx)
int g2d_exec(struct g2d_context *ctx)
{
struct drm_exynos_g2d_exec exec;
int ret;
@ -441,7 +442,7 @@ drm_public int g2d_exec(struct g2d_context *ctx)
* @w: width value to buffer filled with given color data.
* @h: height value to buffer filled with given color data.
*/
drm_public int
int
g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
unsigned int x, unsigned int y, unsigned int w,
unsigned int h)
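
As a hedged illustration of the fill path documented above, a minimal sequence might look like the following sketch. The g2d_image field names and the G2D_* constants are assumptions taken from exynos_fimg2d.h, "bo" is an exynos_bo created elsewhere, and error handling is trimmed.

/* Sketch: fill a 64x64 ARGB8888 GEM buffer with opaque red, then submit.
 * Field names and flags (G2D_IMGBUF_GEM, G2D_COLOR_FMT_ARGB8888,
 * G2D_ORDER_AXRGB) are assumed from exynos_fimg2d.h. */
#include <exynos_drmif.h>
#include <exynos_fimg2d.h>

static int fill_red(int fd, struct exynos_bo *bo)
{
    struct g2d_context *ctx = g2d_init(fd);
    struct g2d_image img = {
        .width      = 64,
        .height     = 64,
        .stride     = 64 * 4,
        .buf_type   = G2D_IMGBUF_GEM,
        .color_mode = G2D_COLOR_FMT_ARGB8888 | G2D_ORDER_AXRGB,
        .bo[0]      = exynos_bo_handle(bo),
        .color      = 0xffff0000,        /* ARGB: opaque red */
    };
    int ret;

    ret = g2d_solid_fill(ctx, &img, 0, 0, img.width, img.height);
    if (!ret)
        ret = g2d_exec(ctx);             /* submit the queued command list */
    g2d_fini(ctx);
    return ret;
}
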
@ -494,7 +495,7 @@ g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
* @w: width value to source and destination buffers.
* @h: height value to source and destination buffers.
*/
drm_public int
int
g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
unsigned int dst_x, unsigned dst_y, unsigned int w,
@ -577,7 +578,7 @@ g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
* @w: width of rectangle to move.
* @h: height of rectangle to move.
*/
drm_public int
int
g2d_move(struct g2d_context *ctx, struct g2d_image *img,
unsigned int src_x, unsigned int src_y,
unsigned int dst_x, unsigned dst_y, unsigned int w,
@ -675,7 +676,7 @@ g2d_move(struct g2d_context *ctx, struct g2d_image *img,
* @negative: indicate that it uses color negative to source and
* destination buffers.
*/
drm_public int
int
g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
struct g2d_image *dst, unsigned int src_x,
unsigned int src_y, unsigned int src_w,
@ -784,7 +785,7 @@ g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
* @h: height value to source and destination buffer.
* @op: blend operation type.
*/
drm_public int
int
g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
struct g2d_image *dst, unsigned int src_x,
unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
@ -901,7 +902,7 @@ g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
* @dst_h: height value to destination buffer.
* @op: blend operation type.
*/
drm_public int
int
g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
unsigned int src_w, unsigned int src_h, unsigned int dst_x,

View file

@ -18,15 +18,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
libdrm_exynos = library(
libdrm_exynos = shared_library(
'drm_exynos',
[files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
c_args : libdrm_c_args,
gnu_symbol_visibility : 'hidden',
c_args : warn_c_args,
include_directories : [inc_root, inc_drm],
link_with : libdrm,
dependencies : [dep_threads],
version : '1.@0@.0'.format(patch_ver),
dependencies : [dep_pthread_stubs],
version : '1.0.0',
install : true,
)
@ -38,22 +37,18 @@ ext_libdrm_exynos = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_exynos', ext_libdrm_exynos)
pkg.generate(
libdrm_exynos,
name : 'libdrm_exynos',
libraries : libdrm_exynos,
subdirs : ['.', 'libdrm', 'exynos'],
version : '0.7',
requires_private : 'libdrm',
description : 'Userspace interface to exynos kernel DRM services',
)
test(
'exynos-symbols-check',
symbols_check,
args : [
'--lib', libdrm_exynos,
'--symbols-file', files('exynos-symbols.txt'),
'--nm', prog_nm.full_path(),
],
'exynos-symbol-check',
prog_bash,
env : env_test,
args : [files('exynos-symbol-check'), libdrm_exynos]
)

14
freedreno/Android.mk Normal file
View file

@ -0,0 +1,14 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Import variables LIBDRM_FREEDRENO_FILES, LIBDRM_FREEDRENO_H_FILES
include $(LOCAL_PATH)/Makefile.sources
LOCAL_MODULE := libdrm_freedreno
LOCAL_SHARED_LIBRARIES := libdrm
LOCAL_SRC_FILES := $(LIBDRM_FREEDRENO_FILES)
include $(LIBDRM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)

31
freedreno/Makefile.am Normal file
View file

@ -0,0 +1,31 @@
AUTOMAKE_OPTIONS=subdir-objects
include Makefile.sources
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir) \
$(PTHREADSTUBS_CFLAGS) \
$(VALGRIND_CFLAGS) \
-I$(top_srcdir)/include/drm
libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la
libdrm_freedreno_ladir = $(libdir)
libdrm_freedreno_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_freedreno_la_LIBADD = \
../libdrm.la \
@PTHREADSTUBS_LIBS@ \
@CLOCK_LIB@
libdrm_freedreno_la_SOURCES = $(LIBDRM_FREEDRENO_FILES)
if HAVE_FREEDRENO_KGSL
libdrm_freedreno_la_SOURCES += $(LIBDRM_FREEDRENO_KGSL_FILES)
endif
libdrm_freedrenocommonincludedir = ${includedir}/freedreno
libdrm_freedrenocommoninclude_HEADERS = $(LIBDRM_FREEDRENO_H_FILES)
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm_freedreno.pc
TESTS = freedreno-symbol-check
EXTRA_DIST = $(TESTS)

View file

@ -0,0 +1,26 @@
LIBDRM_FREEDRENO_FILES := \
freedreno_device.c \
freedreno_pipe.c \
freedreno_priv.h \
freedreno_ringbuffer.c \
freedreno_bo.c \
freedreno_bo_cache.c \
msm/msm_bo.c \
msm/msm_device.c \
msm/msm_drm.h \
msm/msm_pipe.c \
msm/msm_priv.h \
msm/msm_ringbuffer.c
LIBDRM_FREEDRENO_KGSL_FILES := \
kgsl/kgsl_bo.c \
kgsl/kgsl_device.c \
kgsl/kgsl_drm.h \
kgsl/kgsl_pipe.c \
kgsl/kgsl_priv.h \
kgsl/kgsl_ringbuffer.c \
kgsl/msm_kgsl.h
LIBDRM_FREEDRENO_H_FILES := \
freedreno_drmif.h \
freedreno_ringbuffer.h

View file

@ -1,3 +1,15 @@
#!/bin/bash
# The following symbols (past the first five) are taken from the public headers.
# A list of the latter should be available in Makefile.sources/LIBDRM_FREEDRENO_H_FILES
FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_freedreno.so} | awk '{print $3}'| while read func; do
( grep -q "^$func$" || echo $func ) <<EOF
__bss_start
_edata
_end
_fini
_init
fd_bo_cpu_fini
fd_bo_cpu_prep
fd_bo_del
@ -29,17 +41,26 @@ fd_pipe_wait
fd_pipe_wait_timeout
fd_ringbuffer_cmd_count
fd_ringbuffer_del
fd_ringbuffer_emit_reloc_ring
fd_ringbuffer_emit_reloc_ring_full
fd_ringbuffer_flush
fd_ringbuffer_grow
fd_ringbuffer_new
fd_ringbuffer_new_flags
fd_ringbuffer_new_object
fd_ringbuffer_ref
fd_ringbuffer_reloc
fd_ringbuffer_reloc2
fd_ringbuffer_reset
fd_ringbuffer_set_parent
fd_ringbuffer_size
fd_ringbuffer_timestamp
fd_ringmarker_del
fd_ringmarker_dwords
fd_ringmarker_flush
fd_ringbuffer_flush2
fd_ringmarker_mark
fd_ringmarker_new
EOF
done)
test ! -n "$FUNCS" || echo $FUNCS
test ! -n "$FUNCS"

View file

@ -62,7 +62,10 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
bo = dev->funcs->bo_from_handle(dev, size, handle);
if (!bo) {
drmCloseBufferHandle(dev->fd, handle);
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}
bo->dev = fd_device_ref(dev);
@ -75,15 +78,14 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
return bo;
}
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
struct fd_bo_cache *cache)
struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
struct fd_bo *bo = NULL;
uint32_t handle;
int ret;
bo = fd_bo_cache_alloc(cache, &size, flags);
bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
if (bo)
return bo;
@ -93,6 +95,7 @@ bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
pthread_mutex_lock(&table_lock);
bo = bo_from_handle(dev, size, handle);
bo->bo_reuse = TRUE;
pthread_mutex_unlock(&table_lock);
VG_BO_ALLOC(bo);
@ -100,30 +103,7 @@ bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
return bo;
}
drm_public struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
if (bo)
bo->bo_reuse = BO_CACHE;
return bo;
}
/* internal function to allocate bo's that use the ringbuffer cache
* instead of the normal bo_cache. The purpose is, because cmdstream
* bo's get vmap'd on the kernel side, and that is expensive, we want
* to re-use cmdstream bo's for cmdstream and not unrelated purposes.
*/
drm_private struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size, uint32_t flags)
{
struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
if (bo)
bo->bo_reuse = RING_CACHE;
return bo;
}
drm_public struct fd_bo *
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
struct fd_bo *bo = NULL;
@ -144,7 +124,7 @@ out_unlock:
return bo;
}
drm_public struct fd_bo *
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
int ret, size;
@ -176,7 +156,7 @@ out_unlock:
return bo;
}
drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
struct drm_gem_open req = {
.name = name,
@ -211,23 +191,23 @@ out_unlock:
return bo;
}
drm_public uint64_t fd_bo_get_iova(struct fd_bo *bo)
uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
return bo->funcs->iova(bo);
}
drm_public void fd_bo_put_iova(struct fd_bo *bo)
void fd_bo_put_iova(struct fd_bo *bo)
{
/* currently a no-op */
}
drm_public struct fd_bo * fd_bo_ref(struct fd_bo *bo)
struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
atomic_inc(&bo->refcnt);
return bo;
}
drm_public void fd_bo_del(struct fd_bo *bo)
void fd_bo_del(struct fd_bo *bo)
{
struct fd_device *dev = bo->dev;
@ -236,9 +216,7 @@ drm_public void fd_bo_del(struct fd_bo *bo)
pthread_mutex_lock(&table_lock);
if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
goto out;
if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
goto out;
bo_del(bo);
@ -260,16 +238,19 @@ drm_private void bo_del(struct fd_bo *bo)
*/
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
drmHashDelete(bo->dev->handle_table, bo->handle);
if (bo->name)
drmHashDelete(bo->dev->name_table, bo->name);
drmCloseBufferHandle(bo->dev->fd, bo->handle);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
bo->funcs->destroy(bo);
}
drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
if (!bo->name) {
struct drm_gem_flink req = {
@ -285,7 +266,7 @@ drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
pthread_mutex_lock(&table_lock);
set_name(bo, req.name);
pthread_mutex_unlock(&table_lock);
bo->bo_reuse = NO_CACHE;
bo->bo_reuse = FALSE;
}
*name = bo->name;
@ -293,12 +274,12 @@ drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
return 0;
}
drm_public uint32_t fd_bo_handle(struct fd_bo *bo)
uint32_t fd_bo_handle(struct fd_bo *bo)
{
return bo->handle;
}
drm_public int fd_bo_dmabuf(struct fd_bo *bo)
int fd_bo_dmabuf(struct fd_bo *bo)
{
int ret, prime_fd;
@ -309,17 +290,17 @@ drm_public int fd_bo_dmabuf(struct fd_bo *bo)
return ret;
}
bo->bo_reuse = NO_CACHE;
bo->bo_reuse = FALSE;
return prime_fd;
}
drm_public uint32_t fd_bo_size(struct fd_bo *bo)
uint32_t fd_bo_size(struct fd_bo *bo)
{
return bo->size;
}
drm_public void * fd_bo_map(struct fd_bo *bo)
void * fd_bo_map(struct fd_bo *bo)
{
if (!bo->map) {
uint64_t offset;
@ -341,18 +322,18 @@ drm_public void * fd_bo_map(struct fd_bo *bo)
}
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
drm_public int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
return bo->funcs->cpu_prep(bo, pipe, op);
}
drm_public void fd_bo_cpu_fini(struct fd_bo *bo)
void fd_bo_cpu_fini(struct fd_bo *bo)
{
bo->funcs->cpu_fini(bo);
}
#if !HAVE_FREEDRENO_KGSL
drm_public struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
return NULL;
}
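
The fd_device_* and fd_bo_* entry points above form the basic allocation path of libdrm_freedreno. A minimal sketch of how they chain together (the render-node path is an assumption; error handling is omitted):

/* Sketch: open a DRM node, wrap it in an fd_device, allocate and map a
 * 4 KiB buffer, then tear everything down. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <freedreno_drmif.h>

int main(void)
{
    int fd = open("/dev/dri/renderD128", O_RDWR);    /* assumed node */
    struct fd_device *dev = fd_device_new(fd);
    /* flags = 0: defaults; see the DRM_FREEDRENO_GEM_* flags for more */
    struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0);
    void *map = fd_bo_map(bo);

    if (map)
        memset(map, 0, fd_bo_size(bo));              /* CPU writes via the mapping */

    fd_bo_del(bo);
    fd_device_del(dev);
    close(fd);
    return 0;
}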

View file

@ -49,7 +49,7 @@ add_bucket(struct fd_bo_cache *cache, int size)
* fill in for a bit smoother size curve..
*/
drm_private void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
fd_bo_cache_init(struct fd_bo_cache *cache, int course)
{
unsigned long size, cache_max_size = 64 * 1024 * 1024;
@ -63,13 +63,13 @@ fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
*/
add_bucket(cache, 4096);
add_bucket(cache, 4096 * 2);
if (!coarse)
if (!course)
add_bucket(cache, 4096 * 3);
/* Initialize the linked lists for BO reuse cache. */
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
add_bucket(cache, size);
if (!coarse) {
if (!course) {
add_bucket(cache, size + size * 1 / 4);
add_bucket(cache, size + size * 2 / 4);
add_bucket(cache, size + size * 3 / 4);

View file

@ -38,7 +38,7 @@ static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
struct fd_device * kgsl_device_new(int fd);
struct fd_device * msm_device_new(int fd);
drm_public struct fd_device * fd_device_new(int fd)
struct fd_device * fd_device_new(int fd)
{
struct fd_device *dev;
drmVersionPtr version;
@ -82,7 +82,6 @@ out:
dev->handle_table = drmHashCreate();
dev->name_table = drmHashCreate();
fd_bo_cache_init(&dev->bo_cache, FALSE);
fd_bo_cache_init(&dev->ring_cache, TRUE);
return dev;
}
@ -90,7 +89,7 @@ out:
/* like fd_device_new() but creates its own private dup() of the fd
* which is close()d when the device is finalized.
*/
drm_public struct fd_device * fd_device_new_dup(int fd)
struct fd_device * fd_device_new_dup(int fd)
{
int dup_fd = dup(fd);
struct fd_device *dev = fd_device_new(dup_fd);
@ -101,7 +100,7 @@ drm_public struct fd_device * fd_device_new_dup(int fd)
return dev;
}
drm_public struct fd_device * fd_device_ref(struct fd_device *dev)
struct fd_device * fd_device_ref(struct fd_device *dev)
{
atomic_inc(&dev->refcnt);
return dev;
@ -125,7 +124,7 @@ drm_private void fd_device_del_locked(struct fd_device *dev)
fd_device_del_impl(dev);
}
drm_public void fd_device_del(struct fd_device *dev)
void fd_device_del(struct fd_device *dev)
{
if (!atomic_dec_and_test(&dev->refcnt))
return;
@ -134,12 +133,12 @@ drm_public void fd_device_del(struct fd_device *dev)
pthread_mutex_unlock(&table_lock);
}
drm_public int fd_device_fd(struct fd_device *dev)
int fd_device_fd(struct fd_device *dev)
{
return dev->fd;
}
drm_public enum fd_version fd_device_version(struct fd_device *dev)
enum fd_version fd_device_version(struct fd_device *dev)
{
return dev->version;
}

View file

@ -33,7 +33,7 @@
* priority of zero is highest priority, and higher numeric values are
* lower priorities
*/
drm_public struct fd_pipe *
struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
struct fd_pipe *pipe;
@ -65,37 +65,37 @@ fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
return pipe;
}
drm_public struct fd_pipe *
struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
{
return fd_pipe_new2(dev, id, 1);
}
drm_public struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe)
struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe)
{
atomic_inc(&pipe->refcnt);
return pipe;
}
drm_public void fd_pipe_del(struct fd_pipe *pipe)
void fd_pipe_del(struct fd_pipe *pipe)
{
if (!atomic_dec_and_test(&pipe->refcnt))
return;
pipe->funcs->destroy(pipe);
}
drm_public int fd_pipe_get_param(struct fd_pipe *pipe,
int fd_pipe_get_param(struct fd_pipe *pipe,
enum fd_param_id param, uint64_t *value)
{
return pipe->funcs->get_param(pipe, param, value);
}
drm_public int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
{
return fd_pipe_wait_timeout(pipe, timestamp, ~0);
}
drm_public int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
uint64_t timeout)
{
return pipe->funcs->wait(pipe, timestamp, timeout);

View file

@ -98,7 +98,6 @@ struct fd_device {
const struct fd_device_funcs *funcs;
struct fd_bo_cache bo_cache;
struct fd_bo_cache ring_cache;
int closefd; /* call close(fd) upon destruction */
@ -115,6 +114,10 @@ drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
/* for where @table_lock is already held: */
drm_private void fd_device_del_locked(struct fd_device *dev);
enum fd_ringbuffer_flags {
FD_RINGBUFFER_OBJECT = 0x1,
};
struct fd_pipe_funcs {
struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size,
enum fd_ringbuffer_flags flags);
@ -131,6 +134,11 @@ struct fd_pipe {
const struct fd_pipe_funcs *funcs;
};
struct fd_ringmarker {
struct fd_ringbuffer *ring;
uint32_t *cur;
};
struct fd_ringbuffer_funcs {
void * (*hostptr)(struct fd_ringbuffer *ring);
int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
@ -140,7 +148,8 @@ struct fd_ringbuffer_funcs {
void (*emit_reloc)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc);
uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx);
struct fd_ringbuffer *target, uint32_t cmd_idx,
uint32_t submit_offset, uint32_t size);
uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
void (*destroy)(struct fd_ringbuffer *ring);
};
@ -163,19 +172,11 @@ struct fd_bo {
atomic_t refcnt;
const struct fd_bo_funcs *funcs;
enum {
NO_CACHE = 0,
BO_CACHE = 1,
RING_CACHE = 2,
} bo_reuse;
int bo_reuse;
struct list_head list; /* bucket-list entry */
time_t free_time; /* time when added to bucket-list */
};
drm_private struct fd_bo *fd_bo_new_ring(struct fd_device *dev,
uint32_t size, uint32_t flags);
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define enable_debug 0 /* TODO make dynamic */

View file

@ -32,19 +32,12 @@
#include "freedreno_priv.h"
#include "freedreno_ringbuffer.h"
drm_public struct fd_ringbuffer *
fd_ringbuffer_new_flags(struct fd_pipe *pipe, uint32_t size,
static struct fd_ringbuffer *
ringbuffer_new(struct fd_pipe *pipe, uint32_t size,
enum fd_ringbuffer_flags flags)
{
struct fd_ringbuffer *ring;
/* we can't really support "growable" rb's in general for
* stateobj's since we need a single gpu addr (ie. can't
* do the trick of a chain of IB packets):
*/
if (flags & FD_RINGBUFFER_OBJECT)
assert(size);
ring = pipe->funcs->ringbuffer_new(pipe, size, flags);
if (!ring)
return NULL;
@ -59,40 +52,35 @@ fd_ringbuffer_new_flags(struct fd_pipe *pipe, uint32_t size,
return ring;
}
drm_public struct fd_ringbuffer *
struct fd_ringbuffer *
fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
{
return fd_ringbuffer_new_flags(pipe, size, 0);
return ringbuffer_new(pipe, size, 0);
}
drm_public struct fd_ringbuffer *
struct fd_ringbuffer *
fd_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
return fd_ringbuffer_new_flags(pipe, size, FD_RINGBUFFER_OBJECT);
/* we can't really support "growable" rb's in general for
* stateobj's since we need a single gpu addr (ie. can't
* do the trick of a chain of IB packets):
*/
assert(size);
return ringbuffer_new(pipe, size, FD_RINGBUFFER_OBJECT);
}
drm_public void fd_ringbuffer_del(struct fd_ringbuffer *ring)
void fd_ringbuffer_del(struct fd_ringbuffer *ring)
{
if (!atomic_dec_and_test(&ring->refcnt))
return;
fd_ringbuffer_reset(ring);
if (!(ring->flags & FD_RINGBUFFER_OBJECT))
fd_ringbuffer_reset(ring);
ring->funcs->destroy(ring);
}
drm_public struct fd_ringbuffer *
fd_ringbuffer_ref(struct fd_ringbuffer *ring)
{
STATIC_ASSERT(sizeof(ring->refcnt) <= sizeof(ring->__pad));
atomic_inc(&ring->refcnt);
return ring;
}
/* ringbuffers which are IB targets should set the toplevel rb (ie.
* the IB source) as its parent before emitting reloc's, to ensure
* the bookkeeping works out properly.
*/
drm_public void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
struct fd_ringbuffer *parent)
{
/* state objects should not be parented! */
@ -100,7 +88,7 @@ drm_public void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
ring->parent = parent;
}
drm_public void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
{
uint32_t *start = ring->start;
if (ring->pipe->id == FD_PIPE_2D)
@ -110,18 +98,18 @@ drm_public void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
ring->funcs->reset(ring);
}
drm_public int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
{
return ring->funcs->flush(ring, ring->last_start, -1, NULL);
}
drm_public int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
int *out_fence_fd)
{
return ring->funcs->flush(ring, ring->last_start, in_fence_fd, out_fence_fd);
}
drm_public void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
{
assert(ring->funcs->grow); /* unsupported on kgsl */
@ -137,39 +125,55 @@ drm_public void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
ring->cur = ring->last_start = ring->start;
}
drm_public uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
{
return ring->last_timestamp;
}
drm_public void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
{
assert(ring->pipe->gpu_id < 500);
ring->funcs->emit_reloc(ring, reloc);
}
drm_public void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring,
void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
{
ring->funcs->emit_reloc(ring, reloc);
}
drm_public uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
struct fd_ringmarker *target, struct fd_ringmarker *end)
{
uint32_t submit_offset, size;
/* This function is deprecated and not supported on 64b devices: */
assert(ring->pipe->gpu_id < 500);
assert(target->ring == end->ring);
submit_offset = offset_bytes(target->cur, target->ring->start);
size = offset_bytes(end->cur, target->cur);
ring->funcs->emit_reloc_ring(ring, target->ring, 0, submit_offset, size);
}
uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
if (!ring->funcs->cmd_count)
return 1;
return ring->funcs->cmd_count(ring);
}
drm_public uint32_t
uint32_t
fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx)
{
return ring->funcs->emit_reloc_ring(ring, target, cmd_idx);
uint32_t size = offset_bytes(target->cur, target->start);
return ring->funcs->emit_reloc_ring(ring, target, cmd_idx, 0, size);
}
drm_public uint32_t
uint32_t
fd_ringbuffer_size(struct fd_ringbuffer *ring)
{
/* only really needed for stateobj ringbuffers, and won't really
@ -180,3 +184,45 @@ fd_ringbuffer_size(struct fd_ringbuffer *ring)
return offset_bytes(ring->cur, ring->start);
}
/*
* Deprecated ringmarker API:
*/
struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
{
struct fd_ringmarker *marker = NULL;
marker = calloc(1, sizeof(*marker));
if (!marker) {
ERROR_MSG("allocation failed");
return NULL;
}
marker->ring = ring;
marker->cur = marker->ring->cur;
return marker;
}
void fd_ringmarker_del(struct fd_ringmarker *marker)
{
free(marker);
}
void fd_ringmarker_mark(struct fd_ringmarker *marker)
{
marker->cur = marker->ring->cur;
}
uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
struct fd_ringmarker *end)
{
return end->cur - start->cur;
}
int fd_ringmarker_flush(struct fd_ringmarker *marker)
{
struct fd_ringbuffer *ring = marker->ring;
return ring->funcs->flush(ring, marker->cur, -1, NULL);
}
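
The parenting rule described above (an IB-target ringbuffer is given the toplevel ring as its parent before relocs to it are emitted) implies roughly the following call order. This is a sketch only: the command packets that would normally be written into each ring are GPU-generation specific and are left out.

/* Sketch of the intended call order for an IB-target ringbuffer. */
#include <freedreno_drmif.h>
#include <freedreno_ringbuffer.h>

static void submit_once(struct fd_device *dev)
{
    struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
    struct fd_ringbuffer *ring   = fd_ringbuffer_new(pipe, 0x1000);
    struct fd_ringbuffer *target = fd_ringbuffer_new(pipe, 0x1000);

    fd_ringbuffer_set_parent(target, ring);     /* bookkeeping for later relocs */
    /* ... write GPU-specific packets into target->cur here ... */
    fd_ringbuffer_emit_reloc_ring_full(ring, target, 0);  /* IB from ring to target */

    fd_ringbuffer_flush(ring);                  /* submit the toplevel ring */

    fd_ringbuffer_del(target);
    fd_ringbuffer_del(ring);
    fd_pipe_del(pipe);
}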

View file

@ -37,29 +37,7 @@
*/
struct fd_ringbuffer_funcs;
enum fd_ringbuffer_flags {
/* Ringbuffer is a "state object", which is potentially reused
* many times, rather than being used in one-shot mode linked
* to a parent ringbuffer.
*/
FD_RINGBUFFER_OBJECT = 0x1,
/* Hint that the stateobj will be used for streaming state
* that is used once or a few times and then discarded.
*
* For sub-allocation, non streaming stateobj's should be
* sub-allocated from a page size buffer, so one long lived
* state obj doesn't prevent other pages from being freed.
* (Ie. it would be no worse than allocating a page sized
* bo for each small non-streaming stateobj).
*
* But streaming stateobj's could be sub-allocated from a
* larger buffer to reduce the alloc/del overhead.
*/
FD_RINGBUFFER_STREAMING = 0x2,
};
struct fd_ringmarker;
struct fd_ringbuffer {
int size;
@ -74,35 +52,17 @@ struct fd_ringbuffer {
*/
void *user;
enum fd_ringbuffer_flags flags;
/* This is a bit gross, but we can't use atomic_t in exported
* headers. OTOH, we don't need the refcnt to be publicly
* visible. The only reason that this struct is exported is
* because fd_ringbuffer_emit needs to be something that can
* be inlined for performance reasons.
*/
union {
#ifdef HAS_ATOMIC_OPS
atomic_t refcnt;
#endif
uint64_t __pad;
};
uint32_t flags;
};
struct fd_ringbuffer * fd_ringbuffer_new(struct fd_pipe *pipe,
uint32_t size);
will_be_deprecated
struct fd_ringbuffer * fd_ringbuffer_new_object(struct fd_pipe *pipe,
uint32_t size);
struct fd_ringbuffer * fd_ringbuffer_new_flags(struct fd_pipe *pipe,
uint32_t size, enum fd_ringbuffer_flags flags);
struct fd_ringbuffer *fd_ringbuffer_ref(struct fd_ringbuffer *ring);
void fd_ringbuffer_del(struct fd_ringbuffer *ring);
void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
struct fd_ringbuffer *parent);
will_be_deprecated
void fd_ringbuffer_reset(struct fd_ringbuffer *ring);
int fd_ringbuffer_flush(struct fd_ringbuffer *ring);
/* in_fence_fd: -1 for no in-fence, else fence fd
@ -134,9 +94,18 @@ struct fd_reloc {
void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
will_be_deprecated void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
will_be_deprecated void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
struct fd_ringmarker *target, struct fd_ringmarker *end);
uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring);
uint32_t fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx);
uint32_t fd_ringbuffer_size(struct fd_ringbuffer *ring);
will_be_deprecated struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring);
will_be_deprecated void fd_ringmarker_del(struct fd_ringmarker *marker);
will_be_deprecated void fd_ringmarker_mark(struct fd_ringmarker *marker);
will_be_deprecated uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
struct fd_ringmarker *end);
will_be_deprecated int fd_ringmarker_flush(struct fd_ringmarker *marker);
#endif /* FREEDRENO_RINGBUFFER_H_ */

View file

@ -1,4 +1,4 @@
This is a historical description of what is now the kgsl backend
This is a historical discription of what is now the kgsl backend
in libdrm freedreno (before the upstream drm/msm driver). Note
that the kgsl backend requires the "kgsl-drm" shim driver, which
usually is in disrepair (QCOM does not build it for android), and

View file

@ -28,6 +28,8 @@
#include "kgsl_priv.h"
#include <linux/fb.h>
static int set_memtype(struct fd_device *dev, uint32_t handle, uint32_t flags)
{
struct drm_kgsl_gem_memtype req = {
@ -175,7 +177,7 @@ drm_private struct fd_bo * kgsl_bo_from_handle(struct fd_device *dev,
return bo;
}
drm_public struct fd_bo *
struct fd_bo *
fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
struct fd_bo *bo;

View file

@ -28,7 +28,6 @@
#include <assert.h>
#include "xf86atomic.h"
#include "freedreno_ringbuffer.h"
#include "kgsl_priv.h"
@ -175,12 +174,13 @@ static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
}
static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx)
struct fd_ringbuffer *target, uint32_t cmd_idx,
uint32_t submit_offset, uint32_t size)
{
struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
assert(cmd_idx == 0);
(*ring->cur++) = target_ring->bo->gpuaddr;
return offset_bytes(target->cur, target->start);
(*ring->cur++) = target_ring->bo->gpuaddr + submit_offset;
return size;
}
static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
@ -216,8 +216,6 @@ drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
}
ring = &kgsl_ring->base;
atomic_set(&ring->refcnt, 1);
ring->funcs = &funcs;
ring->size = size;

View file

@ -39,14 +39,14 @@ if with_freedreno_kgsl
)
endif
libdrm_freedreno = library(
libdrm_freedreno = shared_library(
'drm_freedreno',
[files_freedreno, config_file],
c_args : libdrm_c_args,
c_args : warn_c_args,
include_directories : [inc_root, inc_drm],
dependencies : [dep_valgrind, dep_threads, dep_rt, dep_atomic_ops],
dependencies : [dep_valgrind, dep_pthread_stubs, dep_rt, dep_atomic_ops],
link_with : libdrm,
version : '1.@0@.0'.format(patch_ver),
version : '1.0.0',
install : true,
)
@ -55,26 +55,23 @@ ext_libdrm_freedreno = declare_dependency(
include_directories : [inc_drm, include_directories('.')],
)
meson.override_dependency('libdrm_freedreno', ext_libdrm_freedreno)
install_headers(
'freedreno_drmif.h', 'freedreno_ringbuffer.h',
subdir : 'freedreno'
)
pkg.generate(
libdrm_freedreno,
name : 'libdrm_freedreno',
libraries : libdrm_freedreno,
subdirs : ['.', 'libdrm', 'freedreno'],
version : meson.project_version(),
requires_private : 'libdrm',
description : 'Userspace interface to freedreno kernel DRM services',
)
test(
'freedreno-symbols-check',
symbols_check,
args : [
'--lib', libdrm_freedreno,
'--symbols-file', files('freedreno-symbols.txt'),
'--nm', prog_nm.full_path(),
],
'freedreno-symbol-check',
prog_bash,
env : env_test,
args : [files('freedreno-symbol-check'), libdrm_freedreno]
)

View file

@ -35,6 +35,7 @@
static void msm_device_destroy(struct fd_device *dev)
{
struct msm_device *msm_dev = to_msm_device(dev);
fd_bo_cache_cleanup(&msm_dev->ring_cache, 0);
free(msm_dev);
}
@ -57,6 +58,8 @@ drm_private struct fd_device * msm_device_new(int fd)
dev = &msm_dev->base;
dev->funcs = &funcs;
fd_bo_cache_init(&msm_dev->ring_cache, TRUE);
dev->bo_size = sizeof(struct msm_bo);
return dev;

View file

@ -25,6 +25,7 @@
#ifndef __MSM_DRM_H__
#define __MSM_DRM_H__
#include <stddef.h>
#include "drm.h"
#if defined(__cplusplus)
@ -201,12 +202,10 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
MSM_SUBMIT_SUDO | \
0)
/* Each cmdstream submit consists of a table of buffers involved, and

View file

@ -138,12 +138,6 @@ static void msm_pipe_destroy(struct fd_pipe *pipe)
{
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
close_submitqueue(pipe, msm_pipe->queue_id);
if (msm_pipe->suballoc_ring) {
fd_ringbuffer_del(msm_pipe->suballoc_ring);
msm_pipe->suballoc_ring = NULL;
}
free(msm_pipe);
}

View file

@ -57,17 +57,6 @@ struct msm_pipe {
uint32_t gmem;
uint32_t chip_id;
uint32_t queue_id;
/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
* the same underlying bo)..
*
* This takes advantage of each context having its own fd_pipe,
* so we don't have to worry about access from multiple threads.
*
* We also rely on previous stateobj having been fully constructed
* so we can reclaim extra space at its end.
*/
struct fd_ringbuffer *suballoc_ring;
};
static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)

View file

@ -29,7 +29,6 @@
#include <assert.h>
#include <inttypes.h>
#include "xf86atomic.h"
#include "freedreno_ringbuffer.h"
#include "msm_priv.h"
@ -46,14 +45,13 @@ struct msm_cmd {
DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
uint32_t size;
/* has cmd already been added to parent rb's submit.cmds table? */
int is_appended_to_submit;
};
struct msm_ringbuffer {
struct fd_ringbuffer base;
atomic_t refcnt;
/* submit ioctl related tables:
* Note that bos and cmds are tracked by the parent ringbuffer, since
* that is global to the submit ioctl call. The reloc's table is tracked
@ -74,7 +72,7 @@ struct msm_ringbuffer {
/* should have matching entries in submit.cmds: */
DECLARE_ARRAY(struct msm_cmd *, cmds);
/* List of physical cmdstream buffers (msm_cmd) associated with this
/* List of physical cmdstream buffers (msm_cmd) assocated with this
* logical fd_ringbuffer.
*
* Note that this is different from msm_ringbuffer::cmds (which
@ -88,24 +86,10 @@ struct msm_ringbuffer {
int is_growable;
unsigned cmd_count;
unsigned offset; /* for sub-allocated stateobj rb's */
unsigned seqno;
/* maps fd_bo to idx: */
void *bo_table;
/* maps msm_cmd to drm_msm_gem_submit_cmd in parent rb. Each rb has a
* list of msm_cmd's which correspond to each chunk of cmdstream in
* a 'growable' rb. For each of those we need to create one
* drm_msm_gem_submit_cmd in the parent rb which collects the state
* for the submit ioctl. Because we can have multiple IB's to the same
* target rb (for example, or same stateobj emit multiple times), and
* because in theory we can have multiple different rb's that have a
* reference to a given target, we need a hashtable to track this per
* rb.
*/
void *cmd_table;
};
static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
@ -113,28 +97,57 @@ static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
return (struct msm_ringbuffer *)x;
}
static void msm_ringbuffer_unref(struct fd_ringbuffer *ring);
static void msm_ringbuffer_ref(struct fd_ringbuffer *ring);
#define INIT_SIZE 0x1000
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private extern pthread_mutex_t table_lock;
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
static void ring_bo_del(struct fd_device *dev, struct fd_bo *bo)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
int ret;
pthread_mutex_lock(&table_lock);
ret = fd_bo_cache_free(&to_msm_device(dev)->ring_cache, bo);
pthread_mutex_unlock(&table_lock);
if (ret == 0)
return;
fd_bo_del(bo);
}
static struct fd_bo * ring_bo_new(struct fd_device *dev, uint32_t size)
{
struct fd_bo *bo;
bo = fd_bo_cache_alloc(&to_msm_device(dev)->ring_cache, &size, 0);
if (bo)
return bo;
bo = fd_bo_new(dev, size, 0);
if (!bo)
return NULL;
/* keep ringbuffer bo's out of the normal bo cache: */
bo->bo_reuse = FALSE;
return bo;
}
static void ring_cmd_del(struct msm_cmd *cmd)
{
fd_bo_del(cmd->ring_bo);
if (cmd->ring_bo)
ring_bo_del(cmd->ring->pipe->dev, cmd->ring_bo);
list_del(&cmd->list);
to_msm_ringbuffer(cmd->ring)->cmd_count--;
free(cmd->relocs);
free(cmd);
}
static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
enum fd_ringbuffer_flags flags)
static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
@ -143,48 +156,7 @@ static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
return NULL;
cmd->ring = ring;
/* TODO separate suballoc buffer for small non-streaming state, using
* smaller page-sized backing bo's.
*/
if (flags & FD_RINGBUFFER_STREAMING) {
struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
unsigned suballoc_offset = 0;
struct fd_bo *suballoc_bo = NULL;
if (msm_pipe->suballoc_ring) {
struct msm_ringbuffer *suballoc_ring = to_msm_ringbuffer(msm_pipe->suballoc_ring);
assert(msm_pipe->suballoc_ring->flags & FD_RINGBUFFER_OBJECT);
assert(suballoc_ring->cmd_count == 1);
suballoc_bo = current_cmd(msm_pipe->suballoc_ring)->ring_bo;
suballoc_offset = fd_ringbuffer_size(msm_pipe->suballoc_ring) +
suballoc_ring->offset;
suballoc_offset = ALIGN(suballoc_offset, 0x10);
if ((size + suballoc_offset) > suballoc_bo->size) {
suballoc_bo = NULL;
}
}
if (!suballoc_bo) {
cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, 0x8000, 0);
msm_ring->offset = 0;
} else {
cmd->ring_bo = fd_bo_ref(suballoc_bo);
msm_ring->offset = suballoc_offset;
}
if (msm_pipe->suballoc_ring)
fd_ringbuffer_del(msm_pipe->suballoc_ring);
msm_pipe->suballoc_ring = fd_ringbuffer_ref(ring);
} else {
cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
}
cmd->ring_bo = ring_bo_new(ring->pipe->dev, size);
if (!cmd->ring_bo)
goto fail;
@ -198,6 +170,13 @@ fail:
return NULL;
}
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
}
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
@ -249,43 +228,31 @@ static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t fl
return idx;
}
static int check_cmd_bo(struct fd_ringbuffer *ring,
struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
}
/* Ensure that submit has corresponding entry in cmds table for the
* target cmdstream buffer:
*
* Returns TRUE if new cmd added (else FALSE if it was already in
* the cmds table)
*/
static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
static void get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
uint32_t submit_offset, uint32_t size, uint32_t type)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
struct drm_msm_gem_submit_cmd *cmd;
uint32_t i;
void *val;
if (!msm_ring->cmd_table)
msm_ring->cmd_table = drmHashCreate();
/* figure out if we already have a cmd buf.. short-circuit hash
* lookup if:
* - target cmd has never been added to submit.cmds
* - target cmd is not a streaming stateobj (which unlike longer
* lived CSO stateobj, is not expected to be reused with multiple
* submits)
*/
if (target_cmd->is_appended_to_submit &&
!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING) &&
!drmHashLookup(msm_ring->cmd_table, (unsigned long)target_cmd, &val)) {
i = VOID2U64(val);
/* figure out if we already have a cmd buf: */
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
cmd = &msm_ring->submit.cmds[i];
assert(cmd->submit_offset == submit_offset);
assert(cmd->size == size);
assert(cmd->type == type);
assert(msm_ring->submit.bos[cmd->submit_idx].handle ==
target_cmd->ring_bo->handle);
return FALSE;
if ((cmd->submit_offset == submit_offset) &&
(cmd->size == size) &&
(cmd->type == type) &&
check_cmd_bo(ring, cmd, target_cmd->ring_bo))
return;
}
/* create cmd buf if not: */
@ -299,23 +266,27 @@ static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
cmd->size = size;
cmd->pad = 0;
target_cmd->is_appended_to_submit = TRUE;
if (!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING)) {
drmHashInsert(msm_ring->cmd_table, (unsigned long)target_cmd,
U642VOID(i));
}
target_cmd->size = size;
return TRUE;
}
static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
{
struct msm_cmd *cmd = current_cmd(ring);
uint8_t *base = fd_bo_map(cmd->ring_bo);
return base + to_msm_ringbuffer(ring)->offset;
return fd_bo_map(current_cmd(ring)->ring_bo);
}
static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
uint32_t start, uint32_t offset)
{
uint32_t i;
/* a binary search would be more clever.. */
for (i = start; i < msm_cmd->nr_relocs; i++) {
struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
if (reloc->submit_offset >= offset)
return i;
}
return i;
}
static void delete_cmds(struct msm_ringbuffer *msm_ring)
@ -334,20 +305,16 @@ static void flush_reset(struct fd_ringbuffer *ring)
for (i = 0; i < msm_ring->nr_bos; i++) {
struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
if (!msm_bo)
continue;
msm_bo->current_ring_seqno = 0;
fd_bo_del(&msm_bo->base);
}
for (i = 0; i < msm_ring->nr_cmds; i++) {
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
if (msm_cmd->ring == ring)
/* for each of the cmd buffers, clear their reloc's: */
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
struct msm_cmd *target_cmd = msm_ring->cmds[i];
if (target_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
continue;
if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
fd_ringbuffer_del(msm_cmd->ring);
target_cmd->nr_relocs = 0;
}
msm_ring->submit.nr_cmds = 0;
@ -360,11 +327,6 @@ static void flush_reset(struct fd_ringbuffer *ring)
msm_ring->bo_table = NULL;
}
if (msm_ring->cmd_table) {
drmHashDestroy(msm_ring->cmd_table);
msm_ring->cmd_table = NULL;
}
if (msm_ring->is_growable) {
delete_cmds(msm_ring);
} else {
@ -436,24 +398,6 @@ handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *state
relocs[i].reloc_idx = bo2idx(parent, bo, flags);
}
/* stateobj rb's could have reloc's to other stateobj rb's which didn't
* get propagated to the parent rb at _emit_reloc_ring() time (because
* the parent wasn't known then), so fix that up now:
*/
for (i = 0; i < msm_ring->nr_cmds; i++) {
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
if (msm_ring->cmds[i]->ring == stateobj)
continue;
assert(msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT);
if (get_cmd(parent, msm_cmd, cmd->submit_offset, cmd->size, cmd->type)) {
fd_ringbuffer_ref(msm_cmd->ring);
}
}
return relocs;
}
@ -461,10 +405,9 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
int in_fence_fd, int *out_fence_fd)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
struct drm_msm_gem_submit req = {
.flags = msm_pipe->pipe,
.queueid = msm_pipe->queue_id,
.flags = to_msm_pipe(ring->pipe)->pipe,
.queueid = to_msm_pipe(ring->pipe)->queue_id,
};
uint32_t i;
int ret;
@ -484,10 +427,12 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
/* for each of the cmd's fix up their reloc's: */
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
struct drm_msm_gem_submit_reloc *relocs = msm_cmd->relocs;
struct drm_msm_gem_submit_cmd *cmd;
unsigned nr_relocs = msm_cmd->nr_relocs;
uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
struct drm_msm_gem_submit_reloc *relocs = &msm_cmd->relocs[a];
unsigned nr_relocs = (b > a) ? b - a : 0;
/* for reusable stateobjs, the reloc table has reloc_idx that
* points into it's own private bos table, rather than the global
@ -500,7 +445,6 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
relocs, nr_relocs);
}
cmd = &msm_ring->submit.cmds[i];
cmd->relocs = VOID2U64(relocs);
cmd->nr_relocs = nr_relocs;
}
@ -535,6 +479,7 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
msm_ringbuffer_unref(msm_cmd->ring);
free(U642VOID(cmd->relocs));
}
}
@ -548,7 +493,7 @@ static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
assert(to_msm_ringbuffer(ring)->is_growable);
finalize_current_cmd(ring, ring->last_start);
ring_cmd_new(ring, size, 0);
ring_cmd_new(ring, size);
}
static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
@ -572,8 +517,7 @@ static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
reloc->reloc_offset = r->offset;
reloc->or = r->or;
reloc->shift = r->shift;
reloc->submit_offset = offset_bytes(ring->cur, ring->start) +
to_msm_ringbuffer(ring)->offset;
reloc->submit_offset = offset_bytes(ring->cur, ring->start);
addr = msm_bo->presumed;
if (reloc->shift < 0)
@ -598,8 +542,7 @@ static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
reloc_hi->reloc_offset = r->offset;
reloc_hi->or = r->orhi;
reloc_hi->shift = r->shift - 32;
reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start) +
to_msm_ringbuffer(ring)->offset;
reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start);
addr = msm_bo->presumed >> 32;
if (reloc_hi->shift < 0)
@ -611,16 +554,13 @@ static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
}
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx)
struct fd_ringbuffer *target, uint32_t cmd_idx,
uint32_t submit_offset, uint32_t size)
{
struct msm_cmd *cmd = NULL;
struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
uint32_t idx = 0;
int added_cmd = FALSE;
uint32_t size;
uint32_t submit_offset = msm_target->offset;
LIST_FOR_EACH_ENTRY(cmd, &msm_target->cmd_list, list) {
LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
if (idx == cmd_idx)
break;
idx++;
@ -628,7 +568,7 @@ static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
assert(cmd && (idx == cmd_idx));
if (idx < (msm_target->cmd_count - 1)) {
if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
/* All but the last cmd buffer is fully "baked" (ie. already has
* done get_cmd() to add it to the cmds table). But in this case,
* the size we get is invalid (since it is calculated from the
@ -637,9 +577,7 @@ static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
size = cmd->size;
} else {
struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
size = offset_bytes(target->cur, target->start);
added_cmd = get_cmd(parent, cmd, submit_offset, size,
MSM_SUBMIT_CMD_IB_TARGET_BUF);
get_cmd(parent, cmd, submit_offset, size, MSM_SUBMIT_CMD_IB_TARGET_BUF);
}
msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
@ -652,8 +590,8 @@ static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
* being flushed), mesa can't really guarantee that a stateobj isn't
* destroyed after emitted but before flush, so we must hold a ref:
*/
if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
fd_ringbuffer_ref(target);
if (target->flags & FD_RINGBUFFER_OBJECT) {
msm_ringbuffer_ref(target);
}
return size;
@ -664,10 +602,13 @@ static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
return to_msm_ringbuffer(ring)->cmd_count;
}
static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
static void msm_ringbuffer_unref(struct fd_ringbuffer *ring)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
if (!atomic_dec_and_test(&msm_ring->refcnt))
return;
flush_reset(ring);
delete_cmds(msm_ring);
@ -678,6 +619,12 @@ static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
free(msm_ring);
}
static void msm_ringbuffer_ref(struct fd_ringbuffer *ring)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
atomic_inc(&msm_ring->refcnt);
}
static const struct fd_ringbuffer_funcs funcs = {
.hostptr = msm_ringbuffer_hostptr,
.flush = msm_ringbuffer_flush,
@ -686,7 +633,7 @@ static const struct fd_ringbuffer_funcs funcs = {
.emit_reloc = msm_ringbuffer_emit_reloc,
.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
.cmd_count = msm_ringbuffer_cmd_count,
.destroy = msm_ringbuffer_destroy,
.destroy = msm_ringbuffer_unref,
};
drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
@ -709,15 +656,14 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
list_inithead(&msm_ring->cmd_list);
msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
atomic_set(&msm_ring->refcnt, 1);
ring = &msm_ring->base;
atomic_set(&ring->refcnt, 1);
ring->funcs = &funcs;
ring->size = size;
ring->pipe = pipe; /* needed in ring_cmd_new() */
ring_cmd_new(ring, size, flags);
ring_cmd_new(ring, size);
return ring;
}

View file

@ -1,84 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Collabora, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Helper script that reads drm_fourcc.h and writes a static table with the
# simpler format token modifiers
import sys
import re
filename = sys.argv[1]
towrite = sys.argv[2]
fm_re = {
'intel': r'^#define I915_FORMAT_MOD_(\w+)',
'others': r'^#define DRM_FORMAT_MOD_((?:ARM|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
}
def print_fm_intel(f, f_mod):
f.write(' {{ DRM_MODIFIER_INTEL({}, {}) }},\n'.format(f_mod, f_mod))
# generic write func
def print_fm(f, vendor, mod, f_name):
f.write(' {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name))
with open(filename, "r") as f:
data = f.read()
for k, v in fm_re.items():
fm_re[k] = re.findall(v, data, flags=re.M)
with open(towrite, "w") as f:
f.write('''\
/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
that script instead of adding here entries manually! */
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
''')
f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID) },\n')
f.write(' { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
for entry in fm_re['intel']:
print_fm_intel(f, entry)
for entry in fm_re['others']:
(vendor, mod) = entry.split('_', 1)
if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
continue
print_fm(f, vendor, mod, mod)
f.write('''\
};
''')
f.write('''\
static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
''')
for entry in fm_re['vendors']:
f.write(" {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
f.write('''\
};
''')
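
For reference, given a drm_fourcc.h that defined only I915_FORMAT_MOD_X_TILED and DRM_FORMAT_MOD_VENDOR_INTEL, the script above would emit roughly the following (whitespace approximate):

/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
   that script instead of adding here entries manually! */
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
    { DRM_MODIFIER_INVALID(NONE, INVALID) },
    { DRM_MODIFIER_LINEAR(NONE, LINEAR) },
    { DRM_MODIFIER_INTEL(X_TILED, X_TILED) },
};

static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
    { DRM_FORMAT_MOD_VENDOR_INTEL, "INTEL" },
};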

View file

@ -71,7 +71,7 @@ Note: One should not do _any_ changes to the files apart from the steps below.
In order to update the files do the following:
- Switch to a Linux kernel tree/branch which is not rebased.
For example: drm-next (https://gitlab.freedesktop.org/drm/kernel/)
For example: airlied/drm-next
- Install the headers via `make headers_install' to a separate location.
- Copy the drm header[s] + git add + git commit.
- Note: Your commit message must include:
@ -122,6 +122,11 @@ omap_drm.h (living in $TOP/omap)
- License mismatch, missing DRM_IOCTL_OMAP_GEM_NEW and related struct
Status: ?
msm_drm.h (located in $TOP/freedreno/msm/)
- License mismatch, missing MSM_PIPE_*, MSM_SUBMIT_*. Renamed
drm_msm_gem_submit::flags, missing drm_msm_gem_submit::fence_fd.
Status: ?
exynos_drm.h (living in $TOP/exynos)
- License mismatch, now using fixed size ints (but not everywhere). Lots of
new stuff.

View file

@ -54,9 +54,6 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@ -74,50 +71,13 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
*
* %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
* Memory in this pool could be swapped out to disk if there is pressure.
*
* %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
* GPU's virtual address space via gart. Gart memory linearizes non-contiguous
* pages of system memory, allows GPU access system memory in a linearized
* fashion.
*
* %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
* carved out by the BIOS.
*
* %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
* across shader threads.
*
* %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
* execution of all the waves on a device.
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@ -127,56 +87,14 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag that create shadow bo(GTT) while allocating vram bo */
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
* for the second page onward should be set to NC. It should never
* be used by user space applications.
*/
#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
/* Flag that BO will be encrypted and that the TMZ bit should be
* set in the PTEs when mapping this buffer via GPUVM or
* accessing it with various hw blocks
*/
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
/* Flag that BO will be used only in preemptible context, which does
* not require GTT memory accounting
*/
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
/* Flag that BO can be discarded under memory pressure without keeping the
* content.
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
* May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_COHERENT (1 << 13)
/* Flag that BO should not be cached by GPU. Coherent without having to flush
* GPU caches explicitly
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
* atomics. May depend on GPU instructions to flush caches to device scope
* explicitly, promoting them to system scope automatically.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
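A minimal sketch of how userspace combines these domain and creation-flag bits when allocating a buffer object through DRM_AMDGPU_GEM_CREATE (assumes an open render-node fd; drmCommandWriteRead comes from libdrm's xf86drm.h; error handling trimmed):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch: allocate a 1 MiB CPU-accessible, pre-cleared VRAM buffer object. */
static int alloc_vram_bo(int fd, uint32_t *handle)
{
    union drm_amdgpu_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));
    args.in.bo_size      = 1 << 20;
    args.in.alignment    = 4096;
    args.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
    args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                           AMDGPU_GEM_CREATE_VRAM_CLEARED;

    r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &args, sizeof(args));
    if (r)
        return r;

    *handle = args.out.handle;   /* GEM handle of the new buffer object */
    return 0;
}
```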
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -243,8 +161,6 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4
#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
@ -255,45 +171,28 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occurred after ctx created */
/* indicate gpu reset occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occurred after ctx created */
/* indicate vram lost occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
#define AMDGPU_CTX_PRIORITY_LOW -512
#define AMDGPU_CTX_PRIORITY_NORMAL 0
/*
* When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
* CAP_SYS_NICE or DRM_MASTER
*/
/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
/* select a stable profiling pstate for perfmon tools */
#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
/** Flags */
/** For future use, no flags defined so far */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
};
@ -311,11 +210,6 @@ union drm_amdgpu_ctx_out {
/** Reset status since the last call of the ioctl. */
__u32 reset_status;
} state;
struct {
__u32 flags;
__u32 _pad;
} pstate;
};
union drm_amdgpu_ctx {
@ -323,261 +217,6 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
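As a sketch of how the query flags above are consumed, the following checks a context for reset/VRAM-loss events via AMDGPU_CTX_OP_QUERY_STATE2 (assumes ctx_id came from an earlier AMDGPU_CTX_OP_ALLOC_CTX; the state.flags field sits in the part of drm_amdgpu_ctx_out not shown in this hunk):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch: ask whether a GPU reset or VRAM loss affected an existing context. */
static int check_ctx_reset(int fd, uint32_t ctx_id)
{
    union drm_amdgpu_ctx args;
    int r;

    memset(&args, 0, sizeof(args));
    args.in.op     = AMDGPU_CTX_OP_QUERY_STATE2;
    args.in.ctx_id = ctx_id;

    r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
    if (r)
        return r;

    if (args.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_RESET)
        printf("context %u saw a GPU reset\n", ctx_id);
    if (args.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST)
        printf("context %u lost VRAM contents\n", ctx_id);
    return 0;
}
```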
/* user queue IOCTL operations */
#define AMDGPU_USERQ_OP_CREATE 1
#define AMDGPU_USERQ_OP_FREE 2
/* queue priority levels */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
/* for queues that need access to protected content */
#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
/*
* This structure is a container to pass input configuration
* info for all supported userqueue related operations.
* For operation AMDGPU_USERQ_OP_CREATE: user is expected
* to set all fields, except the parameter 'queue_id'.
* For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
* to be set is 'queue_id', everything else is ignored.
*/
struct drm_amdgpu_userq_in {
/** AMDGPU_USERQ_OP_* */
__u32 op;
/** Queue id passed for operation USERQ_OP_FREE */
__u32 queue_id;
/** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
__u32 ip_type;
/**
* @doorbell_handle: the handle of doorbell GEM object
* associated to this userqueue client.
*/
__u32 doorbell_handle;
/**
* @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
* Kernel will generate absolute doorbell offset using doorbell_handle
* and doorbell_offset in the doorbell bo.
*/
__u32 doorbell_offset;
/**
* @flags: flags used for queue parameters
*/
__u32 flags;
/**
* @queue_va: Virtual address of the GPU memory which holds the queue
* object. The queue holds the workload packets.
*/
__u64 queue_va;
/**
* @queue_size: Size of the queue in bytes, this needs to be 256-byte
* aligned.
*/
__u64 queue_size;
/**
* @rptr_va : Virtual address of the GPU memory which holds the ring RPTR.
* This object must be at least 8 byte in size and aligned to 8-byte offset.
*/
__u64 rptr_va;
/**
* @wptr_va : Virtual address of the GPU memory which holds the ring WPTR.
* This object must be at least 8 byte in size and aligned to 8-byte offset.
*
* Queue, RPTR and WPTR can come from the same object, as long as the size
* and alignment related requirements are met.
*/
__u64 wptr_va;
/**
* @mqd: MQD (memory queue descriptor) is a set of parameters which allow
* the GPU to uniquely define and identify a usermode queue.
*
* MQD data can be of different size for different GPU IP/engine and
* their respective versions/revisions, so this points to a __u64 *
* which holds IP specific MQD of this usermode queue.
*/
__u64 mqd;
/**
* @size: size of MQD data in bytes, it must match the MQD structure
* size of the respective engine/revision defined in UAPI for ex, for
* gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11).
*/
__u64 mqd_size;
};
/* The structure to carry output of userqueue ops */
struct drm_amdgpu_userq_out {
/**
* For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
* queue ID to represent the newly created userqueue in the system, otherwise
* it should be ignored.
*/
__u32 queue_id;
__u32 _pad;
};
union drm_amdgpu_userq {
struct drm_amdgpu_userq_in in;
struct drm_amdgpu_userq_out out;
};
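A hedged sketch of driving AMDGPU_USERQ_OP_CREATE with the structures above (only works on kernels that expose the user-queue interface; the GPU virtual addresses, doorbell BO and MQD contents are placeholders the caller must have prepared beforehand):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch: create a GFX user queue.  queue/rptr/wptr VAs and the MQD are assumed
 * to be set up already; db_handle/db_offset describe the doorbell BO. */
static int create_gfx_userq(int fd, uint32_t db_handle, uint32_t db_offset,
                            uint64_t queue_va, uint64_t rptr_va, uint64_t wptr_va,
                            struct drm_amdgpu_userq_mqd_gfx11 *mqd,
                            uint32_t *queue_id)
{
    union drm_amdgpu_userq args;
    int r;

    memset(&args, 0, sizeof(args));
    args.in.op              = AMDGPU_USERQ_OP_CREATE;
    args.in.ip_type         = AMDGPU_HW_IP_GFX;
    args.in.doorbell_handle = db_handle;
    args.in.doorbell_offset = db_offset;
    args.in.queue_va        = queue_va;
    args.in.queue_size      = 256 * 1024;     /* must be 256-byte aligned */
    args.in.rptr_va         = rptr_va;
    args.in.wptr_va         = wptr_va;
    args.in.mqd             = (uintptr_t)mqd; /* IP-specific MQD blob */
    args.in.mqd_size        = sizeof(*mqd);

    r = drmCommandWriteRead(fd, DRM_AMDGPU_USERQ, &args, sizeof(args));
    if (!r)
        *queue_id = args.out.queue_id;
    return r;
}
```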
/* GFX V11 IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_gfx11 {
/**
* @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 shadow_va;
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* Use AMDGPU_INFO_IOCTL to find the exact size of the object.
*/
__u64 csa_va;
};
/* GFX V11 SDMA IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_sdma_gfx11 {
/**
* @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
* This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 csa_va;
};
/* GFX V11 Compute IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_compute_gfx11 {
/**
* @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
* This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
* to get the size.
*/
__u64 eop_va;
};
/* userq signal/wait ioctl */
struct drm_amdgpu_userq_signal {
/**
* @queue_id: Queue handle used by the userq fence creation function
* to retrieve the WPTR.
*/
__u32 queue_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to be signaled.
*/
__u64 syncobj_handles;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u64 num_syncobj_handles;
/**
* @bo_read_handles: The list of BO handles that the submitted user queue job
* is using for read only. This will update BO fences in the kernel.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of BO handles that the submitted user queue job
* is using for write only. This will update BO fences in the kernel.
*/
__u64 bo_write_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
};
struct drm_amdgpu_userq_fence_info {
/**
* @va: A gpu address allocated for each queue which stores the
* read pointer (RPTR) value.
*/
__u64 va;
/**
* @value: A 64 bit value represents the write pointer (WPTR) of the
* queue commands which compared with the RPTR value to signal the
* fences.
*/
__u64 value;
};
struct drm_amdgpu_userq_wait {
/**
* @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
* wait queue and maintain the fence driver references in it.
*/
__u32 waitq_id;
__u32 pad;
/**
* @syncobj_handles: The list of syncobj handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 syncobj_handles;
/**
* @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
* the user queue job to get the va/value pairs at given @syncobj_timeline_points.
*/
__u64 syncobj_timeline_handles;
/**
* @syncobj_timeline_points: The list of timeline syncobj points submitted by the
* user queue job for the corresponding @syncobj_timeline_handles.
*/
__u64 syncobj_timeline_points;
/**
* @bo_read_handles: The list of read BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_read_handles;
/**
* @bo_write_handles: The list of write BO handles submitted by the user queue
* job to get the va/value pairs.
*/
__u64 bo_write_handles;
/**
* @num_syncobj_timeline_handles: A count that represents the number of timeline
* syncobj handles in @syncobj_timeline_handles.
*/
__u16 num_syncobj_timeline_handles;
/**
* @num_fences: This field can be used both as input and output. As input it defines
* the maximum number of fences that can be returned and as output it will specify
* how many fences were actually returned from the ioctl.
*/
__u16 num_fences;
/**
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
__u32 num_syncobj_handles;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
*/
__u32 num_bo_read_handles;
/**
* @num_bo_write_handles: A count that represents the number of write BO handles in
* @bo_write_handles.
*/
__u32 num_bo_write_handles;
/**
* @out_fences: The field is a return value from the ioctl containing the list of
* address/value pairs to wait for.
*/
__u64 out_fences;
};
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@ -600,15 +239,13 @@ union drm_amdgpu_vm {
/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2
struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
__u32 fd;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
__u32 ctx_id;
__u32 flags;
};
union drm_amdgpu_sched {
@ -653,30 +290,9 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
/* GFX9 - GFX11: */
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* GFX12 and later: */
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
/* These are DCC recompression setting for memory management: */
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
@ -824,18 +440,14 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
/* Use Non Coherent MTYPE instead of default MTYPE */
/* Use NC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
/* Use Write Combine MTYPE instead of default MTYPE */
/* Use WC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
/* Use Cache Coherent MTYPE instead of default MTYPE */
/* Use CC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UnCached MTYPE instead of default MTYPE */
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
/* don't allocate MALL */
#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
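A sketch of how the MTYPE bits combine with the generic mapping flags when binding a BO into the GPU VM (AMDGPU_VA_OP_MAP and the AMDGPU_VM_PAGE_READABLE/WRITEABLE bits come from a part of the header not shown in this hunk):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch: map a whole BO read/write at `va`, requesting write-combined MTYPE. */
static int map_bo(int fd, uint32_t handle, uint64_t va, uint64_t size)
{
    struct drm_amdgpu_gem_va args;

    memset(&args, 0, sizeof(args));
    args.handle       = handle;
    args.operation    = AMDGPU_VA_OP_MAP;
    args.flags        = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_MTYPE_WC;
    args.va_address   = va;
    args.offset_in_bo = 0;
    args.map_size     = size;   /* must be page aligned */

    return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &args, sizeof(args));
}
```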
struct drm_amdgpu_gem_va {
/** GEM object handle */
@ -851,19 +463,6 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
/**
* vm_timeline_point is a sequence number used to add new timeline point.
*/
__u64 vm_timeline_point;
/**
* The vm page table update fence is installed in given vm_timeline_syncobj_out
* at vm_timeline_point.
*/
__u32 vm_timeline_syncobj_out;
/** the number of syncobj handles in @input_fence_syncobj_handles */
__u32 num_syncobj_handles;
/** Array of sync object handle to wait for given input fences */
__u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
@ -873,14 +472,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
/*
* From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
* both encoding and decoding jobs.
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_NUM 8
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@ -889,11 +482,6 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -907,7 +495,7 @@ struct drm_amdgpu_cs_in {
/** Handle of resource list associated with CS */
__u32 bo_list_handle;
__u32 num_chunks;
__u32 flags;
__u32 _pad;
/** this points to __u64 * which point to cs chunks */
__u64 chunks;
};
@ -932,23 +520,6 @@ union drm_amdgpu_cs {
/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
/* The IB fence should do the L2 writeback but not invalidate any shader
* caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
* This will reset wave ID counters for the IB.
*/
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
/* Flag the IB as secure (TMZ)
*/
#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
/* Tell KMD to flush and invalidate caches
*/
#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@ -982,12 +553,6 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
struct drm_amdgpu_cs_chunk_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
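A sketch of how a timeline-syncobj dependency would be expressed as a CS chunk using the IDs above (the length_dw/chunk_data members of struct drm_amdgpu_cs_chunk come from a part of the header not shown in this hunk):

```c
#include <stdint.h>
#include <amdgpu_drm.h>

/* Sketch: "wait for point 42 on timeline syncobj `handle` before running this
 * submission", expressed as one chunk of a drm_amdgpu_cs submission. */
static void fill_timeline_wait_chunk(struct drm_amdgpu_cs_chunk *chunk,
                                     struct drm_amdgpu_cs_chunk_syncobj *syncobj,
                                     uint32_t handle)
{
    syncobj->handle = handle;
    syncobj->flags  = 0;        /* no special wait behaviour requested */
    syncobj->point  = 42;

    chunk->chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT;
    chunk->length_dw  = sizeof(*syncobj) / 4;
    chunk->chunk_data = (uintptr_t)syncobj;
}
```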
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
@ -1010,33 +575,12 @@ struct drm_amdgpu_cs_chunk_data {
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
__u64 flags;
};
/*
/**
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
/*
* Query h/w info: Flag identifying VF/PF/PT mode
*
*/
#define AMDGPU_IDS_FLAGS_MODE_MASK 0x300
#define AMDGPU_IDS_FLAGS_MODE_SHIFT 0x8
#define AMDGPU_IDS_FLAGS_MODE_PF 0x0
#define AMDGPU_IDS_FLAGS_MODE_VF 0x1
#define AMDGPU_IDS_FLAGS_MODE_PT 0x2
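The mode is a small bit-field inside drm_amdgpu_info_device::ids_flags; decoding it is just a mask and shift, e.g.:

```c
#include <stdint.h>
#include <amdgpu_drm.h>

/* Sketch: extract the PF/VF/PT mode from drm_amdgpu_info_device::ids_flags,
 * for comparison against AMDGPU_IDS_FLAGS_MODE_PF / _VF / _PT. */
static unsigned int amdgpu_ids_mode(uint64_t ids_flags)
{
    return (ids_flags & AMDGPU_IDS_FLAGS_MODE_MASK) >> AMDGPU_IDS_FLAGS_MODE_SHIFT;
}
```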
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@ -1076,34 +620,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_FW_ASD 0x0d
/* Subquery id: Query VCN firmware version */
#define AMDGPU_INFO_FW_VCN 0x0e
/* Subquery id: Query GFX RLC SRLC firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
/* Subquery id: Query GFX RLC SRLG firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
/* Subquery id: Query GFX RLC SRLS firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
/* Subquery id: Query TOC firmware version */
#define AMDGPU_INFO_FW_TOC 0x15
/* Subquery id: Query CAP firmware version */
#define AMDGPU_INFO_FW_CAP 0x16
/* Subquery id: Query GFX RLCP firmware version */
#define AMDGPU_INFO_FW_GFX_RLCP 0x17
/* Subquery id: Query GFX RLCV firmware version */
#define AMDGPU_INFO_FW_GFX_RLCV 0x18
/* Subquery id: Query MES_KIQ firmware version */
#define AMDGPU_INFO_FW_MES_KIQ 0x19
/* Subquery id: Query MES firmware version */
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* Subquery id: Query VPE firmware version */
#define AMDGPU_INFO_FW_VPE 0x1c
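Each of the subquery IDs above selects one firmware blob; a sketch of reading one of them through the libdrm_amdgpu wrapper (amdgpu_query_firmware_version() issues the AMDGPU_INFO ioctl internally; assumes `dev` was obtained from amdgpu_device_initialize()):

```c
#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Sketch: print the VCN firmware version and feature level. */
static void print_vcn_fw(amdgpu_device_handle dev)
{
    uint32_t version = 0, feature = 0;

    if (!amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCN,
                                       0 /* ip instance */, 0 /* index */,
                                       &version, &feature))
        printf("VCN firmware: version 0x%08x, feature %u\n", version, feature);
}
```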
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@ -1132,8 +648,6 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
/* Subquery id: Query vbios info */
#define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@ -1156,57 +670,9 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Subquery id: Query GPU peak pstate shader clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
/* Subquery id: Query GPU peak pstate memory clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Subquery id: Query input GPU power */
#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
/* RAS MASK: GFX */
#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
/* RAS MASK: MMHUB */
#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
/* RAS MASK: ATHUB */
#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
/* RAS MASK: PCIE */
#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
/* RAS MASK: HDP */
#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
/* RAS MASK: XGMI */
#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
/* RAS MASK: DF */
#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
/* RAS MASK: SMN */
#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
/* RAS MASK: SEM */
#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
/* RAS MASK: MP0 */
#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
/* RAS MASK: MP1 */
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS 0x21
/* Subquery id: Decode */
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
/* query FW object size and alignment */
#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@ -1274,10 +740,6 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
struct {
__u32 type;
} video_cap;
};
};
@ -1338,15 +800,6 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
struct drm_amdgpu_info_vbios {
__u8 name[64];
__u8 vbios_pn[64];
__u32 version;
__u32 pad;
__u8 vbios_ver_str[32];
__u8 date[32];
};
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@ -1356,10 +809,6 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
struct drm_amdgpu_info_device {
/** PCI Device ID */
@ -1385,8 +834,7 @@ struct drm_amdgpu_info_device {
__u32 enabled_rb_pipes_mask;
__u32 num_rb_pipes;
__u32 num_hw_gfx_contexts;
/* PCIe version (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_gen;
__u32 _pad;
__u64 ids_flags;
/** Starting virtual address for UMDs. */
__u64 virtual_address_offset;
@ -1433,41 +881,13 @@ struct drm_amdgpu_info_device {
__u32 gs_prim_buffer_depth;
/* max gs wavefront per vgt*/
__u32 max_gs_waves_per_vgt;
/* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
__u32 pcie_num_lanes;
__u32 _pad1;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
/** Starting high virtual address for UMDs. */
__u64 high_va_offset;
/** The maximum high virtual address */
__u64 high_va_max;
/* gfx10 pa_sc_tile_steering_override */
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
__u64 min_engine_clock;
__u64 min_memory_clock;
/* The following fields are only set on gfx11+, older chips set 0. */
__u32 tcp_cache_size; /* AKA GL0, VMEM cache */
__u32 num_sqc_per_wgp;
__u32 sqc_data_cache_size; /* AKA SMEM cache */
__u32 sqc_inst_cache_size;
__u32 gl1c_cache_size;
__u32 gl2c_cache_size;
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
/* shadow area size for gfx11 */
__u32 shadow_size;
/* shadow area base virtual alignment for gfx11 */
__u32 shadow_alignment;
/* context save area size for gfx11 */
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
/* Userq IP mask (1 << AMDGPU_HW_IP_*) */
__u32 userq_ip_mask;
__u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@ -1482,29 +902,7 @@ struct drm_amdgpu_info_hw_ip {
__u32 ib_size_alignment;
/** Bitmask of available rings. Bit 0 means ring 0, etc. */
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
};
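A sketch of querying this structure through libdrm_amdgpu and decoding ip_discovery_version with the 23:16/15:8/7:0 layout noted above (assumes `dev` from amdgpu_device_initialize()):

```c
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Sketch: report the GFX IP block's available rings and discovered IP version. */
static void print_gfx_ip(amdgpu_device_handle dev)
{
    struct drm_amdgpu_info_hw_ip info = {0};

    if (amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &info))
        return;

    printf("rings: 0x%x, IP v%u.%u.%u\n", info.available_rings,
           (info.ip_discovery_version >> 16) & 0xff,
           (info.ip_discovery_version >> 8) & 0xff,
           info.ip_discovery_version & 0xff);
}
```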
/* GFX metadata BO sizes and alignment info (in bytes) */
struct drm_amdgpu_info_uq_fw_areas_gfx {
/* shadow area size */
__u32 shadow_size;
/* shadow area base virtual mem alignment */
__u32 shadow_alignment;
/* context save area size */
__u32 csa_size;
/* context save area base virtual mem alignment */
__u32 csa_alignment;
};
/* IP specific metadata related information used in the
* subquery AMDGPU_INFO_UQ_FW_AREAS
*/
struct drm_amdgpu_info_uq_fw_areas {
union {
struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
};
__u32 _pad;
};
struct drm_amdgpu_info_num_handles {
@ -1532,44 +930,6 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
struct drm_amdgpu_info_video_codec_info {
__u32 valid;
__u32 max_width;
__u32 max_height;
__u32 max_pixels_per_frame;
__u32 max_level;
__u32 pad;
};
struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
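A sketch of filling the caps structure with the raw AMDGPU_INFO ioctl (the return_pointer/return_size/query members of struct drm_amdgpu_info sit in a part of the header not shown in this hunk; the video_cap.type member appears in the union earlier in this diff):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Sketch: fetch the decode capability table; individual codecs can then be
 * checked via caps->codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC].valid etc. */
static int query_decode_caps(int fd, struct drm_amdgpu_info_video_caps *caps)
{
    struct drm_amdgpu_info request;

    memset(&request, 0, sizeof(request));
    request.return_pointer = (uintptr_t)caps;
    request.return_size    = sizeof(*caps);
    request.query          = AMDGPU_INFO_VIDEO_CAPS;
    request.video_cap.type = AMDGPU_INFO_VIDEO_CAPS_DECODE;

    return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
}
```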
#define AMDGPU_VMHUB_TYPE_MASK 0xff
#define AMDGPU_VMHUB_TYPE_SHIFT 0
#define AMDGPU_VMHUB_TYPE_GFX 0
#define AMDGPU_VMHUB_TYPE_MM0 1
#define AMDGPU_VMHUB_TYPE_MM1 2
#define AMDGPU_VMHUB_IDX_MASK 0xff00
#define AMDGPU_VMHUB_IDX_SHIFT 8
struct drm_amdgpu_info_gpuvm_fault {
__u64 addr;
__u32 status;
__u32 vmhub;
};
/*
* Supported GPU families
*/
@ -1581,15 +941,6 @@ struct drm_amdgpu_info_gpuvm_fault {
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
#define AMDGPU_FAMILY_GC_11_0_0 145 /* GC 11.0.0 */
#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
#if defined(__cplusplus)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -73,11 +73,15 @@ struct drm_nouveau_gpuobj_free {
uint32_t handle;
};
/* FIXME : maybe unify {GET,SET}PARAMs */
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
@ -171,12 +175,12 @@ struct drm_nouveau_gem_pushbuf {
__u64 push;
__u32 suffix0;
__u32 suffix1;
#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
__u64 vram_available;
__u64 gart_available;
};
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
struct drm_nouveau_gem_cpu_prep {
__u32 handle;
@ -187,68 +191,29 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
enum nouveau_bus_type {
NV_AGP = 0,
NV_PCI = 1,
NV_PCIE = 2,
};
struct drm_nouveau_sarea {
};
#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
struct drm_nouveau_svm_init {
__u64 unmanaged_addr;
__u64 unmanaged_size;
};
struct drm_nouveau_svm_bind {
__u64 header;
__u64 va_start;
__u64 va_end;
__u64 npages;
__u64 stride;
__u64 result;
__u64 reserved0;
__u64 reserved1;
};
#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
#define NOUVEAU_SVM_BIND_TARGET_BITS 32
#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
/*
* Below is used to validate ioctl arguments; userspace can also use it to make
* sure that no bit are set beyond known fields for a given kernel version.
*/
#define NOUVEAU_SVM_BIND_VALID_BITS 48
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
/*
* NOUVEAU_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
* result: number of pages successfully migrated to the target memory.
*/
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
/*
* NOUVEAU_SVM_BIND_HEADER_TARGET__GPU_VRAM: target the GPU VRAM memory.
*/
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
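A sketch of how the header word is packed from the shift/mask values above when requesting a VRAM migration (npages/stride/result are left zero here; DRM_NOUVEAU_SVM_BIND is the ioctl index defined earlier in this file):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <nouveau_drm.h>

/* Sketch: ask the kernel to migrate the virtual range [start, end) into GPU VRAM. */
static int migrate_range_to_vram(int fd, uint64_t start, uint64_t end)
{
    struct drm_nouveau_svm_bind bind;

    memset(&bind, 0, sizeof(bind));
    bind.header  = NOUVEAU_SVM_BIND_COMMAND__MIGRATE << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
    bind.header |= (uint64_t)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
                   << NOUVEAU_SVM_BIND_TARGET_SHIFT;
    bind.va_start = start;
    bind.va_end   = end;

    return drmCommandWriteRead(fd, DRM_NOUVEAU_SVM_BIND, &bind, sizeof(bind));
}
```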
#if defined(__cplusplus)
}
#endif

View file

@ -24,6 +24,7 @@
#ifndef QXL_DRM_H
#define QXL_DRM_H
#include <stddef.h>
#include "drm.h"
#if defined(__cplusplus)
@ -88,6 +89,7 @@ struct drm_qxl_command {
__u32 pad;
};
/* XXX: call it drm_qxl_commands? */
struct drm_qxl_execbuffer {
__u32 flags; /* for future use */
__u32 commands_num;

File diff suppressed because it is too large Load diff

View file

@ -183,17 +183,10 @@ struct drm_vc4_submit_cl {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmonid;
/* Syncobj handle to wait on. If set, processing of this render job
* will not start until the syncobj is signaled. 0 means ignore.
/* Unused field to align this struct on 64 bits. Must be set to 0.
* If one ever needs to add an u32 field to this struct, this field
* can be used.
*/
__u32 in_sync;
/* Syncobj handle to export fence to. If set, the fence in the syncobj
* will be replaced with a fence that signals upon completion of this
* render job. 0 means ignore.
*/
__u32 out_sync;
__u32 pad2;
};

View file

@ -46,17 +46,6 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
@ -64,40 +53,17 @@ struct drm_virtgpu_map {
__u32 pad;
};
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
0)
struct drm_virtgpu_execbuffer_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 flags; /* for future use */
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
__u32 num_in_syncobjs;
__u32 num_out_syncobjs;
__u64 in_syncobjs;
__u64 out_syncobjs;
__u32 pad;
};
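A sketch of a submission that asks for an out-fence using the flags defined above (assumes `cmds` holds a capset-specific command stream; the DRM_IOCTL_VIRTGPU_EXECBUFFER wrapper is defined further down in this header):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <virtgpu_drm.h>

/* Sketch: submit a command buffer and receive a fence fd that signals on completion. */
static int submit_cmds(int fd, void *cmds, uint32_t size_bytes,
                       uint32_t *bo_handles, uint32_t num_bos, int *out_fence_fd)
{
    struct drm_virtgpu_execbuffer eb;
    int r;

    memset(&eb, 0, sizeof(eb));
    eb.flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT;
    eb.size           = size_bytes;
    eb.command        = (uintptr_t)cmds;
    eb.bo_handles     = (uintptr_t)bo_handles;
    eb.num_bo_handles = num_bos;
    eb.fence_fd       = -1;

    r = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
    if (!r)
        *out_fence_fd = eb.fence_fd;
    return r;
}
```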
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@ -127,7 +93,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
__u32 blob_mem;
__u32 stride;
};
struct drm_virtgpu_3d_box {
@ -144,8 +110,6 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@ -153,8 +117,6 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
__u32 stride;
__u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@ -163,12 +125,6 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
#define VIRTGPU_DRM_CAPSET_VIRGL 1
#define VIRTGPU_DRM_CAPSET_VIRGL2 2
#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTGPU_DRM_CAPSET_VENUS 4
#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
@ -177,60 +133,11 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST 0x0001
#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
/* zero is invalid blob_mem */
__u32 blob_mem;
__u32 blob_flags;
__u32 bo_handle;
__u32 res_handle;
__u64 size;
/*
* for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
* VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
*/
__u32 pad;
__u32 cmd_size;
__u64 cmd;
__u64 blob_id;
};
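A sketch of creating a mappable host-backed blob with this structure (requires a device reporting VIRTGPU_PARAM_RESOURCE_BLOB and VIRTGPU_PARAM_HOST_VISIBLE; blob_id is a host-side identifier previously negotiated through context commands):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <virtgpu_drm.h>

/* Sketch: create a host-visible blob resource and return its GEM handle. */
static int create_host_blob(int fd, uint64_t size, uint64_t blob_id,
                            uint32_t *bo_handle)
{
    struct drm_virtgpu_resource_create_blob args;
    int r;

    memset(&args, 0, sizeof(args));
    args.blob_mem   = VIRTGPU_BLOB_MEM_HOST3D;
    args.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
    args.size       = size;
    args.blob_id    = blob_id;

    r = drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &args);
    if (!r)
        *bo_handle = args.bo_handle;
    return r;
}
```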
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
};
struct drm_virtgpu_context_init {
__u32 num_params;
__u32 pad;
/* pointer to drm_virtgpu_context_set_param array */
__u64 ctx_set_params;
};
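A sketch of pinning the DRM file's context to the Venus capset with one ring, using the parameters above (context init can typically only be done once per open file, and the capset must be advertised via VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs):

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <virtgpu_drm.h>

/* Sketch: initialise the context for the Venus (Vulkan) capset with one ring. */
static int init_venus_context(int fd)
{
    struct drm_virtgpu_context_set_param params[] = {
        { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRTGPU_DRM_CAPSET_VENUS },
        { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 1 },
    };
    struct drm_virtgpu_context_init init;

    memset(&init, 0, sizeof(init));
    init.num_params     = 2;
    init.ctx_set_params = (uintptr_t)params;

    return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}
```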
/*
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
* effect. The event size is sizeof(drm_event), since there is no additional
* payload.
*/
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
@ -261,14 +168,6 @@ struct drm_virtgpu_context_init {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
struct drm_virtgpu_context_init)
#if defined(__cplusplus)
}
#endif

Some files were not shown because too many files have changed in this diff