mirror of
https://gitlab.freedesktop.org/mesa/drm.git
synced 2026-05-09 19:08:10 +02:00
Compare commits
No commits in common. "main" and "2.4.64" have entirely different histories.
354 changed files with 19999 additions and 45879 deletions
@ -1,23 +0,0 @@
# To use this config with your editor, follow the instructions at:
# http://editorconfig.org

root = true

[*]
charset = utf-8
insert_final_newline = true

[*.{c,h}]
indent_style = space
indent_size = 4

[{Makefile.*,*.mk}]
indent_style = tab

[*.m4]
indent_style = space
indent_size = 2

[{meson.build,meson_options.txt}]
indent_style = space
indent_size = 2
99
.gitignore
vendored
|
|
@ -1 +1,98 @@
|
|||
/build*
|
||||
bsd-core/*/@
|
||||
bsd-core/*/machine
|
||||
*~
|
||||
*.1
|
||||
*.3
|
||||
*.5
|
||||
*.7
|
||||
*.flags
|
||||
*.ko
|
||||
*.ko.cmd
|
||||
*.la
|
||||
*.lo
|
||||
*.log
|
||||
*.mod.c
|
||||
*.mod.o
|
||||
*.o
|
||||
*.o.cmd
|
||||
*.sw?
|
||||
*.trs
|
||||
.depend
|
||||
.deps
|
||||
.libs
|
||||
.tmp_versions
|
||||
.*check*
|
||||
.*install*
|
||||
Makefile
|
||||
Makefile.in
|
||||
TAGS
|
||||
aclocal.m4
|
||||
autom4te.cache
|
||||
build-aux
|
||||
bus_if.h
|
||||
compile
|
||||
config.guess
|
||||
config.h
|
||||
config.h.in
|
||||
config.log
|
||||
config.status
|
||||
config.sub
|
||||
configure
|
||||
configure.lineno
|
||||
cscope.*
|
||||
depcomp
|
||||
device_if.h
|
||||
drm.kld
|
||||
drm_pciids.h
|
||||
export_syms
|
||||
i915.kld
|
||||
install-sh
|
||||
libdrm/config.h.in
|
||||
libdrm.pc
|
||||
libdrm_intel.pc
|
||||
libdrm_nouveau.pc
|
||||
libdrm_radeon.pc
|
||||
libdrm_omap.pc
|
||||
libdrm_exynos.pc
|
||||
libdrm_freedreno.pc
|
||||
libdrm_amdgpu.pc
|
||||
libkms.pc
|
||||
libtool
|
||||
ltmain.sh
|
||||
mach64.kld
|
||||
man/.man_fixup
|
||||
mga.kld
|
||||
missing
|
||||
mkinstalldirs
|
||||
opt_drm.h
|
||||
pci_if.h
|
||||
r128.kld
|
||||
radeon.kld
|
||||
savage.kld
|
||||
sis.kld
|
||||
stamp-h1
|
||||
tdfx.kld
|
||||
via.kld
|
||||
tests/auth
|
||||
tests/amdgpu/amdgpu_test
|
||||
tests/dristat
|
||||
tests/drmsl
|
||||
tests/drmstat
|
||||
tests/getclient
|
||||
tests/getstats
|
||||
tests/getversion
|
||||
tests/hash
|
||||
tests/lock
|
||||
tests/openclose
|
||||
tests/random
|
||||
tests/setversion
|
||||
tests/updatedraw
|
||||
tests/modeprint/modeprint
|
||||
tests/modetest/modetest
|
||||
tests/name_from_fd
|
||||
tests/proptest/proptest
|
||||
tests/kmstest/kmstest
|
||||
tests/vbltest/vbltest
|
||||
tests/radeon/radeon_ttm
|
||||
tests/exynos/exynos_fimg2d_test
|
||||
man/*.3
|
||||
|
|
|
|||
265
.gitlab-ci.yml
|
|
@ -1,265 +0,0 @@
|
|||
# This is the tag of the docker image used for the build jobs. If the
|
||||
# image doesn't exist yet, the containers stage generates it.
|
||||
#
|
||||
# In order to generate a new image, one should generally change the tag.
|
||||
# While removing the image from the registry would also work, that's not
|
||||
# recommended except for ephemeral images during development: Replacing
|
||||
# an image after a significant amount of time might pull in newer
|
||||
# versions of gcc/clang or other packages, which might break the build
|
||||
# with older commits using the same tag.
|
||||
#
|
||||
# After merging a change resulting in generating a new image to the
|
||||
# main repository, it's recommended to remove the image from the source
|
||||
# repository's container registry, so that the image from the main
|
||||
# repository's registry will be used there as well.
|
||||
.templates_sha: &template_sha c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb # see https://docs.gitlab.com/ee/ci/yaml/#includefile
|
||||
|
||||
include:
|
||||
- project: 'freedesktop/ci-templates'
|
||||
ref: *template_sha
|
||||
file:
|
||||
- '/templates/debian.yml'
|
||||
- '/templates/freebsd.yml'
|
||||
- '/templates/ci-fairy.yml'
|
||||
|
||||
variables:
|
||||
FDO_UPSTREAM_REPO: mesa/libdrm
|
||||
FDO_REPO_SUFFIX: "$BUILD_OS/$BUILD_ARCH"
|
||||
|
||||
stages:
|
||||
- "Base container"
|
||||
- "Build"
|
||||
|
||||
.ci-rules:
|
||||
rules:
|
||||
- when: on_success
|
||||
|
||||
# CONTAINERS
|
||||
|
||||
.os-debian:
|
||||
variables:
|
||||
BUILD_OS: debian
|
||||
FDO_DISTRIBUTION_VERSION: trixie-slim
|
||||
FDO_DISTRIBUTION_PACKAGES: 'build-essential docbook-xsl libatomic-ops-dev libcairo2-dev libcunit1-dev libpciaccess-dev meson ninja-build pkg-config python3 python3-pip python3-wheel python3-setuptools python3-docutils valgrind'
|
||||
# bump this tag every time you change something which requires rebuilding the
|
||||
# base image
|
||||
FDO_DISTRIBUTION_TAG: "2026-04-27.0"
|
||||
|
||||
.debian-x86_64:
|
||||
extends:
|
||||
- .os-debian
|
||||
variables:
|
||||
BUILD_ARCH: "x86-64"
|
||||
|
||||
.debian-aarch64:
|
||||
extends:
|
||||
- .os-debian
|
||||
variables:
|
||||
BUILD_ARCH: "aarch64"
|
||||
|
||||
.debian-armv7:
|
||||
extends:
|
||||
- .os-debian
|
||||
variables:
|
||||
BUILD_ARCH: "armv7"
|
||||
FDO_DISTRIBUTION_PLATFORM: linux/arm/v7
|
||||
|
||||
.os-freebsd:
|
||||
variables:
|
||||
BUILD_OS: freebsd
|
||||
FDO_DISTRIBUTION_VERSION: "14.2"
|
||||
FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess textproc/py-docutils cairo'
|
||||
# bump this tag every time you change something which requires rebuilding the
|
||||
# base image
|
||||
FDO_DISTRIBUTION_TAG: "2025-05-22.0"
|
||||
|
||||
.freebsd-x86_64:
|
||||
extends:
|
||||
- .os-freebsd
|
||||
variables:
|
||||
BUILD_ARCH: "x86_64"
|
||||
|
||||
# Build our base container image, which contains the core distribution, the
|
||||
# toolchain, and all our build dependencies. This will be reused in the build
|
||||
# stage.
|
||||
x86_64-debian-container_prep:
|
||||
extends:
|
||||
- .ci-rules
|
||||
- .debian-x86_64
|
||||
- .fdo.container-build@debian
|
||||
stage: "Base container"
|
||||
variables:
|
||||
GIT_STRATEGY: none
|
||||
|
||||
aarch64-debian-container_prep:
|
||||
extends:
|
||||
- .ci-rules
|
||||
- .debian-aarch64
|
||||
- .fdo.container-build@debian
|
||||
tags:
|
||||
- aarch64
|
||||
stage: "Base container"
|
||||
variables:
|
||||
GIT_STRATEGY: none
|
||||
|
||||
armv7-debian-container_prep:
|
||||
extends:
|
||||
- .ci-rules
|
||||
- .debian-armv7
|
||||
- .fdo.container-build@debian
|
||||
tags:
|
||||
- aarch64
|
||||
stage: "Base container"
|
||||
variables:
|
||||
GIT_STRATEGY: none
|
||||
FDO_BASE_IMAGE: "arm32v7/debian:$FDO_DISTRIBUTION_VERSION"
|
||||
|
||||
x86_64-freebsd-container_prep:
|
||||
extends:
|
||||
- .ci-rules
|
||||
- .freebsd-x86_64
|
||||
- .fdo.qemu-build@freebsd@x86_64
|
||||
stage: "Base container"
|
||||
variables:
|
||||
GIT_STRATEGY: none
|
||||
|
||||
# Core build environment.
|
||||
.build-env:
|
||||
variables:
|
||||
MESON_BUILD_TYPE: "-Dbuildtype=debug -Doptimization=0 -Db_sanitize=address,undefined"
|
||||
|
||||
# OS/architecture-specific variants
|
||||
.build-env-debian-x86_64:
|
||||
extends:
|
||||
- .fdo.suffixed-image@debian
|
||||
- .debian-x86_64
|
||||
- .build-env
|
||||
needs:
|
||||
- job: x86_64-debian-container_prep
|
||||
artifacts: false
|
||||
|
||||
.build-env-debian-aarch64:
|
||||
extends:
|
||||
- .fdo.suffixed-image@debian
|
||||
- .debian-aarch64
|
||||
- .build-env
|
||||
variables:
|
||||
# At least with the versions we have, the LSan runtime makes fork unusably
|
||||
# slow on AArch64, which is bad news since the test suite decides to fork
|
||||
# for every single subtest. For now, in order to get AArch64 builds and
|
||||
# tests into CI, just assume that we're not going to leak any more on
|
||||
# AArch64 than we would on ARMv7 or x86-64.
|
||||
ASAN_OPTIONS: "detect_leaks=0"
|
||||
tags:
|
||||
- aarch64
|
||||
needs:
|
||||
- job: aarch64-debian-container_prep
|
||||
artifacts: false
|
||||
|
||||
.build-env-debian-armv7:
|
||||
extends:
|
||||
- .fdo.suffixed-image@debian
|
||||
- .debian-armv7
|
||||
- .build-env
|
||||
tags:
|
||||
- aarch64
|
||||
needs:
|
||||
- job: armv7-debian-container_prep
|
||||
artifacts: false
|
||||
|
||||
.build-env-freebsd-x86_64:
|
||||
variables:
|
||||
# Compiling with ASan+UBSan appears to trigger an infinite loop in the
|
||||
# compiler shipped with FreeBSD 13.0, so we only use UBSan here.
|
||||
# Additionally, sanitizers can't be used with b_lundef on FreeBSD.
|
||||
MESON_BUILD_TYPE: "-Dbuildtype=debug -Db_sanitize=undefined -Db_lundef=false"
|
||||
extends:
|
||||
- .fdo.suffixed-image@freebsd
|
||||
- .freebsd-x86_64
|
||||
- .build-env
|
||||
needs:
|
||||
- job: x86_64-freebsd-container_prep
|
||||
artifacts: false
|
||||
|
||||
# BUILD
|
||||
|
||||
.do-build:
|
||||
extends:
|
||||
- .ci-rules
|
||||
stage: "Build"
|
||||
variables:
|
||||
GIT_DEPTH: 10
|
||||
script:
|
||||
- meson setup build
|
||||
--fatal-meson-warnings --auto-features=enabled
|
||||
-D udev=true
|
||||
- ninja -C build
|
||||
- ninja -C build test
|
||||
- DESTDIR=$PWD/install ninja -C build install
|
||||
artifacts:
|
||||
when: on_failure
|
||||
paths:
|
||||
- build/meson-logs/*
|
||||
|
||||
.do-build-qemu:
|
||||
extends:
|
||||
- .ci-rules
|
||||
stage: "Build"
|
||||
script:
|
||||
# Start the VM and copy our workspace to the VM
|
||||
- /app/vmctl start
|
||||
- scp -r $PWD "vm:"
|
||||
# The `set +e` is needed to ensure that we always copy the meson logs back to
|
||||
# the workspace to see details about the failed tests.
|
||||
- |
|
||||
set +e
|
||||
/app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson setup build --fatal-meson-warnings --auto-features=enabled -D etnaviv=disabled -D nouveau=disabled -D valgrind=disabled && ninja -C build"
|
||||
set -ex
|
||||
scp -r vm:$CI_PROJECT_NAME/build/meson-logs .
|
||||
/app/vmctl exec "ninja -C $CI_PROJECT_NAME/build install"
|
||||
mkdir -p $PREFIX && scp -r vm:$PREFIX/ $PREFIX/
|
||||
# Finally, shut down the VM.
|
||||
- /app/vmctl stop
|
||||
artifacts:
|
||||
when: on_failure
|
||||
paths:
|
||||
- build/meson-logs/*
|
||||
|
||||
# Full build and test.
|
||||
x86_64-debian-build:
|
||||
extends:
|
||||
- .build-env-debian-x86_64
|
||||
- .do-build
|
||||
|
||||
aarch64-debian-build:
|
||||
extends:
|
||||
- .build-env-debian-aarch64
|
||||
- .do-build
|
||||
|
||||
armv7-debian-build:
|
||||
extends:
|
||||
- .build-env-debian-armv7
|
||||
- .do-build
|
||||
|
||||
# Daily build
|
||||
meson-arch-daily:
|
||||
rules:
|
||||
- if: '$SCHEDULE == "arch-daily"'
|
||||
when: on_success
|
||||
- when: never
|
||||
image: archlinux/archlinux:base-devel
|
||||
before_script:
|
||||
- pacman -Syu --noconfirm --needed
|
||||
cairo
|
||||
cunit
|
||||
libatomic_ops
|
||||
libpciaccess
|
||||
meson
|
||||
valgrind
|
||||
python-docutils
|
||||
extends: .do-build
|
||||
|
||||
x86_64-freebsd-build:
|
||||
extends:
|
||||
- .build-env-freebsd-x86_64
|
||||
- .do-build-qemu
|
||||
97
Android.bp
|
|
@ -1,97 +0,0 @@
|
|||
//
|
||||
// Copyright © 2011-2012 Intel Corporation
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice (including the next
|
||||
// paragraph) shall be included in all copies or substantial portions of the
|
||||
// Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
//
|
||||
|
||||
subdirs = ["*"]
|
||||
build = ["Android.sources.bp"]
|
||||
|
||||
cc_defaults {
|
||||
name: "libdrm_defaults",
|
||||
cflags: [
|
||||
// XXX: Consider moving these to config.h analogous to autoconf.
|
||||
"-DMAJOR_IN_SYSMACROS=1",
|
||||
"-DHAVE_VISIBILITY=1",
|
||||
"-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1",
|
||||
|
||||
"-Wall",
|
||||
"-Werror",
|
||||
"-Wno-deprecated-declarations",
|
||||
"-Wno-format",
|
||||
"-Wno-gnu-variable-sized-type-not-at-end",
|
||||
"-Wno-implicit-function-declaration",
|
||||
"-Wno-int-conversion",
|
||||
"-Wno-missing-field-initializers",
|
||||
"-Wno-pointer-arith",
|
||||
"-Wno-unused-parameter",
|
||||
"-Wno-unused-variable",
|
||||
],
|
||||
export_system_include_dirs: ["."],
|
||||
}
|
||||
|
||||
cc_library_headers {
|
||||
name: "libdrm_headers",
|
||||
vendor_available: true,
|
||||
host_supported: true,
|
||||
defaults: ["libdrm_defaults"],
|
||||
export_include_dirs: ["include/drm", "android"],
|
||||
apex_available: [
|
||||
"//apex_available:platform",
|
||||
"com.android.virt",
|
||||
],
|
||||
}
|
||||
|
||||
genrule {
|
||||
name: "generated_static_table_fourcc_h",
|
||||
out: ["generated_static_table_fourcc.h"],
|
||||
srcs: ["include/drm/drm_fourcc.h"],
|
||||
tool_files: ["gen_table_fourcc.py"],
|
||||
cmd: "python3 $(location gen_table_fourcc.py) $(in) $(out)",
|
||||
}
|
||||
|
||||
// Library for the device
|
||||
cc_library {
|
||||
name: "libdrm",
|
||||
recovery_available: true,
|
||||
vendor_available: true,
|
||||
host_supported: true,
|
||||
defaults: [
|
||||
"libdrm_defaults",
|
||||
"libdrm_sources",
|
||||
],
|
||||
|
||||
generated_headers: [
|
||||
"generated_static_table_fourcc_h",
|
||||
],
|
||||
|
||||
export_include_dirs: ["include/drm", "android"],
|
||||
|
||||
cflags: [
|
||||
"-Wno-enum-conversion",
|
||||
"-Wno-pointer-arith",
|
||||
"-Wno-sign-compare",
|
||||
"-Wno-tautological-compare",
|
||||
],
|
||||
apex_available: [
|
||||
"//apex_available:platform",
|
||||
"com.android.virt",
|
||||
],
|
||||
}
|
||||
47
Android.mk
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
#
|
||||
# Copyright © 2011-2012 Intel Corporation
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
# IN THE SOFTWARE.
|
||||
#
|
||||
|
||||
LOCAL_PATH := $(call my-dir)
|
||||
include $(CLEAR_VARS)
|
||||
|
||||
# Import variables LIBDRM_{,H_,INCLUDE_H_,INCLUDE_VMWGFX_H_}FILES
|
||||
include $(LOCAL_PATH)/Makefile.sources
|
||||
|
||||
LOCAL_MODULE := libdrm
|
||||
LOCAL_MODULE_TAGS := optional
|
||||
|
||||
LOCAL_SRC_FILES := $(LIBDRM_FILES)
|
||||
LOCAL_EXPORT_C_INCLUDE_DIRS := \
|
||||
$(LOCAL_PATH) \
|
||||
$(LOCAL_PATH)/include/drm
|
||||
|
||||
LOCAL_C_INCLUDES := \
|
||||
$(LOCAL_PATH)/include/drm
|
||||
|
||||
LOCAL_CFLAGS := \
|
||||
-DHAVE_VISIBILITY=1 \
|
||||
-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
|
||||
|
||||
include $(BUILD_SHARED_LIBRARY)
|
||||
|
||||
include $(call all-makefiles-under,$(LOCAL_PATH))
|
||||
|
|
@ -1,12 +0,0 @@
// Autogenerated with Android.sources.bp.mk

cc_defaults {
    name: "libdrm_sources",
    srcs: [
        "xf86drm.c",
        "xf86drmHash.c",
        "xf86drmRandom.c",
        "xf86drmSL.c",
        "xf86drmMode.c",
    ],
}
|
|
@ -1,25 +0,0 @@
|
|||
# Usage: make -f path/to/Android.sources.bp.mk NAMES=<> >Android.sources.bp
|
||||
#
|
||||
# It will read the Makefile.sources in the current directory, and
|
||||
# write <NAME>_FILES to stdout as an Android.bp cc_defaults module.
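#
# For example, the libdrm_sources module in Android.sources.bp can be
# regenerated with an invocation along these lines (assuming the Makefile.sources
# in the current directory defines LIBDRM_FILES, as it does in this tree):
#
#   make -f Android.sources.bp.mk NAMES=LIBDRM > Android.sources.bp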
|
||||
|
||||
.PHONY: all
|
||||
all:
|
||||
@# Do nothing
|
||||
|
||||
include Makefile.sources
|
||||
|
||||
empty :=
|
||||
indent := $(empty) $(empty)
|
||||
|
||||
$(info // Autogenerated with Android.sources.bp.mk)
|
||||
$(foreach NAME,$(NAMES), \
|
||||
$(eval lower_name := $(shell echo $(PREFIX)$(NAME) | tr 'A-Z' 'a-z')) \
|
||||
$(info ) \
|
||||
$(info cc_defaults {) \
|
||||
$(info $(indent)name: "$(lower_name)_sources",) \
|
||||
$(info $(indent)srcs: [) \
|
||||
$(foreach f,$(filter %.c,$($(NAME)_FILES)), \
|
||||
$(info $(indent)$(indent)"$(f)",)) \
|
||||
$(info $(indent)],) \
|
||||
$(info }))
|
||||
105
CONTRIBUTING.rst
|
|
@ -1,105 +0,0 @@
Contributing to libdrm
======================

Submitting Patches
------------------

Patches should be sent to dri-devel@lists.freedesktop.org, using git
send-email. For patches only touching driver-specific code, one of the driver
mailing lists (like amd-gfx@lists.freedesktop.org) is also appropriate. See the
git documentation for help:

http://git-scm.com/documentation

Since dri-devel is a very busy mailing list, please use --subject-prefix="PATCH
libdrm" to make it easier to find libdrm patches. This is best done by running

  git config --local format.subjectprefix "PATCH libdrm"
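
With the prefix configured, a series can then be sent with git send-email; a
minimal invocation (the revision range below is only an example) looks like:

  git send-email --to=dri-devel@lists.freedesktop.org origin/main..HEAD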

The first line of a commit message should contain a prefix indicating what part
is affected by the patch, followed by one sentence that describes the change. For
example:

  amdgpu: Use uint32_t i in amdgpu_find_bo_by_cpu_mapping

The body of the commit message should describe what the patch changes and why,
and also note any particular side effects. For recommended reading on
writing commit messages, see:

http://who-t.blogspot.de/2009/12/on-commit-messages.html

Your patches should also include a Signed-off-by line with your name and email
address. If you're not the patch's original author, you should also gather
S-o-b's from them (and/or whoever gave the patch to you). The significance of
this is that it certifies that you created the patch, that it was created under
an appropriate open source license, or that it was provided to you under those
terms. This lets us indicate a chain of responsibility for the copyright status
of the code. For more details:

https://developercertificate.org/

We won't reject patches that lack S-o-b, but it is strongly recommended.
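
The sign-off can be added automatically when committing; for example (the name
and address in the resulting trailer come from your git config):

  git commit -s
  # appends a trailer of the form:
  # Signed-off-by: Your Name <you@example.com>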

Review and Merging
------------------

Patches should have at least one positive review (Reviewed-by: tag) or
indication of approval (Acked-by: tag) before merging. For any code shared
between drivers this is mandatory.

Please note that kernel/userspace API header files have special rules, see
include/drm/README.

Coding style in the project loosely follows the CodingStyle of the Linux kernel:

https://www.kernel.org/doc/html/latest/process/coding-style.html?highlight=coding%20style

Commit Rights
-------------

Commit rights will be granted to anyone who requests them and fulfills the
below criteria:

- Submitted a few (5-10 as a rule of thumb) non-trivial (not just simple
  spelling fixes and whitespace adjustments) patches that have been merged
  already. Since libdrm is just a glue library between the kernel and userspace
  drivers, merged patches to those components also count towards the commit
  criteria.

- Are actively participating in discussions about their work (on the mailing
  list or IRC). This should not be interpreted as a requirement to review other
  people's patches but just to make sure that patch submission isn't one-way
  communication. Cross-review is still highly encouraged.

- Will be regularly contributing further patches. This includes regular
  contributors to other parts of the open source graphics stack who only
  do the oddball rare patch within libdrm itself.

- Agree to use their commit rights in accordance with the documented merge
  criteria, tools, and processes.

To apply for commit rights ("Developer" role in gitlab) send a mail to
dri-devel@lists.freedesktop.org and please ping the maintainers if your request
is stuck.

Committers are encouraged to request that their commit rights be removed when
they no longer contribute to the project. Commit rights will be reinstated when
they come back to the project.

Maintainers and committers should encourage contributors to request commit
rights, as especially junior contributors tend to underestimate their skills.

Code of Conduct
---------------

Please be aware that the fd.o Code of Conduct also applies to libdrm:

https://www.freedesktop.org/wiki/CodeOfConduct/

See the gitlab project owners for contact details of the libdrm maintainers.

Abuse of commit rights, like engaging in commit fights or willfully pushing
patches that violate the documented merge criteria, will also be handled through
the Code of Conduct enforcement process.

Happy hacking!
|
|
@ -1,4 +1,3 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/include/libdrm)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/include/freedreno)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libdrm_*intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libdrm_*intermediates)
|
|
|
|||
131
Makefile.am
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
# Copyright 2005 Adam Jackson.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
# license, and/or sell copies of the Software, and to permit persons to whom
|
||||
# the Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
include Makefile.sources
|
||||
|
||||
ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
|
||||
|
||||
AM_DISTCHECK_CONFIGURE_FLAGS = \
|
||||
--enable-udev \
|
||||
--enable-libkms \
|
||||
--enable-intel \
|
||||
--enable-radeon \
|
||||
--enable-amdgpu \
|
||||
--enable-nouveau \
|
||||
--enable-vmwgfx \
|
||||
--enable-omap-experimental-api \
|
||||
--enable-exynos-experimental-api \
|
||||
--enable-freedreno \
|
||||
--enable-freedreno-kgsl\
|
||||
--enable-tegra-experimental-api \
|
||||
--enable-install-test-programs \
|
||||
--enable-cairo-tests \
|
||||
--enable-manpages
|
||||
|
||||
pkgconfigdir = @pkgconfigdir@
|
||||
pkgconfig_DATA = libdrm.pc
|
||||
|
||||
if HAVE_LIBKMS
|
||||
LIBKMS_SUBDIR = libkms
|
||||
endif
|
||||
|
||||
if HAVE_INTEL
|
||||
INTEL_SUBDIR = intel
|
||||
endif
|
||||
|
||||
if HAVE_NOUVEAU
|
||||
NOUVEAU_SUBDIR = nouveau
|
||||
endif
|
||||
|
||||
if HAVE_RADEON
|
||||
RADEON_SUBDIR = radeon
|
||||
endif
|
||||
|
||||
if HAVE_AMDGPU
|
||||
AMDGPU_SUBDIR = amdgpu
|
||||
endif
|
||||
|
||||
if HAVE_OMAP
|
||||
OMAP_SUBDIR = omap
|
||||
endif
|
||||
|
||||
if HAVE_EXYNOS
|
||||
EXYNOS_SUBDIR = exynos
|
||||
endif
|
||||
|
||||
if HAVE_FREEDRENO
|
||||
FREEDRENO_SUBDIR = freedreno
|
||||
endif
|
||||
|
||||
if HAVE_TEGRA
|
||||
TEGRA_SUBDIR = tegra
|
||||
endif
|
||||
|
||||
if BUILD_MANPAGES
|
||||
if HAVE_MANPAGES_STYLESHEET
|
||||
MAN_SUBDIR = man
|
||||
endif
|
||||
endif
|
||||
|
||||
SUBDIRS = \
|
||||
. \
|
||||
$(LIBKMS_SUBDIR) \
|
||||
$(INTEL_SUBDIR) \
|
||||
$(NOUVEAU_SUBDIR) \
|
||||
$(RADEON_SUBDIR) \
|
||||
$(AMDGPU_SUBDIR) \
|
||||
$(OMAP_SUBDIR) \
|
||||
$(EXYNOS_SUBDIR) \
|
||||
$(FREEDRENO_SUBDIR) \
|
||||
$(TEGRA_SUBDIR) \
|
||||
tests \
|
||||
$(MAN_SUBDIR)
|
||||
|
||||
libdrm_la_LTLIBRARIES = libdrm.la
|
||||
libdrm_ladir = $(libdir)
|
||||
libdrm_la_LDFLAGS = -version-number 2:4:0 -no-undefined
|
||||
libdrm_la_LIBADD = @CLOCK_LIB@
|
||||
|
||||
libdrm_la_CPPFLAGS = -I$(top_srcdir)/include/drm
|
||||
AM_CFLAGS = \
|
||||
$(WARN_CFLAGS) \
|
||||
$(VALGRIND_CFLAGS)
|
||||
|
||||
libdrm_la_SOURCES = $(LIBDRM_FILES)
|
||||
|
||||
libdrmincludedir = ${includedir}
|
||||
libdrminclude_HEADERS = $(LIBDRM_H_FILES)
|
||||
|
||||
EXTRA_DIST = Android.mk
|
||||
|
||||
klibdrmincludedir = ${includedir}/libdrm
|
||||
klibdrminclude_HEADERS = $(LIBDRM_INCLUDE_H_FILES)
|
||||
|
||||
if HAVE_VMWGFX
|
||||
klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
|
||||
endif
|
||||
|
||||
|
||||
copy-headers :
|
||||
cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
|
||||
|
||||
commit-headers : copy-headers
|
||||
git add include/drm/*.h
|
||||
git commit -am "Copy headers from kernel $$(GIT_DIR=$(kernel_source)/.git git describe)"
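# A hypothetical invocation, assuming a Linux kernel checkout next to this
# tree at ../linux:
#   make commit-headers kernel_source=../linux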
|
||||
38
Makefile.sources
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
LIBDRM_FILES := \
|
||||
xf86drm.c \
|
||||
xf86drmHash.c \
|
||||
xf86drmHash.h \
|
||||
xf86drmRandom.c \
|
||||
xf86drmRandom.h \
|
||||
xf86drmSL.c \
|
||||
xf86drmMode.c \
|
||||
xf86atomic.h \
|
||||
libdrm_macros.h \
|
||||
libdrm_lists.h \
|
||||
util_double_list.h \
|
||||
util_math.h
|
||||
|
||||
LIBDRM_H_FILES := \
|
||||
xf86drm.h \
|
||||
xf86drmMode.h
|
||||
|
||||
LIBDRM_INCLUDE_H_FILES := \
|
||||
include/drm/drm.h \
|
||||
include/drm/drm_fourcc.h \
|
||||
include/drm/drm_mode.h \
|
||||
include/drm/drm_sarea.h \
|
||||
include/drm/i915_drm.h \
|
||||
include/drm/mach64_drm.h \
|
||||
include/drm/mga_drm.h \
|
||||
include/drm/nouveau_drm.h \
|
||||
include/drm/qxl_drm.h \
|
||||
include/drm/r128_drm.h \
|
||||
include/drm/radeon_drm.h \
|
||||
include/drm/amdgpu_drm.h \
|
||||
include/drm/savage_drm.h \
|
||||
include/drm/sis_drm.h \
|
||||
include/drm/tegra_drm.h \
|
||||
include/drm/via_drm.h
|
||||
|
||||
LIBDRM_INCLUDE_VMWGFX_H_FILES := \
|
||||
include/drm/vmwgfx_drm.h
|
||||
41
README
Normal file
|
|
@ -0,0 +1,41 @@
libdrm - userspace library for drm

This is libdrm, a userspace library for accessing the DRM, direct
rendering manager, on Linux, BSD and other operating systems that
support the ioctl interface.  The library provides wrapper functions
for the ioctls to avoid exposing the kernel interface directly, and,
for chipsets with a drm memory manager, support for tracking relocations
and buffers.  libdrm is a low-level library, typically used by
graphics drivers such as the Mesa DRI drivers, the X drivers, libva
and similar projects.  New functionality in the kernel DRM drivers
typically requires a new libdrm, but a new libdrm will always work
with an older kernel.


Compiling
---------

libdrm is a standard autotools package and follows the normal
configure, build and install steps.  The first step is to configure
the package, which is done by running the configure shell script:

	./configure

By default, libdrm will install into the /usr/local/ prefix.  If you
want to install this DRM to replace your system copy, pass
--prefix=/usr and --exec-prefix=/ to configure.  If you are building
libdrm from a git checkout, you first need to run the autogen.sh
script.  You can pass any options to autogen.sh that you would
otherwise pass to configure, or you can just re-run configure with the
options you need once autogen.sh finishes.

The next step is to build libdrm:

	make

and once make finishes successfully, install the package using

	make install

If you are installing into a system location, you will need to be root
to perform the install step.
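
For example, building and installing a system copy from a fresh git
checkout (the prefix below is only an illustration) could look like:

	./autogen.sh --prefix=/usr --exec-prefix=/
	make
	make install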
63
README.rst
|
|
@ -1,63 +0,0 @@
libdrm - userspace library for drm
----------------------------------

This is libdrm, a userspace library for accessing the DRM, direct rendering
manager, on Linux, BSD and other operating systems that support the ioctl
interface.
The library provides wrapper functions for the ioctls to avoid exposing the
kernel interface directly, and, for chipsets with a drm memory manager, support
for tracking relocations and buffers.
New functionality in the kernel DRM drivers typically requires a new libdrm,
but a new libdrm will always work with an older kernel.

libdrm is a low-level library, typically used by graphics drivers such as
the Mesa drivers, the X drivers, libva and similar projects.

Syncing with the Linux kernel headers
-------------------------------------

The library should be regularly updated to match the recent changes in
`include/uapi/drm/`.

libdrm maintains a human-readable version of the format modifier tokens, with
the simpler ones being extracted automatically from the `drm_fourcc.h` header
file with the help of a python script. This might not always be possible, as
some of the vendors require decoding/extracting them programmatically. For that
reason one can enhance the current vendor functions to include/provide the
newly added token formats, or, in case there's no such decoding function,
add one that performs the task of extracting them.

For simpler format modifier tokens there's a script (gen_table_fourcc.py) that
creates a static table by going over the `drm_fourcc.h` header file. If the
script can't handle new (simpler) format modifier tokens, it should be modified
rather than the generated static table.
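
For reference, the Android build rules in this tree invoke the script with the
header as input and the generated table as output (the file names below are
the ones used by that genrule):

  python3 gen_table_fourcc.py include/drm/drm_fourcc.h generated_static_table_fourcc.h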

Compiling
---------

To set up meson:

  meson builddir/

By default this will install into /usr/local; you can change your prefix
with --prefix=/usr (or `meson configure builddir/ -Dprefix=/usr` after
the initial meson setup).

Then use ninja to build and install:

  ninja -C builddir/ install

If you are installing into a system location you will need to run install
separately, and as root.
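
For example, a full build followed by a system-wide install (the prefix is
only an illustration) might look like:

  meson builddir/ --prefix=/usr
  ninja -C builddir/
  sudo ninja -C builddir/ install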

AMDGPU ASIC table file
----------------------

The AMDGPU driver requires the `amdgpu.ids` file. It is usually located at
`$PREFIX/share/libdrm`, but it is possible to specify a set of alternative
paths at runtime by setting the `AMDGPU_ASIC_ID_TABLE_PATHS` environment
variable to one or more colon-separated paths in which to search for the
`amdgpu.ids` file.

For this option to be available, the C library must support the secure_getenv()
function. On systems without it (like NetBSD), this option won't be available.
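
For example (the directories below are purely illustrative):

  export AMDGPU_ASIC_ID_TABLE_PATHS="/opt/amdgpu/share/libdrm:/usr/local/share/libdrm"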
45
RELEASING
|
|
@ -9,22 +9,47 @@ However, this is up to whoever is driving the feature in question.
|
|||
|
||||
Follow these steps to release a new version of libdrm:
|
||||
|
||||
1) Bump the version number in meson.build. We seem to have settled for
|
||||
2.4.x as the versioning scheme for libdrm, so just bump the micro
|
||||
version.
|
||||
1) Ensure that there are no local, uncommitted/unpushed
|
||||
modifications. You're probably in a good state if both "git diff
|
||||
HEAD" and "git log master..origin/master" give no output.
|
||||
|
||||
2) Run `ninja -C builddir/ dist` to generate the tarballs.
|
||||
Make sure that the version number of the tarball name in
|
||||
builddir/meson-dist/ matches the number you bumped to. Move that
|
||||
tarball to the libdrm repo root for the release script to pick up.
|
||||
2) Bump the version number in configure.ac. We seem to have settled
|
||||
for 2.4.x as the versioning scheme for libdrm, so just bump the
|
||||
micro version.
|
||||
|
||||
3) Push the updated main branch with the bumped version number:
|
||||
3) Run autoconf and then re-run ./configure so the build system
|
||||
picks up the new version number.
|
||||
|
||||
git push origin main
|
||||
4) (optional step, release.sh will make distcheck for you, but it can be
|
||||
heart warming to verify that make distcheck passes)
|
||||
|
||||
Verify that the code passes "make distcheck". Running "make
|
||||
distcheck" should result in no warnings or errors and end with a
|
||||
message of the form:
|
||||
|
||||
=============================================
|
||||
libdrm-X.Y.Z archives ready for distribution:
|
||||
libdrm-X.Y.Z.tar.gz
|
||||
libdrm-X.Y.Z.tar.bz2
|
||||
=============================================
|
||||
|
||||
Make sure that the version number reported by distcheck and in
|
||||
the tarball names matches the number you bumped to in configure.ac.
|
||||
|
||||
5) Commit the configure.ac change and make an annotated tag for that
|
||||
commit with the version number of the release as the name and a
|
||||
message of "libdrm X.Y.Z". For example, for the 2.4.16 release
|
||||
the command is:
|
||||
|
||||
git tag -a 2.4.16 -m "libdrm 2.4.16"
|
||||
|
||||
6) Push the commit and tag by saying
|
||||
|
||||
git push --tags origin master
|
||||
|
||||
assuming the remote for the upstream libdrm repo is called origin.
|
||||
|
||||
4) Use the release.sh script from the xorg/util/modular repo to
|
||||
7) Use the release.sh script from the xorg/util/modular repo to
|
||||
upload the tarballs to the freedesktop.org download area and
|
||||
create an announce email template. The script takes one argument:
|
||||
the path to the libdrm checkout. So, if a checkout of modular is
|
||||
|
|
|
|||
|
|
@ -1,13 +0,0 @@
# To use this config with your editor, follow the instructions at:
# http://editorconfig.org

[*]
charset = utf-8
indent_style = tab
indent_size = 8
tab_width = 8
insert_final_newline = true

[meson.build]
indent_style = space
indent_size = 2
|
|
@ -1,16 +0,0 @@
|
|||
build = ["Android.sources.bp"]
|
||||
|
||||
cc_library_shared {
|
||||
name: "libdrm_amdgpu",
|
||||
|
||||
cflags: [
|
||||
"-DAMDGPU_ASIC_ID_TABLE=\"/vendor/etc/hwdata/amdgpu.ids\""
|
||||
],
|
||||
|
||||
defaults: [
|
||||
"libdrm_defaults",
|
||||
"libdrm_amdgpu_sources",
|
||||
],
|
||||
vendor: true,
|
||||
shared_libs: ["libdrm"],
|
||||
}
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
// Autogenerated with Android.sources.bp.mk
|
||||
|
||||
cc_defaults {
|
||||
name: "libdrm_amdgpu_sources",
|
||||
srcs: [
|
||||
"amdgpu_asic_id.c",
|
||||
"amdgpu_bo.c",
|
||||
"amdgpu_cs.c",
|
||||
"amdgpu_device.c",
|
||||
"amdgpu_gpu_info.c",
|
||||
"amdgpu_vamgr.c",
|
||||
"amdgpu_vm.c",
|
||||
"handle_table.c",
|
||||
],
|
||||
}
|
||||
57
amdgpu/Makefile.am
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# Copyright © 2008 Jérôme Glisse
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
# IN THE SOFTWARE.
|
||||
#
|
||||
# Authors:
|
||||
# Jérôme Glisse <glisse@freedesktop.org>
|
||||
|
||||
AM_CFLAGS = \
|
||||
$(WARN_CFLAGS) -Wno-switch-enum \
|
||||
-I$(top_srcdir) \
|
||||
$(PTHREADSTUBS_CFLAGS) \
|
||||
-I$(top_srcdir)/include/drm
|
||||
|
||||
libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
|
||||
libdrm_amdgpu_ladir = $(libdir)
|
||||
libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined
|
||||
libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
|
||||
|
||||
libdrm_amdgpu_la_SOURCES = \
|
||||
amdgpu.h \
|
||||
amdgpu_bo.c \
|
||||
amdgpu_cs.c \
|
||||
amdgpu_device.c \
|
||||
amdgpu_gpu_info.c \
|
||||
amdgpu_internal.h \
|
||||
amdgpu_vamgr.c \
|
||||
util_hash.c \
|
||||
util_hash.h \
|
||||
util_hash_table.c \
|
||||
util_hash_table.h
|
||||
|
||||
libdrm_amdgpuincludedir = ${includedir}/libdrm
|
||||
libdrm_amdgpuinclude_HEADERS = \
|
||||
amdgpu.h
|
||||
|
||||
pkgconfigdir = @pkgconfigdir@
|
||||
pkgconfig_DATA = libdrm_amdgpu.pc
|
||||
|
||||
TESTS = amdgpu-symbol-check
|
||||
EXTRA_DIST = $(TESTS)
|
||||
51
amdgpu/amdgpu-symbol-check
Executable file
|
|
@ -0,0 +1,51 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The following symbols (past the first five) are taken from the public headers.
|
||||
# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS
|
||||
|
||||
FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do
|
||||
( grep -q "^$func$" || echo $func ) <<EOF
|
||||
__bss_start
|
||||
_edata
|
||||
_end
|
||||
_fini
|
||||
_init
|
||||
amdgpu_bo_alloc
|
||||
amdgpu_bo_cpu_map
|
||||
amdgpu_bo_cpu_unmap
|
||||
amdgpu_bo_export
|
||||
amdgpu_bo_free
|
||||
amdgpu_bo_import
|
||||
amdgpu_bo_list_create
|
||||
amdgpu_bo_list_destroy
|
||||
amdgpu_bo_list_update
|
||||
amdgpu_bo_query_info
|
||||
amdgpu_bo_set_metadata
|
||||
amdgpu_bo_va_op
|
||||
amdgpu_bo_wait_for_idle
|
||||
amdgpu_create_bo_from_user_mem
|
||||
amdgpu_cs_ctx_create
|
||||
amdgpu_cs_ctx_free
|
||||
amdgpu_cs_query_fence_status
|
||||
amdgpu_cs_query_reset_state
|
||||
amdgpu_cs_submit
|
||||
amdgpu_device_deinitialize
|
||||
amdgpu_device_initialize
|
||||
amdgpu_query_buffer_size_alignment
|
||||
amdgpu_query_crtc_from_id
|
||||
amdgpu_query_firmware_version
|
||||
amdgpu_query_gds_info
|
||||
amdgpu_query_gpu_info
|
||||
amdgpu_query_heap_info
|
||||
amdgpu_query_hw_ip_count
|
||||
amdgpu_query_hw_ip_info
|
||||
amdgpu_query_info
|
||||
amdgpu_read_mm_registers
|
||||
amdgpu_va_range_alloc
|
||||
amdgpu_va_range_free
|
||||
amdgpu_va_range_query
|
||||
EOF
|
||||
done)
|
||||
|
||||
test ! -n "$FUNCS" || echo $FUNCS
|
||||
test ! -n "$FUNCS"
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
amdgpu_bo_alloc
|
||||
amdgpu_bo_cpu_map
|
||||
amdgpu_bo_cpu_unmap
|
||||
amdgpu_bo_export
|
||||
amdgpu_bo_free
|
||||
amdgpu_bo_import
|
||||
amdgpu_bo_inc_ref
|
||||
amdgpu_bo_list_create_raw
|
||||
amdgpu_bo_list_destroy_raw
|
||||
amdgpu_bo_list_create
|
||||
amdgpu_bo_list_destroy
|
||||
amdgpu_bo_list_update
|
||||
amdgpu_bo_query_info
|
||||
amdgpu_bo_set_metadata
|
||||
amdgpu_bo_va_op
|
||||
amdgpu_bo_va_op_raw
|
||||
amdgpu_bo_va_op_raw2
|
||||
amdgpu_bo_wait_for_idle
|
||||
amdgpu_create_bo_from_user_mem
|
||||
amdgpu_cs_chunk_fence_info_to_data
|
||||
amdgpu_cs_chunk_fence_to_dep
|
||||
amdgpu_cs_create_semaphore
|
||||
amdgpu_cs_create_syncobj
|
||||
amdgpu_cs_create_syncobj2
|
||||
amdgpu_cs_ctx_create
|
||||
amdgpu_cs_ctx_create2
|
||||
amdgpu_cs_ctx_free
|
||||
amdgpu_cs_ctx_override_priority
|
||||
amdgpu_cs_ctx_stable_pstate
|
||||
amdgpu_cs_destroy_semaphore
|
||||
amdgpu_cs_destroy_syncobj
|
||||
amdgpu_cs_export_syncobj
|
||||
amdgpu_cs_fence_to_handle
|
||||
amdgpu_cs_import_syncobj
|
||||
amdgpu_cs_query_fence_status
|
||||
amdgpu_cs_query_reset_state
|
||||
amdgpu_cs_query_reset_state2
|
||||
amdgpu_query_sw_info
|
||||
amdgpu_cs_signal_semaphore
|
||||
amdgpu_cs_submit
|
||||
amdgpu_cs_submit_raw
|
||||
amdgpu_cs_submit_raw2
|
||||
amdgpu_cs_syncobj_export_sync_file
|
||||
amdgpu_cs_syncobj_export_sync_file2
|
||||
amdgpu_cs_syncobj_import_sync_file
|
||||
amdgpu_cs_syncobj_import_sync_file2
|
||||
amdgpu_cs_syncobj_query
|
||||
amdgpu_cs_syncobj_query2
|
||||
amdgpu_cs_syncobj_reset
|
||||
amdgpu_cs_syncobj_signal
|
||||
amdgpu_cs_syncobj_timeline_signal
|
||||
amdgpu_cs_syncobj_timeline_wait
|
||||
amdgpu_cs_syncobj_transfer
|
||||
amdgpu_cs_syncobj_wait
|
||||
amdgpu_cs_wait_fences
|
||||
amdgpu_cs_wait_semaphore
|
||||
amdgpu_device_deinitialize
|
||||
amdgpu_device_get_fd
|
||||
amdgpu_device_initialize
|
||||
amdgpu_device_initialize2
|
||||
amdgpu_find_bo_by_cpu_mapping
|
||||
amdgpu_get_marketing_name
|
||||
amdgpu_query_buffer_size_alignment
|
||||
amdgpu_query_crtc_from_id
|
||||
amdgpu_query_firmware_version
|
||||
amdgpu_query_gds_info
|
||||
amdgpu_query_gpu_info
|
||||
amdgpu_query_gpuvm_fault_info
|
||||
amdgpu_query_heap_info
|
||||
amdgpu_query_hw_ip_count
|
||||
amdgpu_query_hw_ip_info
|
||||
amdgpu_query_info
|
||||
amdgpu_query_sensor_info
|
||||
amdgpu_query_uq_fw_area_info
|
||||
amdgpu_query_video_caps_info
|
||||
amdgpu_read_mm_registers
|
||||
amdgpu_va_manager_alloc
|
||||
amdgpu_va_manager_init
|
||||
amdgpu_va_manager_init2
|
||||
amdgpu_va_manager_deinit
|
||||
amdgpu_va_manager_query_sw_info
|
||||
amdgpu_va_range_alloc
|
||||
amdgpu_va_range_alloc2
|
||||
amdgpu_va_range_free
|
||||
amdgpu_va_get_start_addr
|
||||
amdgpu_va_range_query
|
||||
amdgpu_vm_reserve_vmid
|
||||
amdgpu_vm_unreserve_vmid
|
||||
amdgpu_create_userqueue
|
||||
amdgpu_free_userqueue
|
||||
amdgpu_userq_signal
|
||||
amdgpu_userq_wait
|
||||
933
amdgpu/amdgpu.h
File diff suppressed because it is too large
|
|
@ -1,345 +0,0 @@
|
|||
/*
|
||||
* Copyright © 2017 Advanced Micro Devices, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
// secure_getenv requires _GNU_SOURCE
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
|
||||
#include <ctype.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include "xf86drm.h"
|
||||
#include "amdgpu_drm.h"
|
||||
#include "amdgpu_internal.h"
|
||||
|
||||
static int parse_one_line(struct amdgpu_device *dev, const char *line)
|
||||
{
|
||||
char *buf, *saveptr;
|
||||
char *s_did;
|
||||
uint32_t did;
|
||||
char *s_rid;
|
||||
uint32_t rid;
|
||||
char *s_name;
|
||||
char *endptr;
|
||||
int r = -EINVAL;
|
||||
|
||||
/* ignore empty line and commented line */
|
||||
if (strlen(line) == 0 || line[0] == '#')
|
||||
return -EAGAIN;
|
||||
|
||||
buf = strdup(line);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
/* device id */
|
||||
s_did = strtok_r(buf, ",", &saveptr);
|
||||
if (!s_did)
|
||||
goto out;
|
||||
|
||||
did = strtol(s_did, &endptr, 16);
|
||||
if (*endptr)
|
||||
goto out;
|
||||
|
||||
if (did != dev->info.asic_id) {
|
||||
r = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* revision id */
|
||||
s_rid = strtok_r(NULL, ",", &saveptr);
|
||||
if (!s_rid)
|
||||
goto out;
|
||||
|
||||
rid = strtol(s_rid, &endptr, 16);
|
||||
if (*endptr)
|
||||
goto out;
|
||||
|
||||
if (rid != dev->info.pci_rev_id) {
|
||||
r = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* marketing name */
|
||||
s_name = strtok_r(NULL, ",", &saveptr);
|
||||
if (!s_name)
|
||||
goto out;
|
||||
|
||||
/* trim leading whitespaces or tabs */
|
||||
while (isblank(*s_name))
|
||||
s_name++;
|
||||
if (strlen(s_name) == 0)
|
||||
goto out;
|
||||
|
||||
dev->marketing_name = strdup(s_name);
|
||||
if (dev->marketing_name)
|
||||
r = 0;
|
||||
else
|
||||
r = -ENOMEM;
|
||||
|
||||
out:
|
||||
free(buf);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static void amdgpu_parse_proc_cpuinfo(struct amdgpu_device *dev)
|
||||
{
|
||||
const char *search_key = "model name";
|
||||
const char *radeon_key = "Radeon";
|
||||
char *line = NULL;
|
||||
size_t len = 0;
|
||||
FILE *fp;
|
||||
|
||||
fp = fopen("/proc/cpuinfo", "r");
|
||||
if (fp == NULL) {
|
||||
fprintf(stderr, "%s\n", strerror(errno));
|
||||
return;
|
||||
}
|
||||
|
||||
while (getline(&line, &len, fp) != -1) {
|
||||
char *saveptr;
|
||||
char *value;
|
||||
|
||||
if (strncmp(line, search_key, strlen(search_key)))
|
||||
continue;
|
||||
|
||||
/* check for parts that have both CPU and GPU information */
|
||||
value = strstr(line, radeon_key);
|
||||
|
||||
/* get content after the first colon */
|
||||
if (value == NULL) {
|
||||
value = strstr(line, ":");
|
||||
if (value == NULL)
|
||||
continue;
|
||||
value++;
|
||||
}
|
||||
|
||||
/* strip whitespace */
|
||||
while (*value == ' ' || *value == '\t')
|
||||
value++;
|
||||
saveptr = strchr(value, '\n');
|
||||
if (saveptr)
|
||||
*saveptr = '\0';
|
||||
|
||||
/* Add AMD to the new string if it's missing from slicing/dicing */
|
||||
if (strncmp(value, "AMD", 3) != 0) {
|
||||
char *tmp = malloc(strlen(value) + 5);
|
||||
|
||||
if (!tmp)
|
||||
break;
|
||||
sprintf(tmp, "AMD %s", value);
|
||||
dev->marketing_name = tmp;
|
||||
} else
|
||||
dev->marketing_name = strdup(value);
|
||||
break;
|
||||
}
|
||||
|
||||
free(line);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
#if HAVE_SECURE_GETENV
|
||||
static char *join_path(const char *dir, const char *file) {
|
||||
size_t dir_len = strlen(dir);
|
||||
size_t file_len = strlen(file);
|
||||
char *full_path = NULL;
|
||||
|
||||
int need_slash = ((dir_len > 0) && (dir[dir_len - 1] != '/'));
|
||||
size_t total_len = dir_len + (need_slash ? 1 : 0) + file_len + 1; // +1 for null terminator
|
||||
|
||||
if (dir_len == 0) {
|
||||
return strdup(file);
|
||||
}
|
||||
|
||||
full_path = malloc(total_len);
|
||||
if (!full_path) {
|
||||
return NULL; // Memory allocation failed
|
||||
}
|
||||
|
||||
strcpy(full_path, dir);
|
||||
if (need_slash) {
|
||||
full_path[dir_len] = '/';
|
||||
dir_len++;
|
||||
}
|
||||
strcpy(full_path + dir_len, file);
|
||||
|
||||
return full_path;
|
||||
}
|
||||
|
||||
static char **split_env_var(const char *env_var_content)
|
||||
{
|
||||
char **ret = NULL;
|
||||
char *dup_env_val;
|
||||
int elements = 1;
|
||||
int index = 1;
|
||||
|
||||
if (!env_var_content || env_var_content[0] == '\0')
|
||||
return NULL;
|
||||
|
||||
for(char *p = (char *)env_var_content; *p; p++) {
|
||||
if (*p == ':')
|
||||
elements++;
|
||||
}
|
||||
|
||||
dup_env_val = strdup(env_var_content);
|
||||
if (!dup_env_val) {
|
||||
return NULL;
|
||||
}
|
||||
ret = malloc(sizeof(char *) * (elements + 1));
|
||||
ret[0] = dup_env_val;
|
||||
for(char *p = (char *)dup_env_val; *p; p++) {
|
||||
if (*p == ':') {
|
||||
*p = 0;
|
||||
ret[index++] = p + 1;
|
||||
}
|
||||
}
|
||||
ret[index] = NULL; // ensure that the last element in the array is NULL
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void split_env_var_free(char **split_var)
|
||||
{
|
||||
if (split_var) {
|
||||
// remember that the first element also points to the whole duplicated string,
|
||||
// which was modified in place by replacing ':' with '\0' characters
|
||||
free(split_var[0]);
|
||||
free(split_var);
|
||||
}
|
||||
}
|
||||
|
||||
static char *find_asic_id_table(void)
|
||||
{
|
||||
// first check the paths in AMDGPU_ASIC_ID_TABLE_PATHS environment variable
|
||||
const char *amdgpu_asic_id_table_paths = secure_getenv("AMDGPU_ASIC_ID_TABLE_PATHS");
|
||||
const char *file_name = NULL;
|
||||
char *found_path = NULL;
|
||||
char **paths = NULL;
|
||||
|
||||
if (!amdgpu_asic_id_table_paths)
|
||||
return NULL;
|
||||
|
||||
// extract the file name from AMDGPU_ASIC_ID_TABLE
|
||||
file_name = strrchr(AMDGPU_ASIC_ID_TABLE, '/');
|
||||
if (!file_name)
|
||||
return NULL;
|
||||
file_name++; // skip the '/'
|
||||
|
||||
paths = split_env_var(amdgpu_asic_id_table_paths);
|
||||
if (!paths)
|
||||
return NULL;
|
||||
|
||||
// for each path, join with file_name and check if it exists
|
||||
for (int i = 0; paths[i] != NULL; i++) {
|
||||
char *full_path = join_path(paths[i], file_name);
|
||||
if (!full_path) {
|
||||
continue;
|
||||
}
|
||||
if (access(full_path, R_OK) == 0) {
|
||||
found_path = full_path;
|
||||
break;
|
||||
}
|
||||
}
|
||||
split_env_var_free(paths);
|
||||
return found_path;
|
||||
}
|
||||
#endif
|
||||
|
||||
void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
|
||||
{
|
||||
FILE *fp;
|
||||
char *line = NULL;
|
||||
size_t len = 0;
|
||||
ssize_t n;
|
||||
int line_num = 1;
|
||||
int r = 0;
|
||||
|
||||
char *amdgpu_asic_id_table_path = NULL;
|
||||
#if HAVE_SECURE_GETENV
|
||||
// if this system lacks secure_getenv(), don't allow extra paths
|
||||
// for security reasons.
|
||||
amdgpu_asic_id_table_path = find_asic_id_table();
|
||||
#endif
|
||||
// if not found, use the default AMDGPU_ASIC_ID_TABLE path
|
||||
if (!amdgpu_asic_id_table_path)
|
||||
amdgpu_asic_id_table_path = strdup(AMDGPU_ASIC_ID_TABLE);
|
||||
|
||||
fp = fopen(amdgpu_asic_id_table_path, "r");
|
||||
if (!fp) {
|
||||
fprintf(stderr, "%s: %s\n", amdgpu_asic_id_table_path,
|
||||
strerror(errno));
|
||||
goto get_cpu;
|
||||
}
|
||||
|
||||
/* 1st valid line is file version */
|
||||
while ((n = getline(&line, &len, fp)) != -1) {
|
||||
/* trim trailing newline */
|
||||
if (line[n - 1] == '\n')
|
||||
line[n - 1] = '\0';
|
||||
|
||||
/* ignore empty line and commented line */
|
||||
if (strlen(line) == 0 || line[0] == '#') {
|
||||
line_num++;
|
||||
continue;
|
||||
}
|
||||
|
||||
drmMsg("%s version: %s\n", amdgpu_asic_id_table_path, line);
|
||||
break;
|
||||
}
|
||||
|
||||
while ((n = getline(&line, &len, fp)) != -1) {
|
||||
/* trim trailing newline */
|
||||
if (line[n - 1] == '\n')
|
||||
line[n - 1] = '\0';
|
||||
|
||||
r = parse_one_line(dev, line);
|
||||
if (r != -EAGAIN)
|
||||
break;
|
||||
|
||||
line_num++;
|
||||
}
|
||||
|
||||
if (r == -EINVAL) {
|
||||
fprintf(stderr, "Invalid format: %s: line %d: %s\n",
|
||||
amdgpu_asic_id_table_path, line_num, line);
|
||||
} else if (r && r != -EAGAIN) {
|
||||
fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
|
||||
__func__, strerror(-r));
|
||||
}
|
||||
|
||||
free(line);
|
||||
fclose(fp);
|
||||
|
||||
get_cpu:
|
||||
free(amdgpu_asic_id_table_path);
|
||||
if (dev->info.ids_flags & AMDGPU_IDS_FLAGS_FUSION &&
|
||||
dev->marketing_name == NULL) {
|
||||
amdgpu_parse_proc_cpuinfo(dev);
|
||||
}
|
||||
}
|
||||
|
|
@ -22,6 +22,10 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
|
|
@ -37,80 +41,91 @@
|
|||
#include "xf86drm.h"
|
||||
#include "amdgpu_drm.h"
|
||||
#include "amdgpu_internal.h"
|
||||
#include "util_hash_table.h"
|
||||
#include "util_math.h"
|
||||
|
||||
static int amdgpu_bo_create(amdgpu_device_handle dev,
|
||||
uint64_t size,
|
||||
uint32_t handle,
|
||||
amdgpu_bo_handle *buf_handle)
|
||||
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
|
||||
uint32_t handle)
|
||||
{
|
||||
struct drm_gem_close args = {};
|
||||
|
||||
args.handle = handle;
|
||||
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
|
||||
}
|
||||
|
||||
drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
|
||||
{
|
||||
/* Remove the buffer from the hash tables. */
|
||||
pthread_mutex_lock(&bo->dev->bo_table_mutex);
|
||||
util_hash_table_remove(bo->dev->bo_handles,
|
||||
(void*)(uintptr_t)bo->handle);
|
||||
if (bo->flink_name) {
|
||||
util_hash_table_remove(bo->dev->bo_flink_names,
|
||||
(void*)(uintptr_t)bo->flink_name);
|
||||
}
|
||||
pthread_mutex_unlock(&bo->dev->bo_table_mutex);
|
||||
|
||||
/* Release CPU access. */
|
||||
if (bo->cpu_map_count > 0) {
|
||||
bo->cpu_map_count = 1;
|
||||
amdgpu_bo_cpu_unmap(bo);
|
||||
}
|
||||
|
||||
amdgpu_close_kms_handle(bo->dev, bo->handle);
|
||||
pthread_mutex_destroy(&bo->cpu_access_mutex);
|
||||
free(bo);
|
||||
}
|
||||
|
||||
int amdgpu_bo_alloc(amdgpu_device_handle dev,
|
||||
struct amdgpu_bo_alloc_request *alloc_buffer,
|
||||
amdgpu_bo_handle *buf_handle)
|
||||
{
|
||||
struct amdgpu_bo *bo;
|
||||
int r;
|
||||
union drm_amdgpu_gem_create args;
|
||||
unsigned heap = alloc_buffer->preferred_heap;
|
||||
int r = 0;
|
||||
|
||||
/* It's an error if the heap is not specified */
|
||||
if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
|
||||
return -EINVAL;
|
||||
|
||||
bo = calloc(1, sizeof(struct amdgpu_bo));
|
||||
if (!bo)
|
||||
return -ENOMEM;
|
||||
|
||||
r = handle_table_insert(&dev->bo_handles, handle, bo);
|
||||
if (r) {
|
||||
free(bo);
|
||||
return r;
|
||||
}
|
||||
|
||||
atomic_set(&bo->refcount, 1);
|
||||
bo->dev = dev;
|
||||
bo->alloc_size = size;
|
||||
bo->handle = handle;
|
||||
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
|
||||
|
||||
*buf_handle = bo;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
                               struct amdgpu_bo_alloc_request *alloc_buffer,
                               amdgpu_bo_handle *buf_handle)
{
    union drm_amdgpu_gem_create args;
    int r;

    if (!alloc_buffer || !buf_handle)
        return -EINVAL;
    bo->alloc_size = alloc_buffer->alloc_size;

    memset(&args, 0, sizeof(args));
    args.in.bo_size = alloc_buffer->alloc_size;
    args.in.alignment = alloc_buffer->phys_alignment;

    /* Set the placement. */
    args.in.domains = alloc_buffer->preferred_heap;
    args.in.domains = heap;
    args.in.domain_flags = alloc_buffer->flags;

    /* Allocate the buffer with the preferred heap. */
    r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                            &args, sizeof(args));
    if (r)
        goto out;

    pthread_mutex_lock(&dev->bo_table_mutex);
    r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
                         buf_handle);
    pthread_mutex_unlock(&dev->bo_table_mutex);
    if (r) {
        drmCloseBufferHandle(dev->fd, args.out.handle);
        free(bo);
        return r;
    }

out:
    return r;
    bo->handle = args.out.handle;

    pthread_mutex_init(&bo->cpu_access_mutex, NULL);

    *buf_handle = bo;
    return 0;
}
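In both versions above, allocation boils down to a DRM_AMDGPU_GEM_CREATE ioctl plus handle bookkeeping around it. A minimal usage sketch of the public API follows; it is not part of the diff, and the render-node path and the abbreviated error handling are assumptions:

/* Minimal sketch (not from the diff): allocate and free one 4 KiB GTT BO.
 * The render-node path is an assumption; error handling is abbreviated. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

int main(void)
{
    uint32_t major, minor;
    amdgpu_device_handle dev;
    amdgpu_bo_handle bo;
    struct amdgpu_bo_alloc_request req = {
        .alloc_size = 4096,
        .phys_alignment = 4096,
        .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
    };
    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);

    if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
        return 1;
    if (amdgpu_bo_alloc(dev, &req, &bo) == 0)
        amdgpu_bo_free(bo);
    amdgpu_device_deinitialize(dev);
    close(fd);
    return 0;
}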
drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                                      struct amdgpu_bo_metadata *info)
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
    struct drm_amdgpu_gem_metadata args = {};

    if (!info)
        return -EINVAL;

    args.handle = bo->handle;
    args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
    args.data.flags = info->flags;

@@ -129,8 +144,8 @@ drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                               &args, sizeof(args));
}

drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                                    struct amdgpu_bo_info *info)
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
    struct drm_amdgpu_gem_metadata metadata = {};
    struct drm_amdgpu_gem_create_in bo_info = {};

@@ -138,7 +153,7 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
    int r;

    /* Validate the BO passed in */
    if (!bo->handle || !info)
    if (!bo->handle)
        return -EINVAL;

    /* Query metadata. */

@@ -180,6 +195,14 @@ drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
    return 0;
}

static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
    pthread_mutex_lock(&bo->dev->bo_table_mutex);
    util_hash_table_set(bo->dev->bo_handles,
                        (void*)(uintptr_t)bo->handle, bo);
    pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
|
||||
{
|
||||
struct drm_gem_flink flink;
|
||||
|
|
@ -213,19 +236,24 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
|
|||
|
||||
bo->flink_name = flink.name;
|
||||
|
||||
if (bo->dev->flink_fd != bo->dev->fd)
|
||||
drmCloseBufferHandle(bo->dev->flink_fd, handle);
|
||||
if (bo->dev->flink_fd != bo->dev->fd) {
|
||||
struct drm_gem_close args = {};
|
||||
args.handle = handle;
|
||||
drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&bo->dev->bo_table_mutex);
|
||||
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
|
||||
util_hash_table_set(bo->dev->bo_flink_names,
|
||||
(void*)(uintptr_t)bo->flink_name,
|
||||
bo);
|
||||
pthread_mutex_unlock(&bo->dev->bo_table_mutex);
|
||||
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
|
||||
enum amdgpu_bo_handle_type type,
|
||||
uint32_t *shared_handle)
|
||||
int amdgpu_bo_export(amdgpu_bo_handle bo,
|
||||
enum amdgpu_bo_handle_type type,
|
||||
uint32_t *shared_handle)
|
||||
{
|
||||
int r;
|
||||
|
||||
|
|
@ -239,49 +267,45 @@ drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
|
|||
return 0;
|
||||
|
||||
case amdgpu_bo_handle_type_kms:
|
||||
case amdgpu_bo_handle_type_kms_noimport:
|
||||
amdgpu_add_handle_to_table(bo);
|
||||
*shared_handle = bo->handle;
|
||||
return 0;
|
||||
|
||||
case amdgpu_bo_handle_type_dma_buf_fd:
|
||||
return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
|
||||
DRM_CLOEXEC | DRM_RDWR,
|
||||
(int*)shared_handle);
|
||||
amdgpu_add_handle_to_table(bo);
|
||||
return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
|
||||
(int*)shared_handle);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
|
||||
enum amdgpu_bo_handle_type type,
|
||||
uint32_t shared_handle,
|
||||
int amdgpu_bo_import(amdgpu_device_handle dev,
|
||||
enum amdgpu_bo_handle_type type,
|
||||
uint32_t shared_handle,
|
||||
struct amdgpu_bo_import_result *output)
|
||||
{
|
||||
struct drm_gem_open open_arg = {};
|
||||
struct amdgpu_bo *bo = NULL;
|
||||
uint32_t handle = 0, flink_name = 0;
|
||||
uint64_t alloc_size = 0;
|
||||
int r = 0;
|
||||
int r;
|
||||
int dma_fd;
|
||||
uint64_t dma_buf_size = 0;
|
||||
|
||||
/* We must maintain a list of pairs <handle, bo>, so that we always
|
||||
* return the same amdgpu_bo instance for the same handle. */
|
||||
pthread_mutex_lock(&dev->bo_table_mutex);
|
||||
|
||||
/* Convert a DMA buf handle to a KMS handle now. */
|
||||
if (type == amdgpu_bo_handle_type_dma_buf_fd) {
|
||||
uint32_t handle;
|
||||
off_t size;
|
||||
|
||||
/* Get a KMS handle. */
|
||||
r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
|
||||
if (r)
|
||||
goto unlock;
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Query the buffer size. */
|
||||
size = lseek(shared_handle, 0, SEEK_END);
|
||||
if (size == (off_t)-1) {
|
||||
r = -errno;
|
||||
goto free_bo_handle;
|
||||
amdgpu_close_kms_handle(dev, handle);
|
||||
return -errno;
|
||||
}
|
||||
lseek(shared_handle, 0, SEEK_SET);
|
||||
|
||||
|
|
@ -289,146 +313,117 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
|
|||
shared_handle = handle;
|
||||
}
|
||||
|
||||
/* We must maintain a list of pairs <handle, bo>, so that we always
|
||||
* return the same amdgpu_bo instance for the same handle. */
|
||||
pthread_mutex_lock(&dev->bo_table_mutex);
|
||||
|
||||
/* If we have already created a buffer with this handle, find it. */
|
||||
switch (type) {
|
||||
case amdgpu_bo_handle_type_gem_flink_name:
|
||||
bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
|
||||
bo = util_hash_table_get(dev->bo_flink_names,
|
||||
(void*)(uintptr_t)shared_handle);
|
||||
break;
|
||||
|
||||
case amdgpu_bo_handle_type_dma_buf_fd:
|
||||
bo = handle_table_lookup(&dev->bo_handles, shared_handle);
|
||||
bo = util_hash_table_get(dev->bo_handles,
|
||||
(void*)(uintptr_t)shared_handle);
|
||||
break;
|
||||
|
||||
case amdgpu_bo_handle_type_kms:
|
||||
case amdgpu_bo_handle_type_kms_noimport:
|
||||
/* Importing a KMS handle in not allowed. */
|
||||
r = -EPERM;
|
||||
goto unlock;
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return -EPERM;
|
||||
|
||||
default:
|
||||
r = -EINVAL;
|
||||
goto unlock;
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (bo) {
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
|
||||
/* The buffer already exists, just bump the refcount. */
|
||||
atomic_inc(&bo->refcount);
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
|
||||
output->buf_handle = bo;
|
||||
output->alloc_size = bo->alloc_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bo = calloc(1, sizeof(struct amdgpu_bo));
|
||||
if (!bo) {
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
if (type == amdgpu_bo_handle_type_dma_buf_fd) {
|
||||
amdgpu_close_kms_handle(dev, shared_handle);
|
||||
}
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Open the handle. */
|
||||
switch (type) {
|
||||
case amdgpu_bo_handle_type_gem_flink_name:
|
||||
open_arg.name = shared_handle;
|
||||
r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
|
||||
if (r)
|
||||
goto unlock;
|
||||
|
||||
flink_name = shared_handle;
|
||||
handle = open_arg.handle;
|
||||
alloc_size = open_arg.size;
|
||||
if (dev->flink_fd != dev->fd) {
|
||||
r = drmPrimeHandleToFD(dev->flink_fd, handle,
|
||||
DRM_CLOEXEC, &dma_fd);
|
||||
if (r)
|
||||
goto free_bo_handle;
|
||||
r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
|
||||
close(dma_fd);
|
||||
if (r)
|
||||
goto free_bo_handle;
|
||||
r = drmCloseBufferHandle(dev->flink_fd,
|
||||
open_arg.handle);
|
||||
if (r)
|
||||
goto free_bo_handle;
|
||||
if (r) {
|
||||
free(bo);
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return r;
|
||||
}
|
||||
open_arg.handle = 0;
|
||||
|
||||
bo->handle = open_arg.handle;
|
||||
if (dev->flink_fd != dev->fd) {
|
||||
r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
|
||||
if (r) {
|
||||
free(bo);
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return r;
|
||||
}
|
||||
r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
|
||||
|
||||
close(dma_fd);
|
||||
|
||||
if (r) {
|
||||
free(bo);
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
bo->flink_name = shared_handle;
|
||||
bo->alloc_size = open_arg.size;
|
||||
util_hash_table_set(dev->bo_flink_names,
|
||||
(void*)(uintptr_t)bo->flink_name, bo);
|
||||
break;
|
||||
|
||||
case amdgpu_bo_handle_type_dma_buf_fd:
|
||||
handle = shared_handle;
|
||||
alloc_size = dma_buf_size;
|
||||
bo->handle = shared_handle;
|
||||
bo->alloc_size = dma_buf_size;
|
||||
break;
|
||||
|
||||
case amdgpu_bo_handle_type_kms:
|
||||
case amdgpu_bo_handle_type_kms_noimport:
|
||||
assert(0); /* unreachable */
|
||||
}
|
||||
|
||||
/* Initialize it. */
|
||||
r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
|
||||
if (r)
|
||||
goto free_bo_handle;
|
||||
atomic_set(&bo->refcount, 1);
|
||||
bo->dev = dev;
|
||||
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
|
||||
|
||||
if (flink_name) {
|
||||
bo->flink_name = flink_name;
|
||||
r = handle_table_insert(&dev->bo_flink_names, flink_name,
|
||||
bo);
|
||||
if (r)
|
||||
goto free_bo_handle;
|
||||
|
||||
}
|
||||
util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
|
||||
output->buf_handle = bo;
|
||||
output->alloc_size = bo->alloc_size;
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return 0;
|
||||
|
||||
free_bo_handle:
|
||||
if (flink_name && open_arg.handle)
|
||||
drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
|
||||
|
||||
if (bo)
|
||||
amdgpu_bo_free(bo);
|
||||
else
|
||||
drmCloseBufferHandle(dev->fd, handle);
|
||||
unlock:
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
|
||||
{
|
||||
struct amdgpu_device *dev;
|
||||
struct amdgpu_bo *bo = buf_handle;
|
||||
|
||||
assert(bo != NULL);
|
||||
dev = bo->dev;
|
||||
pthread_mutex_lock(&dev->bo_table_mutex);
|
||||
|
||||
if (update_references(&bo->refcount, NULL)) {
|
||||
/* Remove the buffer from the hash tables. */
|
||||
handle_table_remove(&dev->bo_handles, bo->handle);
|
||||
|
||||
if (bo->flink_name)
|
||||
handle_table_remove(&dev->bo_flink_names,
|
||||
bo->flink_name);
|
||||
|
||||
/* Release CPU access. */
|
||||
if (bo->cpu_map_count > 0) {
|
||||
bo->cpu_map_count = 1;
|
||||
amdgpu_bo_cpu_unmap(bo);
|
||||
}
|
||||
|
||||
drmCloseBufferHandle(dev->fd, bo->handle);
|
||||
pthread_mutex_destroy(&bo->cpu_access_mutex);
|
||||
free(bo);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
|
||||
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
|
||||
{
|
||||
atomic_inc(&bo->refcount);
|
||||
/* Just drop the reference. */
|
||||
amdgpu_bo_reference(&buf_handle, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
|
||||
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
|
||||
{
|
||||
union drm_amdgpu_gem_mmap args;
|
||||
void *ptr;
|
||||
|
|
@ -476,7 +471,7 @@ drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
|
||||
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
|
||||
{
|
||||
int r;
|
||||
|
||||
|
|
@ -486,7 +481,7 @@ drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
|
|||
if (bo->cpu_map_count == 0) {
|
||||
/* not mapped */
|
||||
pthread_mutex_unlock(&bo->cpu_access_mutex);
|
||||
return -EINVAL;
|
||||
return -EBADMSG;
|
||||
}
|
||||
|
||||
bo->cpu_map_count--;
|
||||
|
|
@ -502,7 +497,7 @@ drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
|
|||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
|
||||
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
|
||||
struct amdgpu_buffer_size_alignments *info)
|
||||
{
|
||||
info->size_local = dev->dev_info.pte_fragment_size;
|
||||
|
|
@ -510,8 +505,8 @@ drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
|
||||
uint64_t timeout_ns,
|
||||
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
|
||||
uint64_t timeout_ns,
|
||||
bool *busy)
|
||||
{
|
||||
union drm_amdgpu_gem_wait_idle args;
|
||||
|
|
@ -533,122 +528,58 @@ drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
|
|||
}
|
||||
}
|
||||
|
||||
drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
|
||||
void *cpu,
|
||||
uint64_t size,
|
||||
amdgpu_bo_handle *buf_handle,
|
||||
uint64_t *offset_in_bo)
|
||||
{
|
||||
struct amdgpu_bo *bo = NULL;
|
||||
uint32_t i;
|
||||
int r = 0;
|
||||
|
||||
if (cpu == NULL || size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Workaround for a buggy application which tries to import previously
|
||||
* exposed CPU pointers. If we find a real world use case we should
|
||||
* improve that by asking the kernel for the right handle.
|
||||
*/
|
||||
pthread_mutex_lock(&dev->bo_table_mutex);
|
||||
for (i = 0; i < dev->bo_handles.max_key; i++) {
|
||||
bo = handle_table_lookup(&dev->bo_handles, i);
|
||||
if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
|
||||
continue;
|
||||
if (cpu >= bo->cpu_ptr &&
|
||||
cpu < (void*)((uintptr_t)bo->cpu_ptr + (size_t)bo->alloc_size))
|
||||
break;
|
||||
}
|
||||
|
||||
if (i < dev->bo_handles.max_key) {
|
||||
atomic_inc(&bo->refcount);
|
||||
*buf_handle = bo;
|
||||
*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
|
||||
} else {
|
||||
*buf_handle = NULL;
|
||||
*offset_in_bo = 0;
|
||||
r = -ENXIO;
|
||||
}
|
||||
pthread_mutex_unlock(&dev->bo_table_mutex);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                              void *cpu,
                                              uint64_t size,
                                              amdgpu_bo_handle *buf_handle)
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   amdgpu_bo_handle *buf_handle)
{
    int r;
    struct amdgpu_bo *bo;
    struct drm_amdgpu_gem_userptr args;
    uintptr_t cpu0;
    uint32_t ps, off;

    args.addr = (uintptr_t)cpu;
    args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                 AMDGPU_GEM_USERPTR_VALIDATE;
    memset(&args, 0, sizeof(args));
    ps = getpagesize();

    cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
    off = (uintptr_t)cpu - cpu0;
    size = ROUND_UP(size + off, ps);

    args.addr = cpu0;
    args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
    args.size = size;
    r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                            &args, sizeof(args));
    if (r)
        goto out;
        return r;

    pthread_mutex_lock(&dev->bo_table_mutex);
    r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
    pthread_mutex_unlock(&dev->bo_table_mutex);
    if (r) {
        drmCloseBufferHandle(dev->fd, args.handle);
    }
    bo = calloc(1, sizeof(struct amdgpu_bo));
    if (!bo)
        return -ENOMEM;

    atomic_set(&bo->refcount, 1);
    bo->dev = dev;
    bo->alloc_size = size;
    bo->handle = args.handle;

    *buf_handle = bo;

out:
    return r;
}
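For the userptr path above, the kernel pins whole pages, which is why the newer code rounds the address and size to the page size before issuing DRM_AMDGPU_GEM_USERPTR. A minimal sketch of wrapping application memory follows; it is illustrative only, not from the diff, and the helper name and use of posix_memalign are assumptions:

/* Minimal sketch (not from the diff): wrap an anonymous, page-aligned
 * allocation as a userptr BO. 'dev' is assumed to come from
 * amdgpu_device_initialize() as in the earlier sketch. */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include "amdgpu.h"

static int wrap_user_memory(amdgpu_device_handle dev, size_t size,
                            void **cpu, amdgpu_bo_handle *bo)
{
    long ps = sysconf(_SC_PAGESIZE);

    /* The kernel pins whole pages, so align both address and size. */
    if (posix_memalign(cpu, (size_t)ps, size))
        return -ENOMEM;
    return amdgpu_create_bo_from_user_mem(dev, *cpu, size, bo);
}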
|
||||
|
||||
drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
|
||||
uint32_t number_of_buffers,
|
||||
struct drm_amdgpu_bo_list_entry *buffers,
|
||||
uint32_t *result)
|
||||
{
|
||||
union drm_amdgpu_bo_list args;
|
||||
int r;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
|
||||
args.in.bo_number = number_of_buffers;
|
||||
args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
|
||||
args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
|
||||
&args, sizeof(args));
|
||||
if (!r)
|
||||
*result = args.out.list_handle;
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
|
||||
uint32_t bo_list)
|
||||
{
|
||||
union drm_amdgpu_bo_list args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
|
||||
args.in.list_handle = bo_list;
|
||||
|
||||
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
|
||||
&args, sizeof(args));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
|
||||
uint32_t number_of_resources,
|
||||
amdgpu_bo_handle *resources,
|
||||
uint8_t *resource_prios,
|
||||
amdgpu_bo_list_handle *result)
|
||||
int amdgpu_bo_list_create(amdgpu_device_handle dev,
|
||||
uint32_t number_of_resources,
|
||||
amdgpu_bo_handle *resources,
|
||||
uint8_t *resource_prios,
|
||||
amdgpu_bo_list_handle *result)
|
||||
{
|
||||
struct drm_amdgpu_bo_list_entry *list;
|
||||
union drm_amdgpu_bo_list args;
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (!number_of_resources || !resources)
|
||||
if (!number_of_resources)
|
||||
return -EINVAL;
|
||||
|
||||
/* overflow check for multiplication */
|
||||
|
|
@ -659,12 +590,6 @@ drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
|
|||
if (!list)
|
||||
return -ENOMEM;
|
||||
|
||||
*result = malloc(sizeof(struct amdgpu_bo_list));
|
||||
if (!*result) {
|
||||
free(list);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
|
||||
args.in.bo_number = number_of_resources;
|
||||
|
|
@ -682,17 +607,16 @@ drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
|
|||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
|
||||
&args, sizeof(args));
|
||||
free(list);
|
||||
if (r) {
|
||||
free(*result);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
*result = malloc(sizeof(struct amdgpu_bo_list));
|
||||
(*result)->dev = dev;
|
||||
(*result)->handle = args.out.list_handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
|
||||
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
|
||||
{
|
||||
union drm_amdgpu_bo_list args;
|
||||
int r;
|
||||
|
|
@ -710,10 +634,10 @@ drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
|
|||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
|
||||
uint32_t number_of_resources,
|
||||
amdgpu_bo_handle *resources,
|
||||
uint8_t *resource_prios)
|
||||
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
|
||||
uint32_t number_of_resources,
|
||||
amdgpu_bo_handle *resources,
|
||||
uint8_t *resource_prios)
|
||||
{
|
||||
struct drm_amdgpu_bo_list_entry *list;
|
||||
union drm_amdgpu_bo_list args;
|
||||
|
|
@ -728,7 +652,7 @@ drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
|
|||
return -EINVAL;
|
||||
|
||||
list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
|
||||
if (!list)
|
||||
if (list == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
|
||||
|
|
@ -751,81 +675,29 @@ drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
|
|||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
|
||||
uint64_t offset,
|
||||
uint64_t size,
|
||||
uint64_t addr,
|
||||
uint64_t flags,
|
||||
uint32_t ops)
|
||||
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
|
||||
uint64_t offset,
|
||||
uint64_t size,
|
||||
uint64_t addr,
|
||||
uint64_t flags,
|
||||
uint32_t ops)
|
||||
{
|
||||
amdgpu_device_handle dev = bo->dev;
|
||||
|
||||
size = ALIGN(size, getpagesize());
|
||||
|
||||
return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
|
||||
AMDGPU_VM_PAGE_READABLE |
|
||||
AMDGPU_VM_PAGE_WRITEABLE |
|
||||
AMDGPU_VM_PAGE_EXECUTABLE, ops);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
|
||||
amdgpu_bo_handle bo,
|
||||
uint64_t offset,
|
||||
uint64_t size,
|
||||
uint64_t addr,
|
||||
uint64_t flags,
|
||||
uint32_t ops)
|
||||
{
|
||||
struct drm_amdgpu_gem_va va;
|
||||
int r;
|
||||
|
||||
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
|
||||
ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
|
||||
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&va, 0, sizeof(va));
|
||||
va.handle = bo ? bo->handle : 0;
|
||||
va.handle = bo->handle;
|
||||
va.operation = ops;
|
||||
va.flags = flags;
|
||||
va.flags = AMDGPU_VM_PAGE_READABLE |
|
||||
AMDGPU_VM_PAGE_WRITEABLE |
|
||||
AMDGPU_VM_PAGE_EXECUTABLE;
|
||||
va.va_address = addr;
|
||||
va.offset_in_bo = offset;
|
||||
va.map_size = size;
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_bo_va_op_raw2(amdgpu_device_handle dev,
|
||||
amdgpu_bo_handle bo,
|
||||
uint64_t offset,
|
||||
uint64_t size,
|
||||
uint64_t addr,
|
||||
uint64_t flags,
|
||||
uint32_t ops,
|
||||
uint32_t vm_timeline_syncobj_out,
|
||||
uint64_t vm_timeline_point,
|
||||
uint64_t input_fence_syncobj_handles,
|
||||
uint32_t num_syncobj_handles)
|
||||
{
|
||||
struct drm_amdgpu_gem_va va;
|
||||
int r;
|
||||
|
||||
if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
|
||||
ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&va, 0, sizeof(va));
|
||||
va.handle = bo ? bo->handle : 0;
|
||||
va.operation = ops;
|
||||
va.flags = flags;
|
||||
va.va_address = addr;
|
||||
va.offset_in_bo = offset;
|
||||
va.map_size = size;
|
||||
va.vm_timeline_syncobj_out = vm_timeline_syncobj_out;
|
||||
va.vm_timeline_point = vm_timeline_point;
|
||||
va.input_fence_syncobj_handles = input_fence_syncobj_handles;
|
||||
va.num_syncobj_handles = num_syncobj_handles;
|
||||
va.map_size = ALIGN(size, getpagesize());
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
|
||||
|
||||
|
|
|
|||
|
|
@@ -21,6 +21,10 @@
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

@@ -28,52 +32,33 @@
#include <pthread.h>
#include <sched.h>
#include <sys/ioctl.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);

/**
 * Create command submission context
 *
 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
 * \param context - \c [out] GPU Context handle
 * \param dev - \c [in] amdgpu device handle
 * \param context - \c [out] amdgpu context handle
 *
 * \return 0 on success otherwise POSIX Error code
 */
drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
                                     uint32_t priority,
                                     amdgpu_context_handle *context)
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
                         amdgpu_context_handle *context)
{
    struct amdgpu_context *gpu_context;
    union drm_amdgpu_ctx args;
    int i, j, k;
    int r;
    char *override_priority;

    if (!dev || !context)
    if (NULL == dev)
        return -EINVAL;
    if (NULL == context)
        return -EINVAL;

    override_priority = getenv("AMD_PRIORITY");
    if (override_priority) {
        /* The priority is a signed integer. The variable type is
         * wrong. If parsing fails, priority is unchanged.
         */
        if (sscanf(override_priority, "%i", &priority) == 1) {
            printf("amdgpu: context priority changed to %i\n",
                   priority);
        }
    }

    gpu_context = calloc(1, sizeof(struct amdgpu_context));
    if (!gpu_context)
    if (NULL == gpu_context)
        return -ENOMEM;

    gpu_context->dev = dev;

@@ -85,17 +70,11 @@ drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
    /* Create the context */
    memset(&args, 0, sizeof(args));
    args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
    args.in.priority = priority;

    r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
    if (r)
        goto error;

    gpu_context->id = args.out.alloc.ctx_id;
    for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
        for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
            for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
                list_inithead(&gpu_context->sem_list[i][j][k]);
    *context = (amdgpu_context_handle)gpu_context;

    return 0;

@@ -106,12 +85,6 @@ error:
    return r;
}

drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
                                    amdgpu_context_handle *context)
{
    return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
}
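The two entry points above differ only in whether a priority is carried into AMDGPU_CTX_OP_ALLOC_CTX. A minimal sketch of creating and releasing a context with the plain variant follows; it is illustrative, not part of the diff, and 'dev' is assumed to come from amdgpu_device_initialize():

/* Minimal sketch (not from the diff): create a normal-priority submission
 * context and release it again. */
#include "amdgpu.h"

static int ping_context(amdgpu_device_handle dev)
{
    amdgpu_context_handle ctx;
    int r = amdgpu_cs_ctx_create(dev, &ctx);

    if (r)
        return r;
    /* A real submission would build IB chunks and call amdgpu_cs_submit()
     * here; this sketch only exercises context setup and teardown. */
    return amdgpu_cs_ctx_free(ctx);
}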
|
||||
|
||||
/**
|
||||
* Release command submission context
|
||||
*
|
||||
|
|
@ -120,13 +93,12 @@ drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
|
|||
*
|
||||
* \return 0 on success otherwise POSIX Error code
|
||||
*/
|
||||
drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
|
||||
int amdgpu_cs_ctx_free(amdgpu_context_handle context)
|
||||
{
|
||||
union drm_amdgpu_ctx args;
|
||||
int i, j, k;
|
||||
int r;
|
||||
|
||||
if (!context)
|
||||
if (NULL == context)
|
||||
return -EINVAL;
|
||||
|
||||
pthread_mutex_destroy(&context->sequence_mutex);
|
||||
|
|
@ -137,72 +109,14 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
|
|||
args.in.ctx_id = context->id;
|
||||
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
|
||||
&args, sizeof(args));
|
||||
for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
|
||||
for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
|
||||
for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
|
||||
amdgpu_semaphore_handle sem, tmp;
|
||||
LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
|
||||
list_del(&sem->list);
|
||||
amdgpu_cs_reset_sem(sem);
|
||||
amdgpu_cs_unreference_sem(sem);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(context);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
|
||||
amdgpu_context_handle context,
|
||||
int master_fd,
|
||||
unsigned priority)
|
||||
{
|
||||
union drm_amdgpu_sched args;
|
||||
int r;
|
||||
|
||||
if (!dev || !context || master_fd < 0)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
|
||||
args.in.fd = dev->fd;
|
||||
args.in.priority = priority;
|
||||
args.in.ctx_id = context->id;
|
||||
|
||||
r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
|
||||
uint32_t op,
|
||||
uint32_t flags,
|
||||
uint32_t *out_flags)
|
||||
{
|
||||
union drm_amdgpu_ctx args;
|
||||
int r;
|
||||
|
||||
if (!context)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.op = op;
|
||||
args.in.ctx_id = context->id;
|
||||
args.in.flags = flags;
|
||||
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
|
||||
&args, sizeof(args));
|
||||
if (!r && out_flags)
|
||||
*out_flags = args.out.pstate.flags;
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
|
||||
uint32_t *state, uint32_t *hangs)
|
||||
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
|
||||
uint32_t *state, uint32_t *hangs)
|
||||
{
|
||||
union drm_amdgpu_ctx args;
|
||||
int r;
|
||||
|
|
@ -222,25 +136,6 @@ drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
|
|||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
|
||||
uint64_t *flags)
|
||||
{
|
||||
union drm_amdgpu_ctx args;
|
||||
int r;
|
||||
|
||||
if (!context)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
|
||||
args.in.ctx_id = context->id;
|
||||
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
|
||||
&args, sizeof(args));
|
||||
if (!r)
|
||||
*flags = args.out.state.flags;
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit command to kernel DRM
|
||||
* \param dev - \c [in] Device handle
|
||||
|
|
@ -254,15 +149,12 @@ drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
|
|||
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
|
||||
struct amdgpu_cs_request *ibs_request)
|
||||
{
|
||||
union drm_amdgpu_cs cs;
|
||||
uint64_t *chunk_array;
|
||||
struct drm_amdgpu_cs_chunk *chunks;
|
||||
struct drm_amdgpu_cs_chunk_data *chunk_data;
|
||||
struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
|
||||
struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
|
||||
amdgpu_device_handle dev = context->dev;
|
||||
struct list_head *sem_list;
|
||||
amdgpu_semaphore_handle sem, tmp;
|
||||
uint32_t i, size, num_chunks, bo_list_handle = 0, sem_count = 0;
|
||||
uint64_t seq_no;
|
||||
uint32_t i, size;
|
||||
bool user_fence;
|
||||
int r = 0;
|
||||
|
||||
|
|
@ -270,26 +162,29 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
|
|||
return -EINVAL;
|
||||
if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
|
||||
return -EINVAL;
|
||||
if (ibs_request->number_of_ibs == 0) {
|
||||
ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
|
||||
return 0;
|
||||
}
|
||||
if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
|
||||
return -EINVAL;
|
||||
user_fence = (ibs_request->fence_info.handle != NULL);
|
||||
|
||||
size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
|
||||
size = ibs_request->number_of_ibs + (user_fence ? 2 : 1);
|
||||
|
||||
chunk_array = alloca(sizeof(uint64_t) * size);
|
||||
chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
|
||||
|
||||
size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
|
||||
|
||||
chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
|
||||
|
||||
memset(&cs, 0, sizeof(cs));
|
||||
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
|
||||
cs.in.ctx_id = context->id;
|
||||
if (ibs_request->resources)
|
||||
bo_list_handle = ibs_request->resources->handle;
|
||||
num_chunks = ibs_request->number_of_ibs;
|
||||
cs.in.bo_list_handle = ibs_request->resources->handle;
|
||||
cs.in.num_chunks = ibs_request->number_of_ibs;
|
||||
/* IB chunks */
|
||||
for (i = 0; i < ibs_request->number_of_ibs; i++) {
|
||||
struct amdgpu_cs_ib_info *ib;
|
||||
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
|
||||
chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
|
||||
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
|
||||
chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
|
||||
|
|
@ -308,9 +203,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
|
|||
pthread_mutex_lock(&context->sequence_mutex);
|
||||
|
||||
if (user_fence) {
|
||||
i = num_chunks++;
|
||||
i = cs.in.num_chunks++;
|
||||
|
||||
/* fence chunk */
|
||||
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
|
||||
chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
|
||||
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
|
||||
chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
|
||||
|
|
@ -323,7 +219,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
|
|||
}
|
||||
|
||||
if (ibs_request->number_of_dependencies) {
|
||||
dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) *
|
||||
dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
|
||||
ibs_request->number_of_dependencies);
|
||||
if (!dependencies) {
|
||||
r = -ENOMEM;
|
||||
|
|
@ -340,67 +236,40 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
|
|||
dep->handle = info->fence;
|
||||
}
|
||||
|
||||
i = num_chunks++;
|
||||
i = cs.in.num_chunks++;
|
||||
|
||||
/* dependencies chunk */
|
||||
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
|
||||
chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
|
||||
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
|
||||
* ibs_request->number_of_dependencies;
|
||||
chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
|
||||
}
|
||||
|
||||
sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
|
||||
LIST_FOR_EACH_ENTRY(sem, sem_list, list)
|
||||
sem_count++;
|
||||
if (sem_count) {
|
||||
sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
|
||||
if (!sem_dependencies) {
|
||||
r = -ENOMEM;
|
||||
goto error_unlock;
|
||||
}
|
||||
sem_count = 0;
|
||||
LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
|
||||
struct amdgpu_cs_fence *info = &sem->signal_fence;
|
||||
struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
|
||||
dep->ip_type = info->ip_type;
|
||||
dep->ip_instance = info->ip_instance;
|
||||
dep->ring = info->ring;
|
||||
dep->ctx_id = info->context->id;
|
||||
dep->handle = info->fence;
|
||||
|
||||
list_del(&sem->list);
|
||||
amdgpu_cs_reset_sem(sem);
|
||||
amdgpu_cs_unreference_sem(sem);
|
||||
}
|
||||
i = num_chunks++;
|
||||
|
||||
/* dependencies chunk */
|
||||
chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
|
||||
chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
|
||||
chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
|
||||
}
|
||||
|
||||
r = amdgpu_cs_submit_raw2(dev, context, bo_list_handle, num_chunks,
|
||||
chunks, &seq_no);
|
||||
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
|
||||
&cs, sizeof(cs));
|
||||
if (r)
|
||||
goto error_unlock;
|
||||
|
||||
ibs_request->seq_no = seq_no;
|
||||
context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
|
||||
ibs_request->seq_no = cs.out.handle;
|
||||
|
||||
error_unlock:
|
||||
pthread_mutex_unlock(&context->sequence_mutex);
|
||||
free(dependencies);
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
|
||||
uint64_t flags,
|
||||
struct amdgpu_cs_request *ibs_request,
|
||||
uint32_t number_of_requests)
|
||||
int amdgpu_cs_submit(amdgpu_context_handle context,
|
||||
uint64_t flags,
|
||||
struct amdgpu_cs_request *ibs_request,
|
||||
uint32_t number_of_requests)
|
||||
{
|
||||
uint32_t i;
|
||||
int r;
|
||||
|
||||
if (!context || !ibs_request)
|
||||
if (NULL == context)
|
||||
return -EINVAL;
|
||||
if (NULL == ibs_request)
|
||||
return -EINVAL;
|
||||
|
||||
r = 0;
|
||||
|
|
@ -427,18 +296,12 @@ drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
|
|||
|
||||
if (timeout != AMDGPU_TIMEOUT_INFINITE) {
|
||||
struct timespec current;
|
||||
uint64_t current_ns;
|
||||
r = clock_gettime(CLOCK_MONOTONIC, ¤t);
|
||||
if (r) {
|
||||
fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
|
||||
return AMDGPU_TIMEOUT_INFINITE;
|
||||
}
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
|
||||
current_ns += current.tv_nsec;
|
||||
timeout += current_ns;
|
||||
if (timeout < current_ns)
|
||||
timeout = AMDGPU_TIMEOUT_INFINITE;
|
||||
timeout += ((uint64_t)current.tv_sec) * 1000000000ull;
|
||||
timeout += current.tv_nsec;
|
||||
}
|
||||
return timeout;
|
||||
}
|
||||
|
|
@ -476,24 +339,24 @@ static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
|
||||
uint64_t timeout_ns,
|
||||
uint64_t flags,
|
||||
uint32_t *expired)
|
||||
int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
|
||||
uint64_t timeout_ns,
|
||||
uint64_t flags,
|
||||
uint32_t *expired)
|
||||
{
|
||||
bool busy = true;
|
||||
int r;
|
||||
|
||||
if (!fence || !expired || !fence->context)
|
||||
if (NULL == fence)
|
||||
return -EINVAL;
|
||||
if (NULL == expired)
|
||||
return -EINVAL;
|
||||
if (NULL == fence->context)
|
||||
return -EINVAL;
|
||||
if (fence->ip_type >= AMDGPU_HW_IP_NUM)
|
||||
return -EINVAL;
|
||||
if (fence->ring >= AMDGPU_CS_MAX_RINGS)
|
||||
return -EINVAL;
|
||||
if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
|
||||
*expired = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*expired = false;
|
||||
|
||||
|
|
@ -507,485 +370,3 @@ drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
|
|||
return r;
|
||||
}
|
||||
|
||||
static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
|
||||
uint32_t fence_count,
|
||||
bool wait_all,
|
||||
uint64_t timeout_ns,
|
||||
uint32_t *status,
|
||||
uint32_t *first)
|
||||
{
|
||||
struct drm_amdgpu_fence *drm_fences;
|
||||
amdgpu_device_handle dev = fences[0].context->dev;
|
||||
union drm_amdgpu_wait_fences args;
|
||||
int r;
|
||||
uint32_t i;
|
||||
|
||||
drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
|
||||
for (i = 0; i < fence_count; i++) {
|
||||
drm_fences[i].ctx_id = fences[i].context->id;
|
||||
drm_fences[i].ip_type = fences[i].ip_type;
|
||||
drm_fences[i].ip_instance = fences[i].ip_instance;
|
||||
drm_fences[i].ring = fences[i].ring;
|
||||
drm_fences[i].seq_no = fences[i].fence;
|
||||
}
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.in.fences = (uint64_t)(uintptr_t)drm_fences;
|
||||
args.in.fence_count = fence_count;
|
||||
args.in.wait_all = wait_all;
|
||||
args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);
|
||||
|
||||
r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
|
||||
if (r)
|
||||
return -errno;
|
||||
|
||||
*status = args.out.status;
|
||||
|
||||
if (first)
|
||||
*first = args.out.first_signaled;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
|
||||
uint32_t fence_count,
|
||||
bool wait_all,
|
||||
uint64_t timeout_ns,
|
||||
uint32_t *status,
|
||||
uint32_t *first)
|
||||
{
|
||||
uint32_t i;
|
||||
|
||||
/* Sanity check */
|
||||
if (!fences || !status || !fence_count)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < fence_count; i++) {
|
||||
if (NULL == fences[i].context)
|
||||
return -EINVAL;
|
||||
if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
|
||||
return -EINVAL;
|
||||
if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*status = 0;
|
||||
|
||||
return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
|
||||
timeout_ns, status, first);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
|
||||
{
|
||||
struct amdgpu_semaphore *gpu_semaphore;
|
||||
|
||||
if (!sem)
|
||||
return -EINVAL;
|
||||
|
||||
gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
|
||||
if (!gpu_semaphore)
|
||||
return -ENOMEM;
|
||||
|
||||
atomic_set(&gpu_semaphore->refcount, 1);
|
||||
*sem = gpu_semaphore;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
|
||||
uint32_t ip_type,
|
||||
uint32_t ip_instance,
|
||||
uint32_t ring,
|
||||
amdgpu_semaphore_handle sem)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!ctx || !sem)
|
||||
return -EINVAL;
|
||||
if (ip_type >= AMDGPU_HW_IP_NUM)
|
||||
return -EINVAL;
|
||||
if (ring >= AMDGPU_CS_MAX_RINGS)
|
||||
return -EINVAL;
|
||||
|
||||
pthread_mutex_lock(&ctx->sequence_mutex);
|
||||
/* sem has been signaled */
|
||||
if (sem->signal_fence.context) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
sem->signal_fence.context = ctx;
|
||||
sem->signal_fence.ip_type = ip_type;
|
||||
sem->signal_fence.ip_instance = ip_instance;
|
||||
sem->signal_fence.ring = ring;
|
||||
sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
|
||||
update_references(NULL, &sem->refcount);
|
||||
ret = 0;
|
||||
unlock:
|
||||
pthread_mutex_unlock(&ctx->sequence_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
|
||||
uint32_t ip_type,
|
||||
uint32_t ip_instance,
|
||||
uint32_t ring,
|
||||
amdgpu_semaphore_handle sem)
|
||||
{
|
||||
if (!ctx || !sem)
|
||||
return -EINVAL;
|
||||
if (ip_type >= AMDGPU_HW_IP_NUM)
|
||||
return -EINVAL;
|
||||
if (ring >= AMDGPU_CS_MAX_RINGS)
|
||||
return -EINVAL;
|
||||
/* must signal first */
|
||||
if (!sem->signal_fence.context)
|
||||
return -EINVAL;
|
||||
|
||||
pthread_mutex_lock(&ctx->sequence_mutex);
|
||||
list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
|
||||
pthread_mutex_unlock(&ctx->sequence_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
|
||||
{
|
||||
if (!sem || !sem->signal_fence.context)
|
||||
return -EINVAL;
|
||||
|
||||
sem->signal_fence.context = NULL;
|
||||
sem->signal_fence.ip_type = 0;
|
||||
sem->signal_fence.ip_instance = 0;
|
||||
sem->signal_fence.ring = 0;
|
||||
sem->signal_fence.fence = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
|
||||
{
|
||||
if (!sem)
|
||||
return -EINVAL;
|
||||
|
||||
if (update_references(&sem->refcount, NULL))
|
||||
free(sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
|
||||
{
|
||||
return amdgpu_cs_unreference_sem(sem);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
|
||||
uint32_t flags,
|
||||
uint32_t *handle)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjCreate(dev->fd, flags, handle);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
|
||||
uint32_t *handle)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjCreate(dev->fd, 0, handle);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
|
||||
uint32_t handle)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjDestroy(dev->fd, handle);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
|
||||
const uint32_t *syncobjs,
|
||||
uint32_t syncobj_count)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
|
||||
const uint32_t *syncobjs,
|
||||
uint32_t syncobj_count)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
|
||||
const uint32_t *syncobjs,
|
||||
uint64_t *points,
|
||||
uint32_t syncobj_count)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjTimelineSignal(dev->fd, syncobjs,
|
||||
points, syncobj_count);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
|
||||
uint32_t *handles, unsigned num_handles,
|
||||
int64_t timeout_nsec, unsigned flags,
|
||||
uint32_t *first_signaled)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
|
||||
flags, first_signaled);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
|
||||
uint32_t *handles, uint64_t *points,
|
||||
unsigned num_handles,
|
||||
int64_t timeout_nsec, unsigned flags,
|
||||
uint32_t *first_signaled)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
|
||||
timeout_nsec, flags, first_signaled);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
|
||||
uint32_t *handles, uint64_t *points,
|
||||
unsigned num_handles)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjQuery(dev->fd, handles, points, num_handles);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
|
||||
uint32_t *handles, uint64_t *points,
|
||||
unsigned num_handles, uint32_t flags)
|
||||
{
|
||||
if (!dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjQuery2(dev->fd, handles, points, num_handles, flags);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
|
||||
uint32_t handle,
|
||||
int *shared_fd)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
|
||||
int shared_fd,
|
||||
uint32_t *handle)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
|
||||
uint32_t syncobj,
|
||||
int *sync_file_fd)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
|
||||
uint32_t syncobj,
|
||||
int sync_file_fd)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
|
||||
uint32_t syncobj,
|
||||
uint64_t point,
|
||||
uint32_t flags,
|
||||
int *sync_file_fd)
|
||||
{
|
||||
uint32_t binary_handle;
|
||||
int ret;
|
||||
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
if (!point)
|
||||
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
|
||||
|
||||
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
|
||||
syncobj, point, flags);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
|
||||
out:
|
||||
drmSyncobjDestroy(dev->fd, binary_handle);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
|
||||
uint32_t syncobj,
|
||||
uint64_t point,
|
||||
int sync_file_fd)
|
||||
{
|
||||
uint32_t binary_handle;
|
||||
int ret;
|
||||
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
if (!point)
|
||||
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
|
||||
|
||||
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = drmSyncobjTransfer(dev->fd, syncobj, point,
|
||||
binary_handle, 0, 0);
|
||||
out:
|
||||
drmSyncobjDestroy(dev->fd, binary_handle);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
|
||||
uint32_t dst_handle,
|
||||
uint64_t dst_point,
|
||||
uint32_t src_handle,
|
||||
uint64_t src_point,
|
||||
uint32_t flags)
|
||||
{
|
||||
if (NULL == dev)
|
||||
return -EINVAL;
|
||||
|
||||
return drmSyncobjTransfer(dev->fd,
|
||||
dst_handle, dst_point,
|
||||
src_handle, src_point,
|
||||
flags);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
|
||||
amdgpu_context_handle context,
|
||||
amdgpu_bo_list_handle bo_list_handle,
|
||||
int num_chunks,
|
||||
struct drm_amdgpu_cs_chunk *chunks,
|
||||
uint64_t *seq_no)
|
||||
{
|
||||
union drm_amdgpu_cs cs;
|
||||
uint64_t *chunk_array;
|
||||
int i, r;
|
||||
if (num_chunks == 0)
|
||||
return -EINVAL;
|
||||
|
||||
memset(&cs, 0, sizeof(cs));
|
||||
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
|
||||
for (i = 0; i < num_chunks; i++)
|
||||
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
|
||||
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
|
||||
cs.in.ctx_id = context->id;
|
||||
cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
|
||||
cs.in.num_chunks = num_chunks;
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
|
||||
&cs, sizeof(cs));
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (seq_no)
|
||||
*seq_no = cs.out.handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
|
||||
amdgpu_context_handle context,
|
||||
uint32_t bo_list_handle,
|
||||
int num_chunks,
|
||||
struct drm_amdgpu_cs_chunk *chunks,
|
||||
uint64_t *seq_no)
|
||||
{
|
||||
union drm_amdgpu_cs cs;
|
||||
uint64_t *chunk_array;
|
||||
int i, r;
|
||||
|
||||
memset(&cs, 0, sizeof(cs));
|
||||
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
|
||||
for (i = 0; i < num_chunks; i++)
|
||||
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
|
||||
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
|
||||
cs.in.ctx_id = context->id;
|
||||
cs.in.bo_list_handle = bo_list_handle;
|
||||
cs.in.num_chunks = num_chunks;
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
|
||||
&cs, sizeof(cs));
|
||||
if (!r && seq_no)
|
||||
*seq_no = cs.out.handle;
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
|
||||
struct drm_amdgpu_cs_chunk_data *data)
|
||||
{
|
||||
data->fence_data.handle = fence_info->handle->handle;
|
||||
data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
|
||||
}
|
||||
|
||||
drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
|
||||
struct drm_amdgpu_cs_chunk_dep *dep)
|
||||
{
|
||||
dep->ip_type = fence->ip_type;
|
||||
dep->ip_instance = fence->ip_instance;
|
||||
dep->ring = fence->ring;
|
||||
dep->ctx_id = fence->context->id;
|
||||
dep->handle = fence->fence;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
|
||||
struct amdgpu_cs_fence *fence,
|
||||
uint32_t what,
|
||||
uint32_t *out_handle)
|
||||
{
|
||||
union drm_amdgpu_fence_to_handle fth;
|
||||
int r;
|
||||
|
||||
memset(&fth, 0, sizeof(fth));
|
||||
fth.in.fence.ctx_id = fence->context->id;
|
||||
fth.in.fence.ip_type = fence->ip_type;
|
||||
fth.in.fence.ip_instance = fence->ip_instance;
|
||||
fth.in.fence.ring = fence->ring;
|
||||
fth.in.fence.seq_no = fence->fence;
|
||||
fth.in.what = what;
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
|
||||
&fth, sizeof(fth));
|
||||
if (r == 0)
|
||||
*out_handle = fth.out.handle;
|
||||
return r;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -28,26 +28,61 @@
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/stat.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct util_hash_table *fd_tab;

static int fd_compare(int fd1, int fd2)
static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

static unsigned fd_hash(void *key)
{
    int fd = PTR_TO_UINT(key);
    char *name = drmGetPrimaryDeviceNameFromFd(fd);
    unsigned result = 0;
    char *c;

    if (name == NULL)
        return 0;

    for (c = name; *c; ++c)
        result += *c;

    free(name);

    return result;
}

static int fd_compare(void *key1, void *key2)
{
    int fd1 = PTR_TO_UINT(key1);
    int fd2 = PTR_TO_UINT(key2);
    char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
    char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
    int result;
|
||||
|
|
@@ -95,30 +130,15 @@ static int amdgpu_get_auth(int fd, int *auth)

static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
    /* Remove dev from dev_list, if it was added there. */
    if (dev == dev_list) {
        dev_list = dev->next;
    } else {
        for (amdgpu_device_handle node = dev_list; node; node = node->next) {
            if (node->next == dev) {
                node->next = dev->next;
                break;
            }
        }
    }

    amdgpu_vamgr_deinit(dev->vamgr);
    free(dev->vamgr);
    util_hash_table_destroy(dev->bo_flink_names);
    util_hash_table_destroy(dev->bo_handles);
    pthread_mutex_destroy(&dev->bo_table_mutex);
    util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
    close(dev->fd);
    if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
        close(dev->flink_fd);

    amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_32);
    amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_low);
    amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high_32);
    amdgpu_vamgr_deinit(&dev->va_mgr.vamgr_high);
    handle_table_fini(&dev->bo_handles);
    handle_table_fini(&dev->bo_flink_names);
    pthread_mutex_destroy(&dev->bo_table_mutex);
    free(dev->marketing_name);
    free(dev);
}
|
||||
|
||||
|
|
@ -137,100 +157,56 @@ static void amdgpu_device_free_internal(amdgpu_device_handle dev)
|
|||
* // incremented. dst is freed if its reference counter is 0.
|
||||
*/
|
||||
static void amdgpu_device_reference(struct amdgpu_device **dst,
|
||||
struct amdgpu_device *src)
|
||||
struct amdgpu_device *src)
|
||||
{
|
||||
if (update_references(&(*dst)->refcount, &src->refcount))
|
||||
amdgpu_device_free_internal(*dst);
|
||||
*dst = src;
|
||||
}
|
||||
|
||||
static int amdgpu_query_gfx_level_major(amdgpu_device_handle dev,
|
||||
uint8_t *gfx_ip_version_major)
|
||||
int amdgpu_device_initialize(int fd,
|
||||
uint32_t *major_version,
|
||||
uint32_t *minor_version,
|
||||
amdgpu_device_handle *device_handle)
|
||||
{
|
||||
struct drm_amdgpu_info_hw_ip ip_info;
|
||||
uint32_t gfx_ip_count = 0;
|
||||
int r;
|
||||
|
||||
*gfx_ip_version_major = 0;
|
||||
|
||||
r = amdgpu_query_hw_ip_count(dev, AMDGPU_HW_IP_GFX, &gfx_ip_count);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* No graphics support. */
|
||||
if (gfx_ip_count == 0)
|
||||
return 0;
|
||||
|
||||
memset(&ip_info, 0, sizeof(ip_info));
|
||||
|
||||
r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &ip_info);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* GFX6-8 don't set ip_discovery_version. */
|
||||
if (dev->minor_version >= 48 && ip_info.ip_discovery_version) {
|
||||
*gfx_ip_version_major = (ip_info.ip_discovery_version >> 16) & 0xff;
|
||||
} else {
|
||||
*gfx_ip_version_major = ip_info.hw_ip_version_major;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _amdgpu_device_initialize(int fd,
|
||||
uint32_t *major_version,
|
||||
uint32_t *minor_version,
|
||||
amdgpu_device_handle *device_handle,
|
||||
bool deduplicate_device)
|
||||
{
|
||||
struct amdgpu_device *dev = NULL;
|
||||
uint8_t gfx_ip_version_major = 0;
|
||||
struct amdgpu_device *dev;
|
||||
drmVersionPtr version;
|
||||
int r;
|
||||
int flag_auth = 0;
|
||||
int flag_authexist=0;
|
||||
uint32_t accel_working = 0;
|
||||
uint32_t va_mgr_flags = 0;
|
||||
uint64_t start, max;
|
||||
|
||||
*device_handle = NULL;
|
||||
|
||||
pthread_mutex_lock(&dev_mutex);
|
||||
|
||||
pthread_mutex_lock(&fd_mutex);
|
||||
if (!fd_tab)
|
||||
fd_tab = util_hash_table_create(fd_hash, fd_compare);
|
||||
r = amdgpu_get_auth(fd, &flag_auth);
|
||||
if (r) {
|
||||
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
|
||||
__func__, r);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (deduplicate_device)
|
||||
for (dev = dev_list; dev; dev = dev->next)
|
||||
if (fd_compare(dev->fd, fd) == 0)
|
||||
break;
|
||||
|
||||
dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
|
||||
if (dev) {
|
||||
r = amdgpu_get_auth(dev->fd, &flag_authexist);
|
||||
if (r) {
|
||||
fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
|
||||
__func__, r);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
return r;
|
||||
}
|
||||
if ((flag_auth) && (!flag_authexist)) {
|
||||
dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
|
||||
dev->flink_fd = dup(fd);
|
||||
}
|
||||
*major_version = dev->major_version;
|
||||
*minor_version = dev->minor_version;
|
||||
amdgpu_device_reference(device_handle, dev);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
dev = calloc(1, sizeof(struct amdgpu_device));
|
||||
if (!dev) {
|
||||
fprintf(stderr, "%s: calloc failed\n", __func__);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
|
@@ -252,127 +228,76 @@ static int _amdgpu_device_initialize(int fd,
|
|||
goto cleanup;
|
||||
}
|
||||
|
||||
dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
|
||||
dev->fd = dup(fd);
|
||||
dev->flink_fd = dev->fd;
|
||||
dev->major_version = version->version_major;
|
||||
dev->minor_version = version->version_minor;
|
||||
drmFreeVersion(version);
|
||||
|
||||
dev->bo_flink_names = util_hash_table_create(handle_hash,
|
||||
handle_compare);
|
||||
dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
|
||||
pthread_mutex_init(&dev->bo_table_mutex, NULL);
|
||||
|
||||
/* Check if acceleration is working. */
|
||||
r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
|
||||
if (r) {
|
||||
fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
|
||||
__func__, r);
|
||||
if (r)
|
||||
goto cleanup;
|
||||
}
|
||||
if (!accel_working) {
|
||||
fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
|
||||
r = -EBADF;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
r = amdgpu_query_gpu_info_init(dev);
|
||||
if (r) {
|
||||
fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
|
||||
if (r)
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
r = amdgpu_query_gfx_level_major(dev, &gfx_ip_version_major);
|
||||
if (!r) {
|
||||
/* Split the HIGH addr space for GFX6-GFX12, except GFX9 to
|
||||
* implement a workaround for SMEM loads with NULL PRT pages.
|
||||
* This is silently ignored if querying the GFX level failed.
|
||||
*/
|
||||
if (gfx_ip_version_major >= 6 && gfx_ip_version_major <= 12 &&
|
||||
gfx_ip_version_major != 9) {
|
||||
va_mgr_flags |= AMDGPU_VA_MGR_RESERVE_HALF_VA_FOR_PRT;
|
||||
}
|
||||
}
|
||||
dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
|
||||
if (dev->vamgr == NULL)
|
||||
goto cleanup;
|
||||
|
||||
amdgpu_va_manager_init2(&dev->va_mgr,
|
||||
dev->dev_info.virtual_address_offset,
|
||||
dev->dev_info.virtual_address_max,
|
||||
dev->dev_info.high_va_offset,
|
||||
dev->dev_info.high_va_max,
|
||||
dev->dev_info.virtual_address_alignment,
|
||||
va_mgr_flags);
|
||||
amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset,
|
||||
dev->dev_info.virtual_address_max,
|
||||
dev->dev_info.virtual_address_alignment);
|
||||
|
||||
amdgpu_parse_asic_ids(dev);
|
||||
max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
|
||||
start = amdgpu_vamgr_find_va(dev->vamgr,
|
||||
max - dev->dev_info.virtual_address_offset,
|
||||
dev->dev_info.virtual_address_alignment, 0);
|
||||
if (start > 0xffffffff)
|
||||
goto free_va; /* shouldn't get here */
|
||||
|
||||
dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
|
||||
if (dev->vamgr_32 == NULL)
|
||||
goto free_va;
|
||||
amdgpu_vamgr_init(dev->vamgr_32, start, max,
|
||||
dev->dev_info.virtual_address_alignment);
|
||||
|
||||
*major_version = dev->major_version;
|
||||
*minor_version = dev->minor_version;
|
||||
*device_handle = dev;
|
||||
if (deduplicate_device) {
|
||||
dev->next = dev_list;
|
||||
dev_list = dev;
|
||||
}
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
free_va:
|
||||
r = -ENOMEM;
|
||||
amdgpu_vamgr_free_va(dev->vamgr, start,
|
||||
max - dev->dev_info.virtual_address_offset);
|
||||
amdgpu_vamgr_deinit(dev->vamgr);
|
||||
free(dev->vamgr);
|
||||
|
||||
cleanup:
|
||||
if (dev->fd >= 0)
|
||||
close(dev->fd);
|
||||
free(dev);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
pthread_mutex_unlock(&fd_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_device_initialize(int fd,
|
||||
uint32_t *major_version,
|
||||
uint32_t *minor_version,
|
||||
amdgpu_device_handle *device_handle)
|
||||
int amdgpu_device_deinitialize(amdgpu_device_handle dev)
|
||||
{
|
||||
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, true);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_device_initialize2(int fd, bool deduplicate_device,
|
||||
uint32_t *major_version,
|
||||
uint32_t *minor_version,
|
||||
amdgpu_device_handle *device_handle)
|
||||
{
|
||||
return _amdgpu_device_initialize(fd, major_version, minor_version, device_handle, deduplicate_device);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
|
||||
{
|
||||
pthread_mutex_lock(&dev_mutex);
|
||||
amdgpu_device_reference(&dev, NULL);
|
||||
pthread_mutex_unlock(&dev_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
|
||||
{
|
||||
return device_handle->fd;
|
||||
}
|
||||
|
||||
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
|
||||
{
|
||||
if (dev->marketing_name)
|
||||
return dev->marketing_name;
|
||||
else
|
||||
return "AMD Radeon Graphics";
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
|
||||
enum amdgpu_sw_info info,
|
||||
void *value)
|
||||
{
|
||||
uint32_t *val32 = (uint32_t*)value;
|
||||
|
||||
switch (info) {
|
||||
case amdgpu_sw_info_address32_hi:
|
||||
if (dev->va_mgr.vamgr_high_32.va_max)
|
||||
*val32 = (dev->va_mgr.vamgr_high_32.va_max - 1) >> 32;
|
||||
else
|
||||
*val32 = (dev->va_mgr.vamgr_32.va_max - 1) >> 32;
|
||||
return 0;
|
||||
case amdgpu_sw_info_address_prt_wa_control_bit:
|
||||
*val32 = dev->va_mgr.address_prt_wa_control_bit;
|
||||
return 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -22,6 +22,10 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
|
||||
|
|
@@ -30,8 +34,8 @@
|
|||
#include "amdgpu_internal.h"
|
||||
#include "xf86drm.h"
|
||||
|
||||
drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
|
||||
unsigned size, void *value)
|
||||
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
|
||||
unsigned size, void *value)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
|
|
@@ -44,8 +48,8 @@ drm_public int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
|
|||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
|
||||
int32_t *result)
|
||||
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
|
||||
int32_t *result)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
|
|
@@ -59,9 +63,9 @@ drm_public int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
|
|||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
|
||||
unsigned dword_offset, unsigned count, uint32_t instance,
|
||||
uint32_t flags, uint32_t *values)
|
||||
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
|
||||
unsigned count, uint32_t instance, uint32_t flags,
|
||||
uint32_t *values)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
|
|
@@ -78,9 +82,8 @@ drm_public int amdgpu_read_mm_registers(amdgpu_device_handle dev,
|
|||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
|
||||
unsigned type,
|
||||
uint32_t *count)
|
||||
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
|
||||
uint32_t *count)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
|
|
@@ -94,9 +97,9 @@ drm_public int amdgpu_query_hw_ip_count(amdgpu_device_handle dev,
|
|||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
|
||||
unsigned ip_instance,
|
||||
struct drm_amdgpu_info_hw_ip *info)
|
||||
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
|
||||
unsigned ip_instance,
|
||||
struct drm_amdgpu_info_hw_ip *info)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
|
|
@@ -111,12 +114,12 @@ drm_public int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
|
|||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
|
||||
unsigned fw_type, unsigned ip_instance, unsigned index,
|
||||
uint32_t *version, uint32_t *feature)
|
||||
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
|
||||
unsigned ip_instance, unsigned index,
|
||||
uint32_t *version, uint32_t *feature)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
struct drm_amdgpu_info_firmware firmware = {};
|
||||
struct drm_amdgpu_info_firmware firmware;
|
||||
int r;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
|
|
@@ -137,24 +140,6 @@ drm_public int amdgpu_query_firmware_version(amdgpu_device_handle dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_uq_fw_area_info(amdgpu_device_handle dev,
|
||||
unsigned type,
|
||||
unsigned ip_instance,
|
||||
struct drm_amdgpu_info_uq_fw_areas *info)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.return_pointer = (uintptr_t)info;
|
||||
request.return_size = sizeof(*info);
|
||||
request.query = AMDGPU_INFO_UQ_FW_AREAS;
|
||||
request.query_hw_ip.type = type;
|
||||
request.query_hw_ip.ip_instance = ip_instance;
|
||||
|
||||
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
|
||||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
|
||||
{
|
||||
int r, i;
|
||||
|
|
@@ -184,57 +169,49 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
|
|||
dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
|
||||
dev->info.pci_rev_id = dev->dev_info.pci_rev;
|
||||
|
||||
if (dev->info.family_id < AMDGPU_FAMILY_AI) {
|
||||
for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
|
||||
unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
|
||||
(AMDGPU_INFO_MMR_SH_INDEX_MASK <<
|
||||
AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
|
||||
for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
|
||||
unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
|
||||
(AMDGPU_INFO_MMR_SH_INDEX_MASK <<
|
||||
AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
|
||||
|
||||
r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
|
||||
&dev->info.backend_disable[i]);
|
||||
if (r)
|
||||
return r;
|
||||
/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
|
||||
dev->info.backend_disable[i] =
|
||||
(dev->info.backend_disable[i] >> 16) & 0xff;
|
||||
r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
|
||||
&dev->info.backend_disable[i]);
|
||||
if (r)
|
||||
return r;
|
||||
/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
|
||||
dev->info.backend_disable[i] =
|
||||
(dev->info.backend_disable[i] >> 16) & 0xff;
|
||||
|
||||
r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
|
||||
&dev->info.pa_sc_raster_cfg[i]);
|
||||
if (r)
|
||||
return r;
|
||||
r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
|
||||
&dev->info.pa_sc_raster_cfg[i]);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
|
||||
r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
|
||||
&dev->info.pa_sc_raster_cfg1[i]);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
}
|
||||
r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
|
||||
&dev->info.pa_sc_raster_cfg1[i]);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
|
||||
&dev->info.gb_addr_cfg);
|
||||
r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
|
||||
dev->info.gb_tile_mode);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (dev->info.family_id < AMDGPU_FAMILY_AI) {
|
||||
r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
|
||||
dev->info.gb_tile_mode);
|
||||
if (r)
|
||||
return r;
|
||||
r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
|
||||
dev->info.gb_macro_tile_mode);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
|
||||
r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
|
||||
dev->info.gb_macro_tile_mode);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
|
||||
&dev->info.gb_addr_cfg);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
|
||||
&dev->info.mc_arb_ramcfg);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
|
||||
&dev->info.mc_arb_ramcfg);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
dev->info.cu_active_number = dev->dev_info.cu_active_number;
|
||||
dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
|
||||
|
|
@@ -246,22 +223,19 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_gpu_info(amdgpu_device_handle dev,
|
||||
struct amdgpu_gpu_info *info)
|
||||
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
|
||||
struct amdgpu_gpu_info *info)
|
||||
{
|
||||
if (!dev || !info)
|
||||
return -EINVAL;
|
||||
|
||||
/* Get ASIC info*/
|
||||
*info = dev->info;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
|
||||
uint32_t heap,
|
||||
uint32_t flags,
|
||||
struct amdgpu_heap_info *info)
|
||||
int amdgpu_query_heap_info(amdgpu_device_handle dev,
|
||||
uint32_t heap,
|
||||
uint32_t flags,
|
||||
struct amdgpu_heap_info *info)
|
||||
{
|
||||
struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
|
||||
int r;
|
||||
|
|
@@ -310,13 +284,13 @@ drm_public int amdgpu_query_heap_info(amdgpu_device_handle dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
|
||||
struct amdgpu_gds_resource_info *gds_info)
|
||||
int amdgpu_query_gds_info(amdgpu_device_handle dev,
|
||||
struct amdgpu_gds_resource_info *gds_info)
|
||||
{
|
||||
struct drm_amdgpu_info_gds gds_config = {};
|
||||
int r;
|
||||
|
||||
if (!gds_info)
|
||||
if (gds_info == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
|
||||
|
|
@@ -334,47 +308,3 @@ drm_public int amdgpu_query_gds_info(amdgpu_device_handle dev,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
|
||||
unsigned size, void *value)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.return_pointer = (uintptr_t)value;
|
||||
request.return_size = size;
|
||||
request.query = AMDGPU_INFO_SENSOR;
|
||||
request.sensor_info.type = sensor_type;
|
||||
|
||||
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
|
||||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
|
||||
unsigned size, void *value)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.return_pointer = (uintptr_t)value;
|
||||
request.return_size = size;
|
||||
request.query = AMDGPU_INFO_VIDEO_CAPS;
|
||||
request.sensor_info.type = cap_type;
|
||||
|
||||
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
|
||||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_query_gpuvm_fault_info(amdgpu_device_handle dev,
|
||||
unsigned size, void *value)
|
||||
{
|
||||
struct drm_amdgpu_info request;
|
||||
|
||||
memset(&request, 0, sizeof(request));
|
||||
request.return_pointer = (uintptr_t)value;
|
||||
request.return_size = size;
|
||||
request.query = AMDGPU_INFO_GPUVM_FAULT;
|
||||
|
||||
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
|
||||
sizeof(struct drm_amdgpu_info));
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -25,6 +25,10 @@
|
|||
#ifndef _AMDGPU_INTERNAL_H_
|
||||
#define _AMDGPU_INTERNAL_H_
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <pthread.h>
|
||||
|
||||
|
|
@@ -32,7 +36,6 @@
|
|||
#include "xf86atomic.h"
|
||||
#include "amdgpu.h"
|
||||
#include "util_double_list.h"
|
||||
#include "handle_table.h"
|
||||
|
||||
#define AMDGPU_CS_MAX_RINGS 8
|
||||
/* do not use below macro if b is not power of 2 aligned value */
|
||||
|
|
@@ -41,7 +44,6 @@
|
|||
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
|
||||
|
||||
#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
|
||||
#define AMDGPU_NULL_SUBMIT_SEQ 0
|
||||
|
||||
struct amdgpu_bo_va_hole {
|
||||
struct list_head list;
|
||||
|
|
@@ -50,6 +52,8 @@ struct amdgpu_bo_va_hole {
|
|||
};
|
||||
|
||||
struct amdgpu_bo_va_mgr {
|
||||
/* the start virtual address */
|
||||
uint64_t va_offset;
|
||||
uint64_t va_max;
|
||||
struct list_head va_holes;
|
||||
pthread_mutex_t bo_va_mutex;
|
||||
|
|
@@ -57,47 +61,32 @@ struct amdgpu_bo_va_mgr {
|
|||
};
|
||||
|
||||
struct amdgpu_va {
|
||||
amdgpu_device_handle dev;
|
||||
uint64_t address;
|
||||
uint64_t size;
|
||||
enum amdgpu_gpu_va_range range;
|
||||
struct amdgpu_bo_va_mgr *vamgr;
|
||||
};
|
||||
|
||||
struct amdgpu_va_manager {
|
||||
/** The VA manager for the lower virtual address space */
|
||||
struct amdgpu_bo_va_mgr vamgr_low;
|
||||
/** The VA manager for the 32bit address space */
|
||||
struct amdgpu_bo_va_mgr vamgr_32;
|
||||
/** The VA manager for the high virtual address space */
|
||||
struct amdgpu_bo_va_mgr vamgr_high;
|
||||
/** The VA manager for the 32bit high address space */
|
||||
struct amdgpu_bo_va_mgr vamgr_high_32;
|
||||
|
||||
/** The bit to control whether it's the "LOW" or "HIGH" halves, when
|
||||
* half of the address space is reserved for PRT to implement a SW
|
||||
* workaround. */
|
||||
unsigned address_prt_wa_control_bit;
|
||||
};
|
||||
|
||||
struct amdgpu_device {
|
||||
atomic_t refcount;
|
||||
struct amdgpu_device *next;
|
||||
int fd;
|
||||
int flink_fd;
|
||||
unsigned major_version;
|
||||
unsigned minor_version;
|
||||
|
||||
char *marketing_name;
|
||||
/** List of buffer handles. Protected by bo_table_mutex. */
|
||||
struct handle_table bo_handles;
|
||||
struct util_hash_table *bo_handles;
|
||||
/** List of buffer GEM flink names. Protected by bo_table_mutex. */
|
||||
struct handle_table bo_flink_names;
|
||||
struct util_hash_table *bo_flink_names;
|
||||
/** This protects all hash tables. */
|
||||
pthread_mutex_t bo_table_mutex;
|
||||
struct drm_amdgpu_info_device dev_info;
|
||||
struct amdgpu_gpu_info info;
|
||||
|
||||
struct amdgpu_va_manager va_mgr;
|
||||
/** The global VA manager for the whole virtual address space */
|
||||
struct amdgpu_bo_va_mgr *vamgr;
|
||||
/** The VA manager for the 32bit address space */
|
||||
struct amdgpu_bo_va_mgr *vamgr_32;
|
||||
};
|
||||
|
||||
struct amdgpu_bo {
|
||||
|
|
@@ -111,7 +100,7 @@ struct amdgpu_bo {
|
|||
|
||||
pthread_mutex_t cpu_access_mutex;
|
||||
void *cpu_ptr;
|
||||
int64_t cpu_map_count;
|
||||
int cpu_map_count;
|
||||
};
|
||||
|
||||
struct amdgpu_bo_list {
|
||||
|
|
@@ -127,30 +116,25 @@ struct amdgpu_context {
|
|||
pthread_mutex_t sequence_mutex;
|
||||
/* context id*/
|
||||
uint32_t id;
|
||||
uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
|
||||
struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
|
||||
};
|
||||
|
||||
/**
|
||||
* Structure describing sw semaphore based on scheduler
|
||||
*
|
||||
*/
|
||||
struct amdgpu_semaphore {
|
||||
atomic_t refcount;
|
||||
struct list_head list;
|
||||
struct amdgpu_cs_fence signal_fence;
|
||||
};
|
||||
|
||||
/**
|
||||
* Functions.
|
||||
*/
|
||||
|
||||
drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
|
||||
|
||||
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
|
||||
uint64_t max, uint64_t alignment);
|
||||
|
||||
drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
|
||||
|
||||
drm_private void amdgpu_parse_asic_ids(struct amdgpu_device *dev);
|
||||
drm_private uint64_t
|
||||
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
|
||||
uint64_t alignment, uint64_t base_required);
|
||||
|
||||
drm_private void
|
||||
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);
|
||||
|
||||
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
|
||||
|
||||
|
|
@@ -182,4 +166,26 @@ static inline bool update_references(atomic_t *dst, atomic_t *src)
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assignment between two amdgpu_bo pointers with reference counting.
|
||||
*
|
||||
* Usage:
|
||||
* struct amdgpu_bo *dst = ... , *src = ...;
|
||||
*
|
||||
* dst = src;
|
||||
* // No reference counting. Only use this when you need to move
|
||||
* // a reference from one pointer to another.
|
||||
*
|
||||
* amdgpu_bo_reference(&dst, src);
|
||||
* // Reference counters are updated. dst is decremented and src is
|
||||
* // incremented. dst is freed if its reference counter is 0.
|
||||
*/
|
||||
static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
|
||||
struct amdgpu_bo *src)
|
||||
{
|
||||
if (update_references(&(*dst)->refcount, &src->refcount))
|
||||
amdgpu_bo_free_internal(*dst);
|
||||
*dst = src;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -1,123 +0,0 @@
|
|||
/*
|
||||
* Copyright 2024 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include "xf86drm.h"
|
||||
#include "amdgpu_drm.h"
|
||||
#include "amdgpu_internal.h"
|
||||
|
||||
drm_public int
|
||||
amdgpu_create_userqueue(amdgpu_device_handle dev,
|
||||
uint32_t ip_type,
|
||||
uint32_t doorbell_handle,
|
||||
uint32_t doorbell_offset,
|
||||
uint64_t queue_va,
|
||||
uint64_t queue_size,
|
||||
uint64_t wptr_va,
|
||||
uint64_t rptr_va,
|
||||
void *mqd_in,
|
||||
uint32_t flags,
|
||||
uint32_t *queue_id)
|
||||
{
|
||||
int ret;
|
||||
union drm_amdgpu_userq userq;
|
||||
uint64_t mqd_size;
|
||||
|
||||
if (!dev)
|
||||
return -EINVAL;
|
||||
|
||||
switch (ip_type) {
|
||||
case AMDGPU_HW_IP_GFX:
|
||||
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11);
|
||||
break;
|
||||
case AMDGPU_HW_IP_DMA:
|
||||
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_sdma_gfx11);
|
||||
break;
|
||||
case AMDGPU_HW_IP_COMPUTE:
|
||||
mqd_size = sizeof(struct drm_amdgpu_userq_mqd_compute_gfx11);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&userq, 0, sizeof(userq));
|
||||
|
||||
userq.in.op = AMDGPU_USERQ_OP_CREATE;
|
||||
userq.in.ip_type = ip_type;
|
||||
|
||||
userq.in.doorbell_handle = doorbell_handle;
|
||||
userq.in.doorbell_offset = doorbell_offset;
|
||||
|
||||
userq.in.queue_va = queue_va;
|
||||
userq.in.queue_size = queue_size;
|
||||
userq.in.wptr_va = wptr_va;
|
||||
userq.in.rptr_va = rptr_va;
|
||||
|
||||
userq.in.mqd = (uint64_t)mqd_in;
|
||||
userq.in.mqd_size = mqd_size;
|
||||
userq.in.flags = flags;
|
||||
|
||||
ret = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
|
||||
&userq, sizeof(userq));
|
||||
*queue_id = userq.out.queue_id;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_public int
|
||||
amdgpu_free_userqueue(amdgpu_device_handle dev, uint32_t queue_id)
|
||||
{
|
||||
union drm_amdgpu_userq userq;
|
||||
|
||||
memset(&userq, 0, sizeof(userq));
|
||||
userq.in.op = AMDGPU_USERQ_OP_FREE;
|
||||
userq.in.queue_id = queue_id;
|
||||
|
||||
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ,
|
||||
&userq, sizeof(userq));
|
||||
}
|
||||
|
||||
drm_public int
|
||||
amdgpu_userq_signal(amdgpu_device_handle dev,
|
||||
struct drm_amdgpu_userq_signal *signal_data)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_SIGNAL,
|
||||
signal_data, sizeof(struct drm_amdgpu_userq_signal));
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public int
|
||||
amdgpu_userq_wait(amdgpu_device_handle dev,
|
||||
struct drm_amdgpu_userq_wait *wait_data)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_USERQ_WAIT,
|
||||
wait_data, sizeof(struct drm_amdgpu_userq_wait));
|
||||
|
||||
return r;
|
||||
}
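
Taken together, these entry points form the create / signal / wait / free lifecycle of a user-mode queue. A hedged sketch of the create and free halves, assuming the caller has already allocated and mapped the doorbell, ring, wptr and rptr buffers elsewhere; every parameter name below is a placeholder, not something defined in this file:

#include "amdgpu.h"
#include "amdgpu_drm.h"

/* Sketch: create a GFX user queue from resources the caller prepared
 * beforehand, run some work, then free it again. */
int run_on_user_queue(amdgpu_device_handle dev,
                      uint32_t doorbell_handle, uint32_t doorbell_offset,
                      uint64_t queue_va, uint64_t queue_size,
                      uint64_t wptr_va, uint64_t rptr_va,
                      struct drm_amdgpu_userq_mqd_gfx11 *mqd)
{
    uint32_t queue_id;
    int r;

    r = amdgpu_create_userqueue(dev, AMDGPU_HW_IP_GFX,
                                doorbell_handle, doorbell_offset,
                                queue_va, queue_size, wptr_va, rptr_va,
                                mqd, 0 /* flags */, &queue_id);
    if (r)
        return r;

    /* ... write packets into the ring and ring the doorbell here ... */

    return amdgpu_free_userqueue(dev, queue_id);
}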
|
||||
|
|
@@ -21,6 +21,10 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
|
|
@@ -29,145 +33,129 @@
|
|||
#include "amdgpu_internal.h"
|
||||
#include "util_math.h"
|
||||
|
||||
drm_public int amdgpu_va_range_query(amdgpu_device_handle dev,
|
||||
enum amdgpu_gpu_va_range type,
|
||||
uint64_t *start, uint64_t *end)
|
||||
int amdgpu_va_range_query(amdgpu_device_handle dev,
|
||||
enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
|
||||
{
|
||||
if (type != amdgpu_gpu_va_range_general)
|
||||
return -EINVAL;
|
||||
|
||||
*start = dev->dev_info.virtual_address_offset;
|
||||
*end = dev->dev_info.virtual_address_max;
|
||||
return 0;
|
||||
if (type == amdgpu_gpu_va_range_general) {
|
||||
*start = dev->dev_info.virtual_address_offset;
|
||||
*end = dev->dev_info.virtual_address_max;
|
||||
return 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
|
||||
uint64_t max, uint64_t alignment)
|
||||
uint64_t max, uint64_t alignment)
|
||||
{
|
||||
struct amdgpu_bo_va_hole *n;
|
||||
|
||||
mgr->va_offset = start;
|
||||
mgr->va_max = max;
|
||||
mgr->va_alignment = alignment;
|
||||
|
||||
list_inithead(&mgr->va_holes);
|
||||
pthread_mutex_init(&mgr->bo_va_mutex, NULL);
|
||||
pthread_mutex_lock(&mgr->bo_va_mutex);
|
||||
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
n->size = mgr->va_max - start;
|
||||
n->offset = start;
|
||||
list_add(&n->list, &mgr->va_holes);
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
}
|
||||
|
||||
drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
|
||||
{
|
||||
struct amdgpu_bo_va_hole *hole, *tmp;
|
||||
LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
|
||||
struct amdgpu_bo_va_hole *hole;
|
||||
LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
|
||||
list_del(&hole->list);
|
||||
free(hole);
|
||||
}
|
||||
pthread_mutex_destroy(&mgr->bo_va_mutex);
|
||||
}
|
||||
|
||||
static drm_private int
|
||||
amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
|
||||
uint64_t end_va)
|
||||
{
|
||||
if (start_va > hole->offset && end_va - hole->offset < hole->size) {
|
||||
struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
if (!n)
|
||||
return -ENOMEM;
|
||||
|
||||
n->size = start_va - hole->offset;
|
||||
n->offset = hole->offset;
|
||||
list_add(&n->list, &hole->list);
|
||||
|
||||
hole->size -= (end_va - hole->offset);
|
||||
hole->offset = end_va;
|
||||
} else if (start_va > hole->offset) {
|
||||
hole->size = start_va - hole->offset;
|
||||
} else if (end_va - hole->offset < hole->size) {
|
||||
hole->size -= (end_va - hole->offset);
|
||||
hole->offset = end_va;
|
||||
} else {
|
||||
list_del(&hole->list);
|
||||
free(hole);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static drm_private int
|
||||
drm_private uint64_t
|
||||
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
|
||||
uint64_t alignment, uint64_t base_required,
|
||||
bool search_from_top, uint64_t *va_out)
|
||||
uint64_t alignment, uint64_t base_required)
|
||||
{
|
||||
struct amdgpu_bo_va_hole *hole, *n;
|
||||
uint64_t offset = 0;
|
||||
int ret;
|
||||
|
||||
uint64_t offset = 0, waste = 0;
|
||||
|
||||
alignment = MAX2(alignment, mgr->va_alignment);
|
||||
size = ALIGN(size, mgr->va_alignment);
|
||||
|
||||
if (base_required % alignment)
|
||||
return -EINVAL;
|
||||
return AMDGPU_INVALID_VA_ADDRESS;
|
||||
|
||||
pthread_mutex_lock(&mgr->bo_va_mutex);
|
||||
if (!search_from_top) {
|
||||
LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
|
||||
if (base_required) {
|
||||
if (hole->offset > base_required ||
|
||||
(hole->offset + hole->size) < (base_required + size))
|
||||
continue;
|
||||
offset = base_required;
|
||||
} else {
|
||||
uint64_t waste = hole->offset % alignment;
|
||||
waste = waste ? alignment - waste : 0;
|
||||
offset = hole->offset + waste;
|
||||
if (offset >= (hole->offset + hole->size) ||
|
||||
size > (hole->offset + hole->size) - offset) {
|
||||
continue;
|
||||
}
|
||||
/* TODO: using more appropriate way to track the holes */
|
||||
/* first look for a hole */
|
||||
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
|
||||
if (base_required) {
|
||||
if(hole->offset > base_required ||
|
||||
(hole->offset + hole->size) < (base_required + size))
|
||||
continue;
|
||||
waste = base_required - hole->offset;
|
||||
offset = base_required;
|
||||
} else {
|
||||
offset = hole->offset;
|
||||
waste = offset % alignment;
|
||||
waste = waste ? alignment - waste : 0;
|
||||
offset += waste;
|
||||
if (offset >= (hole->offset + hole->size)) {
|
||||
continue;
|
||||
}
|
||||
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
*va_out = offset;
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
|
||||
if (base_required) {
|
||||
if (hole->offset > base_required ||
|
||||
(hole->offset + hole->size) < (base_required + size))
|
||||
continue;
|
||||
offset = base_required;
|
||||
} else {
|
||||
if (size > hole->size)
|
||||
continue;
|
||||
|
||||
offset = hole->offset + hole->size - size;
|
||||
offset -= offset % alignment;
|
||||
if (offset < hole->offset) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
|
||||
if (!waste && hole->size == size) {
|
||||
offset = hole->offset;
|
||||
list_del(&hole->list);
|
||||
free(hole);
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
*va_out = offset;
|
||||
return ret;
|
||||
return offset;
|
||||
}
|
||||
if ((hole->size - waste) > size) {
|
||||
if (waste) {
|
||||
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
n->size = waste;
|
||||
n->offset = hole->offset;
|
||||
list_add(&n->list, &hole->list);
|
||||
}
|
||||
hole->size -= (size + waste);
|
||||
hole->offset += size + waste;
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
return offset;
|
||||
}
|
||||
if ((hole->size - waste) == size) {
|
||||
hole->size = waste;
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
return offset;
|
||||
}
|
||||
}
|
||||
|
||||
if (base_required) {
|
||||
if (base_required < mgr->va_offset)
|
||||
return AMDGPU_INVALID_VA_ADDRESS;
|
||||
offset = mgr->va_offset;
|
||||
waste = base_required - mgr->va_offset;
|
||||
} else {
|
||||
offset = mgr->va_offset;
|
||||
waste = offset % alignment;
|
||||
waste = waste ? alignment - waste : 0;
|
||||
}
|
||||
|
||||
if (offset + waste + size > mgr->va_max) {
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
return AMDGPU_INVALID_VA_ADDRESS;
|
||||
}
|
||||
|
||||
if (waste) {
|
||||
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
n->size = waste;
|
||||
n->offset = offset;
|
||||
list_add(&n->list, &mgr->va_holes);
|
||||
}
|
||||
|
||||
offset += waste;
|
||||
mgr->va_offset += size + waste;
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
return -ENOMEM;
|
||||
return offset;
|
||||
}
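
Both versions above implement the same idea: free space is a list of holes, allocation walks that list, pads a hole's start up to the requested alignment, and then shrinks or splits the hole (the newer code factors the split out into amdgpu_vamgr_subtract_hole()). A stand-alone first-fit sketch of that bookkeeping over a plain array, with the list, mutex and base_required handling stripped out; the fixed array and the dropped leading padding are simplifications for illustration, not how the library behaves:

#include <stdint.h>

struct hole { uint64_t offset, size; };

/* First-fit allocation from a small table of free ranges.  Returns the
 * chosen address or UINT64_MAX when nothing fits.  Alignment waste at the
 * front of a hole is accounted for the same way the code above does. */
static uint64_t find_va(struct hole *holes, int count,
                        uint64_t size, uint64_t align)
{
    for (int i = 0; i < count; i++) {
        uint64_t waste = holes[i].offset % align;
        uint64_t start;

        waste = waste ? align - waste : 0;
        if (holes[i].size < size + waste)
            continue;

        start = holes[i].offset + waste;
        /* A real allocator would keep the leading "waste" bytes as a new
         * hole; here they are simply dropped for brevity. */
        holes[i].offset = start + size;
        holes[i].size -= size + waste;
        return start;
    }
    return UINT64_MAX;
}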
|
||||
|
||||
static drm_private void
|
||||
drm_private void
|
||||
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
|
||||
{
|
||||
struct amdgpu_bo_va_hole *hole, *next;
|
||||
struct amdgpu_bo_va_hole *hole;
|
||||
|
||||
if (va == AMDGPU_INVALID_VA_ADDRESS)
|
||||
return;
|
||||
|
|
@@ -175,131 +163,116 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
|
|||
size = ALIGN(size, mgr->va_alignment);
|
||||
|
||||
pthread_mutex_lock(&mgr->bo_va_mutex);
|
||||
hole = container_of(&mgr->va_holes, hole, list);
|
||||
LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
|
||||
if (next->offset < va)
|
||||
break;
|
||||
hole = next;
|
||||
}
|
||||
|
||||
if (&hole->list != &mgr->va_holes) {
|
||||
/* Grow upper hole if it's adjacent */
|
||||
if (hole->offset == (va + size)) {
|
||||
hole->offset = va;
|
||||
hole->size += size;
|
||||
/* Merge lower hole if it's adjacent */
|
||||
if (next != hole &&
|
||||
&next->list != &mgr->va_holes &&
|
||||
(next->offset + next->size) == va) {
|
||||
next->size += hole->size;
|
||||
if ((va + size) == mgr->va_offset) {
|
||||
mgr->va_offset = va;
|
||||
/* Delete uppermost hole if it reaches the new top */
|
||||
if (!LIST_IS_EMPTY(&mgr->va_holes)) {
|
||||
hole = container_of(mgr->va_holes.next, hole, list);
|
||||
if ((hole->offset + hole->size) == va) {
|
||||
mgr->va_offset = hole->offset;
|
||||
list_del(&hole->list);
|
||||
free(hole);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
struct amdgpu_bo_va_hole *next;
|
||||
|
||||
hole = container_of(&mgr->va_holes, hole, list);
|
||||
LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
|
||||
if (next->offset < va)
|
||||
break;
|
||||
hole = next;
|
||||
}
|
||||
|
||||
if (&hole->list != &mgr->va_holes) {
|
||||
/* Grow upper hole if it's adjacent */
|
||||
if (hole->offset == (va + size)) {
|
||||
hole->offset = va;
|
||||
hole->size += size;
|
||||
/* Merge lower hole if it's adjacent */
|
||||
if (next != hole
|
||||
&& &next->list != &mgr->va_holes
|
||||
&& (next->offset + next->size) == va) {
|
||||
next->size += hole->size;
|
||||
list_del(&hole->list);
|
||||
free(hole);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Grow lower hole if it's adjacent */
|
||||
if (next != hole && &next->list != &mgr->va_holes &&
|
||||
(next->offset + next->size) == va) {
|
||||
next->size += size;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Grow lower hole if it's adjacent */
|
||||
if (next != hole && &next->list != &mgr->va_holes &&
|
||||
(next->offset + next->size) == va) {
|
||||
next->size += size;
|
||||
goto out;
|
||||
/* FIXME on allocation failure we just lose virtual address space
|
||||
* maybe print a warning
|
||||
*/
|
||||
next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
if (next) {
|
||||
next->size = size;
|
||||
next->offset = va;
|
||||
list_add(&next->list, &hole->list);
|
||||
}
|
||||
}
|
||||
|
||||
/* FIXME on allocation failure we just lose virtual address space
|
||||
* maybe print a warning
|
||||
*/
|
||||
next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
|
||||
if (next) {
|
||||
next->size = size;
|
||||
next->offset = va;
|
||||
list_add(&next->list, &hole->list);
|
||||
}
|
||||
|
||||
out:
|
||||
pthread_mutex_unlock(&mgr->bo_va_mutex);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
|
||||
enum amdgpu_gpu_va_range va_range_type,
|
||||
uint64_t size,
|
||||
uint64_t va_base_alignment,
|
||||
uint64_t va_base_required,
|
||||
uint64_t *va_base_allocated,
|
||||
amdgpu_va_handle *va_range_handle,
|
||||
uint64_t flags)
|
||||
{
|
||||
return amdgpu_va_range_alloc2(&dev->va_mgr, va_range_type, size,
|
||||
va_base_alignment, va_base_required,
|
||||
va_base_allocated, va_range_handle,
|
||||
flags);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_va_range_alloc2(amdgpu_va_manager_handle va_mgr,
|
||||
enum amdgpu_gpu_va_range va_range_type,
|
||||
uint64_t size,
|
||||
uint64_t va_base_alignment,
|
||||
uint64_t va_base_required,
|
||||
uint64_t *va_base_allocated,
|
||||
amdgpu_va_handle *va_range_handle,
|
||||
uint64_t flags)
|
||||
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
|
||||
enum amdgpu_gpu_va_range va_range_type,
|
||||
uint64_t size,
|
||||
uint64_t va_base_alignment,
|
||||
uint64_t va_base_required,
|
||||
uint64_t *va_base_allocated,
|
||||
amdgpu_va_handle *va_range_handle,
|
||||
uint64_t flags)
|
||||
{
|
||||
struct amdgpu_bo_va_mgr *vamgr;
|
||||
bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
|
||||
int ret;
|
||||
|
||||
/* Clear the flag when the high VA manager is not initialized */
|
||||
if (flags & AMDGPU_VA_RANGE_HIGH && !va_mgr->vamgr_high_32.va_max)
|
||||
flags &= ~AMDGPU_VA_RANGE_HIGH;
|
||||
|
||||
if (flags & AMDGPU_VA_RANGE_HIGH) {
|
||||
if (flags & AMDGPU_VA_RANGE_32_BIT)
|
||||
vamgr = &va_mgr->vamgr_high_32;
|
||||
else
|
||||
vamgr = &va_mgr->vamgr_high;
|
||||
} else {
|
||||
if (flags & AMDGPU_VA_RANGE_32_BIT)
|
||||
vamgr = &va_mgr->vamgr_32;
|
||||
else
|
||||
vamgr = &va_mgr->vamgr_low;
|
||||
}
|
||||
if (flags & AMDGPU_VA_RANGE_32_BIT)
|
||||
vamgr = dev->vamgr_32;
|
||||
else
|
||||
vamgr = dev->vamgr;
|
||||
|
||||
va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
|
||||
size = ALIGN(size, vamgr->va_alignment);
|
||||
|
||||
ret = amdgpu_vamgr_find_va(vamgr, size,
|
||||
va_base_alignment, va_base_required,
|
||||
search_from_top, va_base_allocated);
|
||||
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
|
||||
va_base_alignment, va_base_required);
|
||||
|
||||
if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
|
||||
if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
|
||||
(*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
|
||||
/* fallback to 32bit address */
|
||||
if (flags & AMDGPU_VA_RANGE_HIGH)
|
||||
vamgr = &va_mgr->vamgr_high_32;
|
||||
else
|
||||
vamgr = &va_mgr->vamgr_32;
|
||||
ret = amdgpu_vamgr_find_va(vamgr, size,
|
||||
va_base_alignment, va_base_required,
|
||||
search_from_top, va_base_allocated);
|
||||
vamgr = dev->vamgr_32;
|
||||
*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
|
||||
va_base_alignment, va_base_required);
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
|
||||
struct amdgpu_va* va;
|
||||
va = calloc(1, sizeof(struct amdgpu_va));
|
||||
if(!va){
|
||||
amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
va->dev = dev;
|
||||
va->address = *va_base_allocated;
|
||||
va->size = size;
|
||||
va->range = va_range_type;
|
||||
va->vamgr = vamgr;
|
||||
*va_range_handle = va;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
|
||||
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
|
||||
{
|
||||
if(!va_range_handle || !va_range_handle->address)
|
||||
return 0;
|
||||
|
|
@@ -310,97 +283,3 @@ drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
|
|||
free(va_range_handle);
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public uint64_t amdgpu_va_get_start_addr(amdgpu_va_handle va_handle)
|
||||
{
|
||||
return va_handle->address;
|
||||
}
|
||||
|
||||
drm_public amdgpu_va_manager_handle amdgpu_va_manager_alloc(void)
|
||||
{
|
||||
amdgpu_va_manager_handle r = calloc(1, sizeof(struct amdgpu_va_manager));
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_public void amdgpu_va_manager_init(struct amdgpu_va_manager *va_mgr,
|
||||
uint64_t low_va_offset, uint64_t low_va_max,
|
||||
uint64_t high_va_offset, uint64_t high_va_max,
|
||||
uint32_t virtual_address_alignment)
|
||||
{
|
||||
amdgpu_va_manager_init2(va_mgr, low_va_offset, low_va_max,
|
||||
high_va_offset, high_va_max,
|
||||
virtual_address_alignment, 0);
|
||||
}
|
||||
|
||||
drm_public void amdgpu_va_manager_init2(struct amdgpu_va_manager *va_mgr,
|
||||
uint64_t low_va_offset, uint64_t low_va_max,
|
||||
uint64_t high_va_offset, uint64_t high_va_max,
|
||||
uint32_t virtual_address_alignment,
|
||||
uint32_t flags)
|
||||
{
|
||||
uint64_t start, max;
|
||||
|
||||
va_mgr->address_prt_wa_control_bit = ~0;
|
||||
|
||||
start = low_va_offset;
|
||||
max = MIN2(low_va_max, 0x100000000ULL);
|
||||
amdgpu_vamgr_init(&va_mgr->vamgr_32, start, max,
|
||||
virtual_address_alignment);
|
||||
|
||||
start = max;
|
||||
if ((flags & AMDGPU_VA_MGR_RESERVE_HALF_VA_FOR_PRT) && !high_va_max) {
|
||||
/* Reserve the half VA range for PRT by splitting it in two
|
||||
* equal halves where one bit controls whether it's the LOW or
|
||||
* HIGH half.
|
||||
*/
|
||||
va_mgr->address_prt_wa_control_bit = util_last_bit64(low_va_offset ^ low_va_max) - 1;
|
||||
max = low_va_max ^ (1ull << va_mgr->address_prt_wa_control_bit);
|
||||
} else {
|
||||
max = MAX2(low_va_max, 0x100000000ULL);
|
||||
}
|
||||
|
||||
amdgpu_vamgr_init(&va_mgr->vamgr_low, start, max,
|
||||
virtual_address_alignment);
|
||||
|
||||
start = high_va_offset;
|
||||
max = MIN2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
|
||||
amdgpu_vamgr_init(&va_mgr->vamgr_high_32, start, max,
|
||||
virtual_address_alignment);
|
||||
|
||||
start = max;
|
||||
if ((flags & AMDGPU_VA_MGR_RESERVE_HALF_VA_FOR_PRT) && high_va_max) {
|
||||
/* Reserve the half VA range for PRT by splitting it in two
|
||||
* equal halves where one bit controls whether it's the LOW or
|
||||
* HIGH half.
|
||||
*/
|
||||
va_mgr->address_prt_wa_control_bit = util_last_bit64(high_va_offset ^ high_va_max) - 1;
|
||||
max = high_va_max ^ (1ull << va_mgr->address_prt_wa_control_bit);
|
||||
} else {
|
||||
max = MAX2(high_va_max, (start & ~0xffffffffULL) + 0x100000000ULL);
|
||||
}
|
||||
|
||||
amdgpu_vamgr_init(&va_mgr->vamgr_high, start, max,
|
||||
virtual_address_alignment);
|
||||
}
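
With AMDGPU_VA_MGR_RESERVE_HALF_VA_FOR_PRT the range is cut into two equal halves, and address_prt_wa_control_bit records which address bit selects the half. util_last_bit64() is assumed here to return the 1-based index of the most significant set bit, so the control bit is simply the highest bit in which the range start and end differ. A minimal sketch of that computation and of how a caller could flip an address between the two halves once it has queried the bit:

#include <stdint.h>

/* Assumed helper with util_last_bit64() semantics: 1-based index of the
 * most significant set bit, 0 for an input of 0. */
static unsigned last_bit64(uint64_t v)
{
    unsigned n = 0;
    while (v) {
        v >>= 1;
        n++;
    }
    return n;
}

/* Mirror of the computation above: the control bit is the top bit that
 * differs between the start and end of the VA range. */
static unsigned prt_control_bit(uint64_t va_offset, uint64_t va_max)
{
    return last_bit64(va_offset ^ va_max) - 1;
}

/* Flip an address into the other half of the split range. */
static uint64_t prt_other_half(uint64_t va, unsigned control_bit)
{
    return va ^ (1ull << control_bit);
}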
|
||||
|
||||
drm_public void amdgpu_va_manager_deinit(struct amdgpu_va_manager *va_mgr)
|
||||
{
|
||||
amdgpu_vamgr_deinit(&va_mgr->vamgr_32);
|
||||
amdgpu_vamgr_deinit(&va_mgr->vamgr_low);
|
||||
amdgpu_vamgr_deinit(&va_mgr->vamgr_high_32);
|
||||
amdgpu_vamgr_deinit(&va_mgr->vamgr_high);
|
||||
}
|
||||
|
||||
drm_public int amdgpu_va_manager_query_sw_info(struct amdgpu_va_manager *va_mgr,
|
||||
enum amdgpu_va_manager_sw_info info,
|
||||
void *value)
|
||||
{
|
||||
uint32_t *val32 = (uint32_t*)value;
|
||||
|
||||
switch (info) {
|
||||
case amdgpu_va_manager_sw_info_address_prt_wa_control_bit:
|
||||
*val32 = va_mgr->address_prt_wa_control_bit;
|
||||
return 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Copyright 2017 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_drm.h"
|
||||
#include "xf86drm.h"
|
||||
#include "amdgpu_internal.h"
|
||||
|
||||
drm_public int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
|
||||
{
|
||||
union drm_amdgpu_vm vm;
|
||||
|
||||
vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
|
||||
vm.in.flags = flags;
|
||||
|
||||
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
|
||||
&vm, sizeof(vm));
|
||||
}
|
||||
|
||||
drm_public int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev,
|
||||
uint32_t flags)
|
||||
{
|
||||
union drm_amdgpu_vm vm;
|
||||
|
||||
vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
|
||||
vm.in.flags = flags;
|
||||
|
||||
return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
|
||||
&vm, sizeof(vm));
|
||||
}
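
A reserved VMID stays held until it is explicitly released, so the two wrappers are normally used as a bracket around the work that needs it. A minimal usage sketch; passing 0 for flags is an assumption, the valid bits are defined in amdgpu_drm.h:

#include "amdgpu.h"

/* Sketch: hold a reserved VMID for the duration of a critical section. */
int with_reserved_vmid(amdgpu_device_handle dev)
{
    int r = amdgpu_vm_reserve_vmid(dev, 0);
    if (r)
        return r;

    /* ... submit the work that relies on the reserved VMID ... */

    return amdgpu_vm_unreserve_vmid(dev, 0);
}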
|
||||
|
|
@@ -1,72 +0,0 @@
|
|||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include "handle_table.h"
|
||||
#include "util_math.h"
|
||||
|
||||
drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
|
||||
void *value)
|
||||
{
|
||||
if (key >= table->max_key) {
|
||||
uint32_t alignment = sysconf(_SC_PAGESIZE) / sizeof(void*);
|
||||
uint32_t max_key = ALIGN(key + 1, alignment);
|
||||
void **values;
|
||||
|
||||
values = realloc(table->values, max_key * sizeof(void *));
|
||||
if (!values)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(values + table->max_key, 0, (max_key - table->max_key) *
|
||||
sizeof(void *));
|
||||
|
||||
table->max_key = max_key;
|
||||
table->values = values;
|
||||
}
|
||||
table->values[key] = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_private void handle_table_remove(struct handle_table *table, uint32_t key)
|
||||
{
|
||||
if (key < table->max_key)
|
||||
table->values[key] = NULL;
|
||||
}
|
||||
|
||||
drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key)
|
||||
{
|
||||
if (key < table->max_key)
|
||||
return table->values[key];
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drm_private void handle_table_fini(struct handle_table *table)
|
||||
{
|
||||
free(table->values);
|
||||
table->max_key = 0;
|
||||
table->values = NULL;
|
||||
}
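
The table is nothing more than a flat array indexed directly by the handle value, grown in page-sized steps on insert. A small usage sketch in the style of the bo_handles table kept in struct amdgpu_device; the bo pointer is a stand-in for whatever the caller tracks:

#include <assert.h>
#include "handle_table.h"

/* Sketch: map a GEM handle to a pointer and back.  A zero-initialized
 * struct handle_table is an empty table; handle_table_fini() releases the
 * backing array. */
static void handle_table_example(void *bo)
{
    struct handle_table table = {0};

    if (handle_table_insert(&table, 42, bo) == 0) {
        assert(handle_table_lookup(&table, 42) == bo);
        handle_table_remove(&table, 42);
        assert(handle_table_lookup(&table, 42) == NULL);
    }
    handle_table_fini(&table);
}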
|
||||
|
|
@@ -1,41 +0,0 @@
|
|||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _HANDLE_TABLE_H_
|
||||
#define _HANDLE_TABLE_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include "libdrm_macros.h"
|
||||
|
||||
struct handle_table {
|
||||
uint32_t max_key;
|
||||
void **values;
|
||||
};
|
||||
|
||||
drm_private int handle_table_insert(struct handle_table *table, uint32_t key,
|
||||
void *value);
|
||||
drm_private void handle_table_remove(struct handle_table *table, uint32_t key);
|
||||
drm_private void *handle_table_lookup(struct handle_table *table, uint32_t key);
|
||||
drm_private void handle_table_fini(struct handle_table *table);
|
||||
|
||||
#endif /* _HANDLE_TABLE_H_ */
|
||||
|
|
@@ -8,4 +8,3 @@ Description: Userspace interface to kernel DRM services for amdgpu
|
|||
Version: @PACKAGE_VERSION@
|
||||
Libs: -L${libdir} -ldrm_amdgpu
|
||||
Cflags: -I${includedir} -I${includedir}/libdrm
|
||||
Requires.private: libdrm
|
||||
|
|
|
|||
|
|
@@ -1,69 +0,0 @@
|
|||
# Copyright © 2017-2018 Intel Corporation
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
|
||||
datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
|
||||
|
||||
libdrm_amdgpu = library(
|
||||
'drm_amdgpu',
|
||||
[
|
||||
files(
|
||||
'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
|
||||
'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'handle_table.c',
|
||||
'amdgpu_userq.c',
|
||||
),
|
||||
config_file,
|
||||
],
|
||||
c_args : [
|
||||
libdrm_c_args,
|
||||
'-DAMDGPU_ASIC_ID_TABLE="@0@"'.format(join_paths(datadir_amdgpu, 'amdgpu.ids')),
|
||||
],
|
||||
include_directories : [inc_root, inc_drm],
|
||||
link_with : libdrm,
|
||||
dependencies : [dep_threads, dep_atomic_ops, dep_rt],
|
||||
version : '1.@0@.0'.format(patch_ver),
|
||||
install : true,
|
||||
)
|
||||
|
||||
install_headers('amdgpu.h', subdir : 'libdrm')
|
||||
|
||||
pkg.generate(
|
||||
libdrm_amdgpu,
|
||||
name : 'libdrm_amdgpu',
|
||||
subdirs : ['.', 'libdrm'],
|
||||
description : 'Userspace interface to kernel DRM services for amdgpu',
|
||||
)
|
||||
|
||||
ext_libdrm_amdgpu = declare_dependency(
|
||||
link_with : [libdrm, libdrm_amdgpu],
|
||||
include_directories : [inc_drm, include_directories('.')],
|
||||
)
|
||||
|
||||
meson.override_dependency('libdrm_amdgpu', ext_libdrm_amdgpu)
|
||||
|
||||
test(
|
||||
'amdgpu-symbols-check',
|
||||
symbols_check,
|
||||
args : [
|
||||
'--lib', libdrm_amdgpu,
|
||||
'--symbols-file', files('amdgpu-symbols.txt'),
|
||||
'--nm', prog_nm.full_path(),
|
||||
],
|
||||
)
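
The pkg.generate() call above is what lets applications build against the library through pkg-config. A minimal consumer, compiled with cc demo.c $(pkg-config --cflags --libs libdrm_amdgpu); the render-node path is an assumption, on most single-GPU systems the first amdgpu device shows up as renderD128:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <amdgpu.h>

int main(void)
{
    uint32_t major, minor;
    amdgpu_device_handle dev;
    /* Assumption: this render node belongs to an amdgpu GPU. */
    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);

    if (fd < 0)
        return 1;

    if (amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
        printf("amdgpu DRM interface %u.%u\n", major, minor);
        amdgpu_device_deinitialize(dev);
    }
    close(fd);
    return 0;
}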
|
||||
387
amdgpu/util_hash.c
Normal file
|
|
@@ -0,0 +1,387 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2007 VMware, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/*
|
||||
* Authors:
|
||||
* Zack Rusin <zackr@vmware.com>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include "util_hash.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
#define MAX(a, b) ((a > b) ? (a) : (b))
|
||||
|
||||
static const int MinNumBits = 4;
|
||||
|
||||
static const unsigned char prime_deltas[] = {
|
||||
0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
|
||||
1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
|
||||
};
|
||||
|
||||
static int primeForNumBits(int numBits)
|
||||
{
|
||||
return (1 << numBits) + prime_deltas[numBits];
|
||||
}
|
||||
|
||||
/* Returns the smallest integer n such that
|
||||
primeForNumBits(n) >= hint.
|
||||
*/
|
||||
static int countBits(int hint)
|
||||
{
|
||||
int numBits = 0;
|
||||
int bits = hint;
|
||||
|
||||
while (bits > 1) {
|
||||
bits >>= 1;
|
||||
numBits++;
|
||||
}
|
||||
|
||||
if (numBits >= (int)sizeof(prime_deltas)) {
|
||||
numBits = sizeof(prime_deltas) - 1;
|
||||
} else if (primeForNumBits(numBits) < hint) {
|
||||
++numBits;
|
||||
}
|
||||
return numBits;
|
||||
}
|
||||
|
||||
struct util_node {
|
||||
struct util_node *next;
|
||||
unsigned key;
|
||||
void *value;
|
||||
};
|
||||
|
||||
struct util_hash_data {
|
||||
struct util_node *fakeNext;
|
||||
struct util_node **buckets;
|
||||
int size;
|
||||
int nodeSize;
|
||||
short userNumBits;
|
||||
short numBits;
|
||||
int numBuckets;
|
||||
};
|
||||
|
||||
struct util_hash {
|
||||
union {
|
||||
struct util_hash_data *d;
|
||||
struct util_node *e;
|
||||
} data;
|
||||
};
|
||||
|
||||
static void *util_data_allocate_node(struct util_hash_data *hash)
|
||||
{
|
||||
return malloc(hash->nodeSize);
|
||||
}
|
||||
|
||||
static void util_free_node(struct util_node *node)
|
||||
{
|
||||
free(node);
|
||||
}
|
||||
|
||||
static struct util_node *
|
||||
util_hash_create_node(struct util_hash *hash,
|
||||
unsigned akey, void *avalue,
|
||||
struct util_node **anextNode)
|
||||
{
|
||||
struct util_node *node = util_data_allocate_node(hash->data.d);
|
||||
|
||||
if (!node)
|
||||
return NULL;
|
||||
|
||||
node->key = akey;
|
||||
node->value = avalue;
|
||||
|
||||
node->next = (struct util_node*)(*anextNode);
|
||||
*anextNode = node;
|
||||
++hash->data.d->size;
|
||||
return node;
|
||||
}
|
||||
|
||||
static void util_data_rehash(struct util_hash_data *hash, int hint)
|
||||
{
|
||||
if (hint < 0) {
|
||||
hint = countBits(-hint);
|
||||
if (hint < MinNumBits)
|
||||
hint = MinNumBits;
|
||||
hash->userNumBits = (short)hint;
|
||||
while (primeForNumBits(hint) < (hash->size >> 1))
|
||||
++hint;
|
||||
} else if (hint < MinNumBits) {
|
||||
hint = MinNumBits;
|
||||
}
|
||||
|
||||
if (hash->numBits != hint) {
|
||||
struct util_node *e = (struct util_node *)(hash);
|
||||
struct util_node **oldBuckets = hash->buckets;
|
||||
int oldNumBuckets = hash->numBuckets;
|
||||
int i = 0;
|
||||
|
||||
hash->numBits = (short)hint;
|
||||
hash->numBuckets = primeForNumBits(hint);
|
||||
hash->buckets = malloc(sizeof(struct util_node*) * hash->numBuckets);
|
||||
for (i = 0; i < hash->numBuckets; ++i)
|
||||
hash->buckets[i] = e;
|
||||
|
||||
for (i = 0; i < oldNumBuckets; ++i) {
|
||||
struct util_node *firstNode = oldBuckets[i];
|
||||
while (firstNode != e) {
|
||||
unsigned h = firstNode->key;
|
||||
struct util_node *lastNode = firstNode;
|
||||
struct util_node *afterLastNode;
|
||||
struct util_node **beforeFirstNode;
|
||||
|
||||
while (lastNode->next != e && lastNode->next->key == h)
|
||||
lastNode = lastNode->next;
|
||||
|
||||
afterLastNode = lastNode->next;
|
||||
beforeFirstNode = &hash->buckets[h % hash->numBuckets];
|
||||
while (*beforeFirstNode != e)
|
||||
beforeFirstNode = &(*beforeFirstNode)->next;
|
||||
lastNode->next = *beforeFirstNode;
|
||||
*beforeFirstNode = firstNode;
|
||||
firstNode = afterLastNode;
|
||||
}
|
||||
}
|
||||
free(oldBuckets);
|
||||
}
|
||||
}
|
||||
|
||||
static void util_data_might_grow(struct util_hash_data *hash)
|
||||
{
|
||||
if (hash->size >= hash->numBuckets)
|
||||
util_data_rehash(hash, hash->numBits + 1);
|
||||
}
|
||||
|
||||
static void util_data_has_shrunk(struct util_hash_data *hash)
|
||||
{
|
||||
if (hash->size <= (hash->numBuckets >> 3) &&
|
||||
hash->numBits > hash->userNumBits) {
|
||||
int max = MAX(hash->numBits-2, hash->userNumBits);
|
||||
util_data_rehash(hash, max);
|
||||
}
|
||||
}
|
||||
|
||||
static struct util_node *util_data_first_node(struct util_hash_data *hash)
|
||||
{
|
||||
struct util_node *e = (struct util_node *)(hash);
|
||||
struct util_node **bucket = hash->buckets;
|
||||
int n = hash->numBuckets;
|
||||
while (n--) {
|
||||
if (*bucket != e)
|
||||
return *bucket;
|
||||
++bucket;
|
||||
}
|
||||
return e;
|
||||
}
|
||||
|
||||
static struct util_node **util_hash_find_node(struct util_hash *hash, unsigned akey)
|
||||
{
|
||||
struct util_node **node;
|
||||
|
||||
if (hash->data.d->numBuckets) {
|
||||
node = (struct util_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
|
||||
assert(*node == hash->data.e || (*node)->next);
|
||||
while (*node != hash->data.e && (*node)->key != akey)
|
||||
node = &(*node)->next;
|
||||
} else {
|
||||
node = (struct util_node **)((const struct util_node * const *)(&hash->data.e));
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
drm_private struct util_hash_iter
|
||||
util_hash_insert(struct util_hash *hash, unsigned key, void *data)
|
||||
{
|
||||
util_data_might_grow(hash->data.d);
|
||||
|
||||
{
|
||||
struct util_node **nextNode = util_hash_find_node(hash, key);
|
||||
struct util_node *node = util_hash_create_node(hash, key, data, nextNode);
|
||||
if (!node) {
|
||||
struct util_hash_iter null_iter = {hash, 0};
|
||||
return null_iter;
|
||||
}
|
||||
|
||||
{
|
||||
struct util_hash_iter iter = {hash, node};
|
||||
return iter;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
drm_private struct util_hash *util_hash_create(void)
|
||||
{
|
||||
struct util_hash *hash = malloc(sizeof(struct util_hash));
|
||||
if (!hash)
|
||||
return NULL;
|
||||
|
||||
hash->data.d = malloc(sizeof(struct util_hash_data));
|
||||
if (!hash->data.d) {
|
||||
free(hash);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hash->data.d->fakeNext = 0;
|
||||
hash->data.d->buckets = 0;
|
||||
hash->data.d->size = 0;
|
||||
hash->data.d->nodeSize = sizeof(struct util_node);
|
||||
hash->data.d->userNumBits = (short)MinNumBits;
|
||||
hash->data.d->numBits = 0;
|
||||
hash->data.d->numBuckets = 0;
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
drm_private void util_hash_delete(struct util_hash *hash)
|
||||
{
|
||||
struct util_node *e_for_x = (struct util_node *)(hash->data.d);
|
||||
struct util_node **bucket = (struct util_node **)(hash->data.d->buckets);
|
||||
int n = hash->data.d->numBuckets;
|
||||
while (n--) {
|
||||
struct util_node *cur = *bucket++;
|
||||
while (cur != e_for_x) {
|
||||
struct util_node *next = cur->next;
|
||||
util_free_node(cur);
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
free(hash->data.d->buckets);
|
||||
free(hash->data.d);
|
||||
free(hash);
|
||||
}
|
||||
|
||||
drm_private struct util_hash_iter
|
||||
util_hash_find(struct util_hash *hash, unsigned key)
|
||||
{
|
||||
struct util_node **nextNode = util_hash_find_node(hash, key);
|
||||
struct util_hash_iter iter = {hash, *nextNode};
|
||||
return iter;
|
||||
}
|
||||
|
||||
drm_private unsigned util_hash_iter_key(struct util_hash_iter iter)
|
||||
{
|
||||
if (!iter.node || iter.hash->data.e == iter.node)
|
||||
return 0;
|
||||
return iter.node->key;
|
||||
}
|
||||
|
||||
drm_private void *util_hash_iter_data(struct util_hash_iter iter)
|
||||
{
|
||||
if (!iter.node || iter.hash->data.e == iter.node)
|
||||
return 0;
|
||||
return iter.node->value;
|
||||
}
|
||||
|
||||
static struct util_node *util_hash_data_next(struct util_node *node)
|
||||
{
|
||||
union {
|
||||
struct util_node *next;
|
||||
struct util_node *e;
|
||||
struct util_hash_data *d;
|
||||
} a;
|
||||
int start;
|
||||
struct util_node **bucket;
|
||||
int n;
|
||||
|
||||
a.next = node->next;
|
||||
if (!a.next) {
|
||||
/* iterating beyond the last element */
|
||||
return 0;
|
||||
}
|
||||
if (a.next->next)
|
||||
return a.next;
|
||||
|
||||
start = (node->key % a.d->numBuckets) + 1;
|
||||
bucket = a.d->buckets + start;
|
||||
n = a.d->numBuckets - start;
|
||||
while (n--) {
|
||||
if (*bucket != a.e)
|
||||
return *bucket;
|
||||
++bucket;
|
||||
}
|
||||
return a.e;
|
||||
}
|
||||
|
||||
drm_private struct util_hash_iter
|
||||
util_hash_iter_next(struct util_hash_iter iter)
|
||||
{
|
||||
struct util_hash_iter next = {iter.hash, util_hash_data_next(iter.node)};
|
||||
return next;
|
||||
}
|
||||
|
||||
drm_private int util_hash_iter_is_null(struct util_hash_iter iter)
|
||||
{
|
||||
if (!iter.node || iter.node == iter.hash->data.e)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_private void *util_hash_take(struct util_hash *hash, unsigned akey)
|
||||
{
|
||||
struct util_node **node = util_hash_find_node(hash, akey);
|
||||
if (*node != hash->data.e) {
|
||||
void *t = (*node)->value;
|
||||
struct util_node *next = (*node)->next;
|
||||
util_free_node(*node);
|
||||
*node = next;
|
||||
--hash->data.d->size;
|
||||
util_data_has_shrunk(hash->data.d);
|
||||
return t;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash)
|
||||
{
|
||||
struct util_hash_iter iter = {hash, util_data_first_node(hash->data.d)};
|
||||
return iter;
|
||||
}
|
||||
|
||||
drm_private struct util_hash_iter
|
||||
util_hash_erase(struct util_hash *hash, struct util_hash_iter iter)
|
||||
{
|
||||
struct util_hash_iter ret = iter;
|
||||
struct util_node *node = iter.node;
|
||||
struct util_node **node_ptr;
|
||||
|
||||
if (node == hash->data.e)
|
||||
return iter;
|
||||
|
||||
ret = util_hash_iter_next(ret);
|
||||
node_ptr = (struct util_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
|
||||
while (*node_ptr != node)
|
||||
node_ptr = &(*node_ptr)->next;
|
||||
*node_ptr = node->next;
|
||||
util_free_node(node);
|
||||
--hash->data.d->size;
|
||||
return ret;
|
||||
}
|
||||
107	amdgpu/util_hash.h	Normal file
@@ -0,0 +1,107 @@
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Hash implementation.
 *
 * This file provides a hash implementation that is capable of dealing
 * with collisions. It stores colliding entries in a linked list. All
 * functions operating on the hash return an iterator. The iterator
 * itself points to the collision list. If there wasn't any collision
 * the list will have just one entry, otherwise client code should
 * iterate over the entries to find the exact entry among the ones that
 * had the same key (e.g. memcmp could be used on the data to check
 * that).
 *
 * @author Zack Rusin <zackr@vmware.com>
 */

#ifndef UTIL_HASH_H
#define UTIL_HASH_H

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdbool.h>

#include "libdrm_macros.h"

struct util_hash;
struct util_node;

struct util_hash_iter {
	struct util_hash *hash;
	struct util_node *node;
};


drm_private struct util_hash *util_hash_create(void);
drm_private void util_hash_delete(struct util_hash *hash);


/**
 * Adds data with the given key to the hash. If an entry with the given
 * key is already in the hash, the new entry is inserted before it
 * in the collision list.
 * The function returns an iterator pointing to the inserted item in the hash.
 */
drm_private struct util_hash_iter
util_hash_insert(struct util_hash *hash, unsigned key, void *data);

/**
 * Removes the item pointed to by the current iterator from the hash.
 * Note that the data itself is not erased and, if it was a malloc'ed pointer,
 * it will have to be freed by the caller after calling this function.
 * The function returns an iterator pointing to the item after the removed one
 * in the hash.
 */
drm_private struct util_hash_iter
util_hash_erase(struct util_hash *hash, struct util_hash_iter iter);

drm_private void *util_hash_take(struct util_hash *hash, unsigned key);


drm_private struct util_hash_iter util_hash_first_node(struct util_hash *hash);

/**
 * Return an iterator pointing to the first entry in the collision list.
 */
drm_private struct util_hash_iter
util_hash_find(struct util_hash *hash, unsigned key);


drm_private int util_hash_iter_is_null(struct util_hash_iter iter);
drm_private unsigned util_hash_iter_key(struct util_hash_iter iter);
drm_private void *util_hash_iter_data(struct util_hash_iter iter);


drm_private struct util_hash_iter
util_hash_iter_next(struct util_hash_iter iter);

#endif
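(Aside, not part of the diff: the collision-list behaviour described in the header comment above can be shown with a small illustrative sketch. It assumes compilation inside libdrm, where these drm_private symbols are visible; the demo() helper and the literal key 7 are made up for the example.)

#include <stdio.h>
#include "util_hash.h"

/* Illustrative only: two values stored under the same key share one
 * collision list, and the caller walks the iterator to pick the entry
 * it actually wants. */
static void demo(void)
{
	struct util_hash *hash = util_hash_create();
	struct util_hash_iter iter;

	if (!hash)
		return;

	util_hash_insert(hash, 7, "first");
	util_hash_insert(hash, 7, "second");	/* same key: prepended to the list */

	for (iter = util_hash_find(hash, 7);
	     !util_hash_iter_is_null(iter) && util_hash_iter_key(iter) == 7;
	     iter = util_hash_iter_next(iter))
		printf("%s\n", (char *)util_hash_iter_data(iter));

	util_hash_delete(hash);
}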
262	amdgpu/util_hash_table.c	Normal file
@@ -0,0 +1,262 @@
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * General purpose hash table implementation.
 *
 * Just uses the util_hash for now, but it might be better to switch to a
 * linear probing hash table implementation at some point -- as it is said
 * they have better lookup and cache performance, and it appears to be
 * possible to write a lock-free implementation of such hash tables.
 *
 * @author José Fonseca <jfonseca@vmware.com>
 */


#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "util_hash_table.h"
#include "util_hash.h"

#include <stdlib.h>
#include <assert.h>

struct util_hash_table
{
	struct util_hash *head;

	/** Hash function */
	unsigned (*make_hash)(void *key);

	/** Compare two keys */
	int (*compare)(void *key1, void *key2);
};

struct util_hash_table_item
{
	void *key;
	void *value;
};


static struct util_hash_table_item *
util_hash_table_item(struct util_hash_iter iter)
{
	return (struct util_hash_table_item *)util_hash_iter_data(iter);
}

drm_private struct util_hash_table *
util_hash_table_create(unsigned (*hash)(void *key),
		       int (*compare)(void *key1, void *key2))
{
	struct util_hash_table *ht;

	ht = malloc(sizeof(struct util_hash_table));
	if (!ht)
		return NULL;

	ht->head = util_hash_create();
	if (!ht->head) {
		free(ht);
		return NULL;
	}

	ht->make_hash = hash;
	ht->compare = compare;

	return ht;
}

static struct util_hash_iter
util_hash_table_find_iter(struct util_hash_table *ht,
			  void *key, unsigned key_hash)
{
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	iter = util_hash_find(ht->head, key_hash);
	while (!util_hash_iter_is_null(iter)) {
		item = (struct util_hash_table_item *)util_hash_iter_data(iter);
		if (!ht->compare(item->key, key))
			break;
		iter = util_hash_iter_next(iter);
	}

	return iter;
}

static struct util_hash_table_item *
util_hash_table_find_item(struct util_hash_table *ht,
			  void *key, unsigned key_hash)
{
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	iter = util_hash_find(ht->head, key_hash);
	while (!util_hash_iter_is_null(iter)) {
		item = (struct util_hash_table_item *)util_hash_iter_data(iter);
		if (!ht->compare(item->key, key))
			return item;
		iter = util_hash_iter_next(iter);
	}

	return NULL;
}

drm_private void
util_hash_table_set(struct util_hash_table *ht, void *key, void *value)
{
	unsigned key_hash;
	struct util_hash_table_item *item;
	struct util_hash_iter iter;

	assert(ht);
	if (!ht)
		return;

	key_hash = ht->make_hash(key);

	item = util_hash_table_find_item(ht, key, key_hash);
	if (item) {
		/* TODO: key/value destruction? */
		item->value = value;
		return;
	}

	item = malloc(sizeof(struct util_hash_table_item));
	if (!item)
		return;

	item->key = key;
	item->value = value;

	iter = util_hash_insert(ht->head, key_hash, item);
	if (util_hash_iter_is_null(iter)) {
		free(item);
		return;
	}
}

drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key)
{
	unsigned key_hash;
	struct util_hash_table_item *item;

	assert(ht);
	if (!ht)
		return NULL;

	key_hash = ht->make_hash(key);

	item = util_hash_table_find_item(ht, key, key_hash);
	if (!item)
		return NULL;

	return item->value;
}

drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key)
{
	unsigned key_hash;
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	assert(ht);
	if (!ht)
		return;

	key_hash = ht->make_hash(key);

	iter = util_hash_table_find_iter(ht, key, key_hash);
	if (util_hash_iter_is_null(iter))
		return;

	item = util_hash_table_item(iter);
	assert(item);
	free(item);

	util_hash_erase(ht->head, iter);
}

drm_private void util_hash_table_clear(struct util_hash_table *ht)
{
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	assert(ht);
	if (!ht)
		return;

	iter = util_hash_first_node(ht->head);
	while (!util_hash_iter_is_null(iter)) {
		item = (struct util_hash_table_item *)util_hash_take(ht->head, util_hash_iter_key(iter));
		free(item);
		iter = util_hash_first_node(ht->head);
	}
}

drm_private void util_hash_table_foreach(struct util_hash_table *ht,
			void (*callback)(void *key, void *value, void *data),
			void *data)
{
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	assert(ht);
	if (!ht)
		return;

	iter = util_hash_first_node(ht->head);
	while (!util_hash_iter_is_null(iter)) {
		item = (struct util_hash_table_item *)util_hash_iter_data(iter);
		callback(item->key, item->value, data);
		iter = util_hash_iter_next(iter);
	}
}

drm_private void util_hash_table_destroy(struct util_hash_table *ht)
{
	struct util_hash_iter iter;
	struct util_hash_table_item *item;

	assert(ht);
	if (!ht)
		return;

	iter = util_hash_first_node(ht->head);
	while (!util_hash_iter_is_null(iter)) {
		item = (struct util_hash_table_item *)util_hash_iter_data(iter);
		free(item);
		iter = util_hash_iter_next(iter);
	}

	util_hash_delete(ht->head);
	free(ht);
}
73	amdgpu/util_hash_table.h	Normal file
@@ -0,0 +1,73 @@
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * General purpose hash table.
 *
 * @author José Fonseca <jfonseca@vmware.com>
 */

#ifndef U_HASH_TABLE_H_
#define U_HASH_TABLE_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "libdrm_macros.h"

/**
 * Generic purpose hash table.
 */
struct util_hash_table;

/**
 * Create a hash table.
 *
 * @param hash hash function
 * @param compare should return 0 for two equal keys.
 */
drm_private struct util_hash_table *
util_hash_table_create(unsigned (*hash)(void *key),
		       int (*compare)(void *key1, void *key2));

drm_private void
util_hash_table_set(struct util_hash_table *ht, void *key, void *value);

drm_private void *util_hash_table_get(struct util_hash_table *ht, void *key);

drm_private void util_hash_table_remove(struct util_hash_table *ht, void *key);

drm_private void util_hash_table_clear(struct util_hash_table *ht);

drm_private void util_hash_table_foreach(struct util_hash_table *ht,
			void (*callback)(void *key, void *value, void *data),
			void *data);

drm_private void util_hash_table_destroy(struct util_hash_table *ht);

#endif /* U_HASH_TABLE_H_ */
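(Aside, not part of the diff: a minimal sketch of how the util_hash_table API above is typically driven. The int_hash and int_compare callbacks and the integer key are hypothetical examples; like the previous sketch, this only links inside libdrm because the symbols are drm_private.)

#include <stdint.h>
#include <stdio.h>
#include "util_hash_table.h"

/* Hypothetical callbacks: keys are small integers smuggled through void *. */
static unsigned int_hash(void *key)
{
	return (unsigned)(uintptr_t)key;	/* identity hash */
}

static int int_compare(void *key1, void *key2)
{
	return key1 != key2;			/* 0 means "equal" */
}

static void table_demo(void)
{
	struct util_hash_table *ht = util_hash_table_create(int_hash, int_compare);

	if (!ht)
		return;

	util_hash_table_set(ht, (void *)(uintptr_t)42, "value for 42");
	printf("%s\n", (char *)util_hash_table_get(ht, (void *)(uintptr_t)42));

	util_hash_table_remove(ht, (void *)(uintptr_t)42);
	util_hash_table_destroy(ht);
}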
@@ -1,111 +0,0 @@
/*
 * Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
 * Copyright (C) 2010-2011 LunarG Inc.
 * Copyright (C) 2016 Linaro, Ltd., Rob Herring <robh@kernel.org>
 * Copyright (C) 2018 Collabora, Robert Foss <robert.foss@collabora.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __ANDROID_GRALLOC_HANDLE_H__
#define __ANDROID_GRALLOC_HANDLE_H__

#include <cutils/native_handle.h>
#include <stdint.h>

/* support users of drm_gralloc/gbm_gralloc */
#define gralloc_gbm_handle_t gralloc_handle_t
#define gralloc_drm_handle_t gralloc_handle_t

struct gralloc_handle_t {
	native_handle_t base;

	/* dma-buf file descriptor
	 * Must be located first, since native_handle_t is allocated
	 * using native_handle_create(), which allocates space for
	 * sizeof(native_handle_t) + sizeof(int) * (numFds + numInts)
	 * numFds = GRALLOC_HANDLE_NUM_FDS
	 * numInts = GRALLOC_HANDLE_NUM_INTS
	 * Where numFds represents the number of FDs and
	 * numInts represents the space needed for the
	 * remainder of this struct.
	 * And the FDs are expected to be found first, following
	 * native_handle_t.
	 */
	int prime_fd;

	/* api variables */
	uint32_t magic; /* differentiate between allocator impls */
	uint32_t version; /* api version */

	uint32_t width; /* width of buffer in pixels */
	uint32_t height; /* height of buffer in pixels */
	uint32_t format; /* pixel format (Android) */
	uint32_t usage; /* android libhardware usage flags */

	uint32_t stride; /* the stride in bytes */
	int data_owner; /* owner of data (for validation) */
	uint64_t modifier __attribute__((aligned(8))); /* buffer modifiers */

	union {
		void *data; /* pointer to struct gralloc_gbm_bo_t */
		uint64_t reserved;
	} __attribute__((aligned(8)));
};

#define GRALLOC_HANDLE_VERSION 4
#define GRALLOC_HANDLE_MAGIC 0x60585350
#define GRALLOC_HANDLE_NUM_FDS 1
#define GRALLOC_HANDLE_NUM_INTS ( \
	((sizeof(struct gralloc_handle_t) - sizeof(native_handle_t))/sizeof(int)) \
	- GRALLOC_HANDLE_NUM_FDS)

static inline struct gralloc_handle_t *gralloc_handle(buffer_handle_t handle)
{
	return (struct gralloc_handle_t *)handle;
}

/**
 * Create a buffer handle.
 */
static inline native_handle_t *gralloc_handle_create(int32_t width,
                                                     int32_t height,
                                                     int32_t hal_format,
                                                     int32_t usage)
{
	struct gralloc_handle_t *handle;
	native_handle_t *nhandle = native_handle_create(GRALLOC_HANDLE_NUM_FDS,
							GRALLOC_HANDLE_NUM_INTS);

	if (!nhandle)
		return NULL;

	handle = gralloc_handle(nhandle);
	handle->magic = GRALLOC_HANDLE_MAGIC;
	handle->version = GRALLOC_HANDLE_VERSION;
	handle->width = width;
	handle->height = height;
	handle->format = hal_format;
	handle->usage = usage;
	handle->prime_fd = -1;

	return nhandle;
}

#endif
14	autogen.sh	Executable file
@@ -0,0 +1,14 @@
#! /bin/sh

srcdir=`dirname "$0"`
test -z "$srcdir" && srcdir=.

ORIGDIR=`pwd`
cd "$srcdir"

autoreconf --force --verbose --install || exit 1
cd "$ORIGDIR" || exit $?

if test -z "$NOCONFIGURE"; then
    "$srcdir"/configure "$@"
fi
527	configure.ac	Normal file
@@ -0,0 +1,527 @@
# Copyright 2005 Adam Jackson.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

AC_PREREQ([2.63])
AC_INIT([libdrm],
        [2.4.64],
        [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
        [libdrm])

AC_CONFIG_HEADERS([config.h])
AC_CONFIG_SRCDIR([Makefile.am])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])

# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC
m4_ifndef([XORG_MACROS_VERSION],
          [m4_fatal([must install xorg-macros 1.12 or later before running autoconf/autogen])])
XORG_MACROS_VERSION(1.12)
XORG_WITH_XSLTPROC
XORG_MANPAGE_SECTIONS

AM_INIT_AUTOMAKE([1.10 foreign dist-bzip2])

# Enable quiet compiles on automake 1.11.
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])

# Check for programs
AC_PROG_CC
AC_PROG_CC_C99

if test "x$ac_cv_prog_cc_c99" = xno; then
	AC_MSG_ERROR([Building libdrm requires a C99 enabled compiler])
fi

AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_FUNC_ALLOCA

AC_CHECK_HEADERS([sys/mkdev.h sys/sysctl.h])

# Initialize libtool
LT_PREREQ([2.2])
LT_INIT([disable-static])


PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs)
AC_SUBST(PTHREADSTUBS_CFLAGS)
AC_SUBST(PTHREADSTUBS_LIBS)

pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
AC_ARG_ENABLE([udev],
              [AS_HELP_STRING([--enable-udev],
                              [Enable support for using udev instead of mknod (default: disabled)])],
              [UDEV=$enableval], [UDEV=no])

AC_ARG_ENABLE(libkms,
              AS_HELP_STRING([--disable-libkms],
                             [Disable KMS mm abstraction library (default: auto, enabled on supported platforms)]),
              [LIBKMS=$enableval], [LIBKMS=auto])

AC_ARG_ENABLE(intel,
              AS_HELP_STRING([--disable-intel],
                             [Enable support for intel's KMS API (default: auto, enabled on x86)]),
              [INTEL=$enableval], [INTEL=auto])

AC_ARG_ENABLE(radeon,
              AS_HELP_STRING([--disable-radeon],
                             [Enable support for radeon's KMS API (default: auto)]),
              [RADEON=$enableval], [RADEON=auto])

AC_ARG_ENABLE(amdgpu,
              AS_HELP_STRING([--disable-amdgpu],
                             [Enable support for amdgpu's KMS API (default: auto)]),
              [AMDGPU=$enableval], [AMDGPU=auto])

AC_ARG_ENABLE(nouveau,
              AS_HELP_STRING([--disable-nouveau],
                             [Enable support for nouveau's KMS API (default: auto)]),
              [NOUVEAU=$enableval], [NOUVEAU=auto])

AC_ARG_ENABLE(vmwgfx,
              AS_HELP_STRING([--disable-vmwgfx],
                             [Enable support for vmwgfx's KMS API (default: yes)]),
              [VMWGFX=$enableval], [VMWGFX=yes])

AC_ARG_ENABLE(omap-experimental-api,
              AS_HELP_STRING([--enable-omap-experimental-api],
                             [Enable support for OMAP's experimental API (default: disabled)]),
              [OMAP=$enableval], [OMAP=no])

AC_ARG_ENABLE(exynos-experimental-api,
              AS_HELP_STRING([--enable-exynos-experimental-api],
                             [Enable support for EXYNOS's experimental API (default: disabled)]),
              [EXYNOS=$enableval], [EXYNOS=no])

AC_ARG_ENABLE(freedreno,
              AS_HELP_STRING([--disable-freedreno],
                             [Enable support for freedreno's KMS API (default: auto, enabled on arm)]),
              [FREEDRENO=$enableval], [FREEDRENO=auto])

AC_ARG_ENABLE(freedreno-kgsl,
              AS_HELP_STRING([--enable-freedreno-kgsl],
                             [Enable support for freedreno to use the downstream android kernel API (default: disabled)]),
              [FREEDRENO_KGSL=$enableval], [FREEDRENO_KGSL=no])

AC_ARG_ENABLE(tegra-experimental-api,
              AS_HELP_STRING([--enable-tegra-experimental-api],
                             [Enable support for Tegra's experimental API (default: disabled)]),
              [TEGRA=$enableval], [TEGRA=no])

AC_ARG_ENABLE(install-test-programs,
              AS_HELP_STRING([--enable-install-test-programs],
                             [Install test programs (default: no)]),
              [INSTALL_TESTS=$enableval], [INSTALL_TESTS=no])

dnl ===========================================================================
dnl check compiler flags
AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
  AC_MSG_CHECKING([whether $CC supports $1])

  libdrm_save_CFLAGS="$CFLAGS"
  CFLAGS="$CFLAGS $1"

  AC_COMPILE_IFELSE([AC_LANG_SOURCE([ ])], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
  CFLAGS="$libdrm_save_CFLAGS"

  if test "x$libdrm_cc_flag" = "xyes"; then
    ifelse([$2], , :, [$2])
  else
    ifelse([$3], , :, [$3])
  fi
  AC_MSG_RESULT([$libdrm_cc_flag])
])

dnl We use clock_gettime to check for timeouts in drmWaitVBlank

AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
               [AC_CHECK_LIB([rt], [clock_gettime], [CLOCK_LIB=-lrt],
                             [AC_MSG_ERROR([Couldn't find clock_gettime])])])
AC_SUBST([CLOCK_LIB])

AC_CHECK_FUNCS([open_memstream], [HAVE_OPEN_MEMSTREAM=yes])

dnl Use lots of warning flags with gcc and compatible compilers

dnl Note: if you change the following variable, the cache is automatically
dnl skipped and all flags rechecked.  So there's no need to do anything
dnl else.  If for any reason you need to force a recheck, just change
dnl MAYBE_WARN in an ignorable way (like adding whitespace)

MAYBE_WARN="-Wall -Wextra \
-Wsign-compare -Werror-implicit-function-declaration \
-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
-Wpacked -Wswitch-enum -Wmissing-format-attribute \
-Wstrict-aliasing=2 -Winit-self \
-Wdeclaration-after-statement -Wold-style-definition \
-Wno-missing-field-initializers -Wno-unused-parameter \
-Wno-attributes -Wno-long-long -Winline"

# invalidate cached value if MAYBE_WARN has changed
if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
	unset libdrm_cv_warn_cflags
fi
AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
	echo
	WARN_CFLAGS=""

	# Some warning options are not supported by all versions of
	# gcc, so test all desired options against the current
	# compiler.
	#
	# Note that there are some order dependencies
	# here. Specifically, an option that disables a warning will
	# have no net effect if a later option then enables that
	# warning, (perhaps implicitly). So we put some grouped
	# options (-Wall and -Wextra) up front and the -Wno options
	# last.

	for W in $MAYBE_WARN; do
		LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
	done

	libdrm_cv_warn_cflags=$WARN_CFLAGS
	libdrm_cv_warn_maybe=$MAYBE_WARN

	AC_MSG_CHECKING([which warning flags were supported])])
WARN_CFLAGS="$libdrm_cv_warn_cflags"

# Check for atomic intrinsics
AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives, [
	drm_cv_atomic_primitives="none"

	AC_LINK_IFELSE([AC_LANG_PROGRAM([[
	int atomic_add(int i) { return __sync_fetch_and_add (&i, 1); }
	int atomic_cmpxchg(int i, int j, int k) { return __sync_val_compare_and_swap (&i, j, k); }
	]],[[]])], [drm_cv_atomic_primitives="Intel"],[])

	if test "x$drm_cv_atomic_primitives" = "xnone"; then
		AC_CHECK_HEADER([atomic_ops.h], drm_cv_atomic_primitives="libatomic-ops")
	fi

	# atomic functions defined in <atomic.h> & libc on Solaris
	if test "x$drm_cv_atomic_primitives" = "xnone"; then
		AC_CHECK_FUNC([atomic_cas_uint], drm_cv_atomic_primitives="Solaris")
	fi
])

if test "x$drm_cv_atomic_primitives" = xIntel; then
	AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1,
		[Enable if your compiler supports the Intel __sync_* atomic primitives])
fi
if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then
	AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed])
fi

dnl Print out the appropriate message considering the value set by the
dnl respective variable in $1.
dnl $1 - value to be evaluated. Eg. $INTEL, $NOUVEAU, ...
dnl $2 - libdrm shortname. Eg. intel, freedreno, ...
dnl $3 - GPU name/brand. Eg. Intel, NVIDIA Tegra, ...
dnl $4 - Configure switch. Eg. intel, omap-experimental-api, ...
AC_DEFUN([LIBDRM_ATOMICS_NOT_FOUND_MSG], [
	case "x$1" in
		xyes)	AC_MSG_ERROR([libdrm_$2 depends upon atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomics-op-dev package, or, failing both of those, disable support for $3 GPUs by passing --disable-$4 to ./configure]) ;;
		xauto)	AC_MSG_WARN([Disabling $2. It depends on atomic operations, which were not found for your compiler/cpu. Try compiling with -march=native, or install the libatomics-op-dev package.]) ;;
		*)	;;
	esac
])

if test "x$drm_cv_atomic_primitives" = "xnone"; then
	LIBDRM_ATOMICS_NOT_FOUND_MSG($INTEL, intel, Intel, intel)
	INTEL=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($RADEON, radeon, Radeon, radeon)
	RADEON=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($AMDGPU, amdgpu, AMD, amdgpu)
	AMDGPU=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($NOUVEAU, nouveau, NVIDIA, nouveau)
	NOUVEAU=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($OMAP, omap, OMAP, omap-experimental-api)
	OMAP=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($FREEDRENO, freedreno, Qualcomm Adreno, freedreno)
	FREEDRENO=no

	LIBDRM_ATOMICS_NOT_FOUND_MSG($TEGRA, tegra, NVIDIA Tegra, tegra-experimental-api)
	TEGRA=no
else
	if test "x$INTEL" = xauto; then
		case $host_cpu in
			i?86|x86_64)	INTEL=yes ;;
			*)		INTEL=no ;;
		esac
	fi
	if test "x$RADEON" = xauto; then
		RADEON=yes
	fi
	if test "x$AMDGPU" = xauto; then
		AMDGPU=yes
	fi
	if test "x$NOUVEAU" = xauto; then
		NOUVEAU=yes
	fi
	if test "x$FREEDRENO" = xauto; then
		case $host_cpu in
			arm*|aarch64)	FREEDRENO=yes ;;
			*)		FREEDRENO=no ;;
		esac
	fi
fi

if test "x$INTEL" != "xno"; then
	PKG_CHECK_MODULES(PCIACCESS, [pciaccess >= 0.10])
fi
AC_SUBST(PCIACCESS_CFLAGS)
AC_SUBST(PCIACCESS_LIBS)

if test "x$UDEV" = xyes; then
	AC_DEFINE(UDEV, 1, [Have UDEV support])
fi

AC_CANONICAL_HOST
if test "x$LIBKMS" = xauto ; then
	case $host_os in
		linux*)		LIBKMS="yes" ;;
		freebsd* | kfreebsd*-gnu)
				LIBKMS="yes" ;;
		dragonfly*)	LIBKMS="yes" ;;
		*)		LIBKMS="no" ;;
	esac
fi

AM_CONDITIONAL(HAVE_LIBKMS, [test "x$LIBKMS" = xyes])

AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes])
if test "x$INTEL" = xyes; then
	AC_DEFINE(HAVE_INTEL, 1, [Have intel support])
fi

AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes])
if test "x$VMWGFX" = xyes; then
	AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers])
fi

AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
if test "x$NOUVEAU" = xyes; then
	AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support])
fi

AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes])
if test "x$OMAP" = xyes; then
	AC_DEFINE(HAVE_OMAP, 1, [Have OMAP support])
fi

AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes])
if test "x$EXYNOS" = xyes; then
	AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support])
fi

AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes])
if test "x$FREEDRENO" = xyes; then
	AC_DEFINE(HAVE_FREEDRENO, 1, [Have freedreno support])
fi

if test "x$FREEDRENO_KGSL" = xyes; then
	if test "x$FREEDRENO" != xyes; then
		AC_MSG_ERROR([Cannot enable freedreno KGSL interface if freedreno is disabled])
	fi
fi
AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes])
if test "x$FREEDRENO_KGSL" = xyes; then
	AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface])
fi

AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes])
if test "x$RADEON" = xyes; then
	AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
fi

# Detect cunit library
PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no])
# If pkg-config does not find cunit, check it using AC_CHECK_LIB.  We
# do this because Debian (Ubuntu) lacks the pkg-config file for cunit.
# fixed in 2.1-2.dfsg-3: http://anonscm.debian.org/cgit/collab-maint/cunit.git/commit/?h=debian
if test "x${have_cunit}" = "xno"; then
	AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no])
	if test "x${have_cunit}" = "xyes"; then
		CUNIT_LIBS="-lcunit"
		CUNIT_CFLAGS=""
		AC_SUBST([CUNIT_LIBS])
		AC_SUBST([CUNIT_CFLAGS])
	fi
fi
AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"])

AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
if test "x$AMDGPU" = xyes; then
	AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])

	AC_DEFINE(HAVE_CUNIT, [test "x$have_cunit" != "xno"], [Enable CUNIT Have amdgpu support])

	if test "x$have_cunit" = "xno"; then
		AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
	fi
fi

AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
if test "x$TEGRA" = xyes; then
	AC_DEFINE(HAVE_TEGRA, 1, [Have Tegra support])
fi

AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
if test "x$INSTALL_TESTS" = xyes; then
	AC_DEFINE(HAVE_INSTALL_TESTS, 1, [Install test programs])
fi

AC_ARG_ENABLE([cairo-tests],
              [AS_HELP_STRING([--enable-cairo-tests],
                              [Enable support for Cairo rendering in tests (default: auto)])],
              [CAIRO=$enableval], [CAIRO=auto])
PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no])
AC_MSG_CHECKING([whether to enable Cairo tests])
if test "x$CAIRO" = xauto; then
	CAIRO="$HAVE_CAIRO"
fi
if test "x$CAIRO" = xyes; then
	if ! test "x$HAVE_CAIRO" = xyes; then
		AC_MSG_ERROR([Cairo support required but not present])
	fi
	AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support])
fi
AC_MSG_RESULT([$CAIRO])
AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])

# For enumerating devices in test case
PKG_CHECK_MODULES(LIBUDEV, libudev, [HAVE_LIBUDEV=yes], [HAVE_LIBUDEV=no])
if test "x$HAVE_LIBUDEV" = xyes; then
	AC_DEFINE(HAVE_LIBUDEV, 1, [Have libudev support])
fi
AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])

# xsltproc for docbook manpages
AC_ARG_ENABLE([manpages],
              AS_HELP_STRING([--enable-manpages], [enable manpages @<:@default=auto@:>@]),
              [MANS=$enableval], [MANS=auto])
AM_CONDITIONAL([BUILD_MANPAGES], [test "x$XSLTPROC" != "x" -a "x$MANS" != "xno"])

# check for offline man-pages stylesheet
AC_MSG_CHECKING([for docbook manpages stylesheet])
MANPAGES_STYLESHEET="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"
AC_PATH_PROGS_FEATURE_CHECK([XSLTPROC_TMP], [xsltproc],
                            AS_IF([`"$ac_path_XSLTPROC_TMP" --nonet "$MANPAGES_STYLESHEET" > /dev/null 2>&1`],
                                  [HAVE_MANPAGES_STYLESHEET=yes]))
if test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"; then
	AC_SUBST(MANPAGES_STYLESHEET)
	AC_MSG_RESULT([yes])
else
	AC_MSG_RESULT([no])
fi
AM_CONDITIONAL([HAVE_MANPAGES_STYLESHEET], [test "x$HAVE_MANPAGES_STYLESHEET" = "xyes"])

AC_ARG_ENABLE(valgrind,
              [AS_HELP_STRING([--enable-valgrind],
                              [Build libdrm with valgrind support (default: auto)])],
              [VALGRIND=$enableval], [VALGRIND=auto])
PKG_CHECK_MODULES(VALGRIND, [valgrind], [have_valgrind=yes], [have_valgrind=no])
AC_MSG_CHECKING([whether to enable Valgrind support])
if test "x$VALGRIND" = xauto; then
	VALGRIND="$have_valgrind"
fi

if test "x$VALGRIND" = "xyes"; then
	if ! test "x$have_valgrind" = xyes; then
		AC_MSG_ERROR([Valgrind support required but not present])
	fi
	AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
fi

AC_MSG_RESULT([$VALGRIND])

AC_ARG_WITH([kernel-source],
            [AS_HELP_STRING([--with-kernel-source],
                            [specify path to linux kernel source])],
            [kernel_source="$with_kernel_source"])
AC_SUBST(kernel_source)

AC_MSG_CHECKING([whether $CC supports __attribute__((visibility("hidden")))])
AC_LINK_IFELSE([AC_LANG_PROGRAM([
    int foo_hidden( void ) __attribute__((visibility("hidden")));
])], HAVE_ATTRIBUTE_VISIBILITY="yes"; AC_MSG_RESULT([yes]), AC_MSG_RESULT([no]));

if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then
	AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__((visibility("hidden")))])
fi

AC_SUBST(WARN_CFLAGS)
AC_CONFIG_FILES([
	Makefile
	libkms/Makefile
	libkms/libkms.pc
	intel/Makefile
	intel/libdrm_intel.pc
	radeon/Makefile
	radeon/libdrm_radeon.pc
	amdgpu/Makefile
	amdgpu/libdrm_amdgpu.pc
	nouveau/Makefile
	nouveau/libdrm_nouveau.pc
	omap/Makefile
	omap/libdrm_omap.pc
	exynos/Makefile
	exynos/libdrm_exynos.pc
	freedreno/Makefile
	freedreno/libdrm_freedreno.pc
	tegra/Makefile
	tegra/libdrm_tegra.pc
	tests/Makefile
	tests/modeprint/Makefile
	tests/modetest/Makefile
	tests/kmstest/Makefile
	tests/proptest/Makefile
	tests/radeon/Makefile
	tests/amdgpu/Makefile
	tests/vbltest/Makefile
	tests/exynos/Makefile
	tests/tegra/Makefile
	tests/nouveau/Makefile
	man/Makefile
	libdrm.pc])
AC_OUTPUT

echo ""
echo "$PACKAGE_STRING will be compiled with:"
echo ""
echo "  libkms         $LIBKMS"
echo "  Intel API      $INTEL"
echo "  vmwgfx API     $VMWGFX"
echo "  Radeon API     $RADEON"
echo "  AMDGPU API     $AMDGPU"
echo "  Nouveau API    $NOUVEAU"
echo "  OMAP API       $OMAP"
echo "  EXYNOS API     $EXYNOS"
echo "  Freedreno API  $FREEDRENO (kgsl: $FREEDRENO_KGSL)"
echo "  Tegra API      $TEGRA"
echo ""
212	core-symbols.txt
@@ -1,212 +0,0 @@
drmAddBufs
drmAddContextPrivateMapping
drmAddContextTag
drmAddMap
drmAgpAcquire
drmAgpAlloc
drmAgpBase
drmAgpBind
drmAgpDeviceId
drmAgpEnable
drmAgpFree
drmAgpGetMode
drmAgpMemoryAvail
drmAgpMemoryUsed
drmAgpRelease
drmAgpSize
drmAgpUnbind
drmAgpVendorId
drmAgpVersionMajor
drmAgpVersionMinor
drmAuthMagic
drmAvailable
drmCheckModesettingSupported
drmClose
drmCloseBufferHandle
drmCloseOnce
drmCommandNone
drmCommandRead
drmCommandWrite
drmCommandWriteRead
drmCreateContext
drmCreateDrawable
drmCrtcGetSequence
drmCrtcQueueSequence
drmCtlInstHandler
drmCtlUninstHandler
drmDelContextTag
drmDestroyContext
drmDestroyDrawable
drmDevicesEqual
drmDMA
drmDropMaster
drmError
drmFinish
drmFree
drmFreeBufs
drmFreeBusid
drmFreeDevice
drmFreeDevices
drmFreeReservedContextList
drmFreeVersion
drmGetBufInfo
drmGetBusid
drmGetCap
drmGetClient
drmGetContextFlags
drmGetContextPrivateMapping
drmGetContextTag
drmGetDevice
drmGetDevice2
drmGetDeviceFromDevId
drmGetDeviceNameFromFd
drmGetDeviceNameFromFd2
drmGetDevices
drmGetDevices2
drmGetEntry
drmGetHashTable
drmGetInterruptFromBusID
drmGetLibVersion
drmGetLock
drmGetMagic
drmGetMap
drmGetNodeTypeFromDevId
drmGetNodeTypeFromFd
drmGetPrimaryDeviceNameFromFd
drmGetRenderDeviceNameFromFd
drmGetReservedContextList
drmGetStats
drmGetVersion
drmHandleEvent
drmHashCreate
drmHashDelete
drmHashDestroy
drmHashFirst
drmHashInsert
drmHashLookup
drmHashNext
drmIoctl
drmIsKMS
drmIsMaster
drmMalloc
drmMap
drmMapBufs
drmMarkBufs
drmModeAddFB
drmModeAddFB2
drmModeAddFB2WithModifiers
drmModeAtomicAddProperty
drmModeAtomicAlloc
drmModeAtomicCommit
drmModeAtomicDuplicate
drmModeAtomicFree
drmModeAtomicGetCursor
drmModeAtomicMerge
drmModeAtomicSetCursor
drmModeAttachMode
drmModeCloseFB
drmModeConnectorGetPossibleCrtcs
drmModeConnectorSetProperty
drmModeCreateDumbBuffer
drmModeCreateLease
drmModeCreatePropertyBlob
drmModeCrtcGetGamma
drmModeCrtcSetGamma
drmModeDestroyDumbBuffer
drmModeDestroyPropertyBlob
drmModeDetachMode
drmModeDirtyFB
drmModeFormatModifierBlobIterNext
drmModeFreeConnector
drmModeFreeCrtc
drmModeFreeEncoder
drmModeFreeFB
drmModeFreeFB2
drmModeFreeModeInfo
drmModeFreeObjectProperties
drmModeFreePlane
drmModeFreePlaneResources
drmModeFreeProperty
drmModeFreePropertyBlob
drmModeFreeResources
drmModeGetConnector
drmModeGetConnectorCurrent
drmModeGetConnectorTypeName
drmModeGetCrtc
drmModeGetEncoder
drmModeGetFB
drmModeGetFB2
drmModeGetLease
drmModeGetPlane
drmModeGetPlaneResources
drmModeGetProperty
drmModeGetPropertyBlob
drmModeGetResources
drmModeListLessees
drmModeMapDumbBuffer
drmModeMoveCursor
drmModeObjectGetProperties
drmModeObjectSetProperty
drmModePageFlip
drmModePageFlipTarget
drmModeRevokeLease
drmModeRmFB
drmModeSetCrtc
drmModeSetCursor
drmModeSetCursor2
drmModeSetPlane
drmMsg
drmOpen
drmOpenControl
drmOpenOnce
drmOpenOnceWithType
drmOpenRender
drmOpenWithType
drmPrimeFDToHandle
drmPrimeHandleToFD
drmRandom
drmRandomCreate
drmRandomDestroy
drmRandomDouble
drmRmMap
drmScatterGatherAlloc
drmScatterGatherFree
drmSetBusid
drmSetClientCap
drmSetContextFlags
drmSetInterfaceVersion
drmSetMaster
drmSetServerInfo
drmSLCreate
drmSLDelete
drmSLDestroy
drmSLDump
drmSLFirst
drmSLInsert
drmSLLookup
drmSLLookupNeighbors
drmSLNext
drmSwitchToContext
drmSyncobjCreate
drmSyncobjDestroy
drmSyncobjEventfd
drmSyncobjExportSyncFile
drmSyncobjFDToHandle
drmSyncobjHandleToFD
drmSyncobjImportSyncFile
drmSyncobjQuery
drmSyncobjQuery2
drmSyncobjReset
drmSyncobjSignal
drmSyncobjTimelineSignal
drmSyncobjTimelineWait
drmSyncobjTransfer
drmSyncobjWait
drmUnlock
drmUnmap
drmUnmapBufs
drmUpdateDrawableInfo
drmWaitVBlank
drmGetFormatModifierName
drmGetFormatModifierVendor
drmGetFormatName
@@ -1,6 +0,0 @@
prebuilt_etc {
    name: "amdgpu.ids",
    proprietary: true,
    sub_dir: "hwdata",
    src: "amdgpu.ids",
}
749	data/amdgpu.ids
@@ -1,749 +0,0 @@
# List of AMDGPU IDs
#
# Syntax:
# device_id,	revision_id,	product_name    <-- single tab after comma

1.0.0
1114,	C2,	AMD Radeon 860M Graphics
1114,	C3,	AMD Radeon 840M Graphics
1114,	D2,	AMD Radeon 860M Graphics
1114,	D3,	AMD Radeon 840M Graphics
1114,	E2,	AMD Radeon 860M Graphics
1114,	E4,	AMD Radeon 860M Graphics
1114,	E5,	AMD Radeon 840M Graphics
1114,	E9,	AMD Radeon 860M Graphics
1114,	EA,	AMD Radeon 840M Graphics
1114,	ED,	AMD Radeon 860M Graphics
1114,	EE,	AMD Radeon 840M Graphics
1114,	F2,	AMD Radeon 860M Graphics
1114,	F3,	AMD Radeon 840M Graphics
1114,	F9,	AMD Radeon 860M Graphics
1114,	FA,	AMD Radeon 840M Graphics
1114,	FC,	AMD Radeon 860M Graphics
1114,	FD,	AMD Radeon 840M Graphics
1309,	00,	AMD Radeon R7 Graphics
130A,	00,	AMD Radeon R6 Graphics
130B,	00,	AMD Radeon R4 Graphics
130C,	00,	AMD Radeon R7 Graphics
130D,	00,	AMD Radeon R6 Graphics
130E,	00,	AMD Radeon R5 Graphics
130F,	00,	AMD Radeon R7 Graphics
130F,	D4,	AMD Radeon R7 Graphics
130F,	D5,	AMD Radeon R7 Graphics
130F,	D6,	AMD Radeon R7 Graphics
130F,	D7,	AMD Radeon R7 Graphics
1313,	00,	AMD Radeon R7 Graphics
1313,	D4,	AMD Radeon R7 Graphics
1313,	D5,	AMD Radeon R7 Graphics
1313,	D6,	AMD Radeon R7 Graphics
1315,	00,	AMD Radeon R5 Graphics
1315,	D4,	AMD Radeon R5 Graphics
1315,	D5,	AMD Radeon R5 Graphics
1315,	D6,	AMD Radeon R5 Graphics
1315,	D7,	AMD Radeon R5 Graphics
1316,	00,	AMD Radeon R5 Graphics
1318,	00,	AMD Radeon R5 Graphics
131B,	00,	AMD Radeon R4 Graphics
131C,	00,	AMD Radeon R7 Graphics
131D,	00,	AMD Radeon R6 Graphics
1435,	AE,	AMD Custom GPU 0932
1506,	C1,	AMD Radeon 610M
1506,	C2,	AMD Radeon 610M
1506,	C3,	AMD Radeon 610M
1506,	C4,	AMD Radeon 610M
150E,	C1,	AMD Radeon 890M Graphics
150E,	C4,	AMD Radeon 880M Graphics
150E,	C5,	AMD Radeon 890M Graphics
150E,	C6,	AMD Radeon 890M Graphics
150E,	C7,	AMD Radeon 890M Graphics
150E,	D1,	AMD Radeon 890M Graphics
150E,	D2,	AMD Radeon 880M Graphics
150E,	D3,	AMD Radeon 890M Graphics
150E,	E1,	AMD Radeon 890M Graphics
150E,	E3,	AMD Radeon 890M Graphics
150E,	E4,	AMD Radeon 890M Graphics
150E,	F1,	AMD Radeon 890M Graphics
150E,	F3,	AMD Radeon 890M Graphics
1586,	C1,	AMD Radeon 8060S Graphics
1586,	C2,	AMD Radeon 8050S Graphics
1586,	C3,	AMD Radeon 8060S Graphics
1586,	C4,	AMD Radeon 8050S Graphics
1586,	C6,	AMD Radeon 8060S Graphics
1586,	D1,	AMD Radeon 8060S Graphics
1586,	D2,	AMD Radeon 8050S Graphics
1586,	D4,	AMD Radeon 8050S Graphics
1586,	D5,	AMD Radeon 8040S Graphics
15BF,	00,	AMD Radeon 780M Graphics
15BF,	01,	AMD Radeon 760M Graphics
15BF,	02,	AMD Radeon 780M Graphics
15BF,	03,	AMD Radeon 760M Graphics
15BF,	05,	AMD Radeon 760M Graphics
15BF,	06,	AMD Radeon 780M Graphics
15BF,	07,	AMD Radeon 740M Graphics
15BF,	08,	AMD Radeon 740M Graphics
15BF,	C1,	AMD Radeon 780M Graphics
15BF,	C2,	AMD Radeon 780M Graphics
15BF,	C3,	AMD Radeon 760M Graphics
15BF,	C4,	AMD Radeon 780M Graphics
15BF,	C5,	AMD Radeon 740M Graphics
15BF,	C6,	AMD Radeon 780M Graphics
15BF,	C7,	AMD Radeon 780M Graphics
15BF,	C8,	AMD Radeon 760M Graphics
15BF,	C9,	AMD Radeon 780M Graphics
15BF,	CA,	AMD Radeon 740M Graphics
15BF,	CB,	AMD Radeon 760M Graphics
15BF,	CC,	AMD Radeon 740M Graphics
15BF,	CD,	AMD Radeon 760M Graphics
15BF,	CE,	AMD Radeon 740M Graphics
15BF,	CF,	AMD Radeon 780M Graphics
15BF,	D0,	AMD Radeon 780M Graphics
15BF,	D1,	AMD Radeon 780M Graphics
15BF,	D2,	AMD Radeon 780M Graphics
15BF,	D3,	AMD Radeon 780M Graphics
15BF,	D4,	AMD Radeon 780M Graphics
15BF,	D5,	AMD Radeon 760M Graphics
15BF,	D6,	AMD Radeon 760M Graphics
15BF,	D7,	AMD Radeon 780M Graphics
15BF,	D8,	AMD Radeon 740M Graphics
15BF,	D9,	AMD Radeon 780M Graphics
15BF,	DA,	AMD Radeon 780M Graphics
15BF,	DB,	AMD Radeon 760M Graphics
|
||||
15BF, DC, AMD Radeon 760M Graphics
|
||||
15BF, DD, AMD Radeon 780M Graphics
|
||||
15BF, DE, AMD Radeon 740M Graphics
|
||||
15BF, DF, AMD Radeon 760M Graphics
|
||||
15BF, F0, AMD Radeon 760M Graphics
|
||||
15C8, C1, AMD Radeon 740M Graphics
|
||||
15C8, C2, AMD Radeon 740M Graphics
|
||||
15C8, C3, AMD Radeon 740M Graphics
|
||||
15C8, C4, AMD Radeon 740M Graphics
|
||||
15C8, C5, AMD Radeon 740M Graphics
|
||||
15C8, C6, AMD Radeon 740M Graphics
|
||||
15C8, C7, AMD Radeon 740M Graphics
|
||||
15C8, C8, AMD Radeon 740M Graphics
|
||||
15C8, D1, AMD Radeon 740M Graphics
|
||||
15C8, D2, AMD Radeon 740M Graphics
|
||||
15C8, D3, AMD Radeon 740M Graphics
|
||||
15C8, D4, AMD Radeon 740M Graphics
|
||||
15C8, D5, AMD Radeon 740M Graphics
|
||||
15C8, D6, AMD Radeon 740M Graphics
|
||||
15C8, D7, AMD Radeon 740M Graphics
|
||||
15C8, D8, AMD Radeon 740M Graphics
|
||||
15D8, 00, AMD Radeon RX Vega 8 Graphics WS
|
||||
15D8, 91, AMD Radeon Vega 3 Graphics
|
||||
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
|
||||
15D8, 92, AMD Radeon Vega 3 Graphics
|
||||
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
|
||||
15D8, 93, AMD Radeon Vega 1 Graphics
|
||||
15D8, A1, AMD Radeon Vega 10 Graphics
|
||||
15D8, A2, AMD Radeon Vega 8 Graphics
|
||||
15D8, A3, AMD Radeon Vega 6 Graphics
|
||||
15D8, A4, AMD Radeon Vega 3 Graphics
|
||||
15D8, B1, AMD Radeon Vega 10 Graphics
|
||||
15D8, B2, AMD Radeon Vega 8 Graphics
|
||||
15D8, B3, AMD Radeon Vega 6 Graphics
|
||||
15D8, B4, AMD Radeon Vega 3 Graphics
|
||||
15D8, C1, AMD Radeon Vega 10 Graphics
|
||||
15D8, C2, AMD Radeon Vega 8 Graphics
|
||||
15D8, C3, AMD Radeon Vega 6 Graphics
|
||||
15D8, C4, AMD Radeon Vega 3 Graphics
|
||||
15D8, C5, AMD Radeon Vega 3 Graphics
|
||||
15D8, C8, AMD Radeon Vega 11 Graphics
|
||||
15D8, C9, AMD Radeon Vega 8 Graphics
|
||||
15D8, CA, AMD Radeon Vega 11 Graphics
|
||||
15D8, CB, AMD Radeon Vega 8 Graphics
|
||||
15D8, CC, AMD Radeon Vega 3 Graphics
|
||||
15D8, CE, AMD Radeon Vega 3 Graphics
|
||||
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
|
||||
15D8, D1, AMD Radeon Vega 10 Graphics
|
||||
15D8, D2, AMD Radeon Vega 8 Graphics
|
||||
15D8, D3, AMD Radeon Vega 6 Graphics
|
||||
15D8, D4, AMD Radeon Vega 3 Graphics
|
||||
15D8, D8, AMD Radeon Vega 11 Graphics
|
||||
15D8, D9, AMD Radeon Vega 8 Graphics
|
||||
15D8, DA, AMD Radeon Vega 11 Graphics
|
||||
15D8, DB, AMD Radeon Vega 3 Graphics
|
||||
15D8, DB, AMD Radeon Vega 8 Graphics
|
||||
15D8, DC, AMD Radeon Vega 3 Graphics
|
||||
15D8, DD, AMD Radeon Vega 3 Graphics
|
||||
15D8, DE, AMD Radeon Vega 3 Graphics
|
||||
15D8, DF, AMD Radeon Vega 3 Graphics
|
||||
15D8, E3, AMD Radeon Vega 3 Graphics
|
||||
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
|
||||
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
|
||||
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
|
||||
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
|
||||
15DD, 84, AMD Radeon Vega 6 Graphics
|
||||
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
|
||||
15DD, 86, AMD Radeon Vega 11 Graphics
|
||||
15DD, 88, AMD Radeon Vega 8 Graphics
|
||||
15DD, C1, AMD Radeon Vega 11 Graphics
|
||||
15DD, C2, AMD Radeon Vega 8 Graphics
|
||||
15DD, C3, AMD Radeon Vega 3 / 10 Graphics
|
||||
15DD, C4, AMD Radeon Vega 8 Graphics
|
||||
15DD, C5, AMD Radeon Vega 3 Graphics
|
||||
15DD, C6, AMD Radeon Vega 11 Graphics
|
||||
15DD, C8, AMD Radeon Vega 8 Graphics
|
||||
15DD, C9, AMD Radeon Vega 11 Graphics
|
||||
15DD, CA, AMD Radeon Vega 8 Graphics
|
||||
15DD, CB, AMD Radeon Vega 3 Graphics
|
||||
15DD, CC, AMD Radeon Vega 6 Graphics
|
||||
15DD, CE, AMD Radeon Vega 3 Graphics
|
||||
15DD, CF, AMD Radeon Vega 3 Graphics
|
||||
15DD, D0, AMD Radeon Vega 10 Graphics
|
||||
15DD, D1, AMD Radeon Vega 8 Graphics
|
||||
15DD, D3, AMD Radeon Vega 11 Graphics
|
||||
15DD, D5, AMD Radeon Vega 8 Graphics
|
||||
15DD, D6, AMD Radeon Vega 11 Graphics
|
||||
15DD, D7, AMD Radeon Vega 8 Graphics
|
||||
15DD, D8, AMD Radeon Vega 3 Graphics
|
||||
15DD, D9, AMD Radeon Vega 6 Graphics
|
||||
15DD, E1, AMD Radeon Vega 3 Graphics
|
||||
15DD, E2, AMD Radeon Vega 3 Graphics
|
||||
163F, AE, AMD Custom GPU 0405
|
||||
163F, E1, AMD Custom GPU 0405
|
||||
164E, D8, AMD Radeon 610M
|
||||
164E, D9, AMD Radeon 610M
|
||||
164E, DA, AMD Radeon 610M
|
||||
164E, DB, AMD Radeon 610M
|
||||
164E, DC, AMD Radeon 610M
|
||||
1681, 06, AMD Radeon 680M
|
||||
1681, 07, AMD Radeon 660M
|
||||
1681, 0A, AMD Radeon 680M
|
||||
1681, 0B, AMD Radeon 660M
|
||||
1681, C7, AMD Radeon 680M
|
||||
1681, C8, AMD Radeon 680M
|
||||
1681, C9, AMD Radeon 660M
|
||||
1900, 01, AMD Radeon 780M Graphics
|
||||
1900, 02, AMD Radeon 760M Graphics
|
||||
1900, 03, AMD Radeon 780M Graphics
|
||||
1900, 04, AMD Radeon 760M Graphics
|
||||
1900, 05, AMD Radeon 780M Graphics
|
||||
1900, 06, AMD Radeon 780M Graphics
|
||||
1900, 07, AMD Radeon 760M Graphics
|
||||
1900, B0, AMD Radeon 780M Graphics
|
||||
1900, B1, AMD Radeon 780M Graphics
|
||||
1900, B2, AMD Radeon 780M Graphics
|
||||
1900, B3, AMD Radeon 780M Graphics
|
||||
1900, B4, AMD Radeon 780M Graphics
|
||||
1900, B5, AMD Radeon 780M Graphics
|
||||
1900, B6, AMD Radeon 780M Graphics
|
||||
1900, B7, AMD Radeon 760M Graphics
|
||||
1900, B8, AMD Radeon 760M Graphics
|
||||
1900, B9, AMD Radeon 780M Graphics
|
||||
1900, BA, AMD Radeon 780M Graphics
|
||||
1900, BB, AMD Radeon 780M Graphics
|
||||
1900, C0, AMD Radeon 780M Graphics
|
||||
1900, C1, AMD Radeon 760M Graphics
|
||||
1900, C2, AMD Radeon 780M Graphics
|
||||
1900, C3, AMD Radeon 760M Graphics
|
||||
1900, C4, AMD Radeon 780M Graphics
|
||||
1900, C5, AMD Radeon 780M Graphics
|
||||
1900, C6, AMD Radeon 760M Graphics
|
||||
1900, C7, AMD Radeon 780M Graphics
|
||||
1900, C8, AMD Radeon 760M Graphics
|
||||
1900, C9, AMD Radeon 780M Graphics
|
||||
1900, CA, AMD Radeon 760M Graphics
|
||||
1900, CB, AMD Radeon 780M Graphics
|
||||
1900, CC, AMD Radeon 780M Graphics
|
||||
1900, CD, AMD Radeon 760M Graphics
|
||||
1900, CE, AMD Radeon 780M Graphics
|
||||
1900, CF, AMD Radeon 760M Graphics
|
||||
1900, D0, AMD Radeon 780M Graphics
|
||||
1900, D1, AMD Radeon 760M Graphics
|
||||
1900, D2, AMD Radeon 780M Graphics
|
||||
1900, D3, AMD Radeon 760M Graphics
|
||||
1900, D4, AMD Radeon 780M Graphics
|
||||
1900, D5, AMD Radeon 780M Graphics
|
||||
1900, D6, AMD Radeon 760M Graphics
|
||||
1900, D7, AMD Radeon 780M Graphics
|
||||
1900, D8, AMD Radeon 760M Graphics
|
||||
1900, D9, AMD Radeon 780M Graphics
|
||||
1900, DA, AMD Radeon 760M Graphics
|
||||
1900, DB, AMD Radeon 780M Graphics
|
||||
1900, DC, AMD Radeon 780M Graphics
|
||||
1900, DD, AMD Radeon 760M Graphics
|
||||
1900, DE, AMD Radeon 780M Graphics
|
||||
1900, DF, AMD Radeon 760M Graphics
|
||||
1900, F0, AMD Radeon 780M Graphics
|
||||
1900, F1, AMD Radeon 780M Graphics
|
||||
1900, F2, AMD Radeon 780M Graphics
|
||||
1901, C1, AMD Radeon 740M Graphics
|
||||
1901, C2, AMD Radeon 740M Graphics
|
||||
1901, C3, AMD Radeon 740M Graphics
|
||||
1901, C6, AMD Radeon 740M Graphics
|
||||
1901, C7, AMD Radeon 740M Graphics
|
||||
1901, C8, AMD Radeon 740M Graphics
|
||||
1901, C9, AMD Radeon 740M Graphics
|
||||
1901, CA, AMD Radeon 740M Graphics
|
||||
1901, D1, AMD Radeon 740M Graphics
|
||||
1901, D2, AMD Radeon 740M Graphics
|
||||
1901, D3, AMD Radeon 740M Graphics
|
||||
1901, D4, AMD Radeon 740M Graphics
|
||||
1901, D5, AMD Radeon 740M Graphics
|
||||
1901, D6, AMD Radeon 740M Graphics
|
||||
1901, D7, AMD Radeon 740M Graphics
|
||||
1901, D8, AMD Radeon 740M Graphics
|
||||
1902, C0, AMD Radeon 840M Graphics
|
||||
1902, C1, AMD Radeon 840M Graphics
|
||||
1902, C2, AMD Radeon 820M Graphics
|
||||
1902, C3, AMD Radeon 840M Graphics
|
||||
1902, C6, AMD Radeon 820M Graphics
|
||||
1902, C7, AMD Radeon 840M Graphics
|
||||
1902, C8, AMD Radeon 840M Graphics
|
||||
1902, C9, AMD Radeon 820M Graphics
|
||||
1902, CA, AMD Radeon 840M Graphics
|
||||
1902, D1, AMD Radeon 840M Graphics
|
||||
1902, D3, AMD Radeon 840M Graphics
|
||||
1902, D7, AMD Radeon 840M Graphics
|
||||
1902, D8, AMD Radeon 840M Graphics
|
||||
6600, 00, AMD Radeon HD 8600 / 8700M
|
||||
6600, 81, AMD Radeon R7 M370
|
||||
6601, 00, AMD Radeon HD 8500M / 8700M
|
||||
6604, 00, AMD Radeon R7 M265 Series
|
||||
6604, 81, AMD Radeon R7 M350
|
||||
6605, 00, AMD Radeon R7 M260 Series
|
||||
6605, 81, AMD Radeon R7 M340
|
||||
6606, 00, AMD Radeon HD 8790M
|
||||
6607, 00, AMD Radeon R5 M240
|
||||
6608, 00, AMD FirePro W2100
|
||||
6610, 00, AMD Radeon R7 200 Series
|
||||
6610, 81, AMD Radeon R7 350
|
||||
6610, 83, AMD Radeon R5 340
|
||||
6610, 87, AMD Radeon R7 200 Series
|
||||
6611, 00, AMD Radeon R7 200 Series
|
||||
6611, 87, AMD Radeon R7 200 Series
|
||||
6613, 00, AMD Radeon R7 200 Series
|
||||
6617, 00, AMD Radeon R7 240 Series
|
||||
6617, 87, AMD Radeon R7 200 Series
|
||||
6617, C7, AMD Radeon R7 240 Series
|
||||
6640, 00, AMD Radeon HD 8950
|
||||
6640, 80, AMD Radeon R9 M380
|
||||
6646, 00, AMD Radeon R9 M280X
|
||||
6646, 80, AMD Radeon R9 M385
|
||||
6646, 80, AMD Radeon R9 M470X
|
||||
6647, 00, AMD Radeon R9 M200X Series
|
||||
6647, 80, AMD Radeon R9 M380
|
||||
6649, 00, AMD FirePro W5100
|
||||
6658, 00, AMD Radeon R7 200 Series
|
||||
665C, 00, AMD Radeon HD 7700 Series
|
||||
665D, 00, AMD Radeon R7 200 Series
|
||||
665F, 81, AMD Radeon R7 360 Series
|
||||
6660, 00, AMD Radeon HD 8600M Series
|
||||
6660, 81, AMD Radeon R5 M335
|
||||
6660, 83, AMD Radeon R5 M330
|
||||
6663, 00, AMD Radeon HD 8500M Series
|
||||
6663, 83, AMD Radeon R5 M320
|
||||
6664, 00, AMD Radeon R5 M200 Series
|
||||
6665, 00, AMD Radeon R5 M230 Series
|
||||
6665, 83, AMD Radeon R5 M320
|
||||
6665, C3, AMD Radeon R5 M435
|
||||
6666, 00, AMD Radeon R5 M200 Series
|
||||
6667, 00, AMD Radeon R5 M200 Series
|
||||
666F, 00, AMD Radeon HD 8500M
|
||||
66A1, 02, AMD Instinct MI60 / MI50
|
||||
66A1, 06, AMD Radeon Pro VII
|
||||
66AF, C1, AMD Radeon VII
|
||||
6780, 00, AMD FirePro W9000
|
||||
6784, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||
6788, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||
678A, 00, AMD FirePro W8000
|
||||
6798, 00, AMD Radeon R9 200 / HD 7900 Series
|
||||
6799, 00, AMD Radeon HD 7900 Series
|
||||
679A, 00, AMD Radeon HD 7900 Series
|
||||
679B, 00, AMD Radeon HD 7900 Series
|
||||
679E, 00, AMD Radeon HD 7800 Series
|
||||
67A0, 00, AMD Radeon FirePro W9100
|
||||
67A1, 00, AMD Radeon FirePro W8100
|
||||
67B0, 00, AMD Radeon R9 200 Series
|
||||
67B0, 80, AMD Radeon R9 390 Series
|
||||
67B1, 00, AMD Radeon R9 200 Series
|
||||
67B1, 80, AMD Radeon R9 390 Series
|
||||
67B9, 00, AMD Radeon R9 200 Series
|
||||
67C0, 00, AMD Radeon Pro WX 7100 Graphics
|
||||
67C0, 80, AMD Radeon E9550
|
||||
67C2, 01, AMD Radeon Pro V7350x2
|
||||
67C2, 02, AMD Radeon Pro V7300X
|
||||
67C4, 00, AMD Radeon Pro WX 7100 Graphics
|
||||
67C4, 80, AMD Radeon E9560 / E9565 Graphics
|
||||
67C7, 00, AMD Radeon Pro WX 5100 Graphics
|
||||
67C7, 80, AMD Radeon E9390 Graphics
|
||||
67D0, 01, AMD Radeon Pro V7350x2
|
||||
67D0, 02, AMD Radeon Pro V7300X
|
||||
67DF, C0, AMD Radeon Pro 580X
|
||||
67DF, C1, AMD Radeon RX 580 Series
|
||||
67DF, C2, AMD Radeon RX 570 Series
|
||||
67DF, C3, AMD Radeon RX 580 Series
|
||||
67DF, C4, AMD Radeon RX 480 Graphics
|
||||
67DF, C5, AMD Radeon RX 470 Graphics
|
||||
67DF, C6, AMD Radeon RX 570 Series
|
||||
67DF, C7, AMD Radeon RX 480 Graphics
|
||||
67DF, CF, AMD Radeon RX 470 Graphics
|
||||
67DF, D7, AMD Radeon RX 470 Graphics
|
||||
67DF, E0, AMD Radeon RX 470 Series
|
||||
67DF, E1, AMD Radeon RX 590 Series
|
||||
67DF, E3, AMD Radeon RX Series
|
||||
67DF, E7, AMD Radeon RX 580 Series
|
||||
67DF, EB, AMD Radeon Pro 580X
|
||||
67DF, EF, AMD Radeon RX 570 Series
|
||||
67DF, F7, AMD Radeon RX P30PH
|
||||
67DF, FF, AMD Radeon RX 470 Series
|
||||
67E0, 00, AMD Radeon Pro WX Series
|
||||
67E3, 00, AMD Radeon Pro WX 4100
|
||||
67E8, 00, AMD Radeon Pro WX Series
|
||||
67E8, 01, AMD Radeon Pro WX Series
|
||||
67E8, 80, AMD Radeon E9260 Graphics
|
||||
67EB, 00, AMD Radeon Pro V5300X
|
||||
67EF, C0, AMD Radeon RX Graphics
|
||||
67EF, C1, AMD Radeon RX 460 Graphics
|
||||
67EF, C2, AMD Radeon Pro Series
|
||||
67EF, C3, AMD Radeon RX Series
|
||||
67EF, C5, AMD Radeon RX 460 Graphics
|
||||
67EF, C7, AMD Radeon RX Graphics
|
||||
67EF, CF, AMD Radeon RX 460 Graphics
|
||||
67EF, E0, AMD Radeon RX 560 Series
|
||||
67EF, E1, AMD Radeon RX Series
|
||||
67EF, E2, AMD Radeon RX 560X
|
||||
67EF, E3, AMD Radeon RX Series
|
||||
67EF, E5, AMD Radeon RX 560 Series
|
||||
67EF, E7, AMD Radeon RX 560 Series
|
||||
67EF, EF, AMD Radeon 550 Series
|
||||
67EF, FF, AMD Radeon RX 460 Graphics
|
||||
67FF, C0, AMD Radeon Pro 465
|
||||
67FF, C1, AMD Radeon RX 560 Series
|
||||
67FF, CF, AMD Radeon RX 560 Series
|
||||
67FF, EF, AMD Radeon RX 560 Series
|
||||
67FF, FF, AMD Radeon RX 550 Series
|
||||
6800, 00, AMD Radeon HD 7970M
|
||||
6801, 00, AMD Radeon HD 8970M
|
||||
6806, 00, AMD Radeon R9 M290X
|
||||
6808, 00, AMD FirePro W7000
|
||||
6808, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||
6809, 00, ATI FirePro W5000
|
||||
6810, 00, AMD Radeon R9 200 Series
|
||||
6810, 81, AMD Radeon R9 370 Series
|
||||
6811, 00, AMD Radeon R9 200 Series
|
||||
6811, 81, AMD Radeon R7 370 Series
|
||||
6818, 00, AMD Radeon HD 7800 Series
|
||||
6819, 00, AMD Radeon HD 7800 Series
|
||||
6820, 00, AMD Radeon R9 M275X
|
||||
6820, 81, AMD Radeon R9 M375
|
||||
6820, 83, AMD Radeon R9 M375X
|
||||
6821, 00, AMD Radeon R9 M200X Series
|
||||
6821, 83, AMD Radeon R9 M370X
|
||||
6821, 87, AMD Radeon R7 M380
|
||||
6822, 00, AMD Radeon E8860
|
||||
6823, 00, AMD Radeon R9 M200X Series
|
||||
6825, 00, AMD Radeon HD 7800M Series
|
||||
6826, 00, AMD Radeon HD 7700M Series
|
||||
6827, 00, AMD Radeon HD 7800M Series
|
||||
6828, 00, AMD FirePro W600
|
||||
682B, 00, AMD Radeon HD 8800M Series
|
||||
682B, 87, AMD Radeon R9 M360
|
||||
682C, 00, AMD FirePro W4100
|
||||
682D, 00, AMD Radeon HD 7700M Series
|
||||
682F, 00, AMD Radeon HD 7700M Series
|
||||
6830, 00, AMD Radeon 7800M Series
|
||||
6831, 00, AMD Radeon 7700M Series
|
||||
6835, 00, AMD Radeon R7 Series / HD 9000 Series
|
||||
6837, 00, AMD Radeon HD 7700 Series
|
||||
683D, 00, AMD Radeon HD 7700 Series
|
||||
683F, 00, AMD Radeon HD 7700 Series
|
||||
684C, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||
6860, 00, AMD Radeon Instinct MI25
|
||||
6860, 01, AMD Radeon Instinct MI25
|
||||
6860, 02, AMD Radeon Instinct MI25
|
||||
6860, 03, AMD Radeon Pro V340
|
||||
6860, 04, AMD Radeon Instinct MI25x2
|
||||
6860, 07, AMD Radeon Pro V320
|
||||
6861, 00, AMD Radeon Pro WX 9100
|
||||
6862, 00, AMD Radeon Pro SSG
|
||||
6863, 00, AMD Radeon Vega Frontier Edition
|
||||
6864, 03, AMD Radeon Pro V340
|
||||
6864, 04, AMD Radeon Instinct MI25x2
|
||||
6864, 05, AMD Radeon Pro V340
|
||||
6868, 00, AMD Radeon Pro WX 8200
|
||||
686C, 00, AMD Radeon Instinct MI25 MxGPU
|
||||
686C, 01, AMD Radeon Instinct MI25 MxGPU
|
||||
686C, 02, AMD Radeon Instinct MI25 MxGPU
|
||||
686C, 03, AMD Radeon Pro V340 MxGPU
|
||||
686C, 04, AMD Radeon Instinct MI25x2 MxGPU
|
||||
686C, 05, AMD Radeon Pro V340L MxGPU
|
||||
686C, 06, AMD Radeon Instinct MI25 MxGPU
|
||||
687F, 01, AMD Radeon RX Vega
|
||||
687F, C0, AMD Radeon RX Vega
|
||||
687F, C1, AMD Radeon RX Vega
|
||||
687F, C3, AMD Radeon RX Vega
|
||||
687F, C7, AMD Radeon RX Vega
|
||||
6900, 00, AMD Radeon R7 M260
|
||||
6900, 81, AMD Radeon R7 M360
|
||||
6900, 83, AMD Radeon R7 M340
|
||||
6900, C1, AMD Radeon R5 M465 Series
|
||||
6900, C3, AMD Radeon R5 M445 Series
|
||||
6900, D1, AMD Radeon 530 Series
|
||||
6900, D3, AMD Radeon 530 Series
|
||||
6901, 00, AMD Radeon R5 M255
|
||||
6902, 00, AMD Radeon Series
|
||||
6907, 00, AMD Radeon R5 M255
|
||||
6907, 87, AMD Radeon R5 M315
|
||||
6920, 00, AMD Radeon R9 M395X
|
||||
6920, 01, AMD Radeon R9 M390X
|
||||
6921, 00, AMD Radeon R9 M390X
|
||||
6929, 00, AMD FirePro S7150
|
||||
6929, 01, AMD FirePro S7100X
|
||||
692B, 00, AMD FirePro W7100
|
||||
6938, 00, AMD Radeon R9 200 Series
|
||||
6938, F0, AMD Radeon R9 200 Series
|
||||
6938, F1, AMD Radeon R9 380 Series
|
||||
6939, 00, AMD Radeon R9 200 Series
|
||||
6939, F0, AMD Radeon R9 200 Series
|
||||
6939, F1, AMD Radeon R9 380 Series
|
||||
694C, C0, AMD Radeon RX Vega M GH Graphics
|
||||
694E, C0, AMD Radeon RX Vega M GL Graphics
|
||||
6980, 00, AMD Radeon Pro WX 3100
|
||||
6981, 00, AMD Radeon Pro WX 3200 Series
|
||||
6981, 01, AMD Radeon Pro WX 3200 Series
|
||||
6981, 10, AMD Radeon Pro WX 3200 Series
|
||||
6985, 00, AMD Radeon Pro WX 3100
|
||||
6986, 00, AMD Radeon Pro WX 2100
|
||||
6987, 80, AMD Embedded Radeon E9171
|
||||
6987, C0, AMD Radeon 550X Series
|
||||
6987, C1, AMD Radeon RX 640
|
||||
6987, C3, AMD Radeon 540X Series
|
||||
6987, C7, AMD Radeon 540
|
||||
6995, 00, AMD Radeon Pro WX 2100
|
||||
6997, 00, AMD Radeon Pro WX 2100
|
||||
699F, 81, AMD Embedded Radeon E9170 Series
|
||||
699F, C0, AMD Radeon 500 Series
|
||||
699F, C1, AMD Radeon 540 Series
|
||||
699F, C3, AMD Radeon 500 Series
|
||||
699F, C7, AMD Radeon RX 550 / 550 Series
|
||||
699F, C9, AMD Radeon 540
|
||||
6FDF, E7, AMD Radeon RX 590 GME
|
||||
6FDF, EF, AMD Radeon RX 580 2048SP
|
||||
7300, C1, AMD FirePro S9300 x2
|
||||
7300, C8, AMD Radeon R9 Fury Series
|
||||
7300, C9, AMD Radeon Pro Duo
|
||||
7300, CA, AMD Radeon R9 Fury Series
|
||||
7300, CB, AMD Radeon R9 Fury Series
|
||||
7312, 00, AMD Radeon Pro W5700
|
||||
731E, C6, AMD Radeon RX 5700XTB
|
||||
731E, C7, AMD Radeon RX 5700B
|
||||
731F, C0, AMD Radeon RX 5700 XT 50th Anniversary
|
||||
731F, C1, AMD Radeon RX 5700 XT
|
||||
731F, C2, AMD Radeon RX 5600M
|
||||
731F, C3, AMD Radeon RX 5700M
|
||||
731F, C4, AMD Radeon RX 5700
|
||||
731F, C5, AMD Radeon RX 5700 XT
|
||||
731F, CA, AMD Radeon RX 5600 XT
|
||||
731F, CB, AMD Radeon RX 5600 OEM
|
||||
7340, C1, AMD Radeon RX 5500M
|
||||
7340, C3, AMD Radeon RX 5300M
|
||||
7340, C5, AMD Radeon RX 5500 XT
|
||||
7340, C7, AMD Radeon RX 5500
|
||||
7340, C9, AMD Radeon RX 5500XTB
|
||||
7340, CF, AMD Radeon RX 5300
|
||||
7341, 00, AMD Radeon Pro W5500
|
||||
7347, 00, AMD Radeon Pro W5500M
|
||||
7360, 41, AMD Radeon Pro 5600M
|
||||
7360, C3, AMD Radeon Pro V520
|
||||
7362, C1, AMD Radeon Pro V540
|
||||
7362, C3, AMD Radeon Pro V520
|
||||
738C, 01, AMD Instinct MI100
|
||||
73A1, 00, AMD Radeon Pro V620
|
||||
73A3, 00, AMD Radeon Pro W6800
|
||||
73A5, C0, AMD Radeon RX 6950 XT
|
||||
73AE, 00, AMD Radeon Pro V620 MxGPU
|
||||
73AF, C0, AMD Radeon RX 6900 XT
|
||||
73BF, C0, AMD Radeon RX 6900 XT
|
||||
73BF, C1, AMD Radeon RX 6800 XT
|
||||
73BF, C3, AMD Radeon RX 6800
|
||||
73DF, C0, AMD Radeon RX 6750 XT
|
||||
73DF, C1, AMD Radeon RX 6700 XT
|
||||
73DF, C2, AMD Radeon RX 6800M
|
||||
73DF, C3, AMD Radeon RX 6800M
|
||||
73DF, C5, AMD Radeon RX 6700 XT
|
||||
73DF, CF, AMD Radeon RX 6700M
|
||||
73DF, D5, AMD Radeon RX 6750 GRE 12GB
|
||||
73DF, D7, AMD TDC-235
|
||||
73DF, DF, AMD Radeon RX 6700
|
||||
73DF, E5, AMD Radeon RX 6750 GRE 12GB
|
||||
73DF, FF, AMD Radeon RX 6700
|
||||
73E0, 00, AMD Radeon RX 6600M
|
||||
73E1, 00, AMD Radeon Pro W6600M
|
||||
73E3, 00, AMD Radeon Pro W6600
|
||||
73EF, C0, AMD Radeon RX 6800S
|
||||
73EF, C1, AMD Radeon RX 6650 XT
|
||||
73EF, C2, AMD Radeon RX 6700S
|
||||
73EF, C3, AMD Radeon RX 6650M
|
||||
73EF, C4, AMD Radeon RX 6650M XT
|
||||
73FF, C1, AMD Radeon RX 6600 XT
|
||||
73FF, C3, AMD Radeon RX 6600M
|
||||
73FF, C7, AMD Radeon RX 6600
|
||||
73FF, CB, AMD Radeon RX 6600S
|
||||
73FF, CF, AMD Radeon RX 6600 LE
|
||||
73FF, DF, AMD Radeon RX 6750 GRE 10GB
|
||||
7408, 00, AMD Instinct MI250X
|
||||
740C, 01, AMD Instinct MI250X / MI250
|
||||
740F, 02, AMD Instinct MI210
|
||||
7421, 00, AMD Radeon Pro W6500M
|
||||
7422, 00, AMD Radeon Pro W6400
|
||||
7423, 00, AMD Radeon Pro W6300M
|
||||
7423, 01, AMD Radeon Pro W6300
|
||||
7424, 00, AMD Radeon RX 6300
|
||||
743F, C1, AMD Radeon RX 6500 XT
|
||||
743F, C3, AMD Radeon RX 6500
|
||||
743F, C3, AMD Radeon RX 6500M
|
||||
743F, C7, AMD Radeon RX 6400
|
||||
743F, C8, AMD Radeon RX 6500M
|
||||
743F, CC, AMD Radeon 6550S
|
||||
743F, CE, AMD Radeon RX 6450M
|
||||
743F, CF, AMD Radeon RX 6300M
|
||||
743F, D3, AMD Radeon RX 6550M
|
||||
743F, D7, AMD Radeon RX 6400
|
||||
7448, 00, AMD Radeon Pro W7900
|
||||
7449, 00, AMD Radeon Pro W7800 48GB
|
||||
744A, 00, AMD Radeon Pro W7900 Dual Slot
|
||||
744B, 00, AMD Radeon Pro W7900D
|
||||
744C, C8, AMD Radeon RX 7900 XTX
|
||||
744C, CC, AMD Radeon RX 7900 XT
|
||||
744C, CE, AMD Radeon RX 7900 GRE
|
||||
744C, CF, AMD Radeon RX 7900M
|
||||
745E, CC, AMD Radeon Pro W7800
|
||||
7460, 00, AMD Radeon Pro V710
|
||||
7461, 00, AMD Radeon Pro V710 MxGPU
|
||||
7470, 00, AMD Radeon Pro W7700
|
||||
747E, C8, AMD Radeon RX 7800 XT
|
||||
747E, D8, AMD Radeon RX 7800M
|
||||
747E, DB, AMD Radeon RX 7700
|
||||
747E, FF, AMD Radeon RX 7700 XT
|
||||
7480, 00, AMD Radeon Pro W7600
|
||||
7480, C0, AMD Radeon RX 7600 XT
|
||||
7480, C1, AMD Radeon RX 7700S
|
||||
7480, C2, AMD Radeon RX 7650 GRE
|
||||
7480, C3, AMD Radeon RX 7600S
|
||||
7480, C7, AMD Radeon RX 7600M XT
|
||||
7480, CF, AMD Radeon RX 7600
|
||||
7481, C7, AMD Steam Machine
|
||||
7483, CF, AMD Radeon RX 7600M
|
||||
7489, 00, AMD Radeon Pro W7500
|
||||
7499, 00, AMD Radeon Pro W7400
|
||||
7499, C0, AMD Radeon RX 7400
|
||||
7499, C1, AMD Radeon RX 7300
|
||||
74A0, 00, AMD Instinct MI300A
|
||||
74A1, 00, AMD Instinct MI300X
|
||||
74A2, 00, AMD Instinct MI308X
|
||||
74A5, 00, AMD Instinct MI325X
|
||||
74A8, 00, AMD Instinct MI308X HF
|
||||
74A9, 00, AMD Instinct MI300X HF
|
||||
74B5, 00, AMD Instinct MI300X VF
|
||||
74B6, 00, AMD Instinct MI308X
|
||||
74BD, 00, AMD Instinct MI300X HF
|
||||
7550, C0, AMD Radeon RX 9070 XT
|
||||
7550, C2, AMD Radeon RX 9070 GRE
|
||||
7550, C3, AMD Radeon RX 9070
|
||||
7551, C0, AMD Radeon AI PRO R9700
|
||||
7551, C8, AMD Radeon AI PRO R9600D
|
||||
7590, C0, AMD Radeon RX 9060 XT
|
||||
7590, C1, AMD Radeon RX 9060 XT LP
|
||||
7590, C7, AMD Radeon RX 9060
|
||||
75A0, 00, AMD Instinct MI350X
|
||||
75A3, 00, AMD Instinct MI355X
|
||||
75B0, 00, AMD Instinct MI350X VF
|
||||
75B3, 00, AMD Instinct MI355X VF
|
||||
9830, 00, AMD Radeon HD 8400 / R3 Series
|
||||
9831, 00, AMD Radeon HD 8400E
|
||||
9832, 00, AMD Radeon HD 8330
|
||||
9833, 00, AMD Radeon HD 8330E
|
||||
9834, 00, AMD Radeon HD 8210
|
||||
9835, 00, AMD Radeon HD 8210E
|
||||
9836, 00, AMD Radeon HD 8200 / R3 Series
|
||||
9837, 00, AMD Radeon HD 8280E
|
||||
9838, 00, AMD Radeon HD 8200 / R3 series
|
||||
9839, 00, AMD Radeon HD 8180
|
||||
983D, 00, AMD Radeon HD 8250
|
||||
9850, 00, AMD Radeon R3 Graphics
|
||||
9850, 03, AMD Radeon R3 Graphics
|
||||
9850, 40, AMD Radeon R2 Graphics
|
||||
9850, 45, AMD Radeon R3 Graphics
|
||||
9851, 00, AMD Radeon R4 Graphics
|
||||
9851, 01, AMD Radeon R5E Graphics
|
||||
9851, 05, AMD Radeon R5 Graphics
|
||||
9851, 06, AMD Radeon R5E Graphics
|
||||
9851, 40, AMD Radeon R4 Graphics
|
||||
9851, 45, AMD Radeon R5 Graphics
|
||||
9852, 00, AMD Radeon R2 Graphics
|
||||
9852, 40, AMD Radeon E1 Graphics
|
||||
9853, 00, AMD Radeon R2 Graphics
|
||||
9853, 01, AMD Radeon R4E Graphics
|
||||
9853, 03, AMD Radeon R2 Graphics
|
||||
9853, 05, AMD Radeon R1E Graphics
|
||||
9853, 06, AMD Radeon R1E Graphics
|
||||
9853, 07, AMD Radeon R1E Graphics
|
||||
9853, 08, AMD Radeon R1E Graphics
|
||||
9853, 40, AMD Radeon R2 Graphics
|
||||
9854, 00, AMD Radeon R3 Graphics
|
||||
9854, 01, AMD Radeon R3E Graphics
|
||||
9854, 02, AMD Radeon R3 Graphics
|
||||
9854, 05, AMD Radeon R2 Graphics
|
||||
9854, 06, AMD Radeon R4 Graphics
|
||||
9854, 07, AMD Radeon R3 Graphics
|
||||
9855, 02, AMD Radeon R6 Graphics
|
||||
9855, 05, AMD Radeon R4 Graphics
|
||||
9856, 00, AMD Radeon R2 Graphics
|
||||
9856, 01, AMD Radeon R2E Graphics
|
||||
9856, 02, AMD Radeon R2 Graphics
|
||||
9856, 05, AMD Radeon R1E Graphics
|
||||
9856, 06, AMD Radeon R2 Graphics
|
||||
9856, 07, AMD Radeon R1E Graphics
|
||||
9856, 08, AMD Radeon R1E Graphics
|
||||
9856, 13, AMD Radeon R1E Graphics
|
||||
9874, 81, AMD Radeon R6 Graphics
|
||||
9874, 84, AMD Radeon R7 Graphics
|
||||
9874, 85, AMD Radeon R6 Graphics
|
||||
9874, 87, AMD Radeon R5 Graphics
|
||||
9874, 88, AMD Radeon R7E Graphics
|
||||
9874, 89, AMD Radeon R6E Graphics
|
||||
9874, C4, AMD Radeon R7 Graphics
|
||||
9874, C5, AMD Radeon R6 Graphics
|
||||
9874, C6, AMD Radeon R6 Graphics
|
||||
9874, C7, AMD Radeon R5 Graphics
|
||||
9874, C8, AMD Radeon R7 Graphics
|
||||
9874, C9, AMD Radeon R7 Graphics
|
||||
9874, CA, AMD Radeon R5 Graphics
|
||||
9874, CB, AMD Radeon R5 Graphics
|
||||
9874, CC, AMD Radeon R7 Graphics
|
||||
9874, CD, AMD Radeon R7 Graphics
|
||||
9874, CE, AMD Radeon R5 Graphics
|
||||
9874, E1, AMD Radeon R7 Graphics
|
||||
9874, E2, AMD Radeon R7 Graphics
|
||||
9874, E3, AMD Radeon R7 Graphics
|
||||
9874, E4, AMD Radeon R7 Graphics
|
||||
9874, E5, AMD Radeon R5 Graphics
|
||||
9874, E6, AMD Radeon R5 Graphics
|
||||
98E4, 80, AMD Radeon R5E Graphics
|
||||
98E4, 81, AMD Radeon R4E Graphics
|
||||
98E4, 83, AMD Radeon R2E Graphics
|
||||
98E4, 84, AMD Radeon R2E Graphics
|
||||
98E4, 86, AMD Radeon R1E Graphics
|
||||
98E4, C0, AMD Radeon R4 Graphics
|
||||
98E4, C1, AMD Radeon R5 Graphics
|
||||
98E4, C2, AMD Radeon R4 Graphics
|
||||
98E4, C4, AMD Radeon R5 Graphics
|
||||
98E4, C6, AMD Radeon R5 Graphics
|
||||
98E4, C8, AMD Radeon R4 Graphics
|
||||
98E4, C9, AMD Radeon R4 Graphics
|
||||
98E4, CA, AMD Radeon R5 Graphics
|
||||
98E4, D0, AMD Radeon R2 Graphics
|
||||
98E4, D1, AMD Radeon R2 Graphics
|
||||
98E4, D2, AMD Radeon R2 Graphics
|
||||
98E4, D4, AMD Radeon R2 Graphics
|
||||
98E4, D9, AMD Radeon R5 Graphics
|
||||
98E4, DA, AMD Radeon R5 Graphics
|
||||
98E4, DB, AMD Radeon R3 Graphics
|
||||
98E4, E1, AMD Radeon R3 Graphics
|
||||
98E4, E2, AMD Radeon R3 Graphics
|
||||
98E4, E9, AMD Radeon R4 Graphics
|
||||
98E4, EA, AMD Radeon R4 Graphics
|
||||
98E4, EB, AMD Radeon R3 Graphics
|
||||
98E4, EB, AMD Radeon R4 Graphics
|
||||
|
|
@ -1,27 +0,0 @@
# Copyright © 2017-2018 Intel Corporation

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

if with_amdgpu
  install_data(
    'amdgpu.ids',
    install_mode : 'rw-r--r--',
    install_dir : datadir_amdgpu,
  )
endif
@ -1,11 +0,0 @@
build = ["Android.sources.bp"]

cc_library_shared {
    name: "libdrm_etnaviv",
    defaults: [
        "libdrm_defaults",
        "libdrm_etnaviv_sources",
    ],
    vendor: true,
    shared_libs: ["libdrm"],
}
@ -1,13 +0,0 @@
// Autogenerated with Android.sources.bp.mk

cc_defaults {
    name: "libdrm_etnaviv_sources",
    srcs: [
        "etnaviv_device.c",
        "etnaviv_gpu.c",
        "etnaviv_bo.c",
        "etnaviv_bo_cache.c",
        "etnaviv_pipe.c",
        "etnaviv_cmd_stream.c",
    ],
}
@ -1,36 +0,0 @@
etna_device_new
etna_device_new_dup
etna_device_ref
etna_device_del
etna_device_fd
etna_gpu_new
etna_gpu_del
etna_gpu_get_param
etna_pipe_new
etna_pipe_del
etna_pipe_wait
etna_pipe_wait_ns
etna_bo_new
etna_bo_from_name
etna_bo_from_dmabuf
etna_bo_ref
etna_bo_del
etna_bo_get_name
etna_bo_handle
etna_bo_dmabuf
etna_bo_size
etna_bo_map
etna_bo_cpu_prep
etna_bo_cpu_fini
etna_cmd_stream_new
etna_cmd_stream_del
etna_cmd_stream_timestamp
etna_cmd_stream_flush
etna_cmd_stream_flush2
etna_cmd_stream_finish
etna_cmd_stream_perf
etna_cmd_stream_reloc
etna_perfmon_create
etna_perfmon_del
etna_perfmon_get_dom_by_name
etna_perfmon_get_sig_by_name
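For context only (not part of the symbols file above): a rough sketch of how the exported etnaviv entry points fit together, from device creation down to buffer allocation and teardown. The device node path, the use of GPU core 0, and the flag value 0 passed to etna_bo_new() are illustrative assumptions; NULL checks on each step are omitted for brevity.

#include <fcntl.h>
#include <unistd.h>
#include "etnaviv_drmif.h"

/* Open an etnaviv render node and do one allocation round-trip. */
static int etnaviv_example(const char *node)
{
    int fd = open(node, O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;

    struct etna_device *dev = etna_device_new(fd);
    struct etna_gpu *gpu = etna_gpu_new(dev, 0);          /* first core, assumed */
    struct etna_pipe *pipe = etna_pipe_new(gpu, ETNA_PIPE_3D);
    struct etna_bo *bo = etna_bo_new(dev, 4096, 0);       /* flags = 0: default caching, assumed */

    void *ptr = etna_bo_map(bo);                          /* CPU-visible mapping */
    (void)ptr;
    (void)pipe;

    etna_bo_del(bo);
    etna_pipe_del(pipe);
    etna_gpu_del(gpu);
    etna_device_del(dev);
    close(fd);
    return 0;
}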
@ -1,340 +0,0 @@
/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private void bo_del(struct etna_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
    bo->name = name;
    /* add ourself into the name table: */
    drmHashInsert(bo->dev->name_table, name, bo);
}

/* Called under table_lock */
drm_private void bo_del(struct etna_bo *bo)
{
    if (bo->map)
        drm_munmap(bo->map, bo->size);

    if (bo->name)
        drmHashDelete(bo->dev->name_table, bo->name);

    if (bo->handle) {
        drmHashDelete(bo->dev->handle_table, bo->handle);
        drmCloseBufferHandle(bo->dev->fd, bo->handle);
    }

    free(bo);
}

/* lookup a buffer from its handle, call w/ table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
    struct etna_bo *bo = NULL;

    if (!drmHashLookup(tbl, handle, (void **)&bo)) {
        /* found, incr refcnt and return: */
        bo = etna_bo_ref(bo);

        /* don't break the bucket if this bo was found in one */
        list_delinit(&bo->list);
    }

    return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
        uint32_t size, uint32_t handle, uint32_t flags)
{
    struct etna_bo *bo = calloc(sizeof(*bo), 1);

    if (!bo) {
        drmCloseBufferHandle(dev->fd, handle);
        return NULL;
    }

    bo->dev = etna_device_ref(dev);
    bo->size = size;
    bo->handle = handle;
    bo->flags = flags;
    atomic_set(&bo->refcnt, 1);
    list_inithead(&bo->list);
    /* add ourselves to the handle table: */
    drmHashInsert(dev->handle_table, handle, bo);

    return bo;
}

/* allocate a new (un-tiled) buffer object */
drm_public struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
        uint32_t flags)
{
    struct etna_bo *bo;
    int ret;
    struct drm_etnaviv_gem_new req = {
        .flags = flags,
    };

    bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
    if (bo)
        return bo;

    req.size = size;
    ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
            &req, sizeof(req));
    if (ret)
        return NULL;

    pthread_mutex_lock(&table_lock);
    bo = bo_from_handle(dev, size, req.handle, flags);
    bo->reuse = 1;
    pthread_mutex_unlock(&table_lock);

    return bo;
}

drm_public struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
    atomic_inc(&bo->refcnt);

    return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
    int ret;
    struct drm_etnaviv_gem_info req = {
        .handle = bo->handle,
    };

    ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
            &req, sizeof(req));
    if (ret) {
        return ret;
    }

    /* really all we need for now is mmap offset */
    bo->offset = req.offset;

    return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct etna_bo *etna_bo_from_name(struct etna_device *dev,
        uint32_t name)
{
    struct etna_bo *bo;
    struct drm_gem_open req = {
        .name = name,
    };

    pthread_mutex_lock(&table_lock);

    /* check name table first, to see if bo is already open: */
    bo = lookup_bo(dev->name_table, name);
    if (bo)
        goto out_unlock;

    if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
        ERROR_MSG("gem-open failed: %s", strerror(errno));
        goto out_unlock;
    }

    bo = lookup_bo(dev->handle_table, req.handle);
    if (bo)
        goto out_unlock;

    bo = bo_from_handle(dev, req.size, req.handle, 0);
    if (bo)
        set_name(bo, name);

out_unlock:
    pthread_mutex_unlock(&table_lock);

    return bo;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
drm_public struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
    struct etna_bo *bo;
    int ret, size;
    uint32_t handle;

    /* take the lock before calling drmPrimeFDToHandle to avoid
     * racing against etna_bo_del, which might invalidate the
     * returned handle.
     */
    pthread_mutex_lock(&table_lock);

    ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
    if (ret) {
        pthread_mutex_unlock(&table_lock);
        return NULL;
    }

    bo = lookup_bo(dev->handle_table, handle);
    if (bo)
        goto out_unlock;

    /* lseek() to get bo size */
    size = lseek(fd, 0, SEEK_END);
    lseek(fd, 0, SEEK_CUR);

    bo = bo_from_handle(dev, size, handle, 0);

out_unlock:
    pthread_mutex_unlock(&table_lock);

    return bo;
}

/* destroy a buffer object */
drm_public void etna_bo_del(struct etna_bo *bo)
{
    struct etna_device *dev;

    if (!bo)
        return;

    /* only dereference the bo after the NULL check above */
    dev = bo->dev;

    if (!atomic_dec_and_test(&bo->refcnt))
        return;

    pthread_mutex_lock(&table_lock);

    if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
        goto out;

    bo_del(bo);
    etna_device_del_locked(dev);
out:
    pthread_mutex_unlock(&table_lock);
}

/* get the global flink/DRI2 buffer name */
drm_public int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
    if (!bo->name) {
        struct drm_gem_flink req = {
            .handle = bo->handle,
        };
        int ret;

        ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
        if (ret) {
            return ret;
        }

        pthread_mutex_lock(&table_lock);
        set_name(bo, req.name);
        pthread_mutex_unlock(&table_lock);
        bo->reuse = 0;
    }

    *name = bo->name;

    return 0;
}

drm_public uint32_t etna_bo_handle(struct etna_bo *bo)
{
    return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int etna_bo_dmabuf(struct etna_bo *bo)
{
    int ret, prime_fd;

    ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
            &prime_fd);
    if (ret) {
        ERROR_MSG("failed to get dmabuf fd: %d", ret);
        return ret;
    }

    bo->reuse = 0;

    return prime_fd;
}

drm_public uint32_t etna_bo_size(struct etna_bo *bo)
{
    return bo->size;
}

drm_public void *etna_bo_map(struct etna_bo *bo)
{
    if (!bo->map) {
        if (!bo->offset) {
            get_buffer_info(bo);
        }

        bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                MAP_SHARED, bo->dev->fd, bo->offset);
        if (bo->map == MAP_FAILED) {
            ERROR_MSG("mmap failed: %s", strerror(errno));
            bo->map = NULL;
        }
    }

    return bo->map;
}

drm_public int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
    struct drm_etnaviv_gem_cpu_prep req = {
        .handle = bo->handle,
        .op = op,
    };

    get_abs_timeout(&req.timeout, 5000000000);

    return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
            &req, sizeof(req));
}

drm_public void etna_bo_cpu_fini(struct etna_bo *bo)
{
    struct drm_etnaviv_gem_cpu_fini req = {
        .handle = bo->handle,
    };

    drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
            &req, sizeof(req));
}
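As an aside (not part of the source file above): CPU access to a buffer object is expected to be bracketed by the etna_bo_cpu_prep()/etna_bo_cpu_fini() calls implemented above, so the kernel can synchronize against GPU use. A minimal, hedged sketch of that pattern, using only entry points shown in this file:

#include <string.h>
#include <stdint.h>
#include "etnaviv_drmif.h"

/* Illustrative only: fill a BO from the CPU, waiting for the GPU first. */
static int fill_bo(struct etna_bo *bo, uint8_t value)
{
    void *ptr = etna_bo_map(bo);
    if (!ptr)
        return -1;

    /* Blocks (up to the driver timeout) until the GPU is done with the BO. */
    int ret = etna_bo_cpu_prep(bo, DRM_ETNA_PREP_WRITE);
    if (ret)
        return ret;

    memset(ptr, value, etna_bo_size(bo));
    etna_bo_cpu_fini(bo);

    return 0;
}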
@ -1,204 +0,0 @@
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

drm_private void bo_del(struct etna_bo *bo);
drm_private extern pthread_mutex_t table_lock;

static void add_bucket(struct etna_bo_cache *cache, int size)
{
    unsigned i = cache->num_buckets;

    assert(i < ARRAY_SIZE(cache->cache_bucket));

    list_inithead(&cache->cache_bucket[i].list);
    cache->cache_bucket[i].size = size;
    cache->num_buckets++;
}

drm_private void etna_bo_cache_init(struct etna_bo_cache *cache)
{
    unsigned long size, cache_max_size = 64 * 1024 * 1024;

    /* OK, so power of two buckets was too wasteful of memory.
     * Give 3 other sizes between each power of two, to hopefully
     * cover things accurately enough. (The alternative is
     * probably to just go for exact matching of sizes, and assume
     * that for things like composited window resize the tiled
     * width/height alignment and rounding of sizes to pages will
     * get us useful cache hit rates anyway)
     */
    add_bucket(cache, 4096);
    add_bucket(cache, 4096 * 2);
    add_bucket(cache, 4096 * 3);

    /* Initialize the linked lists for BO reuse cache. */
    for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
        add_bucket(cache, size);
        add_bucket(cache, size + size * 1 / 4);
        add_bucket(cache, size + size * 2 / 4);
        add_bucket(cache, size + size * 3 / 4);
    }
}

/* Frees older cached buffers. Called under table_lock */
drm_private void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
{
    unsigned i;

    if (cache->time == time)
        return;

    for (i = 0; i < cache->num_buckets; i++) {
        struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
        struct etna_bo *bo;

        while (!LIST_IS_EMPTY(&bucket->list)) {
            bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);

            /* keep things in cache for at least 1 second: */
            if (time && ((time - bo->free_time) <= 1))
                break;

            list_del(&bo->list);
            bo_del(bo);
        }
    }

    cache->time = time;
}

static struct etna_bo_bucket *get_bucket(struct etna_bo_cache *cache, uint32_t size)
{
    unsigned i;

    /* hmm, this is what intel does, but I suppose we could calculate our
     * way to the correct bucket size rather than looping..
     */
    for (i = 0; i < cache->num_buckets; i++) {
        struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
        if (bucket->size >= size) {
            return bucket;
        }
    }

    return NULL;
}

static int is_idle(struct etna_bo *bo)
{
    return etna_bo_cpu_prep(bo,
            DRM_ETNA_PREP_READ |
            DRM_ETNA_PREP_WRITE |
            DRM_ETNA_PREP_NOSYNC) == 0;
}

static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)
{
    struct etna_bo *bo = NULL, *tmp;

    pthread_mutex_lock(&table_lock);

    if (LIST_IS_EMPTY(&bucket->list))
        goto out_unlock;

    LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {
        /* skip BOs with different flags */
        if (bo->flags != flags)
            continue;

        /* check if the first BO with matching flags is idle */
        if (is_idle(bo)) {
            list_delinit(&bo->list);
            goto out_unlock;
        }

        /* If the oldest BO is still busy, don't try younger ones */
        break;
    }

    /* There was no matching buffer found */
    bo = NULL;

out_unlock:
    pthread_mutex_unlock(&table_lock);

    return bo;
}

/* allocate a new (un-tiled) buffer object
 *
 * NOTE: size is potentially rounded up to bucket size
 */
drm_private struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache, uint32_t *size,
        uint32_t flags)
{
    struct etna_bo *bo;
    struct etna_bo_bucket *bucket;

    *size = ALIGN(*size, 4096);
    bucket = get_bucket(cache, *size);

    /* see if we can be green and recycle: */
    if (bucket) {
        *size = bucket->size;
        bo = find_in_bucket(bucket, flags);
        if (bo) {
            atomic_set(&bo->refcnt, 1);
            etna_device_ref(bo->dev);
            return bo;
        }
    }

    return NULL;
}

drm_private int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo)
{
    struct etna_bo_bucket *bucket = get_bucket(cache, bo->size);

    /* see if we can be green and recycle: */
    if (bucket) {
        struct timespec time;

        clock_gettime(CLOCK_MONOTONIC, &time);

        bo->free_time = time.tv_sec;
        list_addtail(&bo->list, &bucket->list);
        etna_bo_cache_cleanup(cache, time.tv_sec);

        /* bo's in the bucket cache don't have a ref and
         * don't hold a ref to the dev:
         */
        etna_device_del_locked(bo->dev);

        return 0;
    }

    return -1;
}
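As a worked example of the sizing scheme described in etna_bo_cache_init() above: the bucket ladder comes out to 4096, 8192 and 12288 bytes, then, for each power of two from 16384 up to the 64 MiB cap, the power itself plus three evenly spaced intermediate sizes (16384, 20480, 24576, 28672; 32768, 40960, 49152, 57344; and so on). For requests of 16 KiB and larger this keeps the rounding overhead to roughly a quarter of the requested size at worst.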
@ -1,279 +0,0 @@
/*
 * Copyright (C) 2014-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include <assert.h>

#include "etnaviv_drmif.h"
#include "etnaviv_priv.h"

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
{
    if ((nr + 1) > *max) {
        if ((*max * 2) < (nr + 1))
            *max = nr + 5;
        else
            *max = *max * 2;
        ptr = realloc(ptr, *max * sz);
    }

    return ptr;
}

#define APPEND(x, name) ({ \
    (x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
    (x)->nr_ ## name ++; \
})

static inline struct etna_cmd_stream_priv *
etna_cmd_stream_priv(struct etna_cmd_stream *stream)
{
    return (struct etna_cmd_stream_priv *)stream;
}

drm_public struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe,
        uint32_t size,
        void (*reset_notify)(struct etna_cmd_stream *stream, void *priv),
        void *priv)
{
    struct etna_cmd_stream_priv *stream = NULL;

    if (size == 0) {
        ERROR_MSG("invalid size of 0");
        goto fail;
    }

    stream = calloc(1, sizeof(*stream));
    if (!stream) {
        ERROR_MSG("allocation failed");
        goto fail;
    }

    /* allocate even number of 32-bit words */
    size = ALIGN(size, 2);

    stream->base.buffer = malloc(size * sizeof(uint32_t));
    if (!stream->base.buffer) {
        ERROR_MSG("allocation failed");
        goto fail;
    }

    stream->base.size = size;
    stream->pipe = pipe;
    stream->reset_notify = reset_notify;
    stream->reset_notify_priv = priv;

    return &stream->base;

fail:
    if (stream)
        etna_cmd_stream_del(&stream->base);

    return NULL;
}

drm_public void etna_cmd_stream_del(struct etna_cmd_stream *stream)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);

    free(stream->buffer);
    free(priv->submit.relocs);
    free(priv->submit.pmrs);
    free(priv);
}

static void reset_buffer(struct etna_cmd_stream *stream)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);

    stream->offset = 0;
    priv->submit.nr_bos = 0;
    priv->submit.nr_relocs = 0;
    priv->submit.nr_pmrs = 0;
    priv->nr_bos = 0;

    if (priv->reset_notify)
        priv->reset_notify(stream, priv->reset_notify_priv);
}

drm_public uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream)
{
    return etna_cmd_stream_priv(stream)->last_timestamp;
}

static uint32_t append_bo(struct etna_cmd_stream *stream, struct etna_bo *bo)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
    uint32_t idx;

    idx = APPEND(&priv->submit, bos);
    idx = APPEND(priv, bos);

    priv->submit.bos[idx].flags = 0;
    priv->submit.bos[idx].handle = bo->handle;

    priv->bos[idx] = etna_bo_ref(bo);

    return idx;
}

/* add (if needed) bo, return idx: */
static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
        uint32_t flags)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
    uint32_t idx;

    pthread_mutex_lock(&idx_lock);

    if (bo->current_stream == stream) {
        idx = bo->idx;
    } else {
        /* slow-path: */
        for (idx = 0; idx < priv->nr_bos; idx++)
            if (priv->bos[idx] == bo)
                break;
        if (idx == priv->nr_bos) {
            /* not found */
            idx = append_bo(stream, bo);
        }
        bo->current_stream = stream;
        bo->idx = idx;
    }
    pthread_mutex_unlock(&idx_lock);

    if (flags & ETNA_RELOC_READ)
        priv->submit.bos[idx].flags |= ETNA_SUBMIT_BO_READ;
    if (flags & ETNA_RELOC_WRITE)
        priv->submit.bos[idx].flags |= ETNA_SUBMIT_BO_WRITE;

    return idx;
}

static void flush(struct etna_cmd_stream *stream, int in_fence_fd,
        int *out_fence_fd)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
    int ret, id = priv->pipe->id;
    struct etna_gpu *gpu = priv->pipe->gpu;

    struct drm_etnaviv_gem_submit req = {
        .pipe = gpu->core,
        .exec_state = id,
        .bos = VOID2U64(priv->submit.bos),
        .nr_bos = priv->submit.nr_bos,
        .relocs = VOID2U64(priv->submit.relocs),
        .nr_relocs = priv->submit.nr_relocs,
        .pmrs = VOID2U64(priv->submit.pmrs),
        .nr_pmrs = priv->submit.nr_pmrs,
        .stream = VOID2U64(stream->buffer),
        .stream_size = stream->offset * 4, /* in bytes */
    };

    if (in_fence_fd != -1) {
        req.flags |= ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
        req.fence_fd = in_fence_fd;
    }

    if (out_fence_fd)
        req.flags |= ETNA_SUBMIT_FENCE_FD_OUT;

    ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
            &req, sizeof(req));

    if (ret)
        ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
    else
        priv->last_timestamp = req.fence;

    for (uint32_t i = 0; i < priv->nr_bos; i++) {
        struct etna_bo *bo = priv->bos[i];

        bo->current_stream = NULL;
        etna_bo_del(bo);
    }

    if (out_fence_fd)
        *out_fence_fd = req.fence_fd;
}

drm_public void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
{
    flush(stream, -1, NULL);
    reset_buffer(stream);
}

drm_public void etna_cmd_stream_flush2(struct etna_cmd_stream *stream,
        int in_fence_fd,
        int *out_fence_fd)
{
    flush(stream, in_fence_fd, out_fence_fd);
    reset_buffer(stream);
}

drm_public void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);

    flush(stream, -1, NULL);
    etna_pipe_wait(priv->pipe, priv->last_timestamp, 5000);
    reset_buffer(stream);
}

drm_public void etna_cmd_stream_reloc(struct etna_cmd_stream *stream,
        const struct etna_reloc *r)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
    struct drm_etnaviv_gem_submit_reloc *reloc;
    uint32_t idx = APPEND(&priv->submit, relocs);
    uint32_t addr = 0;

    reloc = &priv->submit.relocs[idx];

    reloc->reloc_idx = bo2idx(stream, r->bo, r->flags);
    reloc->reloc_offset = r->offset;
    reloc->submit_offset = stream->offset * 4; /* in bytes */
    reloc->flags = 0;

    etna_cmd_stream_emit(stream, addr);
}

drm_public void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p)
{
    struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
    struct drm_etnaviv_gem_submit_pmr *pmr;
    uint32_t idx = APPEND(&priv->submit, pmrs);

    pmr = &priv->submit.pmrs[idx];

    pmr->flags = p->flags;
    pmr->sequence = p->sequence;
    pmr->read_offset = p->offset;
    pmr->read_idx = bo2idx(stream, p->bo, ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE);
    pmr->domain = p->signal->domain->id;
    pmr->signal = p->signal->signal;
}
@ -1,114 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014 Etnaviv Project
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <sys/types.h>
|
||||
#include <errno.h>
|
||||
#include <sys/mman.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#include <xf86drm.h>
|
||||
#include <xf86atomic.h>
|
||||
|
||||
#include "etnaviv_priv.h"
|
||||
#include "etnaviv_drmif.h"
|
||||
|
||||
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
drm_public struct etna_device *etna_device_new(int fd)
|
||||
{
|
||||
struct etna_device *dev = calloc(sizeof(*dev), 1);
|
||||
|
||||
if (!dev)
|
||||
return NULL;
|
||||
|
||||
atomic_set(&dev->refcnt, 1);
|
||||
dev->fd = fd;
|
||||
dev->handle_table = drmHashCreate();
|
||||
dev->name_table = drmHashCreate();
|
||||
etna_bo_cache_init(&dev->bo_cache);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
/* like etna_device_new() but creates its own private dup() of the fd

|
||||
* which is close()d when the device is finalized. */
|
||||
drm_public struct etna_device *etna_device_new_dup(int fd)
|
||||
{
|
||||
int dup_fd = dup(fd);
|
||||
struct etna_device *dev = etna_device_new(dup_fd);
|
||||
|
||||
if (dev)
|
||||
dev->closefd = 1;
|
||||
else
|
||||
close(dup_fd);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
drm_public struct etna_device *etna_device_ref(struct etna_device *dev)
|
||||
{
|
||||
atomic_inc(&dev->refcnt);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
static void etna_device_del_impl(struct etna_device *dev)
|
||||
{
|
||||
etna_bo_cache_cleanup(&dev->bo_cache, 0);
|
||||
drmHashDestroy(dev->handle_table);
|
||||
drmHashDestroy(dev->name_table);
|
||||
|
||||
if (dev->closefd)
|
||||
close(dev->fd);
|
||||
|
||||
free(dev);
|
||||
}
|
||||
|
||||
drm_private void etna_device_del_locked(struct etna_device *dev)
|
||||
{
|
||||
if (!atomic_dec_and_test(&dev->refcnt))
|
||||
return;
|
||||
|
||||
etna_device_del_impl(dev);
|
||||
}
|
||||
|
||||
drm_public void etna_device_del(struct etna_device *dev)
|
||||
{
|
||||
if (!atomic_dec_and_test(&dev->refcnt))
|
||||
return;
|
||||
|
||||
pthread_mutex_lock(&table_lock);
|
||||
etna_device_del_impl(dev);
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
}
|
||||
|
||||
drm_public int etna_device_fd(struct etna_device *dev)
|
||||
{
|
||||
return dev->fd;
|
||||
}
|
||||
|
|
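The only difference between etna_device_new() and etna_device_new_dup() is fd ownership: the former borrows the caller's fd, the latter keeps a private dup() that is closed when the last reference is dropped. A minimal sketch, assuming a render-node path and libdrm's include path:

#include <fcntl.h>
#include <unistd.h>
#include <etnaviv_drmif.h>

struct etna_device *open_etna(void)
{
	/* the device path is an assumption of this sketch */
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return NULL;

	/* the device keeps its own dup(), so the original fd can be closed */
	struct etna_device *dev = etna_device_new_dup(fd);
	close(fd);
	return dev;
}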
@ -1,300 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* Copyright (C) 2015 Etnaviv Project
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ETNAVIV_DRM_H__
|
||||
#define __ETNAVIV_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints:
|
||||
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
|
||||
* user/kernel compatibility
|
||||
* 2) Keep fields aligned to their size
|
||||
* 3) Because of how drm_ioctl() works, we can add new fields at
|
||||
* the end of an ioctl if some care is taken: drm_ioctl() will
|
||||
* zero out the new fields at the tail of the ioctl, so a zero
|
||||
* value should have a backwards compatible meaning. And for
|
||||
* output params, userspace won't see the newly added output
|
||||
* fields.. so that has to be somehow ok.
|
||||
*/
|
||||
|
||||
/* timeouts are specified in clock-monotonic absolute times (to simplify
|
||||
* restarting interrupted ioctls). The following struct is logically the
|
||||
* same as 'struct timespec' but 32/64b ABI safe.
|
||||
*/
|
||||
struct drm_etnaviv_timespec {
|
||||
__s64 tv_sec; /* seconds */
|
||||
__s64 tv_nsec; /* nanoseconds */
|
||||
};
|
||||
|
||||
#define ETNAVIV_PARAM_GPU_MODEL 0x01
|
||||
#define ETNAVIV_PARAM_GPU_REVISION 0x02
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
|
||||
#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
|
||||
|
||||
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
|
||||
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
|
||||
#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
|
||||
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
|
||||
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
|
||||
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
|
||||
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
|
||||
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
|
||||
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
|
||||
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
|
||||
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
|
||||
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
|
||||
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
|
||||
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
|
||||
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
|
||||
|
||||
#define ETNA_MAX_PIPES 4
|
||||
|
||||
struct drm_etnaviv_param {
|
||||
__u32 pipe; /* in */
|
||||
__u32 param; /* in, ETNAVIV_PARAM_x */
|
||||
__u64 value; /* out (get_param) or in (set_param) */
|
||||
};
|
||||
|
||||
/*
|
||||
* GEM buffers:
|
||||
*/
|
||||
|
||||
#define ETNA_BO_CACHE_MASK 0x000f0000
|
||||
/* cache modes */
|
||||
#define ETNA_BO_CACHED 0x00010000
|
||||
#define ETNA_BO_WC 0x00020000
|
||||
#define ETNA_BO_UNCACHED 0x00040000
|
||||
/* map flags */
|
||||
#define ETNA_BO_FORCE_MMU 0x00100000
|
||||
|
||||
struct drm_etnaviv_gem_new {
|
||||
__u64 size; /* in */
|
||||
__u32 flags; /* in, mask of ETNA_BO_x */
|
||||
__u32 handle; /* out */
|
||||
};
|
||||
|
||||
struct drm_etnaviv_gem_info {
|
||||
__u32 handle; /* in */
|
||||
__u32 pad;
|
||||
__u64 offset; /* out, offset to pass to mmap() */
|
||||
};
|
||||
|
||||
#define ETNA_PREP_READ 0x01
|
||||
#define ETNA_PREP_WRITE 0x02
|
||||
#define ETNA_PREP_NOSYNC 0x04
|
||||
|
||||
struct drm_etnaviv_gem_cpu_prep {
|
||||
__u32 handle; /* in */
|
||||
__u32 op; /* in, mask of ETNA_PREP_x */
|
||||
struct drm_etnaviv_timespec timeout; /* in */
|
||||
};
|
||||
|
||||
struct drm_etnaviv_gem_cpu_fini {
|
||||
__u32 handle; /* in */
|
||||
__u32 flags; /* in, placeholder for now, no defined values */
|
||||
};
|
||||
|
||||
/*
|
||||
* Cmdstream Submission:
|
||||
*/
|
||||
|
||||
/* The value written into the cmdstream is logically:
|
||||
* relocbuf->gpuaddr + reloc_offset
|
||||
*
|
||||
* NOTE that reloc's must be sorted by order of increasing submit_offset,
|
||||
* otherwise EINVAL.
|
||||
*/
|
||||
struct drm_etnaviv_gem_submit_reloc {
|
||||
__u32 submit_offset; /* in, offset from submit_bo */
|
||||
__u32 reloc_idx; /* in, index of reloc_bo buffer */
|
||||
__u64 reloc_offset; /* in, offset from start of reloc_bo */
|
||||
__u32 flags; /* in, placeholder for now, no defined values */
|
||||
};
|
||||
|
||||
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
|
||||
* cmdstream buffer(s) themselves or reloc entries) has one (and only
|
||||
* one) entry in the submit->bos[] table.
|
||||
*
|
||||
 * As an optimization, the current buffer (gpu virtual address) can be
|
||||
* passed back through the 'presumed' field. If on a subsequent reloc,
|
||||
* userspace passes back a 'presumed' address that is still valid,
|
||||
* then patching the cmdstream for this entry is skipped. This can
|
||||
 * avoid the kernel needing to map/access the cmdstream bo in the common
|
||||
* case.
|
||||
* If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
|
||||
* field is interpreted as the fixed location to map the bo into the gpu
|
||||
* virtual address space. If the kernel is unable to map the buffer at
|
||||
* this location the submit will fail. This means userspace is responsible
|
||||
* for the whole gpu virtual address management.
|
||||
*/
|
||||
#define ETNA_SUBMIT_BO_READ 0x0001
|
||||
#define ETNA_SUBMIT_BO_WRITE 0x0002
|
||||
struct drm_etnaviv_gem_submit_bo {
|
||||
__u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u64 presumed; /* in/out, presumed buffer address */
|
||||
};
|
||||
|
||||
/* performance monitor request (pmr) */
|
||||
#define ETNA_PM_PROCESS_PRE 0x0001
|
||||
#define ETNA_PM_PROCESS_POST 0x0002
|
||||
struct drm_etnaviv_gem_submit_pmr {
|
||||
__u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */
|
||||
__u8 domain; /* in, pm domain */
|
||||
__u8 pad;
|
||||
__u16 signal; /* in, pm signal */
|
||||
__u32 sequence; /* in, sequence number */
|
||||
__u32 read_offset; /* in, offset from read_bo */
|
||||
__u32 read_idx; /* in, index of read_bo buffer */
|
||||
};
|
||||
|
||||
/* Each cmdstream submit consists of a table of buffers involved, and
|
||||
* one or more cmdstream buffers. This allows for conditional execution
|
||||
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
|
||||
*/
|
||||
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
|
||||
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
|
||||
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
|
||||
#define ETNA_SUBMIT_SOFTPIN 0x0008
|
||||
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
|
||||
ETNA_SUBMIT_FENCE_FD_IN | \
|
||||
ETNA_SUBMIT_FENCE_FD_OUT| \
|
||||
ETNA_SUBMIT_SOFTPIN)
|
||||
#define ETNA_PIPE_3D 0x00
|
||||
#define ETNA_PIPE_2D 0x01
|
||||
#define ETNA_PIPE_VG 0x02
|
||||
struct drm_etnaviv_gem_submit {
|
||||
__u32 fence; /* out */
|
||||
__u32 pipe; /* in */
|
||||
__u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */
|
||||
__u32 nr_bos; /* in, number of submit_bo's */
|
||||
__u32 nr_relocs; /* in, number of submit_reloc's */
|
||||
__u32 stream_size; /* in, cmdstream size */
|
||||
__u64 bos; /* in, ptr to array of submit_bo's */
|
||||
__u64 relocs; /* in, ptr to array of submit_reloc's */
|
||||
__u64 stream; /* in, ptr to cmdstream */
|
||||
__u32 flags; /* in, mask of ETNA_SUBMIT_x */
|
||||
__s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
|
||||
__u64 pmrs; /* in, ptr to array of submit_pmr's */
|
||||
__u32 nr_pmrs; /* in, number of submit_pmr's */
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
/* The normal way to synchronize with the GPU is just to CPU_PREP on
|
||||
* a buffer if you need to access it from the CPU (other cmdstream
|
||||
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
|
||||
* handle the required synchronization under the hood). This ioctl
|
||||
* mainly just exists as a way to implement the gallium pipe_fence
|
||||
* APIs without requiring a dummy bo to synchronize on.
|
||||
*/
|
||||
#define ETNA_WAIT_NONBLOCK 0x01
|
||||
struct drm_etnaviv_wait_fence {
|
||||
__u32 pipe; /* in */
|
||||
__u32 fence; /* in */
|
||||
__u32 flags; /* in, mask of ETNA_WAIT_x */
|
||||
__u32 pad;
|
||||
struct drm_etnaviv_timespec timeout; /* in */
|
||||
};
|
||||
|
||||
#define ETNA_USERPTR_READ 0x01
|
||||
#define ETNA_USERPTR_WRITE 0x02
|
||||
struct drm_etnaviv_gem_userptr {
|
||||
__u64 user_ptr; /* in, page aligned user pointer */
|
||||
__u64 user_size; /* in, page aligned user size */
|
||||
__u32 flags; /* in, flags */
|
||||
__u32 handle; /* out, non-zero handle */
|
||||
};
|
||||
|
||||
struct drm_etnaviv_gem_wait {
|
||||
__u32 pipe; /* in */
|
||||
__u32 handle; /* in, bo to be waited for */
|
||||
__u32 flags; /* in, mask of ETNA_WAIT_x */
|
||||
__u32 pad;
|
||||
struct drm_etnaviv_timespec timeout; /* in */
|
||||
};
|
||||
|
||||
/*
|
||||
* Performance Monitor (PM):
|
||||
*/
|
||||
|
||||
struct drm_etnaviv_pm_domain {
|
||||
__u32 pipe; /* in */
|
||||
__u8 iter; /* in/out, select pm domain at index iter */
|
||||
__u8 id; /* out, id of domain */
|
||||
__u16 nr_signals; /* out, how many signals does this domain provide */
|
||||
char name[64]; /* out, name of domain */
|
||||
};
|
||||
|
||||
struct drm_etnaviv_pm_signal {
|
||||
__u32 pipe; /* in */
|
||||
__u8 domain; /* in, pm domain index */
|
||||
__u8 pad;
|
||||
__u16 iter; /* in/out, select pm source at index iter */
|
||||
__u16 id; /* out, id of signal */
|
||||
char name[64]; /* out, name of domain */
|
||||
};
|
||||
|
||||
#define DRM_ETNAVIV_GET_PARAM 0x00
|
||||
/* placeholder:
|
||||
#define DRM_ETNAVIV_SET_PARAM 0x01
|
||||
*/
|
||||
#define DRM_ETNAVIV_GEM_NEW 0x02
|
||||
#define DRM_ETNAVIV_GEM_INFO 0x03
|
||||
#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
|
||||
#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
|
||||
#define DRM_ETNAVIV_GEM_SUBMIT 0x06
|
||||
#define DRM_ETNAVIV_WAIT_FENCE 0x07
|
||||
#define DRM_ETNAVIV_GEM_USERPTR 0x08
|
||||
#define DRM_ETNAVIV_GEM_WAIT 0x09
|
||||
#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
|
||||
#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
|
||||
#define DRM_ETNAVIV_NUM_IOCTLS 0x0c
|
||||
|
||||
#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
|
||||
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
|
||||
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
|
||||
#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
|
||||
#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __ETNAVIV_DRM_H__ */
|
||||
|
|
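For orientation, buffers can also be allocated straight through this uAPI without the etna_bo wrapper. A hedged sketch follows (hypothetical helper, example flags, error handling trimmed):

#include <string.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"

/* Sketch: allocate a 4 KiB write-combined GEM buffer and return its handle. */
static int alloc_bo_raw(int fd, uint32_t *handle)
{
	struct drm_etnaviv_gem_new req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.size = 4096;
	req.flags = ETNA_BO_WC;

	ret = drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_NEW, &req, sizeof(req));
	if (ret)
		return ret;

	*handle = req.handle;
	return 0;
}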
@ -1,214 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014-2015 Etnaviv Project
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#ifndef ETNAVIV_DRMIF_H_
|
||||
#define ETNAVIV_DRMIF_H_
|
||||
|
||||
#include <xf86drm.h>
|
||||
#include <stdint.h>
|
||||
|
||||
struct etna_bo;
|
||||
struct etna_pipe;
|
||||
struct etna_gpu;
|
||||
struct etna_device;
|
||||
struct etna_cmd_stream;
|
||||
struct etna_perfmon;
|
||||
struct etna_perfmon_domain;
|
||||
struct etna_perfmon_signal;
|
||||
|
||||
enum etna_pipe_id {
|
||||
ETNA_PIPE_3D = 0,
|
||||
ETNA_PIPE_2D = 1,
|
||||
ETNA_PIPE_VG = 2,
|
||||
ETNA_PIPE_MAX
|
||||
};
|
||||
|
||||
enum etna_param_id {
|
||||
ETNA_GPU_MODEL = 0x1,
|
||||
ETNA_GPU_REVISION = 0x2,
|
||||
ETNA_GPU_FEATURES_0 = 0x3,
|
||||
ETNA_GPU_FEATURES_1 = 0x4,
|
||||
ETNA_GPU_FEATURES_2 = 0x5,
|
||||
ETNA_GPU_FEATURES_3 = 0x6,
|
||||
ETNA_GPU_FEATURES_4 = 0x7,
|
||||
ETNA_GPU_FEATURES_5 = 0x8,
|
||||
ETNA_GPU_FEATURES_6 = 0x9,
|
||||
|
||||
ETNA_GPU_STREAM_COUNT = 0x10,
|
||||
ETNA_GPU_REGISTER_MAX = 0x11,
|
||||
ETNA_GPU_THREAD_COUNT = 0x12,
|
||||
ETNA_GPU_VERTEX_CACHE_SIZE = 0x13,
|
||||
ETNA_GPU_SHADER_CORE_COUNT = 0x14,
|
||||
ETNA_GPU_PIXEL_PIPES = 0x15,
|
||||
ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE = 0x16,
|
||||
ETNA_GPU_BUFFER_SIZE = 0x17,
|
||||
ETNA_GPU_INSTRUCTION_COUNT = 0x18,
|
||||
ETNA_GPU_NUM_CONSTANTS = 0x19,
|
||||
ETNA_GPU_NUM_VARYINGS = 0x1a
|
||||
};
|
||||
|
||||
/* bo flags: */
|
||||
#define DRM_ETNA_GEM_CACHE_CACHED 0x00010000
|
||||
#define DRM_ETNA_GEM_CACHE_WC 0x00020000
|
||||
#define DRM_ETNA_GEM_CACHE_UNCACHED 0x00040000
|
||||
#define DRM_ETNA_GEM_CACHE_MASK 0x000f0000
|
||||
/* map flags */
|
||||
#define DRM_ETNA_GEM_FORCE_MMU 0x00100000
|
||||
|
||||
/* bo access flags: (keep aligned to ETNA_PREP_x) */
|
||||
#define DRM_ETNA_PREP_READ 0x01
|
||||
#define DRM_ETNA_PREP_WRITE 0x02
|
||||
#define DRM_ETNA_PREP_NOSYNC 0x04
|
||||
|
||||
/* device functions:
|
||||
*/
|
||||
|
||||
struct etna_device *etna_device_new(int fd);
|
||||
struct etna_device *etna_device_new_dup(int fd);
|
||||
struct etna_device *etna_device_ref(struct etna_device *dev);
|
||||
void etna_device_del(struct etna_device *dev);
|
||||
int etna_device_fd(struct etna_device *dev);
|
||||
|
||||
/* gpu functions:
|
||||
*/
|
||||
|
||||
struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core);
|
||||
void etna_gpu_del(struct etna_gpu *gpu);
|
||||
int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
|
||||
uint64_t *value);
|
||||
|
||||
|
||||
/* pipe functions:
|
||||
*/
|
||||
|
||||
struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id);
|
||||
void etna_pipe_del(struct etna_pipe *pipe);
|
||||
int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms);
|
||||
int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns);
|
||||
|
||||
|
||||
/* buffer-object functions:
|
||||
*/
|
||||
|
||||
struct etna_bo *etna_bo_new(struct etna_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name);
|
||||
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd);
|
||||
struct etna_bo *etna_bo_ref(struct etna_bo *bo);
|
||||
void etna_bo_del(struct etna_bo *bo);
|
||||
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name);
|
||||
uint32_t etna_bo_handle(struct etna_bo *bo);
|
||||
int etna_bo_dmabuf(struct etna_bo *bo);
|
||||
uint32_t etna_bo_size(struct etna_bo *bo);
|
||||
void * etna_bo_map(struct etna_bo *bo);
|
||||
int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op);
|
||||
void etna_bo_cpu_fini(struct etna_bo *bo);
|
||||
|
||||
|
||||
/* cmd stream functions:
|
||||
*/
|
||||
|
||||
struct etna_cmd_stream {
|
||||
uint32_t *buffer;
|
||||
uint32_t offset; /* in 32-bit words */
|
||||
uint32_t size; /* in 32-bit words */
|
||||
};
|
||||
|
||||
struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe, uint32_t size,
|
||||
void (*reset_notify)(struct etna_cmd_stream *stream, void *priv),
|
||||
void *priv);
|
||||
void etna_cmd_stream_del(struct etna_cmd_stream *stream);
|
||||
uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream);
|
||||
void etna_cmd_stream_flush(struct etna_cmd_stream *stream);
|
||||
void etna_cmd_stream_flush2(struct etna_cmd_stream *stream, int in_fence_fd,
|
||||
int *out_fence_fd);
|
||||
void etna_cmd_stream_finish(struct etna_cmd_stream *stream);
|
||||
|
||||
static inline uint32_t etna_cmd_stream_avail(struct etna_cmd_stream *stream)
|
||||
{
|
||||
static const uint32_t END_CLEARANCE = 2; /* LINK op code */
|
||||
|
||||
return stream->size - stream->offset - END_CLEARANCE;
|
||||
}
|
||||
|
||||
static inline void etna_cmd_stream_reserve(struct etna_cmd_stream *stream, size_t n)
|
||||
{
|
||||
if (etna_cmd_stream_avail(stream) < n)
|
||||
etna_cmd_stream_flush(stream);
|
||||
}
|
||||
|
||||
static inline void etna_cmd_stream_emit(struct etna_cmd_stream *stream, uint32_t data)
|
||||
{
|
||||
stream->buffer[stream->offset++] = data;
|
||||
}
|
||||
|
||||
static inline uint32_t etna_cmd_stream_get(struct etna_cmd_stream *stream, uint32_t offset)
|
||||
{
|
||||
return stream->buffer[offset];
|
||||
}
|
||||
|
||||
static inline void etna_cmd_stream_set(struct etna_cmd_stream *stream, uint32_t offset,
|
||||
uint32_t data)
|
||||
{
|
||||
stream->buffer[offset] = data;
|
||||
}
|
||||
|
||||
static inline uint32_t etna_cmd_stream_offset(struct etna_cmd_stream *stream)
|
||||
{
|
||||
return stream->offset;
|
||||
}
|
||||
|
||||
struct etna_reloc {
|
||||
struct etna_bo *bo;
|
||||
#define ETNA_RELOC_READ 0x0001
|
||||
#define ETNA_RELOC_WRITE 0x0002
|
||||
uint32_t flags;
|
||||
uint32_t offset;
|
||||
};
|
||||
|
||||
void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r);
|
||||
|
||||
/* performance monitoring functions:
|
||||
*/
|
||||
|
||||
struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe);
|
||||
void etna_perfmon_del(struct etna_perfmon *perfmon);
|
||||
struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name);
|
||||
struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name);
|
||||
|
||||
struct etna_perf {
|
||||
#define ETNA_PM_PROCESS_PRE 0x0001
|
||||
#define ETNA_PM_PROCESS_POST 0x0002
|
||||
uint32_t flags;
|
||||
uint32_t sequence;
|
||||
struct etna_perfmon_signal *signal;
|
||||
struct etna_bo *bo;
|
||||
uint32_t offset;
|
||||
};
|
||||
|
||||
void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p);
|
||||
|
||||
#endif /* ETNAVIV_DRMIF_H_ */
|
||||
|
|
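The header above is the whole public surface of libdrm_etnaviv. As a rough orientation, a minimal sketch of how the pieces fit together is shown here; the render-node path, the placeholder command word and the omitted error handling are assumptions of the sketch, not part of the library.

#include <fcntl.h>
#include <unistd.h>
#include <etnaviv_drmif.h>	/* assumes pkg-config's -I.../libdrm */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* example path */
	if (fd < 0)
		return 1;

	struct etna_device *dev = etna_device_new(fd);
	struct etna_gpu *gpu = etna_gpu_new(dev, 0);		/* first core */
	struct etna_pipe *pipe = etna_pipe_new(gpu, ETNA_PIPE_3D);
	struct etna_cmd_stream *stream =
		etna_cmd_stream_new(pipe, 0x1000, NULL, NULL);
	struct etna_bo *bo = etna_bo_new(dev, 4096, DRM_ETNA_GEM_CACHE_WC);

	/* placeholder word; a real client emits valid GPU commands here */
	etna_cmd_stream_emit(stream, 0x0);

	/* patch a buffer address into the stream via a relocation */
	struct etna_reloc r = {
		.bo = bo,
		.flags = ETNA_RELOC_WRITE,
		.offset = 0,
	};
	etna_cmd_stream_reloc(stream, &r);

	/* submit and wait for completion */
	etna_cmd_stream_finish(stream);

	etna_bo_del(bo);
	etna_cmd_stream_del(stream);
	etna_pipe_del(pipe);
	etna_gpu_del(gpu);
	etna_device_del(dev);
	close(fd);
	return 0;
}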
@ -1,155 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Etnaviv Project
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#include "etnaviv_priv.h"
|
||||
#include "etnaviv_drmif.h"
|
||||
|
||||
static uint64_t get_param(struct etna_device *dev, uint32_t core, uint32_t param)
|
||||
{
|
||||
struct drm_etnaviv_param req = {
|
||||
.pipe = core,
|
||||
.param = param,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GET_PARAM, &req, sizeof(req));
|
||||
if (ret) {
|
||||
ERROR_MSG("get-param (%x) failed! %d (%s)", param, ret, strerror(errno));
|
||||
return 0;
|
||||
}
|
||||
|
||||
return req.value;
|
||||
}
|
||||
|
||||
drm_public struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core)
|
||||
{
|
||||
struct etna_gpu *gpu;
|
||||
|
||||
gpu = calloc(1, sizeof(*gpu));
|
||||
if (!gpu) {
|
||||
ERROR_MSG("allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
gpu->dev = dev;
|
||||
gpu->core = core;
|
||||
|
||||
gpu->model = get_param(dev, core, ETNAVIV_PARAM_GPU_MODEL);
|
||||
gpu->revision = get_param(dev, core, ETNAVIV_PARAM_GPU_REVISION);
|
||||
|
||||
if (!gpu->model)
|
||||
goto fail;
|
||||
|
||||
INFO_MSG(" GPU model: 0x%x (rev %x)", gpu->model, gpu->revision);
|
||||
|
||||
return gpu;
|
||||
fail:
|
||||
if (gpu)
|
||||
etna_gpu_del(gpu);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drm_public void etna_gpu_del(struct etna_gpu *gpu)
|
||||
{
|
||||
free(gpu);
|
||||
}
|
||||
|
||||
drm_public int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
|
||||
uint64_t *value)
|
||||
{
|
||||
struct etna_device *dev = gpu->dev;
|
||||
unsigned int core = gpu->core;
|
||||
|
||||
switch(param) {
|
||||
case ETNA_GPU_MODEL:
|
||||
*value = gpu->model;
|
||||
return 0;
|
||||
case ETNA_GPU_REVISION:
|
||||
*value = gpu->revision;
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_0:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_0);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_1:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_1);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_2:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_2);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_3:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_3);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_4:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_4);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_5:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_5);
|
||||
return 0;
|
||||
case ETNA_GPU_FEATURES_6:
|
||||
*value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_6);
|
||||
return 0;
|
||||
case ETNA_GPU_STREAM_COUNT:
|
||||
*value = get_param(dev, core, ETNA_GPU_STREAM_COUNT);
|
||||
return 0;
|
||||
case ETNA_GPU_REGISTER_MAX:
|
||||
*value = get_param(dev, core, ETNA_GPU_REGISTER_MAX);
|
||||
return 0;
|
||||
case ETNA_GPU_THREAD_COUNT:
|
||||
*value = get_param(dev, core, ETNA_GPU_THREAD_COUNT);
|
||||
return 0;
|
||||
case ETNA_GPU_VERTEX_CACHE_SIZE:
|
||||
*value = get_param(dev, core, ETNA_GPU_VERTEX_CACHE_SIZE);
|
||||
return 0;
|
||||
case ETNA_GPU_SHADER_CORE_COUNT:
|
||||
*value = get_param(dev, core, ETNA_GPU_SHADER_CORE_COUNT);
|
||||
return 0;
|
||||
case ETNA_GPU_PIXEL_PIPES:
|
||||
*value = get_param(dev, core, ETNA_GPU_PIXEL_PIPES);
|
||||
return 0;
|
||||
case ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
|
||||
*value = get_param(dev, core, ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE);
|
||||
return 0;
|
||||
case ETNA_GPU_BUFFER_SIZE:
|
||||
*value = get_param(dev, core, ETNA_GPU_BUFFER_SIZE);
|
||||
return 0;
|
||||
case ETNA_GPU_INSTRUCTION_COUNT:
|
||||
*value = get_param(dev, core, ETNA_GPU_INSTRUCTION_COUNT);
|
||||
return 0;
|
||||
case ETNA_GPU_NUM_CONSTANTS:
|
||||
*value = get_param(dev, core, ETNA_GPU_NUM_CONSTANTS);
|
||||
return 0;
|
||||
case ETNA_GPU_NUM_VARYINGS:
|
||||
*value = get_param(dev, core, ETNA_GPU_NUM_VARYINGS);
|
||||
return 0;
|
||||
|
||||
default:
|
||||
ERROR_MSG("invalid param id: %d", param);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
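etna_gpu_new() above caches only the model and revision and resolves every other etna_param_id through the get-param ioctl on demand. A small sketch of querying a few values (hypothetical helper, assuming a valid struct etna_gpu):

#include <inttypes.h>
#include <stdio.h>
#include <etnaviv_drmif.h>

/* Sketch: print a few properties of an already-created etna_gpu. */
static void print_gpu_info(struct etna_gpu *gpu)
{
	uint64_t model = 0, revision = 0, pipes = 0;

	etna_gpu_get_param(gpu, ETNA_GPU_MODEL, &model);
	etna_gpu_get_param(gpu, ETNA_GPU_REVISION, &revision);
	etna_gpu_get_param(gpu, ETNA_GPU_PIXEL_PIPES, &pipes);

	printf("GC%"PRIx64" rev 0x%"PRIx64", %"PRIu64" pixel pipe(s)\n",
	       model, revision, pipes);
}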
@ -1,185 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2017 Etnaviv Project
|
||||
* Copyright (C) 2017 Zodiac Inflight Innovations
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#include "etnaviv_priv.h"
|
||||
|
||||
static int etna_perfmon_query_signals(struct etna_perfmon *pm, struct etna_perfmon_domain *dom)
|
||||
{
|
||||
struct etna_device *dev = pm->pipe->gpu->dev;
|
||||
struct drm_etnaviv_pm_signal req = {
|
||||
.pipe = pm->pipe->id,
|
||||
.domain = dom->id
|
||||
};
|
||||
|
||||
do {
|
||||
struct etna_perfmon_signal *sig;
|
||||
int ret;
|
||||
|
||||
ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_PM_QUERY_SIG, &req, sizeof(req));
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
sig = calloc(1, sizeof(*sig));
|
||||
if (!sig)
|
||||
return -ENOMEM;
|
||||
|
||||
INFO_MSG("perfmon signal:");
|
||||
INFO_MSG("id = %d", req.id);
|
||||
INFO_MSG("name = %s", req.name);
|
||||
|
||||
sig->domain = dom;
|
||||
sig->signal = req.id;
|
||||
strncpy(sig->name, req.name, sizeof(sig->name));
|
||||
list_addtail(&sig->head, &dom->signals);
|
||||
} while (req.iter != 0xffff);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int etna_perfmon_query_domains(struct etna_perfmon *pm)
|
||||
{
|
||||
struct etna_device *dev = pm->pipe->gpu->dev;
|
||||
struct drm_etnaviv_pm_domain req = {
|
||||
.pipe = pm->pipe->id
|
||||
};
|
||||
|
||||
do {
|
||||
struct etna_perfmon_domain *dom;
|
||||
int ret;
|
||||
|
||||
ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_PM_QUERY_DOM, &req, sizeof(req));
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
dom = calloc(1, sizeof(*dom));
|
||||
if (!dom)
|
||||
return -ENOMEM;
|
||||
|
||||
list_inithead(&dom->signals);
|
||||
dom->id = req.id;
|
||||
strncpy(dom->name, req.name, sizeof(dom->name));
|
||||
list_addtail(&dom->head, &pm->domains);
|
||||
|
||||
INFO_MSG("perfmon domain:");
|
||||
INFO_MSG("id = %d", req.id);
|
||||
INFO_MSG("name = %s", req.name);
|
||||
INFO_MSG("nr_signals = %d", req.nr_signals);
|
||||
|
||||
/* Query all available signals for this domain. */
|
||||
if (req.nr_signals > 0) {
|
||||
ret = etna_perfmon_query_signals(pm, dom);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} while (req.iter != 0xff);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void etna_perfmon_free_signals(struct etna_perfmon_domain *dom)
|
||||
{
|
||||
struct etna_perfmon_signal *sig, *next;
|
||||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(sig, next, &dom->signals, head) {
|
||||
list_del(&sig->head);
|
||||
free(sig);
|
||||
}
|
||||
}
|
||||
|
||||
static void etna_perfmon_free_domains(struct etna_perfmon *pm)
|
||||
{
|
||||
struct etna_perfmon_domain *dom, *next;
|
||||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(dom, next, &pm->domains, head) {
|
||||
etna_perfmon_free_signals(dom);
|
||||
list_del(&dom->head);
|
||||
free(dom);
|
||||
}
|
||||
}
|
||||
|
||||
drm_public struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe)
|
||||
{
|
||||
struct etna_perfmon *pm;
|
||||
int ret;
|
||||
|
||||
pm = calloc(1, sizeof(*pm));
|
||||
if (!pm) {
|
||||
ERROR_MSG("allocation failed");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
list_inithead(&pm->domains);
|
||||
pm->pipe = pipe;
|
||||
|
||||
/* query all available domains and sources for this device */
|
||||
ret = etna_perfmon_query_domains(pm);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
return pm;
|
||||
|
||||
fail:
|
||||
etna_perfmon_del(pm);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drm_public void etna_perfmon_del(struct etna_perfmon *pm)
|
||||
{
|
||||
if (!pm)
|
||||
return;
|
||||
|
||||
etna_perfmon_free_domains(pm);
|
||||
free(pm);
|
||||
}
|
||||
|
||||
drm_public struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name)
|
||||
{
|
||||
struct etna_perfmon_domain *dom;
|
||||
|
||||
if (pm) {
|
||||
LIST_FOR_EACH_ENTRY(dom, &pm->domains, head) {
|
||||
if (!strcmp(dom->name, name))
|
||||
return dom;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drm_public struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name)
|
||||
{
|
||||
struct etna_perfmon_signal *signal;
|
||||
|
||||
if (dom) {
|
||||
LIST_FOR_EACH_ENTRY(signal, &dom->signals, head) {
|
||||
if (!strcmp(signal->name, name))
|
||||
return signal;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
|
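Tying the perfmon query code above to the submit path: a domain and signal are looked up by name and then sampled around a stretch of command stream via etna_cmd_stream_perf(). A hedged sketch follows; the domain and signal names are placeholders, real names come from the PM_QUERY ioctls.

#include <etnaviv_drmif.h>

/* Sketch: sample one counter into 'bo' before and after some commands.
 * "HI" / "TOTAL_CYCLES" are hypothetical names used only for illustration. */
static void sample_counter(struct etna_cmd_stream *stream,
			   struct etna_perfmon *pm, struct etna_bo *bo)
{
	struct etna_perfmon_domain *dom = etna_perfmon_get_dom_by_name(pm, "HI");
	struct etna_perfmon_signal *sig =
		etna_perfmon_get_sig_by_name(dom, "TOTAL_CYCLES");
	if (!sig)
		return;

	struct etna_perf pre = {
		.flags = ETNA_PM_PROCESS_PRE,
		.sequence = 1,
		.signal = sig,
		.bo = bo,
		.offset = 0,
	};
	struct etna_perf post = pre;
	post.flags = ETNA_PM_PROCESS_POST;
	post.offset = 4;	/* second dword of the readback buffer */

	etna_cmd_stream_perf(stream, &pre);
	/* ... emit the commands to be measured here ... */
	etna_cmd_stream_perf(stream, &post);
}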
@ -1,79 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014-2015 Etnaviv Project
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#include "etnaviv_priv.h"
|
||||
|
||||
drm_public int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms)
|
||||
{
|
||||
return etna_pipe_wait_ns(pipe, timestamp, ms * 1000000);
|
||||
}
|
||||
|
||||
drm_public int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns)
|
||||
{
|
||||
struct etna_device *dev = pipe->gpu->dev;
|
||||
int ret;
|
||||
|
||||
struct drm_etnaviv_wait_fence req = {
|
||||
.pipe = pipe->gpu->core,
|
||||
.fence = timestamp,
|
||||
};
|
||||
|
||||
if (ns == 0)
|
||||
req.flags |= ETNA_WAIT_NONBLOCK;
|
||||
|
||||
get_abs_timeout(&req.timeout, ns);
|
||||
|
||||
ret = drmCommandWrite(dev->fd, DRM_ETNAVIV_WAIT_FENCE, &req, sizeof(req));
|
||||
if (ret) {
|
||||
ERROR_MSG("wait-fence failed! %d (%s)", ret, strerror(errno));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_public void etna_pipe_del(struct etna_pipe *pipe)
|
||||
{
|
||||
free(pipe);
|
||||
}
|
||||
|
||||
drm_public struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id)
|
||||
{
|
||||
struct etna_pipe *pipe;
|
||||
|
||||
pipe = calloc(1, sizeof(*pipe));
|
||||
if (!pipe) {
|
||||
ERROR_MSG("allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
pipe->id = id;
|
||||
pipe->gpu = gpu;
|
||||
|
||||
return pipe;
|
||||
fail:
|
||||
return NULL;
|
||||
}
|
||||
|
|
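etna_pipe_wait() above is just the millisecond wrapper around etna_pipe_wait_ns(), and a timeout of 0 ns turns the call into a non-blocking poll via ETNA_WAIT_NONBLOCK. A brief sketch (hypothetical helper):

#include <etnaviv_drmif.h>

/* Sketch: check whether work up to 'timestamp' has retired without blocking,
 * then fall back to a bounded wait. */
static int wait_for_fence(struct etna_pipe *pipe, uint32_t timestamp)
{
	if (etna_pipe_wait_ns(pipe, timestamp, 0) == 0)
		return 0;				/* already signalled */

	return etna_pipe_wait(pipe, timestamp, 100);	/* wait up to 100 ms */
}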
@ -1,208 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2014-2015 Etnaviv Project
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Christian Gmeiner <christian.gmeiner@gmail.com>
|
||||
*/
|
||||
|
||||
#ifndef ETNAVIV_PRIV_H_
|
||||
#define ETNAVIV_PRIV_H_
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "libdrm_macros.h"
|
||||
#include "xf86drm.h"
|
||||
#include "xf86atomic.h"
|
||||
|
||||
#include "util_double_list.h"
|
||||
|
||||
#include "etnaviv_drmif.h"
|
||||
#include "etnaviv_drm.h"
|
||||
|
||||
struct etna_bo_bucket {
|
||||
uint32_t size;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct etna_bo_cache {
|
||||
struct etna_bo_bucket cache_bucket[14 * 4];
|
||||
unsigned num_buckets;
|
||||
time_t time;
|
||||
};
|
||||
|
||||
struct etna_device {
|
||||
int fd;
|
||||
atomic_t refcnt;
|
||||
|
||||
/* tables to keep track of bo's, to avoid "evil-twin" etna_bo objects:
|
||||
*
|
||||
* handle_table: maps handle to etna_bo
|
||||
* name_table: maps flink name to etna_bo
|
||||
*
|
||||
* We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
|
||||
* returns a new handle. So we need to figure out if the bo is already
|
||||
* open in the process first, before calling gem-open.
|
||||
*/
|
||||
void *handle_table, *name_table;
|
||||
|
||||
struct etna_bo_cache bo_cache;
|
||||
|
||||
int closefd; /* call close(fd) upon destruction */
|
||||
};
|
||||
|
||||
drm_private void etna_bo_cache_init(struct etna_bo_cache *cache);
|
||||
drm_private void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time);
|
||||
drm_private struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache,
|
||||
uint32_t *size, uint32_t flags);
|
||||
drm_private int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);
|
||||
|
||||
/* for cases where @table_lock is already held: */
|
||||
drm_private void etna_device_del_locked(struct etna_device *dev);
|
||||
|
||||
/* a GEM buffer object allocated from the DRM device */
|
||||
struct etna_bo {
|
||||
struct etna_device *dev;
|
||||
void *map; /* userspace mmap'ing (if there is one) */
|
||||
uint32_t size;
|
||||
uint32_t handle;
|
||||
uint32_t flags;
|
||||
uint32_t name; /* flink global handle (DRI2 name) */
|
||||
uint64_t offset; /* offset to mmap() */
|
||||
atomic_t refcnt;
|
||||
|
||||
/* in the common case, a bo won't be referenced by more than a single
|
||||
* command stream. So to avoid looping over all the bo's in the
|
||||
* reloc table to find the idx of a bo that might already be in the
|
||||
* table, we cache the idx in the bo. But in order to detect the
|
||||
* slow-path where bo is ref'd in multiple streams, we also must track
|
||||
* the current_stream for which the idx is valid. See bo2idx().
|
||||
*/
|
||||
struct etna_cmd_stream *current_stream;
|
||||
uint32_t idx;
|
||||
|
||||
int reuse;
|
||||
struct list_head list; /* bucket-list entry */
|
||||
time_t free_time; /* time when added to bucket-list */
|
||||
};
|
||||
|
||||
struct etna_gpu {
|
||||
struct etna_device *dev;
|
||||
uint32_t core;
|
||||
uint32_t model;
|
||||
uint32_t revision;
|
||||
};
|
||||
|
||||
struct etna_pipe {
|
||||
enum etna_pipe_id id;
|
||||
struct etna_gpu *gpu;
|
||||
};
|
||||
|
||||
struct etna_cmd_stream_priv {
|
||||
struct etna_cmd_stream base;
|
||||
struct etna_pipe *pipe;
|
||||
|
||||
uint32_t last_timestamp;
|
||||
|
||||
/* submit ioctl related tables: */
|
||||
struct {
|
||||
/* bo's table: */
|
||||
struct drm_etnaviv_gem_submit_bo *bos;
|
||||
uint32_t nr_bos, max_bos;
|
||||
|
||||
/* reloc's table: */
|
||||
struct drm_etnaviv_gem_submit_reloc *relocs;
|
||||
uint32_t nr_relocs, max_relocs;
|
||||
|
||||
/* perf's table: */
|
||||
struct drm_etnaviv_gem_submit_pmr *pmrs;
|
||||
uint32_t nr_pmrs, max_pmrs;
|
||||
} submit;
|
||||
|
||||
/* should have matching entries in submit.bos: */
|
||||
struct etna_bo **bos;
|
||||
uint32_t nr_bos, max_bos;
|
||||
|
||||
/* notify callback if buffer reset happened */
|
||||
void (*reset_notify)(struct etna_cmd_stream *stream, void *priv);
|
||||
void *reset_notify_priv;
|
||||
};
|
||||
|
||||
struct etna_perfmon {
|
||||
struct list_head domains;
|
||||
struct etna_pipe *pipe;
|
||||
};
|
||||
|
||||
struct etna_perfmon_domain
|
||||
{
|
||||
struct list_head head;
|
||||
struct list_head signals;
|
||||
uint8_t id;
|
||||
char name[64];
|
||||
};
|
||||
|
||||
struct etna_perfmon_signal
|
||||
{
|
||||
struct list_head head;
|
||||
struct etna_perfmon_domain *domain;
|
||||
uint8_t signal;
|
||||
char name[64];
|
||||
};
|
||||
|
||||
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
|
||||
|
||||
#define enable_debug 1 /* TODO make dynamic */
|
||||
|
||||
#define INFO_MSG(fmt, ...) \
|
||||
do { drmMsg("[I] "fmt " (%s:%d)\n", \
|
||||
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
|
||||
#define DEBUG_MSG(fmt, ...) \
|
||||
do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
|
||||
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
|
||||
#define WARN_MSG(fmt, ...) \
|
||||
do { drmMsg("[W] "fmt " (%s:%d)\n", \
|
||||
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
|
||||
#define ERROR_MSG(fmt, ...) \
|
||||
do { drmMsg("[E] " fmt " (%s:%d)\n", \
|
||||
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
|
||||
|
||||
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
|
||||
|
||||
static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
|
||||
{
|
||||
struct timespec t;
|
||||
uint32_t s = ns / 1000000000;
|
||||
clock_gettime(CLOCK_MONOTONIC, &t);
|
||||
tv->tv_sec = t.tv_sec + s;
|
||||
tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
|
||||
}
|
||||
|
||||
#endif /* ETNAVIV_PRIV_H_ */
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
prefix=@prefix@
|
||||
exec_prefix=@exec_prefix@
|
||||
libdir=@libdir@
|
||||
includedir=@includedir@
|
||||
|
||||
Name: libdrm_etnaviv
|
||||
Description: Userspace interface to etnaviv kernel DRM services
|
||||
Version: @PACKAGE_VERSION@
|
||||
Libs: -L${libdir} -ldrm_etnaviv
|
||||
Cflags: -I${includedir} -I${includedir}/libdrm
|
||||
Requires.private: libdrm
|
||||
|
|
@ -1,64 +0,0 @@
|
|||
# Copyright © 2017-2018 Intel Corporation
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
|
||||
libdrm_etnaviv = library(
|
||||
'drm_etnaviv',
|
||||
[
|
||||
files(
|
||||
'etnaviv_device.c', 'etnaviv_gpu.c', 'etnaviv_bo.c', 'etnaviv_bo_cache.c',
|
||||
'etnaviv_perfmon.c', 'etnaviv_pipe.c', 'etnaviv_cmd_stream.c',
|
||||
),
|
||||
config_file
|
||||
],
|
||||
include_directories : [inc_root, inc_drm],
|
||||
link_with : libdrm,
|
||||
c_args : libdrm_c_args,
|
||||
gnu_symbol_visibility : 'hidden',
|
||||
dependencies : [dep_threads, dep_rt, dep_atomic_ops],
|
||||
version : '1.@0@.0'.format(patch_ver),
|
||||
install : true,
|
||||
)
|
||||
|
||||
install_headers('etnaviv_drmif.h', subdir : 'libdrm')
|
||||
|
||||
pkg.generate(
|
||||
libdrm_etnaviv,
|
||||
name : 'libdrm_etnaviv',
|
||||
subdirs : ['.', 'libdrm'],
|
||||
description : 'Userspace interface to etnaviv kernel DRM services',
|
||||
)
|
||||
|
||||
ext_libdrm_etnaviv = declare_dependency(
|
||||
link_with : [libdrm, libdrm_etnaviv],
|
||||
include_directories : [inc_drm, include_directories('.')],
|
||||
)
|
||||
|
||||
meson.override_dependency('libdrm_etnaviv', ext_libdrm_etnaviv)
|
||||
|
||||
test(
|
||||
'etnaviv-symbols-check',
|
||||
symbols_check,
|
||||
args : [
|
||||
'--lib', libdrm_etnaviv,
|
||||
'--symbols-file', files('etnaviv-symbols.txt'),
|
||||
'--nm', prog_nm.full_path(),
|
||||
],
|
||||
)
|
||||
27
exynos/Makefile.am
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
AM_CFLAGS = \
|
||||
$(WARN_CFLAGS) \
|
||||
-I$(top_srcdir) \
|
||||
$(PTHREADSTUBS_CFLAGS) \
|
||||
-I$(top_srcdir)/include/drm
|
||||
|
||||
libdrm_exynos_la_LTLIBRARIES = libdrm_exynos.la
|
||||
libdrm_exynos_ladir = $(libdir)
|
||||
libdrm_exynos_la_LDFLAGS = -version-number 1:0:0 -no-undefined
|
||||
libdrm_exynos_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
|
||||
|
||||
libdrm_exynos_la_SOURCES = \
|
||||
exynos_drm.c \
|
||||
exynos_fimg2d.c \
|
||||
fimg2d_reg.h
|
||||
|
||||
libdrm_exynoscommonincludedir = ${includedir}/exynos
|
||||
libdrm_exynoscommoninclude_HEADERS = exynos_drm.h exynos_fimg2d.h
|
||||
|
||||
libdrm_exynosincludedir = ${includedir}/libdrm
|
||||
libdrm_exynosinclude_HEADERS = exynos_drmif.h
|
||||
|
||||
pkgconfigdir = @pkgconfigdir@
|
||||
pkgconfig_DATA = libdrm_exynos.pc
|
||||
|
||||
TESTS = exynos-symbol-check
|
||||
EXTRA_DIST = $(TESTS)
|
||||
37
exynos/exynos-symbol-check
Executable file
|
|
@ -0,0 +1,37 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The following symbols (past the first five) are taken from the public headers.
|
||||
# A list of the latter should be available in Makefile.am (libdrm_exynos*_HEADERS).
|
||||
|
||||
FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_exynos.so} | awk '{print $3}'| while read func; do
|
||||
( grep -q "^$func$" || echo $func ) <<EOF
|
||||
__bss_start
|
||||
_edata
|
||||
_end
|
||||
_fini
|
||||
_init
|
||||
exynos_bo_create
|
||||
exynos_bo_destroy
|
||||
exynos_bo_from_name
|
||||
exynos_bo_get_info
|
||||
exynos_bo_get_name
|
||||
exynos_bo_handle
|
||||
exynos_bo_map
|
||||
exynos_device_create
|
||||
exynos_device_destroy
|
||||
exynos_prime_fd_to_handle
|
||||
exynos_prime_handle_to_fd
|
||||
exynos_vidi_connection
|
||||
g2d_blend
|
||||
g2d_copy
|
||||
g2d_copy_with_scale
|
||||
g2d_exec
|
||||
g2d_fini
|
||||
g2d_init
|
||||
g2d_scale_and_blend
|
||||
g2d_solid_fill
|
||||
EOF
|
||||
done)
|
||||
|
||||
test ! -n "$FUNCS" || echo $FUNCS
|
||||
test ! -n "$FUNCS"
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
exynos_bo_create
|
||||
exynos_bo_destroy
|
||||
exynos_bo_from_name
|
||||
exynos_bo_get_info
|
||||
exynos_bo_get_name
|
||||
exynos_bo_handle
|
||||
exynos_bo_map
|
||||
exynos_device_create
|
||||
exynos_device_destroy
|
||||
exynos_prime_fd_to_handle
|
||||
exynos_prime_handle_to_fd
|
||||
exynos_vidi_connection
|
||||
exynos_handle_event
|
||||
g2d_blend
|
||||
g2d_copy
|
||||
g2d_copy_with_scale
|
||||
g2d_exec
|
||||
g2d_config_event
|
||||
g2d_fini
|
||||
g2d_init
|
||||
g2d_move
|
||||
g2d_scale_and_blend
|
||||
g2d_solid_fill
|
||||
|
|
@ -24,13 +24,17 @@
|
|||
* Inki Dae <inki.dae@samsung.com>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <linux/stddef.h>
|
||||
|
||||
#include <xf86drm.h>
|
||||
|
||||
|
|
@ -38,8 +42,6 @@
|
|||
#include "exynos_drm.h"
|
||||
#include "exynos_drmif.h"
|
||||
|
||||
#define U642VOID(x) ((void *)(unsigned long)(x))
|
||||
|
||||
/*
|
||||
* Create exynos drm device object.
|
||||
*
|
||||
|
|
@ -47,7 +49,7 @@
|
|||
*
|
||||
* if true, return the device object else NULL.
|
||||
*/
|
||||
drm_public struct exynos_device * exynos_device_create(int fd)
struct exynos_device * exynos_device_create(int fd)
{
	struct exynos_device *dev;

@@ -68,7 +70,7 @@ drm_public struct exynos_device * exynos_device_create(int fd)
 *
 * @dev: exynos drm device object.
 */
drm_public void exynos_device_destroy(struct exynos_device *dev)
void exynos_device_destroy(struct exynos_device *dev)
{
	free(dev);
}

@@ -86,8 +88,8 @@ drm_public void exynos_device_destroy(struct exynos_device *dev)
 *
 * if true, return a exynos buffer object else NULL.
 */
drm_public struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
		size_t size, uint32_t flags)
struct exynos_bo * exynos_bo_create(struct exynos_device *dev,
		size_t size, uint32_t flags)
{
	struct exynos_bo *bo;
	struct drm_exynos_gem_create req = {

@@ -140,8 +142,8 @@ fail:
 *
 * if true, return 0 else negative.
 */
drm_public int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
		size_t *size, uint32_t *flags)
int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
		size_t *size, uint32_t *flags)
{
	int ret;
	struct drm_exynos_gem_info req = {

@@ -166,7 +168,7 @@ drm_public int exynos_bo_get_info(struct exynos_device *dev, uint32_t handle,
 *
 * @bo: a exynos buffer object to be destroyed.
 */
drm_public void exynos_bo_destroy(struct exynos_bo *bo)
void exynos_bo_destroy(struct exynos_bo *bo)
{
	if (!bo)
		return;

@@ -175,7 +177,11 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
		munmap(bo->vaddr, bo->size);

	if (bo->handle) {
		drmCloseBufferHandle(bo->dev->fd, bo->handle);
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);

@@ -194,7 +200,7 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
 * if true, return a exynos buffer object else NULL.
 *
 */
drm_public struct exynos_bo *
struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
	struct exynos_bo *bo;

@@ -237,7 +243,7 @@ err_free_bo:
 *
 * if true, return 0 else negative.
 */
drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {

@@ -260,7 +266,7 @@ drm_public int exynos_bo_get_name(struct exynos_bo *bo, uint32_t *name)
	return 0;
}

drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
uint32_t exynos_bo_handle(struct exynos_bo *bo)
{
	return bo->handle;
}

@@ -271,9 +277,9 @@ drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
 * @bo: a exynos buffer object including a gem object handle to be mmapped
 *	to user space.
 *
 * if true, user pointer mmapped else NULL.
 * if true, user pointer mmaped else NULL.
 */
drm_public void *exynos_bo_map(struct exynos_bo *bo)
void *exynos_bo_map(struct exynos_bo *bo)
{
	if (!bo->vaddr) {
		struct exynos_device *dev = bo->dev;

@@ -310,7 +316,7 @@ drm_public void *exynos_bo_map(struct exynos_bo *bo)
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
int
exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
{
	return drmPrimeHandleToFD(dev->fd, handle, 0, fd);

@@ -325,7 +331,7 @@ exynos_prime_handle_to_fd(struct exynos_device *dev, uint32_t handle, int *fd)
 *
 * @return: 0 on success, -1 on error, and errno will be set
 */
drm_public int
int
exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(dev->fd, fd, handle);

@@ -338,7 +344,7 @@ exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
 *
 * @dev: a exynos device object.
 * @connect: indicate whether connectoin or disconnection request.
 * @ext: indicate whether edid data includes extensions data or not.
 * @ext: indicate whether edid data includes extentions data or not.
 * @edid: a pointer to edid data from Wireless Display device.
 *
 * this interface is used to request Virtual Display driver connection or

@@ -348,7 +354,7 @@ exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
 *
 * if true, return 0 else negative.
 */
drm_public int
int
exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
		uint32_t ext, void *edid)
{

@@ -368,76 +374,3 @@ exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,

	return 0;
}

static void
exynos_handle_vendor(int fd, struct drm_event *e, void *ctx)
{
	struct drm_exynos_g2d_event *g2d;
	struct exynos_event_context *ectx = ctx;

	switch (e->type) {
	case DRM_EXYNOS_G2D_EVENT:
		if (ectx->version < 1 || ectx->g2d_event_handler == NULL)
			break;
		g2d = (struct drm_exynos_g2d_event *)e;
		ectx->g2d_event_handler(fd, g2d->cmdlist_no, g2d->tv_sec,
					g2d->tv_usec, U642VOID(g2d->user_data));
		break;

	default:
		break;
	}
}

drm_public int
exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
{
	char buffer[1024];
	int len, i;
	struct drm_event *e;
	struct drm_event_vblank *vblank;
	drmEventContextPtr evctx = &ctx->base;

	/* The DRM read semantics guarantees that we always get only
	 * complete events. */
	len = read(dev->fd, buffer, sizeof buffer);
	if (len == 0)
		return 0;
	if (len < (int)sizeof *e)
		return -1;

	i = 0;
	while (i < len) {
		e = (struct drm_event *)(buffer + i);
		switch (e->type) {
		case DRM_EVENT_VBLANK:
			if (evctx->version < 1 ||
			    evctx->vblank_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->vblank_handler(dev->fd,
					      vblank->sequence,
					      vblank->tv_sec,
					      vblank->tv_usec,
					      U642VOID (vblank->user_data));
			break;
		case DRM_EVENT_FLIP_COMPLETE:
			if (evctx->version < 2 ||
			    evctx->page_flip_handler == NULL)
				break;
			vblank = (struct drm_event_vblank *) e;
			evctx->page_flip_handler(dev->fd,
						 vblank->sequence,
						 vblank->tv_sec,
						 vblank->tv_usec,
						 U642VOID (vblank->user_data));
			break;
		default:
			exynos_handle_vendor(dev->fd, e, evctx);
			break;
		}
		i += e->length;
	}

	return 0;
}
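The buffer-object helpers above keep the same signatures on both sides of the diff apart from the drm_public annotation. A minimal usage sketch of that API follows; it is illustrative only and not part of the diff, the device node path, buffer size and flag value 0 are example assumptions, and error handling is trimmed.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include "exynos_drmif.h"   /* include path depends on how libdrm_exynos is installed */

int example_bo_roundtrip(void)
{
	/* "/dev/dri/card0" is only an example; open the exynos DRM node on the system. */
	int fd = open("/dev/dri/card0", O_RDWR);
	struct exynos_device *dev = exynos_device_create(fd);
	struct exynos_bo *bo;
	void *vaddr;
	int dmabuf_fd = -1, ret = -1;

	/* 64 KiB buffer; flags 0 requests the default allocation/cache attributes. */
	bo = exynos_bo_create(dev, 65536, 0);
	vaddr = bo ? exynos_bo_map(bo) : NULL;
	if (vaddr)
		memset(vaddr, 0, 65536);

	/* Export the GEM handle as a PRIME file descriptor. */
	if (bo)
		ret = exynos_prime_handle_to_fd(dev, exynos_bo_handle(bo), &dmabuf_fd);

	exynos_bo_destroy(bo);
	exynos_device_destroy(dev);
	close(fd);
	return ret;
}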
|
@@ -64,7 +64,7 @@ struct drm_exynos_gem_info
/**
 * A structure for user connection request of virtual display.
 *
 * @connection: indicate whether doing connection or not by user.
 * @connection: indicate whether doing connetion or not by user.
 * @extensions: if this value is 1 then the vidi driver would need additional
 *	128bytes edid data.
 * @edid: the edid data pointer from user side.

@@ -157,16 +157,4 @@ struct drm_exynos_g2d_exec
#define DRM_IOCTL_EXYNOS_G2D_EXEC	DRM_IOWR(DRM_COMMAND_BASE + \
		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)

/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT		0x80000000

struct drm_exynos_g2d_event {
	struct drm_event	base;
	__u64			user_data;
	__u32			tv_sec;
	__u32			tv_usec;
	__u32			cmdlist_no;
	__u32			reserved;
};

#endif

@@ -31,10 +31,6 @@
#include <stdint.h>
#include "exynos_drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct exynos_device {
	int fd;
};

@@ -46,7 +42,7 @@ struct exynos_device {
 * @handle: a gem handle to gem object created.
 * @flags: indicate memory allocation and cache attribute types.
 * @size: size to the buffer created.
 * @vaddr: user space address to a gem buffer mmapped.
 * @vaddr: user space address to a gem buffer mmaped.
 * @name: a gem global handle from flink request.
 */
struct exynos_bo {

@@ -58,25 +54,6 @@ struct exynos_bo {
	uint32_t	name;
};

#define EXYNOS_EVENT_CONTEXT_VERSION 1

/*
 * Exynos Event Context structure.
 *
 * @base: base context (for core events).
 * @version: version info similar to the one in 'drmEventContext'.
 * @g2d_event_handler: handler for G2D events.
 */
struct exynos_event_context {
	drmEventContext base;

	int version;

	void (*g2d_event_handler)(int fd, unsigned int cmdlist_no,
				  unsigned int tv_sec, unsigned int tv_usec,
				  void *user_data);
};

/*
 * device related functions:
 */

@@ -106,15 +83,4 @@ int exynos_prime_fd_to_handle(struct exynos_device *dev, int fd,
int exynos_vidi_connection(struct exynos_device *dev, uint32_t connect,
			uint32_t ext, void *edid);

/*
 * event handling related functions:
 */
int exynos_handle_event(struct exynos_device *dev,
		struct exynos_event_context *ctx);


#if defined(__cplusplus)
}
#endif

#endif /* EXYNOS_DRMIF_H_ */
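The event-context block removed above (present on the main side, absent in 2.4.64) is how clients receive G2D completion events alongside the core vblank and page-flip events. A minimal sketch of driving that API follows; it is illustrative only, not part of the diff, assumes `fd` is an opened exynos DRM device, and uses only the declarations shown above.

#include <poll.h>
#include <xf86drm.h>
#include "exynos_drmif.h"

static void g2d_done(int fd, unsigned int cmdlist_no, unsigned int tv_sec,
		     unsigned int tv_usec, void *user_data)
{
	/* Invoked by exynos_handle_event() for DRM_EXYNOS_G2D_EVENT. */
	(void)fd; (void)cmdlist_no; (void)tv_sec; (void)tv_usec; (void)user_data;
}

static int wait_for_g2d_event(struct exynos_device *dev, int fd)
{
	struct exynos_event_context ctx = {
		.base = { .version = DRM_EVENT_CONTEXT_VERSION },
		.version = EXYNOS_EVENT_CONTEXT_VERSION,
		.g2d_event_handler = g2d_done,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* Block until the DRM fd is readable, then let the helper decode core
	 * events and hand vendor (G2D) events to g2d_done(). */
	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	return exynos_handle_event(dev, &ctx);
}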
|||
|
|
@ -3,33 +3,24 @@
|
|||
* Authors:
|
||||
* Inki Dae <inki.dae@samsung.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <linux/stddef.h>
|
||||
|
||||
#include <xf86drm.h>
|
||||
|
||||
|
|
@ -50,67 +41,11 @@
|
|||
|
||||
#define MIN(a, b) ((a) < (b) ? (a) : (b))
|
||||
|
||||
#define MSG_PREFIX "exynos/fimg2d: "
|
||||
|
||||
#define G2D_MAX_CMD_NR 64
|
||||
#define G2D_MAX_GEM_CMD_NR 64
|
||||
#define G2D_MAX_CMD_LIST_NR 64
|
||||
|
||||
struct g2d_context {
|
||||
int fd;
|
||||
unsigned int major;
|
||||
unsigned int minor;
|
||||
struct drm_exynos_g2d_cmd cmd[G2D_MAX_CMD_NR];
|
||||
struct drm_exynos_g2d_cmd cmd_buf[G2D_MAX_GEM_CMD_NR];
|
||||
unsigned int cmd_nr;
|
||||
unsigned int cmd_buf_nr;
|
||||
unsigned int cmdlist_nr;
|
||||
void *event_userdata;
|
||||
};
|
||||
|
||||
enum g2d_base_addr_reg {
|
||||
g2d_dst = 0,
|
||||
g2d_src
|
||||
};
|
||||
|
||||
enum e_g2d_dir_mode {
|
||||
G2D_DIR_MODE_POSITIVE = 0,
|
||||
G2D_DIR_MODE_NEGATIVE = 1
|
||||
};
|
||||
|
||||
union g2d_direction_val {
|
||||
unsigned int val[2];
|
||||
struct {
|
||||
/* SRC_MSK_DIRECT_REG [0:1] (source) */
|
||||
enum e_g2d_dir_mode src_x_direction:1;
|
||||
enum e_g2d_dir_mode src_y_direction:1;
|
||||
|
||||
/* SRC_MSK_DIRECT_REG [2:3] */
|
||||
unsigned int reversed1:2;
|
||||
|
||||
/* SRC_MSK_DIRECT_REG [4:5] (mask) */
|
||||
enum e_g2d_dir_mode mask_x_direction:1;
|
||||
enum e_g2d_dir_mode mask_y_direction:1;
|
||||
|
||||
/* SRC_MSK_DIRECT_REG [6:31] */
|
||||
unsigned int padding1:26;
|
||||
|
||||
/* DST_PAT_DIRECT_REG [0:1] (destination) */
|
||||
enum e_g2d_dir_mode dst_x_direction:1;
|
||||
enum e_g2d_dir_mode dst_y_direction:1;
|
||||
|
||||
/* DST_PAT_DIRECT_REG [2:3] */
|
||||
unsigned int reversed2:2;
|
||||
|
||||
/* DST_PAT_DIRECT_REG [4:5] (pattern) */
|
||||
enum e_g2d_dir_mode pat_x_direction:1;
|
||||
enum e_g2d_dir_mode pat_y_direction:1;
|
||||
|
||||
/* DST_PAT_DIRECT_REG [6:31] */
|
||||
unsigned int padding2:26;
|
||||
} data;
|
||||
};
|
||||
|
||||
static unsigned int g2d_get_scaling(unsigned int src, unsigned int dst)
|
||||
{
|
||||
/*
|
||||
|
|
@ -129,11 +64,6 @@ static unsigned int g2d_get_blend_op(enum e_g2d_op op)
|
|||
|
||||
val.val = 0;
|
||||
|
||||
/*
|
||||
* The switch statement is missing the default branch since
|
||||
* we assume that the caller checks the blending operation
|
||||
* via g2d_validate_blending_op() first.
|
||||
*/
|
||||
switch (op) {
|
||||
case G2D_OP_CLEAR:
|
||||
case G2D_OP_DISJOINT_CLEAR:
|
||||
|
|
@ -161,87 +91,24 @@ static unsigned int g2d_get_blend_op(enum e_g2d_op op)
|
|||
SET_BF(val, G2D_COEFF_MODE_SRC_ALPHA, 0, 0, 0,
|
||||
G2D_COEFF_MODE_SRC_ALPHA, 1, 0, 0);
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Not support operation(%d).\n", op);
|
||||
SET_BF(val, G2D_COEFF_MODE_ONE, 0, 0, 0, G2D_COEFF_MODE_ZERO,
|
||||
0, 0, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
return val.val;
|
||||
}
|
||||
|
||||
/*
|
||||
* g2d_check_space - check if command buffers have enough space left.
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
* @num_cmds: number of (regular) commands.
|
||||
* @num_gem_cmds: number of GEM commands.
|
||||
*/
|
||||
static unsigned int g2d_check_space(const struct g2d_context *ctx,
|
||||
unsigned int num_cmds, unsigned int num_gem_cmds)
|
||||
{
|
||||
if (ctx->cmd_nr + num_cmds >= G2D_MAX_CMD_NR ||
|
||||
ctx->cmd_buf_nr + num_gem_cmds >= G2D_MAX_GEM_CMD_NR)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* g2d_validate_select_mode - validate select mode.
|
||||
*
|
||||
* @mode: the mode to validate
|
||||
*
|
||||
* Returns zero for an invalid mode and one otherwise.
|
||||
*/
|
||||
static int g2d_validate_select_mode(
|
||||
enum e_g2d_select_mode mode)
|
||||
{
|
||||
switch (mode) {
|
||||
case G2D_SELECT_MODE_NORMAL:
|
||||
case G2D_SELECT_MODE_FGCOLOR:
|
||||
case G2D_SELECT_MODE_BGCOLOR:
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* g2d_validate_blending_op - validate blending operation.
|
||||
*
|
||||
* @operation: the operation to validate
|
||||
*
|
||||
* Returns zero for an invalid mode and one otherwise.
|
||||
*/
|
||||
static int g2d_validate_blending_op(
|
||||
enum e_g2d_op operation)
|
||||
{
|
||||
switch (operation) {
|
||||
case G2D_OP_CLEAR:
|
||||
case G2D_OP_SRC:
|
||||
case G2D_OP_DST:
|
||||
case G2D_OP_OVER:
|
||||
case G2D_OP_INTERPOLATE:
|
||||
case G2D_OP_DISJOINT_CLEAR:
|
||||
case G2D_OP_DISJOINT_SRC:
|
||||
case G2D_OP_DISJOINT_DST:
|
||||
case G2D_OP_CONJOINT_CLEAR:
|
||||
case G2D_OP_CONJOINT_SRC:
|
||||
case G2D_OP_CONJOINT_DST:
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* g2d_add_cmd - set given command and value to user side command buffer.
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
* @cmd: command data.
|
||||
* @value: value data.
|
||||
*
|
||||
* The caller has to make sure that the commands buffers have enough space
|
||||
* left to hold the command. Use g2d_check_space() to ensure this.
|
||||
*/
|
||||
static void g2d_add_cmd(struct g2d_context *ctx, unsigned long cmd,
|
||||
static int g2d_add_cmd(struct g2d_context *ctx, unsigned long cmd,
|
||||
unsigned long value)
|
||||
{
|
||||
switch (cmd & ~(G2D_BUF_USERPTR)) {
|
||||
|
|
@ -251,20 +118,28 @@ static void g2d_add_cmd(struct g2d_context *ctx, unsigned long cmd,
|
|||
case DST_PLANE2_BASE_ADDR_REG:
|
||||
case PAT_BASE_ADDR_REG:
|
||||
case MASK_BASE_ADDR_REG:
|
||||
assert(ctx->cmd_buf_nr < G2D_MAX_GEM_CMD_NR);
|
||||
if (ctx->cmd_buf_nr >= G2D_MAX_GEM_CMD_NR) {
|
||||
fprintf(stderr, "Overflow cmd_gem size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->cmd_buf[ctx->cmd_buf_nr].offset = cmd;
|
||||
ctx->cmd_buf[ctx->cmd_buf_nr].data = value;
|
||||
ctx->cmd_buf_nr++;
|
||||
break;
|
||||
default:
|
||||
assert(ctx->cmd_nr < G2D_MAX_CMD_NR);
|
||||
if (ctx->cmd_nr >= G2D_MAX_CMD_NR) {
|
||||
fprintf(stderr, "Overflow cmd size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->cmd[ctx->cmd_nr].offset = cmd;
|
||||
ctx->cmd[ctx->cmd_nr].data = value;
|
||||
ctx->cmd_nr++;
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -288,16 +163,17 @@ static void g2d_add_base_addr(struct g2d_context *ctx, struct g2d_image *img,
|
|||
}
|
||||
|
||||
/*
|
||||
* g2d_set_direction - setup direction register (useful for overlapping blits).
|
||||
* g2d_reset - reset fimg2d hardware.
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
* @dir: a pointer to the g2d_direction_val structure.
|
||||
*
|
||||
*/
|
||||
static void g2d_set_direction(struct g2d_context *ctx,
|
||||
const union g2d_direction_val *dir)
|
||||
static void g2d_reset(struct g2d_context *ctx)
|
||||
{
|
||||
g2d_add_cmd(ctx, SRC_MASK_DIRECT_REG, dir->val[0]);
|
||||
g2d_add_cmd(ctx, DST_PAT_DIRECT_REG, dir->val[1]);
|
||||
ctx->cmd_nr = 0;
|
||||
ctx->cmd_buf_nr = 0;
|
||||
|
||||
g2d_add_cmd(ctx, SOFT_RESET_REG, 0x01);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -315,10 +191,10 @@ static int g2d_flush(struct g2d_context *ctx)
|
|||
struct drm_exynos_g2d_set_cmdlist cmdlist = {0};
|
||||
|
||||
if (ctx->cmd_nr == 0 && ctx->cmd_buf_nr == 0)
|
||||
return 0;
|
||||
return -1;
|
||||
|
||||
if (ctx->cmdlist_nr >= G2D_MAX_CMD_LIST_NR) {
|
||||
fprintf(stderr, MSG_PREFIX "command list overflow.\n");
|
||||
fprintf(stderr, "Overflow cmdlist.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
@ -326,22 +202,15 @@ static int g2d_flush(struct g2d_context *ctx)
|
|||
cmdlist.cmd_buf = (uint64_t)(uintptr_t)&ctx->cmd_buf[0];
|
||||
cmdlist.cmd_nr = ctx->cmd_nr;
|
||||
cmdlist.cmd_buf_nr = ctx->cmd_buf_nr;
|
||||
|
||||
if (ctx->event_userdata) {
|
||||
cmdlist.event_type = G2D_EVENT_NONSTOP;
|
||||
cmdlist.user_data = (uint64_t)(uintptr_t)(ctx->event_userdata);
|
||||
ctx->event_userdata = NULL;
|
||||
} else {
|
||||
cmdlist.event_type = G2D_EVENT_NOT;
|
||||
cmdlist.user_data = 0;
|
||||
}
|
||||
cmdlist.event_type = G2D_EVENT_NOT;
|
||||
cmdlist.user_data = 0;
|
||||
|
||||
ctx->cmd_nr = 0;
|
||||
ctx->cmd_buf_nr = 0;
|
||||
|
||||
ret = drmIoctl(ctx->fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &cmdlist);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, MSG_PREFIX "failed to set cmdlist.\n");
|
||||
fprintf(stderr, "failed to set cmdlist.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -355,7 +224,7 @@ static int g2d_flush(struct g2d_context *ctx)
|
|||
*
|
||||
* fd: a file descriptor to an opened drm device.
|
||||
*/
|
||||
drm_public struct g2d_context *g2d_init(int fd)
|
||||
struct g2d_context *g2d_init(int fd)
|
||||
{
|
||||
struct drm_exynos_g2d_get_ver ver;
|
||||
struct g2d_context *ctx;
|
||||
|
|
@ -363,7 +232,7 @@ drm_public struct g2d_context *g2d_init(int fd)
|
|||
|
||||
ctx = calloc(1, sizeof(*ctx));
|
||||
if (!ctx) {
|
||||
fprintf(stderr, MSG_PREFIX "failed to allocate context.\n");
|
||||
fprintf(stderr, "failed to allocate context.\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
@ -371,7 +240,7 @@ drm_public struct g2d_context *g2d_init(int fd)
|
|||
|
||||
ret = drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, MSG_PREFIX "failed to get version.\n");
|
||||
fprintf(stderr, "failed to get version.\n");
|
||||
free(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -379,37 +248,21 @@ drm_public struct g2d_context *g2d_init(int fd)
|
|||
ctx->major = ver.major;
|
||||
ctx->minor = ver.minor;
|
||||
|
||||
printf(MSG_PREFIX "G2D version (%d.%d).\n", ctx->major, ctx->minor);
|
||||
printf("g2d version(%d.%d).\n", ctx->major, ctx->minor);
|
||||
return ctx;
|
||||
}
|
||||
|
||||
drm_public void g2d_fini(struct g2d_context *ctx)
|
||||
void g2d_fini(struct g2d_context *ctx)
|
||||
{
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
* g2d_config_event - setup userdata configuration for a g2d event.
|
||||
* The next invocation of a g2d call (e.g. g2d_solid_fill) is
|
||||
* then going to flag the command buffer as 'nonstop'.
|
||||
* Completion of the command buffer execution can then be
|
||||
* determined by using drmHandleEvent on the DRM fd.
|
||||
* The userdata is 'consumed' in the process.
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
* @userdata: a pointer to the user data
|
||||
*/
|
||||
drm_public void g2d_config_event(struct g2d_context *ctx, void *userdata)
|
||||
{
|
||||
ctx->event_userdata = userdata;
|
||||
}
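For context, here is a sketch of the workflow the comment above describes: configure per-submission userdata, issue a G2D operation, kick the DMA, then pick the completion up over the DRM fd. It is illustrative only, not part of the diff; it assumes an already prepared struct g2d_image 'dst' backed by an exynos buffer, and uses only entry points whose signatures appear in this file.

/* Illustrative sketch (not part of the diff): fill a rectangle and request
 * a completion event for that command list. */
static int fill_with_event(int drm_fd, struct g2d_image *dst)
{
	struct g2d_context *g2d = g2d_init(drm_fd);
	int ret;

	if (!g2d)
		return -1;

	/* Any pointer works as userdata; it is returned in the G2D event. */
	g2d_config_event(g2d, (void *)0x1);

	ret = g2d_solid_fill(g2d, dst, 0, 0, dst->width, dst->height);
	if (ret == 0)
		ret = g2d_exec(g2d);

	/* Completion is then reported through the DRM fd, e.g. via
	 * drmHandleEvent() or exynos_handle_event(). */
	g2d_fini(g2d);
	return ret;
}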
|
||||
|
||||
/**
|
||||
* g2d_exec - start the dma to process all commands summited by g2d_flush().
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
*/
|
||||
drm_public int g2d_exec(struct g2d_context *ctx)
|
||||
int g2d_exec(struct g2d_context *ctx)
|
||||
{
|
||||
struct drm_exynos_g2d_exec exec;
|
||||
int ret;
|
||||
|
|
@ -421,7 +274,7 @@ drm_public int g2d_exec(struct g2d_context *ctx)
|
|||
|
||||
ret = drmIoctl(ctx->fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, MSG_PREFIX "failed to execute.\n");
|
||||
fprintf(stderr, "failed to execute.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -441,7 +294,7 @@ drm_public int g2d_exec(struct g2d_context *ctx)
|
|||
* @w: width value to buffer filled with given color data.
|
||||
* @h: height value to buffer filled with given color data.
|
||||
*/
|
||||
drm_public int
|
||||
int
|
||||
g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
|
||||
unsigned int x, unsigned int y, unsigned int w,
|
||||
unsigned int h)
|
||||
|
|
@ -449,9 +302,6 @@ g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
|
|||
union g2d_bitblt_cmd_val bitblt;
|
||||
union g2d_point_val pt;
|
||||
|
||||
if (g2d_check_space(ctx, 7, 1))
|
||||
return -ENOSPC;
|
||||
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, img->color_mode);
|
||||
g2d_add_base_addr(ctx, img, g2d_dst);
|
||||
|
|
@ -462,12 +312,15 @@ g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
|
|||
if (y + h > img->height)
|
||||
h = img->height - y;
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = x;
|
||||
pt.data.y = y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = x + w;
|
||||
pt.data.y = y + h;
|
||||
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
g2d_add_cmd(ctx, SF_COLOR_REG, img->color);
|
||||
|
|
@ -494,7 +347,7 @@ g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
|
|||
* @w: width value to source and destination buffers.
|
||||
* @h: height value to source and destination buffers.
|
||||
*/
|
||||
drm_public int
|
||||
int
|
||||
g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
|
||||
struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
|
||||
unsigned int dst_x, unsigned dst_y, unsigned int w,
|
||||
|
|
@ -502,7 +355,17 @@ g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
|
|||
{
|
||||
union g2d_rop4_val rop4;
|
||||
union g2d_point_val pt;
|
||||
unsigned int src_w, src_h, dst_w, dst_h;
|
||||
unsigned int src_w = 0, src_h = 0, dst_w = 0, dst_h = 0;
|
||||
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
g2d_add_base_addr(ctx, dst, g2d_dst);
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
g2d_add_base_addr(ctx, src, g2d_src);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
|
||||
src_w = w;
|
||||
src_h = h;
|
||||
|
|
@ -522,128 +385,25 @@ g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
|
|||
h = MIN(src_h, dst_h);
|
||||
|
||||
if (w <= 0 || h <= 0) {
|
||||
fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
|
||||
fprintf(stderr, "invalid width or height.\n");
|
||||
g2d_reset(ctx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (g2d_check_space(ctx, 11, 2))
|
||||
return -ENOSPC;
|
||||
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
g2d_add_base_addr(ctx, dst, g2d_dst);
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
g2d_add_base_addr(ctx, src, g2d_src);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
|
||||
pt.data.x = src_x;
|
||||
pt.data.y = src_y;
|
||||
g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
|
||||
pt.data.x = src_x + w;
|
||||
pt.data.y = src_y + h;
|
||||
g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
pt.data.x = dst_x;
|
||||
pt.data.y = dst_y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
pt.data.x = dst_x + w;
|
||||
pt.data.y = dst_y + h;
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
rop4.val = 0;
|
||||
rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
|
||||
g2d_add_cmd(ctx, ROP4_REG, rop4.val);
|
||||
|
||||
return g2d_flush(ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
* g2d_move - copy content inside single buffer.
|
||||
* Similar to libc's memmove() this copies a rectangular
|
||||
* region of the provided buffer to another location, while
|
||||
* properly handling the situation where source and
|
||||
* destination rectangle overlap.
|
||||
*
|
||||
* @ctx: a pointer to g2d_context structure.
|
||||
* @img: a pointer to g2d_image structure providing
|
||||
* buffer information.
|
||||
* @src_x: x position of source rectangle.
|
||||
* @src_y: y position of source rectangle.
|
||||
* @dst_x: x position of destination rectangle.
|
||||
* @dst_y: y position of destination rectangle.
|
||||
* @w: width of rectangle to move.
|
||||
* @h: height of rectangle to move.
|
||||
*/
|
||||
drm_public int
|
||||
g2d_move(struct g2d_context *ctx, struct g2d_image *img,
|
||||
unsigned int src_x, unsigned int src_y,
|
||||
unsigned int dst_x, unsigned dst_y, unsigned int w,
|
||||
unsigned int h)
|
||||
{
|
||||
union g2d_rop4_val rop4;
|
||||
union g2d_point_val pt;
|
||||
union g2d_direction_val dir;
|
||||
unsigned int src_w, src_h, dst_w, dst_h;
|
||||
|
||||
src_w = w;
|
||||
src_h = h;
|
||||
if (src_x + img->width > w)
|
||||
src_w = img->width - src_x;
|
||||
if (src_y + img->height > h)
|
||||
src_h = img->height - src_y;
|
||||
|
||||
dst_w = w;
|
||||
dst_h = w;
|
||||
if (dst_x + img->width > w)
|
||||
dst_w = img->width - dst_x;
|
||||
if (dst_y + img->height > h)
|
||||
dst_h = img->height - dst_y;
|
||||
|
||||
w = MIN(src_w, dst_w);
|
||||
h = MIN(src_h, dst_h);
|
||||
|
||||
if (w == 0 || h == 0) {
|
||||
fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (g2d_check_space(ctx, 13, 2))
|
||||
return -ENOSPC;
|
||||
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, img->color_mode);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, img->color_mode);
|
||||
|
||||
g2d_add_base_addr(ctx, img, g2d_dst);
|
||||
g2d_add_base_addr(ctx, img, g2d_src);
|
||||
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, img->stride);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, img->stride);
|
||||
|
||||
dir.val[0] = dir.val[1] = 0;
|
||||
|
||||
if (dst_x >= src_x)
|
||||
dir.data.src_x_direction = dir.data.dst_x_direction = 1;
|
||||
if (dst_y >= src_y)
|
||||
dir.data.src_y_direction = dir.data.dst_y_direction = 1;
|
||||
|
||||
g2d_set_direction(ctx, &dir);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x;
|
||||
pt.data.y = src_y;
|
||||
g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x + w;
|
||||
pt.data.y = src_y + h;
|
||||
g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x;
|
||||
pt.data.y = dst_y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x + w;
|
||||
pt.data.y = dst_y + h;
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
|
@ -675,7 +435,7 @@ g2d_move(struct g2d_context *ctx, struct g2d_image *img,
|
|||
* @negative: indicate that it uses color negative to source and
|
||||
* destination buffers.
|
||||
*/
|
||||
drm_public int
|
||||
int
|
||||
g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
||||
struct g2d_image *dst, unsigned int src_x,
|
||||
unsigned int src_y, unsigned int src_w,
|
||||
|
|
@ -685,12 +445,23 @@ g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
|||
{
|
||||
union g2d_rop4_val rop4;
|
||||
union g2d_point_val pt;
|
||||
unsigned int scale, repeat_pad;
|
||||
unsigned int scale;
|
||||
unsigned int scale_x, scale_y;
|
||||
|
||||
/* Sanitize this parameter to facilitate space computation below. */
|
||||
if (negative)
|
||||
negative = 1;
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
g2d_add_base_addr(ctx, dst, g2d_dst);
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_REPEAT_MODE_REG, src->repeat_mode);
|
||||
if (src->repeat_mode == G2D_REPEAT_MODE_PAD)
|
||||
g2d_add_cmd(ctx, SRC_PAD_VALUE_REG, dst->color);
|
||||
|
||||
g2d_add_base_addr(ctx, src, g2d_src);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
|
||||
if (src_w == dst_w && src_h == dst_h)
|
||||
scale = 0;
|
||||
|
|
@ -700,8 +471,6 @@ g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
|||
scale_y = g2d_get_scaling(src_h, dst_h);
|
||||
}
|
||||
|
||||
repeat_pad = src->repeat_mode == G2D_REPEAT_MODE_PAD ? 1 : 0;
|
||||
|
||||
if (src_x + src_w > src->width)
|
||||
src_w = src->width - src_x;
|
||||
if (src_y + src_h > src->height)
|
||||
|
|
@ -713,54 +482,42 @@ g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
|||
dst_h = dst->height - dst_y;
|
||||
|
||||
if (src_w <= 0 || src_h <= 0 || dst_w <= 0 || dst_h <= 0) {
|
||||
fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
|
||||
fprintf(stderr, "invalid width or height.\n");
|
||||
g2d_reset(ctx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (g2d_check_space(ctx, 12 + scale * 3 + negative + repeat_pad, 2))
|
||||
return -ENOSPC;
|
||||
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
g2d_add_base_addr(ctx, dst, g2d_dst);
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_REPEAT_MODE_REG, src->repeat_mode);
|
||||
if (repeat_pad)
|
||||
g2d_add_cmd(ctx, SRC_PAD_VALUE_REG, dst->color);
|
||||
|
||||
g2d_add_base_addr(ctx, src, g2d_src);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
|
||||
rop4.val = 0;
|
||||
rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
|
||||
|
||||
if (negative) {
|
||||
g2d_add_cmd(ctx, BG_COLOR_REG, 0x00FFFFFF);
|
||||
rop4.data.unmasked_rop3 ^= G2D_ROP3_DST;
|
||||
rop4.val = 0;
|
||||
rop4.data.unmasked_rop3 = G2D_ROP3_SRC^G2D_ROP3_DST;
|
||||
g2d_add_cmd(ctx, ROP4_REG, rop4.val);
|
||||
} else {
|
||||
rop4.val = 0;
|
||||
rop4.data.unmasked_rop3 = G2D_ROP3_SRC;
|
||||
g2d_add_cmd(ctx, ROP4_REG, rop4.val);
|
||||
}
|
||||
|
||||
g2d_add_cmd(ctx, ROP4_REG, rop4.val);
|
||||
|
||||
if (scale) {
|
||||
g2d_add_cmd(ctx, SRC_SCALE_CTRL_REG, G2D_SCALE_MODE_BILINEAR);
|
||||
g2d_add_cmd(ctx, SRC_XSCALE_REG, scale_x);
|
||||
g2d_add_cmd(ctx, SRC_YSCALE_REG, scale_y);
|
||||
}
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x;
|
||||
pt.data.y = src_y;
|
||||
g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x + src_w;
|
||||
pt.data.y = src_y + src_h;
|
||||
g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x;
|
||||
pt.data.y = dst_y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x + dst_w;
|
||||
pt.data.y = dst_y + dst_h;
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
|
@ -784,7 +541,7 @@ g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
|||
* @h: height value to source and destination buffer.
|
||||
* @op: blend operation type.
|
||||
*/
|
||||
drm_public int
|
||||
int
|
||||
g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
|
||||
struct g2d_image *dst, unsigned int src_x,
|
||||
unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
|
||||
|
|
@ -793,45 +550,7 @@ g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
union g2d_point_val pt;
|
||||
union g2d_bitblt_cmd_val bitblt;
|
||||
union g2d_blend_func_val blend;
|
||||
unsigned int gem_space;
|
||||
unsigned int src_w, src_h, dst_w, dst_h;
|
||||
|
||||
src_w = w;
|
||||
src_h = h;
|
||||
if (src_x + w > src->width)
|
||||
src_w = src->width - src_x;
|
||||
if (src_y + h > src->height)
|
||||
src_h = src->height - src_y;
|
||||
|
||||
dst_w = w;
|
||||
dst_h = h;
|
||||
if (dst_x + w > dst->width)
|
||||
dst_w = dst->width - dst_x;
|
||||
if (dst_y + h > dst->height)
|
||||
dst_h = dst->height - dst_y;
|
||||
|
||||
w = MIN(src_w, dst_w);
|
||||
h = MIN(src_h, dst_h);
|
||||
|
||||
if (w <= 0 || h <= 0) {
|
||||
fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!g2d_validate_select_mode(src->select_mode)) {
|
||||
fprintf(stderr , MSG_PREFIX "invalid select mode for source.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!g2d_validate_blending_op(op)) {
|
||||
fprintf(stderr , MSG_PREFIX "unsupported blending operation.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gem_space = src->select_mode == G2D_SELECT_MODE_NORMAL ? 2 : 1;
|
||||
|
||||
if (g2d_check_space(ctx, 12, gem_space))
|
||||
return -ENOSPC;
|
||||
unsigned int src_w = 0, src_h = 0, dst_w = 0, dst_h = 0;
|
||||
|
||||
bitblt.val = 0;
|
||||
blend.val = 0;
|
||||
|
|
@ -859,6 +578,32 @@ g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
case G2D_SELECT_MODE_BGCOLOR:
|
||||
g2d_add_cmd(ctx, BG_COLOR_REG, src->color);
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr , "failed to set src.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
src_w = w;
|
||||
src_h = h;
|
||||
if (src_x + w > src->width)
|
||||
src_w = src->width - src_x;
|
||||
if (src_y + h > src->height)
|
||||
src_h = src->height - src_y;
|
||||
|
||||
dst_w = w;
|
||||
dst_h = h;
|
||||
if (dst_x + w > dst->width)
|
||||
dst_w = dst->width - dst_x;
|
||||
if (dst_y + h > dst->height)
|
||||
dst_h = dst->height - dst_y;
|
||||
|
||||
w = MIN(src_w, dst_w);
|
||||
h = MIN(src_h, dst_h);
|
||||
|
||||
if (w <= 0 || h <= 0) {
|
||||
fprintf(stderr, "invalid width or height.\n");
|
||||
g2d_reset(ctx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bitblt.data.alpha_blend_mode = G2D_ALPHA_BLEND_MODE_ENABLE;
|
||||
|
|
@ -866,16 +611,20 @@ g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
g2d_add_cmd(ctx, BITBLT_COMMAND_REG, bitblt.val);
|
||||
g2d_add_cmd(ctx, BLEND_FUNCTION_REG, blend.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x;
|
||||
pt.data.y = src_y;
|
||||
g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x + w;
|
||||
pt.data.y = src_y + h;
|
||||
g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x;
|
||||
pt.data.y = dst_y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x + w;
|
||||
pt.data.y = dst_y + h;
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
|
@ -901,7 +650,7 @@ g2d_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
* @dst_h: height value to destination buffer.
|
||||
* @op: blend operation type.
|
||||
*/
|
||||
drm_public int
|
||||
int
|
||||
g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
|
||||
struct g2d_image *dst, unsigned int src_x, unsigned int src_y,
|
||||
unsigned int src_w, unsigned int src_h, unsigned int dst_x,
|
||||
|
|
@ -911,9 +660,50 @@ g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
union g2d_point_val pt;
|
||||
union g2d_bitblt_cmd_val bitblt;
|
||||
union g2d_blend_func_val blend;
|
||||
unsigned int scale, gem_space;
|
||||
unsigned int scale;
|
||||
unsigned int scale_x, scale_y;
|
||||
|
||||
bitblt.val = 0;
|
||||
blend.val = 0;
|
||||
|
||||
if (op == G2D_OP_SRC || op == G2D_OP_CLEAR)
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
else
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
if (dst->buf_type == G2D_IMGBUF_USERPTR)
|
||||
g2d_add_cmd(ctx, DST_BASE_ADDR_REG | G2D_BUF_USERPTR,
|
||||
(unsigned long)&dst->user_ptr[0]);
|
||||
else
|
||||
g2d_add_cmd(ctx, DST_BASE_ADDR_REG, dst->bo[0]);
|
||||
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, src->select_mode);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
|
||||
switch (src->select_mode) {
|
||||
case G2D_SELECT_MODE_NORMAL:
|
||||
if (src->buf_type == G2D_IMGBUF_USERPTR)
|
||||
g2d_add_cmd(ctx, SRC_BASE_ADDR_REG | G2D_BUF_USERPTR,
|
||||
(unsigned long)&src->user_ptr[0]);
|
||||
else
|
||||
g2d_add_cmd(ctx, SRC_BASE_ADDR_REG, src->bo[0]);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
break;
|
||||
case G2D_SELECT_MODE_FGCOLOR:
|
||||
g2d_add_cmd(ctx, FG_COLOR_REG, src->color);
|
||||
break;
|
||||
case G2D_SELECT_MODE_BGCOLOR:
|
||||
g2d_add_cmd(ctx, BG_COLOR_REG, src->color);
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr , "failed to set src.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (src_w == dst_w && src_h == dst_h)
|
||||
scale = 0;
|
||||
else {
|
||||
|
|
@ -933,53 +723,11 @@ g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
dst_h = dst->height - dst_y;
|
||||
|
||||
if (src_w <= 0 || src_h <= 0 || dst_w <= 0 || dst_h <= 0) {
|
||||
fprintf(stderr, MSG_PREFIX "invalid width or height.\n");
|
||||
fprintf(stderr, "invalid width or height.\n");
|
||||
g2d_reset(ctx);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!g2d_validate_select_mode(src->select_mode)) {
|
||||
fprintf(stderr , MSG_PREFIX "invalid select mode for source.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!g2d_validate_blending_op(op)) {
|
||||
fprintf(stderr , MSG_PREFIX "unsupported blending operation.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gem_space = src->select_mode == G2D_SELECT_MODE_NORMAL ? 2 : 1;
|
||||
|
||||
if (g2d_check_space(ctx, 12 + scale * 3, gem_space))
|
||||
return -ENOSPC;
|
||||
|
||||
bitblt.val = 0;
|
||||
blend.val = 0;
|
||||
|
||||
if (op == G2D_OP_SRC || op == G2D_OP_CLEAR)
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_BGCOLOR);
|
||||
else
|
||||
g2d_add_cmd(ctx, DST_SELECT_REG, G2D_SELECT_MODE_NORMAL);
|
||||
|
||||
g2d_add_cmd(ctx, DST_COLOR_MODE_REG, dst->color_mode);
|
||||
g2d_add_base_addr(ctx, dst, g2d_dst);
|
||||
g2d_add_cmd(ctx, DST_STRIDE_REG, dst->stride);
|
||||
|
||||
g2d_add_cmd(ctx, SRC_SELECT_REG, src->select_mode);
|
||||
g2d_add_cmd(ctx, SRC_COLOR_MODE_REG, src->color_mode);
|
||||
|
||||
switch (src->select_mode) {
|
||||
case G2D_SELECT_MODE_NORMAL:
|
||||
g2d_add_base_addr(ctx, src, g2d_src);
|
||||
g2d_add_cmd(ctx, SRC_STRIDE_REG, src->stride);
|
||||
break;
|
||||
case G2D_SELECT_MODE_FGCOLOR:
|
||||
g2d_add_cmd(ctx, FG_COLOR_REG, src->color);
|
||||
break;
|
||||
case G2D_SELECT_MODE_BGCOLOR:
|
||||
g2d_add_cmd(ctx, BG_COLOR_REG, src->color);
|
||||
break;
|
||||
}
|
||||
|
||||
if (scale) {
|
||||
g2d_add_cmd(ctx, SRC_SCALE_CTRL_REG, G2D_SCALE_MODE_BILINEAR);
|
||||
g2d_add_cmd(ctx, SRC_XSCALE_REG, scale_x);
|
||||
|
|
@ -991,16 +739,20 @@ g2d_scale_and_blend(struct g2d_context *ctx, struct g2d_image *src,
|
|||
g2d_add_cmd(ctx, BITBLT_COMMAND_REG, bitblt.val);
|
||||
g2d_add_cmd(ctx, BLEND_FUNCTION_REG, blend.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x;
|
||||
pt.data.y = src_y;
|
||||
g2d_add_cmd(ctx, SRC_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = src_x + src_w;
|
||||
pt.data.y = src_y + src_h;
|
||||
g2d_add_cmd(ctx, SRC_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x;
|
||||
pt.data.y = dst_y;
|
||||
g2d_add_cmd(ctx, DST_LEFT_TOP_REG, pt.val);
|
||||
pt.val = 0;
|
||||
pt.data.x = dst_x + dst_w;
|
||||
pt.data.y = dst_y + dst_h;
|
||||
g2d_add_cmd(ctx, DST_RIGHT_BOTTOM_REG, pt.val);
|
||||
|
|
|
|||
|
|
@ -3,29 +3,19 @@
|
|||
* Authors:
|
||||
* Inki Dae <inki.dae@samsung.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _FIMG2D_H_
|
||||
#define _FIMG2D_H_
|
||||
|
||||
#define G2D_MAX_CMD_NR 64
|
||||
#define G2D_MAX_GEM_CMD_NR 64
|
||||
#define G2D_MAX_CMD_LIST_NR 64
|
||||
#define G2D_PLANE_MAX_NR 2
|
||||
|
||||
enum e_g2d_color_mode {
|
||||
|
|
@ -299,11 +289,19 @@ struct g2d_image {
|
|||
void *mapped_ptr[G2D_PLANE_MAX_NR];
|
||||
};
|
||||
|
||||
struct g2d_context;
|
||||
struct g2d_context {
|
||||
int fd;
|
||||
unsigned int major;
|
||||
unsigned int minor;
|
||||
struct drm_exynos_g2d_cmd cmd[G2D_MAX_CMD_NR];
|
||||
struct drm_exynos_g2d_cmd cmd_buf[G2D_MAX_GEM_CMD_NR];
|
||||
unsigned int cmd_nr;
|
||||
unsigned int cmd_buf_nr;
|
||||
unsigned int cmdlist_nr;
|
||||
};
|
||||
|
||||
struct g2d_context *g2d_init(int fd);
|
||||
void g2d_fini(struct g2d_context *ctx);
|
||||
void g2d_config_event(struct g2d_context *ctx, void *userdata);
|
||||
int g2d_exec(struct g2d_context *ctx);
|
||||
int g2d_solid_fill(struct g2d_context *ctx, struct g2d_image *img,
|
||||
unsigned int x, unsigned int y, unsigned int w,
|
||||
|
|
@ -312,9 +310,6 @@ int g2d_copy(struct g2d_context *ctx, struct g2d_image *src,
|
|||
struct g2d_image *dst, unsigned int src_x,
|
||||
unsigned int src_y, unsigned int dst_x, unsigned int dst_y,
|
||||
unsigned int w, unsigned int h);
|
||||
int g2d_move(struct g2d_context *ctx, struct g2d_image *img,
|
||||
unsigned int src_x, unsigned int src_y, unsigned int dst_x,
|
||||
unsigned dst_y, unsigned int w, unsigned int h);
|
||||
int g2d_copy_with_scale(struct g2d_context *ctx, struct g2d_image *src,
|
||||
struct g2d_image *dst, unsigned int src_x,
|
||||
unsigned int src_y, unsigned int src_w,
|
||||
|
|
|
|||
|
|
@ -3,24 +3,11 @@
|
|||
* Authors:
|
||||
* Inki Dae <inki.dae@samsung.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _FIMG2D_REG_H_
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ includedir=@includedir@
|
|||
|
||||
Name: libdrm_exynos
|
||||
Description: Userspace interface to exynos kernel DRM services
|
||||
Version: 0.7
|
||||
Version: 0.6
|
||||
Libs: -L${libdir} -ldrm_exynos
|
||||
Cflags: -I${includedir} -I${includedir}/libdrm -I${includedir}/exynos
|
||||
Requires.private: libdrm
|
||||
|
|
|
|||
|
|
@ -1,59 +0,0 @@
|
|||
# Copyright © 2017-2018 Intel Corporation
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
libdrm_exynos = library(
|
||||
'drm_exynos',
|
||||
[files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
|
||||
c_args : libdrm_c_args,
|
||||
gnu_symbol_visibility : 'hidden',
|
||||
include_directories : [inc_root, inc_drm],
|
||||
link_with : libdrm,
|
||||
dependencies : [dep_threads],
|
||||
version : '1.@0@.0'.format(patch_ver),
|
||||
install : true,
|
||||
)
|
||||
|
||||
install_headers('exynos_drmif.h', subdir : 'libdrm')
|
||||
install_headers('exynos_drm.h', 'exynos_fimg2d.h', subdir : 'exynos')
|
||||
|
||||
ext_libdrm_exynos = declare_dependency(
|
||||
link_with : [libdrm, libdrm_exynos],
|
||||
include_directories : [inc_drm, include_directories('.')],
|
||||
)
|
||||
|
||||
meson.override_dependency('libdrm_exynos', ext_libdrm_exynos)
|
||||
|
||||
pkg.generate(
|
||||
libdrm_exynos,
|
||||
name : 'libdrm_exynos',
|
||||
subdirs : ['.', 'libdrm', 'exynos'],
|
||||
version : '0.7',
|
||||
description : 'Userspace interface to exynos kernel DRM services',
|
||||
)
|
||||
|
||||
test(
|
||||
'exynos-symbols-check',
|
||||
symbols_check,
|
||||
args : [
|
||||
'--lib', libdrm_exynos,
|
||||
'--symbols-file', files('exynos-symbols.txt'),
|
||||
'--nm', prog_nm.full_path(),
|
||||
],
|
||||
)
|
||||
freedreno/Android.mk (Normal file, 18 lines)
|
|
@ -0,0 +1,18 @@
|
|||
LOCAL_PATH := $(call my-dir)
|
||||
include $(CLEAR_VARS)
|
||||
|
||||
# Import variables LIBDRM_FREEDRENO_FILES, LIBDRM_FREEDRENO_H_FILES
|
||||
include $(LOCAL_PATH)/Makefile.sources
|
||||
|
||||
LOCAL_MODULE := libdrm_freedreno
|
||||
LOCAL_MODULE_TAGS := optional
|
||||
|
||||
LOCAL_SHARED_LIBRARIES := libdrm
|
||||
|
||||
LOCAL_SRC_FILES := $(LIBDRM_FREEDRENO_FILES)
|
||||
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
|
||||
|
||||
LOCAL_CFLAGS := \
|
||||
-DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
|
||||
|
||||
include $(BUILD_SHARED_LIBRARY)
|
||||
freedreno/Makefile.am (Normal file, 30 lines)
|
|
@ -0,0 +1,30 @@
|
|||
AUTOMAKE_OPTIONS=subdir-objects
|
||||
include Makefile.sources
|
||||
|
||||
AM_CFLAGS = \
|
||||
$(WARN_CFLAGS) \
|
||||
-I$(top_srcdir) \
|
||||
$(PTHREADSTUBS_CFLAGS) \
|
||||
-I$(top_srcdir)/include/drm
|
||||
|
||||
libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la
|
||||
libdrm_freedreno_ladir = $(libdir)
|
||||
libdrm_freedreno_la_LDFLAGS = -version-number 1:0:0 -no-undefined
|
||||
libdrm_freedreno_la_LIBADD = \
|
||||
../libdrm.la \
|
||||
@PTHREADSTUBS_LIBS@ \
|
||||
@CLOCK_LIB@
|
||||
|
||||
libdrm_freedreno_la_SOURCES = $(LIBDRM_FREEDRENO_FILES)
|
||||
if HAVE_FREEDRENO_KGSL
|
||||
libdrm_freedreno_la_SOURCES += $(LIBDRM_FREEDRENO_KGSL_FILES)
|
||||
endif
|
||||
|
||||
libdrm_freedrenocommonincludedir = ${includedir}/freedreno
|
||||
libdrm_freedrenocommoninclude_HEADERS = $(LIBDRM_FREEDRENO_H_FILES)
|
||||
|
||||
pkgconfigdir = @pkgconfigdir@
|
||||
pkgconfig_DATA = libdrm_freedreno.pc
|
||||
|
||||
TESTS = freedreno-symbol-check
|
||||
EXTRA_DIST = Android.mk $(TESTS)
|
||||
freedreno/Makefile.sources (Normal file, 25 lines)
|
|
@ -0,0 +1,25 @@
|
|||
LIBDRM_FREEDRENO_FILES := \
|
||||
freedreno_device.c \
|
||||
freedreno_pipe.c \
|
||||
freedreno_priv.h \
|
||||
freedreno_ringbuffer.c \
|
||||
freedreno_bo.c \
|
||||
msm/msm_bo.c \
|
||||
msm/msm_device.c \
|
||||
msm/msm_drm.h \
|
||||
msm/msm_pipe.c \
|
||||
msm/msm_priv.h \
|
||||
msm/msm_ringbuffer.c
|
||||
|
||||
LIBDRM_FREEDRENO_KGSL_FILES := \
|
||||
kgsl/kgsl_bo.c \
|
||||
kgsl/kgsl_device.c \
|
||||
kgsl/kgsl_drm.h \
|
||||
kgsl/kgsl_pipe.c \
|
||||
kgsl/kgsl_priv.h \
|
||||
kgsl/kgsl_ringbuffer.c \
|
||||
kgsl/msm_kgsl.h
|
||||
|
||||
LIBDRM_FREEDRENO_H_FILES := \
|
||||
freedreno_drmif.h \
|
||||
freedreno_ringbuffer.h
|
||||
|
|
@ -1,13 +1,3 @@
|
|||
This is a historical description of what is now the kgsl backend
|
||||
in libdrm freedreno (before the upstream drm/msm driver). Note
|
||||
that the kgsl backend requires the "kgsl-drm" shim driver, which
|
||||
usually is in disrepair (QCOM does not build it for android), and
|
||||
due to random differences between different downstream android
|
||||
kernel branches it may or may not work. So YMMV.
|
||||
|
||||
Original README:
|
||||
----------------
|
||||
|
||||
Note that current msm kernel driver is a bit strange. It provides a
|
||||
DRM interface for GEM, which is basically sufficient to have DRI2
|
||||
working. But it does not provide KMS. And interface to 2d and 3d
|
||||
freedreno/freedreno-symbol-check (Executable file, 53 lines)
|
|
@ -0,0 +1,53 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The following symbols (past the first five) are taken from the public headers.
|
||||
# A list of the latter should be available Makefile.sources/LIBDRM_FREEDRENO_H_FILES
|
||||
|
||||
FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_freedreno.so} | awk '{print $3}'| while read func; do
|
||||
( grep -q "^$func$" || echo $func ) <<EOF
|
||||
__bss_start
|
||||
_edata
|
||||
_end
|
||||
_fini
|
||||
_init
|
||||
fd_bo_cpu_fini
|
||||
fd_bo_cpu_prep
|
||||
fd_bo_del
|
||||
fd_bo_dmabuf
|
||||
fd_bo_from_dmabuf
|
||||
fd_bo_from_fbdev
|
||||
fd_bo_from_handle
|
||||
fd_bo_from_name
|
||||
fd_bo_get_name
|
||||
fd_bo_handle
|
||||
fd_bo_map
|
||||
fd_bo_new
|
||||
fd_bo_ref
|
||||
fd_bo_size
|
||||
fd_device_del
|
||||
fd_device_new
|
||||
fd_device_new_dup
|
||||
fd_device_ref
|
||||
fd_pipe_del
|
||||
fd_pipe_get_param
|
||||
fd_pipe_new
|
||||
fd_pipe_wait
|
||||
fd_pipe_wait_timeout
|
||||
fd_ringbuffer_del
|
||||
fd_ringbuffer_emit_reloc_ring
|
||||
fd_ringbuffer_flush
|
||||
fd_ringbuffer_new
|
||||
fd_ringbuffer_reloc
|
||||
fd_ringbuffer_reset
|
||||
fd_ringbuffer_set_parent
|
||||
fd_ringbuffer_timestamp
|
||||
fd_ringmarker_del
|
||||
fd_ringmarker_dwords
|
||||
fd_ringmarker_flush
|
||||
fd_ringmarker_mark
|
||||
fd_ringmarker_new
|
||||
EOF
|
||||
done)
|
||||
|
||||
test ! -n "$FUNCS" || echo $FUNCS
|
||||
test ! -n "$FUNCS"
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
fd_bo_cpu_fini
|
||||
fd_bo_cpu_prep
|
||||
fd_bo_del
|
||||
fd_bo_dmabuf
|
||||
fd_bo_from_dmabuf
|
||||
fd_bo_from_fbdev
|
||||
fd_bo_from_handle
|
||||
fd_bo_from_name
|
||||
fd_bo_get_iova
|
||||
fd_bo_get_name
|
||||
fd_bo_handle
|
||||
fd_bo_map
|
||||
fd_bo_new
|
||||
fd_bo_put_iova
|
||||
fd_bo_ref
|
||||
fd_bo_size
|
||||
fd_device_del
|
||||
fd_device_fd
|
||||
fd_device_new
|
||||
fd_device_new_dup
|
||||
fd_device_ref
|
||||
fd_device_version
|
||||
fd_pipe_del
|
||||
fd_pipe_get_param
|
||||
fd_pipe_new
|
||||
fd_pipe_new2
|
||||
fd_pipe_ref
|
||||
fd_pipe_wait
|
||||
fd_pipe_wait_timeout
|
||||
fd_ringbuffer_cmd_count
|
||||
fd_ringbuffer_del
|
||||
fd_ringbuffer_emit_reloc_ring_full
|
||||
fd_ringbuffer_flush
|
||||
fd_ringbuffer_grow
|
||||
fd_ringbuffer_new
|
||||
fd_ringbuffer_new_flags
|
||||
fd_ringbuffer_new_object
|
||||
fd_ringbuffer_ref
|
||||
fd_ringbuffer_reloc
|
||||
fd_ringbuffer_reloc2
|
||||
fd_ringbuffer_reset
|
||||
fd_ringbuffer_set_parent
|
||||
fd_ringbuffer_size
|
||||
fd_ringbuffer_timestamp
|
||||
fd_ringbuffer_flush2
|
||||
|
|
@ -26,11 +26,16 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include "freedreno_drmif.h"
|
||||
#include "freedreno_priv.h"
|
||||
|
||||
drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
drm_private void bo_del(struct fd_bo *bo);
|
||||
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static void bo_del(struct fd_bo *bo);
|
||||
|
||||
/* set buffer name, and add to table, call w/ table_lock held: */
|
||||
static void set_name(struct fd_bo *bo, uint32_t name)
|
||||
|
|
@ -47,9 +52,6 @@ static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
|
|||
if (!drmHashLookup(tbl, key, (void **)&bo)) {
|
||||
/* found, incr refcnt and return: */
|
||||
bo = fd_bo_ref(bo);
|
||||
|
||||
/* don't break the bucket if this bo was found in one */
|
||||
list_delinit(&bo->list);
|
||||
}
|
||||
return bo;
|
||||
}
|
||||
|
|
@ -62,7 +64,10 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
|
|||
|
||||
bo = dev->funcs->bo_from_handle(dev, size, handle);
|
||||
if (!bo) {
|
||||
drmCloseBufferHandle(dev->fd, handle);
|
||||
struct drm_gem_close req = {
|
||||
.handle = handle,
|
||||
};
|
||||
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
|
||||
return NULL;
|
||||
}
|
||||
bo->dev = fd_device_ref(dev);
|
||||
|
|
@ -75,17 +80,114 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
|
|||
return bo;
|
||||
}

static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
		struct fd_bo_cache *cache)
/* Frees older cached buffers.  Called under table_lock */
drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
{
	int i;

	if (dev->time == time)
		return;

	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	dev->time = time;
}

static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
			break;
		}
		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}
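The TODO inside get_bucket() above notes the bucket could be computed instead of found by scanning. Since the ladder is 4k, 8k, 12k and then three evenly spaced sizes between successive powers of two, the target bucket size is derivable in O(1). A sketch only, not code from this tree; bucket_size_for() is a hypothetical helper and callers would still have to respect the 64 MiB cache_max_size cap:

#include <stdint.h>

/* hypothetical helper: smallest bucket size that can hold "size" bytes */
static uint32_t bucket_size_for(uint32_t size)
{
	uint32_t pages = (size + 4095) / 4096;

	if (pages <= 3)
		return pages * 4096;		/* the 4k / 8k / 12k buckets */

	/* larger buckets hold (4..7) * 2^n pages, i.e. a 3-bit "mantissa";
	 * round the page count up to that shape instead of scanning:
	 */
	int shift = (32 - __builtin_clz(pages)) - 3;	/* bit-length minus 3 */
	if (shift < 0)
		shift = 0;
	uint32_t rounded = ((pages + (1u << shift) - 1) >> shift) << shift;
	return rounded * 4096;
}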
|
||||
|
||||
|
||||
struct fd_bo *
|
||||
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
|
||||
{
|
||||
struct fd_bo *bo = NULL;
|
||||
struct fd_bo_bucket *bucket;
|
||||
uint32_t handle;
|
||||
int ret;
|
||||
|
||||
bo = fd_bo_cache_alloc(cache, &size, flags);
|
||||
if (bo)
|
||||
return bo;
|
||||
size = ALIGN(size, 4096);
|
||||
bucket = get_bucket(dev, size);
|
||||
|
||||
/* see if we can be green and recycle: */
|
||||
if (bucket) {
|
||||
size = bucket->size;
|
||||
bo = find_in_bucket(dev, bucket, flags);
|
||||
if (bo) {
|
||||
atomic_set(&bo->refcnt, 1);
|
||||
fd_device_ref(bo->dev);
|
||||
return bo;
|
||||
}
|
||||
}
|
||||
|
||||
ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
|
||||
if (ret)
|
||||
|
|
@ -93,37 +195,13 @@ bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
|
|||
|
||||
pthread_mutex_lock(&table_lock);
|
||||
bo = bo_from_handle(dev, size, handle);
|
||||
bo->bo_reuse = 1;
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
|
||||
VG_BO_ALLOC(bo);
|
||||
|
||||
return bo;
|
||||
}
|
||||
|
||||
drm_public struct fd_bo *
|
||||
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
|
||||
{
|
||||
struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
|
||||
if (bo)
|
||||
bo->bo_reuse = BO_CACHE;
|
||||
return bo;
|
||||
}
|
||||
|
||||
/* internal function to allocate bo's that use the ringbuffer cache
|
||||
* instead of the normal bo_cache. The purpose is, because cmdstream
|
||||
* bo's get vmap'd on the kernel side, and that is expensive, we want
|
||||
* to re-use cmdstream bo's for cmdstream and not unrelated purposes.
|
||||
*/
|
||||
drm_private struct fd_bo *
|
||||
fd_bo_new_ring(struct fd_device *dev, uint32_t size, uint32_t flags)
|
||||
{
|
||||
struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
|
||||
if (bo)
|
||||
bo->bo_reuse = RING_CACHE;
|
||||
return bo;
|
||||
}
|
||||
|
||||
drm_public struct fd_bo *
|
||||
struct fd_bo *
|
||||
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
|
||||
{
|
||||
struct fd_bo *bo = NULL;
|
||||
|
|
@@ -136,47 +214,32 @@ fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

drm_public struct fd_bo *
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	struct drm_prime_handle req = {
		.fd = fd,
	};
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;
	/* hmm, would be nice if we had a way to figure out the size.. */
	size = 0;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
	return fd_bo_from_handle(dev, req.handle, size);
}
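Both variants of fd_bo_from_dmabuf() above reduce to DRM_IOCTL_PRIME_FD_TO_HANDLE plus an lseek() on the dma-buf fd to learn the buffer size (the older variant simply left the size at 0). A standalone sketch of that import path, with error handling trimmed; drm_fd and dmabuf_fd are assumed to be valid descriptors owned by the caller:

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

static int import_dmabuf(int drm_fd, int dmabuf_fd,
		uint32_t *handle, uint32_t *size)
{
	int ret = drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
	if (ret)
		return ret;

	/* dma-buf fds support lseek(), which is how the bo size is found */
	off_t end = lseek(dmabuf_fd, 0, SEEK_END);
	if (end < 0)
		return -1;

	*size = (uint32_t)end;
	return 0;
}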
|
||||
drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
|
||||
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
|
||||
{
|
||||
struct drm_gem_open req = {
|
||||
.name = name,
|
||||
|
|
@ -200,10 +263,8 @@ drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
|
|||
goto out_unlock;
|
||||
|
||||
bo = bo_from_handle(dev, req.size, req.handle);
|
||||
if (bo) {
|
||||
if (bo)
|
||||
set_name(bo, name);
|
||||
VG_BO_ALLOC(bo);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
|
|
@@ -211,47 +272,56 @@ out_unlock:
	return bo;
}

drm_public uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
	return bo->funcs->iova(bo);
}

drm_public void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}

drm_public struct fd_bo * fd_bo_ref(struct fd_bo *bo)
struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

drm_public void fd_bo_del(struct fd_bo *bo)
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->fd >= 0) {
		close(bo->fd);
		bo->fd = -1;
	}

	pthread_mutex_lock(&table_lock);

	if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
		goto out;
	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */

			goto out;
		}
	}

	bo_del(bo);
	fd_device_del_locked(dev);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}
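A usage sketch, not from the diff, of the reference counting shown above: the last fd_bo_del() either parks the bo in the per-device bucket cache or destroys it outright. The include path follows the installed "freedreno" subdir, and flags of 0 keep the example neutral:

#include <string.h>
#include <freedreno/freedreno_drmif.h>

static void bo_roundtrip(struct fd_device *dev)
{
	struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0);
	if (!bo)
		return;

	void *ptr = fd_bo_map(bo);		/* CPU mapping, created lazily */
	if (ptr)
		memset(ptr, 0, fd_bo_size(bo));	/* size may have been rounded up */

	fd_bo_ref(bo);		/* refcnt 2 */
	fd_bo_del(bo);		/* refcnt 1 */
	fd_bo_del(bo);		/* refcnt 0: cached for reuse, or freed */
}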
|
||||
/* Called under table_lock */
|
||||
drm_private void bo_del(struct fd_bo *bo)
|
||||
static void bo_del(struct fd_bo *bo)
|
||||
{
|
||||
VG_BO_FREE(bo);
|
||||
|
||||
if (bo->map)
|
||||
drm_munmap(bo->map, bo->size);
|
||||
|
||||
|
|
@@ -260,16 +330,19 @@ drm_private void bo_del(struct fd_bo *bo)
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmCloseBufferHandle(bo->dev->fd, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
|
|
@ -285,7 +358,6 @@ drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
|
|||
pthread_mutex_lock(&table_lock);
|
||||
set_name(bo, req.name);
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
bo->bo_reuse = NO_CACHE;
|
||||
}
|
||||
|
||||
*name = bo->name;
|
||||
|
|
@@ -293,33 +365,36 @@ drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
	return 0;
}

drm_public uint32_t fd_bo_handle(struct fd_bo *bo)
uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

drm_public int fd_bo_dmabuf(struct fd_bo *bo)
int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;
	if (bo->fd < 0) {
		struct drm_prime_handle req = {
			.handle = bo->handle,
			.flags = DRM_CLOEXEC,
		};
		int ret;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}

	bo->bo_reuse = NO_CACHE;

	return prime_fd;
	return dup(bo->fd);
}
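Both versions of fd_bo_dmabuf() above come down to DRM_IOCTL_PRIME_HANDLE_TO_FD; one side also marks the bo NO_CACHE so a buffer that has been shared is never recycled from the bucket cache. A sketch of the raw export using the libdrm wrapper, not code from this tree; DRM_CLOEXEC keeps the fd from leaking across exec():

#include <stdint.h>
#include <xf86drm.h>

static int export_dmabuf(int drm_fd, uint32_t handle)
{
	int prime_fd = -1;
	int ret = drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);

	return ret ? ret : prime_fd;	/* caller owns and closes prime_fd */
}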
|
||||
drm_public uint32_t fd_bo_size(struct fd_bo *bo)
|
||||
uint32_t fd_bo_size(struct fd_bo *bo)
|
||||
{
|
||||
return bo->size;
|
||||
}
|
||||
|
||||
drm_public void * fd_bo_map(struct fd_bo *bo)
|
||||
void * fd_bo_map(struct fd_bo *bo)
|
||||
{
|
||||
if (!bo->map) {
|
||||
uint64_t offset;
|
||||
|
|
@ -341,19 +416,12 @@ drm_public void * fd_bo_map(struct fd_bo *bo)
|
|||
}
|
||||
|
||||
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
|
||||
drm_public int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
|
||||
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
|
||||
{
|
||||
return bo->funcs->cpu_prep(bo, pipe, op);
|
||||
}
|
||||
|
||||
drm_public void fd_bo_cpu_fini(struct fd_bo *bo)
|
||||
void fd_bo_cpu_fini(struct fd_bo *bo)
|
||||
{
|
||||
bo->funcs->cpu_fini(bo);
|
||||
}
|
||||
|
||||
#if !HAVE_FREEDRENO_KGSL
|
||||
drm_public struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -1,220 +0,0 @@
|
|||
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#include "freedreno_drmif.h"
|
||||
#include "freedreno_priv.h"
|
||||
|
||||
drm_private void bo_del(struct fd_bo *bo);
|
||||
drm_private extern pthread_mutex_t table_lock;
|
||||
|
||||
static void
|
||||
add_bucket(struct fd_bo_cache *cache, int size)
|
||||
{
|
||||
unsigned int i = cache->num_buckets;
|
||||
|
||||
assert(i < ARRAY_SIZE(cache->cache_bucket));
|
||||
|
||||
list_inithead(&cache->cache_bucket[i].list);
|
||||
cache->cache_bucket[i].size = size;
|
||||
cache->num_buckets++;
|
||||
}
|
||||
|
||||
/**
|
||||
* @coarse: if true, only power-of-two bucket sizes, otherwise
|
||||
* fill in for a bit smoother size curve..
|
||||
*/
|
||||
drm_private void
|
||||
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
|
||||
{
|
||||
unsigned long size, cache_max_size = 64 * 1024 * 1024;
|
||||
|
||||
/* OK, so power of two buckets was too wasteful of memory.
|
||||
* Give 3 other sizes between each power of two, to hopefully
|
||||
* cover things accurately enough. (The alternative is
|
||||
* probably to just go for exact matching of sizes, and assume
|
||||
* that for things like composited window resize the tiled
|
||||
* width/height alignment and rounding of sizes to pages will
|
||||
* get us useful cache hit rates anyway)
|
||||
*/
|
||||
add_bucket(cache, 4096);
|
||||
add_bucket(cache, 4096 * 2);
|
||||
if (!coarse)
|
||||
add_bucket(cache, 4096 * 3);
|
||||
|
||||
/* Initialize the linked lists for BO reuse cache. */
|
||||
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
|
||||
add_bucket(cache, size);
|
||||
if (!coarse) {
|
||||
add_bucket(cache, size + size * 1 / 4);
|
||||
add_bucket(cache, size + size * 2 / 4);
|
||||
add_bucket(cache, size + size * 3 / 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Frees older cached buffers. Called under table_lock */
|
||||
drm_private void
|
||||
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (cache->time == time)
|
||||
return;
|
||||
|
||||
for (i = 0; i < cache->num_buckets; i++) {
|
||||
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
|
||||
struct fd_bo *bo;
|
||||
|
||||
while (!LIST_IS_EMPTY(&bucket->list)) {
|
||||
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
|
||||
|
||||
/* keep things in cache for at least 1 second: */
|
||||
if (time && ((time - bo->free_time) <= 1))
|
||||
break;
|
||||
|
||||
VG_BO_OBTAIN(bo);
|
||||
list_del(&bo->list);
|
||||
bo_del(bo);
|
||||
}
|
||||
}
|
||||
|
||||
cache->time = time;
|
||||
}
|
||||
|
||||
static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* hmm, this is what intel does, but I suppose we could calculate our
|
||||
* way to the correct bucket size rather than looping..
|
||||
*/
|
||||
for (i = 0; i < cache->num_buckets; i++) {
|
||||
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
|
||||
if (bucket->size >= size) {
|
||||
return bucket;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int is_idle(struct fd_bo *bo)
|
||||
{
|
||||
return fd_bo_cpu_prep(bo, NULL,
|
||||
DRM_FREEDRENO_PREP_READ |
|
||||
DRM_FREEDRENO_PREP_WRITE |
|
||||
DRM_FREEDRENO_PREP_NOSYNC) == 0;
|
||||
}
|
||||
|
||||
static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
|
||||
{
|
||||
struct fd_bo *bo = NULL;
|
||||
|
||||
/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
|
||||
* skip the busy check.. if it is only going to be a render target
|
||||
* then we probably don't need to stall..
|
||||
*
|
||||
* NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
|
||||
* (MRU, since likely to be in GPU cache), rather than head (LRU)..
|
||||
*/
|
||||
pthread_mutex_lock(&table_lock);
|
||||
if (!LIST_IS_EMPTY(&bucket->list)) {
|
||||
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
|
||||
/* TODO check for compatible flags? */
|
||||
if (is_idle(bo)) {
|
||||
list_del(&bo->list);
|
||||
} else {
|
||||
bo = NULL;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
|
||||
return bo;
|
||||
}
|
||||
|
||||
/* NOTE: size is potentially rounded up to bucket size: */
|
||||
drm_private struct fd_bo *
|
||||
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
|
||||
{
|
||||
struct fd_bo *bo = NULL;
|
||||
struct fd_bo_bucket *bucket;
|
||||
|
||||
*size = ALIGN(*size, 4096);
|
||||
bucket = get_bucket(cache, *size);
|
||||
|
||||
/* see if we can be green and recycle: */
|
||||
retry:
|
||||
if (bucket) {
|
||||
*size = bucket->size;
|
||||
bo = find_in_bucket(bucket, flags);
|
||||
if (bo) {
|
||||
VG_BO_OBTAIN(bo);
|
||||
if (bo->funcs->madvise(bo, TRUE) <= 0) {
|
||||
/* we've lost the backing pages, delete and try again: */
|
||||
pthread_mutex_lock(&table_lock);
|
||||
bo_del(bo);
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
goto retry;
|
||||
}
|
||||
atomic_set(&bo->refcnt, 1);
|
||||
fd_device_ref(bo->dev);
|
||||
return bo;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drm_private int
|
||||
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
|
||||
{
|
||||
struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
|
||||
|
||||
/* see if we can be green and recycle: */
|
||||
if (bucket) {
|
||||
struct timespec time;
|
||||
|
||||
bo->funcs->madvise(bo, FALSE);
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &time);
|
||||
|
||||
bo->free_time = time.tv_sec;
|
||||
VG_BO_RELEASE(bo);
|
||||
list_addtail(&bo->list, &bucket->list);
|
||||
fd_bo_cache_cleanup(cache, time.tv_sec);
|
||||
|
||||
/* bo's in the bucket cache don't have a ref and
|
||||
* don't hold a ref to the dev:
|
||||
*/
|
||||
fd_device_del_locked(bo->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
|
@ -26,6 +26,10 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
|
@ -38,7 +42,45 @@ static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
|
|||
struct fd_device * kgsl_device_new(int fd);
|
||||
struct fd_device * msm_device_new(int fd);
|
||||
|
||||
drm_public struct fd_device * fd_device_new(int fd)
|
||||
static void
|
||||
add_bucket(struct fd_device *dev, int size)
|
||||
{
|
||||
unsigned int i = dev->num_buckets;
|
||||
|
||||
assert(i < ARRAY_SIZE(dev->cache_bucket));
|
||||
|
||||
list_inithead(&dev->cache_bucket[i].list);
|
||||
dev->cache_bucket[i].size = size;
|
||||
dev->num_buckets++;
|
||||
}
|
||||
|
||||
static void
|
||||
init_cache_buckets(struct fd_device *dev)
|
||||
{
|
||||
unsigned long size, cache_max_size = 64 * 1024 * 1024;
|
||||
|
||||
/* OK, so power of two buckets was too wasteful of memory.
|
||||
* Give 3 other sizes between each power of two, to hopefully
|
||||
* cover things accurately enough. (The alternative is
|
||||
* probably to just go for exact matching of sizes, and assume
|
||||
* that for things like composited window resize the tiled
|
||||
* width/height alignment and rounding of sizes to pages will
|
||||
* get us useful cache hit rates anyway)
|
||||
*/
|
||||
add_bucket(dev, 4096);
|
||||
add_bucket(dev, 4096 * 2);
|
||||
add_bucket(dev, 4096 * 3);
|
||||
|
||||
/* Initialize the linked lists for BO reuse cache. */
|
||||
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
|
||||
add_bucket(dev, size);
|
||||
add_bucket(dev, size + size * 1 / 4);
|
||||
add_bucket(dev, size + size * 2 / 4);
|
||||
add_bucket(dev, size + size * 3 / 4);
|
||||
}
|
||||
}
|
||||
|
||||
struct fd_device * fd_device_new(int fd)
|
||||
{
|
||||
struct fd_device *dev;
|
||||
drmVersionPtr version;
|
||||
|
|
@ -52,16 +94,8 @@ drm_public struct fd_device * fd_device_new(int fd)
|
|||
|
||||
if (!strcmp(version->name, "msm")) {
|
||||
DEBUG_MSG("msm DRM device");
|
||||
if (version->version_major != 1) {
|
||||
ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
|
||||
version->version_minor, version->version_patchlevel);
|
||||
dev = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev = msm_device_new(fd);
|
||||
dev->version = version->version_minor;
|
||||
#if HAVE_FREEDRENO_KGSL
|
||||
#ifdef HAVE_FREEDRENO_KGSL
|
||||
} else if (!strcmp(version->name, "kgsl")) {
|
||||
DEBUG_MSG("kgsl DRM device");
|
||||
dev = kgsl_device_new(fd);
|
||||
|
|
@ -70,8 +104,6 @@ drm_public struct fd_device * fd_device_new(int fd)
|
|||
ERROR_MSG("unknown device: %s", version->name);
|
||||
dev = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
drmFreeVersion(version);
|
||||
|
||||
if (!dev)
|
||||
|
|
@ -81,8 +113,7 @@ out:
|
|||
dev->fd = fd;
|
||||
dev->handle_table = drmHashCreate();
|
||||
dev->name_table = drmHashCreate();
|
||||
fd_bo_cache_init(&dev->bo_cache, FALSE);
|
||||
fd_bo_cache_init(&dev->ring_cache, TRUE);
|
||||
init_cache_buckets(dev);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
|
@@ -90,18 +121,15 @@ out:
/* like fd_device_new() but creates it's own private dup() of the fd
 * which is close()d when the device is finalized.
 */
drm_public struct fd_device * fd_device_new_dup(int fd)
struct fd_device * fd_device_new_dup(int fd)
{
	int dup_fd = dup(fd);
	struct fd_device *dev = fd_device_new(dup_fd);
	struct fd_device *dev = fd_device_new(dup(fd));
	if (dev)
		dev->closefd = 1;
	else
		close(dup_fd);
	return dev;
}

drm_public struct fd_device * fd_device_ref(struct fd_device *dev)
struct fd_device * fd_device_ref(struct fd_device *dev)
{
	atomic_inc(&dev->refcnt);
	return dev;
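The hunk above also contains the fd-leak fix in fd_device_new_dup(): the dup()'d fd is now closed when device creation fails. A usage sketch, not from the diff, with the render-node path chosen purely for illustration:

#include <fcntl.h>
#include <unistd.h>
#include <freedreno/freedreno_drmif.h>

static struct fd_device *open_device(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return NULL;

	/* the device keeps its own private dup(), so our fd can go away */
	struct fd_device *dev = fd_device_new_dup(fd);
	close(fd);
	return dev;
}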
|
|
@ -109,13 +137,12 @@ drm_public struct fd_device * fd_device_ref(struct fd_device *dev)
|
|||
|
||||
static void fd_device_del_impl(struct fd_device *dev)
|
||||
{
|
||||
int close_fd = dev->closefd ? dev->fd : -1;
|
||||
fd_bo_cache_cleanup(&dev->bo_cache, 0);
|
||||
fd_cleanup_bo_cache(dev, 0);
|
||||
drmHashDestroy(dev->handle_table);
|
||||
drmHashDestroy(dev->name_table);
|
||||
if (dev->closefd)
|
||||
close(dev->fd);
|
||||
dev->funcs->destroy(dev);
|
||||
if (close_fd >= 0)
|
||||
close(close_fd);
|
||||
}
|
||||
|
||||
drm_private void fd_device_del_locked(struct fd_device *dev)
|
||||
|
|
@ -125,7 +152,7 @@ drm_private void fd_device_del_locked(struct fd_device *dev)
|
|||
fd_device_del_impl(dev);
|
||||
}
|
||||
|
||||
drm_public void fd_device_del(struct fd_device *dev)
|
||||
void fd_device_del(struct fd_device *dev)
|
||||
{
|
||||
if (!atomic_dec_and_test(&dev->refcnt))
|
||||
return;
|
||||
|
|
@ -133,13 +160,3 @@ drm_public void fd_device_del(struct fd_device *dev)
|
|||
fd_device_del_impl(dev);
|
||||
pthread_mutex_unlock(&table_lock);
|
||||
}
|
||||
|
||||
drm_public int fd_device_fd(struct fd_device *dev)
|
||||
{
|
||||
return dev->fd;
|
||||
}
|
||||
|
||||
drm_public enum fd_version fd_device_version(struct fd_device *dev)
|
||||
{
|
||||
return dev->version;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,15 +32,6 @@
|
|||
#include <xf86drm.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#if defined(__GNUC__)
|
||||
# define drm_deprecated __attribute__((__deprecated__))
|
||||
#else
|
||||
# define drm_deprecated
|
||||
#endif
|
||||
|
||||
/* an empty marker for things that will be deprecated in the future: */
|
||||
#define will_be_deprecated
|
||||
|
||||
struct fd_bo;
|
||||
struct fd_pipe;
|
||||
struct fd_device;
|
||||
|
|
@ -59,9 +50,6 @@ enum fd_param_id {
|
|||
FD_GMEM_SIZE,
|
||||
FD_GPU_ID,
|
||||
FD_CHIP_ID,
|
||||
FD_MAX_FREQ,
|
||||
FD_TIMESTAMP,
|
||||
FD_NR_RINGS, /* # of rings == # of distinct priority levels */
|
||||
};
|
||||
|
||||
/* bo flags: */
|
||||
|
|
@ -88,23 +76,12 @@ struct fd_device * fd_device_new(int fd);
|
|||
struct fd_device * fd_device_new_dup(int fd);
|
||||
struct fd_device * fd_device_ref(struct fd_device *dev);
|
||||
void fd_device_del(struct fd_device *dev);
|
||||
int fd_device_fd(struct fd_device *dev);
|
||||
|
||||
enum fd_version {
|
||||
FD_VERSION_MADVISE = 1, /* kernel supports madvise */
|
||||
FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
|
||||
FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */
|
||||
FD_VERSION_SUBMIT_QUEUES = 3, /* submit queues and multiple priority levels */
|
||||
FD_VERSION_BO_IOVA = 3, /* supports fd_bo_get/put_iova() */
|
||||
};
|
||||
enum fd_version fd_device_version(struct fd_device *dev);
|
||||
|
||||
/* pipe functions:
|
||||
*/
|
||||
|
||||
struct fd_pipe * fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
|
||||
struct fd_pipe * fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio);
|
||||
struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe);
|
||||
void fd_pipe_del(struct fd_pipe *pipe);
|
||||
int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
|
||||
uint64_t *value);
|
||||
|
|
@ -125,8 +102,6 @@ struct fd_bo *fd_bo_from_handle(struct fd_device *dev,
|
|||
uint32_t handle, uint32_t size);
|
||||
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name);
|
||||
struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd);
|
||||
uint64_t fd_bo_get_iova(struct fd_bo *bo);
|
||||
void fd_bo_put_iova(struct fd_bo *bo);
|
||||
struct fd_bo * fd_bo_ref(struct fd_bo *bo);
|
||||
void fd_bo_del(struct fd_bo *bo);
|
||||
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
|
||||
|
|
|
|||
|
|
@ -26,76 +26,56 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include "freedreno_drmif.h"
|
||||
#include "freedreno_priv.h"
|
||||
|
||||
/**
 * priority of zero is highest priority, and higher numeric values are
 * lower priorities
 */
drm_public struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
{
	struct fd_pipe *pipe;
	uint64_t val;
	struct fd_pipe *pipe = NULL;

	if (id > FD_PIPE_MAX) {
		ERROR_MSG("invalid pipe id: %d", id);
		return NULL;
		goto fail;
	}

	if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
		ERROR_MSG("invalid priority!");
		return NULL;
	}

	pipe = dev->funcs->pipe_new(dev, id, prio);
	pipe = dev->funcs->pipe_new(dev, id);
	if (!pipe) {
		ERROR_MSG("allocation failed");
		return NULL;
		goto fail;
	}

	pipe->dev = dev;
	pipe->id = id;
	atomic_set(&pipe->refcnt, 1);

	fd_pipe_get_param(pipe, FD_GPU_ID, &val);
	pipe->gpu_id = val;

	return pipe;
fail:
	if (pipe)
		fd_pipe_del(pipe);
	return NULL;
}

drm_public struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
void fd_pipe_del(struct fd_pipe *pipe)
{
	return fd_pipe_new2(dev, id, 1);
}

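Usage sketch for the priority-aware constructor above, not taken from the tree: fd_pipe_new2() takes a submit-queue priority (0 is highest), and anything other than prio == 1 needs FD_VERSION_SUBMIT_QUEUES kernel support. Error handling is trimmed:

#include <stdint.h>
#include <freedreno/freedreno_drmif.h>

static uint64_t query_gpu_id(struct fd_device *dev)
{
	uint64_t gpu_id = 0;
	struct fd_pipe *pipe = fd_pipe_new2(dev, FD_PIPE_3D, 1);

	if (!pipe)
		return 0;

	fd_pipe_get_param(pipe, FD_GPU_ID, &gpu_id);
	fd_pipe_del(pipe);
	return gpu_id;
}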
drm_public struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe)
|
||||
{
|
||||
atomic_inc(&pipe->refcnt);
|
||||
return pipe;
|
||||
}
|
||||
|
||||
drm_public void fd_pipe_del(struct fd_pipe *pipe)
|
||||
{
|
||||
if (!atomic_dec_and_test(&pipe->refcnt))
|
||||
return;
|
||||
pipe->funcs->destroy(pipe);
|
||||
}
|
||||
|
||||
drm_public int fd_pipe_get_param(struct fd_pipe *pipe,
|
||||
int fd_pipe_get_param(struct fd_pipe *pipe,
|
||||
enum fd_param_id param, uint64_t *value)
|
||||
{
|
||||
return pipe->funcs->get_param(pipe, param, value);
|
||||
}
|
||||
|
||||
drm_public int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
|
||||
int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
|
||||
{
|
||||
return fd_pipe_wait_timeout(pipe, timestamp, ~0);
|
||||
}
|
||||
|
||||
drm_public int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
|
||||
int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
|
||||
uint64_t timeout)
|
||||
{
|
||||
return pipe->funcs->wait(pipe, timestamp, timeout);
|
||||
|
|
|
|||
|
|
@ -29,6 +29,10 @@
|
|||
#ifndef FREEDRENO_PRIV_H_
|
||||
#define FREEDRENO_PRIV_H_
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
|
|
@ -45,26 +49,17 @@
|
|||
#include "xf86atomic.h"
|
||||
|
||||
#include "util_double_list.h"
|
||||
#include "util_math.h"
|
||||
|
||||
#include "freedreno_drmif.h"
|
||||
#include "freedreno_ringbuffer.h"
|
||||
#include "drm.h"
|
||||
|
||||
#ifndef TRUE
|
||||
# define TRUE 1
|
||||
#endif
|
||||
#ifndef FALSE
|
||||
# define FALSE 0
|
||||
#endif
|
||||
|
||||
struct fd_device_funcs {
|
||||
int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
|
||||
uint32_t flags, uint32_t *handle);
|
||||
struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
|
||||
uint32_t size, uint32_t handle);
|
||||
struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
|
||||
unsigned prio);
|
||||
struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id);
|
||||
void (*destroy)(struct fd_device *dev);
|
||||
};
|
||||
|
||||
|
|
@ -73,15 +68,8 @@ struct fd_bo_bucket {
|
|||
struct list_head list;
|
||||
};
|
||||
|
||||
struct fd_bo_cache {
|
||||
struct fd_bo_bucket cache_bucket[14 * 4];
|
||||
int num_buckets;
|
||||
time_t time;
|
||||
};
|
||||
|
||||
struct fd_device {
|
||||
int fd;
|
||||
enum fd_version version;
|
||||
atomic_t refcnt;
|
||||
|
||||
/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
|
||||
|
|
@ -95,29 +83,22 @@ struct fd_device {
|
|||
*/
|
||||
void *handle_table, *name_table;
|
||||
|
||||
const struct fd_device_funcs *funcs;
|
||||
struct fd_device_funcs *funcs;
|
||||
|
||||
struct fd_bo_cache bo_cache;
|
||||
struct fd_bo_cache ring_cache;
|
||||
struct fd_bo_bucket cache_bucket[14 * 4];
|
||||
int num_buckets;
|
||||
time_t time;
|
||||
|
||||
int closefd; /* call close(fd) upon destruction */
|
||||
|
||||
/* just for valgrind: */
|
||||
int bo_size;
|
||||
};
|
||||
|
||||
drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
|
||||
drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
|
||||
drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
|
||||
uint32_t *size, uint32_t flags);
|
||||
drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
|
||||
drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time);
|
||||
|
||||
/* for where @table_lock is already held: */
|
||||
drm_private void fd_device_del_locked(struct fd_device *dev);
|
||||
|
||||
struct fd_pipe_funcs {
|
||||
struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size,
|
||||
enum fd_ringbuffer_flags flags);
|
||||
struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size);
|
||||
int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
|
||||
int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
|
||||
void (*destroy)(struct fd_pipe *pipe);
|
||||
|
|
@ -126,22 +107,22 @@ struct fd_pipe_funcs {
|
|||
struct fd_pipe {
|
||||
struct fd_device *dev;
|
||||
enum fd_pipe_id id;
|
||||
uint32_t gpu_id;
|
||||
atomic_t refcnt;
|
||||
const struct fd_pipe_funcs *funcs;
|
||||
struct fd_pipe_funcs *funcs;
|
||||
};
|
||||
|
||||
struct fd_ringmarker {
|
||||
struct fd_ringbuffer *ring;
|
||||
uint32_t *cur;
|
||||
};
|
||||
|
||||
struct fd_ringbuffer_funcs {
|
||||
void * (*hostptr)(struct fd_ringbuffer *ring);
|
||||
int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
|
||||
int in_fence_fd, int *out_fence_fd);
|
||||
void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
|
||||
int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start);
|
||||
void (*reset)(struct fd_ringbuffer *ring);
|
||||
void (*emit_reloc)(struct fd_ringbuffer *ring,
|
||||
const struct fd_reloc *reloc);
|
||||
uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target, uint32_t cmd_idx);
|
||||
uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
|
||||
void (*emit_reloc_ring)(struct fd_ringbuffer *ring,
|
||||
struct fd_ringmarker *target, struct fd_ringmarker *end);
|
||||
void (*destroy)(struct fd_ringbuffer *ring);
|
||||
};
|
||||
|
||||
|
|
@ -149,8 +130,6 @@ struct fd_bo_funcs {
|
|||
int (*offset)(struct fd_bo *bo, uint64_t *offset);
|
||||
int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
|
||||
void (*cpu_fini)(struct fd_bo *bo);
|
||||
int (*madvise)(struct fd_bo *bo, int willneed);
|
||||
uint64_t (*iova)(struct fd_bo *bo);
|
||||
void (*destroy)(struct fd_bo *bo);
|
||||
};
|
||||
|
||||
|
|
@ -159,23 +138,17 @@ struct fd_bo {
|
|||
uint32_t size;
|
||||
uint32_t handle;
|
||||
uint32_t name;
|
||||
int fd; /* dmabuf handle */
|
||||
void *map;
|
||||
atomic_t refcnt;
|
||||
const struct fd_bo_funcs *funcs;
|
||||
|
||||
enum {
|
||||
NO_CACHE = 0,
|
||||
BO_CACHE = 1,
|
||||
RING_CACHE = 2,
|
||||
} bo_reuse;
|
||||
struct fd_bo_funcs *funcs;
|
||||
|
||||
int bo_reuse;
|
||||
struct list_head list; /* bucket-list entry */
|
||||
time_t free_time; /* time when added to bucket-list */
|
||||
};
|
||||
|
||||
drm_private struct fd_bo *fd_bo_new_ring(struct fd_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
|
||||
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
|
||||
|
||||
#define enable_debug 0 /* TODO make dynamic */
|
||||
|
|
@ -196,63 +169,4 @@ drm_private struct fd_bo *fd_bo_new_ring(struct fd_device *dev,
|
|||
#define U642VOID(x) ((void *)(unsigned long)(x))
|
||||
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
|
||||
|
||||
static inline uint32_t
|
||||
offset_bytes(void *end, void *start)
|
||||
{
|
||||
return ((char *)end) - ((char *)start);
|
||||
}
|
||||
|
||||
#if HAVE_VALGRIND
|
||||
# include <memcheck.h>
|
||||
|
||||
/*
|
||||
* For tracking the backing memory (if valgrind enabled, we force a mmap
|
||||
* for the purposes of tracking)
|
||||
*/
|
||||
static inline void VG_BO_ALLOC(struct fd_bo *bo)
|
||||
{
|
||||
if (bo && RUNNING_ON_VALGRIND) {
|
||||
VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void VG_BO_FREE(struct fd_bo *bo)
|
||||
{
|
||||
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* For tracking bo structs that are in the buffer-cache, so that valgrind
|
||||
* doesn't attribute ownership to the first one to allocate the recycled
|
||||
* bo.
|
||||
*
|
||||
* Note that the list_head in fd_bo is used to track the buffers in cache
|
||||
* so disable error reporting on the range while they are in cache so
|
||||
* valgrind doesn't squawk about list traversal.
|
||||
*
|
||||
*/
|
||||
static inline void VG_BO_RELEASE(struct fd_bo *bo)
|
||||
{
|
||||
if (RUNNING_ON_VALGRIND) {
|
||||
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
|
||||
VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
|
||||
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
|
||||
}
|
||||
}
|
||||
static inline void VG_BO_OBTAIN(struct fd_bo *bo)
|
||||
{
|
||||
if (RUNNING_ON_VALGRIND) {
|
||||
VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
|
||||
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
|
||||
VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
|
||||
static inline void VG_BO_FREE(struct fd_bo *bo) {}
|
||||
static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
|
||||
static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* FREEDRENO_PRIV_H_ */
|
||||
|
|
|
|||
|
|
@ -26,81 +26,51 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "freedreno_drmif.h"
|
||||
#include "freedreno_priv.h"
|
||||
#include "freedreno_ringbuffer.h"
|
||||
|
||||
drm_public struct fd_ringbuffer *
|
||||
fd_ringbuffer_new_flags(struct fd_pipe *pipe, uint32_t size,
|
||||
enum fd_ringbuffer_flags flags)
|
||||
struct fd_ringbuffer *
|
||||
fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
|
||||
{
|
||||
struct fd_ringbuffer *ring;
|
||||
|
||||
/* we can't really support "growable" rb's in general for
|
||||
* stateobj's since we need a single gpu addr (ie. can't
|
||||
* do the trick of a chain of IB packets):
|
||||
*/
|
||||
if (flags & FD_RINGBUFFER_OBJECT)
|
||||
assert(size);
|
||||
|
||||
ring = pipe->funcs->ringbuffer_new(pipe, size, flags);
|
||||
ring = pipe->funcs->ringbuffer_new(pipe, size);
|
||||
if (!ring)
|
||||
return NULL;
|
||||
|
||||
ring->flags = flags;
|
||||
ring->size = size;
|
||||
ring->pipe = pipe;
|
||||
ring->start = ring->funcs->hostptr(ring);
|
||||
ring->end = &(ring->start[ring->size/4]);
|
||||
ring->end = &(ring->start[size/4]);
|
||||
|
||||
ring->cur = ring->last_start = ring->start;
|
||||
|
||||
return ring;
|
||||
}
|
||||
|
||||
drm_public struct fd_ringbuffer *
|
||||
fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
|
||||
void fd_ringbuffer_del(struct fd_ringbuffer *ring)
|
||||
{
|
||||
return fd_ringbuffer_new_flags(pipe, size, 0);
|
||||
}
|
||||
|
||||
drm_public struct fd_ringbuffer *
|
||||
fd_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
|
||||
{
|
||||
return fd_ringbuffer_new_flags(pipe, size, FD_RINGBUFFER_OBJECT);
|
||||
}
|
||||
|
||||
drm_public void fd_ringbuffer_del(struct fd_ringbuffer *ring)
|
||||
{
|
||||
if (!atomic_dec_and_test(&ring->refcnt))
|
||||
return;
|
||||
|
||||
fd_ringbuffer_reset(ring);
|
||||
ring->funcs->destroy(ring);
|
||||
}
|
||||
|
||||
drm_public struct fd_ringbuffer *
|
||||
fd_ringbuffer_ref(struct fd_ringbuffer *ring)
|
||||
{
|
||||
STATIC_ASSERT(sizeof(ring->refcnt) <= sizeof(ring->__pad));
|
||||
atomic_inc(&ring->refcnt);
|
||||
return ring;
|
||||
}
|
||||
|
||||
/* ringbuffers which are IB targets should set the toplevel rb (ie.
|
||||
* the IB source) as it's parent before emitting reloc's, to ensure
|
||||
* the bookkeeping works out properly.
|
||||
*/
|
||||
drm_public void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
|
||||
void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *parent)
|
||||
{
|
||||
/* state objects should not be parented! */
|
||||
assert(!(ring->flags & FD_RINGBUFFER_OBJECT));
|
||||
ring->parent = parent;
|
||||
}
|
||||
|
||||
drm_public void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
|
||||
void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
|
||||
{
|
||||
uint32_t *start = ring->start;
|
||||
if (ring->pipe->id == FD_PIPE_2D)
|
||||
|
|
@@ -110,73 +80,67 @@ drm_public void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
	ring->funcs->reset(ring);
}

drm_public int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
/* maybe get rid of this and use fd_ringmarker_flush() from DDX too? */
int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
{
	return ring->funcs->flush(ring, ring->last_start, -1, NULL);
	return ring->funcs->flush(ring, ring->last_start);
}

drm_public int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
		int *out_fence_fd)
{
	return ring->funcs->flush(ring, ring->last_start, in_fence_fd, out_fence_fd);
}

drm_public void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
{
	assert(ring->funcs->grow); /* unsupported on kgsl */

	/* there is an upper bound on IB size, which appears to be 0x100000 */
	if (ring->size < 0x100000)
		ring->size *= 2;

	ring->funcs->grow(ring, ring->size);

	ring->start = ring->funcs->hostptr(ring);
	ring->end = &(ring->start[ring->size/4]);

	ring->cur = ring->last_start = ring->start;
}
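Sketch of the fence-aware flush added above, not taken from the tree: an in-fence of -1 means "nothing to wait on", and a non-NULL out pointer asks the kernel for an out-fence fd. The single emitted dword is only a placeholder to keep the example short:

#include <unistd.h>
#include <freedreno/freedreno_drmif.h>
#include <freedreno/freedreno_ringbuffer.h>

static int submit_with_out_fence(struct fd_pipe *pipe)
{
	struct fd_ringbuffer *ring = fd_ringbuffer_new(pipe, 0x1000);
	int out_fence = -1, ret;

	if (!ring)
		return -1;

	fd_ringbuffer_emit(ring, 0);	/* placeholder packet dword */

	ret = fd_ringbuffer_flush2(ring, -1, &out_fence);
	if (!ret && out_fence >= 0)
		close(out_fence);	/* or hand it to whoever needs to wait */

	fd_ringbuffer_del(ring);
	return ret;
}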
|
||||
|
||||
drm_public uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
|
||||
uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
|
||||
{
|
||||
return ring->last_timestamp;
|
||||
}
|
||||
|
||||
drm_public void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
|
||||
void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
|
||||
const struct fd_reloc *reloc)
|
||||
{
|
||||
assert(ring->pipe->gpu_id < 500);
|
||||
ring->funcs->emit_reloc(ring, reloc);
|
||||
}
|
||||
|
||||
drm_public void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring,
|
||||
const struct fd_reloc *reloc)
|
||||
void
|
||||
fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringmarker *target,
|
||||
struct fd_ringmarker *end)
|
||||
{
|
||||
ring->funcs->emit_reloc(ring, reloc);
|
||||
assert(target->ring == end->ring);
|
||||
ring->funcs->emit_reloc_ring(ring, target, end);
|
||||
}
|
||||
|
||||
drm_public uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
|
||||
struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
|
||||
{
|
||||
if (!ring->funcs->cmd_count)
|
||||
return 1;
|
||||
return ring->funcs->cmd_count(ring);
|
||||
struct fd_ringmarker *marker = NULL;
|
||||
|
||||
marker = calloc(1, sizeof(*marker));
|
||||
if (!marker) {
|
||||
ERROR_MSG("allocation failed");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
marker->ring = ring;
|
||||
|
||||
fd_ringmarker_mark(marker);
|
||||
|
||||
return marker;
|
||||
}
|
||||
|
||||
drm_public uint32_t
|
||||
fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target, uint32_t cmd_idx)
|
||||
void fd_ringmarker_del(struct fd_ringmarker *marker)
|
||||
{
|
||||
return ring->funcs->emit_reloc_ring(ring, target, cmd_idx);
|
||||
free(marker);
|
||||
}
|
||||
|
||||
drm_public uint32_t
|
||||
fd_ringbuffer_size(struct fd_ringbuffer *ring)
|
||||
void fd_ringmarker_mark(struct fd_ringmarker *marker)
|
||||
{
|
||||
/* only really needed for stateobj ringbuffers, and won't really
|
||||
* do what you expect for growable rb's.. so lets just restrict
|
||||
* this to stateobj's for now:
|
||||
*/
|
||||
assert(ring->flags & FD_RINGBUFFER_OBJECT);
|
||||
return offset_bytes(ring->cur, ring->start);
|
||||
marker->cur = marker->ring->cur;
|
||||
}
|
||||
|
||||
uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
|
||||
struct fd_ringmarker *end)
|
||||
{
|
||||
return end->cur - start->cur;
|
||||
}
|
||||
|
||||
int fd_ringmarker_flush(struct fd_ringmarker *marker)
|
||||
{
|
||||
struct fd_ringbuffer *ring = marker->ring;
|
||||
return ring->funcs->flush(ring, marker->cur);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,84 +33,29 @@
|
|||
|
||||
/* the ringbuffer object is not opaque so that OUT_RING() type stuff
|
||||
* can be inlined. Note that users should not make assumptions about
|
||||
* the size of this struct.
|
||||
* the size of this struct.. more stuff will be added when we eventually
|
||||
* have a kernel driver that can deal w/ reloc's..
|
||||
*/
|
||||
|
||||
struct fd_ringbuffer_funcs;
|
||||
|
||||
enum fd_ringbuffer_flags {
|
||||
|
||||
/* Ringbuffer is a "state object", which is potentially reused
|
||||
* many times, rather than being used in one-shot mode linked
|
||||
* to a parent ringbuffer.
|
||||
*/
|
||||
FD_RINGBUFFER_OBJECT = 0x1,
|
||||
|
||||
/* Hint that the stateobj will be used for streaming state
|
||||
* that is used once or a few times and then discarded.
|
||||
*
|
||||
* For sub-allocation, non streaming stateobj's should be
|
||||
* sub-allocated from a page size buffer, so one long lived
|
||||
* state obj doesn't prevent other pages from being freed.
|
||||
* (Ie. it would be no worse than allocating a page sized
|
||||
* bo for each small non-streaming stateobj).
|
||||
*
|
||||
* But streaming stateobj's could be sub-allocated from a
|
||||
* larger buffer to reduce the alloc/del overhead.
|
||||
*/
|
||||
FD_RINGBUFFER_STREAMING = 0x2,
|
||||
};
|
||||
struct fd_ringmarker;
|
||||
|
||||
struct fd_ringbuffer {
|
||||
int size;
|
||||
uint32_t *cur, *end, *start, *last_start;
|
||||
struct fd_pipe *pipe;
|
||||
const struct fd_ringbuffer_funcs *funcs;
|
||||
struct fd_ringbuffer_funcs *funcs;
|
||||
uint32_t last_timestamp;
|
||||
struct fd_ringbuffer *parent;
|
||||
|
||||
/* for users of fd_ringbuffer to store their own private per-
|
||||
* ringbuffer data
|
||||
*/
|
||||
void *user;
|
||||
|
||||
enum fd_ringbuffer_flags flags;
|
||||
|
||||
/* This is a bit gross, but we can't use atomic_t in exported
|
||||
* headers. OTOH, we don't need the refcnt to be publicly
|
||||
* visible. The only reason that this struct is exported is
|
||||
* because fd_ringbuffer_emit needs to be something that can
|
||||
* be inlined for performance reasons.
|
||||
*/
|
||||
union {
|
||||
#ifdef HAS_ATOMIC_OPS
|
||||
atomic_t refcnt;
|
||||
#endif
|
||||
uint64_t __pad;
|
||||
};
|
||||
};
|
||||
|
||||
struct fd_ringbuffer * fd_ringbuffer_new(struct fd_pipe *pipe,
|
||||
uint32_t size);
|
||||
will_be_deprecated
|
||||
struct fd_ringbuffer * fd_ringbuffer_new_object(struct fd_pipe *pipe,
|
||||
uint32_t size);
|
||||
struct fd_ringbuffer * fd_ringbuffer_new_flags(struct fd_pipe *pipe,
|
||||
uint32_t size, enum fd_ringbuffer_flags flags);
|
||||
|
||||
struct fd_ringbuffer *fd_ringbuffer_ref(struct fd_ringbuffer *ring);
|
||||
void fd_ringbuffer_del(struct fd_ringbuffer *ring);
|
||||
void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *parent);
|
||||
will_be_deprecated
|
||||
void fd_ringbuffer_reset(struct fd_ringbuffer *ring);
|
||||
int fd_ringbuffer_flush(struct fd_ringbuffer *ring);
|
||||
/* in_fence_fd: -1 for no in-fence, else fence fd
|
||||
* out_fence_fd: NULL for no output-fence requested, else ptr to return out-fence
|
||||
*/
|
||||
int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
|
||||
int *out_fence_fd);
|
||||
void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords);
|
||||
uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring);
|
||||
|
||||
static inline void fd_ringbuffer_emit(struct fd_ringbuffer *ring,
|
||||
|
|
@ -127,16 +72,17 @@ struct fd_reloc {
|
|||
uint32_t offset;
|
||||
uint32_t or;
|
||||
int32_t shift;
|
||||
uint32_t orhi; /* used for a5xx+ */
|
||||
};
|
||||
|
||||
/* NOTE: relocs are 2 dwords on a5xx+ */
|
||||
void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
|
||||
void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringmarker *target, struct fd_ringmarker *end);
|
||||
|
||||
void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
|
||||
will_be_deprecated void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
|
||||
uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring);
|
||||
uint32_t fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target, uint32_t cmd_idx);
|
||||
uint32_t fd_ringbuffer_size(struct fd_ringbuffer *ring);
|
||||
struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring);
|
||||
void fd_ringmarker_del(struct fd_ringmarker *marker);
|
||||
void fd_ringmarker_mark(struct fd_ringmarker *marker);
|
||||
uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
|
||||
struct fd_ringmarker *end);
|
||||
int fd_ringmarker_flush(struct fd_ringmarker *marker);
|
||||
|
||||
#endif /* FREEDRENO_RINGBUFFER_H_ */
|
||||
|
|
|
|||
|
|
@ -26,8 +26,14 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include "kgsl_priv.h"
|
||||
|
||||
#include <linux/fb.h>
|
||||
|
||||
static int set_memtype(struct fd_device *dev, uint32_t handle, uint32_t flags)
|
||||
{
|
||||
struct drm_kgsl_gem_memtype req = {
|
||||
|
|
@ -110,11 +116,6 @@ static void kgsl_bo_cpu_fini(struct fd_bo *bo)
|
|||
{
|
||||
}
|
||||
|
||||
static int kgsl_bo_madvise(struct fd_bo *bo, int willneed)
|
||||
{
|
||||
return willneed; /* not supported by kgsl */
|
||||
}
|
||||
|
||||
static void kgsl_bo_destroy(struct fd_bo *bo)
|
||||
{
|
||||
struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
|
||||
|
|
@ -122,11 +123,10 @@ static void kgsl_bo_destroy(struct fd_bo *bo)
|
|||
|
||||
}
|
||||
|
||||
static const struct fd_bo_funcs funcs = {
|
||||
static struct fd_bo_funcs funcs = {
|
||||
.offset = kgsl_bo_offset,
|
||||
.cpu_prep = kgsl_bo_cpu_prep,
|
||||
.cpu_fini = kgsl_bo_cpu_fini,
|
||||
.madvise = kgsl_bo_madvise,
|
||||
.destroy = kgsl_bo_destroy,
|
||||
};
|
||||
|
||||
|
|
@ -168,6 +168,7 @@ drm_private struct fd_bo * kgsl_bo_from_handle(struct fd_device *dev,
|
|||
|
||||
bo = &kgsl_bo->base;
|
||||
bo->funcs = &funcs;
|
||||
bo->fd = -1;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(kgsl_bo->list); i++)
|
||||
list_inithead(&kgsl_bo->list[i]);
|
||||
|
|
@ -175,7 +176,7 @@ drm_private struct fd_bo * kgsl_bo_from_handle(struct fd_device *dev,
|
|||
return bo;
|
||||
}
|
||||
|
||||
drm_public struct fd_bo *
|
||||
struct fd_bo *
|
||||
fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
|
||||
{
|
||||
struct fd_bo *bo;
|
||||
|
|
|
|||
|
|
@ -26,6 +26,10 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
|
@ -38,7 +42,7 @@ static void kgsl_device_destroy(struct fd_device *dev)
|
|||
free(kgsl_dev);
|
||||
}
|
||||
|
||||
static const struct fd_device_funcs funcs = {
|
||||
static struct fd_device_funcs funcs = {
|
||||
.bo_new_handle = kgsl_bo_new_handle,
|
||||
.bo_from_handle = kgsl_bo_from_handle,
|
||||
.pipe_new = kgsl_pipe_new,
|
||||
|
|
@ -57,7 +61,5 @@ drm_private struct fd_device * kgsl_device_new(int fd)
|
|||
dev = &kgsl_dev->base;
|
||||
dev->funcs = &funcs;
|
||||
|
||||
dev->bo_size = sizeof(struct kgsl_bo);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ struct drm_kgsl_gem_create_fd)
|
|||
/* Memory types - these define the source and caching policies
|
||||
of the GEM memory chunk */
|
||||
|
||||
/* Legacy definitions left for compatibility */
|
||||
/* Legacy definitions left for compatability */
|
||||
|
||||
#define DRM_KGSL_GEM_TYPE_EBI 0
|
||||
#define DRM_KGSL_GEM_TYPE_SMI 1
|
||||
|
|
|
|||
|
|
@ -26,6 +26,10 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include "kgsl_priv.h"
|
||||
|
||||
|
||||
|
|
@ -46,11 +50,6 @@ static int kgsl_pipe_get_param(struct fd_pipe *pipe,
|
|||
case FD_CHIP_ID:
|
||||
*value = kgsl_pipe->devinfo.chip_id;
|
||||
return 0;
|
||||
case FD_MAX_FREQ:
|
||||
case FD_TIMESTAMP:
|
||||
case FD_NR_RINGS:
|
||||
/* unsupported on kgsl */
|
||||
return -1;
|
||||
default:
|
||||
ERROR_MSG("invalid param id: %d", param);
|
||||
return -1;
|
||||
|
|
@ -109,7 +108,7 @@ static void kgsl_pipe_destroy(struct fd_pipe *pipe)
|
|||
free(kgsl_pipe);
|
||||
}
|
||||
|
||||
static const struct fd_pipe_funcs funcs = {
|
||||
static struct fd_pipe_funcs funcs = {
|
||||
.ringbuffer_new = kgsl_ringbuffer_new,
|
||||
.get_param = kgsl_pipe_get_param,
|
||||
.wait = kgsl_pipe_wait,
|
||||
|
|
@ -207,7 +206,7 @@ static int getprop(int fd, enum kgsl_property_type type,
|
|||
|
||||
|
||||
drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
|
||||
enum fd_pipe_id id, uint32_t prio)
|
||||
enum fd_pipe_id id)
|
||||
{
|
||||
static const char *paths[] = {
|
||||
[FD_PIPE_3D] = "/dev/kgsl-3d0",
|
||||
|
|
@ -252,11 +251,6 @@ drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
|
|||
GETPROP(fd, VERSION, kgsl_pipe->version);
|
||||
GETPROP(fd, DEVICE_INFO, kgsl_pipe->devinfo);
|
||||
|
||||
if (kgsl_pipe->devinfo.gpu_id >= 500) {
|
||||
ERROR_MSG("64b unsupported with kgsl");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
INFO_MSG("Pipe Info:");
|
||||
INFO_MSG(" Device: %s", paths[id]);
|
||||
INFO_MSG(" Chip-id: %d.%d.%d.%d",
|
||||
|
|
|
|||
|
|
@ -103,10 +103,10 @@ drm_private void kgsl_pipe_post_submit(struct kgsl_pipe *pipe,
|
|||
drm_private void kgsl_pipe_process_pending(struct kgsl_pipe *pipe,
|
||||
uint32_t timestamp);
|
||||
drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
|
||||
enum fd_pipe_id id, uint32_t prio);
|
||||
enum fd_pipe_id id);
|
||||
|
||||
drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
|
||||
uint32_t size, enum fd_ringbuffer_flags flags);
|
||||
uint32_t size);
|
||||
|
||||
drm_private int kgsl_bo_new_handle(struct fd_device *dev,
|
||||
uint32_t size, uint32_t flags, uint32_t *handle);
|
||||
|
|
|
|||
|
|
@ -26,9 +26,12 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "xf86atomic.h"
|
||||
#include "freedreno_ringbuffer.h"
|
||||
#include "kgsl_priv.h"
|
||||
|
||||
|
|
@ -110,8 +113,7 @@ static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
|
|||
return kgsl_ring->bo->hostptr;
|
||||
}
|
||||
|
||||
static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
|
||||
int in_fence_fd, int *out_fence_fd)
|
||||
static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
|
||||
{
|
||||
struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
|
||||
struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
|
||||
|
|
@ -129,9 +131,6 @@ static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_star
|
|||
};
|
||||
int ret;
|
||||
|
||||
assert(in_fence_fd == -1);
|
||||
assert(out_fence_fd == NULL);
|
||||
|
||||
kgsl_pipe_pre_submit(kgsl_pipe);
|
||||
|
||||
/* z180_cmdstream_issueibcmds() is made of fail: */
|
||||
|
|
@ -143,7 +142,7 @@ static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_star
|
|||
ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
|
||||
ibdesc.hostptr = kgsl_ring->bo->hostptr;
|
||||
ibdesc.sizedwords = 0x145;
|
||||
req.timestamp = (uintptr_t)kgsl_ring->bo->hostptr;
|
||||
req.timestamp = (uint32_t)kgsl_ring->bo->hostptr;
|
||||
}
|
||||
|
||||
do {
|
||||
|
|
@ -174,13 +173,12 @@ static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
|
|||
kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
|
||||
}
|
||||
|
||||
static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target, uint32_t cmd_idx)
|
||||
static void kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringmarker *target, struct fd_ringmarker *end)
|
||||
{
|
||||
struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
|
||||
assert(cmd_idx == 0);
|
||||
(*ring->cur++) = target_ring->bo->gpuaddr;
|
||||
return offset_bytes(target->cur, target->start);
|
||||
struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target->ring);
|
||||
(*ring->cur++) = target_ring->bo->gpuaddr +
|
||||
(uint8_t *)target->cur - (uint8_t *)target->ring->start;
|
||||
}
|
||||
|
||||
static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
|
||||
|
|
@ -193,7 +191,7 @@ static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
|
|||
free(kgsl_ring);
|
||||
}
|
||||
|
||||
static const struct fd_ringbuffer_funcs funcs = {
|
||||
static struct fd_ringbuffer_funcs funcs = {
|
||||
.hostptr = kgsl_ringbuffer_hostptr,
|
||||
.flush = kgsl_ringbuffer_flush,
|
||||
.emit_reloc = kgsl_ringbuffer_emit_reloc,
|
||||
|
|
@ -202,13 +200,11 @@ static const struct fd_ringbuffer_funcs funcs = {
|
|||
};
|
||||
|
||||
drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
|
||||
uint32_t size, enum fd_ringbuffer_flags flags)
|
||||
uint32_t size)
|
||||
{
|
||||
struct kgsl_ringbuffer *kgsl_ring;
|
||||
struct fd_ringbuffer *ring = NULL;
|
||||
|
||||
assert(!flags);
|
||||
|
||||
kgsl_ring = calloc(1, sizeof(*kgsl_ring));
|
||||
if (!kgsl_ring) {
|
||||
ERROR_MSG("allocation failed");
|
||||
|
|
@ -216,10 +212,7 @@ drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
|
|||
}
|
||||
|
||||
ring = &kgsl_ring->base;
|
||||
atomic_set(&ring->refcnt, 1);
|
||||
|
||||
ring->funcs = &funcs;
|
||||
ring->size = size;
|
||||
|
||||
kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
|
||||
if (!kgsl_ring->bo) {
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@
|
|||
#define KGSL_FLAGS_SOFT_RESET 0x00000100
|
||||
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
|
||||
|
||||
/* Clock flags to show which clocks should be controlled by a given platform */
|
||||
/* Clock flags to show which clocks should be controled by a given platform */
|
||||
#define KGSL_CLK_SRC 0x00000001
|
||||
#define KGSL_CLK_CORE 0x00000002
|
||||
#define KGSL_CLK_IFACE 0x00000004
|
||||
|
|
@ -295,7 +295,7 @@ struct kgsl_cmdstream_freememontimestamp {
|
|||
|
||||
/* Previous versions of this header had incorrectly defined
|
||||
IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
|
||||
of a write only ioctl. To ensure binary compatibility, the following
|
||||
of a write only ioctl. To ensure binary compatability, the following
|
||||
#define will be used to intercept the incorrect ioctl
|
||||
*/
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,80 +0,0 @@
# Copyright © 2017-2018 Intel Corporation

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

files_freedreno = files(
  'freedreno_device.c',
  'freedreno_pipe.c',
  'freedreno_ringbuffer.c',
  'freedreno_bo.c',
  'freedreno_bo_cache.c',
  'msm/msm_bo.c',
  'msm/msm_device.c',
  'msm/msm_pipe.c',
  'msm/msm_ringbuffer.c',
)

if with_freedreno_kgsl
  files_freedreno += files(
    'kgsl/kgsl_bo.c',
    'kgsl/kgsl_device.c',
    'kgsl/kgsl_pipe.c',
    'kgsl/kgsl_ringbuffer.c',
  )
endif

libdrm_freedreno = library(
  'drm_freedreno',
  [files_freedreno, config_file],
  c_args : libdrm_c_args,
  include_directories : [inc_root, inc_drm],
  dependencies : [dep_valgrind, dep_threads, dep_rt, dep_atomic_ops],
  link_with : libdrm,
  version : '1.@0@.0'.format(patch_ver),
  install : true,
)

ext_libdrm_freedreno = declare_dependency(
  link_with : [libdrm, libdrm_freedreno],
  include_directories : [inc_drm, include_directories('.')],
)

meson.override_dependency('libdrm_freedreno', ext_libdrm_freedreno)

install_headers(
  'freedreno_drmif.h', 'freedreno_ringbuffer.h',
  subdir : 'freedreno'
)

pkg.generate(
  libdrm_freedreno,
  name : 'libdrm_freedreno',
  subdirs : ['.', 'libdrm', 'freedreno'],
  description : 'Userspace interface to freedreno kernel DRM services',
)

test(
  'freedreno-symbols-check',
  symbols_check,
  args : [
    '--lib', libdrm_freedreno,
    '--symbols-file', files('freedreno-symbols.txt'),
    '--nm', prog_nm.full_path(),
  ],
)
@@ -26,6 +26,10 @@
* Rob Clark <robclark@freedesktop.org>
*/

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "msm_priv.h"

static int bo_allocate(struct msm_bo *msm_bo)

@@ -85,37 +89,6 @@ static void msm_bo_cpu_fini(struct fd_bo *bo)
drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req));
}

static int msm_bo_madvise(struct fd_bo *bo, int willneed)
{
struct drm_msm_gem_madvise req = {
.handle = bo->handle,
.madv = willneed ? MSM_MADV_WILLNEED : MSM_MADV_DONTNEED,
};
int ret;

/* older kernels do not support this: */
if (bo->dev->version < FD_VERSION_MADVISE)
return willneed;

ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
if (ret)
return ret;

return req.retained;
}

static uint64_t msm_bo_iova(struct fd_bo *bo)
{
struct drm_msm_gem_info req = {
.handle = bo->handle,
.flags = MSM_INFO_IOVA,
};

drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));

return req.offset;
}

static void msm_bo_destroy(struct fd_bo *bo)
{
struct msm_bo *msm_bo = to_msm_bo(bo);

@@ -123,12 +96,10 @@ static void msm_bo_destroy(struct fd_bo *bo)

}

static const struct fd_bo_funcs funcs = {
static struct fd_bo_funcs funcs = {
.offset = msm_bo_offset,
.cpu_prep = msm_bo_cpu_prep,
.cpu_fini = msm_bo_cpu_fini,
.madvise = msm_bo_madvise,
.iova = msm_bo_iova,
.destroy = msm_bo_destroy,
};

@@ -165,6 +136,7 @@ drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,

bo = &msm_bo->base;
bo->funcs = &funcs;
bo->fd = -1;

return bo;
}
@@ -26,6 +26,10 @@
* Rob Clark <robclark@freedesktop.org>
*/

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

@@ -38,7 +42,7 @@ static void msm_device_destroy(struct fd_device *dev)
free(msm_dev);
}

static const struct fd_device_funcs funcs = {
static struct fd_device_funcs funcs = {
.bo_new_handle = msm_bo_new_handle,
.bo_from_handle = msm_bo_from_handle,
.pipe_new = msm_pipe_new,

@@ -57,7 +61,5 @@ drm_private struct fd_device * msm_device_new(int fd)
dev = &msm_dev->base;
dev->funcs = &funcs;

dev->bo_size = sizeof(struct msm_bo);

return dev;
}
@ -25,15 +25,12 @@
|
|||
#ifndef __MSM_DRM_H__
|
||||
#define __MSM_DRM_H__
|
||||
|
||||
#include <stddef.h>
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints:
|
||||
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
|
||||
* 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
|
||||
* user/kernel compatibility
|
||||
* 2) Keep fields aligned to their size
|
||||
* 3) Because of how drm_ioctl() works, we can add new fields at
|
||||
|
|
@ -49,36 +46,23 @@ extern "C" {
|
|||
#define MSM_PIPE_2D1 0x02
|
||||
#define MSM_PIPE_3D0 0x10
|
||||
|
||||
/* The pipe-id just uses the lower bits, so can be OR'd with flags in
|
||||
* the upper 16 bits (which could be extended further, if needed, maybe
|
||||
* we extend/overload the pipe-id some day to deal with multiple rings,
|
||||
* but even then I don't think we need the full lower 16 bits).
|
||||
*/
|
||||
#define MSM_PIPE_ID_MASK 0xffff
|
||||
#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
|
||||
#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
|
||||
|
||||
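The macros above pack the submit flags word: in the newer variant of this header the pipe id sits in the low 16 bits and the MSM_SUBMIT_* flags (defined further down) in the upper bits, so the two halves are simply OR'd together. A minimal sketch, not part of the diff, with a made-up helper name:

#include <stdint.h>
#include "msm_drm.h"

/* hypothetical helper: combine the 3D pipe id with submit flags,
 * e.g. MSM_SUBMIT_FENCE_FD_OUT; MSM_PIPE_ID()/MSM_PIPE_FLAGS()
 * recover the two halves again. */
static inline uint32_t pack_submit_flags(uint32_t submit_flags)
{
	return MSM_PIPE_3D0 | submit_flags;
}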
/* timeouts are specified in clock-monotonic absolute times (to simplify
|
||||
* restarting interrupted ioctls). The following struct is logically the
|
||||
* same as 'struct timespec' but 32/64b ABI safe.
|
||||
*/
|
||||
struct drm_msm_timespec {
|
||||
__s64 tv_sec; /* seconds */
|
||||
__s64 tv_nsec; /* nanoseconds */
|
||||
int64_t tv_sec; /* seconds */
|
||||
int64_t tv_nsec; /* nanoseconds */
|
||||
};
|
||||
|
||||
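As the comment above notes, wait timeouts are absolute CLOCK_MONOTONIC times. A minimal sketch of converting a relative nanosecond timeout into a drm_msm_timespec, mirroring the get_abs_timeout() helper that appears later in this diff; the function name here is illustrative:

#include <stdint.h>
#include <time.h>
#include "msm_drm.h"

static void set_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
{
	struct timespec t;
	uint32_t s = ns / 1000000000;	/* whole seconds of the delta */

	clock_gettime(CLOCK_MONOTONIC, &t);

	tv->tv_sec = t.tv_sec + s;
	tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
}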
#define MSM_PARAM_GPU_ID 0x01
|
||||
#define MSM_PARAM_GMEM_SIZE 0x02
|
||||
#define MSM_PARAM_CHIP_ID 0x03
|
||||
#define MSM_PARAM_MAX_FREQ 0x04
|
||||
#define MSM_PARAM_TIMESTAMP 0x05
|
||||
#define MSM_PARAM_GMEM_BASE 0x06
|
||||
#define MSM_PARAM_NR_RINGS 0x07
|
||||
|
||||
struct drm_msm_param {
|
||||
__u32 pipe; /* in, MSM_PIPE_x */
|
||||
__u32 param; /* in, MSM_PARAM_x */
|
||||
__u64 value; /* out (get_param) or in (set_param) */
|
||||
uint32_t pipe; /* in, MSM_PIPE_x */
|
||||
uint32_t param; /* in, MSM_PARAM_x */
|
||||
uint64_t value; /* out (get_param) or in (set_param) */
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -100,19 +84,15 @@ struct drm_msm_param {
|
|||
MSM_BO_UNCACHED)
|
||||
|
||||
struct drm_msm_gem_new {
|
||||
__u64 size; /* in */
|
||||
__u32 flags; /* in, mask of MSM_BO_x */
|
||||
__u32 handle; /* out */
|
||||
uint64_t size; /* in */
|
||||
uint32_t flags; /* in, mask of MSM_BO_x */
|
||||
uint32_t handle; /* out */
|
||||
};
|
||||
|
||||
#define MSM_INFO_IOVA 0x01
|
||||
|
||||
#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
|
||||
|
||||
struct drm_msm_gem_info {
|
||||
__u32 handle; /* in */
|
||||
__u32 flags; /* in - combination of MSM_INFO_* flags */
|
||||
__u64 offset; /* out, mmap() offset or iova */
|
||||
uint32_t handle; /* in */
|
||||
uint32_t pad;
|
||||
uint64_t offset; /* out, offset to pass to mmap() */
|
||||
};
|
||||
|
||||
#define MSM_PREP_READ 0x01
|
||||
|
|
@ -122,13 +102,13 @@ struct drm_msm_gem_info {
|
|||
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
|
||||
|
||||
struct drm_msm_gem_cpu_prep {
|
||||
__u32 handle; /* in */
|
||||
__u32 op; /* in, mask of MSM_PREP_x */
|
||||
uint32_t handle; /* in */
|
||||
uint32_t op; /* in, mask of MSM_PREP_x */
|
||||
struct drm_msm_timespec timeout; /* in */
|
||||
};
|
||||
|
||||
struct drm_msm_gem_cpu_fini {
|
||||
__u32 handle; /* in */
|
||||
uint32_t handle; /* in */
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -147,11 +127,11 @@ struct drm_msm_gem_cpu_fini {
|
|||
* otherwise EINVAL.
|
||||
*/
|
||||
struct drm_msm_gem_submit_reloc {
|
||||
__u32 submit_offset; /* in, offset from submit_bo */
|
||||
__u32 or; /* in, value OR'd with result */
|
||||
__s32 shift; /* in, amount of left shift (can be negative) */
|
||||
__u32 reloc_idx; /* in, index of reloc_bo buffer */
|
||||
__u64 reloc_offset; /* in, offset from start of reloc_bo */
|
||||
uint32_t submit_offset; /* in, offset from submit_bo */
|
||||
uint32_t or; /* in, value OR'd with result */
|
||||
int32_t shift; /* in, amount of left shift (can be negative) */
|
||||
uint32_t reloc_idx; /* in, index of reloc_bo buffer */
|
||||
uint64_t reloc_offset; /* in, offset from start of reloc_bo */
|
||||
};
|
||||
|
||||
/* submit-types:
|
||||
|
|
@ -166,13 +146,13 @@ struct drm_msm_gem_submit_reloc {
|
|||
#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
|
||||
#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
|
||||
struct drm_msm_gem_submit_cmd {
|
||||
__u32 type; /* in, one of MSM_SUBMIT_CMD_x */
|
||||
__u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
|
||||
__u32 submit_offset; /* in, offset into submit_bo */
|
||||
__u32 size; /* in, cmdstream size */
|
||||
__u32 pad;
|
||||
__u32 nr_relocs; /* in, number of submit_reloc's */
|
||||
__u64 relocs; /* in, ptr to array of submit_reloc's */
|
||||
uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
|
||||
uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
|
||||
uint32_t submit_offset; /* in, offset into submit_bo */
|
||||
uint32_t size; /* in, cmdstream size */
|
||||
uint32_t pad;
|
||||
uint32_t nr_relocs; /* in, number of submit_reloc's */
|
||||
uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
|
||||
};
|
||||
|
||||
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
|
||||
|
|
@ -192,36 +172,22 @@ struct drm_msm_gem_submit_cmd {
|
|||
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
|
||||
|
||||
struct drm_msm_gem_submit_bo {
|
||||
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u64 presumed; /* in/out, presumed buffer address */
|
||||
uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
|
||||
uint32_t handle; /* in, GEM handle */
|
||||
uint64_t presumed; /* in/out, presumed buffer address */
|
||||
};
|
||||
|
||||
/* Valid submit ioctl flags: */
|
||||
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
|
||||
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
|
||||
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
|
||||
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
|
||||
#define MSM_SUBMIT_FLAGS ( \
|
||||
MSM_SUBMIT_NO_IMPLICIT | \
|
||||
MSM_SUBMIT_FENCE_FD_IN | \
|
||||
MSM_SUBMIT_FENCE_FD_OUT | \
|
||||
MSM_SUBMIT_SUDO | \
|
||||
0)
|
||||
|
||||
/* Each cmdstream submit consists of a table of buffers involved, and
|
||||
* one or more cmdstream buffers. This allows for conditional execution
|
||||
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
|
||||
*/
|
||||
struct drm_msm_gem_submit {
|
||||
__u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
|
||||
__u32 fence; /* out */
|
||||
__u32 nr_bos; /* in, number of submit_bo's */
|
||||
__u32 nr_cmds; /* in, number of submit_cmd's */
|
||||
__u64 bos; /* in, ptr to array of submit_bo's */
|
||||
__u64 cmds; /* in, ptr to array of submit_cmd's */
|
||||
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
|
||||
__u32 queueid; /* in, submitqueue id */
|
||||
uint32_t pipe; /* in, MSM_PIPE_x */
|
||||
uint32_t fence; /* out */
|
||||
uint32_t nr_bos; /* in, number of submit_bo's */
|
||||
uint32_t nr_cmds; /* in, number of submit_cmd's */
|
||||
uint64_t __user bos; /* in, ptr to array of submit_bo's */
|
||||
uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
|
||||
};
|
||||
|
||||
/* The normal way to synchronize with the GPU is just to CPU_PREP on
|
||||
|
|
@ -232,45 +198,9 @@ struct drm_msm_gem_submit {
|
|||
* APIs without requiring a dummy bo to synchronize on.
|
||||
*/
|
||||
struct drm_msm_wait_fence {
|
||||
__u32 fence; /* in */
|
||||
__u32 pad;
|
||||
uint32_t fence; /* in */
|
||||
uint32_t pad;
|
||||
struct drm_msm_timespec timeout; /* in */
|
||||
__u32 queueid; /* in, submitqueue id */
|
||||
};
|
||||
|
||||
/* madvise provides a way to tell the kernel in case a buffers contents
|
||||
* can be discarded under memory pressure, which is useful for userspace
|
||||
* bo cache where we want to optimistically hold on to buffer allocate
|
||||
* and potential mmap, but allow the pages to be discarded under memory
|
||||
* pressure.
|
||||
*
|
||||
* Typical usage would involve madvise(DONTNEED) when buffer enters BO
|
||||
* cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
|
||||
* In the WILLNEED case, 'retained' indicates to userspace whether the
|
||||
* backing pages still exist.
|
||||
*/
|
||||
#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
|
||||
#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
|
||||
#define __MSM_MADV_PURGED 2 /* internal state */
|
||||
|
||||
struct drm_msm_gem_madvise {
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u32 madv; /* in, MSM_MADV_x */
|
||||
__u32 retained; /* out, whether backing store still exists */
|
||||
};
|
||||
|
||||
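The madvise interface described above is driven through the DRM_MSM_GEM_MADVISE ioctl, exactly as the msm_bo_madvise() hunk earlier in this diff does. A minimal sketch, assuming an open msm DRM fd and a valid GEM handle; the helper name is made up:

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"

/* returns <0 on error, otherwise req.retained (0 means the backing
 * pages were already purged) */
static int bo_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_msm_gem_madvise req = {
		.handle = handle,
		.madv = madv,	/* MSM_MADV_WILLNEED or MSM_MADV_DONTNEED */
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
	if (ret)
		return ret;

	return req.retained;
}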
/*
|
||||
* Draw queues allow the user to set specific submission parameter. Command
|
||||
* submissions specify a specific submitqueue to use. ID 0 is reserved for
|
||||
* backwards compatibility as a "default" submitqueue
|
||||
*/
|
||||
|
||||
#define MSM_SUBMITQUEUE_FLAGS (0)
|
||||
|
||||
struct drm_msm_submitqueue {
|
||||
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
|
||||
__u32 prio; /* in, Priority level */
|
||||
__u32 id; /* out, identifier */
|
||||
};
|
||||
|
||||
#define DRM_MSM_GET_PARAM 0x00
|
||||
|
|
@ -283,12 +213,7 @@ struct drm_msm_submitqueue {
|
|||
#define DRM_MSM_GEM_CPU_FINI 0x05
|
||||
#define DRM_MSM_GEM_SUBMIT 0x06
|
||||
#define DRM_MSM_WAIT_FENCE 0x07
|
||||
#define DRM_MSM_GEM_MADVISE 0x08
|
||||
/* placeholder:
|
||||
#define DRM_MSM_GEM_SVM_NEW 0x09
|
||||
*/
|
||||
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
|
||||
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
|
||||
#define DRM_MSM_NUM_IOCTLS 0x08
|
||||
|
||||
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
|
||||
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
|
||||
|
|
@ -297,12 +222,5 @@ struct drm_msm_submitqueue {
|
|||
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
|
||||
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
|
||||
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
|
||||
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
|
||||
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
|
||||
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __MSM_DRM_H__ */
|
||||
|
|
@ -26,27 +26,12 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include "msm_priv.h"
|
||||
|
||||
static int query_param(struct fd_pipe *pipe, uint32_t param,
|
||||
uint64_t *value)
|
||||
{
|
||||
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
|
||||
struct drm_msm_param req = {
|
||||
.pipe = msm_pipe->pipe,
|
||||
.param = param,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GET_PARAM,
|
||||
&req, sizeof(req));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*value = req.value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int msm_pipe_get_param(struct fd_pipe *pipe,
|
||||
enum fd_param_id param, uint64_t *value)
|
||||
|
|
@ -63,12 +48,6 @@ static int msm_pipe_get_param(struct fd_pipe *pipe,
|
|||
case FD_CHIP_ID:
|
||||
*value = msm_pipe->chip_id;
|
||||
return 0;
|
||||
case FD_MAX_FREQ:
|
||||
return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
|
||||
case FD_TIMESTAMP:
|
||||
return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
|
||||
case FD_NR_RINGS:
|
||||
return query_param(pipe, MSM_PARAM_NR_RINGS, value);
|
||||
default:
|
||||
ERROR_MSG("invalid param id: %d", param);
|
||||
return -1;
|
||||
|
|
@ -81,7 +60,6 @@ static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
|
|||
struct fd_device *dev = pipe->dev;
|
||||
struct drm_msm_wait_fence req = {
|
||||
.fence = timestamp,
|
||||
.queueid = to_msm_pipe(pipe)->queue_id,
|
||||
};
|
||||
int ret;
|
||||
|
||||
|
|
@ -96,77 +74,38 @@ static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
|
||||
{
|
||||
struct drm_msm_submitqueue req = {
|
||||
.flags = 0,
|
||||
.prio = prio,
|
||||
};
|
||||
uint64_t nr_rings = 1;
|
||||
int ret;
|
||||
|
||||
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES) {
|
||||
to_msm_pipe(pipe)->queue_id = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
msm_pipe_get_param(pipe, FD_NR_RINGS, &nr_rings);
|
||||
|
||||
req.prio = MIN2(req.prio, MAX2(nr_rings, 1) - 1);
|
||||
|
||||
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_NEW,
|
||||
&req, sizeof(req));
|
||||
if (ret) {
|
||||
ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
|
||||
return ret;
|
||||
}
|
||||
|
||||
to_msm_pipe(pipe)->queue_id = req.id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
|
||||
{
|
||||
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES)
|
||||
return;
|
||||
|
||||
drmCommandWrite(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_CLOSE,
|
||||
&queue_id, sizeof(queue_id));
|
||||
}
|
||||
|
||||
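open_submitqueue()/close_submitqueue() above wrap a pair of ioctls. A minimal sketch of calling them directly, assuming a kernel new enough to report FD_VERSION_SUBMIT_QUEUES (older kernels imply queue id 0); the helper names are made up:

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"

static int queue_create(int fd, uint32_t prio, uint32_t *queue_id)
{
	struct drm_msm_submitqueue req = {
		.flags = 0,
		.prio = prio,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
	if (ret)
		return ret;

	*queue_id = req.id;	/* used as drm_msm_gem_submit::queueid */
	return 0;
}

static void queue_close(int fd, uint32_t queue_id)
{
	drmCommandWrite(fd, DRM_MSM_SUBMITQUEUE_CLOSE, &queue_id, sizeof(queue_id));
}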
static void msm_pipe_destroy(struct fd_pipe *pipe)
|
||||
{
|
||||
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
|
||||
close_submitqueue(pipe, msm_pipe->queue_id);
|
||||
|
||||
if (msm_pipe->suballoc_ring) {
|
||||
fd_ringbuffer_del(msm_pipe->suballoc_ring);
|
||||
msm_pipe->suballoc_ring = NULL;
|
||||
}
|
||||
|
||||
free(msm_pipe);
|
||||
}
|
||||
|
||||
static const struct fd_pipe_funcs funcs = {
|
||||
static struct fd_pipe_funcs funcs = {
|
||||
.ringbuffer_new = msm_ringbuffer_new,
|
||||
.get_param = msm_pipe_get_param,
|
||||
.wait = msm_pipe_wait,
|
||||
.destroy = msm_pipe_destroy,
|
||||
};
|
||||
|
||||
static uint64_t get_param(struct fd_pipe *pipe, uint32_t param)
|
||||
static uint64_t get_param(struct fd_device *dev, uint32_t pipe, uint32_t param)
|
||||
{
|
||||
uint64_t value;
|
||||
int ret = query_param(pipe, param, &value);
|
||||
struct drm_msm_param req = {
|
||||
.pipe = pipe,
|
||||
.param = param,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = drmCommandWriteRead(dev->fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
|
||||
if (ret) {
|
||||
ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
|
||||
return 0;
|
||||
}
|
||||
return value;
|
||||
|
||||
return req.value;
|
||||
}
|
||||
|
||||
drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
|
||||
enum fd_pipe_id id, uint32_t prio)
|
||||
enum fd_pipe_id id)
|
||||
{
|
||||
static const uint32_t pipe_id[] = {
|
||||
[FD_PIPE_3D] = MSM_PIPE_3D0,
|
||||
|
|
@ -184,14 +123,10 @@ drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
|
|||
pipe = &msm_pipe->base;
|
||||
pipe->funcs = &funcs;
|
||||
|
||||
/* initialize before get_param(): */
|
||||
pipe->dev = dev;
|
||||
msm_pipe->pipe = pipe_id[id];
|
||||
|
||||
/* these params should be supported since the first version of drm/msm: */
|
||||
msm_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
|
||||
msm_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
|
||||
msm_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
|
||||
msm_pipe->gpu_id = get_param(dev, pipe_id[id], MSM_PARAM_GPU_ID);
|
||||
msm_pipe->gmem = get_param(dev, pipe_id[id], MSM_PARAM_GMEM_SIZE);
|
||||
msm_pipe->chip_id = get_param(dev, pipe_id[id], MSM_PARAM_CHIP_ID);
|
||||
|
||||
if (! msm_pipe->gpu_id)
|
||||
goto fail;
|
||||
|
|
@ -201,9 +136,6 @@ drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
|
|||
INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id);
|
||||
INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem);
|
||||
|
||||
if (open_submitqueue(pipe, prio))
|
||||
goto fail;
|
||||
|
||||
return pipe;
|
||||
fail:
|
||||
if (pipe)
|
||||
|
|
|
|||
|
|
@ -39,8 +39,6 @@
|
|||
|
||||
struct msm_device {
|
||||
struct fd_device base;
|
||||
struct fd_bo_cache ring_cache;
|
||||
unsigned ring_cnt;
|
||||
};
|
||||
|
||||
static inline struct msm_device * to_msm_device(struct fd_device *x)
|
||||
|
|
@ -56,18 +54,6 @@ struct msm_pipe {
|
|||
uint32_t gpu_id;
|
||||
uint32_t gmem;
|
||||
uint32_t chip_id;
|
||||
uint32_t queue_id;
|
||||
|
||||
/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
|
||||
* the same underlying bo)..
|
||||
*
|
||||
* This takes advantage of each context having it's own fd_pipe,
|
||||
* so we don't have to worry about access from multiple threads.
|
||||
*
|
||||
* We also rely on previous stateobj having been fully constructed
|
||||
* so we can reclaim extra space at it's end.
|
||||
*/
|
||||
struct fd_ringbuffer *suballoc_ring;
|
||||
};
|
||||
|
||||
static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)
|
||||
|
|
@ -76,20 +62,27 @@ static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)
|
|||
}
|
||||
|
||||
drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
|
||||
enum fd_pipe_id id, uint32_t prio);
|
||||
enum fd_pipe_id id);
|
||||
|
||||
drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
|
||||
uint32_t size, enum fd_ringbuffer_flags flags);
|
||||
uint32_t size);
|
||||
|
||||
struct msm_bo {
|
||||
struct fd_bo base;
|
||||
uint64_t offset;
|
||||
uint64_t presumed;
|
||||
/* to avoid excess hashtable lookups, cache the ring this bo was
|
||||
* last emitted on (since that will probably also be the next ring
|
||||
* it is emitted on)
|
||||
/* in the common case, a bo won't be referenced by more than a single
|
||||
* (parent) ring[*]. So to avoid looping over all the bo's in the
|
||||
* reloc table to find the idx of a bo that might already be in the
|
||||
* table, we cache the idx in the bo. But in order to detect the
|
||||
* slow-path where bo is ref'd in multiple rb's, we also must track
|
||||
* the current_ring for which the idx is valid. See bo2idx().
|
||||
*
|
||||
* [*] in case multiple ringbuffers, ie. one toplevel and other rb(s)
|
||||
* used for IB target(s), the toplevel rb is the parent which is
|
||||
* tracking bo's for the submit
|
||||
*/
|
||||
unsigned current_ring_seqno;
|
||||
struct fd_ringbuffer *current_ring;
|
||||
uint32_t idx;
|
||||
};
|
||||
|
||||
|
|
@ -112,30 +105,4 @@ static inline void get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
|
|||
tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
|
||||
}
|
||||
|
||||
/*
* Stupid/simple growable array implementation:
*/

static inline void *
grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
{
if ((nr + 1) > *max) {
if ((*max * 2) < (nr + 1))
*max = nr + 5;
else
*max = *max * 2;
ptr = realloc(ptr, *max * sz);
}
return ptr;
}

#define DECLARE_ARRAY(type, name) \
unsigned nr_ ## name, max_ ## name; \
type * name;

#define APPEND(x, name) ({ \
(x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
(x)->nr_ ## name ++; \
})

#endif /* MSM_PRIV_H_ */
|
|
|||
|
|
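The DECLARE_ARRAY()/APPEND() helpers above back the bos, cmds and relocs tables used throughout the ringbuffer code: APPEND() grows the array when needed and evaluates to the index of the new slot (the post-incremented old count). A small usage sketch, assuming msm_priv.h is included; the container struct is made up:

struct reloc_table {
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};

static void add_reloc(struct reloc_table *t, uint32_t submit_offset)
{
	uint32_t idx = APPEND(t, relocs);	/* may realloc t->relocs */
	t->relocs[idx].submit_offset = submit_offset;
}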
@ -26,178 +26,68 @@
|
|||
* Rob Clark <robclark@freedesktop.org>
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
# include <config.h>
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
#include "xf86atomic.h"
|
||||
#include "freedreno_ringbuffer.h"
|
||||
#include "msm_priv.h"
|
||||
|
||||
/* represents a single cmd buffer in the submit ioctl. Each cmd buffer has
|
||||
* a backing bo, and a reloc table.
|
||||
*/
|
||||
struct msm_cmd {
|
||||
struct list_head list;
|
||||
|
||||
struct fd_ringbuffer *ring;
|
||||
struct fd_bo *ring_bo;
|
||||
|
||||
/* reloc's table: */
|
||||
DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
|
||||
|
||||
uint32_t size;
|
||||
|
||||
/* has cmd already been added to parent rb's submit.cmds table? */
|
||||
int is_appended_to_submit;
|
||||
};
|
||||
|
||||
struct msm_ringbuffer {
|
||||
struct fd_ringbuffer base;
|
||||
struct fd_bo *ring_bo;
|
||||
|
||||
/* submit ioctl related tables:
|
||||
* Note that bos and cmds are tracked by the parent ringbuffer, since
|
||||
* that is global to the submit ioctl call. The reloc's table is tracked
|
||||
* per cmd-buffer.
|
||||
*/
|
||||
/* submit ioctl related tables: */
|
||||
struct {
|
||||
/* bo's table: */
|
||||
DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);
|
||||
struct drm_msm_gem_submit_bo *bos;
|
||||
uint32_t nr_bos, max_bos;
|
||||
|
||||
/* cmd's table: */
|
||||
DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
|
||||
struct drm_msm_gem_submit_cmd *cmds;
|
||||
uint32_t nr_cmds, max_cmds;
|
||||
|
||||
/* reloc's table: */
|
||||
struct drm_msm_gem_submit_reloc *relocs;
|
||||
uint32_t nr_relocs, max_relocs;
|
||||
} submit;
|
||||
|
||||
/* should have matching entries in submit.bos: */
|
||||
/* Note, only in parent ringbuffer */
|
||||
DECLARE_ARRAY(struct fd_bo *, bos);
|
||||
struct fd_bo **bos;
|
||||
uint32_t nr_bos, max_bos;
|
||||
|
||||
/* should have matching entries in submit.cmds: */
|
||||
DECLARE_ARRAY(struct msm_cmd *, cmds);
|
||||
|
||||
/* List of physical cmdstream buffers (msm_cmd) associated with this
|
||||
* logical fd_ringbuffer.
|
||||
*
|
||||
* Note that this is different from msm_ringbuffer::cmds (which
|
||||
* shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
|
||||
* related stuff, and *only* is tracked in the parent ringbuffer.
|
||||
* And only has "completed" cmd buffers (ie. we already know the
|
||||
* size) added via get_cmd().
|
||||
*/
|
||||
struct list_head cmd_list;
|
||||
|
||||
int is_growable;
|
||||
unsigned cmd_count;
|
||||
|
||||
unsigned offset; /* for sub-allocated stateobj rb's */
|
||||
|
||||
unsigned seqno;
|
||||
|
||||
/* maps fd_bo to idx: */
|
||||
void *bo_table;
|
||||
|
||||
/* maps msm_cmd to drm_msm_gem_submit_cmd in parent rb. Each rb has a
|
||||
* list of msm_cmd's which correspond to each chunk of cmdstream in
|
||||
* a 'growable' rb. For each of those we need to create one
|
||||
* drm_msm_gem_submit_cmd in the parent rb which collects the state
|
||||
* for the submit ioctl. Because we can have multiple IB's to the same
|
||||
* target rb (for example, or same stateobj emit multiple times), and
|
||||
* because in theory we can have multiple different rb's that have a
|
||||
* reference to a given target, we need a hashtable to track this per
|
||||
* rb.
|
||||
*/
|
||||
void *cmd_table;
|
||||
struct fd_ringbuffer **rings;
|
||||
uint32_t nr_rings, max_rings;
|
||||
};
|
||||
|
||||
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
|
||||
{
|
||||
if ((nr + 1) > *max) {
|
||||
if ((*max * 2) < (nr + 1))
|
||||
*max = nr + 5;
|
||||
else
|
||||
*max = *max * 2;
|
||||
ptr = realloc(ptr, *max * sz);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
#define APPEND(x, name) ({ \
|
||||
(x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
|
||||
(x)->nr_ ## name ++; \
|
||||
})
|
||||
|
||||
static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
|
||||
{
|
||||
return (struct msm_ringbuffer *)x;
|
||||
}
|
||||
|
||||
#define INIT_SIZE 0x1000
|
||||
|
||||
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
|
||||
return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
|
||||
}
|
||||
|
||||
static void ring_cmd_del(struct msm_cmd *cmd)
|
||||
{
|
||||
fd_bo_del(cmd->ring_bo);
|
||||
list_del(&cmd->list);
|
||||
to_msm_ringbuffer(cmd->ring)->cmd_count--;
|
||||
free(cmd->relocs);
|
||||
free(cmd);
|
||||
}
|
||||
|
||||
static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
|
||||
enum fd_ringbuffer_flags flags)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
|
||||
|
||||
if (!cmd)
|
||||
return NULL;
|
||||
|
||||
cmd->ring = ring;
|
||||
|
||||
/* TODO separate suballoc buffer for small non-streaming state, using
|
||||
* smaller page-sized backing bo's.
|
||||
*/
|
||||
if (flags & FD_RINGBUFFER_STREAMING) {
|
||||
struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
|
||||
unsigned suballoc_offset = 0;
|
||||
struct fd_bo *suballoc_bo = NULL;
|
||||
|
||||
if (msm_pipe->suballoc_ring) {
|
||||
struct msm_ringbuffer *suballoc_ring = to_msm_ringbuffer(msm_pipe->suballoc_ring);
|
||||
|
||||
assert(msm_pipe->suballoc_ring->flags & FD_RINGBUFFER_OBJECT);
|
||||
assert(suballoc_ring->cmd_count == 1);
|
||||
|
||||
suballoc_bo = current_cmd(msm_pipe->suballoc_ring)->ring_bo;
|
||||
|
||||
suballoc_offset = fd_ringbuffer_size(msm_pipe->suballoc_ring) +
|
||||
suballoc_ring->offset;
|
||||
|
||||
suballoc_offset = ALIGN(suballoc_offset, 0x10);
|
||||
|
||||
if ((size + suballoc_offset) > suballoc_bo->size) {
|
||||
suballoc_bo = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!suballoc_bo) {
|
||||
cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, 0x8000, 0);
|
||||
msm_ring->offset = 0;
|
||||
} else {
|
||||
cmd->ring_bo = fd_bo_ref(suballoc_bo);
|
||||
msm_ring->offset = suballoc_offset;
|
||||
}
|
||||
|
||||
if (msm_pipe->suballoc_ring)
|
||||
fd_ringbuffer_del(msm_pipe->suballoc_ring);
|
||||
|
||||
msm_pipe->suballoc_ring = fd_ringbuffer_ref(ring);
|
||||
} else {
|
||||
cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
|
||||
}
|
||||
if (!cmd->ring_bo)
|
||||
goto fail;
|
||||
|
||||
list_addtail(&cmd->list, &msm_ring->cmd_list);
|
||||
msm_ring->cmd_count++;
|
||||
|
||||
return cmd;
|
||||
|
||||
fail:
|
||||
ring_cmd_del(cmd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
|
|
@ -222,24 +112,21 @@ static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t fl
|
|||
struct msm_bo *msm_bo = to_msm_bo(bo);
|
||||
uint32_t idx;
|
||||
pthread_mutex_lock(&idx_lock);
|
||||
if (msm_bo->current_ring_seqno == msm_ring->seqno) {
|
||||
if (!msm_bo->current_ring) {
|
||||
idx = append_bo(ring, bo);
|
||||
msm_bo->current_ring = ring;
|
||||
msm_bo->idx = idx;
|
||||
} else if (msm_bo->current_ring == ring) {
|
||||
idx = msm_bo->idx;
|
||||
} else {
|
||||
void *val;
|
||||
|
||||
if (!msm_ring->bo_table)
|
||||
msm_ring->bo_table = drmHashCreate();
|
||||
|
||||
if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
|
||||
/* found */
|
||||
idx = (uint32_t)(uintptr_t)val;
|
||||
} else {
|
||||
/* slow-path: */
|
||||
for (idx = 0; idx < msm_ring->nr_bos; idx++)
|
||||
if (msm_ring->bos[idx] == bo)
|
||||
break;
|
||||
if (idx == msm_ring->nr_bos) {
|
||||
/* not found */
|
||||
idx = append_bo(ring, bo);
|
||||
val = (void *)(uintptr_t)idx;
|
||||
drmHashInsert(msm_ring->bo_table, bo->handle, val);
|
||||
}
|
||||
msm_bo->current_ring_seqno = msm_ring->seqno;
|
||||
msm_bo->idx = idx;
|
||||
}
|
||||
pthread_mutex_unlock(&idx_lock);
|
||||
if (flags & FD_RELOC_READ)
|
||||
|
|
@ -249,82 +136,72 @@ static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t fl
|
|||
return idx;
|
||||
}
|
||||
|
||||
/* Ensure that submit has corresponding entry in cmds table for the
|
||||
* target cmdstream buffer:
|
||||
*
|
||||
* Returns TRUE if new cmd added (else FALSE if it was already in
|
||||
* the cmds table)
|
||||
*/
|
||||
static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
|
||||
static int check_cmd_bo(struct fd_ringbuffer *ring,
|
||||
struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
|
||||
}
|
||||
|
||||
static uint32_t offset_bytes(void *end, void *start)
|
||||
{
|
||||
return ((char *)end) - ((char *)start);
|
||||
}
|
||||
|
||||
static struct drm_msm_gem_submit_cmd * get_cmd(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target_ring, struct fd_bo *target_bo,
|
||||
uint32_t submit_offset, uint32_t size, uint32_t type)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
struct drm_msm_gem_submit_cmd *cmd;
|
||||
struct drm_msm_gem_submit_cmd *cmd = NULL;
|
||||
uint32_t i;
|
||||
void *val;
|
||||
|
||||
if (!msm_ring->cmd_table)
|
||||
msm_ring->cmd_table = drmHashCreate();
|
||||
|
||||
/* figure out if we already have a cmd buf.. short-circuit hash
|
||||
* lookup if:
|
||||
* - target cmd has never been added to submit.cmds
|
||||
* - target cmd is not a streaming stateobj (which unlike longer
|
||||
* lived CSO stateobj, is not expected to be reused with multiple
|
||||
* submits)
|
||||
*/
|
||||
if (target_cmd->is_appended_to_submit &&
|
||||
!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING) &&
|
||||
!drmHashLookup(msm_ring->cmd_table, (unsigned long)target_cmd, &val)) {
|
||||
i = VOID2U64(val);
|
||||
/* figure out if we already have a cmd buf: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
cmd = &msm_ring->submit.cmds[i];
|
||||
|
||||
assert(cmd->submit_offset == submit_offset);
|
||||
assert(cmd->size == size);
|
||||
assert(cmd->type == type);
|
||||
assert(msm_ring->submit.bos[cmd->submit_idx].handle ==
|
||||
target_cmd->ring_bo->handle);
|
||||
|
||||
return FALSE;
|
||||
if ((cmd->submit_offset == submit_offset) &&
|
||||
(cmd->size == size) &&
|
||||
(cmd->type == type) &&
|
||||
check_cmd_bo(ring, cmd, target_bo))
|
||||
break;
|
||||
cmd = NULL;
|
||||
}
|
||||
|
||||
/* create cmd buf if not: */
|
||||
i = APPEND(&msm_ring->submit, cmds);
|
||||
APPEND(msm_ring, cmds);
|
||||
msm_ring->cmds[i] = target_cmd;
|
||||
cmd = &msm_ring->submit.cmds[i];
|
||||
cmd->type = type;
|
||||
cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
|
||||
cmd->submit_offset = submit_offset;
|
||||
cmd->size = size;
|
||||
cmd->pad = 0;
|
||||
|
||||
target_cmd->is_appended_to_submit = TRUE;
|
||||
|
||||
if (!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING)) {
|
||||
drmHashInsert(msm_ring->cmd_table, (unsigned long)target_cmd,
|
||||
U642VOID(i));
|
||||
if (!cmd) {
|
||||
uint32_t idx = APPEND(&msm_ring->submit, cmds);
|
||||
APPEND(msm_ring, rings);
|
||||
msm_ring->rings[idx] = target_ring;
|
||||
cmd = &msm_ring->submit.cmds[idx];
|
||||
cmd->type = type;
|
||||
cmd->submit_idx = bo2idx(ring, target_bo, FD_RELOC_READ);
|
||||
cmd->submit_offset = submit_offset;
|
||||
cmd->size = size;
|
||||
cmd->pad = 0;
|
||||
}
|
||||
|
||||
target_cmd->size = size;
|
||||
|
||||
return TRUE;
|
||||
return cmd;
|
||||
}
|
||||
|
||||
static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
|
||||
{
|
||||
struct msm_cmd *cmd = current_cmd(ring);
|
||||
uint8_t *base = fd_bo_map(cmd->ring_bo);
|
||||
return base + to_msm_ringbuffer(ring)->offset;
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
return fd_bo_map(msm_ring->ring_bo);
|
||||
}
|
||||
|
||||
static void delete_cmds(struct msm_ringbuffer *msm_ring)
|
||||
static uint32_t find_next_reloc_idx(struct msm_ringbuffer *msm_ring,
|
||||
uint32_t start, uint32_t offset)
|
||||
{
|
||||
struct msm_cmd *cmd, *tmp;
|
||||
uint32_t i;
|
||||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
|
||||
ring_cmd_del(cmd);
|
||||
/* a binary search would be more clever.. */
|
||||
for (i = start; i < msm_ring->submit.nr_relocs; i++) {
|
||||
struct drm_msm_gem_submit_reloc *reloc = &msm_ring->submit.relocs[i];
|
||||
if (reloc->submit_offset >= offset)
|
||||
return i;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static void flush_reset(struct fd_ringbuffer *ring)
|
||||
|
|
@ -332,178 +209,33 @@ static void flush_reset(struct fd_ringbuffer *ring)
|
|||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < msm_ring->nr_bos; i++) {
|
||||
struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
|
||||
if (!msm_bo)
|
||||
continue;
|
||||
msm_bo->current_ring_seqno = 0;
|
||||
fd_bo_del(&msm_bo->base);
|
||||
}
|
||||
|
||||
for (i = 0; i < msm_ring->nr_cmds; i++) {
|
||||
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
|
||||
|
||||
if (msm_cmd->ring == ring)
|
||||
continue;
|
||||
|
||||
if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
|
||||
fd_ringbuffer_del(msm_cmd->ring);
|
||||
/* for each of the cmd buffers, clear their reloc's: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct msm_ringbuffer *target_ring = to_msm_ringbuffer(msm_ring->rings[i]);
|
||||
target_ring->submit.nr_relocs = 0;
|
||||
}
|
||||
|
||||
msm_ring->submit.nr_relocs = 0;
|
||||
msm_ring->submit.nr_cmds = 0;
|
||||
msm_ring->submit.nr_bos = 0;
|
||||
msm_ring->nr_cmds = 0;
|
||||
msm_ring->nr_rings = 0;
|
||||
msm_ring->nr_bos = 0;
|
||||
|
||||
if (msm_ring->bo_table) {
|
||||
drmHashDestroy(msm_ring->bo_table);
|
||||
msm_ring->bo_table = NULL;
|
||||
}
|
||||
|
||||
if (msm_ring->cmd_table) {
|
||||
drmHashDestroy(msm_ring->cmd_table);
|
||||
msm_ring->cmd_table = NULL;
|
||||
}
|
||||
|
||||
if (msm_ring->is_growable) {
|
||||
delete_cmds(msm_ring);
|
||||
} else {
|
||||
/* in old mode, just reset the # of relocs: */
|
||||
current_cmd(ring)->nr_relocs = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
|
||||
static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
|
||||
{
|
||||
uint32_t submit_offset, size, type;
|
||||
struct fd_ringbuffer *parent;
|
||||
|
||||
if (ring->parent) {
|
||||
parent = ring->parent;
|
||||
type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
|
||||
} else {
|
||||
parent = ring;
|
||||
type = MSM_SUBMIT_CMD_BUF;
|
||||
}
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
struct fd_bo *ring_bo = msm_ring->ring_bo;
|
||||
struct drm_msm_gem_submit req = {
|
||||
.pipe = to_msm_pipe(ring->pipe)->pipe,
|
||||
};
|
||||
uint32_t i, j, submit_offset, size;
|
||||
int ret;
|
||||
|
||||
submit_offset = offset_bytes(last_start, ring->start);
|
||||
size = offset_bytes(ring->cur, last_start);
|
||||
|
||||
get_cmd(parent, current_cmd(ring), submit_offset, size, type);
|
||||
}
|
||||
|
||||
static void dump_submit(struct msm_ringbuffer *msm_ring)
|
||||
{
|
||||
uint32_t i, j;
|
||||
|
||||
for (i = 0; i < msm_ring->submit.nr_bos; i++) {
|
||||
struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
|
||||
ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
|
||||
}
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
|
||||
struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
|
||||
ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
|
||||
i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
|
||||
for (j = 0; j < cmd->nr_relocs; j++) {
|
||||
struct drm_msm_gem_submit_reloc *r = &relocs[j];
|
||||
ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
|
||||
", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
|
||||
r->reloc_idx, r->reloc_offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct drm_msm_gem_submit_reloc *
|
||||
handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
|
||||
struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
|
||||
struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < nr_relocs; i++) {
|
||||
unsigned idx = orig_relocs[i].reloc_idx;
|
||||
struct fd_bo *bo = msm_ring->bos[idx];
|
||||
unsigned flags = 0;
|
||||
|
||||
if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
|
||||
flags |= FD_RELOC_READ;
|
||||
if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
|
||||
flags |= FD_RELOC_WRITE;
|
||||
|
||||
relocs[i] = orig_relocs[i];
|
||||
relocs[i].reloc_idx = bo2idx(parent, bo, flags);
|
||||
}
|
||||
|
||||
/* stateobj rb's could have reloc's to other stateobj rb's which didn't
|
||||
* get propagated to the parent rb at _emit_reloc_ring() time (because
|
||||
* the parent wasn't known then), so fix that up now:
|
||||
*/
|
||||
for (i = 0; i < msm_ring->nr_cmds; i++) {
|
||||
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
|
||||
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
|
||||
|
||||
if (msm_ring->cmds[i]->ring == stateobj)
|
||||
continue;
|
||||
|
||||
assert(msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT);
|
||||
|
||||
if (get_cmd(parent, msm_cmd, cmd->submit_offset, cmd->size, cmd->type)) {
|
||||
fd_ringbuffer_ref(msm_cmd->ring);
|
||||
}
|
||||
}
|
||||
|
||||
return relocs;
|
||||
}
|
||||
|
||||
static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
|
||||
int in_fence_fd, int *out_fence_fd)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
|
||||
struct drm_msm_gem_submit req = {
|
||||
.flags = msm_pipe->pipe,
|
||||
.queueid = msm_pipe->queue_id,
|
||||
};
|
||||
uint32_t i;
|
||||
int ret;
|
||||
|
||||
assert(!ring->parent);
|
||||
|
||||
if (in_fence_fd != -1) {
|
||||
req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
|
||||
req.fence_fd = in_fence_fd;
|
||||
}
|
||||
|
||||
if (out_fence_fd) {
|
||||
req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
|
||||
}
|
||||
|
||||
finalize_current_cmd(ring, last_start);
|
||||
|
||||
/* for each of the cmd's fix up their reloc's: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
|
||||
struct drm_msm_gem_submit_reloc *relocs = msm_cmd->relocs;
|
||||
struct drm_msm_gem_submit_cmd *cmd;
|
||||
unsigned nr_relocs = msm_cmd->nr_relocs;
|
||||
|
||||
/* for reusable stateobjs, the reloc table has reloc_idx that
|
||||
* points into it's own private bos table, rather than the global
|
||||
* bos table used for the submit, so we need to add the stateobj's
|
||||
* bos to the global table and construct new relocs table with
|
||||
* corresponding reloc_idx
|
||||
*/
|
||||
if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
|
||||
relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
|
||||
relocs, nr_relocs);
|
||||
}
|
||||
|
||||
cmd = &msm_ring->submit.cmds[i];
|
||||
cmd->relocs = VOID2U64(relocs);
|
||||
cmd->nr_relocs = nr_relocs;
|
||||
}
|
||||
get_cmd(ring, ring, ring_bo, submit_offset, size, MSM_SUBMIT_CMD_BUF);
|
||||
|
||||
/* needs to be after get_cmd() as that could create bos/cmds table: */
|
||||
req.bos = VOID2U64(msm_ring->submit.bos),
|
||||
|
|
@ -511,32 +243,52 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
|
|||
req.cmds = VOID2U64(msm_ring->submit.cmds),
|
||||
req.nr_cmds = msm_ring->submit.nr_cmds;
|
||||
|
||||
DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
|
||||
/* for each of the cmd's fix up their reloc's: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
|
||||
struct msm_ringbuffer *target_ring = to_msm_ringbuffer(msm_ring->rings[i]);
|
||||
uint32_t a = find_next_reloc_idx(target_ring, 0, cmd->submit_offset);
|
||||
uint32_t b = find_next_reloc_idx(target_ring, a, cmd->submit_offset + cmd->size);
|
||||
cmd->relocs = VOID2U64(&target_ring->submit.relocs[a]);
|
||||
cmd->nr_relocs = (b > a) ? b - a : 0;
|
||||
}
|
||||
|
||||
DEBUG_MSG("nr_cmds=%u, nr_bos=%u\n", req.nr_cmds, req.nr_bos);
|
||||
|
||||
ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
|
||||
&req, sizeof(req));
|
||||
if (ret) {
|
||||
ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
|
||||
dump_submit(msm_ring);
|
||||
} else if (!ret) {
|
||||
ERROR_MSG(" pipe: %u", req.pipe);
|
||||
for (i = 0; i < msm_ring->submit.nr_bos; i++) {
|
||||
struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
|
||||
ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
|
||||
}
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
|
||||
struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
|
||||
ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u\n",
|
||||
i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
|
||||
for (j = 0; j < cmd->nr_relocs; j++) {
|
||||
struct drm_msm_gem_submit_reloc *r = &relocs[j];
|
||||
ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
|
||||
", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
|
||||
r->reloc_idx, r->reloc_offset);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* update timestamp on all rings associated with submit: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
|
||||
msm_cmd->ring->last_timestamp = req.fence;
|
||||
}
|
||||
|
||||
if (out_fence_fd) {
|
||||
*out_fence_fd = req.fence_fd;
|
||||
struct fd_ringbuffer *target_ring = msm_ring->rings[i];
|
||||
if (!ret)
|
||||
target_ring->last_timestamp = req.fence;
|
||||
}
|
||||
}
|
||||
|
||||
/* free dynamically constructed stateobj relocs tables: */
|
||||
for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
|
||||
struct msm_cmd *msm_cmd = msm_ring->cmds[i];
|
||||
if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
|
||||
free(U642VOID(cmd->relocs));
|
||||
}
|
||||
for (i = 0; i < msm_ring->nr_bos; i++) {
|
||||
struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
|
||||
msm_bo->current_ring = NULL;
|
||||
fd_bo_del(&msm_bo->base);
|
||||
}
|
||||
|
||||
flush_reset(ring);
|
||||
|
|
@ -544,13 +296,6 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
|
||||
{
|
||||
assert(to_msm_ringbuffer(ring)->is_growable);
|
||||
finalize_current_cmd(ring, ring->last_start);
|
||||
ring_cmd_new(ring, size, 0);
|
||||
}
|
||||
|
||||
static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
|
||||
{
|
||||
flush_reset(ring);
|
||||
|
|
@ -559,165 +304,91 @@ static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
|
|||
static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
|
||||
const struct fd_reloc *r)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
|
||||
struct msm_bo *msm_bo = to_msm_bo(r->bo);
|
||||
struct drm_msm_gem_submit_reloc *reloc;
|
||||
struct msm_cmd *cmd = current_cmd(ring);
|
||||
uint32_t idx = APPEND(cmd, relocs);
|
||||
uint32_t idx = APPEND(&msm_ring->submit, relocs);
|
||||
uint32_t addr;
|
||||
|
||||
reloc = &cmd->relocs[idx];
|
||||
reloc = &msm_ring->submit.relocs[idx];
|
||||
|
||||
reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
|
||||
reloc->reloc_offset = r->offset;
|
||||
reloc->or = r->or;
|
||||
reloc->shift = r->shift;
|
||||
reloc->submit_offset = offset_bytes(ring->cur, ring->start) +
|
||||
to_msm_ringbuffer(ring)->offset;
|
||||
reloc->submit_offset = offset_bytes(ring->cur, ring->start);
|
||||
|
||||
addr = msm_bo->presumed;
|
||||
if (reloc->shift < 0)
|
||||
addr >>= -reloc->shift;
|
||||
if (r->shift < 0)
|
||||
addr >>= -r->shift;
|
||||
else
|
||||
addr <<= reloc->shift;
|
||||
addr <<= r->shift;
|
||||
(*ring->cur++) = addr | r->or;
|
||||
|
||||
if (ring->pipe->gpu_id >= 500) {
|
||||
struct drm_msm_gem_submit_reloc *reloc_hi;
|
||||
|
||||
/* NOTE: grab reloc_idx *before* APPEND() since that could
|
||||
* realloc() meaning that 'reloc' ptr is no longer valid:
|
||||
*/
|
||||
uint32_t reloc_idx = reloc->reloc_idx;
|
||||
|
||||
idx = APPEND(cmd, relocs);
|
||||
|
||||
reloc_hi = &cmd->relocs[idx];
|
||||
|
||||
reloc_hi->reloc_idx = reloc_idx;
|
||||
reloc_hi->reloc_offset = r->offset;
|
||||
reloc_hi->or = r->orhi;
|
||||
reloc_hi->shift = r->shift - 32;
|
||||
reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start) +
|
||||
to_msm_ringbuffer(ring)->offset;
|
||||
|
||||
addr = msm_bo->presumed >> 32;
|
||||
if (reloc_hi->shift < 0)
|
||||
addr >>= -reloc_hi->shift;
|
||||
else
|
||||
addr <<= reloc_hi->shift;
|
||||
(*ring->cur++) = addr | r->orhi;
|
||||
}
|
||||
}
|
||||
|
||||
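For the low dword, emit_reloc() above writes a placeholder into the cmdstream: the bo's presumed GPU address shifted by the reloc's shift (negative meaning a right shift) and OR'd with the reloc's 'or' value, which the kernel can later patch from the reloc table. A minimal sketch of just that computation; the function name is made up and 'or_bits' stands in for the field called 'or':

#include <stdint.h>

static uint32_t reloc_dword(uint64_t presumed, int32_t shift, uint32_t or_bits)
{
	uint32_t addr = (uint32_t)presumed;

	if (shift < 0)
		addr >>= -shift;
	else
		addr <<= shift;

	return addr | or_bits;
}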
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringbuffer *target, uint32_t cmd_idx)
|
||||
static void msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
|
||||
struct fd_ringmarker *target, struct fd_ringmarker *end)
|
||||
{
|
||||
struct msm_cmd *cmd = NULL;
|
||||
struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
|
||||
uint32_t idx = 0;
|
||||
int added_cmd = FALSE;
|
||||
uint32_t size;
|
||||
uint32_t submit_offset = msm_target->offset;
|
||||
struct fd_bo *target_bo = to_msm_ringbuffer(target->ring)->ring_bo;
|
||||
struct drm_msm_gem_submit_cmd *cmd;
|
||||
uint32_t submit_offset, size;
|
||||
|
||||
LIST_FOR_EACH_ENTRY(cmd, &msm_target->cmd_list, list) {
|
||||
if (idx == cmd_idx)
|
||||
break;
|
||||
idx++;
|
||||
}
|
||||
submit_offset = offset_bytes(target->cur, target->ring->start);
|
||||
size = offset_bytes(end->cur, target->cur);
|
||||
|
||||
assert(cmd && (idx == cmd_idx));
|
||||
|
||||
if (idx < (msm_target->cmd_count - 1)) {
|
||||
/* All but the last cmd buffer is fully "baked" (ie. already has
|
||||
* done get_cmd() to add it to the cmds table). But in this case,
|
||||
* the size we get is invalid (since it is calculated from the
|
||||
* last cmd buffer):
|
||||
*/
|
||||
size = cmd->size;
|
||||
} else {
|
||||
struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
|
||||
size = offset_bytes(target->cur, target->start);
|
||||
added_cmd = get_cmd(parent, cmd, submit_offset, size,
|
||||
MSM_SUBMIT_CMD_IB_TARGET_BUF);
|
||||
}
|
||||
cmd = get_cmd(ring, target->ring, target_bo, submit_offset, size,
|
||||
MSM_SUBMIT_CMD_IB_TARGET_BUF);
|
||||
assert(cmd);
|
||||
|
||||
msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
|
||||
.bo = cmd->ring_bo,
|
||||
.bo = target_bo,
|
||||
.flags = FD_RELOC_READ,
|
||||
.offset = submit_offset,
|
||||
});
|
||||
|
||||
/* Unlike traditional ringbuffers which are deleted as a set (after
|
||||
* being flushed), mesa can't really guarantee that a stateobj isn't
|
||||
* destroyed after emitted but before flush, so we must hold a ref:
|
||||
*/
|
||||
if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
|
||||
fd_ringbuffer_ref(target);
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
|
||||
{
|
||||
return to_msm_ringbuffer(ring)->cmd_count;
|
||||
}
|
||||
|
||||
static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
|
||||
|
||||
flush_reset(ring);
|
||||
delete_cmds(msm_ring);
|
||||
|
||||
free(msm_ring->submit.cmds);
|
||||
free(msm_ring->submit.bos);
|
||||
free(msm_ring->bos);
|
||||
free(msm_ring->cmds);
|
||||
if (msm_ring->ring_bo)
|
||||
fd_bo_del(msm_ring->ring_bo);
|
||||
free(msm_ring);
|
||||
}
|
||||
|
||||
static const struct fd_ringbuffer_funcs funcs = {
|
||||
static struct fd_ringbuffer_funcs funcs = {
|
||||
.hostptr = msm_ringbuffer_hostptr,
|
||||
.flush = msm_ringbuffer_flush,
|
||||
.grow = msm_ringbuffer_grow,
|
||||
.reset = msm_ringbuffer_reset,
|
||||
.emit_reloc = msm_ringbuffer_emit_reloc,
|
||||
.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
|
||||
.cmd_count = msm_ringbuffer_cmd_count,
|
||||
.destroy = msm_ringbuffer_destroy,
|
||||
};
|
||||
|
||||
drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
|
||||
uint32_t size, enum fd_ringbuffer_flags flags)
|
||||
uint32_t size)
|
||||
{
|
||||
struct msm_ringbuffer *msm_ring;
|
||||
struct fd_ringbuffer *ring;
|
||||
struct fd_ringbuffer *ring = NULL;
|
||||
|
||||
msm_ring = calloc(1, sizeof(*msm_ring));
|
||||
if (!msm_ring) {
|
||||
ERROR_MSG("allocation failed");
|
||||
return NULL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
|
||||
size = INIT_SIZE;
|
||||
msm_ring->is_growable = TRUE;
|
||||
}
|
||||
|
||||
list_inithead(&msm_ring->cmd_list);
|
||||
msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
|
||||
|
||||
ring = &msm_ring->base;
|
||||
atomic_set(&ring->refcnt, 1);
|
||||
|
||||
ring->funcs = &funcs;
|
||||
ring->size = size;
|
||||
ring->pipe = pipe; /* needed in ring_cmd_new() */
|
||||
|
||||
ring_cmd_new(ring, size, flags);
|
||||
msm_ring->ring_bo = fd_bo_new(pipe->dev, size, 0);
|
||||
if (!msm_ring->ring_bo) {
|
||||
ERROR_MSG("ringbuffer allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return ring;
|
||||
fail:
|
||||
if (ring)
|
||||
fd_ringbuffer_del(ring);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,84 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2021 Collabora, Ltd.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the
|
||||
# next paragraph) shall be included in all copies or substantial
|
||||
# portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
# Helper script that reads drm_fourcc.h and writes a static table with the
|
||||
# simpler format token modifiers
|
||||
|
||||
import sys
|
||||
import re
|
||||
|
||||
filename = sys.argv[1]
|
||||
towrite = sys.argv[2]
|
||||
|
||||
fm_re = {
|
||||
'intel': r'^#define I915_FORMAT_MOD_(\w+)',
|
||||
'others': r'^#define DRM_FORMAT_MOD_((?:ARM|APPLE|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
|
||||
'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
|
||||
}
|
||||
|
||||
def print_fm_intel(f, f_mod):
|
||||
f.write(' {{ DRM_MODIFIER_INTEL({}, {}) }},\n'.format(f_mod, f_mod))
|
||||
|
||||
# generic write func
|
||||
def print_fm(f, vendor, mod, f_name):
|
||||
f.write(' {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name))
|
||||
|
||||
with open(filename, "r") as f:
|
||||
data = f.read()
|
||||
for k, v in fm_re.items():
|
||||
fm_re[k] = re.findall(v, data, flags=re.M)
|
||||
|
||||
with open(towrite, "w") as f:
|
||||
f.write('''\
|
||||
/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
|
||||
that script instead of adding here entries manually! */
|
||||
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
|
||||
''')
|
||||
f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID) },\n')
|
||||
f.write(' { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
|
||||
|
||||
for entry in fm_re['intel']:
|
||||
print_fm_intel(f, entry)
|
||||
|
||||
for entry in fm_re['others']:
|
||||
(vendor, mod) = entry.split('_', 1)
|
||||
if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
|
||||
continue
|
||||
print_fm(f, vendor, mod, mod)
|
||||
|
||||
f.write('''\
|
||||
};
|
||||
''')
|
||||
|
||||
f.write('''\
|
||||
static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
|
||||
''')
|
||||
|
||||
for entry in fm_re['vendors']:
|
||||
f.write(" {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
|
||||
|
||||
f.write('''\
|
||||
};
|
||||
''')
|
||||