pvr: Add powervr winsys implementation

Co-authored-by: Rajnesh Kanwal <rajnesh.kanwal@imgtec.com>
Co-authored-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Co-authored-by: Sarah Walker <sarah.walker@imgtec.com>
Co-authored-by: Matt Coster <matt.coster@imgtec.com>
Co-authored-by: Donald Robson <donald.robson@imgtec.com>
Co-authored-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Frank Binns <frank.binns@imgtec.com>
Signed-off-by: Rajnesh Kanwal <rajnesh.kanwal@imgtec.com>
Signed-off-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Signed-off-by: Sarah Walker <sarah.walker@imgtec.com>
Signed-off-by: Matt Coster <matt.coster@imgtec.com>
Signed-off-by: Donald Robson <donald.robson@imgtec.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15507>
This commit is contained in:
Frank Binns 2022-03-21 11:31:38 +00:00 committed by Marge Bot
parent be82fe7ab9
commit b6f3b55b88
14 changed files with 2819 additions and 2 deletions

View file

@@ -35,6 +35,11 @@ pvr_entrypoints = custom_target(
pvr_files = files(
'winsys/powervr/pvr_drm.c',
'winsys/powervr/pvr_drm_bo.c',
'winsys/powervr/pvr_drm_job_compute.c',
'winsys/powervr/pvr_drm_job_null.c',
'winsys/powervr/pvr_drm_job_render.c',
'winsys/powervr/pvr_drm_job_transfer.c',
'winsys/pvr_winsys.c',
'winsys/pvr_winsys_helper.c',
'pvr_blit.c',

View file

@@ -21,18 +21,732 @@
* SOFTWARE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_device_info.h"
#include "pvr_drm.h"
#include "pvr_drm_bo.h"
#include "pvr_drm_job_compute.h"
#include "pvr_drm_job_null.h"
#include "pvr_drm_job_render.h"
#include "pvr_drm_job_transfer.h"
#include "pvr_drm_public.h"
#include "pvr_private.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
/* Tear down all winsys heaps, in reverse of the order pvr_drm_setup_heaps()
 * initializes them (transfer_frag, vis_test, rgn_hdr, usc, pds, general).
 *
 * pvr_winsys_helper_winsys_heap_finish() returns false when the heap still
 * has live allocations; since this runs on the destroy path the failure is
 * only logged, not propagated.
 */
static void pvr_drm_finish_heaps(struct pvr_drm_winsys *const drm_ws)
{
   if (!pvr_winsys_helper_winsys_heap_finish(
          &drm_ws->transfer_frag_heap.base)) {
      vk_errorf(NULL,
                VK_ERROR_UNKNOWN,
                "Transfer fragment heap in use, can't deinit");
   }
   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->vis_test_heap.base)) {
      vk_errorf(NULL,
                VK_ERROR_UNKNOWN,
                "Visibility test heap in use, can't deinit");
   }
   /* The region header heap is optional; only finish it if it was set up. */
   if (drm_ws->rgn_hdr_heap_present) {
      if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->rgn_hdr_heap.base)) {
         vk_errorf(NULL,
                   VK_ERROR_UNKNOWN,
                   "Region header heap in use, can't deinit");
      }
   }
   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->usc_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "USC heap in use, can't deinit");
   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->pds_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "PDS heap in use, can't deinit");
   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->general_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "General heap in use, can't deinit");
}
static void pvr_drm_winsys_destroy(struct pvr_winsys *ws)
{
struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(ws);
struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = {
.handle = drm_ws->vm_context,
};
pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
drm_ws->pds_vma,
drm_ws->usc_vma);
pvr_drm_finish_heaps(drm_ws);
pvr_ioctl(ws->render_fd,
DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
&destroy_vm_context_args,
VK_ERROR_UNKNOWN);
vk_free(ws->alloc, drm_ws);
}
/**
 * Linear search a uint32_t array for a value.
 *
 * \param array Pointer to array start.
 * \param len Number of uint32_t terms to compare.
 * \param val The value to search for.
 * \return
 *  * true if val is found, or
 *  * false.
 */
static bool
pvr_u32_in_array(const uint32_t *array, const size_t len, const uint32_t val)
{
   /* Index with size_t: the previous int counter was compared against the
    * size_t len (signed/unsigned mismatch) and would misbehave for lengths
    * above INT_MAX.
    */
   for (size_t i = 0; i < len; i++) {
      if (array[i] == val)
         return true;
   }

   return false;
}
/* Query the kernel for the hardware quirks (BRNs) present on this GPU and
 * override the statically derived dev_info quirk flags with the kernel's
 * view.
 *
 * Returns VK_ERROR_INCOMPATIBLE_DRIVER when the kernel reports a
 * "must have" quirk that this driver does not know how to handle.
 */
static VkResult pvr_drm_override_quirks(struct pvr_drm_winsys *drm_ws,
                                        struct pvr_device_info *dev_info)
{
   struct drm_pvr_dev_query_quirks query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query,
   };
   /* Quirks this driver knows how to handle. */
   /* clang-format off */
#define PVR_QUIRKS(x) \
   x(48545) \
   x(49927) \
   x(51764) \
   x(62269)
   /* clang-format on */
#define PVR_QUIRK_EXPAND_COMMA(number) number,
   const uint32_t supported_quirks[] = { PVR_QUIRKS(PVR_QUIRK_EXPAND_COMMA) };
#undef PVR_QUIRK_EXPAND_COMMA
   VkResult result;

   /* Get the length and allocate enough for it */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out;

   /* It's possible there are no quirks, so we can skip the rest. */
   if (!query.count) {
      result = VK_SUCCESS;
      goto out;
   }

   query.quirks = (__u64)vk_zalloc(drm_ws->base.alloc,
                                   sizeof(uint32_t) * query.count,
                                   8,
                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!query.quirks) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* Get the data */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out_free_quirks;

#define PVR_QUIRK_EXPAND_SET(number) \
   dev_info->quirks.has_brn##number = \
      pvr_u32_in_array((uint32_t *)query.quirks, query.count, number);

   /*
    * For each quirk, check that if it is a "must have" that it is set in
    * dev_info, then set the dev_info value to the one received from the kernel.
    */
   PVR_QUIRKS(PVR_QUIRK_EXPAND_SET);
#undef PVR_QUIRK_EXPAND_SET
#undef PVR_QUIRKS

   /* Check all musthave quirks are supported */
   /* NOTE(review): this assumes the first musthave_count entries of
    * query.quirks are the must-have quirks — confirm against the
    * drm_pvr_dev_query_quirks UAPI documentation.
    */
   for (int i = 0; i < query.musthave_count; i++) {
      if (!pvr_u32_in_array(supported_quirks,
                            ARRAY_SIZE(supported_quirks),
                            ((uint32_t *)query.quirks)[i])) {
         result = VK_ERROR_INCOMPATIBLE_DRIVER;
         goto out_free_quirks;
      }
   }

   result = VK_SUCCESS;

out_free_quirks:
   vk_free(drm_ws->base.alloc, (__u64 *)query.quirks);

out:
   return result;
}
/* Query the kernel for the hardware enhancements (ERNs) present on this GPU
 * and override the statically derived dev_info enhancement flags with the
 * kernel's view. Mirrors pvr_drm_override_quirks(), but there is no
 * "must have" handling for enhancements.
 */
static VkResult pvr_drm_override_enhancements(struct pvr_drm_winsys *drm_ws,
                                              struct pvr_device_info *dev_info)
{
   struct drm_pvr_dev_query_enhancements query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   VkResult result;

   /* Get the length and allocate enough for it */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out;

   /* It's possible there are no enhancements, so we can skip the rest. */
   if (!query.count) {
      result = VK_SUCCESS;
      goto out;
   }

   query.enhancements = (__u64)vk_zalloc(drm_ws->base.alloc,
                                         sizeof(uint32_t) * query.count,
                                         8,
                                         VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!query.enhancements) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* Get the data */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out_free_enhancements;

   /* clang-format off */
#define PVR_ENHANCEMENT_SET(number) \
   dev_info->enhancements.has_ern##number = \
      pvr_u32_in_array((uint32_t *)query.enhancements, query.count, number)
   /* clang-format on */

   PVR_ENHANCEMENT_SET(35421);
#undef PVR_ENHANCEMENT_SET

   result = VK_SUCCESS;

out_free_enhancements:
   vk_free(drm_ws->base.alloc, (__u64 *)query.enhancements);

out:
   return result;
}
static VkResult
pvr_drm_get_runtime_info(struct pvr_drm_winsys *drm_ws,
struct drm_pvr_dev_query_runtime_info *const value)
{
struct drm_pvr_ioctl_dev_query_args args = {
.type = DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
.size = sizeof(*value),
.pointer = (__u64)value
};
return pvr_ioctl(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DEV_QUERY,
&args,
VK_ERROR_INITIALIZATION_FAILED);
}
static VkResult
pvr_drm_get_gpu_info(struct pvr_drm_winsys *drm_ws,
struct drm_pvr_dev_query_gpu_info *const value)
{
struct drm_pvr_ioctl_dev_query_args args = {
.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
.size = sizeof(*value),
.pointer = (__u64)value
};
return pvr_ioctl(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DEV_QUERY,
&args,
VK_ERROR_INITIALIZATION_FAILED);
}
/* Populate dev_info and runtime_info for the device backing this winsys.
 *
 * Combines the static device-info table selected by the packed BVNC with
 * the quirk, enhancement, GPU and runtime information queried from the
 * kernel.
 */
static VkResult
pvr_drm_winsys_device_info_init(struct pvr_winsys *ws,
                                struct pvr_device_info *dev_info,
                                struct pvr_device_runtime_info *runtime_info)
{
   struct drm_pvr_dev_query_runtime_info kmd_runtime_info = { 0 };
   struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   VkResult result;
   int ret;

   ret = pvr_device_info_init(dev_info, drm_ws->bvnc);
   if (ret) {
      result = vk_errorf(NULL,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Unsupported BVNC: %u.%u.%u.%u\n",
                         PVR_BVNC_UNPACK_B(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_V(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_N(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_C(drm_ws->bvnc));
      goto err_out;
   }

   /* Let the kernel's view of quirks/enhancements override the static
    * tables.
    */
   result = pvr_drm_override_quirks(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get quirks for this GPU\n");
      goto err_out;
   }

   result = pvr_drm_override_enhancements(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get enhancements for this GPU\n");
      goto err_out;
   }

   /* TODO: When kernel support is added, fetch the actual core count. */
   if (PVR_HAS_FEATURE(dev_info, gpu_multicore_support))
      mesa_logw("Core count fetching is unimplemented. Setting 1 for now.");
   runtime_info->core_count = 1;

   result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
   if (result != VK_SUCCESS)
      goto err_out;

   runtime_info->num_phantoms = gpu_info.num_phantoms;

   result = pvr_drm_get_runtime_info(drm_ws, &kmd_runtime_info);
   if (result != VK_SUCCESS)
      goto err_out;

   /* The kernel reports free list bounds in PM physical pages; convert to
    * bytes.
    */
   runtime_info->min_free_list_size = kmd_runtime_info.free_list_min_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->max_free_list_size = kmd_runtime_info.free_list_max_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->reserved_shared_size =
      kmd_runtime_info.common_store_alloc_region_size;
   runtime_info->total_reserved_partition_size =
      kmd_runtime_info.common_store_partition_space_size;
   runtime_info->max_coeffs = kmd_runtime_info.max_coeffs;
   runtime_info->cdm_max_local_mem_size_regs =
      kmd_runtime_info.cdm_max_local_mem_size_regs;

   return VK_SUCCESS;

err_out:
   return result;
}
/* Expose the winsys heaps to the driver core. When the optional region
 * header heap is absent, the general heap is reported in its place.
 */
static void pvr_drm_winsys_get_heaps_info(struct pvr_winsys *ws,
                                          struct pvr_winsys_heaps *heaps)
{
   struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(ws);

   heaps->general_heap = &drm_ws->general_heap.base;
   heaps->pds_heap = &drm_ws->pds_heap.base;
   heaps->transfer_frag_heap = &drm_ws->transfer_frag_heap.base;
   heaps->usc_heap = &drm_ws->usc_heap.base;
   heaps->vis_test_heap = &drm_ws->vis_test_heap.base;
   heaps->rgn_hdr_heap = drm_ws->rgn_hdr_heap_present
                            ? &drm_ws->rgn_hdr_heap.base
                            : &drm_ws->general_heap.base;
}
/* Winsys entry points dispatched through struct pvr_winsys::ops. */
static const struct pvr_winsys_ops drm_winsys_ops = {
   /* Winsys lifetime and device queries. */
   .destroy = pvr_drm_winsys_destroy,
   .device_info_init = pvr_drm_winsys_device_info_init,
   .get_heaps_info = pvr_drm_winsys_get_heaps_info,
   /* Buffer objects (see pvr_drm_bo.c). */
   .buffer_create = pvr_drm_winsys_buffer_create,
   .buffer_create_from_fd = pvr_drm_winsys_buffer_create_from_fd,
   .buffer_destroy = pvr_drm_winsys_buffer_destroy,
   .buffer_get_fd = pvr_drm_winsys_buffer_get_fd,
   .buffer_map = pvr_drm_winsys_buffer_map,
   .buffer_unmap = pvr_drm_winsys_buffer_unmap,
   /* Device-virtual address space management. */
   .heap_alloc = pvr_drm_winsys_heap_alloc,
   .heap_free = pvr_drm_winsys_heap_free,
   .vma_map = pvr_drm_winsys_vma_map,
   .vma_unmap = pvr_drm_winsys_vma_unmap,
   /* Job submission. */
   .free_list_create = pvr_drm_winsys_free_list_create,
   .free_list_destroy = pvr_drm_winsys_free_list_destroy,
   .render_target_dataset_create = pvr_drm_render_target_dataset_create,
   .render_target_dataset_destroy = pvr_drm_render_target_dataset_destroy,
   .render_ctx_create = pvr_drm_winsys_render_ctx_create,
   .render_ctx_destroy = pvr_drm_winsys_render_ctx_destroy,
   .render_submit = pvr_drm_winsys_render_submit,
   .compute_ctx_create = pvr_drm_winsys_compute_ctx_create,
   .compute_ctx_destroy = pvr_drm_winsys_compute_ctx_destroy,
   .compute_submit = pvr_drm_winsys_compute_submit,
   .transfer_ctx_create = pvr_drm_winsys_transfer_ctx_create,
   .transfer_ctx_destroy = pvr_drm_winsys_transfer_ctx_destroy,
   .transfer_submit = pvr_drm_winsys_transfer_submit,
   .null_job_submit = pvr_drm_winsys_null_job_submit,
};
/* Per-heap summary of the kernel's static data areas: the offset of each
 * known area within the heap plus the combined size of all areas, which
 * forms the carveout at the start of the heap.
 */
struct pvr_static_data_area_description {
   struct pvr_winsys_static_data_offsets offsets;
   size_t total_size;
};
/* Query the kernel for the static data areas and fold them into a per-heap
 * description (offsets of the known areas plus total carveout size).
 *
 * desc_out must be zero-initialized by the caller; entries for heaps with
 * no static data areas are left untouched. Areas with an unknown heap id or
 * usage are skipped (with a debug log for the latter).
 */
static VkResult pvr_drm_get_heap_static_data_descriptions(
   struct pvr_drm_winsys *const drm_ws,
   struct pvr_static_data_area_description desc_out[DRM_PVR_HEAP_COUNT])
{
   struct drm_pvr_dev_query_static_data_areas query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   struct drm_pvr_static_data_area *array;
   VkResult result;

   /* Get the array length */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area array size");
   if (result != VK_SUCCESS)
      goto out;

   array = vk_alloc(drm_ws->base.alloc,
                    sizeof(*array) * query.static_data_areas.count,
                    8,
                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!array) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   VG(VALGRIND_MAKE_MEM_DEFINED(array,
                                sizeof(*array) *
                                   query.static_data_areas.count));
   query.static_data_areas.array = (__u64)array;

   /* Get the array */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area offset array");
   if (result != VK_SUCCESS)
      goto out_free_array;

   for (size_t i = 0; i < query.static_data_areas.count; i++) {
      /* Unknown heaps might cause a write outside the array bounds. */
      if (array[i].location_heap_id >= DRM_PVR_HEAP_COUNT)
         continue;

      switch (array[i].area_usage) {
      case DRM_PVR_STATIC_DATA_AREA_EOT:
         desc_out[array[i].location_heap_id].offsets.eot = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_FENCE:
         desc_out[array[i].location_heap_id].offsets.fence = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_VDM_SYNC:
         desc_out[array[i].location_heap_id].offsets.vdm_sync = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_YUV_CSC:
         desc_out[array[i].location_heap_id].offsets.yuv_csc = array[i].offset;
         break;

      default:
         /* Skip the area entirely: don't count its size either. */
         mesa_logd("Unknown drm static area id. ID: %d.", array[i].area_usage);
         continue;
      }

      desc_out[array[i].location_heap_id].total_size += array[i].size;
   }

   result = VK_SUCCESS;

out_free_array:
   vk_free(drm_ws->base.alloc, array);

out:
   return result;
}
/* Query the kernel's heap layout and initialize every winsys heap.
 *
 * For each DRM heap, the static data areas at the heap base are reserved as
 * a carveout (see pvr_drm_get_heap_static_data_descriptions()) and the rest
 * of the range is handed to a util_vma_heap allocator.
 * DRM_PVR_HEAP_RGNHDR is optional; all other heaps must be present (i.e.
 * have a non-zero size) or initialization fails.
 */
static VkResult pvr_drm_setup_heaps(struct pvr_drm_winsys *const drm_ws)
{
   /* Maps DRM heap ids to the corresponding winsys heap objects. */
   struct pvr_winsys_heap *const winsys_heaps[DRM_PVR_HEAP_COUNT] = {
      [DRM_PVR_HEAP_GENERAL] = &drm_ws->general_heap.base,
      [DRM_PVR_HEAP_PDS_CODE_DATA] = &drm_ws->pds_heap.base,
      [DRM_PVR_HEAP_USC_CODE] = &drm_ws->usc_heap.base,
      [DRM_PVR_HEAP_RGNHDR] = &drm_ws->rgn_hdr_heap.base,
      [DRM_PVR_HEAP_VIS_TEST] = &drm_ws->vis_test_heap.base,
      [DRM_PVR_HEAP_TRANSFER_FRAG] = &drm_ws->transfer_frag_heap.base,
   };
   struct pvr_static_data_area_description
      static_data_descriptions[DRM_PVR_HEAP_COUNT] = { 0 };
   struct drm_pvr_dev_query_heap_info query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   struct drm_pvr_heap *array;
   VkResult result;
   /* Declared outside the loop so the error path can unwind from wherever
    * initialization stopped.
    */
   int i = 0;

   /* Get the array length */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch heap info array size");
   if (result != VK_SUCCESS)
      goto out;

   array = vk_alloc(drm_ws->base.alloc,
                    sizeof(*array) * query.heaps.count,
                    8,
                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!array) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   VG(VALGRIND_MAKE_MEM_DEFINED(array, sizeof(*array) * query.heaps.count));
   query.heaps.array = (__u64)array;

   /* Get the array */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch heap info array");
   if (result != VK_SUCCESS)
      goto out_free_array;

   result = pvr_drm_get_heap_static_data_descriptions(drm_ws,
                                                      static_data_descriptions);
   if (result != VK_SUCCESS)
      goto out_free_array;

   for (; i < query.heaps.count; i++) {
      /* A zero-sized heap means the kernel doesn't provide it. */
      const bool present = array[i].size;
      const pvr_dev_addr_t base_addr = PVR_DEV_ADDR(array[i].base);
      /* The VMA allocator manages the range after the static data carveout. */
      const pvr_dev_addr_t vma_heap_begin_addr =
         PVR_DEV_ADDR_OFFSET(base_addr, static_data_descriptions[i].total_size);
      const uint64_t vma_heap_size =
         array[i].size - static_data_descriptions[i].total_size;

      /* Optional heaps */
      switch (i) {
      case DRM_PVR_HEAP_RGNHDR:
         drm_ws->rgn_hdr_heap_present = present;
         if (!present)
            continue;
         break;
      default:
         break;
      }

      /* Required heaps */
      if (!present) {
         result = vk_errorf(NULL,
                            VK_ERROR_INITIALIZATION_FAILED,
                            "Required heap not present: %d.",
                            i);
         goto err_pvr_drm_heap_finish_all_heaps;
      }

      assert(base_addr.addr);
      assert(static_data_descriptions[i].total_size <= array[i].size);

      winsys_heaps[i]->ws = &drm_ws->base;
      winsys_heaps[i]->base_addr = base_addr;
      winsys_heaps[i]->static_data_carveout_addr = base_addr;
      winsys_heaps[i]->size = array[i].size;
      winsys_heaps[i]->static_data_carveout_size =
         static_data_descriptions[i].total_size;
      winsys_heaps[i]->page_size = 1 << array[i].page_size_log2;
      winsys_heaps[i]->log2_page_size = array[i].page_size_log2;

      /* For now we don't support the heap page size being different from the
       * host page size.
       */
      assert(winsys_heaps[i]->page_size == drm_ws->base.page_size);
      assert(winsys_heaps[i]->log2_page_size == drm_ws->base.log2_page_size);

      winsys_heaps[i]->static_data_offsets =
         static_data_descriptions[i].offsets;

      util_vma_heap_init(&winsys_heaps[i]->vma_heap,
                         vma_heap_begin_addr.addr,
                         vma_heap_size);

      winsys_heaps[i]->vma_heap.alloc_high = false;

      /* It's expected that the heap destroy function to be the last thing that
       * is called, so we start the ref_count at 0.
       */
      p_atomic_set(&winsys_heaps[i]->ref_count, 0);

      if (pthread_mutex_init(&winsys_heaps[i]->lock, NULL)) {
         result = vk_error(NULL, VK_ERROR_INITIALIZATION_FAILED);
         goto err_pvr_drm_heap_finish_all_heaps;
      }
   }

   result = VK_SUCCESS;
   goto out_free_array;

err_pvr_drm_heap_finish_all_heaps:
   /* Undo from where we left off */
   while (--i >= 0) {
      /* Optional heaps */
      switch (i) {
      case DRM_PVR_HEAP_RGNHDR:
         /* Skip the heap if it wasn't initialized. */
         if (drm_ws->rgn_hdr_heap_present)
            break;
         continue;
      default:
         break;
      }

      pvr_winsys_helper_winsys_heap_finish(winsys_heaps[i]);
   }

out_free_array:
   vk_free(drm_ws->base.alloc, array);

out:
   return result;
}
VkResult pvr_drm_winsys_create(const int render_fd,
const int display_fd,
const VkAllocationCallbacks *alloc,
struct pvr_winsys **const ws_out)
{
pvr_finishme("Add implementation once powervr UAPI is stable.");
struct drm_pvr_ioctl_create_vm_context_args create_vm_context_args = { 0 };
struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = { 0 };
struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
return VK_ERROR_INCOMPATIBLE_DRIVER;
struct pvr_drm_winsys *drm_ws;
VkResult result;
drm_ws =
vk_zalloc(alloc, sizeof(*drm_ws), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!drm_ws) {
result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
goto err_out;
}
drm_ws->base.ops = &drm_winsys_ops;
drm_ws->base.render_fd = render_fd;
drm_ws->base.display_fd = display_fd;
drm_ws->base.alloc = alloc;
os_get_page_size(&drm_ws->base.page_size);
drm_ws->base.log2_page_size = util_logbase2(drm_ws->base.page_size);
drm_ws->base.syncobj_type = vk_drm_syncobj_get_type(render_fd);
drm_ws->base.sync_types[0] = &drm_ws->base.syncobj_type;
drm_ws->base.sync_types[1] = NULL;
result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
if (result != VK_SUCCESS)
goto err_vk_free_drm_ws;
drm_ws->bvnc = gpu_info.gpu_id;
result = pvr_ioctl(render_fd,
DRM_IOCTL_PVR_CREATE_VM_CONTEXT,
&create_vm_context_args,
VK_ERROR_INITIALIZATION_FAILED);
if (result != VK_SUCCESS)
goto err_pvr_destroy_vm_context;
drm_ws->vm_context = create_vm_context_args.handle;
result = pvr_drm_setup_heaps(drm_ws);
if (result != VK_SUCCESS)
goto err_pvr_destroy_vm_context;
result =
pvr_winsys_helper_allocate_static_memory(&drm_ws->base,
pvr_drm_heap_alloc_carveout,
&drm_ws->general_heap.base,
&drm_ws->pds_heap.base,
&drm_ws->usc_heap.base,
&drm_ws->general_vma,
&drm_ws->pds_vma,
&drm_ws->usc_vma);
if (result != VK_SUCCESS)
goto err_pvr_heap_finish;
result = pvr_winsys_helper_fill_static_memory(&drm_ws->base,
drm_ws->general_vma,
drm_ws->pds_vma,
drm_ws->usc_vma);
if (result != VK_SUCCESS)
goto err_pvr_free_static_memory;
*ws_out = &drm_ws->base;
return VK_SUCCESS;
err_pvr_free_static_memory:
pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
drm_ws->pds_vma,
drm_ws->usc_vma);
err_pvr_heap_finish:
pvr_drm_finish_heaps(drm_ws);
err_pvr_destroy_vm_context:
destroy_vm_context_args.handle = drm_ws->vm_context;
pvr_ioctl(render_fd,
DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
&destroy_vm_context_args,
VK_ERROR_UNKNOWN);
err_vk_free_drm_ws:
vk_free(alloc, drm_ws);
err_out:
return result;
}

View file

@@ -0,0 +1,71 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_H
#define PVR_DRM_H
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include "pvr_winsys.h"
#include "util/macros.h"
/* DRM winsys heap wrapper. Currently carries no state beyond the common
 * winsys heap, but keeps the type distinct for to_pvr_drm_winsys_heap().
 */
struct pvr_drm_winsys_heap {
   struct pvr_winsys_heap base;
};
/* DRM-backed implementation of struct pvr_winsys. Recover with
 * to_pvr_drm_winsys().
 */
struct pvr_drm_winsys {
   struct pvr_winsys base;

   /* Packed bvnc */
   uint64_t bvnc;

   /* Required heaps */
   struct pvr_drm_winsys_heap general_heap;
   struct pvr_drm_winsys_heap pds_heap;
   struct pvr_drm_winsys_heap usc_heap;
   struct pvr_drm_winsys_heap vis_test_heap;
   struct pvr_drm_winsys_heap transfer_frag_heap;

   /* Optional heaps */
   bool rgn_hdr_heap_present;
   struct pvr_drm_winsys_heap rgn_hdr_heap;

   /* vma's for carveout memory regions */
   struct pvr_winsys_vma *pds_vma;
   struct pvr_winsys_vma *usc_vma;
   struct pvr_winsys_vma *general_vma;

   /* Kernel VM context handle (DRM_IOCTL_PVR_CREATE_VM_CONTEXT). */
   uint32_t vm_context;
};
/*******************************************
helper macros
*******************************************/
#define to_pvr_drm_winsys(ws) container_of((ws), struct pvr_drm_winsys, base)
#define to_pvr_drm_winsys_heap(heap) \
container_of((heap), struct pvr_drm_winsys_heap, base)
#endif /* PVR_DRM_H */

View file

@@ -0,0 +1,513 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <vulkan/vulkan.h>
#include <unistd.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_drm.h"
#include "pvr_drm_bo.h"
#include "pvr_private.h"
#include "pvr_winsys_helper.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "vk_log.h"
/* Allocate a GEM buffer object of `size` bytes with the given DRM flags and
 * return its handle through handle_out.
 */
static VkResult pvr_drm_create_gem_bo(struct pvr_drm_winsys *drm_ws,
                                      uint32_t drm_flags,
                                      uint64_t size,
                                      uint32_t *const handle_out)
{
   struct drm_pvr_ioctl_create_bo_args create_args = {
      .size = size,
      .flags = drm_flags,
   };
   const VkResult result = pvr_ioctlf(drm_ws->base.render_fd,
                                      DRM_IOCTL_PVR_CREATE_BO,
                                      &create_args,
                                      VK_ERROR_OUT_OF_DEVICE_MEMORY,
                                      "Failed to create gem bo");

   if (result == VK_SUCCESS)
      *handle_out = create_args.handle;

   return result;
}
/* Release a GEM handle.
 *
 * The pvr kernel driver has no dedicated DRM_IOCTL_PVR_DESTROY_BO; GEM
 * objects are closed through the generic DRM_IOCTL_GEM_CLOSE instead.
 */
static VkResult pvr_drm_destroy_gem_bo(struct pvr_drm_winsys *drm_ws,
                                       uint32_t handle)
{
   struct drm_gem_close close_args = {
      .handle = handle,
   };

   return pvr_ioctlf(drm_ws->base.render_fd,
                     DRM_IOCTL_GEM_CLOSE,
                     &close_args,
                     VK_ERROR_UNKNOWN,
                     "Failed to destroy gem bo");
}
/* Query the fake mmap offset for a GEM handle, to be passed to mmap() on
 * the render fd.
 */
static VkResult pvr_drm_get_bo_mmap_offset(struct pvr_drm_winsys *drm_ws,
                                           uint32_t handle,
                                           uint64_t *const offset_out)
{
   struct drm_pvr_ioctl_get_bo_mmap_offset_args query_args = {
      .handle = handle,
   };
   const VkResult result = pvr_ioctl(drm_ws->base.render_fd,
                                     DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET,
                                     &query_args,
                                     VK_ERROR_MEMORY_MAP_FAILED);

   if (result == VK_SUCCESS)
      *offset_out = query_args.offset;

   return result;
}
/* Take a reference on the bo. Paired with pvr_drm_buffer_release(). */
static void pvr_drm_buffer_acquire(struct pvr_drm_winsys_bo *drm_bo)
{
   p_atomic_inc(&drm_bo->ref_count);
}
/* Drop a reference on the bo; destroys the GEM object and frees the wrapper
 * when the last reference goes away.
 */
static void pvr_drm_buffer_release(struct pvr_drm_winsys_bo *drm_bo)
{
   if (p_atomic_dec_return(&drm_bo->ref_count) == 0) {
      struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(drm_bo->base.ws);

      pvr_drm_destroy_gem_bo(drm_ws, drm_bo->handle);
      vk_free(drm_ws->base.alloc, drm_bo);
   }
}
/* Allocate a scanout-capable buffer on the display device, then re-import
 * it into the render device through a PRIME fd.
 *
 * The display-side handle and the intermediate fd are only needed to
 * perform the import, so both are released before returning. The imported
 * bo may be larger than requested due to display allocator alignment, hence
 * the `>=` assert.
 */
static VkResult
pvr_drm_display_buffer_create(struct pvr_drm_winsys *drm_ws,
                              uint64_t size,
                              struct pvr_winsys_bo **const bo_out)
{
   uint32_t handle;
   VkResult result;
   int ret;
   int fd;

   result =
      pvr_winsys_helper_display_buffer_create(&drm_ws->base, size, &handle);
   if (result != VK_SUCCESS)
      return result;

   ret = drmPrimeHandleToFD(drm_ws->base.display_fd, handle, DRM_CLOEXEC, &fd);

   /* The fd (if valid) keeps the underlying buffer alive; the display
    * handle is no longer needed either way.
    */
   pvr_winsys_helper_display_buffer_destroy(&drm_ws->base, handle);

   if (ret)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = pvr_drm_winsys_buffer_create_from_fd(&drm_ws->base, fd, bo_out);
   close(fd);
   if (result != VK_SUCCESS)
      return result;

   assert((*bo_out)->size >= size);

   return VK_SUCCESS;
}
/* Translate winsys buffer-object flags into their kernel (DRM_PVR_BO_*)
 * equivalents. Unrecognized winsys bits are ignored.
 */
static uint64_t pvr_drm_get_alloc_flags(uint32_t ws_flags)
{
   static const struct {
      uint32_t ws_bit;
      uint64_t drm_bit;
   } flag_map[] = {
      { PVR_WINSYS_BO_FLAG_GPU_UNCACHED, DRM_PVR_BO_BYPASS_DEVICE_CACHE },
      { PVR_WINSYS_BO_FLAG_PM_FW_PROTECT, DRM_PVR_BO_PM_FW_PROTECT },
      { PVR_WINSYS_BO_FLAG_CPU_ACCESS,
        DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS },
   };
   uint64_t drm_flags = 0U;

   for (size_t i = 0; i < ARRAY_SIZE(flag_map); i++) {
      if (ws_flags & flag_map[i].ws_bit)
         drm_flags |= flag_map[i].drm_bit;
   }

   return drm_flags;
}
/* Create a GPU buffer object.
 *
 * The size is rounded up first to the caller's alignment, then to the
 * winsys page size (both must therefore be powers of two). Display buffers
 * take a separate path through the display device allocator.
 */
VkResult pvr_drm_winsys_buffer_create(struct pvr_winsys *ws,
                                      uint64_t size,
                                      uint64_t alignment,
                                      enum pvr_winsys_bo_type type,
                                      uint32_t ws_flags,
                                      struct pvr_winsys_bo **const bo_out)
{
   const uint64_t drm_flags = pvr_drm_get_alloc_flags(ws_flags);
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   struct pvr_drm_winsys_bo *drm_bo;
   uint32_t handle = 0;
   VkResult result;

   assert(util_is_power_of_two_nonzero(alignment));

   size = ALIGN_POT(size, alignment);
   size = ALIGN_POT(size, ws->page_size);

   if (type == PVR_WINSYS_BO_TYPE_DISPLAY)
      return pvr_drm_display_buffer_create(drm_ws, size, bo_out);

   drm_bo = vk_zalloc(ws->alloc,
                      sizeof(*drm_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = pvr_drm_create_gem_bo(drm_ws, drm_flags, size, &handle);
   if (result != VK_SUCCESS)
      goto err_vk_free_drm_bo;

   drm_bo->base.size = size;
   drm_bo->base.ws = ws;
   drm_bo->handle = handle;
   drm_bo->flags = drm_flags;

   /* The caller owns the initial reference; see pvr_drm_buffer_release(). */
   p_atomic_set(&drm_bo->ref_count, 1);

   *bo_out = &drm_bo->base;

   return VK_SUCCESS;

err_vk_free_drm_bo:
   vk_free(ws->alloc, drm_bo);

   return result;
}
/* Import an externally allocated buffer from a PRIME/dma-buf fd.
 *
 * The buffer size is determined via lseek(fd, 0, SEEK_END), which dma-buf
 * fds support. The fd itself is not consumed; the caller keeps ownership
 * and may close it after this returns.
 */
VkResult
pvr_drm_winsys_buffer_create_from_fd(struct pvr_winsys *ws,
                                     int fd,
                                     struct pvr_winsys_bo **const bo_out)
{
   struct pvr_drm_winsys_bo *drm_bo;
   uint32_t handle;
   VkResult result;
   off_t size;
   int ret;

   drm_bo = vk_zalloc(ws->alloc,
                      sizeof(*drm_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   size = lseek(fd, 0, SEEK_END);
   if (size == (off_t)-1) {
      result = vk_error(NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE);
      goto err_vk_free_drm_bo;
   }

   ret = drmPrimeFDToHandle(ws->render_fd, fd, &handle);
   if (ret) {
      result = vk_error(NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE);
      goto err_vk_free_drm_bo;
   }

   drm_bo->base.ws = ws;
   drm_bo->base.size = (uint64_t)size;
   drm_bo->base.is_imported = true;
   drm_bo->handle = handle;

   p_atomic_set(&drm_bo->ref_count, 1);

   *bo_out = &drm_bo->base;

   return VK_SUCCESS;

err_vk_free_drm_bo:
   vk_free(ws->alloc, drm_bo);

   return result;
}
/* Drop the caller's reference; the bo is destroyed once the last reference
 * (including any outstanding CPU mappings) is released.
 */
void pvr_drm_winsys_buffer_destroy(struct pvr_winsys_bo *bo)
{
   pvr_drm_buffer_release(to_pvr_drm_winsys_bo(bo));
}
/* Export the bo as a PRIME fd (close-on-exec). Ownership of the fd is
 * transferred to the caller.
 */
VkResult pvr_drm_winsys_buffer_get_fd(struct pvr_winsys_bo *bo,
                                      int *const fd_out)
{
   struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(bo->ws);
   struct pvr_drm_winsys_bo *const drm_bo = to_pvr_drm_winsys_bo(bo);

   if (drmPrimeHandleToFD(drm_ws->base.render_fd,
                          drm_bo->handle,
                          DRM_CLOEXEC,
                          fd_out) != 0)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   return VK_SUCCESS;
}
/* CPU-map the whole bo read/write.
 *
 * Fetches the GEM object's fake mmap offset and maps it through the render
 * fd. On success a bo reference is taken, released again by
 * pvr_drm_winsys_buffer_unmap(). The bo must not already be mapped.
 */
VkResult pvr_drm_winsys_buffer_map(struct pvr_winsys_bo *bo)
{
   struct pvr_drm_winsys_bo *drm_bo = to_pvr_drm_winsys_bo(bo);
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(bo->ws);
   uint64_t offset = 0;
   void *map = NULL;
   VkResult result;

   assert(!bo->map);

   result = pvr_drm_get_bo_mmap_offset(drm_ws, drm_bo->handle, &offset);
   if (result != VK_SUCCESS)
      goto err_out;

   result = pvr_mmap(bo->size,
                     PROT_READ | PROT_WRITE,
                     MAP_SHARED,
                     drm_ws->base.render_fd,
                     offset,
                     &map);
   if (result != VK_SUCCESS)
      goto err_out;

   VG(VALGRIND_MALLOCLIKE_BLOCK(map, bo->size, 0, true));

   pvr_drm_buffer_acquire(drm_bo);

   bo->map = map;

   return VK_SUCCESS;

err_out:
   return result;
}
/* Undo pvr_drm_winsys_buffer_map(): unmap the CPU mapping and drop the
 * reference that map took. The bo must currently be mapped.
 */
void pvr_drm_winsys_buffer_unmap(struct pvr_winsys_bo *bo)
{
   struct pvr_drm_winsys_bo *drm_bo = to_pvr_drm_winsys_bo(bo);

   assert(bo->map);

   pvr_munmap(bo->map, bo->size);

   VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));

   bo->map = NULL;

   pvr_drm_buffer_release(drm_bo);
}
/* This function must be used to allocate from a heap carveout and must only be
 * used within the winsys code. This also means whoever is using it, must know
 * what they are doing.
 */
/* Unlike pvr_drm_winsys_heap_alloc(), no device-virtual space is allocated
 * here: the caller supplies a fixed address inside the heap's static data
 * carveout, and this only creates a vma wrapper for it and bumps the heap
 * refcount. Freed via pvr_drm_winsys_heap_free(), which detects carveout
 * addresses.
 */
VkResult pvr_drm_heap_alloc_carveout(struct pvr_winsys_heap *const heap,
                                     const pvr_dev_addr_t carveout_dev_addr,
                                     uint64_t size,
                                     uint64_t alignment,
                                     struct pvr_winsys_vma **const vma_out)
{
   const struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(heap->ws);
   struct pvr_drm_winsys_vma *drm_vma;
   VkResult result;

   assert(util_is_power_of_two_nonzero(alignment));

   drm_vma = vk_zalloc(drm_ws->base.alloc,
                       sizeof(*drm_vma),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_vma) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   /* The powervr kernel mode driver returns a page aligned size when
    * allocating buffers.
    */
   alignment = MAX2(alignment, heap->page_size);
   size = ALIGN_POT(size, alignment);

   /* TODO: Should we keep track of the allocations in the carveout? */

   drm_vma->base.dev_addr = carveout_dev_addr;
   drm_vma->base.heap = heap;
   drm_vma->base.size = size;

   p_atomic_inc(&heap->ref_count);

   *vma_out = &drm_vma->base;

   return VK_SUCCESS;

err_out:
   return result;
}
/* Allocate device-virtual address space from a heap's VMA allocator. The
 * returned vma has no buffer mapped yet (see pvr_drm_winsys_vma_map()).
 */
VkResult pvr_drm_winsys_heap_alloc(struct pvr_winsys_heap *heap,
                                   uint64_t size,
                                   uint64_t alignment,
                                   struct pvr_winsys_vma **const vma_out)
{
   const struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(heap->ws);
   struct pvr_drm_winsys_vma *vma;
   VkResult result;

   vma = vk_alloc(drm_ws->base.alloc,
                  sizeof(*vma),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!vma)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = pvr_winsys_helper_heap_alloc(heap, size, alignment, &vma->base);
   if (result != VK_SUCCESS) {
      vk_free(drm_ws->base.alloc, vma);
      return result;
   }

   *vma_out = &vma->base;

   return VK_SUCCESS;
}
/* Release a vma obtained from pvr_drm_winsys_heap_alloc() or
 * pvr_drm_heap_alloc_carveout().
 *
 * Carveout vmas only hold a heap reference; regular vmas additionally
 * return their virtual range to the heap.
 */
void pvr_drm_winsys_heap_free(struct pvr_winsys_vma *vma)
{
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(vma->heap->ws);
   struct pvr_drm_winsys_vma *drm_vma = to_pvr_drm_winsys_vma(vma);
   const uint64_t carveout_start = vma->heap->static_data_carveout_addr.addr;
   const uint64_t carveout_end =
      carveout_start + vma->heap->static_data_carveout_size;
   const bool is_carveout_vma = vma->dev_addr.addr >= carveout_start &&
                                vma->dev_addr.addr < carveout_end;

   /* A vma with an existing device mapping should not be freed. */
   assert(!drm_vma->base.bo);

   if (is_carveout_vma) {
      /* For the carveout addresses just decrement the reference count. */
      p_atomic_dec(&vma->heap->ref_count);
   } else {
      /* Free allocated virtual space. */
      pvr_winsys_helper_heap_free(vma);
   }

   vk_free(drm_ws->base.alloc, drm_vma);
}
/* Map a bo range into the device address space described by the vma.
 *
 * The kernel maps whole pages, so the bo offset is split into a
 * page-aligned physical offset (sent to the kernel) and a sub-page
 * virtual offset (applied to the returned device address).
 *
 * Returns VK_ERROR_MEMORY_MAP_FAILED if the range does not fit in either
 * the bo or the vma, or if the VM_MAP ioctl fails.
 */
VkResult pvr_drm_winsys_vma_map(struct pvr_winsys_vma *vma,
                                struct pvr_winsys_bo *bo,
                                uint64_t offset,
                                uint64_t size,
                                pvr_dev_addr_t *const dev_addr_out)
{
   struct pvr_drm_winsys_bo *const drm_bo = to_pvr_drm_winsys_bo(bo);
   struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(bo->ws);
   /* Sub-page remainder of the bo offset; fits in 32 bits for any sane
    * page size.
    */
   const uint32_t virt_offset = offset & (vma->heap->page_size - 1);
   const uint64_t aligned_virt_size =
      ALIGN_POT(virt_offset + size, vma->heap->page_size);
   /* Page-aligned bo offset passed to the kernel. This must be 64-bit: the
    * ioctl field is __u64 and a uint32_t here would silently truncate
    * offsets of 4GiB and above.
    */
   const uint64_t phys_page_offset = offset - virt_offset;

   struct drm_pvr_ioctl_vm_map_args args = { .device_addr = vma->dev_addr.addr,
                                             .flags = 0U,
                                             .handle = drm_bo->handle,
                                             .offset = phys_page_offset,
                                             .size = aligned_virt_size,
                                             .vm_context_handle =
                                                drm_ws->vm_context };
   VkResult result;

   /* Address should not be mapped already. */
   assert(!vma->bo);

   /* Check if bo and vma can accommodate the given size and offset. */
   if (ALIGN_POT(offset + size, vma->heap->page_size) > bo->size ||
       aligned_virt_size > vma->size) {
      return vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
   }

   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_VM_MAP,
                      &args,
                      VK_ERROR_MEMORY_MAP_FAILED);
   if (result != VK_SUCCESS)
      return result;

   /* Keep the bo alive for the lifetime of the device mapping. */
   pvr_drm_buffer_acquire(drm_bo);

   vma->bo = &drm_bo->base;
   vma->bo_offset = offset;
   vma->mapped_size = aligned_virt_size;

   if (dev_addr_out)
      *dev_addr_out = PVR_DEV_ADDR_OFFSET(vma->dev_addr, virt_offset);

   return VK_SUCCESS;
}
void pvr_drm_winsys_vma_unmap(struct pvr_winsys_vma *vma)
{
struct pvr_drm_winsys_bo *const drm_bo = to_pvr_drm_winsys_bo(vma->bo);
struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(vma->bo->ws);
struct drm_pvr_ioctl_vm_unmap_args args = {
.vm_context_handle = drm_ws->vm_context,
.device_addr = vma->dev_addr.addr,
.size = vma->mapped_size,
};
/* Address should be mapped. */
assert(vma->bo);
pvr_ioctlf(drm_ws->base.render_fd,
DRM_IOCTL_PVR_VM_UNMAP,
&args,
VK_ERROR_UNKNOWN,
"Unmap failed");
pvr_drm_buffer_release(drm_bo);
vma->bo = NULL;
}

View file

@ -0,0 +1,100 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_BO_H
#define PVR_DRM_BO_H
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include "pvr_drm.h"
#include "pvr_winsys.h"
#include "util/macros.h"
/*******************************************
   struct definitions
 *******************************************/

/* DRM winsys buffer object: wraps a kernel buffer handle around the common
 * winsys bo.
 */
struct pvr_drm_winsys_bo {
   struct pvr_winsys_bo base;

   /* Reference count manipulated via pvr_drm_buffer_acquire()/release()
    * (see pvr_drm_bo.c).
    */
   uint32_t ref_count;
   /* Kernel buffer object handle. */
   uint32_t handle;
   /* Creation flags as passed to the kernel. */
   uint64_t flags;
};

/* DRM winsys vma: currently adds no state beyond the common winsys vma. */
struct pvr_drm_winsys_vma {
   struct pvr_winsys_vma base;
};

/*******************************************
   function prototypes
 *******************************************/

VkResult pvr_drm_winsys_buffer_create(struct pvr_winsys *ws,
                                      uint64_t size,
                                      uint64_t alignment,
                                      enum pvr_winsys_bo_type type,
                                      uint32_t ws_flags,
                                      struct pvr_winsys_bo **const bo_out);
VkResult
pvr_drm_winsys_buffer_create_from_fd(struct pvr_winsys *ws,
                                     int fd,
                                     struct pvr_winsys_bo **const bo_out);
void pvr_drm_winsys_buffer_destroy(struct pvr_winsys_bo *bo);

VkResult pvr_drm_winsys_buffer_get_fd(struct pvr_winsys_bo *bo,
                                      int *const fd_out);

VkResult pvr_drm_winsys_buffer_map(struct pvr_winsys_bo *bo);
void pvr_drm_winsys_buffer_unmap(struct pvr_winsys_bo *bo);

VkResult pvr_drm_heap_alloc_carveout(struct pvr_winsys_heap *const heap,
                                     const pvr_dev_addr_t carveout_dev_addr,
                                     uint64_t size,
                                     uint64_t alignment,
                                     struct pvr_winsys_vma **vma_out);
VkResult pvr_drm_winsys_heap_alloc(struct pvr_winsys_heap *heap,
                                   uint64_t size,
                                   uint64_t alignment,
                                   struct pvr_winsys_vma **vma_out);
void pvr_drm_winsys_heap_free(struct pvr_winsys_vma *vma);

VkResult pvr_drm_winsys_vma_map(struct pvr_winsys_vma *vma,
                                struct pvr_winsys_bo *bo,
                                uint64_t offset,
                                uint64_t size,
                                pvr_dev_addr_t *dev_addr_out);
void pvr_drm_winsys_vma_unmap(struct pvr_winsys_vma *vma);

/*******************************************
   helper macros
 *******************************************/

/* Downcasts from the common winsys types to the DRM winsys wrappers. */
#define to_pvr_drm_winsys_bo(bo) \
   container_of((bo), struct pvr_drm_winsys_bo, base)
#define to_pvr_drm_winsys_vma(vma) \
   container_of((vma), struct pvr_drm_winsys_vma, base)

#endif /* PVR_DRM_BO_H */

View file

@ -0,0 +1,46 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_JOB_COMMON_H
#define PVR_DRM_JOB_COMMON_H
#include "drm-uapi/pvr_drm.h"
#include "pvr_winsys.h"
#include "util/macros.h"
/* Translate a winsys context priority into its DRM uapi equivalent.
 *
 * PVR_WINSYS_CTX_PRIORITY_MEDIUM maps to DRM_PVR_CTX_PRIORITY_NORMAL; the
 * other levels map one-to-one.
 *
 * Declared "static inline" rather than plain "static": this function is
 * defined in a header, and a non-inline static definition would be emitted
 * separately in (and produce -Wunused-function warnings from) every
 * translation unit that includes the header without calling it.
 */
static inline enum drm_pvr_ctx_priority
pvr_drm_from_winsys_priority(enum pvr_winsys_ctx_priority priority)
{
   switch (priority) {
   case PVR_WINSYS_CTX_PRIORITY_HIGH:
      return DRM_PVR_CTX_PRIORITY_HIGH;
   case PVR_WINSYS_CTX_PRIORITY_MEDIUM:
      return DRM_PVR_CTX_PRIORITY_NORMAL;
   case PVR_WINSYS_CTX_PRIORITY_LOW:
      return DRM_PVR_CTX_PRIORITY_LOW;
   default:
      unreachable("Invalid winsys context priority.");
   }
}
#endif /* PVR_DRM_JOB_COMMON_H */

View file

@ -0,0 +1,220 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_drm.h"
#include "pvr_drm_job_common.h"
#include "pvr_drm_job_compute.h"
#include "pvr_private.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
/* DRM backing for a winsys compute context. */
struct pvr_drm_winsys_compute_ctx {
   struct pvr_winsys_compute_ctx base;

   /* Handle to kernel context. */
   uint32_t handle;
};

/* Downcast from the common winsys compute context. */
#define to_pvr_drm_winsys_compute_ctx(ctx) \
   container_of(ctx, struct pvr_drm_winsys_compute_ctx, base)
/* Pack the compute context's static register state into a FW stream.
 *
 * Layout: a KMD_STREAM_HDR header followed by seven 64-bit CDM context
 * store/terminate/resume PDS words, in the exact order written below.
 * On return, *stream_len_ptr holds the total stream size in bytes, which
 * is also recorded in the header's length field.
 *
 * stream_ptr_start must be large enough and suitably aligned for 64-bit
 * stores (the callers pass a 64-byte local buffer).
 */
static void pvr_drm_compute_ctx_static_state_init(
   const struct pvr_winsys_compute_ctx_create_info *create_info,
   uint8_t *stream_ptr_start,
   uint32_t *stream_len_ptr)
{
   const struct pvr_winsys_compute_ctx_static_state *ws_static_state =
      &create_info->static_state;
   uint64_t *stream_ptr = (uint64_t *)stream_ptr_start;

   /* Leave space for stream header. */
   /* NOTE(review): pvr_cmd_length() presumably counts 32-bit units, hence
    * the "/ 2" to advance a uint64_t pointer — confirm against pvr_csb.
    */
   stream_ptr += pvr_cmd_length(KMD_STREAM_HDR) / 2;

   *stream_ptr++ = ws_static_state->cdm_ctx_store_pds0;
   *stream_ptr++ = ws_static_state->cdm_ctx_store_pds1;
   *stream_ptr++ = ws_static_state->cdm_ctx_terminate_pds;
   *stream_ptr++ = ws_static_state->cdm_ctx_terminate_pds1;
   *stream_ptr++ = ws_static_state->cdm_ctx_resume_pds0;
   *stream_ptr++ = ws_static_state->cdm_ctx_store_pds0_b;
   *stream_ptr++ = ws_static_state->cdm_ctx_resume_pds0_b;

   /* Total size in bytes, header included. */
   *stream_len_ptr = ((uint8_t *)stream_ptr - stream_ptr_start);

   /* Write the header last, now that the final length is known. */
   pvr_csb_pack ((uint64_t *)stream_ptr_start, KMD_STREAM_HDR, value) {
      value.length = *stream_len_ptr;
   }
}
VkResult pvr_drm_winsys_compute_ctx_create(
struct pvr_winsys *ws,
const struct pvr_winsys_compute_ctx_create_info *create_info,
struct pvr_winsys_compute_ctx **const ctx_out)
{
uint8_t static_ctx_state_fw_stream[64];
struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
struct drm_pvr_ioctl_create_context_args ctx_args = {
.type = DRM_PVR_CTX_TYPE_COMPUTE,
.priority = pvr_drm_from_winsys_priority(create_info->priority),
.static_context_state = (__u64)static_ctx_state_fw_stream,
.static_context_state_len = (__u32)sizeof(static_ctx_state_fw_stream),
.vm_context_handle = drm_ws->vm_context,
};
struct pvr_drm_winsys_compute_ctx *drm_ctx;
VkResult result;
drm_ctx = vk_alloc(ws->alloc,
sizeof(*drm_ctx),
8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!drm_ctx) {
result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
goto err_out;
}
pvr_drm_compute_ctx_static_state_init(create_info,
static_ctx_state_fw_stream,
&ctx_args.static_context_state_len);
result = pvr_ioctlf(ws->render_fd,
DRM_IOCTL_PVR_CREATE_CONTEXT,
&ctx_args,
VK_ERROR_INITIALIZATION_FAILED,
"Failed to create compute context");
if (result != VK_SUCCESS)
goto err_free_ctx;
drm_ctx->base.ws = ws;
drm_ctx->handle = ctx_args.handle;
*ctx_out = &drm_ctx->base;
return VK_SUCCESS;
err_free_ctx:
vk_free(ws->alloc, drm_ctx);
err_out:
return result;
}
void pvr_drm_winsys_compute_ctx_destroy(struct pvr_winsys_compute_ctx *ctx)
{
struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
struct pvr_drm_winsys_compute_ctx *drm_ctx =
to_pvr_drm_winsys_compute_ctx(ctx);
struct drm_pvr_ioctl_destroy_context_args args = {
.handle = drm_ctx->handle,
};
pvr_ioctlf(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DESTROY_CONTEXT,
&args,
VK_ERROR_UNKNOWN,
"Error destroying compute context");
vk_free(drm_ws->base.alloc, drm_ctx);
}
/* Convert winsys compute submit flags into DRM job flags. */
static uint32_t pvr_winsys_compute_flags_to_drm(
   const struct pvr_winsys_compute_submit_flags *const ws_flags)
{
   uint32_t drm_flags = 0U;

   drm_flags |= ws_flags->prevent_all_overlap
                   ? DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
                   : 0U;
   drm_flags |= ws_flags->use_single_core
                   ? DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
                   : 0U;

   return drm_flags;
}
/* Submit a compute job to the kernel.
 *
 * At most one wait and one signal sync op are attached (sync_ops has room
 * for exactly those two); job_args.sync_ops.count is grown as each one is
 * appended. Both syncs must be binary syncobjs (asserted below).
 */
VkResult pvr_drm_winsys_compute_submit(
   const struct pvr_winsys_compute_ctx *ctx,
   const struct pvr_winsys_compute_submit_info *submit_info,
   UNUSED const struct pvr_device_info *dev_info,
   struct vk_sync *signal_sync)
{
   const struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
   const struct pvr_drm_winsys_compute_ctx *drm_ctx =
      to_pvr_drm_winsys_compute_ctx(ctx);
   /* Slot 0: optional wait; slot 1: optional signal. */
   struct drm_pvr_sync_op sync_ops[2];
   struct drm_pvr_job job_args = {
      .type = DRM_PVR_JOB_TYPE_COMPUTE,
      .context_handle = drm_ctx->handle,
      .cmd_stream = (__u64)&submit_info->fw_stream[0],
      .cmd_stream_len = submit_info->fw_stream_len,
      /* bo_handles is unused and zeroed. */
      /* num_bo_handles is unused and zeroed. */
      .flags = pvr_winsys_compute_flags_to_drm(&submit_info->flags),
      .sync_ops = DRM_PVR_OBJ_ARRAY(0, sync_ops),
   };
   struct drm_pvr_ioctl_submit_jobs_args args = {
      .jobs = DRM_PVR_OBJ_ARRAY(1, &job_args),
   };

   if (submit_info->wait) {
      struct vk_sync *sync = submit_info->wait;

      /* Only binary syncobjs are supported here. */
      assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
      sync_ops[job_args.sync_ops.count++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(sync)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   if (signal_sync) {
      assert(!(signal_sync->flags & VK_SYNC_IS_TIMELINE));
      sync_ops[job_args.sync_ops.count++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(signal_sync)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   /* Returns VK_ERROR_OUT_OF_DEVICE_MEMORY to match pvrsrv. */
   return pvr_ioctlf(drm_ws->base.render_fd,
                     DRM_IOCTL_PVR_SUBMIT_JOBS,
                     &args,
                     VK_ERROR_OUT_OF_DEVICE_MEMORY,
                     "Failed to submit compute job");
}

View file

@ -0,0 +1,51 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_JOB_COMPUTE_H
#define PVR_DRM_JOB_COMPUTE_H
#include <vulkan/vulkan.h>
struct pvr_winsys;
struct pvr_winsys_compute_ctx;
struct pvr_winsys_compute_ctx_create_info;
struct pvr_winsys_compute_submit_info;
struct vk_sync;
/*******************************************
Function prototypes
*******************************************/
VkResult pvr_drm_winsys_compute_ctx_create(
struct pvr_winsys *ws,
const struct pvr_winsys_compute_ctx_create_info *create_info,
struct pvr_winsys_compute_ctx **const ctx_out);
void pvr_drm_winsys_compute_ctx_destroy(struct pvr_winsys_compute_ctx *ctx);
VkResult pvr_drm_winsys_compute_submit(
const struct pvr_winsys_compute_ctx *ctx,
const struct pvr_winsys_compute_submit_info *submit_info,
const struct pvr_device_info *dev_info,
struct vk_sync *signal_sync);
#endif /* PVR_DRM_JOB_COMPUTE_H */

View file

@ -0,0 +1,125 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_drm.h"
#include "pvr_drm_job_null.h"
#include "pvr_winsys.h"
#include "util/libsync.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
#include "vk_sync.h"
#include "vk_util.h"
/* Submit a "null" job: make signal_sync fire once all waits have fired,
 * without touching the GPU.
 *
 * With exactly one wait this is a single syncobj transfer. Otherwise a
 * temporary timeline syncobj accumulates every wait at points 1..wait_count
 * (created already-signalled when wait_count is 0), and its final point is
 * transferred into signal_sync.
 *
 * All failures are reported as VK_ERROR_OUT_OF_DEVICE_MEMORY.
 */
VkResult pvr_drm_winsys_null_job_submit(struct pvr_winsys *ws,
                                        struct vk_sync_wait *waits,
                                        uint32_t wait_count,
                                        struct vk_sync_signal *signal_sync)
{
   const struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   uint32_t tmp_syncobj;
   VkResult result;
   int ret;

   /* Fast path: one wait transfers straight into the signal syncobj. */
   if (wait_count == 1) {
      struct vk_sync *src_sync = waits[0].sync;
      struct vk_sync *dst_sync = signal_sync->sync;

      ret = drmSyncobjTransfer(drm_ws->base.render_fd,
                               vk_sync_as_drm_syncobj(dst_sync)->syncobj,
                               signal_sync->signal_value,
                               vk_sync_as_drm_syncobj(src_sync)->syncobj,
                               waits[0].wait_value,
                               0);
      if (ret) {
         return vk_errorf(NULL,
                          VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "Failed to submit transfer syncobj. Errno: %d - %s.",
                          errno,
                          strerror(errno));
      }

      return VK_SUCCESS;
   }

   ret = drmSyncobjCreate(drm_ws->base.render_fd,
                          wait_count == 0 ? DRM_SYNCOBJ_CREATE_SIGNALED : 0,
                          &tmp_syncobj);
   if (ret) {
      return vk_errorf(NULL,
                       VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to create temporary syncobj. Errno: %d - %s.",
                       errno,
                       strerror(errno));
   }

   for (uint32_t i = 0; i < wait_count; i++) {
      struct vk_sync *src_sync = waits[i].sync;

      /* NOTE(review): a NULL wait is skipped, leaving timeline point i+1 of
       * tmp_syncobj unmaterialized — presumably NULL entries cannot reach
       * this path; confirm against the callers.
       */
      if (!src_sync)
         continue;

      ret = drmSyncobjTransfer(drm_ws->base.render_fd,
                               tmp_syncobj,
                               i + 1,
                               vk_sync_as_drm_syncobj(src_sync)->syncobj,
                               waits[i].wait_value,
                               0);
      if (ret) {
         /* Fixed copy-paste: this is a transfer failure, not a create
          * failure.
          */
         result =
            vk_errorf(NULL,
                      VK_ERROR_OUT_OF_DEVICE_MEMORY,
                      "Failed to transfer syncobj. Errno: %d - %s.",
                      errno,
                      strerror(errno));
         goto out_destroy_tmp_syncobj;
      }
   }

   /* The final timeline point only materializes once every earlier point
    * has; transferring it into signal_sync completes the null job.
    */
   ret = drmSyncobjTransfer(drm_ws->base.render_fd,
                            vk_sync_as_drm_syncobj(signal_sync->sync)->syncobj,
                            signal_sync->signal_value,
                            tmp_syncobj,
                            wait_count,
                            0);
   if (ret) {
      result = vk_errorf(NULL,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "Syncobj transfer failed. Errno: %d - %s.",
                         errno,
                         strerror(errno));
   } else {
      result = VK_SUCCESS;
   }

out_destroy_tmp_syncobj:
   drmSyncobjDestroy(drm_ws->base.render_fd, tmp_syncobj);

   return result;
}

View file

@ -0,0 +1,39 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_JOB_NULL_H
#define PVR_DRM_JOB_NULL_H

#include <stdint.h>
#include <vulkan/vulkan.h>

struct pvr_winsys;
struct vk_sync_signal;
struct vk_sync_wait;

/* Submit a CPU-only "null" job: signal signal_sync once all waits have
 * signalled, without involving the GPU. Implemented in pvr_drm_job_null.c.
 */
VkResult pvr_drm_winsys_null_job_submit(struct pvr_winsys *ws,
                                        struct vk_sync_wait *waits,
                                        uint32_t wait_count,
                                        struct vk_sync_signal *signal_sync);

#endif /* PVR_DRM_JOB_NULL_H */

View file

@ -0,0 +1,624 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_drm.h"
#include "pvr_drm_bo.h"
#include "pvr_drm_job_common.h"
#include "pvr_drm_job_render.h"
#include "pvr_private.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
#include "vk_util.h"
#include "vk_sync.h"
/* Indices into drm_pvr_ioctl_create_hwrt_dataset_args::free_list_handles. */
#define PVR_DRM_FREE_LIST_LOCAL 0U
#define PVR_DRM_FREE_LIST_GLOBAL 1U
#define PVR_DRM_FREE_LIST_MAX 2U

/* DRM backing for a winsys parameter-memory free list. */
struct pvr_drm_winsys_free_list {
   struct pvr_winsys_free_list base;

   /* Kernel free list handle. */
   uint32_t handle;

   /* Optional global free list this local one chains to; NULL if none. */
   struct pvr_drm_winsys_free_list *parent;
};

#define to_pvr_drm_winsys_free_list(free_list) \
   container_of(free_list, struct pvr_drm_winsys_free_list, base)

/* DRM backing for a winsys render target dataset. */
struct pvr_drm_winsys_rt_dataset {
   struct pvr_winsys_rt_dataset base;

   /* Kernel HWRT dataset handle. */
   uint32_t handle;
};

#define to_pvr_drm_winsys_rt_dataset(rt_dataset) \
   container_of(rt_dataset, struct pvr_drm_winsys_rt_dataset, base)
VkResult pvr_drm_winsys_free_list_create(
struct pvr_winsys *const ws,
struct pvr_winsys_vma *const free_list_vma,
uint32_t initial_num_pages,
uint32_t max_num_pages,
uint32_t grow_num_pages,
uint32_t grow_threshold,
struct pvr_winsys_free_list *const parent_free_list,
struct pvr_winsys_free_list **const free_list_out)
{
struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
struct drm_pvr_ioctl_create_free_list_args free_list_args = {
.free_list_gpu_addr = free_list_vma->dev_addr.addr,
.initial_num_pages = initial_num_pages,
.max_num_pages = max_num_pages,
.grow_num_pages = grow_num_pages,
.grow_threshold = grow_threshold,
.vm_context_handle = drm_ws->vm_context,
};
struct pvr_drm_winsys_free_list *drm_free_list;
VkResult result;
drm_free_list = vk_zalloc(ws->alloc,
sizeof(*drm_free_list),
8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!drm_free_list) {
result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
goto err_out;
}
drm_free_list->base.ws = ws;
if (parent_free_list)
drm_free_list->parent = to_pvr_drm_winsys_free_list(parent_free_list);
/* Returns VK_ERROR_INITIALIZATION_FAILED to match pvrsrv. */
result = pvr_ioctlf(ws->render_fd,
DRM_IOCTL_PVR_CREATE_FREE_LIST,
&free_list_args,
VK_ERROR_INITIALIZATION_FAILED,
"Failed to create free list");
if (result != VK_SUCCESS)
goto err_free_free_list;
drm_free_list->handle = free_list_args.handle;
*free_list_out = &drm_free_list->base;
return VK_SUCCESS;
err_free_free_list:
vk_free(ws->alloc, drm_free_list);
err_out:
return result;
}
/* Destroy a kernel free list and free its winsys wrapper.
 *
 * A failing DESTROY_FREE_LIST ioctl is logged but otherwise ignored.
 */
void pvr_drm_winsys_free_list_destroy(struct pvr_winsys_free_list *free_list)
{
   struct pvr_drm_winsys_free_list *const drm_free_list =
      to_pvr_drm_winsys_free_list(free_list);
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(free_list->ws);
   struct drm_pvr_ioctl_destroy_free_list_args args = {
      .handle = drm_free_list->handle,
   };

   pvr_ioctlf(drm_ws->base.render_fd,
              DRM_IOCTL_PVR_DESTROY_FREE_LIST,
              &args,
              VK_ERROR_UNKNOWN,
              "Error destroying free list");

   /* Free the containing allocation rather than the embedded base pointer.
    * Passing `free_list` only worked because `base` happens to be the first
    * member; `drm_free_list` is correct regardless of member layout.
    */
   vk_free(drm_ws->base.alloc, drm_free_list);
}
/* Pack the render context's static register state into a FW stream.
 *
 * Layout: a KMD_STREAM_HDR header, the VDM/geometry context base addresses,
 * then per-geometry-state blocks of VDM context store/resume task words with
 * zeroed padding for the unused task3/task4 slots. On return *stream_len_ptr
 * holds the total stream size in bytes, mirrored in the header length field.
 *
 * stream_ptr_start must be large enough and suitably aligned for 64-bit
 * stores (the caller passes a 192-byte local buffer).
 */
static void pvr_drm_render_ctx_static_state_init(
   struct pvr_winsys_render_ctx_create_info *create_info,
   uint8_t *stream_ptr_start,
   uint32_t *stream_len_ptr)
{
   struct pvr_winsys_render_ctx_static_state *ws_static_state =
      &create_info->static_state;
   uint64_t *stream_ptr = (uint64_t *)stream_ptr_start;

   /* Leave space for stream header. */
   /* NOTE(review): pvr_cmd_length() presumably counts 32-bit units, hence
    * the "/ 2" to advance a uint64_t pointer — confirm against pvr_csb.
    */
   stream_ptr += pvr_cmd_length(KMD_STREAM_HDR) / 2;

   *stream_ptr++ = ws_static_state->vdm_ctx_state_base_addr;
   /* geom_reg_vdm_context_state_resume_addr is unused and zeroed. */
   *stream_ptr++ = 0;
   *stream_ptr++ = ws_static_state->geom_ctx_state_base_addr;

   for (uint32_t i = 0; i < ARRAY_SIZE(ws_static_state->geom_state); i++) {
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task0;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task1;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task2;

      /* {store, resume}_task{3, 4} are unused and zeroed. */
      *stream_ptr++ = 0;
      *stream_ptr++ = 0;

      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task0;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task1;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task2;

      /* {store, resume}_task{3, 4} are unused and zeroed. */
      *stream_ptr++ = 0;
      *stream_ptr++ = 0;
   }

   /* Total size in bytes, header included. */
   *stream_len_ptr = ((uint8_t *)stream_ptr - stream_ptr_start);

   /* Write the header last, now that the final length is known. */
   pvr_csb_pack ((uint64_t *)stream_ptr_start, KMD_STREAM_HDR, value) {
      value.length = *stream_len_ptr;
   }
}
/* DRM backing for a winsys render context. */
struct pvr_drm_winsys_render_ctx {
   struct pvr_winsys_render_ctx base;

   /* Handle to kernel context. */
   uint32_t handle;

   /* Syncobj linking the geometry phase to the partial-render phase;
    * created alongside the context and destroyed with it.
    */
   uint32_t geom_to_pr_syncobj;
};

/* Downcast from the common winsys render context. */
#define to_pvr_drm_winsys_render_ctx(ctx) \
   container_of(ctx, struct pvr_drm_winsys_render_ctx, base)
/* Create a kernel render context plus its geometry-to-partial-render
 * syncobj.
 *
 * Resources are acquired in order (wrapper alloc, syncobj, kernel context)
 * and unwound in reverse on failure. Returns
 * VK_ERROR_INITIALIZATION_FAILED if the CREATE_CONTEXT ioctl fails.
 */
VkResult pvr_drm_winsys_render_ctx_create(
   struct pvr_winsys *ws,
   struct pvr_winsys_render_ctx_create_info *create_info,
   struct pvr_winsys_render_ctx **const ctx_out)
{
   uint8_t static_ctx_state_fw_stream[192];
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   struct drm_pvr_ioctl_create_context_args ctx_args = {
      .type = DRM_PVR_CTX_TYPE_RENDER,
      .priority = pvr_drm_from_winsys_priority(create_info->priority),
      /* static_context_state_len is filled in by
       * pvr_drm_render_ctx_static_state_init() below.
       */
      .static_context_state = (uint64_t)&static_ctx_state_fw_stream,
      .callstack_addr = create_info->vdm_callstack_addr.addr,
      .vm_context_handle = drm_ws->vm_context,
   };
   struct pvr_drm_winsys_render_ctx *drm_ctx;
   uint32_t geom_to_pr_syncobj;
   VkResult result;
   int ret;

   drm_ctx = vk_alloc(ws->alloc,
                      sizeof(*drm_ctx),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_ctx) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   ret = drmSyncobjCreate(ws->render_fd, 0, &geom_to_pr_syncobj);
   if (ret < 0) {
      result = vk_errorf(NULL,
                         VK_ERROR_OUT_OF_HOST_MEMORY,
                         "DRM_IOCTL_SYNCOBJ_CREATE failed: %s",
                         strerror(errno));
      goto err_free_ctx;
   }

   /* Pack the static state; also sets ctx_args.static_context_state_len. */
   pvr_drm_render_ctx_static_state_init(create_info,
                                        static_ctx_state_fw_stream,
                                        &ctx_args.static_context_state_len);

   result = pvr_ioctlf(ws->render_fd,
                       DRM_IOCTL_PVR_CREATE_CONTEXT,
                       &ctx_args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to create render context");
   if (result != VK_SUCCESS)
      goto err_destroy_syncobj;

   *drm_ctx = (struct pvr_drm_winsys_render_ctx) {
      .base = {
         .ws = ws,
      },
      .handle = ctx_args.handle,
      .geom_to_pr_syncobj = geom_to_pr_syncobj,
   };

   *ctx_out = &drm_ctx->base;

   return VK_SUCCESS;

err_destroy_syncobj:
   ret = drmSyncobjDestroy(ws->render_fd, geom_to_pr_syncobj);
   if (ret < 0) {
      /* Nothing sensible to do on cleanup failure; log and move on. */
      mesa_loge("DRM_IOCTL_SYNCOBJ_DESTROY failed: %s - leaking it",
                strerror(errno));
   }

err_free_ctx:
   vk_free(ws->alloc, drm_ctx);

err_out:
   return result;
}
void pvr_drm_winsys_render_ctx_destroy(struct pvr_winsys_render_ctx *ctx)
{
struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
struct pvr_drm_winsys_render_ctx *drm_ctx =
to_pvr_drm_winsys_render_ctx(ctx);
struct drm_pvr_ioctl_destroy_context_args args = {
.handle = drm_ctx->handle,
};
int ret;
ret = drmSyncobjDestroy(ctx->ws->render_fd, drm_ctx->geom_to_pr_syncobj);
if (ret < 0) {
mesa_loge("DRM_IOCTL_SYNCOBJ_DESTROY failed: %s - leaking it",
strerror(errno));
}
pvr_ioctlf(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DESTROY_CONTEXT,
&args,
VK_ERROR_UNKNOWN,
"Error destroying render context");
vk_free(drm_ws->base.alloc, drm_ctx);
}
/* Create a kernel HWRT (hardware render target) dataset.
 *
 * Translates the winsys create info into the CREATE_HWRT_DATASET uapi
 * struct: geometry data addresses, the two rt_data address sets, and the
 * local/global free list handles. Returns VK_ERROR_INITIALIZATION_FAILED
 * on ioctl failure to match pvrsrv.
 */
VkResult pvr_drm_render_target_dataset_create(
   struct pvr_winsys *const ws,
   const struct pvr_winsys_rt_dataset_create_info *const create_info,
   UNUSED const struct pvr_device_info *dev_info,
   struct pvr_winsys_rt_dataset **const rt_dataset_out)
{
   struct pvr_drm_winsys_free_list *drm_free_list =
      to_pvr_drm_winsys_free_list(create_info->local_free_list);
   /* 0 is just a placeholder. It doesn't indicate an invalid handle. */
   uint32_t parent_free_list_handle =
      drm_free_list->parent ? drm_free_list->parent->handle : 0;
   struct drm_pvr_ioctl_create_hwrt_dataset_args args = {
      .geom_data_args = {
         .tpc_dev_addr = create_info->tpc_dev_addr.addr,
         .tpc_size = create_info->tpc_size,
         .tpc_stride = create_info->tpc_stride,
         .vheap_table_dev_addr = create_info->vheap_table_dev_addr.addr,
         .rtc_dev_addr = create_info->rtc_dev_addr.addr,
      },

      /* One entry per rt_data; count equality is static-asserted below. */
      .rt_data_args = {
         [0] = {
            .pm_mlist_dev_addr =
               create_info->rt_datas[0].pm_mlist_dev_addr.addr,
            .macrotile_array_dev_addr =
               create_info->rt_datas[0].macrotile_array_dev_addr.addr,
            .region_header_dev_addr =
               create_info->rt_datas[0].rgn_header_dev_addr.addr,
         },
         [1] = {
            .pm_mlist_dev_addr =
               create_info->rt_datas[1].pm_mlist_dev_addr.addr,
            .macrotile_array_dev_addr =
               create_info->rt_datas[1].macrotile_array_dev_addr.addr,
            .region_header_dev_addr =
               create_info->rt_datas[1].rgn_header_dev_addr.addr,
         },
      },

      .free_list_handles = {
         [PVR_DRM_FREE_LIST_LOCAL] = drm_free_list->handle,
         [PVR_DRM_FREE_LIST_GLOBAL] = parent_free_list_handle,
      },

      .width = create_info->width,
      .height = create_info->height,
      .samples = create_info->samples,
      .layers = create_info->layers,

      .isp_merge_lower_x = create_info->isp_merge_lower_x,
      .isp_merge_lower_y = create_info->isp_merge_lower_y,
      .isp_merge_scale_x = create_info->isp_merge_scale_x,
      .isp_merge_scale_y = create_info->isp_merge_scale_y,
      .isp_merge_upper_x = create_info->isp_merge_upper_x,
      .isp_merge_upper_y = create_info->isp_merge_upper_y,

      .region_header_size = create_info->rgn_header_size,
   };
   struct pvr_drm_winsys_rt_dataset *drm_rt_dataset;
   VkResult result;

   STATIC_ASSERT(ARRAY_SIZE(args.rt_data_args) ==
                 ARRAY_SIZE(create_info->rt_datas));

   drm_rt_dataset = vk_zalloc(ws->alloc,
                              sizeof(*drm_rt_dataset),
                              8,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_rt_dataset) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   /* Returns VK_ERROR_INITIALIZATION_FAILED to match pvrsrv. */
   result = pvr_ioctlf(ws->render_fd,
                       DRM_IOCTL_PVR_CREATE_HWRT_DATASET,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to create render target dataset");
   if (result != VK_SUCCESS)
      goto err_free_dataset;

   drm_rt_dataset->handle = args.handle;
   drm_rt_dataset->base.ws = ws;

   *rt_dataset_out = &drm_rt_dataset->base;

   return VK_SUCCESS;

err_free_dataset:
   vk_free(ws->alloc, drm_rt_dataset);

err_out:
   return result;
}
void pvr_drm_render_target_dataset_destroy(
struct pvr_winsys_rt_dataset *const rt_dataset)
{
struct pvr_drm_winsys_rt_dataset *const drm_rt_dataset =
to_pvr_drm_winsys_rt_dataset(rt_dataset);
struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(rt_dataset->ws);
struct drm_pvr_ioctl_destroy_hwrt_dataset_args args = {
.handle = drm_rt_dataset->handle,
};
pvr_ioctlf(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DESTROY_HWRT_DATASET,
&args,
VK_ERROR_UNKNOWN,
"Error destroying render target dataset");
vk_free(drm_ws->base.alloc, drm_rt_dataset);
}
/* Translate winsys geometry state flags into the corresponding
 * DRM_PVR_SUBMIT_JOB_GEOM_CMD_* bits.
 */
static uint32_t pvr_winsys_geom_flags_to_drm(
   const struct pvr_winsys_geometry_state_flags *const ws_flags)
{
   const uint32_t first =
      ws_flags->is_first_geometry ? DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST : 0U;
   const uint32_t last =
      ws_flags->is_last_geometry ? DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST : 0U;
   const uint32_t single_core =
      ws_flags->use_single_core ? DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE : 0U;

   return first | last | single_core;
}
/* Translate winsys fragment state flags into the corresponding
 * DRM_PVR_SUBMIT_JOB_FRAG_CMD_* bits.
 */
static uint32_t pvr_winsys_frag_flags_to_drm(
   const struct pvr_winsys_fragment_state_flags *const ws_flags)
{
   uint32_t drm_flags = 0U;

   drm_flags |= ws_flags->use_single_core
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE : 0U;
   drm_flags |= ws_flags->has_depth_buffer
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER : 0U;
   drm_flags |= ws_flags->has_stencil_buffer
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER : 0U;
   drm_flags |= ws_flags->prevent_cdm_overlap
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP : 0U;
   drm_flags |= ws_flags->get_vis_results
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS : 0U;
   drm_flags |= ws_flags->has_spm_scratch_buffer
                   ? DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER : 0U;

   return drm_flags;
}
/**
 * Submit a render as a batch of DRM jobs via DRM_IOCTL_PVR_SUBMIT_JOBS.
 *
 * A submission always contains a geometry job (jobs_args[0]) and a partial
 * render (PR) fragment job (jobs_args[1]); a regular fragment job is
 * appended as a third job only when submit_info->has_fragment_job is set.
 *
 * Sync wiring:
 *  - the geometry job waits on submit_info->geometry.wait and signals
 *    signal_sync_geom (when provided);
 *  - the PR job always waits on geometry completion, either via
 *    signal_sync_geom or, when that's absent, via the context's internal
 *    geom_to_pr_syncobj which the geometry job is then made to signal;
 *  - the fragment job waits on submit_info->fragment.wait and signals
 *    signal_sync_frag.
 */
VkResult pvr_drm_winsys_render_submit(
   const struct pvr_winsys_render_ctx *ctx,
   const struct pvr_winsys_render_submit_info *submit_info,
   UNUSED const struct pvr_device_info *dev_info,
   struct vk_sync *signal_sync_geom,
   struct vk_sync *signal_sync_frag)
{
   const struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
   const struct pvr_drm_winsys_render_ctx *drm_ctx =
      to_pvr_drm_winsys_render_ctx(ctx);
   const struct pvr_winsys_geometry_state *const geom_state =
      &submit_info->geometry;
   const struct pvr_winsys_fragment_state *const frag_state =
      &submit_info->fragment;
   const struct pvr_winsys_fragment_state *const pr_state =
      &submit_info->fragment_pr;
   const struct pvr_drm_winsys_rt_dataset *drm_rt_dataset =
      to_pvr_drm_winsys_rt_dataset(submit_info->rt_dataset);
   /* Worst cases: geom = wait + one signal, PR = one wait,
    * frag = wait + signal.
    */
   struct drm_pvr_sync_op geom_sync_ops[2], pr_sync_ops[1], frag_sync_ops[3];
   unsigned num_geom_syncs = 0, num_pr_syncs = 0, num_frag_syncs = 0;
   uint32_t geom_to_pr_syncobj;
   struct drm_pvr_job jobs_args[3] = {
      [0] = {
         .type = DRM_PVR_JOB_TYPE_GEOMETRY,
         .cmd_stream = (__u64)&geom_state->fw_stream[0],
         .cmd_stream_len = geom_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = pvr_winsys_geom_flags_to_drm(&geom_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, geom_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      },
      [1] = {
         .type = DRM_PVR_JOB_TYPE_FRAGMENT,
         .cmd_stream = (__u64)&pr_state->fw_stream[0],
         .cmd_stream_len = pr_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER |
                  pvr_winsys_frag_flags_to_drm(&pr_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, pr_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      }
   };
   struct drm_pvr_ioctl_submit_jobs_args args = {
      /* Count starts at 2 (geom + PR); bumped below if a frag job exists. */
      .jobs = DRM_PVR_OBJ_ARRAY(2, jobs_args),
   };

   /* Geom syncs */
   if (submit_info->geometry.wait) {
      struct vk_sync *sync = submit_info->geometry.wait;

      assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(sync)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   if (signal_sync_geom) {
      assert(!(signal_sync_geom->flags & VK_SYNC_IS_TIMELINE));
      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(signal_sync_geom)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   /* PR syncs: reuse the caller's geometry signal syncobj if there is one;
    * otherwise fall back to the context-internal syncobj and make the
    * geometry job signal it.
    */
   if (signal_sync_geom) {
      assert(!(signal_sync_geom->flags & VK_SYNC_IS_TIMELINE));
      geom_to_pr_syncobj = vk_sync_as_drm_syncobj(signal_sync_geom)->syncobj;
   } else {
      geom_to_pr_syncobj = drm_ctx->geom_to_pr_syncobj;
      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = geom_to_pr_syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   pr_sync_ops[num_pr_syncs++] = (struct drm_pvr_sync_op){
      .handle = geom_to_pr_syncobj,
      .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
               DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
      .value = 0,
   };

   /* Frag job */
   if (submit_info->has_fragment_job) {
      jobs_args[args.jobs.count++] = (struct drm_pvr_job) {
         .type = DRM_PVR_JOB_TYPE_FRAGMENT,
         .cmd_stream = (__u64)&frag_state->fw_stream[0],
         .cmd_stream_len = frag_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = pvr_winsys_frag_flags_to_drm(&frag_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, frag_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      };

      /* There's no need to setup a geom -> frag dependency here, as we always
       * setup a geom -> pr dependency (a PR just being a frag job) and the KMD
       * respects submission order for jobs of the same type.
       *
       * Note that, in the case where PRs aren't needed, because we didn't run
       * out of PB space during the geometry phase, the PR job will still be
       * scheduled after the geometry job, but no PRs will be performed, as
       * they aren't needed.
       */

      if (submit_info->fragment.wait) {
         struct vk_sync *sync = submit_info->fragment.wait;

         assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
         frag_sync_ops[num_frag_syncs++] = (struct drm_pvr_sync_op){
            .handle = vk_sync_as_drm_syncobj(sync)->syncobj,
            .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                     DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
            .value = 0,
         };
      }

      if (signal_sync_frag) {
         assert(!(signal_sync_frag->flags & VK_SYNC_IS_TIMELINE));
         frag_sync_ops[num_frag_syncs++] = (struct drm_pvr_sync_op){
            .handle = vk_sync_as_drm_syncobj(signal_sync_frag)->syncobj,
            .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                     DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
            .value = 0,
         };
      }
   }

   /* Patch the final sync-op counts now that they are known. */
   jobs_args[0].sync_ops.count = num_geom_syncs;
   jobs_args[1].sync_ops.count = num_pr_syncs;

   if (submit_info->has_fragment_job)
      jobs_args[2].sync_ops.count = num_frag_syncs;

   /* Returns VK_ERROR_OUT_OF_DEVICE_MEMORY to match pvrsrv. */
   return pvr_ioctlf(drm_ws->base.render_fd,
                     DRM_IOCTL_PVR_SUBMIT_JOBS,
                     &args,
                     VK_ERROR_OUT_OF_DEVICE_MEMORY,
                     "Failed to submit render job");
}

View file

@ -0,0 +1,77 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_JOB_RENDER_H
#define PVR_DRM_JOB_RENDER_H

#include <stdint.h>
#include <vulkan/vulkan.h>

struct pvr_device_info;
struct pvr_winsys;
struct pvr_winsys_free_list;
struct pvr_winsys_render_ctx;
struct pvr_winsys_render_ctx_create_info;
struct pvr_winsys_render_submit_info;
struct pvr_winsys_rt_dataset;
struct pvr_winsys_rt_dataset_create_info;
struct pvr_winsys_vma;
struct vk_sync;

/*******************************************
   Function prototypes
 *******************************************/

/* Create a KMD free list backed by free_list_vma, optionally chained to
 * parent_free_list (the global free list).
 */
VkResult pvr_drm_winsys_free_list_create(
   struct pvr_winsys *const ws,
   struct pvr_winsys_vma *const free_list_vma,
   uint32_t initial_num_pages,
   uint32_t max_num_pages,
   uint32_t grow_num_pages,
   uint32_t grow_threshold,
   struct pvr_winsys_free_list *const parent_free_list,
   struct pvr_winsys_free_list **const free_list_out);

/* Destroy a free list created with pvr_drm_winsys_free_list_create(). */
void pvr_drm_winsys_free_list_destroy(struct pvr_winsys_free_list *free_list);

/* Create/destroy a render context on the KMD. */
VkResult pvr_drm_winsys_render_ctx_create(
   struct pvr_winsys *ws,
   struct pvr_winsys_render_ctx_create_info *create_info,
   struct pvr_winsys_render_ctx **const ctx_out);
void pvr_drm_winsys_render_ctx_destroy(struct pvr_winsys_render_ctx *ctx);

/* Create/destroy a HWRT dataset used by render job submissions. */
VkResult pvr_drm_render_target_dataset_create(
   struct pvr_winsys *const ws,
   const struct pvr_winsys_rt_dataset_create_info *const create_info,
   const struct pvr_device_info *dev_info,
   struct pvr_winsys_rt_dataset **const rt_dataset_out);
void pvr_drm_render_target_dataset_destroy(
   struct pvr_winsys_rt_dataset *const rt_dataset);

/* Submit a render (geometry + partial render + optional fragment job).
 * signal_sync_geom/signal_sync_frag, when non-NULL, are signalled on
 * geometry/fragment completion respectively.
 */
VkResult pvr_drm_winsys_render_submit(
   const struct pvr_winsys_render_ctx *ctx,
   const struct pvr_winsys_render_submit_info *submit_info,
   const struct pvr_device_info *dev_info,
   struct vk_sync *signal_sync_geom,
   struct vk_sync *signal_sync_frag);

#endif /* PVR_DRM_JOB_RENDER_H */

View file

@ -0,0 +1,181 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>
#include "drm-uapi/pvr_drm.h"
#include "pvr_private.h"
#include "pvr_drm.h"
#include "pvr_drm_job_common.h"
#include "pvr_drm_job_transfer.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
/* DRM winsys transfer context: pairs the generic winsys context with the
 * KMD context handle returned by DRM_IOCTL_PVR_CREATE_CONTEXT.
 */
struct pvr_drm_winsys_transfer_ctx {
   struct pvr_winsys_transfer_ctx base;
   /* KMD context handle used for job submission and destruction. */
   uint32_t handle;
};

/* Downcast from the generic winsys transfer context to the DRM one. */
#define to_pvr_drm_winsys_transfer_ctx(ctx) \
   container_of(ctx, struct pvr_drm_winsys_transfer_ctx, base)
/**
 * Create a transfer context on the KMD.
 *
 * Allocates the host-side wrapper and issues DRM_IOCTL_PVR_CREATE_CONTEXT
 * with type DRM_PVR_CTX_TYPE_TRANSFER_FRAG against the winsys VM context.
 *
 * \param ws          Winsys the context is created against.
 * \param create_info Winsys-level context description (priority).
 * \param ctx_out     On success, set to the created context.
 * \return VK_SUCCESS, VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure, or
 *         VK_ERROR_INITIALIZATION_FAILED if the ioctl fails.
 */
VkResult pvr_drm_winsys_transfer_ctx_create(
   struct pvr_winsys *ws,
   const struct pvr_winsys_transfer_ctx_create_info *create_info,
   struct pvr_winsys_transfer_ctx **const ctx_out)
{
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   struct drm_pvr_ioctl_create_context_args ctx_args = {
      .type = DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
      .priority = pvr_drm_from_winsys_priority(create_info->priority),
      .vm_context_handle = drm_ws->vm_context,
   };
   struct pvr_drm_winsys_transfer_ctx *drm_ctx;
   VkResult result;

   drm_ctx = vk_zalloc(ws->alloc,
                       sizeof(*drm_ctx),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_ctx) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   result = pvr_ioctlf(ws->render_fd,
                       DRM_IOCTL_PVR_CREATE_CONTEXT,
                       &ctx_args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to create transfer context");
   /* Explicit VK_SUCCESS comparison for consistency with the other context
    * and dataset creation paths in this winsys (was `if (result)`).
    */
   if (result != VK_SUCCESS)
      goto err_free_ctx;

   drm_ctx->base.ws = ws;
   /* ctx_args.handle is written back by the kernel on success. */
   drm_ctx->handle = ctx_args.handle;

   *ctx_out = &drm_ctx->base;

   return VK_SUCCESS;

err_free_ctx:
   vk_free(ws->alloc, drm_ctx);

err_out:
   return result;
}
void pvr_drm_winsys_transfer_ctx_destroy(struct pvr_winsys_transfer_ctx *ctx)
{
struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
struct pvr_drm_winsys_transfer_ctx *drm_ctx =
to_pvr_drm_winsys_transfer_ctx(ctx);
struct drm_pvr_ioctl_destroy_context_args args = {
.handle = drm_ctx->handle,
};
pvr_ioctlf(drm_ws->base.render_fd,
DRM_IOCTL_PVR_DESTROY_CONTEXT,
&args,
VK_ERROR_UNKNOWN,
"Error destroying transfer context");
vk_free(drm_ws->base.alloc, drm_ctx);
}
/* Translate winsys transfer command flags into the corresponding
 * DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_* bits.
 */
static uint32_t pvr_winsys_transfer_flags_to_drm(
   const struct pvr_winsys_transfer_cmd_flags *ws_flags)
{
   return ws_flags->use_single_core
             ? DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE : 0U;
}
/**
 * Submit a single transfer command as a DRM job.
 *
 * Only one command per submission is supported. The job waits on
 * submit_info->wait (if set) and signals signal_sync (if set).
 */
VkResult pvr_drm_winsys_transfer_submit(
   const struct pvr_winsys_transfer_ctx *ctx,
   const struct pvr_winsys_transfer_submit_info *submit_info,
   UNUSED const struct pvr_device_info *const dev_info,
   struct vk_sync *signal_sync)
{
   const struct pvr_drm_winsys_transfer_ctx *drm_ctx =
      to_pvr_drm_winsys_transfer_ctx(ctx);
   const struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
   /* At most one wait op and one signal op. */
   struct drm_pvr_sync_op sync_ops[2];
   uint32_t num_sync_ops = 0U;
   struct drm_pvr_job job_args = {
      .type = DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
      .cmd_stream = (__u64)&submit_info->cmds[0].fw_stream[0],
      .cmd_stream_len = submit_info->cmds[0].fw_stream_len,
      .flags = pvr_winsys_transfer_flags_to_drm(&submit_info->cmds[0].flags),
      .context_handle = drm_ctx->handle,
      .sync_ops = DRM_PVR_OBJ_ARRAY(0, sync_ops),
   };
   struct drm_pvr_ioctl_submit_jobs_args args = {
      .jobs = DRM_PVR_OBJ_ARRAY(1, &job_args),
   };

   assert(submit_info->cmd_count == 1);

   if (submit_info->wait) {
      assert(!(submit_info->wait->flags & VK_SYNC_IS_TIMELINE));
      sync_ops[num_sync_ops++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(submit_info->wait)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   if (signal_sync) {
      assert(!(signal_sync->flags & VK_SYNC_IS_TIMELINE));
      sync_ops[num_sync_ops++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(signal_sync)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   job_args.sync_ops.count = num_sync_ops;

   /* Returns VK_ERROR_OUT_OF_DEVICE_MEMORY to match pvrsrv. */
   return pvr_ioctlf(drm_ws->base.render_fd,
                     DRM_IOCTL_PVR_SUBMIT_JOBS,
                     &args,
                     VK_ERROR_OUT_OF_DEVICE_MEMORY,
                     "Failed to submit transfer job");
}

View file

@ -0,0 +1,51 @@
/*
* Copyright © 2022 Imagination Technologies Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef PVR_DRM_JOB_TRANSFER_H
#define PVR_DRM_JOB_TRANSFER_H

#include <vulkan/vulkan.h>

struct pvr_device_info;
struct pvr_winsys;
struct pvr_winsys_transfer_ctx;
struct pvr_winsys_transfer_ctx_create_info;
struct pvr_winsys_transfer_submit_info;
struct vk_sync;

/*******************************************
   Function prototypes
 *******************************************/

/* Create/destroy a transfer context on the KMD. */
VkResult pvr_drm_winsys_transfer_ctx_create(
   struct pvr_winsys *ws,
   const struct pvr_winsys_transfer_ctx_create_info *create_info,
   struct pvr_winsys_transfer_ctx **const ctx_out);
void pvr_drm_winsys_transfer_ctx_destroy(struct pvr_winsys_transfer_ctx *ctx);

/* Submit a transfer command; signal_sync, when non-NULL, is signalled on
 * completion.
 */
VkResult pvr_drm_winsys_transfer_submit(
   const struct pvr_winsys_transfer_ctx *ctx,
   const struct pvr_winsys_transfer_submit_info *submit_info,
   const struct pvr_device_info *dev_info,
   struct vk_sync *signal_sync);

#endif /* PVR_DRM_JOB_TRANSFER_H */