asahi: Drop macOS backend

This might be useful in the future, but it is best reimplemented in
terms of the upcoming Linux UAPI instead of having parallel codepaths.
Let's drop it.

Signed-off-by: Asahi Lina <lina@asahilina.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21662>
commit afe134a49c (parent b5e7a22c2e)
Asahi Lina <lina@asahilina.net>, 2023-03-01 17:24:01 +09:00; committed by Marge Bot
8 changed files with 4 additions and 888 deletions

src/asahi/lib/agx_device.c

@@ -127,7 +127,6 @@ agx_open_device(void *memctx, struct agx_device *dev)
    for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
       list_inithead(&dev->bo_cache.buckets[i]);

-   dev->queue = agx_create_command_queue(dev);
    agx_get_global_ids(dev);

    return true;
@@ -140,19 +139,8 @@ agx_close_device(struct agx_device *dev)
    util_sparse_array_finish(&dev->bo_map);
 }

-struct agx_command_queue
-agx_create_command_queue(struct agx_device *dev)
-{
-   return (struct agx_command_queue){};
-}
-
 void
 agx_submit_cmdbuf(struct agx_device *dev, unsigned cmdbuf, unsigned mappings,
                   uint64_t scalar)
 {
 }
-
-void
-agx_wait_queue(struct agx_command_queue queue)
-{
-}

src/asahi/lib/agx_device.h

@@ -30,12 +30,6 @@
 #include "agx_formats.h"
 #include "agx_bo.h"

-#if __APPLE__
-#include "agx_iokit.h"
-#include <IOKit/IOKitLib.h>
-#include <mach/mach.h>
-#endif
-
 enum agx_dbg {
    AGX_DBG_TRACE = BITFIELD_BIT(0),
    AGX_DBG_DEQP = BITFIELD_BIT(1),
@@ -55,25 +49,13 @@ enum agx_dbg {
 /* Fencepost problem, hence the off-by-one */
 #define NR_BO_CACHE_BUCKETS (MAX_BO_CACHE_BUCKET - MIN_BO_CACHE_BUCKET + 1)

-#ifndef __APPLE__
-struct agx_command_queue {
-};
-#endif
-
 struct agx_device {
    uint32_t debug;
    uint64_t next_global_id, last_global_id;
-   struct agx_command_queue queue;

-#if __APPLE__
-   io_connect_t fd;
-   struct agx_bo cmdbuf, memmap;
-#else
    /* Device handle */
    int fd;
-#endif

    struct renderonly *ro;
    pthread_mutex_t bo_map_lock;
@@ -110,17 +92,9 @@ agx_lookup_bo(struct agx_device *dev, uint32_t handle)
    return util_sparse_array_get(&dev->bo_map, handle);
 }

-struct agx_bo agx_shmem_alloc(struct agx_device *dev, size_t size, bool cmdbuf);
-void agx_shmem_free(struct agx_device *dev, unsigned handle);
-
 uint64_t agx_get_global_id(struct agx_device *dev);
-struct agx_command_queue agx_create_command_queue(struct agx_device *dev);
 void agx_submit_cmdbuf(struct agx_device *dev, unsigned cmdbuf,
                        unsigned mappings, uint64_t scalar);
-void agx_wait_queue(struct agx_command_queue queue);

 #endif

src/asahi/lib/agx_device_macos.c (deleted)

@@ -1,384 +0,0 @@
/*
* Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
* Copyright 2019 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "agx_device.h"
#include <inttypes.h>
#include "agx_bo.h"
#include "decode.h"
unsigned AGX_FAKE_HANDLE = 0;
uint64_t AGX_FAKE_LO = 0;
uint64_t AGX_FAKE_HI = (1ull << 32);
void
agx_bo_free(struct agx_device *dev, struct agx_bo *bo)
{
const uint64_t handle = bo->handle;
kern_return_t ret = IOConnectCallScalarMethod(dev->fd, AGX_SELECTOR_FREE_MEM,
&handle, 1, NULL, NULL);
if (ret)
fprintf(stderr, "error freeing BO mem: %u\n", ret);
/* Reset the handle */
memset(bo, 0, sizeof(*bo));
}
void
agx_shmem_free(struct agx_device *dev, unsigned handle)
{
const uint64_t input = handle;
kern_return_t ret = IOConnectCallScalarMethod(
dev->fd, AGX_SELECTOR_FREE_SHMEM, &input, 1, NULL, NULL);
if (ret)
fprintf(stderr, "error freeing shmem: %u\n", ret);
}
struct agx_bo
agx_shmem_alloc(struct agx_device *dev, size_t size, bool cmdbuf)
{
struct agx_bo bo;
struct agx_create_shmem_resp out = {};
size_t out_sz = sizeof(out);
uint64_t inputs[2] = {
size,
cmdbuf ? 1 : 0 // 2 - error reporting, 1 - no error reporting
};
kern_return_t ret =
IOConnectCallMethod(dev->fd, AGX_SELECTOR_CREATE_SHMEM, inputs, 2, NULL,
0, NULL, NULL, &out, &out_sz);
assert(ret == 0);
assert(out_sz == sizeof(out));
assert(out.size == size);
assert(out.map != 0);
bo = (struct agx_bo){
.type = cmdbuf ? AGX_ALLOC_CMDBUF : AGX_ALLOC_MEMMAP,
.handle = out.id,
.ptr.cpu = out.map,
.size = out.size,
.guid = 0, /* TODO? */
};
if (dev->debug & AGX_DBG_TRACE)
agxdecode_track_alloc(&bo);
return bo;
}
struct agx_bo *
agx_bo_alloc(struct agx_device *dev, size_t size, enum agx_bo_flags flags)
{
struct agx_bo *bo;
unsigned handle = 0;
/* executable implies low va */
assert(!(flags & AGX_BO_EXEC) || (flags & AGX_BO_LOW_VA));
uint32_t mode = 0x430; // shared, ?
uint32_t args_in[24] = {0};
args_in[4] = 0x4000101; // 0x1000101; // unk
args_in[5] = mode;
args_in[16] = size;
args_in[20] = flags & AGX_BO_EXEC ? AGX_MEMORY_TYPE_SHADER
: flags & AGX_BO_LOW_VA ? AGX_MEMORY_TYPE_CMDBUF_32
: AGX_MEMORY_TYPE_FRAMEBUFFER;
uint64_t out[10] = {0};
size_t out_sz = sizeof(out);
kern_return_t ret =
IOConnectCallMethod(dev->fd, AGX_SELECTOR_ALLOCATE_MEM, NULL, 0, args_in,
sizeof(args_in), NULL, 0, out, &out_sz);
assert(ret == 0);
assert(out_sz == sizeof(out));
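/* The kernel returns the new allocation's handle in the upper 32 bits of out[3] */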
handle = (out[3] >> 32ull);
pthread_mutex_lock(&dev->bo_map_lock);
bo = agx_lookup_bo(dev, handle);
pthread_mutex_unlock(&dev->bo_map_lock);
/* Fresh handle */
assert(!memcmp(bo, &((struct agx_bo){}), sizeof(*bo)));
bo->type = AGX_ALLOC_REGULAR;
bo->size = size;
bo->flags = flags;
bo->dev = dev;
bo->handle = handle;
ASSERTED bool lo = (flags & AGX_BO_LOW_VA);
bo->ptr.gpu = out[0];
bo->ptr.cpu = (void *)out[1];
bo->guid = out[5];
assert(bo->ptr.gpu < (1ull << (lo ? 32 : 40)));
return bo;
}
struct agx_bo *
agx_bo_import(struct agx_device *dev, int fd)
{
unreachable("Linux UAPI not yet upstream");
}
int
agx_bo_export(struct agx_bo *bo)
{
bo->flags |= AGX_BO_SHARED;
unreachable("Linux UAPI not yet upstream");
}
static void
agx_get_global_ids(struct agx_device *dev)
{
uint64_t out[2] = {};
size_t out_sz = sizeof(out);
ASSERTED kern_return_t ret = IOConnectCallStructMethod(
dev->fd, AGX_SELECTOR_GET_GLOBAL_IDS, NULL, 0, &out, &out_sz);
assert(ret == 0);
assert(out_sz == sizeof(out));
assert(out[1] > out[0]);
dev->next_global_id = out[0];
dev->last_global_id = out[1];
}
uint64_t
agx_get_global_id(struct agx_device *dev)
{
if (unlikely(dev->next_global_id >= dev->last_global_id)) {
agx_get_global_ids(dev);
}
return dev->next_global_id++;
}
/* Tries to open an AGX device, returns true if successful */
bool
agx_open_device(void *memctx, struct agx_device *dev)
{
kern_return_t ret;
/* TODO: Support other models */
CFDictionaryRef matching = IOServiceNameMatching("AGXAcceleratorG13G_B0");
io_service_t service = IOServiceGetMatchingService(0, matching);
if (!service)
return false;
ret = IOServiceOpen(service, mach_task_self(), AGX_SERVICE_TYPE, &dev->fd);
if (ret)
return false;
const char *api = "Equestria";
char in[16] = {0};
assert(strlen(api) < sizeof(in));
memcpy(in, api, strlen(api));
ret = IOConnectCallStructMethod(dev->fd, AGX_SELECTOR_SET_API, in,
sizeof(in), NULL, NULL);
/* Oddly, the return codes are flipped for SET_API */
if (ret != 1)
return false;
dev->memctx = memctx;
util_sparse_array_init(&dev->bo_map, sizeof(struct agx_bo), 512);
simple_mtx_init(&dev->bo_cache.lock, mtx_plain);
list_inithead(&dev->bo_cache.lru);
for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
list_inithead(&dev->bo_cache.buckets[i]);
dev->queue = agx_create_command_queue(dev);
dev->cmdbuf = agx_shmem_alloc(dev, 0x4000,
true); // length becomes kernelCommandDataSize
dev->memmap = agx_shmem_alloc(dev, 0x10000, false);
agx_get_global_ids(dev);
return true;
}
void
agx_close_device(struct agx_device *dev)
{
agx_bo_cache_evict_all(dev);
util_sparse_array_finish(&dev->bo_map);
kern_return_t ret = IOServiceClose(dev->fd);
if (ret)
fprintf(stderr, "Error from IOServiceClose: %u\n", ret);
}
static struct agx_notification_queue
agx_create_notification_queue(mach_port_t connection)
{
struct agx_create_notification_queue_resp resp;
size_t resp_size = sizeof(resp);
assert(resp_size == 0x10);
ASSERTED kern_return_t ret = IOConnectCallStructMethod(
connection, AGX_SELECTOR_CREATE_NOTIFICATION_QUEUE, NULL, 0, &resp,
&resp_size);
assert(resp_size == sizeof(resp));
assert(ret == 0);
mach_port_t notif_port = IODataQueueAllocateNotificationPort();
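/* resp.unk2 appears to double as the queue ID: it is used as the port reference here and passed again (as queue.notif.id) when binding to the command queue */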
IOConnectSetNotificationPort(connection, 0, notif_port, resp.unk2);
return (struct agx_notification_queue){.port = notif_port,
.queue = resp.queue,
.id = resp.unk2};
}
struct agx_command_queue
agx_create_command_queue(struct agx_device *dev)
{
struct agx_command_queue queue = {};
{
uint8_t buffer[1024 + 8] = {0};
const char *path = "/tmp/a.out";
assert(strlen(path) < 1022);
memcpy(buffer + 0, path, strlen(path));
/* Copy to the end */
unsigned END_LEN = MIN2(strlen(path), 1024 - strlen(path));
unsigned SKIP = strlen(path) - END_LEN;
unsigned OFFS = 1024 - END_LEN;
memcpy(buffer + OFFS, path + SKIP, END_LEN);
buffer[1024] = 0x2;
struct agx_create_command_queue_resp out = {};
size_t out_sz = sizeof(out);
ASSERTED kern_return_t ret =
IOConnectCallStructMethod(dev->fd, AGX_SELECTOR_CREATE_COMMAND_QUEUE,
buffer, sizeof(buffer), &out, &out_sz);
assert(ret == 0);
assert(out_sz == sizeof(out));
queue.id = out.id;
assert(queue.id);
}
queue.notif = agx_create_notification_queue(dev->fd);
{
uint64_t scalars[2] = {queue.id, queue.notif.id};
ASSERTED kern_return_t ret =
IOConnectCallScalarMethod(dev->fd, 0x1D, scalars, 2, NULL, NULL);
assert(ret == 0);
}
{
uint64_t scalars[2] = {queue.id, 0x1ffffffffull};
ASSERTED kern_return_t ret =
IOConnectCallScalarMethod(dev->fd, 0x31, scalars, 2, NULL, NULL);
assert(ret == 0);
}
return queue;
}
void
agx_submit_cmdbuf(struct agx_device *dev, unsigned cmdbuf, unsigned mappings,
uint64_t scalar)
{
struct agx_submit_cmdbuf_req req = {
.count = 1,
.command_buffer_shmem_id = cmdbuf,
.segment_list_shmem_id = mappings,
.notify_1 = 0xABCD,
.notify_2 = 0x1234,
};
ASSERTED kern_return_t ret =
IOConnectCallMethod(dev->fd, AGX_SELECTOR_SUBMIT_COMMAND_BUFFERS, &scalar,
1, &req, sizeof(req), NULL, 0, NULL, 0);
assert(ret == 0);
return;
}
/*
* Wait for a frame to finish rendering.
*
* The macOS kernel indicates that rendering has finished using a notification
* queue. The kernel will send two messages on the notification queue. The
* second message indicates that rendering has completed. This simple routine
* waits for both messages. It's important that IODataQueueDequeue is used in a
* loop to flush the entire queue before calling
* IODataQueueWaitForAvailableData. Otherwise, we can race and get stuck in
* WaitForAvailableData.
*/
void
agx_wait_queue(struct agx_command_queue queue)
{
uint64_t data[4];
unsigned sz = sizeof(data);
unsigned message_id = 0;
uint64_t magic_numbers[2] = {0xABCD, 0x1234};
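/* These match the notify_1/notify_2 tokens set at submit time in agx_submit_cmdbuf */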
while (message_id < 2) {
IOReturn ret =
IODataQueueWaitForAvailableData(queue.notif.queue, queue.notif.port);
if (ret) {
fprintf(stderr, "Error waiting for available data\n");
return;
}
while (IODataQueueDequeue(queue.notif.queue, data, &sz) ==
kIOReturnSuccess) {
assert(sz == sizeof(data));
assert(data[0] == magic_numbers[message_id]);
message_id++;
}
}
}

src/asahi/lib/meson.build

@@ -21,16 +21,10 @@
 dep_iokit = dependency('IOKit', required : false)

-if host_machine.system() == 'darwin'
-  agx_device = 'agx_device_macos.c'
-else
-  agx_device = 'agx_device.c'
-endif
-
 libasahi_lib_files = files(
   'agx_bo.c',
   'agx_border.c',
-  agx_device,
+  'agx_device.c',
   'agx_formats.c',
   'agx_meta.c',
   'agx_tilebuffer.c',
@@ -75,7 +69,7 @@ libasahi_lib = static_library(
   c_args : [no_override_init_args],
   gnu_symbol_visibility : 'hidden',
   link_with: [libasahi_decode],
-  dependencies: [dep_libdrm, dep_valgrind, idep_nir, dep_iokit],
+  dependencies: [dep_libdrm, dep_valgrind, idep_nir],
   build_by_default : false,
 )

src/gallium/drivers/asahi/agx_pipe.c

@@ -29,6 +29,7 @@
 #include "asahi/layout/layout.h"
 #include "asahi/lib/agx_formats.h"
 #include "asahi/lib/decode.h"
+#include "drm-uapi/drm_fourcc.h"
 #include "frontend/sw_winsys.h"
 #include "frontend/winsys_handle.h"
 #include "gallium/auxiliary/renderonly/renderonly.h"
@@ -52,25 +53,17 @@
 #include "agx_disk_cache.h"
 #include "agx_public.h"
 #include "agx_state.h"
-#include "magic.h"

-/* drm_fourcc cannot be built on macOS */
-#ifndef __APPLE__
-#include "drm-uapi/drm_fourcc.h"
-#endif
-
-/* In case of macOS, pick some fake modifier values so we still build */
+/* Fake values, pending UAPI upstreaming */
 #ifndef DRM_FORMAT_MOD_LINEAR
 #define DRM_FORMAT_MOD_LINEAR 1
 #endif

 #ifndef DRM_FORMAT_MOD_INVALID
 #define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
 #endif

 #ifndef DRM_FORMAT_MOD_APPLE_TWIDDLED
 #define DRM_FORMAT_MOD_APPLE_TWIDDLED (2)
 #endif

 #ifndef DRM_FORMAT_MOD_APPLE_TWIDDLED_COMPRESSED
 #define DRM_FORMAT_MOD_APPLE_TWIDDLED_COMPRESSED (3)
 #endif
@@ -190,13 +183,11 @@ agx_resource_from_handle(struct pipe_screen *pscreen,
    ail_make_miptree(&rsc->layout);

-#ifndef __APPLE__
    if (dev->ro) {
       rsc->scanout =
          renderonly_create_gpu_import_for_resource(prsc, dev->ro, NULL);
       /* failure is expected in some cases.. */
    }
-#endif

    return prsc;
 }
@@ -581,10 +572,8 @@ agx_resource_destroy(struct pipe_screen *screen, struct pipe_resource *prsrc)
       winsys->displaytarget_destroy(winsys, rsrc->dt);
    }

-#ifndef __APPLE__
    if (rsrc->scanout)
       renderonly_scanout_destroy(rsrc->scanout, agx_screen->dev.ro);
-#endif

    agx_bo_unreference(rsrc->bo);
    FREE(rsrc);
@@ -1057,30 +1046,6 @@ agx_flush_batch(struct agx_context *ctx, struct agx_batch *batch)
    /* Size calculation should've been exact */
    assert(handle_i == handle_count);

-#ifdef __APPLE__
-   unsigned cmdbuf_id = agx_get_global_id(dev);
-   unsigned encoder_id = agx_get_global_id(dev);
-
-   unsigned cmdbuf_size = demo_cmdbuf(
-      dev->cmdbuf.ptr.cpu, dev->cmdbuf.size, &batch->pool, &batch->key,
-      batch->encoder->ptr.gpu, encoder_id, scissor, zbias,
-      batch->occlusion_buffer.gpu, pipeline_background,
-      pipeline_background_partial, pipeline_store, clear_pipeline_textures,
-      batch->clear, batch->clear_depth, batch->clear_stencil);
-
-   /* Generate the mapping table from the BO list */
-   demo_mem_map(dev->memmap.ptr.cpu, dev->memmap.size, handles, handle_count,
-                cmdbuf_id, encoder_id, cmdbuf_size);
-
-   free(handles);
-
-   agx_wait_queue(dev->queue);
-
-   if (dev->debug & AGX_DBG_TRACE) {
-      agxdecode_cmdstream(dev->cmdbuf.handle, dev->memmap.handle, true);
-      agxdecode_next_frame();
-   }
-#else
    /* TODO: Linux UAPI submission */
    (void)dev;
    (void)zbias;
@@ -1089,7 +1054,6 @@ agx_flush_batch(struct agx_context *ctx, struct agx_batch *batch)
    (void)pipeline_store;
    (void)pipeline_background;
    (void)pipeline_background_partial;
-#endif

    agx_batch_cleanup(ctx, batch);
 }

src/gallium/drivers/asahi/magic.c (deleted)

@@ -1,374 +0,0 @@
/*
* Copyright 2021 Alyssa Rosenzweig
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "magic.h"
#include <stdint.h>
#include "agx_state.h"
/* The structures managed in this file appear to be software defined (either in
* the macOS kernel driver or in the AGX firmware) */
/* Odd pattern */
static uint64_t
demo_unk6(struct agx_pool *pool)
{
struct agx_ptr ptr =
agx_pool_alloc_aligned(pool, 0x4000 * sizeof(uint64_t), 64);
uint64_t *buf = ptr.cpu;
memset(buf, 0, sizeof(*buf));
for (unsigned i = 1; i < 0x3ff; ++i)
buf[i] = (i + 1);
return ptr.gpu;
}
static uint64_t
demo_zero(struct agx_pool *pool, unsigned count)
{
struct agx_ptr ptr = agx_pool_alloc_aligned(pool, count, 64);
memset(ptr.cpu, 0, count);
return ptr.gpu;
}
static size_t
asahi_size_resource(struct pipe_resource *prsrc, unsigned level)
{
struct agx_resource *rsrc = agx_resource(prsrc);
size_t size = rsrc->layout.size_B;
if (rsrc->separate_stencil)
size += asahi_size_resource(&rsrc->separate_stencil->base, level);
return size;
}
static size_t
asahi_size_surface(struct pipe_surface *surf)
{
return asahi_size_resource(surf->texture, surf->u.tex.level);
}
static size_t
asahi_size_attachments(struct pipe_framebuffer_state *framebuffer)
{
size_t sum = 0;
for (unsigned i = 0; i < framebuffer->nr_cbufs; ++i)
sum += asahi_size_surface(framebuffer->cbufs[i]);
if (framebuffer->zsbuf)
sum += asahi_size_surface(framebuffer->zsbuf);
return sum;
}
static enum agx_iogpu_attachment_type
asahi_classify_attachment(enum pipe_format format)
{
const struct util_format_description *desc = util_format_description(format);
if (util_format_has_depth(desc))
return AGX_IOGPU_ATTACHMENT_TYPE_DEPTH;
else if (util_format_has_stencil(desc))
return AGX_IOGPU_ATTACHMENT_TYPE_STENCIL;
else
return AGX_IOGPU_ATTACHMENT_TYPE_COLOUR;
}
static uint64_t
agx_map_surface_resource(struct pipe_surface *surf, struct agx_resource *rsrc)
{
return agx_map_texture_gpu(rsrc, surf->u.tex.first_layer);
}
static uint64_t
agx_map_surface(struct pipe_surface *surf)
{
return agx_map_surface_resource(surf, agx_resource(surf->texture));
}
static void
asahi_pack_iogpu_attachment(void *out, struct agx_resource *rsrc,
unsigned total_size)
{
agx_pack(out, IOGPU_ATTACHMENT, cfg) {
cfg.type = asahi_classify_attachment(rsrc->layout.format);
cfg.address = rsrc->bo->ptr.gpu;
cfg.size = rsrc->layout.size_B;
cfg.percent = (100 * cfg.size) / total_size;
}
}
static unsigned
asahi_pack_iogpu_attachments(void *out,
struct pipe_framebuffer_state *framebuffer)
{
unsigned total_attachment_size = asahi_size_attachments(framebuffer);
struct agx_iogpu_attachment_packed *attachments = out;
unsigned nr = 0;
for (unsigned i = 0; i < framebuffer->nr_cbufs; ++i) {
asahi_pack_iogpu_attachment(attachments + (nr++),
agx_resource(framebuffer->cbufs[i]->texture),
total_attachment_size);
}
if (framebuffer->zsbuf) {
struct agx_resource *rsrc = agx_resource(framebuffer->zsbuf->texture);
asahi_pack_iogpu_attachment(attachments + (nr++), rsrc,
total_attachment_size);
if (rsrc->separate_stencil) {
asahi_pack_iogpu_attachment(attachments + (nr++),
rsrc->separate_stencil,
total_attachment_size);
}
}
return nr;
}
unsigned
demo_cmdbuf(uint64_t *buf, size_t size, struct agx_pool *pool,
struct pipe_framebuffer_state *framebuffer, uint64_t encoder_ptr,
uint64_t encoder_id, uint64_t scissor_ptr, uint64_t depth_bias_ptr,
uint64_t occlusion_ptr, uint32_t pipeline_clear,
uint32_t pipeline_load, uint32_t pipeline_store,
bool clear_pipeline_textures, unsigned clear_buffers,
double clear_depth, unsigned clear_stencil)
{
bool should_clear_depth = clear_buffers & PIPE_CLEAR_DEPTH;
bool should_clear_stencil = clear_buffers & PIPE_CLEAR_STENCIL;
uint32_t *map = (uint32_t *)buf;
memset(map, 0, 518 * 4);
uint64_t deflake_buffer = demo_zero(pool, 0x7e0);
uint64_t deflake_1 = deflake_buffer + 0x2a0;
uint64_t deflake_2 = deflake_buffer + 0x20;
uint64_t unk_buffer_2 = demo_zero(pool, 0x8000);
uint64_t depth_buffer = 0;
uint64_t stencil_buffer = 0;
agx_pack(map + 16, IOGPU_GRAPHICS, cfg) {
cfg.opengl_depth_clipping = true;
cfg.deflake_1 = deflake_1;
cfg.deflake_2 = deflake_2;
cfg.deflake_3 = deflake_buffer;
cfg.clear_pipeline_bind =
0xffff8002 | (clear_pipeline_textures ? 0x210 : 0);
cfg.clear_pipeline = pipeline_clear;
/* store pipeline used when entire frame completes */
cfg.store_pipeline_bind = 0x12;
cfg.store_pipeline = pipeline_store;
cfg.scissor_array = scissor_ptr;
cfg.depth_bias_array = depth_bias_ptr;
cfg.visibility_result_buffer = occlusion_ptr;
if (framebuffer->zsbuf) {
struct pipe_surface *zsbuf = framebuffer->zsbuf;
struct agx_resource *zsres = agx_resource(zsbuf->texture);
struct agx_resource *zres = NULL;
struct agx_resource *sres = NULL;
const struct util_format_description *desc =
util_format_description(zsres->layout.format);
assert(desc->format == PIPE_FORMAT_Z32_FLOAT ||
desc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT ||
desc->format == PIPE_FORMAT_S8_UINT);
cfg.depth_width = framebuffer->width;
cfg.depth_height = framebuffer->height;
if (util_format_has_depth(desc)) {
zres = zsres;
depth_buffer = agx_map_surface(zsbuf);
} else {
sres = zsres;
stencil_buffer = agx_map_surface(zsbuf);
}
if (zsres->separate_stencil) {
sres = zsres->separate_stencil;
stencil_buffer = agx_map_surface_resource(zsbuf, sres);
}
if (zres) {
cfg.zls_control.z_store_enable = true;
cfg.zls_control.z_load_enable = !should_clear_depth;
cfg.depth_buffer_1 = depth_buffer;
cfg.depth_buffer_2 = depth_buffer;
cfg.depth_buffer_3 = depth_buffer;
if (ail_is_compressed(&zres->layout)) {
uint64_t accel_buffer =
depth_buffer + zres->layout.metadata_offset_B;
cfg.depth_acceleration_buffer_1 = accel_buffer;
cfg.depth_acceleration_buffer_2 = accel_buffer;
cfg.depth_acceleration_buffer_3 = accel_buffer;
cfg.zls_control.z_compress_1 = true;
cfg.zls_control.z_compress_2 = true;
}
}
if (sres) {
cfg.zls_control.s_store_enable = true;
cfg.zls_control.s_load_enable = !should_clear_stencil;
cfg.stencil_buffer_1 = stencil_buffer;
cfg.stencil_buffer_2 = stencil_buffer;
cfg.stencil_buffer_3 = stencil_buffer;
if (ail_is_compressed(&sres->layout)) {
uint64_t accel_buffer =
stencil_buffer + sres->layout.metadata_offset_B;
cfg.stencil_acceleration_buffer_1 = accel_buffer;
cfg.stencil_acceleration_buffer_2 = accel_buffer;
cfg.stencil_acceleration_buffer_3 = accel_buffer;
cfg.zls_control.s_compress_1 = true;
cfg.zls_control.s_compress_2 = true;
}
}
/* It's unclear how tile size is conveyed for depth/stencil targets,
* which interacts with mipmapping (for example, a 33x33
* depth/stencil attachment).
*/
if (zsbuf->u.tex.level != 0)
unreachable("todo: mapping other levels");
}
cfg.width_1 = framebuffer->width;
cfg.height_1 = framebuffer->height;
cfg.pointer = unk_buffer_2;
cfg.set_when_reloading_z_or_s_1 = clear_pipeline_textures;
/* More specifically, this is set when both load+storing Z or S */
if (depth_buffer && !should_clear_depth) {
cfg.set_when_reloading_z_or_s_1 = true;
cfg.set_when_reloading_z_or_s_2 = true;
}
if (stencil_buffer && !should_clear_stencil) {
cfg.set_when_reloading_z_or_s_1 = true;
cfg.set_when_reloading_z_or_s_2 = true;
}
cfg.depth_clear_value = fui(clear_depth);
cfg.stencil_clear_value = clear_stencil & 0xff;
cfg.partial_reload_pipeline_bind = 0xffff8212;
cfg.partial_reload_pipeline = pipeline_load;
cfg.partial_store_pipeline_bind = 0x12;
cfg.partial_store_pipeline = pipeline_store;
cfg.depth_buffer_3 = depth_buffer;
cfg.stencil_buffer_3 = stencil_buffer;
cfg.encoder_id = encoder_id;
cfg.unknown_buffer = demo_unk6(pool);
cfg.width_2 = framebuffer->width;
cfg.height_2 = framebuffer->height;
cfg.unk_352 = clear_pipeline_textures ? 0x0 : 0x1;
}
unsigned offset_unk = (484 * 4);
unsigned offset_attachments = (496 * 4);
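/* The attachment descriptors start 4 words into the attachment region; the
 * word at offset +3 holds the attachment count */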
unsigned nr_attachments = asahi_pack_iogpu_attachments(
map + (offset_attachments / 4) + 4, framebuffer);
map[(offset_attachments / 4) + 3] = nr_attachments;
unsigned total_size =
offset_attachments + (AGX_IOGPU_ATTACHMENT_LENGTH * nr_attachments) + 16;
agx_pack(map, IOGPU_HEADER, cfg) {
cfg.total_size = total_size;
cfg.attachment_offset = offset_attachments;
cfg.attachment_length = nr_attachments * AGX_IOGPU_ATTACHMENT_LENGTH;
cfg.unknown_offset = offset_unk;
cfg.encoder = encoder_ptr;
}
return total_size;
}
static struct agx_map_header
demo_map_header(uint64_t cmdbuf_id, uint64_t encoder_id, unsigned cmdbuf_size,
unsigned count)
{
/* Structure: header followed by resource groups. For now, we use a single
* resource group for every resource. This could be optimized.
*/
unsigned length = sizeof(struct agx_map_header);
length += count * sizeof(struct agx_map_entry);
assert(length < 0x10000);
return (struct agx_map_header){
.cmdbuf_id = cmdbuf_id,
.segment_count = 1,
.length = length,
.encoder_id = encoder_id,
.kernel_commands_start_offset = 0,
.kernel_commands_end_offset = cmdbuf_size,
.total_resources = count,
.resource_group_count = count,
.unk = 0x8000,
};
}
void
demo_mem_map(void *map, size_t size, unsigned *handles, unsigned count,
uint64_t cmdbuf_id, uint64_t encoder_id, unsigned cmdbuf_size)
{
struct agx_map_header *header = map;
struct agx_map_entry *entries =
(struct agx_map_entry *)(((uint8_t *)map) + sizeof(*header));
struct agx_map_entry *end =
(struct agx_map_entry *)(((uint8_t *)map) + size);
/* Header precedes the entry */
*header = demo_map_header(cmdbuf_id, encoder_id, cmdbuf_size, count);
/* Add an entry for each BO mapped */
for (unsigned i = 0; i < count; ++i) {
assert((entries + i) < end);
entries[i] = (struct agx_map_entry){
.resource_id = {handles[i]},
.resource_unk = {0x20},
.resource_flags = {0x1},
.resource_count = 1,
};
}
}

src/gallium/drivers/asahi/magic.h (deleted)

@@ -1,43 +0,0 @@
/*
* Copyright (C) 2021 Alyssa Rosenzweig
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __ASAHI_MAGIC_H
#define __ASAHI_MAGIC_H
#include <stdint.h>
#include "agx_state.h"
unsigned demo_cmdbuf(uint64_t *buf, size_t size, struct agx_pool *pool,
struct pipe_framebuffer_state *framebuffer,
uint64_t encoder_ptr, uint64_t encoder_id,
uint64_t scissor_ptr, uint64_t depth_bias_ptr,
uint64_t occlusion_ptr, uint32_t pipeline_clear,
uint32_t pipeline_load, uint32_t pipeline_store,
bool clear_pipeline_textures, unsigned clear_buffers,
double clear_depth, unsigned clear_stencil);
void demo_mem_map(void *map, size_t size, unsigned *handles, unsigned count,
uint64_t cmdbuf_id, uint64_t encoder_id,
unsigned cmdbuf_size);
#endif

src/gallium/drivers/asahi/meson.build

@@ -28,9 +28,6 @@ files_asahi = files(
   'agx_state.c',
   'agx_uniforms.c',
 )

-if host_machine.system() == 'darwin'
-  files_asahi += files('magic.c')
-endif
-
 libasahi = static_library(
   'asahi',