/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H

#include "amdgpu_bo.h"
#include "util/u_memory.h"
#include "drm-uapi/amdgpu_drm.h"

/* Smaller submits mean the GPU gets busy sooner and there is less
 * waiting for buffers and fences. Proof:
 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
 */
#define IB_MAX_SUBMIT_BYTES (80 * 1024)

struct amdgpu_ctx {
   struct pipe_reference reference;
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;
   amdgpu_bo_handle user_fence_bo;
   uint64_t *user_fence_cpu_address_base;

   /* If true, report lost contexts and skip command submission.
    * If false, terminate the process.
    */
   bool allow_context_lost;

   /* Lost context status due to ioctl and allocation failures. */
   enum pipe_reset_status sw_status;
};

struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   unsigned slab_real_idx; /* index of underlying real BO, used by slab buffers only */
   unsigned usage;
};

enum ib_type {
   IB_PREAMBLE,
   IB_MAIN,
   IB_NUM,
};

struct amdgpu_ib {
   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer_lean *big_buffer;
   uint8_t *big_buffer_cpu_ptr;
   uint64_t gpu_address;
   unsigned used_ib_space;

   /* The maximum size seen by cs_check_space. If the driver does
    * cs_check_space and flush, the newly allocated IB should have at least
    * this size.
    */
   unsigned max_check_space_size;

   unsigned max_ib_bytes;

   /* ptr_ib_size initially points to cs->csc->chunk_ib->ib_bytes.
    * If IB chaining is required in amdgpu_cs_check_space(), ptr_ib_size is
    * repointed to the size field of the indirect buffer packet.
    */
   uint32_t *ptr_ib_size;
   bool is_chained_ib;
};
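
/* A minimal sketch of how ptr_ib_size supports IB chaining (illustrative
 * only, not the winsys implementation; the PKT3 helper and exact packet
 * encoding are assumptions):
 *
 *    // The current IB is full: append an INDIRECT_BUFFER packet that
 *    // jumps to the freshly allocated IB.
 *    chain[0] = PKT3(PKT3_INDIRECT_BUFFER, 2, 0);
 *    chain[1] = new_ib_va;         // IB virtual address, low 32 bits
 *    chain[2] = new_ib_va >> 32;   // IB virtual address, high 32 bits
 *    chain[3] = 0;                 // IB size in dwords, not yet known
 *    ib->ptr_ib_size = &chain[3];  // patched once the new IB is finalized
 *    ib->is_chained_ib = true;
 */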

struct amdgpu_fence_list {
   struct pipe_fence_handle **list;
   unsigned num;
   unsigned max;
};

struct amdgpu_buffer_list {
   unsigned max_buffers;
   unsigned num_buffers;
   struct amdgpu_cs_buffer *buffers;
};

struct amdgpu_cs_context {
   struct drm_amdgpu_cs_chunk_ib chunk_ib[IB_NUM];
   uint32_t *ib_main_addr; /* the beginning of the IB before chaining */

   struct amdgpu_winsys *ws;

   /* Buffers. */
   struct amdgpu_buffer_list buffer_lists[NUM_BO_LIST_TYPES];
   int16_t *buffer_indices_hashlist;

   struct amdgpu_winsys_bo *last_added_bo;
   unsigned last_added_bo_usage;

   struct amdgpu_seq_no_fences seq_no_dependencies;

   struct amdgpu_fence_list syncobj_dependencies;
   struct amdgpu_fence_list syncobj_to_signal;

   struct pipe_fence_handle *fence;

   /* The error returned from cs_flush for non-async submissions. */
   int error_code;

   /* TMZ: whether this command buffer will be submitted with the TMZ flag. */
   bool secure;
};

/* This high limit is needed for viewperf2020/catia. */
#define BUFFER_HASHLIST_SIZE 32768
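
/* A minimal sketch of how the hashlist hint below is typically consulted
 * (illustrative only; `bo_hash` and the fallback search are assumptions,
 * not the actual implementation):
 *
 *    int16_t i = cs->buffer_indices_hashlist[bo_hash & (BUFFER_HASHLIST_SIZE - 1)];
 *    if (i >= 0 && list->buffers[i].bo == bo)
 *       return &list->buffers[i];   // hint hit: O(1) lookup
 *    // hint miss or collision: fall back to a linear search of the list
 */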

struct amdgpu_cs {
   struct amdgpu_ib main_ib; /* must be first because this is inherited */
   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx;

   /* Ensure a 64-bit alignment for drm_amdgpu_cs_chunk_fence. */
   struct drm_amdgpu_cs_chunk_fence fence_chunk;
   enum amd_ip_type ip_type;
   unsigned queue_index;

   /* We flip between these two CS. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The currently-used CS. */
   struct amdgpu_cs_context *csc;
   /* The CS currently owned by the other thread. */
   struct amdgpu_cs_context *cst;
   /* buffer_indices_hashlist[hash(bo)] returns -1 if the bo isn't part of
    * any buffer list, or the index where the bo could be found. Since
    * 1) hash collisions of 2 different bos can happen and 2) we use a
    * single hashlist for the 3 buffer lists, this is only a hint.
    * amdgpu_lookup_buffer uses this hint to speed up buffer lookups.
    */
   int16_t buffer_indices_hashlist[BUFFER_HASHLIST_SIZE];

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;
   bool noop;
   bool has_chaining;

   struct util_queue_fence flush_completed;
   struct pipe_fence_handle *next_fence;
   struct pb_buffer_lean *preamble_ib_bo;

   struct drm_amdgpu_cs_chunk_cp_gfx_shadow mcbp_fw_shadow_chunk;
};
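
/* A minimal sketch of the csc/cst flip performed at flush time (illustrative
 * only; fence creation, locking, and the submit thread are omitted):
 *
 *    struct amdgpu_cs_context *tmp = cs->csc;
 *    cs->csc = cs->cst;   // start recording into the idle context
 *    cs->cst = tmp;       // hand the filled context to the submit thread
 */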

struct amdgpu_fence {
   struct pipe_reference reference;
   uint32_t syncobj;

   struct amdgpu_winsys *ws;

   /* The following fields aren't set for imported fences. */
   struct amdgpu_ctx *ctx; /* submission context */
   uint32_t ip_type;
   uint64_t *user_fence_cpu_address;
   uint64_t seq_no;

   /* Whether the fence has been submitted. This is unsignalled for deferred
    * fences (cs->next_fence) and while an IB is still being submitted in the
    * submit thread. */
   struct util_queue_fence submitted;

   volatile int signalled; /* bool (int for atomicity) */
   bool imported;
   uint8_t queue_index; /* for non-imported fences */
   uint_seq_no queue_seq_no; /* winsys-generated sequence number */
};

void amdgpu_fence_destroy(struct amdgpu_fence *fence);

static inline bool amdgpu_fence_is_syncobj(struct amdgpu_fence *fence)
{
   return fence->ctx == NULL;
}
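
/* A hypothetical example of the invariant above: fences from our own
 * submissions carry their submission context, while fences imported as drm
 * syncobjs do not, which plausibly selects the wait path:
 *
 *    if (amdgpu_fence_is_syncobj(fence))
 *       ... wait on fence->syncobj via the syncobj kernel API ...
 *    else
 *       ... poll fence->user_fence_cpu_address / fence->seq_no ...
 */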

static inline void amdgpu_ctx_reference(struct amdgpu_ctx **dst, struct amdgpu_ctx *src)
{
   struct amdgpu_ctx *old_dst = *dst;

   if (pipe_reference(old_dst ? &old_dst->reference : NULL,
                      src ? &src->reference : NULL)) {
      amdgpu_cs_ctx_free(old_dst->ctx);
      amdgpu_bo_free(old_dst->user_fence_bo);
      FREE(old_dst);
   }
   *dst = src;
}

static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **adst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *asrc = (struct amdgpu_fence *)src;

   if (pipe_reference(&(*adst)->reference, &asrc->reference))
      amdgpu_fence_destroy(*adst);

   *adst = asrc;
}

/* Same as amdgpu_fence_reference, but ignore the value in *dst. */
static inline void amdgpu_fence_set_reference(struct pipe_fence_handle **dst,
                                              struct pipe_fence_handle *src)
{
   *dst = src;
   pipe_reference(NULL, &((struct amdgpu_fence *)src)->reference); /* only increment refcount */
}

/* Unreference dst, but don't assign anything. */
static inline void amdgpu_fence_drop_reference(struct pipe_fence_handle *dst)
{
   struct amdgpu_fence *adst = (struct amdgpu_fence *)dst;

   if (pipe_reference(&adst->reference, NULL)) /* only decrement refcount */
      amdgpu_fence_destroy(adst);
}
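
/* A hypothetical usage sketch of the fence refcounting helpers above
 * (variable names are made up; NULL handling follows the usual
 * pipe_reference rules):
 *
 *    struct pipe_fence_handle *kept = NULL;
 *    amdgpu_fence_reference(&kept, fence);     // unref old *kept, ref fence
 *    amdgpu_fence_set_reference(&out, kept);   // ref only; prior *out is ignored
 *    amdgpu_fence_drop_reference(kept);        // unref only; kept is left dangling
 */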

struct amdgpu_cs_buffer *
amdgpu_lookup_buffer_any_type(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);

static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs->priv;
   assert(cs);
   return cs;
}

#define get_container(member_ptr, container_type, container_member) \
   (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
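
/* A minimal usage sketch (hypothetical variables): recover the enclosing
 * amdgpu_cs from a pointer to its main_ib member:
 *
 *    struct amdgpu_ib *ib = ...;
 *    struct amdgpu_cs *cs = get_container(ib, struct amdgpu_cs, main_ib);
 */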

static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
                              struct amdgpu_winsys_bo *bo)
{
   return amdgpu_lookup_buffer_any_type(cs->csc, bo) != NULL;
}

static inline unsigned get_buf_list_idx(struct amdgpu_winsys_bo *bo)
{
   /* AMDGPU_BO_REAL_REUSABLE* maps to AMDGPU_BO_REAL. */
   static_assert(ARRAY_SIZE(((struct amdgpu_cs_context*)NULL)->buffer_lists) == NUM_BO_LIST_TYPES, "");
   return MIN2(bo->type, AMDGPU_BO_REAL);
}

static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
                                         struct amdgpu_winsys_bo *bo,
                                         unsigned usage)
{
   struct amdgpu_cs_buffer *buffer = amdgpu_lookup_buffer_any_type(cs->csc, bo);

   return buffer && (buffer->usage & usage) != 0;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute);
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);

#endif