radeonsi/tmz: allow secure job if the app made a tmz allocation

This commit makes TMZ always allowed instead of being either off or forced-on
with AMD_DEBUG=tmz.

With this change:
- secure jobs can be used as soon as the application makes a TMZ allocation. Driver
  internal allocations are not enough to enable secure jobs (if TMZ is supported
  and enabled by the kernel)
- AMD_DEBUG=tmz forces all scanout/depth/stencil buffers to be allocated as TMZ.
  This is useful to test apps that don't explicitly support protected content.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6049>
This commit is contained in:
Pierre-Eric Pelloux-Prayer 2020-07-23 10:51:56 +02:00 committed by Marge Bot
parent f5ec617677
commit 1b0d660cbc
13 changed files with 55 additions and 40 deletions

View file

@ -690,7 +690,7 @@ struct radeon_winsys {
/**
* Secure context
*/
bool (*ws_is_secure)(struct radeon_winsys *ws);
bool (*ws_uses_secure_bo)(struct radeon_winsys *ws);
bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
void (*cs_set_secure)(struct radeon_cmdbuf *cs, bool secure);
};

View file

@ -166,12 +166,13 @@ void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res,
else
res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
if (sscreen->ws->ws_is_secure(sscreen->ws)) {
if (res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL))
res->flags |= RADEON_FLAG_ENCRYPTED;
if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
res->flags |= RADEON_FLAG_ENCRYPTED;
}
/* Force scanout/depth/stencil buffer allocation to be encrypted */
if (sscreen->debug_flags & DBG(TMZ) &&
res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL))
res->flags |= RADEON_FLAG_ENCRYPTED;
if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
res->flags |= RADEON_FLAG_ENCRYPTED;
if (sscreen->debug_flags & DBG(NO_WC))
res->flags &= ~RADEON_FLAG_GTT_WC;

View file

@ -848,7 +848,7 @@ static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info
si_need_gfx_cs_space(sctx);
/* If we're using a secure context, determine if cs must be secure or not */
if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_compute_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);

View file

@ -338,7 +338,7 @@ void si_cp_dma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
}
/* TMZ handling */
if (unlikely(sctx->ws->ws_is_secure(sctx->ws) &&
if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws) &&
!(user_flags & SI_CPDMA_SKIP_TMZ))) {
bool secure = src && (si_resource(src)->flags & RADEON_FLAG_ENCRYPTED);
assert(!secure || (!dst || (si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED)));

View file

@ -74,7 +74,8 @@ void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst, ui
assert(size % 4 == 0);
if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS) || sctx->ws->ws_is_secure(sctx->ws)) {
sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS) ||
sctx->ws->ws_uses_secure_bo(sctx->ws)) {
sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
return;
}
@ -232,8 +233,7 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw, struct si_resour
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
bool use_secure_cmd = false;
/* if TMZ is supported and enabled */
if (ctx->ws->ws_is_secure(ctx->ws)) {
if (unlikely(ctx->ws->ws_uses_secure_bo(ctx->ws))) {
if (src && src->flags & RADEON_FLAG_ENCRYPTED) {
assert(!dst || (dst->flags & RADEON_FLAG_ENCRYPTED));
use_secure_cmd = true;

View file

@ -117,6 +117,8 @@ static const struct debug_named_value debug_options[] = {
{"nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA"},
{"nofmask", DBG(NO_FMASK), "Disable MSAA compression"},
{"tmz", DBG(TMZ), "Force allocation of scanout/depth/stencil buffer as encrypted"},
DEBUG_NAMED_VALUE_END /* must be last */
};
@ -973,6 +975,14 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
if (sscreen->debug_flags & DBG(NO_GFX))
sscreen->info.has_graphics = false;
if ((sscreen->debug_flags & DBG(TMZ)) &&
!sscreen->info.has_tmz_support) {
fprintf(stderr, "radeonsi: requesting TMZ features but TMZ is not supported\n");
FREE(sscreen);
return NULL;
}
/* Set functions first. */
sscreen->b.context_create = si_pipe_create_context;
sscreen->b.destroy = si_destroy_screen;

View file

@ -211,6 +211,8 @@ enum
DBG_NO_DCC_MSAA,
DBG_NO_FMASK,
DBG_TMZ,
DBG_COUNT
};

View file

@ -2034,7 +2034,7 @@ static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *i
si_need_gfx_cs_space(sctx);
/* If we're using a secure context, determine if cs must be secure or not */
if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_gfx_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);

View file

@ -238,8 +238,8 @@ static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *ws)
{
for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
pb_slabs_reclaim(&ws->bo_slabs[i]);
if (ws->secure)
pb_slabs_reclaim(&ws->bo_slabs_encrypted[i]);
if (ws->info.has_tmz_support)
pb_slabs_reclaim(&ws->bo_slabs_encrypted[i]);
}
pb_cache_release_all_buffers(&ws->bo_cache);
@ -521,9 +521,14 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
if (ws->zero_all_vram_allocs &&
(request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
if ((flags & RADEON_FLAG_ENCRYPTED) && ws->secure)
if ((flags & RADEON_FLAG_ENCRYPTED) &&
ws->info.has_tmz_support) {
request.flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
if (!(flags & RADEON_FLAG_DRIVER_INTERNAL))
ws->uses_secure_bos = true;
}
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
@ -619,7 +624,7 @@ bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size,
enum radeon_bo_flag flags)
{
struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->secure) ?
struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
/* Find the correct slab allocator for the given size. */
for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
@ -672,7 +677,7 @@ static struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
if (encrypted)
flags |= RADEON_FLAG_ENCRYPTED;
struct pb_slabs *slabs = (flags & RADEON_FLAG_ENCRYPTED && ws->secure) ?
struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
/* Determine the slab buffer size. */
@ -1291,7 +1296,7 @@ amdgpu_bo_create(struct amdgpu_winsys *ws,
/* Sparse buffers must have NO_CPU_ACCESS set. */
assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
struct pb_slabs *slabs = (flags & RADEON_FLAG_ENCRYPTED && ws->secure) ?
struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1];
unsigned max_slab_entry_size = 1 << (last_slab->min_order + last_slab->num_orders - 1);

View file

@ -1656,7 +1656,8 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
num_chunks++;
if (ws->secure && cs->secure) {
if (cs->secure) {
assert(ws->uses_secure_bos);
cs->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAGS_SECURE;
cs->ib[IB_MAIN].flags |= AMDGPU_IB_FLAGS_SECURE;
} else {

View file

@ -114,11 +114,6 @@ static bool do_winsys_init(struct amdgpu_winsys *ws,
ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
driQueryOptionb(config->options, "radeonsi_zerovram");
ws->secure = strstr(debug_get_option("AMD_DEBUG", ""), "tmz");
if (ws->secure) {
fprintf(stderr, "=== TMZ usage enabled ===\n");
}
return true;
@ -332,10 +327,10 @@ static bool kms_handle_equals(const void *a, const void *b)
return a == b;
}
static bool amdgpu_ws_is_secure(struct radeon_winsys *rws)
static bool amdgpu_ws_uses_secure_bo(struct radeon_winsys *rws)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
return ws->secure;
return ws->uses_secure_bos;
}
static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
@ -465,13 +460,14 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
return NULL;
}
if (aws->secure && !pb_slabs_init(&aws->bo_slabs_encrypted[i],
min_order, max_order,
RADEON_MAX_SLAB_HEAPS,
aws,
amdgpu_bo_can_reclaim_slab,
amdgpu_bo_slab_alloc_encrypted,
amdgpu_bo_slab_free)) {
if (aws->info.has_tmz_support &&
!pb_slabs_init(&aws->bo_slabs_encrypted[i],
min_order, max_order,
RADEON_MAX_SLAB_HEAPS,
aws,
amdgpu_bo_can_reclaim_slab,
amdgpu_bo_slab_alloc_encrypted,
amdgpu_bo_slab_free)) {
amdgpu_winsys_destroy(&ws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
@ -522,7 +518,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
ws->base.query_value = amdgpu_query_value;
ws->base.read_registers = amdgpu_read_registers;
ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
ws->base.ws_is_secure = amdgpu_ws_is_secure;
ws->base.ws_uses_secure_bo = amdgpu_ws_uses_secure_bo;
ws->base.cs_is_secure = amdgpu_cs_is_secure;
ws->base.cs_set_secure = amdgpu_cs_set_secure;

View file

@ -85,7 +85,7 @@ struct amdgpu_winsys {
bool debug_all_bos;
bool reserve_vmid;
bool zero_all_vram_allocs;
bool secure;
bool uses_secure_bos;
/* List of all allocated buffers */
simple_mtx_t global_bo_list_lock;

View file

@ -803,7 +803,7 @@ static void radeon_pin_threads_to_L3_cache(struct radeon_winsys *ws,
}
}
static bool radeon_ws_is_secure(struct radeon_winsys* ws)
static bool radeon_ws_uses_secure_bo(struct radeon_winsys* ws)
{
return false;
}
@ -888,9 +888,9 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
ws->base.cs_request_feature = radeon_cs_request_feature;
ws->base.query_value = radeon_query_value;
ws->base.read_registers = radeon_read_registers;
ws->base.ws_is_secure = radeon_ws_is_secure;
ws->base.cs_is_secure = radeon_cs_is_secure;
ws->base.cs_set_secure = radeon_cs_set_secure;
ws->base.ws_uses_secure_bo = radeon_ws_uses_secure_bo;
ws->base.cs_is_secure = radeon_cs_is_secure;
ws->base.cs_set_secure = radeon_cs_set_secure;
radeon_drm_bo_init_functions(ws);
radeon_drm_cs_init_functions(ws);