gallium/radeon: create and return a fence in the flush function
All flush functions get a fence parameter. cs_create_fence is removed.

Reviewed-by: Christian König <christian.koenig@amd.com>

commit 70cf6639c3
parent 3e9d2cbca2
20 changed files with 99 additions and 85 deletions
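The shape of the change, as a small self-contained sketch (all `_toy` names are invented for illustration; only the call shape mirrors the patched interface): instead of asking the winsys for a fence with cs_create_fence and flushing separately, callers now pass a fence pointer into the flush itself, or NULL when no fence is wanted.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fence_toy { int id; };
    struct cs_toy { int next_fence_id; };

    /* New-style flush: if the caller passes a non-NULL fence pointer, the
     * flush itself creates the fence and returns it through the parameter. */
    static void cs_flush_toy(struct cs_toy *cs, unsigned flags,
                             struct fence_toy **fence, uint32_t cs_trace_id)
    {
        (void)flags; (void)cs_trace_id;
        /* ... submit the queued commands here ... */
        if (fence) {
            *fence = malloc(sizeof(**fence));
            (*fence)->id = cs->next_fence_id++;
        }
    }

    int main(void)
    {
        struct cs_toy cs = { .next_fence_id = 1 };
        struct fence_toy *fence = NULL;

        cs_flush_toy(&cs, 0, NULL, 0);   /* caller does not want a fence */
        cs_flush_toy(&cs, 0, &fence, 0); /* fence created as part of the flush */
        printf("fence id: %d\n", fence->id);
        free(fence);
        return 0;
    }
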
@@ -123,11 +123,12 @@ static void r300_destroy_context(struct pipe_context* context)
     FREE(r300);
 }
 
-static void r300_flush_callback(void *data, unsigned flags)
+static void r300_flush_callback(void *data, unsigned flags,
+                                struct pipe_fence_handle **fence)
 {
     struct r300_context* const cs_context_copy = data;
 
-    r300_flush(&cs_context_copy->context, flags, NULL);
+    r300_flush(&cs_context_copy->context, flags, fence);
 }
 
 #define R300_INIT_ATOM(atomname, atomsize) \

@@ -34,7 +34,8 @@
 #include "r300_emit.h"
 
 
-static void r300_flush_and_cleanup(struct r300_context *r300, unsigned flags)
+static void r300_flush_and_cleanup(struct r300_context *r300, unsigned flags,
+                                   struct pipe_fence_handle **fence)
 {
     struct r300_atom *atom;
 

@@ -52,7 +53,7 @@ static void r300_flush_and_cleanup(struct r300_context *r300, unsigned flags)
     }
 
     r300->flush_counter++;
-    r300->rws->cs_flush(r300->cs, flags, 0);
+    r300->rws->cs_flush(r300->cs, flags, fence, 0);
     r300->dirty_hw = 0;
 
     /* New kitchen sink, baby. */

@@ -81,23 +82,19 @@ void r300_flush(struct pipe_context *pipe,
         flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
     }
 
-    if (fence) {
-        *fence = r300->rws->cs_create_fence(r300->cs);
-    }
-
     if (r300->dirty_hw) {
-        r300_flush_and_cleanup(r300, flags);
+        r300_flush_and_cleanup(r300, flags, fence);
     } else {
         if (fence) {
             /* We have to create a fence object, but the command stream is empty
              * and we cannot emit an empty CS. Let's write to some reg. */
             CS_LOCALS(r300);
             OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0);
-            r300->rws->cs_flush(r300->cs, flags, 0);
+            r300->rws->cs_flush(r300->cs, flags, fence, 0);
         } else {
             /* Even if hw is not dirty, we should at least reset the CS in case
              * the space checking failed for the first draw operation. */
-            r300->rws->cs_flush(r300->cs, flags, 0);
+            r300->rws->cs_flush(r300->cs, flags, NULL, 0);
         }
     }
 

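One subtlety in the hunk above is worth a note: a fence can only be signalled by a command stream that actually reaches the kernel, so when the CS is empty the driver pads it with a single harmless register write before flushing. A toy model of that guard (all names invented; the real pad is the OUT_CS_REG write shown above):

    /* Toy model: a fence can only be attached to a real submission, so an
     * empty CS gets one no-op dword queued before the flush. */
    struct cs_toy { unsigned cdw; };   /* number of dwords queued */

    static void emit_noop_toy(struct cs_toy *cs) { cs->cdw++; }

    static void flush_with_fence_toy(struct cs_toy *cs, int *fence_out)
    {
        if (fence_out && cs->cdw == 0)
            emit_noop_toy(cs);         /* mirrors OUT_CS_REG(...) above */
        /* submit cs->cdw dwords, then create a fence for the submission */
        cs->cdw = 0;
        if (fence_out)
            *fence_out = 1;            /* stand-in for the real fence object */
    }
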
@@ -119,7 +116,9 @@ void r300_flush(struct pipe_context *pipe,
         r300_decompress_zmask(r300);
     }
 
-    r300_flush_and_cleanup(r300, flags);
+    if (fence && *fence)
+        r300->rws->fence_reference(fence, NULL);
+    r300_flush_and_cleanup(r300, flags, fence);
 }
 
 /* Revoke Hyper-Z access, so that some other process can take it. */

@@ -408,8 +408,8 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 	int i;
 
 	/* make sure that the gfx ring is only one active */
-	if (ctx->b.rings.dma.cs) {
-		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+	if (ctx->b.rings.dma.cs && ctx->b.rings.dma.cs->cdw) {
+		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 	}
 
 	/* Initialize all the compute-related registers.

@@ -37,7 +37,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 	if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
 		ctx->b.gtt = 0;
 		ctx->b.vram = 0;
-		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 		return;
 	}
 	/* all will be accounted once relocation are emited */

@@ -93,7 +93,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 
 	/* Flush if there's not enough space. */
 	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
-		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 	}
 }
 

@@ -230,7 +230,8 @@ void r600_flush_emit(struct r600_context *rctx)
 	rctx->b.flags = 0;
 }
 
-void r600_context_flush(struct r600_context *ctx, unsigned flags)
+void r600_context_flush(struct r600_context *ctx, unsigned flags,
+			struct pipe_fence_handle **fence)
 {
 	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
 

@@ -270,7 +271,7 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
 	}
 
 	/* Flush the CS. */
-	ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->b.cs_count++);
+	ctx->b.ws->cs_flush(cs, flags, fence, ctx->screen->b.cs_count++);
 }
 
 void r600_begin_new_cs(struct r600_context *ctx)

@@ -66,7 +66,8 @@ static const struct debug_named_value r600_debug_options[] = {
  * pipe_context
  */
 
-static void r600_flush(struct pipe_context *ctx, unsigned flags)
+static void r600_flush(struct pipe_context *ctx, unsigned flags,
+		       struct pipe_fence_handle **fence)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct pipe_query *render_cond = NULL;

@@ -85,7 +86,7 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags)
 		ctx->render_condition(ctx, NULL, FALSE, 0);
 	}
 
-	r600_context_flush(rctx, flags);
+	r600_context_flush(rctx, flags, fence);
 	rctx->b.rings.gfx.flushing = false;
 	r600_begin_new_cs(rctx);
 

@@ -105,19 +106,18 @@ static void r600_flush_from_st(struct pipe_context *ctx,
 	unsigned fflags;
 
 	fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
-	if (fence) {
-		*fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs);
-	}
 
 	/* flush gfx & dma ring, order does not matter as only one can be live */
 	if (rctx->b.rings.dma.cs) {
-		rctx->b.rings.dma.flush(rctx, fflags);
+		rctx->b.rings.dma.flush(rctx, fflags, NULL);
 	}
-	rctx->b.rings.gfx.flush(rctx, fflags);
+	rctx->b.rings.gfx.flush(rctx, fflags, fence);
 }
 
-static void r600_flush_gfx_ring(void *ctx, unsigned flags)
+static void r600_flush_gfx_ring(void *ctx, unsigned flags,
+				struct pipe_fence_handle **fence)
 {
-	r600_flush((struct pipe_context*)ctx, flags);
+	r600_flush((struct pipe_context*)ctx, flags, fence);
 }
 
 static void r600_destroy_context(struct pipe_context *context)

@@ -582,7 +582,8 @@ boolean r600_is_format_supported(struct pipe_screen *screen,
 void r600_update_db_shader_control(struct r600_context * rctx);
 
 /* r600_hw_context.c */
-void r600_context_flush(struct r600_context *ctx, unsigned flags);
+void r600_context_flush(struct r600_context *ctx, unsigned flags,
+			struct pipe_fence_handle **fence);
 void r600_begin_new_cs(struct r600_context *ctx);
 void r600_flush_emit(struct r600_context *ctx);
 void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);

@@ -1335,8 +1335,8 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 	}
 
 	/* make sure that the gfx ring is only one active */
-	if (rctx->b.rings.dma.cs) {
-		rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC);
+	if (rctx->b.rings.dma.cs && rctx->b.rings.dma.cs->cdw) {
+		rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
 	}
 
 	if (!r600_update_derived_state(rctx)) {

@@ -37,7 +37,7 @@ boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
 	if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
 		return TRUE;
 	}
-	if (ctx->rings.dma.cs &&
+	if (ctx->rings.dma.cs && ctx->rings.dma.cs->cdw &&
 	    ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
 		return TRUE;
 	}

@@ -64,10 +64,10 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 	    ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
 					     resource->cs_buf, rusage)) {
 		if (usage & PIPE_TRANSFER_DONTBLOCK) {
-			ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+			ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 			return NULL;
 		} else {
-			ctx->rings.gfx.flush(ctx, 0);
+			ctx->rings.gfx.flush(ctx, 0, NULL);
 			busy = true;
 		}
 	}

@@ -76,10 +76,10 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 	    ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
 					     resource->cs_buf, rusage)) {
 		if (usage & PIPE_TRANSFER_DONTBLOCK) {
-			ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+			ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 			return NULL;
 		} else {
-			ctx->rings.dma.flush(ctx, 0);
+			ctx->rings.dma.flush(ctx, 0, NULL);
 			busy = true;
 		}
 	}

@@ -57,11 +57,11 @@ static INLINE unsigned r600_context_bo_reloc(struct r600_common_context *rctx,
 		if (ring == &rctx->rings.gfx) {
 			if (rctx->rings.dma.cs) {
 				/* flush dma ring */
-				rctx->rings.dma.flush(rctx, RADEON_FLUSH_ASYNC);
+				rctx->rings.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
 			}
 		} else {
 			/* flush gfx ring */
-			rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
+			rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
 		}
 	}
 	return rctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage,

@@ -45,7 +45,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw)
 	num_dw += ctx->rings.dma.cs->cdw;
 	/* Flush if there's not enough space. */
 	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
-		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 	}
 }
 

@@ -53,7 +53,8 @@ static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
 {
 }
 
-static void r600_flush_dma_ring(void *ctx, unsigned flags)
+static void r600_flush_dma_ring(void *ctx, unsigned flags,
+				struct pipe_fence_handle **fence)
 {
 	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
 	struct radeon_winsys_cs *cs = rctx->rings.dma.cs;

@@ -63,7 +64,7 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags)
 	}
 
 	rctx->rings.dma.flushing = true;
-	rctx->ws->cs_flush(cs, flags, 0);
+	rctx->ws->cs_flush(cs, flags, fence, 0);
 	rctx->rings.dma.flushing = false;
 }
 

@@ -297,7 +297,8 @@ struct r600_streamout {
 struct r600_ring {
 	struct radeon_winsys_cs *cs;
 	bool flushing;
-	void (*flush)(void *ctx, unsigned flags);
+	void (*flush)(void *ctx, unsigned flags,
+		      struct pipe_fence_handle **fence);
 };
 
 struct r600_rings {

@@ -87,7 +87,7 @@ struct ruvd_decoder {
 /* flush IB to the hardware */
 static void flush(struct ruvd_decoder *dec)
 {
-	dec->ws->cs_flush(dec->cs, RADEON_FLUSH_ASYNC, 0);
+	dec->ws->cs_flush(dec->cs, RADEON_FLUSH_ASYNC, NULL, 0);
 }
 
 /* add a new set register command to the IB */

@@ -50,7 +50,7 @@
  */
 static void flush(struct rvce_encoder *enc)
 {
-	enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, 0);
+	enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL, 0);
 }
 
 #if 0

@@ -267,7 +267,8 @@ static void rvce_flush(struct pipe_video_codec *encoder)
 {
 }
 
-static void rvce_cs_flush(void *ctx, unsigned flags)
+static void rvce_cs_flush(void *ctx, unsigned flags,
+			  struct pipe_fence_handle **fence)
 {
 	// just ignored
 }

@@ -73,11 +73,12 @@ void si_need_cs_space(struct si_context *ctx, unsigned num_dw,
 
 	/* Flush if there's not enough space. */
 	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
-		si_flush(&ctx->b.b, NULL, RADEON_FLUSH_ASYNC);
+		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
 	}
 }
 
-void si_context_flush(struct si_context *ctx, unsigned flags)
+void si_context_flush(struct si_context *ctx, unsigned flags,
+		      struct pipe_fence_handle **fence)
 {
 	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
 

@@ -123,7 +124,7 @@ void si_context_flush(struct si_context *ctx, unsigned flags)
 #endif
 
 	/* Flush the CS. */
-	ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, 0);
+	ctx->b.ws->cs_flush(cs, flags, fence, 0);
 
 #if SI_TRACE_CS
 	if (ctx->screen->b.trace_bo) {

@@ -33,18 +33,14 @@
 /*
  * pipe_context
  */
-void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
-	      unsigned flags)
+static void si_flush(struct pipe_context *ctx, unsigned flags,
+		     struct pipe_fence_handle **fence)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
 	struct pipe_query *render_cond = NULL;
 	boolean render_cond_cond = FALSE;
 	unsigned render_cond_mode = 0;
 
-	if (fence) {
-		*fence = sctx->b.ws->cs_create_fence(sctx->b.rings.gfx.cs);
-	}
-
 	/* Disable render condition. */
 	if (sctx->b.current_render_cond) {
 		render_cond = sctx->b.current_render_cond;

@@ -53,7 +49,7 @@ void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
 		ctx->render_condition(ctx, NULL, FALSE, 0);
 	}
 
-	si_context_flush(sctx, flags);
+	si_context_flush(sctx, flags, fence);
 
 	/* Re-enable render condition. */
 	if (render_cond) {

@@ -72,15 +68,16 @@ static void si_flush_from_st(struct pipe_context *ctx,
 		rflags |= RADEON_FLUSH_END_OF_FRAME;
 
 	if (sctx->b.rings.dma.cs) {
-		sctx->b.rings.dma.flush(sctx, rflags);
+		sctx->b.rings.dma.flush(sctx, rflags, NULL);
 	}
 
-	si_flush(ctx, fence, rflags);
+	si_flush(ctx, rflags, fence);
 }
 
-static void si_flush_gfx_ring(void *ctx, unsigned flags)
+static void si_flush_gfx_ring(void *ctx, unsigned flags,
+			      struct pipe_fence_handle **fence)
 {
-	si_flush((struct pipe_context*)ctx, NULL, flags);
+	si_flush(ctx, flags, fence);
 }
 
 static void si_destroy_context(struct pipe_context *context)

@@ -177,14 +177,11 @@ void si_dma_copy(struct pipe_context *ctx,
 		 const struct pipe_box *src_box);
 
 /* si_hw_context.c */
-void si_context_flush(struct si_context *ctx, unsigned flags);
+void si_context_flush(struct si_context *ctx, unsigned flags,
+		      struct pipe_fence_handle **fence);
 void si_begin_new_cs(struct si_context *ctx);
 void si_need_cs_space(struct si_context *ctx, unsigned num_dw, boolean count_draw_in);
 
-/* si_pipe.c */
-void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
-	      unsigned flags);
-
 #if SI_TRACE_CS
 void si_trace_emit(struct si_context *sctx);
 #endif

@@ -405,7 +405,7 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
             *
             * Only check whether the buffer is being used for write. */
            if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                return NULL;
            }
 

@@ -415,7 +415,7 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
            }
        } else {
            if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                return NULL;
            }
 

@@ -436,7 +436,7 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
             *
             * Only check whether the buffer is being used for write. */
            if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-               cs->flush_cs(cs->flush_data, 0);
+               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            radeon_bo_wait((struct pb_buffer*)bo,
                           RADEON_USAGE_WRITE);

@@ -444,7 +444,7 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
            /* Mapping for write. */
            if (cs) {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
-                   cs->flush_cs(cs->flush_data, 0);
+                   cs->flush_cs(cs->flush_data, 0, NULL);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))

@@ -751,7 +751,7 @@ static void radeon_bo_set_tiling(struct pb_buffer *_buf,
     /* Tiling determines how DRM treats the buffer data.
      * We must flush CS when changing it if the buffer is referenced. */
     if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-        cs->flush_cs(cs->flush_data, 0);
+        cs->flush_cs(cs->flush_data, 0, NULL);
     }
 
     while (p_atomic_read(&bo->num_active_ioctls)) {

@@ -75,6 +75,11 @@
 
 #define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
 
+static struct pipe_fence_handle *
+radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
+static void radeon_fence_reference(struct pipe_fence_handle **dst,
+                                   struct pipe_fence_handle *src);
+
 static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
                                       struct radeon_drm_winsys *ws)
 {

@@ -140,7 +145,8 @@ static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
 static struct radeon_winsys_cs *
 radeon_drm_cs_create(struct radeon_winsys *rws,
                      enum ring_type ring_type,
-                     void (*flush)(void *ctx, unsigned flags),
+                     void (*flush)(void *ctx, unsigned flags,
+                                   struct pipe_fence_handle **fence),
                      void *flush_ctx,
                      struct radeon_winsys_cs_handle *trace_buf)
 {

@@ -349,7 +355,7 @@ static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
 
         /* Flush if there are any relocs. Clean up otherwise. */
         if (cs->csc->crelocs) {
-            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
         } else {
             radeon_cs_context_cleanup(cs->csc);
 

@@ -417,7 +423,10 @@ void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
 
 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
 
-static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags, uint32_t cs_trace_id)
+static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
+                                unsigned flags,
+                                struct pipe_fence_handle **fence,
+                                uint32_t cs_trace_id)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
     struct radeon_cs_context *tmp;

@@ -457,9 +466,14 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags, uint32_t cs_trace_id)
         fprintf(stderr, "radeon: command stream overflowed\n");
     }
 
+    if (fence) {
+        radeon_fence_reference(fence, NULL);
+        *fence = radeon_cs_create_fence(rcs);
+    }
+
     radeon_drm_cs_sync_flush(rcs);
 
-    /* Flip command streams. */
+    /* Swap command streams. */
     tmp = cs->csc;
     cs->csc = cs->cst;
     cs->cst = tmp;

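The radeon_fence_reference(fence, NULL) before the assignment follows gallium's usual reference-counting discipline: a caller may reuse one handle across many flushes, so the reference held from the previous flush must be released before the slot is overwritten. A toy model of the pattern (invented types; the real code goes through gallium's reference helpers):

    #include <stdlib.h>

    struct fence_toy { int refcount; };

    /* Point *dst at src, releasing whatever *dst held before, so that
     * overwriting a handle that still holds an older fence cannot leak it. */
    static void fence_reference_toy(struct fence_toy **dst, struct fence_toy *src)
    {
        if (src)
            src->refcount++;                  /* take a reference on the new fence */
        if (*dst && --(*dst)->refcount == 0)  /* drop the reference on the old one */
            free(*dst);
        *dst = src;
    }
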
@@ -468,7 +482,9 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags, uint32_t cs_trace_id)
 
     /* If the CS is not empty or overflowed, emit it in a separate thread. */
     if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
-        unsigned i, crelocs = cs->cst->crelocs;
+        unsigned i, crelocs;
+
+        crelocs = cs->cst->crelocs;
 
         cs->cst->chunks[0].length_dw = cs->base.cdw;
 

@@ -643,7 +659,6 @@ void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
     ws->base.cs_flush = radeon_drm_cs_flush;
     ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
     ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
-    ws->base.cs_create_fence = radeon_cs_create_fence;
     ws->base.fence_wait = radeon_fence_wait;
     ws->base.fence_reference = radeon_fence_reference;
 }

@@ -73,7 +73,7 @@ struct radeon_drm_cs {
     struct radeon_drm_winsys *ws;
 
     /* Flush CS. */
-    void (*flush_cs)(void *ctx, unsigned flags);
+    void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
     void *flush_data;
 
     pipe_semaphore flush_completed;

@@ -424,7 +424,8 @@ struct radeon_winsys {
      */
     struct radeon_winsys_cs *(*cs_create)(struct radeon_winsys *ws,
                                           enum ring_type ring_type,
-                                          void (*flush)(void *ctx, unsigned flags),
+                                          void (*flush)(void *ctx, unsigned flags,
+                                                        struct pipe_fence_handle **fence),
                                           void *flush_ctx,
                                           struct radeon_winsys_cs_handle *trace_buf);
 

@@ -488,9 +489,14 @@ struct radeon_winsys {
      *
      * \param cs A command stream to flush.
      * \param flags, RADEON_FLUSH_ASYNC or 0.
-     * \param cs_trace_id A unique identifiant for the cs
+     * \param fence Pointer to a fence. If non-NULL, a fence is inserted
+     *              after the CS and is returned through this parameter.
+     * \param cs_trace_id A unique identifier of the cs, used for tracing.
      */
-    void (*cs_flush)(struct radeon_winsys_cs *cs, unsigned flags, uint32_t cs_trace_id);
+    void (*cs_flush)(struct radeon_winsys_cs *cs,
+                     unsigned flags,
+                     struct pipe_fence_handle **fence,
+                     uint32_t cs_trace_id);
 
     /**
      * Return TRUE if a buffer is referenced by a command stream.

@@ -519,13 +525,6 @@ struct radeon_winsys {
      */
     void (*cs_sync_flush)(struct radeon_winsys_cs *cs);
 
-    /**
-     * Return a fence associated with the CS. The fence will be signalled
-     * once the CS is flushed and all commands in the CS are completed
-     * by the GPU.
-     */
-    struct pipe_fence_handle *(*cs_create_fence)(struct radeon_winsys_cs *cs);
-
     /**
      * Wait for the fence and return true if the fence has been signalled.
      * The timeout of 0 will only return the status.

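With cs_create_fence gone, the fence returned by cs_flush pairs with the existing fence_wait and fence_reference hooks set up in radeon_drm_cs_init_functions above. A hedged caller-side sketch (assuming ws and cs are an initialized winsys and command stream, and the usual (ws, fence, timeout) argument order for fence_wait):

    struct pipe_fence_handle *fence = NULL;

    ws->cs_flush(cs, 0, &fence, 0);        /* fence inserted after this CS */

    if (!ws->fence_wait(ws, fence, 0)) {   /* timeout 0: just poll the status */
        /* not signalled yet; block until the GPU has executed the CS
         * (~0ull stands in for gallium's PIPE_TIMEOUT_INFINITE) */
        ws->fence_wait(ws, fence, ~0ull);
    }
    ws->fence_reference(&fence, NULL);     /* drop our reference */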