mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-01 05:58:05 +02:00
r300g: decide whether a flush should be asynchronous when calling it
Thread offloading is sometimes not desirable, e.g. when mapping a buffer.
This commit is contained in:
parent
6051f26b78
commit
5650a719f0
11 changed files with 54 additions and 54 deletions
|
|
@ -263,7 +263,7 @@ static void r300_clear(struct pipe_context* pipe,
|
|||
|
||||
/* Reserve CS space. */
|
||||
if (dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
|
||||
r300->context.flush(&r300->context, 0, NULL);
|
||||
r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
|
||||
}
|
||||
|
||||
/* Emit clear packets. */
|
||||
|
|
|
|||
|
|
@ -139,11 +139,11 @@ static void r300_destroy_context(struct pipe_context* context)
|
|||
FREE(r300);
|
||||
}
|
||||
|
||||
void r300_flush_cb(void *data)
|
||||
static void r300_flush_callback(void *data, unsigned flags)
|
||||
{
|
||||
struct r300_context* const cs_context_copy = data;
|
||||
|
||||
cs_context_copy->context.flush(&cs_context_copy->context, 0, NULL);
|
||||
r300_flush(&cs_context_copy->context, flags, NULL);
|
||||
}
|
||||
|
||||
#define R300_INIT_ATOM(atomname, atomsize) \
|
||||
|
|
@ -453,7 +453,7 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
|
|||
r300_init_render_functions(r300);
|
||||
r300_init_states(&r300->context);
|
||||
|
||||
rws->cs_set_flush(r300->cs, r300_flush_cb, r300);
|
||||
rws->cs_set_flush(r300->cs, r300_flush_callback, r300);
|
||||
|
||||
/* The KIL opcode needs the first texture unit to be enabled
|
||||
* on r3xx-r4xx. In order to calm down the CS checker, we bind this
|
||||
|
|
|
|||
|
|
@ -665,8 +665,6 @@ static INLINE void r300_mark_atom_dirty(struct r300_context *r300,
|
|||
struct pipe_context* r300_create_context(struct pipe_screen* screen,
|
||||
void *priv);
|
||||
|
||||
void r300_flush_cb(void *data);
|
||||
|
||||
/* Context initialization. */
|
||||
struct draw_stage* r300_draw_stage(struct r300_context* r300);
|
||||
void r300_init_blit_functions(struct r300_context *r300);
|
||||
|
|
@ -681,6 +679,11 @@ void r300_decompress_zmask(struct r300_context *r300);
|
|||
void r300_decompress_zmask_locked_unsafe(struct r300_context *r300);
|
||||
void r300_decompress_zmask_locked(struct r300_context *r300);
|
||||
|
||||
/* r300_flush.c */
|
||||
void r300_flush(struct pipe_context *pipe,
|
||||
unsigned flags,
|
||||
struct pipe_fence_handle **fence);
|
||||
|
||||
/* r300_hyperz.c */
|
||||
void r300_update_hyperz_state(struct r300_context* r300);
|
||||
|
||||
|
|
|
|||
|
|
@ -1218,7 +1218,7 @@ validate:
|
|||
if (flushed)
|
||||
return FALSE;
|
||||
|
||||
r300->context.flush(&r300->context, 0, NULL);
|
||||
r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
|
||||
flushed = TRUE;
|
||||
goto validate;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,9 +31,10 @@
|
|||
#include "r300_cs.h"
|
||||
#include "r300_emit.h"
|
||||
|
||||
static void r300_flush(struct pipe_context* pipe,
|
||||
unsigned flags,
|
||||
struct pipe_fence_handle** fence)
|
||||
|
||||
void r300_flush(struct pipe_context *pipe,
|
||||
unsigned flags,
|
||||
struct pipe_fence_handle **fence)
|
||||
{
|
||||
struct r300_context *r300 = r300_context(pipe);
|
||||
struct r300_atom *atom;
|
||||
|
|
@ -61,7 +62,7 @@ static void r300_flush(struct pipe_context* pipe,
|
|||
r500_emit_index_bias(r300, 0);
|
||||
|
||||
r300->flush_counter++;
|
||||
r300->rws->cs_flush(r300->cs);
|
||||
r300->rws->cs_flush(r300->cs, flags);
|
||||
r300->dirty_hw = 0;
|
||||
|
||||
/* New kitchen sink, baby. */
|
||||
|
|
@ -83,20 +84,24 @@ static void r300_flush(struct pipe_context* pipe,
|
|||
* and we cannot emit an empty CS. We must write some regs then. */
|
||||
CS_LOCALS(r300);
|
||||
OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0);
|
||||
r300->rws->cs_flush(r300->cs);
|
||||
r300->rws->cs_flush(r300->cs, flags);
|
||||
} else {
|
||||
/* Even if hw is not dirty, we should at least reset the CS in case
|
||||
* the space checking failed for the first draw operation. */
|
||||
r300->rws->cs_flush(r300->cs);
|
||||
r300->rws->cs_flush(r300->cs, flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & PIPE_FLUSH_FRAME) {
|
||||
r300->rws->cs_sync_flush(r300->cs);
|
||||
}
|
||||
static void r300_flush_wrapped(struct pipe_context *pipe,
|
||||
unsigned flags,
|
||||
struct pipe_fence_handle **fence)
|
||||
{
|
||||
/* don't use the flags param, it means something else */
|
||||
r300_flush(pipe, 0, fence);
|
||||
}
|
||||
|
||||
void r300_init_flush_functions(struct r300_context* r300)
|
||||
{
|
||||
r300->context.flush = r300_flush;
|
||||
r300->context.flush = r300_flush_wrapped;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -219,7 +219,7 @@ static boolean r300_reserve_cs_dwords(struct r300_context *r300,
|
|||
|
||||
/* Reserve requested CS space. */
|
||||
if (cs_dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
|
||||
r300->context.flush(&r300->context, 0, NULL);
|
||||
r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
|
||||
flushed = TRUE;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -72,6 +72,7 @@ static void r300_copy_into_tiled_texture(struct pipe_context *ctx,
|
|||
transfer->box.x, transfer->box.y, transfer->box.z,
|
||||
&r300transfer->linear_texture->b.b.b, 0, &src_box);
|
||||
|
||||
/* XXX remove this. */
|
||||
ctx->flush(ctx, 0, NULL);
|
||||
}
|
||||
|
||||
|
|
@ -152,7 +153,7 @@ r300_texture_get_transfer(struct pipe_context *ctx,
|
|||
if (!trans->linear_texture) {
|
||||
/* Oh crap, the thing can't create the texture.
|
||||
* Let's flush and try again. */
|
||||
ctx->flush(ctx, 0, NULL);
|
||||
r300_flush(ctx, 0, NULL);
|
||||
|
||||
trans->linear_texture = r300_resource(
|
||||
ctx->screen->resource_create(ctx->screen,
|
||||
|
|
@ -176,13 +177,7 @@ r300_texture_get_transfer(struct pipe_context *ctx,
|
|||
assert(!trans->linear_texture->tex.microtile &&
|
||||
!trans->linear_texture->tex.macrotile[0]);
|
||||
|
||||
/* Set the stride.
|
||||
*
|
||||
* Even though we are using an internal texture for this,
|
||||
* the transfer level, box and usage parameters still reflect
|
||||
* the arguments received to get_transfer. We just do the
|
||||
* right thing internally.
|
||||
*/
|
||||
/* Set the stride. */
|
||||
trans->transfer.stride =
|
||||
trans->linear_texture->tex.stride_in_bytes[0];
|
||||
|
||||
|
|
@ -192,7 +187,7 @@ r300_texture_get_transfer(struct pipe_context *ctx,
|
|||
r300_copy_from_tiled_texture(ctx, trans);
|
||||
|
||||
/* Always referenced in the blit. */
|
||||
ctx->flush(ctx, 0, NULL);
|
||||
r300_flush(ctx, 0, NULL);
|
||||
}
|
||||
return &trans->transfer;
|
||||
}
|
||||
|
|
@ -202,8 +197,9 @@ r300_texture_get_transfer(struct pipe_context *ctx,
|
|||
trans->transfer.stride = tex->tex.stride_in_bytes[level];
|
||||
trans->offset = r300_texture_get_offset(tex, level, box->z);
|
||||
|
||||
if (referenced_cs)
|
||||
ctx->flush(ctx, PIPE_FLUSH_RENDER_CACHE, NULL);
|
||||
if (referenced_cs &&
|
||||
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED))
|
||||
r300_flush(ctx, 0, NULL);
|
||||
return &trans->transfer;
|
||||
}
|
||||
return NULL;
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@
|
|||
#include "pipe/p_state.h"
|
||||
|
||||
#define R300_MAX_CMDBUF_DWORDS (16 * 1024)
|
||||
#define R300_FLUSH_ASYNC (1 << 0)
|
||||
|
||||
struct winsys_handle;
|
||||
struct r300_winsys_screen;
|
||||
|
|
@ -265,15 +266,9 @@ struct r300_winsys_screen {
|
|||
* Flush a command stream.
|
||||
*
|
||||
* \param cs A command stream to flush.
|
||||
* \param flags, R300_FLUSH_ASYNC or 0.
|
||||
*/
|
||||
void (*cs_flush)(struct r300_winsys_cs *cs);
|
||||
|
||||
/**
|
||||
* Wait until the last flush is completed.
|
||||
*
|
||||
* \param cs A command stream.
|
||||
*/
|
||||
void (*cs_sync_flush)(struct r300_winsys_cs *cs);
|
||||
void (*cs_flush)(struct r300_winsys_cs *cs, unsigned flags);
|
||||
|
||||
/**
|
||||
* Set a flush callback which is called from winsys when flush is
|
||||
|
|
@ -284,7 +279,7 @@ struct r300_winsys_screen {
|
|||
* \param user A user pointer that will be passed to the flush callback.
|
||||
*/
|
||||
void (*cs_set_flush)(struct r300_winsys_cs *cs,
|
||||
void (*flush)(void *),
|
||||
void (*flush)(void *ctx, unsigned flags),
|
||||
void *user);
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -172,7 +172,7 @@ static void *radeon_bo_map_internal(struct pb_buffer *_buf,
|
|||
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
|
||||
if (flags & PB_USAGE_DONTBLOCK) {
|
||||
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
|
||||
cs->flush_cs(cs->flush_data);
|
||||
cs->flush_cs(cs->flush_data, R300_FLUSH_ASYNC);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
@ -181,7 +181,11 @@ static void *radeon_bo_map_internal(struct pb_buffer *_buf,
|
|||
}
|
||||
} else {
|
||||
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
|
||||
cs->flush_cs(cs->flush_data);
|
||||
cs->flush_cs(cs->flush_data, 0);
|
||||
} else {
|
||||
/* Try to avoid busy-waiting in radeon_bo_wait. */
|
||||
if (p_atomic_read(&bo->num_active_ioctls))
|
||||
radeon_drm_cs_sync_flush(cs);
|
||||
}
|
||||
|
||||
radeon_bo_wait((struct r300_winsys_bo*)bo);
|
||||
|
|
@ -406,8 +410,7 @@ static void radeon_bo_set_tiling(struct r300_winsys_bo *_buf,
|
|||
/* Tiling determines how DRM treats the buffer data.
|
||||
* We must flush CS when changing it if the buffer is referenced. */
|
||||
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
|
||||
radeon_drm_cs_flush(rcs);
|
||||
radeon_drm_cs_sync_flush(rcs);
|
||||
cs->flush_cs(cs->flush_data, 0);
|
||||
}
|
||||
|
||||
while (p_atomic_read(&bo->num_active_ioctls)) {
|
||||
|
|
|
|||
|
|
@ -355,10 +355,8 @@ static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs)
|
||||
void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs)
|
||||
{
|
||||
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
|
||||
|
||||
/* Wait for any pending ioctl to complete. */
|
||||
if (cs->thread) {
|
||||
pipe_thread_wait(cs->thread);
|
||||
|
|
@ -368,12 +366,12 @@ void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs)
|
|||
|
||||
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
|
||||
|
||||
void radeon_drm_cs_flush(struct r300_winsys_cs *rcs)
|
||||
static void radeon_drm_cs_flush(struct r300_winsys_cs *rcs, unsigned flags)
|
||||
{
|
||||
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
|
||||
struct radeon_cs_context *tmp;
|
||||
|
||||
radeon_drm_cs_sync_flush(rcs);
|
||||
radeon_drm_cs_sync_flush(cs);
|
||||
|
||||
/* If the CS is not empty, emit it in a newly-spawned thread. */
|
||||
if (cs->base.cdw) {
|
||||
|
|
@ -384,7 +382,8 @@ void radeon_drm_cs_flush(struct r300_winsys_cs *rcs)
|
|||
for (i = 0; i < crelocs; i++)
|
||||
p_atomic_inc(&cs->csc->relocs_bo[i]->num_active_ioctls);
|
||||
|
||||
if (cs->ws->num_cpus > 1 && debug_get_option_thread()) {
|
||||
if (cs->ws->num_cpus > 1 && debug_get_option_thread() &&
|
||||
(flags & R300_FLUSH_ASYNC)) {
|
||||
cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs->csc);
|
||||
assert(cs->thread);
|
||||
} else {
|
||||
|
|
@ -407,7 +406,7 @@ void radeon_drm_cs_flush(struct r300_winsys_cs *rcs)
|
|||
static void radeon_drm_cs_destroy(struct r300_winsys_cs *rcs)
|
||||
{
|
||||
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
|
||||
radeon_drm_cs_sync_flush(rcs);
|
||||
radeon_drm_cs_sync_flush(cs);
|
||||
radeon_cs_context_cleanup(&cs->csc1);
|
||||
radeon_cs_context_cleanup(&cs->csc2);
|
||||
p_atomic_dec(&cs->ws->num_cs);
|
||||
|
|
@ -417,7 +416,8 @@ static void radeon_drm_cs_destroy(struct r300_winsys_cs *rcs)
|
|||
}
|
||||
|
||||
static void radeon_drm_cs_set_flush(struct r300_winsys_cs *rcs,
|
||||
void (*flush)(void *), void *user)
|
||||
void (*flush)(void *ctx, unsigned flags),
|
||||
void *user)
|
||||
{
|
||||
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
|
||||
cs->flush_cs = flush;
|
||||
|
|
@ -441,7 +441,6 @@ void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
|
|||
ws->base.cs_validate = radeon_drm_cs_validate;
|
||||
ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
|
||||
ws->base.cs_flush = radeon_drm_cs_flush;
|
||||
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
|
||||
ws->base.cs_set_flush = radeon_drm_cs_set_flush;
|
||||
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ struct radeon_drm_cs {
|
|||
struct radeon_drm_winsys *ws;
|
||||
|
||||
/* Flush CS. */
|
||||
void (*flush_cs)(void *);
|
||||
void (*flush_cs)(void *ctx, unsigned flags);
|
||||
void *flush_data;
|
||||
|
||||
pipe_thread thread;
|
||||
|
|
@ -96,8 +96,7 @@ static INLINE boolean radeon_bo_is_referenced_by_any_cs(struct radeon_bo *bo)
|
|||
return bo->num_cs_references;
|
||||
}
|
||||
|
||||
void radeon_drm_cs_flush(struct r300_winsys_cs *rcs);
|
||||
void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs);
|
||||
void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs);
|
||||
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue