ilo: simplify ilo_cp_set_owner()

The simplification allows us to get rid of ilo_cp_set_ring() and
ilo_cp_implicit_flush().  The 3D query code is refactored accordingly.
Chia-I Wu 2014-09-19 13:42:08 +08:00
parent 26ee6f23a9
commit 270667472f
5 changed files with 123 additions and 131 deletions
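
For reference, the interface change at a typical call site, paraphrasing
ilo_3d_own_render_ring() from the diff below (this summary is not itself part
of the patch):

   /* before: the ring and the owner were set separately, and the reserved
    * space was passed on every call */
   ilo_cp_set_ring(hw3d->cp, INTEL_RING_RENDER);
   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);

   /* after: a single call takes both the ring and the owner; the reserved
    * space lives in ilo_cp_owner::reserve, and resuming/pausing queries
    * happens through the owner's own()/release() callbacks */
   ilo_cp_set_owner(hw3d->cp, INTEL_RING_RENDER, &hw3d->owner);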

src/gallium/drivers/ilo/ilo_3d.c

@@ -200,21 +200,26 @@ ilo_3d_pause_queries(struct ilo_3d *hw3d)
    }
 }
-static void
-ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
-{
-   struct ilo_3d *hw3d = data;
-   ilo_3d_pause_queries(hw3d);
-}
 void
 ilo_3d_own_render_ring(struct ilo_3d *hw3d)
 {
-   ilo_cp_set_ring(hw3d->cp, INTEL_RING_RENDER);
+   ilo_cp_set_owner(hw3d->cp, INTEL_RING_RENDER, &hw3d->owner);
+}
-   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
-      ilo_3d_resume_queries(hw3d);
+static void
+ilo_3d_reserve_for_query(struct ilo_3d *hw3d, struct ilo_query *q,
+                         enum ilo_3d_pipeline_action act)
+{
+   q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline, act, NULL);
+   /* XXX we should check the aperture size */
+   if (ilo_cp_space(hw3d->cp) < q->reg_cmd_size * 2) {
+      ilo_cp_flush(hw3d->cp, "out of space");
+      assert(ilo_cp_space(hw3d->cp) >= q->reg_cmd_size * 2);
+   }
+   /* reserve space for pausing the query */
+   hw3d->owner.reserve += q->reg_cmd_size;
 }
 /**
@@ -229,21 +234,10 @@ ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
    switch (q->type) {
    case PIPE_QUERY_OCCLUSION_COUNTER:
-      /* reserve some space for pausing the query */
-      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
-            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
-      hw3d->owner_reserve += q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_DEPTH_COUNT);
       q->data.u64 = 0;
       if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
-         /* XXX we should check the aperture size */
-         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
-            ilo_cp_flush(hw3d->cp, "out of space");
-            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
-         }
          ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);
@@ -254,21 +248,10 @@ ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
       /* nop */
       break;
    case PIPE_QUERY_TIME_ELAPSED:
-      /* reserve some space for pausing the query */
-      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
-            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
-      hw3d->owner_reserve += q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_TIMESTAMP);
       q->data.u64 = 0;
       if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
-         /* XXX we should check the aperture size */
-         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
-            ilo_cp_flush(hw3d->cp, "out of space");
-            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
-         }
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
              q->bo, q->reg_read++);
@@ -284,22 +267,11 @@ ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
       list_add(&q->list, &hw3d->prim_emitted_queries);
       break;
    case PIPE_QUERY_PIPELINE_STATISTICS:
-      /* reserve some space for pausing the query */
-      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
-            ILO_3D_PIPELINE_WRITE_STATISTICS, NULL);
-      hw3d->owner_reserve += q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      ilo_3d_reserve_for_query(hw3d, q, ILO_3D_PIPELINE_WRITE_STATISTICS);
       memset(&q->data.pipeline_statistics, 0,
            sizeof(q->data.pipeline_statistics));
       if (ilo_query_alloc_bo(q, 11 * 2, -1, hw3d->cp->winsys)) {
-         /* XXX we should check the aperture size */
-         if (q->reg_cmd_size > ilo_cp_space(hw3d->cp)) {
-            ilo_cp_flush(hw3d->cp, "out of space");
-            assert(q->reg_cmd_size <= ilo_cp_space(hw3d->cp));
-         }
         ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
              q->bo, q->reg_read);
         q->reg_read += 11;
@@ -328,8 +300,9 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
       list_del(&q->list);
       assert(q->reg_read < q->reg_total);
-      hw3d->owner_reserve -= q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      assert(hw3d->owner.reserve >= q->reg_cmd_size);
+      hw3d->owner.reserve -= q->reg_cmd_size;
       ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
       break;
@@ -345,8 +318,9 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
       list_del(&q->list);
       assert(q->reg_read < q->reg_total);
-      hw3d->owner_reserve -= q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      assert(hw3d->owner.reserve >= q->reg_cmd_size);
+      hw3d->owner.reserve -= q->reg_cmd_size;
       ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
       break;
@@ -358,8 +332,9 @@ ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
       list_del(&q->list);
       assert(q->reg_read + 11 <= q->reg_total);
-      hw3d->owner_reserve -= q->reg_cmd_size;
-      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
+      assert(hw3d->owner.reserve >= q->reg_cmd_size);
+      hw3d->owner.reserve -= q->reg_cmd_size;
       ilo_3d_pipeline_emit_write_statistics(hw3d->pipeline,
            q->bo, q->reg_read);
       q->reg_read += 11;
@@ -422,6 +397,22 @@ ilo_3d_cp_flushed(struct ilo_3d *hw3d)
    hw3d->new_batch = true;
 }
+static void
+ilo_3d_own_cp(struct ilo_cp *cp, void *data)
+{
+   struct ilo_3d *hw3d = data;
+   ilo_3d_resume_queries(hw3d);
+}
+static void
+ilo_3d_release_cp(struct ilo_cp *cp, void *data)
+{
+   struct ilo_3d *hw3d = data;
+   ilo_3d_pause_queries(hw3d);
+}
 /**
  * Create a 3D context.
  */
@@ -435,8 +426,10 @@ ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
       return NULL;
    hw3d->cp = cp;
-   hw3d->owner.release_callback = ilo_3d_release_render_ring;
-   hw3d->owner.release_data = hw3d;
+   hw3d->owner.own = ilo_3d_own_cp;
+   hw3d->owner.release = ilo_3d_release_cp;
+   hw3d->owner.data = hw3d;
+   hw3d->owner.reserve = 0;
    hw3d->new_batch = true;
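
The query code above now follows a single reservation pattern (a condensed
restatement of the diff, not additional code in the commit):

   /* ilo_3d_begin_query(): make sure the batch has room both to start the
    * query now and to pause it later, then grow the owner's reservation */
   if (ilo_cp_space(hw3d->cp) < q->reg_cmd_size * 2) {
      ilo_cp_flush(hw3d->cp, "out of space");
      assert(ilo_cp_space(hw3d->cp) >= q->reg_cmd_size * 2);
   }
   hw3d->owner.reserve += q->reg_cmd_size;

   /* ilo_3d_end_query(): the pause commands fit in the reserved space, so
    * return the reservation before emitting them */
   assert(hw3d->owner.reserve >= q->reg_cmd_size);
   hw3d->owner.reserve -= q->reg_cmd_size;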

src/gallium/drivers/ilo/ilo_3d.h

@@ -41,7 +41,6 @@ struct ilo_query;
 struct ilo_3d {
    struct ilo_cp *cp;
    struct ilo_cp_owner owner;
-   int owner_reserve;
    bool new_batch;

src/gallium/drivers/ilo/ilo_blitter_blt.c

@@ -47,9 +47,8 @@ ilo_blitter_blt_begin(struct ilo_blitter *blitter, int max_cmd_size,
    int count;
    uint32_t swctrl;
-   /* change ring */
-   ilo_cp_set_ring(ilo->cp, INTEL_RING_BLT);
-   ilo_cp_set_owner(ilo->cp, NULL, 0);
+   /* change owner */
+   ilo_cp_set_owner(ilo->cp, INTEL_RING_BLT, NULL);
    /* check aperture space */
    aper_check[0] = dst;
src/gallium/drivers/ilo/ilo_cp.c

@@ -31,12 +31,62 @@
 #include "ilo_shader.h"
 #include "ilo_cp.h"
+static const struct ilo_cp_owner ilo_cp_default_owner;
+static void
+ilo_cp_release_owner(struct ilo_cp *cp)
+{
+   if (cp->owner != &ilo_cp_default_owner) {
+      const struct ilo_cp_owner *owner = cp->owner;
+      cp->owner = &ilo_cp_default_owner;
+      assert(ilo_cp_space(cp) >= owner->reserve);
+      owner->release(cp, owner->data);
+   }
+}
+/**
+ * Set the parser owner. If this is a new owner or a new ring, the old owner
+ * is released and the new owner's own() is called.
+ *
+ * The parser may be implicitly flushed if there is a ring change or there is
+ * not enough space for the new owner.
+ */
+void
+ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
+                 const struct ilo_cp_owner *owner)
+{
+   if (!owner)
+      owner = &ilo_cp_default_owner;
+   if (cp->ring != ring) {
+      ilo_cp_flush(cp, "ring change");
+      cp->ring = ring;
+   }
+   if (cp->owner != owner) {
+      ilo_cp_release_owner(cp);
+      /* multiply by 2 because there are own() and release() */
+      if (ilo_cp_space(cp) < owner->reserve * 2) {
+         ilo_cp_flush(cp, "new owner");
+         assert(ilo_cp_space(cp) >= owner->reserve * 2);
+      }
+      cp->owner = owner;
+      assert(ilo_cp_space(cp) >= owner->reserve);
+      cp->owner->own(cp, cp->owner->data);
+   }
+}
 static struct intel_bo *
 ilo_cp_end_batch(struct ilo_cp *cp, unsigned *used)
 {
    struct intel_bo *bo;
-   ilo_cp_set_owner(cp, NULL, 0);
+   ilo_cp_release_owner(cp);
    if (!ilo_builder_batch_used(&cp->builder)) {
       ilo_builder_batch_discard(&cp->builder);
@@ -130,6 +180,7 @@ ilo_cp_create(const struct ilo_dev_info *dev,
    }
    cp->ring = INTEL_RING_RENDER;
+   cp->owner = &ilo_cp_default_owner;
    ilo_builder_init(&cp->builder, dev, winsys);

src/gallium/drivers/ilo/ilo_cp.h

@@ -38,9 +38,20 @@ struct ilo_shader_cache;
 typedef void (*ilo_cp_callback)(struct ilo_cp *cp, void *data);
+/**
+ * Parser owners are notified when they gain or lose the ownership of the
+ * parser. This gives owners a chance to emit prolog or epilog.
+ */
 struct ilo_cp_owner {
-   ilo_cp_callback release_callback;
-   void *release_data;
+   ilo_cp_callback own;
+   ilo_cp_callback release;
+   void *data;
+   /*
+    * Space reserved for own() and release(). This can be modified at any
+    * time, as long as it is never increased by more than ilo_cp_space().
+    */
+   int reserve;
 };
 /**
@@ -54,10 +65,9 @@ struct ilo_cp {
    ilo_cp_callback flush_callback;
    void *flush_callback_data;
-   const struct ilo_cp_owner *owner;
-   int owner_reserve;
    enum intel_ring_type ring;
+   const struct ilo_cp_owner *owner;
    unsigned one_off_flags;
    struct ilo_builder builder;
@@ -87,6 +97,10 @@ ilo_cp_flush(struct ilo_cp *cp, const char *reason)
    ilo_cp_flush_internal(cp);
 }
+void
+ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
+                 const struct ilo_cp_owner *owner);
 /**
  * Return true if the parser buffer is empty.
  */
@@ -105,30 +119,9 @@ ilo_cp_space(struct ilo_cp *cp)
    const int space = ilo_builder_batch_space(&cp->builder);
    const int mi_batch_buffer_end_space = 2;
-   assert(space >= cp->owner_reserve + mi_batch_buffer_end_space);
+   assert(space >= cp->owner->reserve + mi_batch_buffer_end_space);
-   return space - cp->owner_reserve - mi_batch_buffer_end_space;
-}
-/**
- * Internal function called by functions that flush implicitly.
- */
-static inline void
-ilo_cp_implicit_flush(struct ilo_cp *cp)
-{
-   ilo_cp_flush(cp, "out of space (implicit)");
-}
-/**
- * Set the ring buffer.
- */
-static inline void
-ilo_cp_set_ring(struct ilo_cp *cp, enum intel_ring_type ring)
-{
-   if (cp->ring != ring) {
-      ilo_cp_implicit_flush(cp);
-      cp->ring = ring;
-   }
+   return space - cp->owner->reserve - mi_batch_buffer_end_space;
 }
 /**
@@ -152,47 +145,4 @@ ilo_cp_set_flush_callback(struct ilo_cp *cp, ilo_cp_callback callback,
    cp->flush_callback_data = data;
 }
-/**
- * Set the parser owner. If this is a new owner, the previous owner is
- * notified and the space it reserved is reclaimed.
- *
- * \return true if this is a new owner
- */
-static inline bool
-ilo_cp_set_owner(struct ilo_cp *cp, const struct ilo_cp_owner *owner,
-                 int reserve)
-{
-   const bool new_owner = (cp->owner != owner);
-   /* release current owner */
-   if (new_owner && cp->owner) {
-      /* reclaim the reserved space */
-      cp->owner_reserve = 0;
-      /* invoke the release callback */
-      cp->owner->release_callback(cp, cp->owner->release_data);
-      cp->owner = NULL;
-   }
-   if (cp->owner_reserve != reserve) {
-      const int extra = reserve - cp->owner_reserve;
-      if (ilo_cp_space(cp) < extra) {
-         ilo_cp_implicit_flush(cp);
-         assert(ilo_cp_space(cp) >= reserve);
-         cp->owner_reserve = reserve;
-      }
-      else {
-         cp->owner_reserve += extra;
-      }
-   }
-   /* set owner last because of the possible flush above */
-   cp->owner = owner;
-   return new_owner;
-}
 #endif /* ILO_CP_H */
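
As a usage sketch, this is roughly how a client hooks into the new owner
interface, modeled on the ilo_3d changes above (the example_* names are
illustrative and not part of the patch):

   static void example_own(struct ilo_cp *cp, void *data)
   {
      /* emit the prolog, e.g. resume queries */
   }

   static void example_release(struct ilo_cp *cp, void *data)
   {
      /* emit the epilog, e.g. pause queries; at least 'reserve' entries of
       * batch space are guaranteed to be available here */
   }

   static struct ilo_cp_owner example_owner = {
      .own = example_own,
      .release = example_release,
      .data = NULL,   /* passed back to both callbacks */
      .reserve = 0,   /* may grow later, e.g. when a query begins */
   };

   /* take ownership of the render ring; this may flush on a ring change or
    * when there is not enough space for own() and release() */
   ilo_cp_set_owner(cp, INTEL_RING_RENDER, &example_owner);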