virgl: improve virgl_transfer_queue_is_queued

Search only the pending list and return immediately on the first
hit.

When the transfer queue was introduced, the function was used to
deal with the

  write transfer -> draw -> write transfer

sequence.  It told us whether the second transfer intersects the
first one.  If it did, the transfer queue avoided reordering the
second transfer to before the draw (by flushing), in case the draw
used the transferred data.
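
As a standalone illustration only (not driver code; the real check
works on 3D pipe_box regions and also compares the resource and the
level, as transfer_overlap does below), the kind of interval test that
decides whether two transfers intersect, and hence whether a flush is
needed, looks like:

  /* Minimal 1D sketch of the overlap decision. */
  #include <stdbool.h>
  #include <stdio.h>

  struct box { int x, width; };

  static bool boxes_intersect(const struct box *a, const struct box *b)
  {
     return a->x < b->x + b->width && b->x < a->x + a->width;
  }

  int main(void)
  {
     struct box queued_write = { .x = 0,  .width = 64 };
     struct box second_write = { .x = 32, .width = 64 };

     /* Overlap means the second write must not be reordered to before
      * the draw, so the queue flushes before accepting it. */
     if (boxes_intersect(&queued_write, &second_write))
        printf("flush before queuing the second transfer\n");
     return 0;
  }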

With the recent changes to the transfer code, the function is used
to deal with the

  write transfer -> readback transfer

sequence.  We want to avoid reordering the readback transfer to
before the write transfer (also by flushing).

In the old code, we needed to track the completed transfers as well
to avoid reordering.  But in the new code, a readback transfer is
guaranteed to see the data from the completed transfers (in other
words, it cannot be reordered to before the already completed
transfers), so we no longer need to search the COMPLETED_LIST.
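
A minimal sketch of how the simplified check is meant to be used on the
readback path (the caller, variable names, and flush call here are
hypothetical; only virgl_transfer_queue_is_queued itself comes from
this change):

  /* Before performing a readback: if a pending write transfer still
   * overlaps the region, submit it first so the readback observes its
   * data.  Completed transfers need no check, which is why only
   * PENDING_LIST is searched. */
  if (virgl_transfer_queue_is_queued(&vctx->queue, readback_xfer))
     vctx->base.flush(&vctx->base, NULL, 0);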

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Alexandros Frantzis <alexandros.frantzis@collabora.com>

Author: Chia-I Wu
Date:   2019-07-08 16:34:32 -07:00
parent  5f6aab2ee2
commit  bec2a85c48

@@ -137,6 +137,22 @@ transfer_overlap(const struct virgl_transfer *xfer,
    return true;
 }

+static struct virgl_transfer *
+virgl_transfer_queue_find_pending(const struct virgl_transfer_queue *queue,
+                                  const struct virgl_hw_res *hw_res,
+                                  unsigned level,
+                                  const struct pipe_box *box,
+                                  bool include_touching)
+{
+   struct virgl_transfer *xfer;
+   LIST_FOR_EACH_ENTRY(xfer, &queue->lists[PENDING_LIST], queue_link) {
+      if (transfer_overlap(xfer, hw_res, level, box, include_touching))
+         return xfer;
+   }
+
+   return NULL;
+}
+
 static bool transfers_intersect(struct virgl_transfer *queued,
                                 struct virgl_transfer *current)
 {
@@ -144,20 +160,6 @@ static bool transfers_intersect(struct virgl_transfer *queued,
                            &current->base.box, true);
 }

-static bool transfers_overlap(struct virgl_transfer *queued,
-                              struct virgl_transfer *current)
-{
-   return transfer_overlap(queued, current->hw_res, current->base.level,
-                           &current->base.box, false);
-}
-
-static void set_true(UNUSED struct virgl_transfer_queue *queue,
-                     struct list_action_args *args)
-{
-   bool *val = args->data;
-   *val = true;
-}
-
 static void set_queued(UNUSED struct virgl_transfer_queue *queue,
                        struct list_action_args *args)
 {
@@ -392,22 +394,11 @@ int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
 bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
                                     struct virgl_transfer *transfer)
 {
-   bool queued = false;
-   struct list_iteration_args iter;
-
-   memset(&iter, 0, sizeof(iter));
-   iter.current = transfer;
-   iter.compare = transfers_overlap;
-   iter.action = set_true;
-   iter.data = &queued;
-
-   iter.type = PENDING_LIST;
-   compare_and_perform_action(queue, &iter);
-
-   iter.type = COMPLETED_LIST;
-   compare_and_perform_action(queue, &iter);
-
-   return queued;
+   return virgl_transfer_queue_find_pending(queue,
+                                            transfer->hw_res,
+                                            transfer->base.level,
+                                            &transfer->base.box,
+                                            false);
 }

 struct virgl_transfer *