asahi: Add get_query_address helper

This is the counterpart of get_oq_index for non-occlusion hardware queries.
These are tracked separately from occlusion queries, since occlusion query
allocations are limited; the other query types are not addressed by index
but rather by general batch-allocated space.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24847>
Alyssa Rosenzweig 2023-08-14 15:37:25 -04:00 committed by Marge Bot
parent a620e86f35
commit d42bb650b3
2 changed files with 38 additions and 8 deletions
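
For orientation, a minimal sketch (not part of the commit) of how a hypothetical caller might consume the two helpers. record_query_write is invented for illustration, and is_occlusion is the predicate agx_query.c defines internally, assumed visible here:

#include <stdint.h>

#include "agx_state.h"

/* Hypothetical caller, illustration only: occlusion queries resolve to a
 * 16-bit index into the batch's occlusion-query array, while every other
 * query type resolves to the GPU address of an 8-byte, zero-initialized
 * slot in the batch's pool.
 */
static void
record_query_write(struct agx_batch *batch, struct agx_query *query)
{
   if (is_occlusion(query)) {
      uint16_t index = agx_get_oq_index(batch, query);
      /* ... encode `index` into the draw's sample-counting state ... */
      (void)index;
   } else {
      uint64_t address = agx_get_query_address(batch, query);
      /* ... point the GPU-side result write at `address` ... */
      (void)address;
   }
}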

src/gallium/drivers/asahi/agx_query.c

@@ -6,6 +6,7 @@
 
 #include "util/u_prim.h"
 #include "agx_state.h"
+#include "pool.h"
 
 static struct pipe_query *
 agx_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
@@ -167,34 +168,61 @@ agx_set_active_query_state(struct pipe_context *pipe, bool enable)
    ctx->dirty |= AGX_DIRTY_QUERY;
 }
 
-uint16_t
-agx_get_oq_index(struct agx_batch *batch, struct agx_query *query)
+static uint16_t
+agx_add_query_to_batch(struct agx_batch *batch, struct agx_query *query,
+                       struct util_dynarray *array)
 {
    /* If written by another batch, flush it now. If this affects real apps, we
     * could avoid this flush by merging query results.
     */
    if (query->writer && query->writer != batch) {
       agx_flush_batch_for_reason(batch->ctx, query->writer,
-                                 "Multiple occlusion query writers");
+                                 "Multiple query writers");
    }
 
    /* Allocate if needed */
    if (query->writer == NULL) {
      query->writer = batch;
-      query->writer_index = util_dynarray_num_elements(
-         &batch->occlusion_queries, struct agx_query *);
+      query->writer_index =
+         util_dynarray_num_elements(array, struct agx_query *);
 
-      util_dynarray_append(&batch->occlusion_queries, struct agx_query *,
-                           query);
+      util_dynarray_append(array, struct agx_query *, query);
    }
 
    assert(query->writer == batch);
-   assert(*util_dynarray_element(&batch->occlusion_queries, struct agx_query *,
+   assert(*util_dynarray_element(array, struct agx_query *,
                                  query->writer_index) == query);
 
    return query->writer_index;
 }
 
+uint16_t
+agx_get_oq_index(struct agx_batch *batch, struct agx_query *query)
+{
+   assert(is_occlusion(query));
+   return agx_add_query_to_batch(batch, query, &batch->occlusion_queries);
+}
+
+uint64_t
+agx_get_query_address(struct agx_batch *batch, struct agx_query *query)
+{
+   assert(!is_occlusion(query));
+   agx_add_query_to_batch(batch, query, &batch->nonocclusion_queries);
+
+   /* Allocate storage for the query in the batch */
+   if (!query->ptr.cpu) {
+      query->ptr = agx_pool_alloc_aligned(&batch->pool, sizeof(uint64_t),
+                                          sizeof(uint64_t));
+
+      uint64_t *value = query->ptr.cpu;
+      *value = 0;
+   }
+
+   return query->ptr.gpu;
+}
+
 void
 agx_finish_batch_queries(struct agx_batch *batch)
 {
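
The "Multiple query writers" path above means ownership of a query can move between batches, with the previous writer flushed first. A hedged sketch of that handoff (illustration only, not from the commit; illustrate_writer_handoff is invented):

#include <assert.h>

#include "agx_state.h"

/* Hypothetical sequence: the second lookup internally flushes `first`
 * (which clears query->writer when that batch's queries are finished) and
 * then re-homes the query in `second` via the "Allocate if needed" path.
 */
static void
illustrate_writer_handoff(struct agx_batch *first, struct agx_batch *second,
                          struct agx_query *query)
{
   agx_get_query_address(first, query);  /* pins query->writer to `first` */
   agx_get_query_address(second, query); /* flushes `first`, re-homes query */

   assert(query->writer == second);
}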

src/gallium/drivers/asahi/agx_state.h

@@ -805,6 +805,8 @@ uint64_t agx_build_meta(struct agx_batch *batch, bool store,
 
 /* Query management */
 uint16_t agx_get_oq_index(struct agx_batch *batch, struct agx_query *query);
+uint64_t agx_get_query_address(struct agx_batch *batch,
+                               struct agx_query *query);
 void agx_finish_batch_queries(struct agx_batch *batch);
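
Finally, a sanity sketch (again invented, not from the commit) of the idempotence the shared helper guarantees within one batch: repeated lookups return the same slot without reallocating.

#include <assert.h>
#include <stdint.h>

#include "agx_state.h"

/* Illustration only: the first call allocates the 8-byte slot and pins
 * query->writer; later calls in the same batch return the same address.
 */
static void
check_query_address_idempotent(struct agx_batch *batch, struct agx_query *query)
{
   uint64_t a = agx_get_query_address(batch, query);
   uint64_t b = agx_get_query_address(batch, query);

   assert(query->writer == batch);
   assert(a == b);
}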