perfetto: Add a .clang-format for the directory.

I was frustrated trying to write code and not being able to just mash ^K^F to
format what I'd written.  This .clang-format is just a cargo-cult copy of
turnip's, with a few tweaks to reduce the diff to the current directory contents.
The remaining deltas in the reformat look decent to me, and mostly bring
things closer to mesa-vague-consensus style.

Reviewed-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22157>
This commit is contained in:
Emma Anholt 2023-03-23 17:33:13 -07:00 committed by Marge Bot
parent 66a8f7dcdb
commit da78d5d729
6 changed files with 247 additions and 180 deletions

View file

@@ -0,0 +1,45 @@
BasedOnStyle: LLVM
AlwaysBreakAfterReturnType: TopLevelDefinitions
BinPackParameters: false
BraceWrapping:
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterStruct: false
BeforeElse: false
SplitEmptyFunction: true
BreakBeforeBraces: Custom
ColumnLimit: 78
ContinuationIndentWidth: 3
Cpp11BracedListStyle: false
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '^"(c11/|compiler/|main/|nir/|spirv/|util/|vk_|wsi_)'
Priority: 3
- Regex: '^"(perfetto)'
Priority: 2
- Regex: '.*'
Priority: 1
IndentWidth: 3
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyExcessCharacter: 100
SpaceAfterCStyleCast: true
SpaceBeforeCpp11BracedList: true
ForEachMacros:
- BITSET_FOREACH_SET
- BITSET_FOREACH_RANGE
- LIST_FOR_EACH_ENTRY
- LIST_FOR_EACH_ENTRY_SAFE
- LIST_FOR_EACH_ENTRY_SAFE_REV
- list_for_each_entry
- list_for_each_entry_safe
- list_for_each_entry_rev
- list_for_each_entry_rev_safe
- list_for_each_entry_from
- list_for_each_entry_from_rev
- foreach_list_typed
- u_foreach_bit
- u_vector_foreach
- util_dynarray_foreach
- rb_tree_foreach
- rb_tree_foreach_safe

View file

@@ -77,7 +77,7 @@ static inline void
_mesa_trace_scope_end(int *scope)
{
/* we save the category in the scope variable */
_MESA_TRACE_END((enum util_perfetto_category) *scope);
_MESA_TRACE_END((enum util_perfetto_category) * scope);
}
#else

View file

@ -16,9 +16,9 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _UTIL_PERFETTO_H
@ -41,8 +41,7 @@ enum util_perfetto_category {
extern int util_perfetto_category_states[UTIL_PERFETTO_CATEGORY_COUNT];
void
util_perfetto_init(void);
void util_perfetto_init(void);
static inline bool
util_perfetto_is_category_enabled(enum util_perfetto_category category)
@ -50,12 +49,10 @@ util_perfetto_is_category_enabled(enum util_perfetto_category category)
return p_atomic_read_relaxed(&util_perfetto_category_states[category]);
}
void
util_perfetto_trace_begin(enum util_perfetto_category category,
const char *name);
void util_perfetto_trace_begin(enum util_perfetto_category category,
const char *name);
void
util_perfetto_trace_end(enum util_perfetto_category category);
void util_perfetto_trace_end(enum util_perfetto_category category);
#else /* HAVE_PERFETTO */

View file

@ -16,11 +16,13 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "u_trace.h"
#include <inttypes.h>
#include "util/list.h"
@ -28,23 +30,19 @@
#include "util/u_debug.h"
#include "util/u_vector.h"
#include "u_trace.h"
#define __NEEDS_TRACE_PRIV
#include "u_trace_priv.h"
#define PAYLOAD_BUFFER_SIZE 0x100
#define TIMESTAMP_BUF_SIZE 0x1000
#define TRACES_PER_CHUNK (TIMESTAMP_BUF_SIZE / sizeof(uint64_t))
#define TRACES_PER_CHUNK (TIMESTAMP_BUF_SIZE / sizeof(uint64_t))
struct u_trace_state {
util_once_flag once;
FILE *trace_file;
enum u_trace_type enabled_traces;
};
static struct u_trace_state u_trace_state = {
.once = UTIL_ONCE_FLAG_INIT
};
static struct u_trace_state u_trace_state = { .once = UTIL_ONCE_FLAG_INIT };
#ifdef HAVE_PERFETTO
/**
@ -102,8 +100,8 @@ struct u_trace_chunk {
struct util_queue_fence fence;
bool last; /* this chunk is last in batch */
bool eof; /* this chunk is last in frame */
bool last; /* this chunk is last in batch */
bool eof; /* this chunk is last in frame */
void *flush_data; /* assigned by u_trace_flush */
@ -124,13 +122,13 @@ struct u_trace_printer {
void (*event)(struct u_trace_context *utctx,
struct u_trace_chunk *chunk,
const struct u_trace_event *evt,
uint64_t ns, int32_t delta);
uint64_t ns,
int32_t delta);
};
static void
print_txt_start(struct u_trace_context *utctx)
{
}
static void
@ -149,20 +147,23 @@ static void
print_txt_end_of_batch(struct u_trace_context *utctx)
{
uint64_t elapsed = utctx->last_time_ns - utctx->first_time_ns;
fprintf(utctx->out, "ELAPSED: %"PRIu64" ns\n", elapsed);
fprintf(utctx->out, "ELAPSED: %" PRIu64 " ns\n", elapsed);
}
static void
print_txt_event(struct u_trace_context *utctx,
struct u_trace_chunk *chunk,
const struct u_trace_event *evt,
uint64_t ns, int32_t delta)
uint64_t ns,
int32_t delta)
{
if (evt->tp->print) {
fprintf(utctx->out, "%016"PRIu64" %+9d: %s: ", ns, delta, evt->tp->name);
fprintf(utctx->out, "%016" PRIu64 " %+9d: %s: ", ns, delta,
evt->tp->name);
evt->tp->print(utctx->out, evt->payload);
} else {
fprintf(utctx->out, "%016"PRIu64" %+9d: %s\n", ns, delta, evt->tp->name);
fprintf(utctx->out, "%016" PRIu64 " %+9d: %s\n", ns, delta,
evt->tp->name);
}
}
@ -217,7 +218,7 @@ print_json_end_of_batch(struct u_trace_context *utctx)
{
uint64_t elapsed = utctx->last_time_ns - utctx->first_time_ns;
fprintf(utctx->out, "],\n");
fprintf(utctx->out, "\"duration_ns\": %"PRIu64"\n", elapsed);
fprintf(utctx->out, "\"duration_ns\": %" PRIu64 "\n", elapsed);
fprintf(utctx->out, "}\n");
}
@ -225,12 +226,13 @@ static void
print_json_event(struct u_trace_context *utctx,
struct u_trace_chunk *chunk,
const struct u_trace_event *evt,
uint64_t ns, int32_t delta)
uint64_t ns,
int32_t delta)
{
if (utctx->event_nr != 0)
fprintf(utctx->out, ",\n");
fprintf(utctx->out, "{\n\"event\": \"%s\",\n", evt->tp->name);
fprintf(utctx->out, "\"time_ns\": \"%016"PRIu64"\",\n", ns);
fprintf(utctx->out, "\"time_ns\": \"%016" PRIu64 "\",\n", ns);
fprintf(utctx->out, "\"params\": {");
if (evt->tp->print)
evt->tp->print_json(utctx->out, evt->payload);
@ -285,7 +287,7 @@ free_chunk(void *ptr)
/* Unref payloads attached to this chunk. */
struct u_trace_payload_buf **payload;
u_vector_foreach(payload, &chunk->payloads)
u_vector_foreach (payload, &chunk->payloads)
u_trace_payload_buf_unref(*payload);
u_vector_finish(&chunk->payloads);
@ -297,8 +299,8 @@ static void
free_chunks(struct list_head *chunks)
{
while (!list_is_empty(chunks)) {
struct u_trace_chunk *chunk = list_first_entry(chunks,
struct u_trace_chunk, node);
struct u_trace_chunk *chunk =
list_first_entry(chunks, struct u_trace_chunk, node);
free_chunk(chunk);
}
}
@ -312,39 +314,39 @@ get_chunk(struct u_trace *ut, size_t payload_size)
/* do we currently have a non-full chunk to append msgs to? */
if (!list_is_empty(&ut->trace_chunks)) {
chunk = list_last_entry(&ut->trace_chunks,
struct u_trace_chunk, node);
/* Can we store a new trace in the chunk? */
if (chunk->num_traces < TRACES_PER_CHUNK) {
/* If no payload required, nothing else to check. */
if (payload_size <= 0)
return chunk;
chunk = list_last_entry(&ut->trace_chunks, struct u_trace_chunk, node);
/* Can we store a new trace in the chunk? */
if (chunk->num_traces < TRACES_PER_CHUNK) {
/* If no payload required, nothing else to check. */
if (payload_size <= 0)
return chunk;
/* If the payload buffer has space for the payload, we're good.
*/
if (chunk->payload &&
(chunk->payload->end - chunk->payload->next) >= payload_size)
return chunk;
/* If the payload buffer has space for the payload, we're good.
*/
if (chunk->payload &&
(chunk->payload->end - chunk->payload->next) >= payload_size)
return chunk;
/* If we don't have enough space in the payload buffer, can we
* allocate a new one?
*/
struct u_trace_payload_buf **buf = u_vector_add(&chunk->payloads);
*buf = u_trace_payload_buf_create();
chunk->payload = *buf;
return chunk;
}
/* we need to expand to add another chunk to the batch, so
* the current one is no longer the last one of the batch:
*/
chunk->last = false;
/* If we don't have enough space in the payload buffer, can we
* allocate a new one?
*/
struct u_trace_payload_buf **buf = u_vector_add(&chunk->payloads);
*buf = u_trace_payload_buf_create();
chunk->payload = *buf;
return chunk;
}
/* we need to expand to add another chunk to the batch, so
* the current one is no longer the last one of the batch:
*/
chunk->last = false;
}
/* .. if not, then create a new one: */
chunk = calloc(1, sizeof(*chunk));
chunk->utctx = ut->utctx;
chunk->timestamps = ut->utctx->create_timestamp_buffer(ut->utctx, TIMESTAMP_BUF_SIZE);
chunk->timestamps =
ut->utctx->create_timestamp_buffer(ut->utctx, TIMESTAMP_BUF_SIZE);
chunk->last = true;
u_vector_init(&chunk->payloads, 4, sizeof(struct u_trace_payload_buf *));
if (payload_size > 0) {
@ -359,12 +361,12 @@ get_chunk(struct u_trace *ut, size_t payload_size)
}
static const struct debug_named_value config_control[] = {
{ "print", U_TRACE_TYPE_PRINT, "Enable print"},
{ "print_json", U_TRACE_TYPE_PRINT_JSON, "Enable print in JSON"},
{ "print", U_TRACE_TYPE_PRINT, "Enable print" },
{ "print_json", U_TRACE_TYPE_PRINT_JSON, "Enable print in JSON" },
#ifdef HAVE_PERFETTO
{ "perfetto", U_TRACE_TYPE_PERFETTO_ENV, "Enable perfetto" },
#endif
{ "markers", U_TRACE_TYPE_MARKERS, "Enable marker trace"},
{ "markers", U_TRACE_TYPE_MARKERS, "Enable marker trace" },
DEBUG_NAMED_VALUE_END
};
@ -403,8 +405,8 @@ u_trace_state_init(void)
bool
u_trace_is_enabled(enum u_trace_type type)
{
/* Active is only tracked in a given u_trace context, so if you're asking us
* if U_TRACE_TYPE_PERFETTO (_ENV | _ACTIVE) is enabled, then just check
/* Active is only tracked in a given u_trace context, so if you're asking
* us if U_TRACE_TYPE_PERFETTO (_ENV | _ACTIVE) is enabled, then just check
* _ENV ("perfetto tracing is desired, but perfetto might not be running").
*/
type &= ~U_TRACE_TYPE_PERFETTO_ACTIVE;
@ -418,9 +420,10 @@ queue_init(struct u_trace_context *utctx)
if (utctx->queue.jobs)
return;
bool ret = util_queue_init(&utctx->queue, "traceq", 256, 1,
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
bool ret = util_queue_init(
&utctx->queue, "traceq", 256, 1,
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY | UTIL_QUEUE_INIT_RESIZE_IF_FULL,
NULL);
assert(ret);
if (!ret)
@ -429,12 +432,12 @@ queue_init(struct u_trace_context *utctx)
void
u_trace_context_init(struct u_trace_context *utctx,
void *pctx,
u_trace_create_ts_buffer create_timestamp_buffer,
u_trace_delete_ts_buffer delete_timestamp_buffer,
u_trace_record_ts record_timestamp,
u_trace_read_ts read_timestamp,
u_trace_delete_flush_data delete_flush_data)
void *pctx,
u_trace_create_ts_buffer create_timestamp_buffer,
u_trace_delete_ts_buffer delete_timestamp_buffer,
u_trace_record_ts record_timestamp,
u_trace_read_ts read_timestamp,
u_trace_delete_flush_data delete_flush_data)
{
u_trace_state_init();
@ -481,7 +484,8 @@ u_trace_context_init(struct u_trace_context *utctx,
queue_init(utctx);
#endif
if (!(p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_REQUIRE_QUEUING))
if (!(p_atomic_read_relaxed(&utctx->enabled_traces) &
U_TRACE_TYPE_REQUIRE_QUEUING))
return;
if (utctx->out) {
@ -516,7 +520,7 @@ u_trace_perfetto_start(void)
{
simple_mtx_lock(&ctx_list_mutex);
list_for_each_entry(struct u_trace_context, utctx, &ctx_list, node) {
list_for_each_entry (struct u_trace_context, utctx, &ctx_list, node) {
queue_init(utctx);
p_atomic_set(&utctx->enabled_traces,
utctx->enabled_traces | U_TRACE_TYPE_PERFETTO_ACTIVE);
@ -535,7 +539,7 @@ u_trace_perfetto_stop(void)
assert(_u_trace_perfetto_count > 0);
_u_trace_perfetto_count--;
if (_u_trace_perfetto_count == 0) {
list_for_each_entry(struct u_trace_context, utctx, &ctx_list, node) {
list_for_each_entry (struct u_trace_context, utctx, &ctx_list, node) {
p_atomic_set(&utctx->enabled_traces,
utctx->enabled_traces & ~U_TRACE_TYPE_PERFETTO_ACTIVE);
}
@ -573,7 +577,8 @@ process_chunk(void *job, void *gdata, int thread_index)
if (!evt->tp)
continue;
uint64_t ns = utctx->read_timestamp(utctx, chunk->timestamps, idx, chunk->flush_data);
uint64_t ns = utctx->read_timestamp(utctx, chunk->timestamps, idx,
chunk->flush_data);
int32_t delta;
if (!utctx->first_time_ns)
@ -595,7 +600,8 @@ process_chunk(void *job, void *gdata, int thread_index)
}
#ifdef HAVE_PERFETTO
if (evt->tp->perfetto &&
(p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_PERFETTO_ACTIVE)) {
(p_atomic_read_relaxed(&utctx->enabled_traces) &
U_TRACE_TYPE_PERFETTO_ACTIVE)) {
evt->tp->perfetto(utctx->pctx, ns, chunk->flush_data, evt->payload);
}
#endif
@ -640,26 +646,24 @@ u_trace_context_process(struct u_trace_context *utctx, bool eof)
if (list_is_empty(chunks))
return;
struct u_trace_chunk *last_chunk = list_last_entry(chunks,
struct u_trace_chunk, node);
struct u_trace_chunk *last_chunk =
list_last_entry(chunks, struct u_trace_chunk, node);
last_chunk->eof = eof;
while (!list_is_empty(chunks)) {
struct u_trace_chunk *chunk = list_first_entry(chunks,
struct u_trace_chunk, node);
struct u_trace_chunk *chunk =
list_first_entry(chunks, struct u_trace_chunk, node);
/* remove from list before enqueuing, because chunk is freed
* once it is processed by the queue:
*/
list_delinit(&chunk->node);
util_queue_add_job(&utctx->queue, chunk, &chunk->fence,
process_chunk, cleanup_chunk,
TIMESTAMP_BUF_SIZE);
util_queue_add_job(&utctx->queue, chunk, &chunk->fence, process_chunk,
cleanup_chunk, TIMESTAMP_BUF_SIZE);
}
}
void
u_trace_init(struct u_trace *ut, struct u_trace_context *utctx)
{
@ -691,7 +695,7 @@ u_trace_begin_iterator(struct u_trace *ut)
struct u_trace_chunk *first_chunk =
list_first_entry(&ut->trace_chunks, struct u_trace_chunk, node);
return (struct u_trace_iterator) { ut, first_chunk, 0};
return (struct u_trace_iterator) { ut, first_chunk, 0 };
}
struct u_trace_iterator
@ -703,7 +707,8 @@ u_trace_end_iterator(struct u_trace *ut)
struct u_trace_chunk *last_chunk =
list_last_entry(&ut->trace_chunks, struct u_trace_chunk, node);
return (struct u_trace_iterator) { ut, last_chunk, last_chunk->num_traces};
return (struct u_trace_iterator) { ut, last_chunk,
last_chunk->num_traces };
}
/* If an iterator was created when there were no chunks and there are now
@ -713,22 +718,19 @@ static struct u_trace_iterator
sanitize_iterator(struct u_trace_iterator iter)
{
if (iter.ut && !iter.chunk && !list_is_empty(&iter.ut->trace_chunks)) {
iter.chunk = list_first_entry(&iter.ut->trace_chunks, struct
u_trace_chunk, node);
iter.chunk =
list_first_entry(&iter.ut->trace_chunks, struct u_trace_chunk, node);
}
return iter;
}
bool
u_trace_iterator_equal(struct u_trace_iterator a,
struct u_trace_iterator b)
u_trace_iterator_equal(struct u_trace_iterator a, struct u_trace_iterator b)
{
a = sanitize_iterator(a);
b = sanitize_iterator(b);
return a.ut == b.ut &&
a.chunk == b.chunk &&
a.event_idx == b.event_idx;
return a.ut == b.ut && a.chunk == b.chunk && a.event_idx == b.event_idx;
}
void
@ -752,9 +754,8 @@ u_trace_clone_append(struct u_trace_iterator begin_it,
if (from_chunk == end_it.chunk)
to_copy = MIN2(to_copy, end_it.event_idx - from_idx);
copy_ts_buffer(begin_it.ut->utctx, cmdstream,
from_chunk->timestamps, from_idx,
to_chunk->timestamps, to_chunk->num_traces,
copy_ts_buffer(begin_it.ut->utctx, cmdstream, from_chunk->timestamps,
from_idx, to_chunk->timestamps, to_chunk->num_traces,
to_copy);
memcpy(&to_chunk->traces[to_chunk->num_traces],
@ -764,7 +765,7 @@ u_trace_clone_append(struct u_trace_iterator begin_it,
/* Take a refcount on payloads from from_chunk if needed. */
if (begin_it.ut != into) {
struct u_trace_payload_buf **in_payload;
u_vector_foreach(in_payload, &from_chunk->payloads) {
u_vector_foreach (in_payload, &from_chunk->payloads) {
struct u_trace_payload_buf **out_payload =
u_vector_add(&to_chunk->payloads);
@ -781,7 +782,8 @@ u_trace_clone_append(struct u_trace_iterator begin_it,
break;
from_idx = 0;
from_chunk = list_entry(from_chunk->node.next, struct u_trace_chunk, node);
from_chunk =
list_entry(from_chunk->node.next, struct u_trace_chunk, node);
}
}
}
@ -796,11 +798,13 @@ u_trace_disable_event_range(struct u_trace_iterator begin_it,
struct u_trace_chunk *current_chunk = begin_it.chunk;
uint32_t start_idx = begin_it.event_idx;
while(current_chunk != end_it.chunk) {
while (current_chunk != end_it.chunk) {
memset(&current_chunk->traces[start_idx], 0,
(current_chunk->num_traces - start_idx) * sizeof(struct u_trace_event));
(current_chunk->num_traces - start_idx) *
sizeof(struct u_trace_event));
start_idx = 0;
current_chunk = list_entry(current_chunk->node.next, struct u_trace_chunk, node);
current_chunk =
list_entry(current_chunk->node.next, struct u_trace_chunk, node);
}
memset(&current_chunk->traces[start_idx], 0,
@ -813,8 +817,10 @@ u_trace_disable_event_range(struct u_trace_iterator begin_it,
* functions.
*/
void *
u_trace_appendv(struct u_trace *ut, void *cs,
const struct u_tracepoint *tp, unsigned variable_sz)
u_trace_appendv(struct u_trace *ut,
void *cs,
const struct u_tracepoint *tp,
unsigned variable_sz)
{
assert(tp->payload_sz == ALIGN_NPOT(tp->payload_sz, 8));
@ -830,11 +836,12 @@ u_trace_appendv(struct u_trace *ut, void *cs,
}
/* record a timestamp for the trace: */
ut->utctx->record_timestamp(ut, cs, chunk->timestamps, tp_idx, tp->end_of_pipe);
ut->utctx->record_timestamp(ut, cs, chunk->timestamps, tp_idx,
tp->end_of_pipe);
chunk->traces[tp_idx] = (struct u_trace_event) {
.tp = tp,
.payload = payload,
.tp = tp,
.payload = payload,
};
return payload;
@ -843,7 +850,8 @@ u_trace_appendv(struct u_trace *ut, void *cs,
void
u_trace_flush(struct u_trace *ut, void *flush_data, bool free_data)
{
list_for_each_entry(struct u_trace_chunk, chunk, &ut->trace_chunks, node) {
list_for_each_entry (struct u_trace_chunk, chunk, &ut->trace_chunks,
node) {
chunk->flush_data = flush_data;
chunk->free_flush_data = false;
}

View file

@ -16,9 +16,9 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _U_TRACE_H
@ -77,20 +77,20 @@ struct u_trace_printer;
* Special reserved value to indicate that no timestamp was captured,
* and that the timestamp of the previous trace should be reused.
*/
#define U_TRACE_NO_TIMESTAMP ((uint64_t)0)
#define U_TRACE_NO_TIMESTAMP ((uint64_t) 0)
/**
* Driver provided callback to create a timestamp buffer which will be
* read by u_trace_read_ts function.
*/
typedef void* (*u_trace_create_ts_buffer)(struct u_trace_context *utctx,
uint32_t timestamps_count);
typedef void *(*u_trace_create_ts_buffer)(struct u_trace_context *utctx,
uint32_t timestamps_count);
/**
* Driver provided callback to delete a timestamp buffer.
*/
typedef void (*u_trace_delete_ts_buffer)(struct u_trace_context *utctx,
void *timestamps);
void *timestamps);
/**
* Driver provided callback to emit commands into the soecified command
@ -101,8 +101,10 @@ typedef void (*u_trace_delete_ts_buffer)(struct u_trace_context *utctx,
* a fixed rate, even as the GPU freq changes. The same source used for
* GL_TIMESTAMP queries should be appropriate.
*/
typedef void (*u_trace_record_ts)(struct u_trace *ut, void *cs,
void *timestamps, unsigned idx,
typedef void (*u_trace_record_ts)(struct u_trace *ut,
void *cs,
void *timestamps,
unsigned idx,
bool end_of_pipe);
/**
@ -124,13 +126,15 @@ typedef void (*u_trace_record_ts)(struct u_trace *ut, void *cs,
* capturing the same timestamp multiple times in a row.
*/
typedef uint64_t (*u_trace_read_ts)(struct u_trace_context *utctx,
void *timestamps, unsigned idx, void *flush_data);
void *timestamps,
unsigned idx,
void *flush_data);
/**
* Driver provided callback to delete flush data.
*/
typedef void (*u_trace_delete_flush_data)(struct u_trace_context *utctx,
void *flush_data);
void *flush_data);
enum u_trace_type {
U_TRACE_TYPE_PRINT = 1u << 0,
@ -140,16 +144,18 @@ enum u_trace_type {
U_TRACE_TYPE_MARKERS = 1u << 4,
U_TRACE_TYPE_PRINT_JSON = U_TRACE_TYPE_PRINT | U_TRACE_TYPE_JSON,
U_TRACE_TYPE_PERFETTO = U_TRACE_TYPE_PERFETTO_ACTIVE | U_TRACE_TYPE_PERFETTO_ENV,
U_TRACE_TYPE_PERFETTO =
U_TRACE_TYPE_PERFETTO_ACTIVE | U_TRACE_TYPE_PERFETTO_ENV,
/*
* A mask of traces that require appending to the tracepoint chunk list.
*/
* A mask of traces that require appending to the tracepoint chunk list.
*/
U_TRACE_TYPE_REQUIRE_QUEUING = U_TRACE_TYPE_PRINT | U_TRACE_TYPE_PERFETTO,
/*
* A mask of traces that require processing the tracepoint chunk list.
*/
U_TRACE_TYPE_REQUIRE_PROCESSING = U_TRACE_TYPE_PRINT | U_TRACE_TYPE_PERFETTO_ACTIVE,
* A mask of traces that require processing the tracepoint chunk list.
*/
U_TRACE_TYPE_REQUIRE_PROCESSING =
U_TRACE_TYPE_PRINT | U_TRACE_TYPE_PERFETTO_ACTIVE,
};
/**
@ -162,10 +168,10 @@ struct u_trace_context {
void *pctx;
u_trace_create_ts_buffer create_timestamp_buffer;
u_trace_delete_ts_buffer delete_timestamp_buffer;
u_trace_record_ts record_timestamp;
u_trace_read_ts read_timestamp;
u_trace_create_ts_buffer create_timestamp_buffer;
u_trace_delete_ts_buffer delete_timestamp_buffer;
u_trace_record_ts record_timestamp;
u_trace_read_ts read_timestamp;
u_trace_delete_flush_data delete_flush_data;
FILE *out;
@ -211,21 +217,22 @@ struct u_trace_context {
struct u_trace {
struct u_trace_context *utctx;
struct list_head trace_chunks; /* list of unflushed trace chunks in fifo order */
struct list_head
trace_chunks; /* list of unflushed trace chunks in fifo order */
};
void u_trace_context_init(struct u_trace_context *utctx,
void *pctx,
u_trace_create_ts_buffer create_timestamp_buffer,
u_trace_delete_ts_buffer delete_timestamp_buffer,
u_trace_record_ts record_timestamp,
u_trace_read_ts read_timestamp,
u_trace_delete_flush_data delete_flush_data);
void *pctx,
u_trace_create_ts_buffer create_timestamp_buffer,
u_trace_delete_ts_buffer delete_timestamp_buffer,
u_trace_record_ts record_timestamp,
u_trace_read_ts read_timestamp,
u_trace_delete_flush_data delete_flush_data);
void u_trace_context_fini(struct u_trace_context *utctx);
/**
* Flush (trigger processing) of traces previously flushed to the trace-context
* by u_trace_flush().
* Flush (trigger processing) of traces previously flushed to the
* trace-context by u_trace_flush().
*
* This should typically be called in the driver's pctx->flush().
*/
@ -239,28 +246,26 @@ bool u_trace_is_enabled(enum u_trace_type type);
bool u_trace_has_points(struct u_trace *ut);
struct u_trace_iterator
{
struct u_trace_iterator {
struct u_trace *ut;
struct u_trace_chunk *chunk;
uint32_t event_idx;
};
struct u_trace_iterator
u_trace_begin_iterator(struct u_trace *ut);
struct u_trace_iterator u_trace_begin_iterator(struct u_trace *ut);
struct u_trace_iterator
u_trace_end_iterator(struct u_trace *ut);
struct u_trace_iterator u_trace_end_iterator(struct u_trace *ut);
bool
u_trace_iterator_equal(struct u_trace_iterator a,
struct u_trace_iterator b);
bool u_trace_iterator_equal(struct u_trace_iterator a,
struct u_trace_iterator b);
typedef void (*u_trace_copy_ts_buffer)(struct u_trace_context *utctx,
void *cmdstream,
void *ts_from, uint32_t from_offset,
void *ts_to, uint32_t to_offset,
uint32_t count);
void *cmdstream,
void *ts_from,
uint32_t from_offset,
void *ts_to,
uint32_t to_offset,
uint32_t count);
/**
* Clones tracepoints range into target u_trace.
@ -284,31 +289,34 @@ void u_trace_disable_event_range(struct u_trace_iterator begin_it,
/**
* Flush traces to the parent trace-context. At this point, the expectation
* is that all the tracepoints are "executed" by the GPU following any previously
* flushed u_trace batch.
* is that all the tracepoints are "executed" by the GPU following any
* previously flushed u_trace batch.
*
* flush_data is a way for driver to pass additional data, which becomes available
* only at the point of flush, to the u_trace_read_ts callback and perfetto.
* The typical example of such data would be a fence to wait on in u_trace_read_ts,
* and a submission_id to pass into perfetto.
* The destruction of the data is done via u_trace_delete_flush_data.
* flush_data is a way for driver to pass additional data, which becomes
* available only at the point of flush, to the u_trace_read_ts callback and
* perfetto. The typical example of such data would be a fence to wait on in
* u_trace_read_ts, and a submission_id to pass into perfetto. The destruction
* of the data is done via u_trace_delete_flush_data.
*
* This should typically be called when the corresponding cmdstream (containing
* the timestamp reads) is flushed to the kernel.
* This should typically be called when the corresponding cmdstream
* (containing the timestamp reads) is flushed to the kernel.
*/
void u_trace_flush(struct u_trace *ut, void *flush_data, bool free_data);
#ifdef HAVE_PERFETTO
static ALWAYS_INLINE bool
u_trace_perfetto_active(struct u_trace_context* utctx) {
return p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_PERFETTO_ACTIVE;
u_trace_perfetto_active(struct u_trace_context *utctx)
{
return p_atomic_read_relaxed(&utctx->enabled_traces) &
U_TRACE_TYPE_PERFETTO_ACTIVE;
}
void u_trace_perfetto_start(void);
void u_trace_perfetto_stop(void);
#else
static ALWAYS_INLINE bool
u_trace_perfetto_active(UNUSED struct u_trace_context* utctx) {
u_trace_perfetto_active(UNUSED struct u_trace_context *utctx)
{
return false;
}
#endif
@ -318,7 +326,8 @@ u_trace_perfetto_active(UNUSED struct u_trace_context* utctx) {
* gate any expensive traces.
*/
static ALWAYS_INLINE bool
u_trace_enabled(struct u_trace_context *utctx) {
u_trace_enabled(struct u_trace_context *utctx)
{
return p_atomic_read_relaxed(&utctx->enabled_traces) != 0;
}
@ -326,8 +335,10 @@ u_trace_enabled(struct u_trace_context *utctx) {
* Return whether chunks should be processed or not.
*/
static ALWAYS_INLINE bool
u_trace_should_process(struct u_trace_context *utctx) {
return p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_REQUIRE_PROCESSING;
u_trace_should_process(struct u_trace_context *utctx)
{
return p_atomic_read_relaxed(&utctx->enabled_traces) &
U_TRACE_TYPE_REQUIRE_PROCESSING;
}
/**
@ -335,12 +346,14 @@ u_trace_should_process(struct u_trace_context *utctx) {
* isn't active.
*/
static ALWAYS_INLINE bool
u_trace_markers_enabled(struct u_trace_context *utctx) {
return p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_MARKERS;
u_trace_markers_enabled(struct u_trace_context *utctx)
{
return p_atomic_read_relaxed(&utctx->enabled_traces) &
U_TRACE_TYPE_MARKERS;
}
#ifdef __cplusplus
}
#endif
#endif /* _U_TRACE_H */
#endif /* _U_TRACE_H */

View file

@ -16,21 +16,20 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NEEDS_TRACE_PRIV
# error "Do not use this header!"
#error "Do not use this header!"
#endif
#ifndef _U_TRACE_PRIV_H
#define _U_TRACE_PRIV_H
#include <stdio.h>
#include "u_trace.h"
#include <stdio.h>
/*
* Internal interface used by generated tracepoints
@ -53,7 +52,10 @@ struct u_tracepoint {
/**
* Callback to emit a perfetto event, such as render-stage trace
*/
void (*perfetto)(void *pctx, uint64_t ts_ns, const void *flush_data, const void *payload);
void (*perfetto)(void *pctx,
uint64_t ts_ns,
const void *flush_data,
const void *payload);
#endif
};
@ -61,8 +63,10 @@ struct u_tracepoint {
* Append a tracepoint followed by some amount of memory specified by
* variable_sz, returning pointer that can be filled with trace payload.
*/
void * u_trace_appendv(struct u_trace *ut, void *cs,
const struct u_tracepoint *tp, unsigned variable_sz);
void *u_trace_appendv(struct u_trace *ut,
void *cs,
const struct u_tracepoint *tp,
unsigned variable_sz);
/**
* Append a trace event, returning pointer to buffer of tp->payload_sz
@ -79,4 +83,4 @@ u_trace_append(struct u_trace *ut, void *cs, const struct u_tracepoint *tp)
}
#endif
#endif /* _U_TRACE_PRIV_H */
#endif /* _U_TRACE_PRIV_H */