teflon: Improve dumped graph formatting

Currently, the graph dumped with ``TEFLON_DEBUG=verbose`` has the
following appearance:

idx type                      inputs                   outputs  operation type-specific
================================================================================================
  0 CONV   88,8,6 7
  1 DWCONV 7,35,34 33
  2 CONV   33,38,36 37
  3 DWCONV 37,41,40 39
  [...]
 20 CONV   9,14,12 13
 21 DWCONV 13,17,16 15
 22 CONV   15,20,18 19
 23 DWCONV 19,23,22 21
 24 CONV   21,26,24 25
 25 DWCONV 25,29,28 27
 26 CONV   27,32,30 31

Due to misaligned fields, the graph output is difficult to read.
Additionally, the "operation type-specific" field is always empty, as
its entries were removed in commit 986f8c7ff2 ("teflon: Support multiple
graph inputs and outputs").

Properly align the input and output fields and remove the
"operation type-specific" column. Also, widen the "type" field to
accommodate the longest currently supported operation name. The resulting
output looks like this:

idx type            inputs               outputs
==========================================================================
  0 CONV            88,8,6               7
  1 DWCONV          7,35,34              33
  2 CONV            33,38,36             37
  3 DWCONV          37,41,40             39
  [...]
 20 CONV            9,14,12              13
 21 DWCONV          13,17,16             15
 22 CONV            15,20,18             19
 23 DWCONV          19,23,22             21
 24 CONV            21,26,24             25
 25 DWCONV          25,29,28             27
 26 CONV            27,32,30             31

Signed-off-by: Maíra Canal <mairacanal@riseup.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38942>
This commit is contained in:
Maíra Canal 2025-12-14 12:34:31 -03:00 committed by Marge Bot
parent 077292f65b
commit 104d1a0d4b

View file

@@ -376,74 +376,79 @@ dump_graph(struct pipe_tensor *tensors, unsigned tensor_count, struct pipe_ml_op
}
teflon_debug("\n");
teflon_debug("%3s %-6s %25s %25s %s\n", "idx", "type", "inputs", "outputs", "operation type-specific");
teflon_debug("================================================================================================\n");
teflon_debug("%3s %-15s %-20s %-20s\n", "idx", "type", "inputs", "outputs");
teflon_debug("==========================================================================\n");
for (int i = 0; i < operation_count; i++) {
teflon_debug("%3d ", i);
switch (operations[i].type) {
case PIPE_ML_OPERATION_TYPE_ADD:
teflon_debug("%-6s ", "ADD");
teflon_debug("%-15s ", "ADD");
break;
case PIPE_ML_OPERATION_TYPE_CONVOLUTION:
teflon_debug("%-6s ", operations[i].conv.depthwise ? "DWCONV" : "CONV");
teflon_debug("%-15s ", operations[i].conv.depthwise ? "DWCONV" : "CONV");
break;
case PIPE_ML_OPERATION_TYPE_CONCATENATION:
teflon_debug("%-6s ", "CONCAT");
teflon_debug("%-15s ", "CONCAT");
break;
case PIPE_ML_OPERATION_TYPE_POOLING:
teflon_debug("%-6s ", "POOL");
teflon_debug("%-15s ", "POOL");
break;
case PIPE_ML_OPERATION_TYPE_SPLIT:
teflon_debug("%-6s ", "SPLIT");
teflon_debug("%-15s ", "SPLIT");
break;
case PIPE_ML_OPERATION_TYPE_PAD:
teflon_debug("%-6s ", "PAD");
teflon_debug("%-15s ", "PAD");
break;
case PIPE_ML_OPERATION_TYPE_FULLY_CONNECTED:
teflon_debug("%-6s ", "FCON");
teflon_debug("%-15s ", "FCON");
break;
case PIPE_ML_OPERATION_TYPE_RESHAPE:
teflon_debug("%-6s ", "RESHAPE");
teflon_debug("%-15s ", "RESHAPE");
break;
case PIPE_ML_OPERATION_TYPE_RELU:
teflon_debug("%-6s ", "RELU");
teflon_debug("%-15s ", "RELU");
break;
case PIPE_ML_OPERATION_TYPE_ABSOLUTE:
teflon_debug("%-6s ", "ABS");
teflon_debug("%-15s ", "ABS");
break;
case PIPE_ML_OPERATION_TYPE_LOGISTIC:
teflon_debug("%-6s ", "LOG");
teflon_debug("%-15s ", "LOG");
break;
case PIPE_ML_OPERATION_TYPE_SUBTRACT:
teflon_debug("%-6s ", "SUB");
teflon_debug("%-15s ", "SUB");
break;
case PIPE_ML_OPERATION_TYPE_TRANSPOSE:
teflon_debug("%-6s ", "TRANSPOSE");
teflon_debug("%-15s ", "TRANSPOSE");
break;
case PIPE_ML_OPERATION_TYPE_STRIDED_SLICE:
teflon_debug("%-6s ", "STRIDED_SLICE");
teflon_debug("%-15s ", "STRIDED_SLICE");
break;
case PIPE_ML_OPERATION_TYPE_RESIZE:
teflon_debug("%-6s ", "RESIZE");
teflon_debug("%-15s ", "RESIZE");
break;
}
char *input_buf = ralloc_strdup(NULL, "");
for (unsigned j = 0; j < operations[i].input_count; j++) {
teflon_debug("%d", operations[i].input_tensors[j]->index);
ralloc_asprintf_append(&input_buf, "%d", operations[i].input_tensors[j]->index);
if (j < operations[i].input_count - 1)
teflon_debug(",");
ralloc_asprintf_append(&input_buf, ",");
}
teflon_debug(" ");
char *output_buf = ralloc_strdup(NULL, "");
for (unsigned j = 0; j < operations[i].output_count; j++) {
teflon_debug("%d", operations[i].output_tensors[j]->index);
ralloc_asprintf_append(&output_buf, "%d", operations[i].output_tensors[j]->index);
if (j < operations[i].output_count - 1)
teflon_debug(",");
ralloc_asprintf_append(&output_buf, ",");
}
teflon_debug("\n");
teflon_debug("%-20s %-20s\n", input_buf, output_buf);
ralloc_free(input_buf);
ralloc_free(output_buf);
}
teflon_debug("\n");
}
@@ -740,7 +745,7 @@ PrepareDelegate(TfLiteContext *tf_context, TfLiteDelegate *tf_delegate)
for (int i = 0; i < tf_context->tensors_size; i++)
fill_tensor(delegate, tf_context, &delegate->tensors[i], i);
teflon_debug("%3s %7s %3s %-11s %s\n", "idx", "type", "ver", "support", "inputs");
teflon_debug("%3s %-15s %3s %-11s %s\n", "idx", "type", "ver", "support", "inputs");
teflon_debug("================================================================================================\n");
// Get a list of supported nodes.
@@ -756,7 +761,7 @@ PrepareDelegate(TfLiteContext *tf_context, TfLiteDelegate *tf_delegate)
supported = check_op_support(tf_delegate, tf_context, node, registration);
teflon_debug("%3d %7s v%-2d %-11s in:", node_index,
teflon_debug("%3d %-15s v%-2d %-11s in:", node_index,
tflite_builtin_op_name(registration->builtin_code),
registration->version,
supported ? "supported" : "unsupported");