diff --git a/src/gallium/drivers/etnaviv/etnaviv_ml.c b/src/gallium/drivers/etnaviv/etnaviv_ml.c
index 41d64252b8d..87699ddf2e0 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_ml.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_ml.c
@@ -5,6 +5,7 @@
 #include
 #include
+#include "pipe/p_state.h"
 #include
 #include "util/u_inlines.h"
@@ -272,6 +273,10 @@ dump_graph(struct list_head *etna_operations)
          ML_DBG("%3d %-4s %3d %3d out2: %3d",
                 i, "SPLIT", operation->input_tensors[0], operation->output_tensors[0], operation->output_tensors[1]);
          break;
+      case ETNA_JOB_TYPE_BYPASS:
+         ML_DBG("%3d %-4s %3d %3d",
+                i, "BYPASS", operation->input_tensors[0], operation->output_tensors[0]);
+         break;
       }
       ML_DBG("\n");
       i++;
@@ -279,6 +284,25 @@ dump_graph(struct list_head *etna_operations)
    ML_DBG("\n");
 }
 
+static void
+add_bypass(const struct pipe_ml_operation *poperation, unsigned input_tensor,
+           struct etna_operation *operation, struct list_head *etna_operations)
+{
+   operation->type = ETNA_JOB_TYPE_BYPASS;
+   operation->input_count = 1;
+   operation->input_tensors[0] = input_tensor;
+   operation->input_tensor_sizes[0] = poperation->input_tensors[0]->dims[1] *
+                                      poperation->input_tensors[0]->dims[2] *
+                                      poperation->input_tensors[0]->dims[3];
+   operation->output_count = 1;
+   operation->output_tensors[0] = poperation->output_tensors[0]->index;
+   operation->output_tensor_sizes[0] = poperation->output_tensors[0]->dims[1] *
+                                       poperation->output_tensors[0]->dims[2] *
+                                       poperation->output_tensors[0]->dims[3];
+
+   list_addtail(&operation->link, etna_operations);
+}
+
 static bool
 is_3d(struct pipe_tensor *tensor)
 {
@@ -438,6 +462,10 @@ lower_operations(struct etna_ml_subgraph *subgraph,
          list_addtail(&operation->link, etna_operations);
          break;
       }
+      case PIPE_ML_OPERATION_TYPE_RESHAPE: {
+         add_bypass(poperation, input_tensors[0], operation, etna_operations);
+         break;
+      }
       default:
          unreachable("Unsupported ML operation type");
       }
@@ -498,6 +526,13 @@ lower_operations(struct etna_ml_subgraph *subgraph,
                                       operation->input_tensors[1],
                                       operation->input_tensor_sizes[0],
                                       operation->input_tensor_sizes[1]);
+      } else if (operation->type == ETNA_JOB_TYPE_BYPASS) {
+         etna_ml_create_tensor(subgraph, operation->input_tensors[0], operation->input_tensor_sizes[0]);
+         reference_tensor_with_offset(subgraph,
+                                      operation->input_tensors[0],
+                                      operation->output_tensors[0],
+                                      0,
+                                      operation->output_tensor_sizes[0]);
       } else {
          for (int i = 0; i < operation->input_count; i++)
             etna_ml_create_tensor(subgraph, operation->input_tensors[i], operation->input_tensor_sizes[i]);
@@ -548,6 +583,7 @@ count_tensors(const struct pipe_ml_operation *poperations,
       case PIPE_ML_OPERATION_TYPE_ADD:
       case PIPE_ML_OPERATION_TYPE_CONCATENATION:
       case PIPE_ML_OPERATION_TYPE_SPLIT:
+      case PIPE_ML_OPERATION_TYPE_RESHAPE:
         break;
      default:
         unreachable("Unsupported ML operation type");
@@ -632,6 +668,9 @@ etna_ml_operation_supported(struct pipe_context *pcontext,
      supported = operation->input_tensors[0]->dims[3] < 1280;
      break;
   }
+   case PIPE_ML_OPERATION_TYPE_RESHAPE:
+      supported = true;
+      break;
    default:
       return false;
    }
@@ -682,6 +721,7 @@ etna_ml_subgraph_create(struct pipe_context *pcontext,
          break;
       case ETNA_JOB_TYPE_CONCAT:
       case ETNA_JOB_TYPE_SPLIT:
+      case ETNA_JOB_TYPE_BYPASS:
          continue;
       }
 
diff --git a/src/gallium/drivers/etnaviv/etnaviv_ml.h b/src/gallium/drivers/etnaviv/etnaviv_ml.h
index 21dfe461269..0636e3047a3 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_ml.h
+++ b/src/gallium/drivers/etnaviv/etnaviv_ml.h
@@ -18,6 +18,7 @@ enum etna_job_type {
    ETNA_JOB_TYPE_TP,
    ETNA_JOB_TYPE_CONCAT, /* Fake operation, won't execute on HW. Hack will go away after the move to NIR. */
    ETNA_JOB_TYPE_SPLIT, /* Fake operation, won't execute on HW. Hack will go away after the move to NIR. */
+   ETNA_JOB_TYPE_BYPASS, /* Fake operation, won't execute on HW. Hack will go away after the move to NIR. */
 };
 
 enum etna_ml_tp_type {
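
For context on why the bypass is safe: a RESHAPE never changes the element count, so the flattened size that add_bypass() computes (dims[1] * dims[2] * dims[3]) is the same for the input and the output tensor, and the output can simply alias the input buffer at offset 0 via reference_tensor_with_offset(). The standalone sketch below illustrates that invariant; the concrete shapes and the flat_size() helper are illustrative assumptions and are not part of this patch or of the driver.

#include <assert.h>
#include <stdio.h>

/* Mirrors the dims[4] layout used by struct pipe_tensor (batch first, NHWC-style). */
struct shape {
   unsigned dims[4];
};

/* Same size computation as add_bypass(): the batch dimension (dims[0]) is excluded. */
static unsigned
flat_size(const struct shape *s)
{
   return s->dims[1] * s->dims[2] * s->dims[3];
}

int
main(void)
{
   /* Hypothetical RESHAPE: 1x4x4x8 -> 1x1x16x8. The element count is unchanged,
    * so the output tensor can reference the input buffer directly at offset 0. */
   struct shape in  = { { 1, 4, 4, 8 } };
   struct shape out = { { 1, 1, 16, 8 } };

   assert(flat_size(&in) == flat_size(&out));
   printf("bypass size: %u elements\n", flat_size(&in));

   return 0;
}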