etnaviv/ml: Add support for Subtract

Based on how we perform addition with a convolution, lower subtraction
the same way: reuse the addition lowering, negating the weight and bias
scales applied to the second input.
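
To make that concrete, here is a minimal standalone sketch (invented
names and values, not the driver code): a 1x1 convolution over two
input channels reduces to a per-element weighted sum, and negating the
second weight turns the add into a subtract.

   #include <stdio.h>

   /* Hypothetical helper: a 1x1 convolution with two input channels and
    * one output channel is just a per-element weighted sum of the two
    * inputs. */
   static void
   conv1x1_2ch(const float *a, const float *b, float w0, float w1,
               float *out, int n)
   {
      for (int i = 0; i < n; i++)
         out[i] = w0 * a[i] + w1 * b[i];
   }

   int
   main(void)
   {
      float a[] = {5, 7, 2, 9};
      float b[] = {1, 3, 8, 4};
      float out[4];

      /* Addition: both weights are +1. */
      conv1x1_2ch(a, b, 1.0f, 1.0f, out, 4);
      for (int i = 0; i < 4; i++)
         printf("%g ", out[i]);   /* 6 10 10 13 */
      printf("\n");

      /* Subtraction: the same kernel with the second weight negated,
       * which is what the lowering below does to the quantized weight
       * and bias scales. */
      conv1x1_2ch(a, b, 1.0f, -1.0f, out, 4);
      for (int i = 0; i < 4; i++)
         printf("%g ", out[i]);   /* 4 4 -6 5 */
      printf("\n");
      return 0;
   }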

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34629>

commit 3170b5f31c (parent 005ab1f0fe)
Author: Tomeu Vizoso
Date:   2025-04-11 08:05:34 +02:00
Committed by: Marge Bot

2 files changed, 20 insertions(+), 1 deletion(-)

@@ -487,6 +487,15 @@ lower_operations(struct etna_ml_subgraph *subgraph,
       list_addtail(&operation->link, etna_operations);
       break;
    }
+   case PIPE_ML_OPERATION_TYPE_SUBTRACT: {
+      struct etna_operation *operation = calloc(1, sizeof(*operation));
+      etna_ml_lower_add(subgraph, poperation, operation);
+      operation->input_tensors[0] = input_tensors[0];
+      operation->input_tensors[1] = input_tensors[1];
+      operation->output_tensors[0] = poperation->output_tensors[0]->index;
+      list_addtail(&operation->link, etna_operations);
+      break;
+   }
    default:
       unreachable("Unsupported ML operation type");
    }
@@ -601,6 +610,7 @@ count_tensors(const struct pipe_ml_operation *poperations,
          tensor_count = MAX2(tensor_count, poperation->fcon.bias_tensor->index);
          break;
       case PIPE_ML_OPERATION_TYPE_PAD:
+      case PIPE_ML_OPERATION_TYPE_SUBTRACT:
       case PIPE_ML_OPERATION_TYPE_ADD:
       case PIPE_ML_OPERATION_TYPE_CONCATENATION:
       case PIPE_ML_OPERATION_TYPE_SPLIT:
@@ -651,6 +661,7 @@ etna_ml_operation_supported(struct pipe_context *pcontext,
          }
          break;
       }
+      case PIPE_ML_OPERATION_TYPE_SUBTRACT:
       case PIPE_ML_OPERATION_TYPE_ADD: {
          supported = operation->input_tensors[0]->resource == NULL &&
                      operation->input_tensors[1]->resource == NULL;

@@ -758,9 +758,11 @@ etna_ml_lower_add_v8(struct etna_ml_subgraph *subgraph,
                      struct etna_operation *operation)
 {
    struct pipe_context *context = subgraph->base.context;
+   bool subtract = poperation->type == PIPE_ML_OPERATION_TYPE_SUBTRACT;
    unsigned max_input_dim = (1 << 13) - 1; /* in_image_x_size is 13 bits long */
 
-   assert(poperation->type == PIPE_ML_OPERATION_TYPE_ADD);
+   assert(poperation->type == PIPE_ML_OPERATION_TYPE_ADD ||
+          poperation->type == PIPE_ML_OPERATION_TYPE_SUBTRACT);
 
    operation->type = ETNA_JOB_TYPE_NN;
    operation->addition = false;
@@ -836,6 +838,9 @@ etna_ml_lower_add_v8(struct etna_ml_subgraph *subgraph,
    float min = 1.0 * (poperation->input_tensors[1]->scale / poperation->input_tensors[0]->scale);
    float max = 1.0;
 
+   if (subtract)
+      min *= -1.0;
+
    calc_quant_params(min, max, &operation->weight_scale, &operation->weight_zero_point);
 
    unsigned kernel_size = operation->output_channels * operation->weight_width * operation->weight_height * operation->input_channels;
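
A quick numeric check of the hunk above (invented values, zero points
taken as zero, float math instead of the driver's quantized weights):
the second input's weight carries the scale ratio so both terms
accumulate in the first input's units, and flipping its sign is what
turns the add into a subtract.

   #include <stdio.h>

   int
   main(void)
   {
      float s0 = 0.5f, s1 = 0.25f;     /* input scales */
      int q0 = 14, q1 = 20;            /* real values: 7.0 and 5.0 */

      /* Subtract: the second input's weight is the negated scale
       * ratio, the quantized counterpart of "min *= -1.0" above. */
      float w1 = -(s1 / s0);
      float acc = 1.0f * q0 + w1 * q1; /* accumulate in q0's units */

      printf("%g\n", s0 * acc);        /* 2, i.e. 7.0 - 5.0 */
      return 0;
   }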
@@ -861,6 +866,9 @@ etna_ml_lower_add_v8(struct etna_ml_subgraph *subgraph,
    double scale_factor = poperation->input_tensors[0]->scale * operation->weight_scale;
    double bias_scale = poperation->input_tensors[1]->scale / scale_factor;
 
+   if (subtract)
+      bias_scale *= -1.0;
+
    int bias = zero_point_diff * round(bias_scale);
    for(unsigned oc = 0; oc < operation->output_channels; oc++)
       bias_map[oc] = bias;
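
Similarly for the bias hunk, a hedged float-domain sketch (invented
values; the driver's fixed-point rounding and zero_point_diff handling
are simplified away): with nonzero zero points the convolution needs a
constant bias term to cancel them, and that term changes sign together
with the weight, which is what the negated bias_scale accomplishes.

   #include <stdio.h>

   int
   main(void)
   {
      float s0 = 0.5f, s1 = 0.25f;       /* input scales */
      int z0 = 4, z1 = 20;               /* input zero points */
      int q0 = 18, q1 = 40;              /* real values: 7.0 and 5.0 */

      float w1 = -(s1 / s0);             /* negated weight, as above */
      float bias = -(float)z0 - w1 * z1; /* cancels both zero points */
      float acc = q0 + w1 * q1 + bias;   /* what the convolution sums */

      printf("%g\n", s0 * acc);          /* 2, i.e. 7.0 - 5.0 */
      return 0;
   }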