teflon: Don't crash when a tensor isn't quantized

We don't yet support hardware that can deal with floats, but it is
better not to crash.

Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32485>
This commit is contained in:
Tomeu Vizoso 2024-11-07 07:56:07 +01:00 committed by Marge Bot
parent a548b17b4e
commit f21d8af43a

View file

@@ -165,7 +165,6 @@ fill_tensor(struct teflon_delegate *delegate, TfLiteContext *tf_context, struct
{
struct pipe_context *context = delegate->context;
TfLiteTensor tf_tensor = tf_context->tensors[index];
const TfLiteAffineQuantization *quant = (const TfLiteAffineQuantization *)tf_tensor.quantization.params;
if (tf_tensor.type == kTfLiteNoType)
return; /* Placeholder tensor */
@@ -175,8 +174,12 @@ fill_tensor(struct teflon_delegate *delegate, TfLiteContext *tf_context, struct
tensor->index = index;
memcpy(tensor->dims, tf_tensor.dims->data, tf_tensor.dims->size * sizeof(*tensor->dims));
tensor->scale = quant->scale->data[0];
tensor->zero_point = quant->zero_point->data[0];
if (tf_tensor.quantization.type == kTfLiteAffineQuantization) {
const TfLiteAffineQuantization *quant = (const TfLiteAffineQuantization *)tf_tensor.quantization.params;
tensor->scale = quant->scale->data[0];
tensor->zero_point = quant->zero_point->data[0];
}
switch(tf_tensor.type) {
case kTfLiteUInt8: