From f21d8af43a52853f742326d83c114d0d1dcc8edb Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Thu, 7 Nov 2024 07:56:07 +0100 Subject: [PATCH] teflon: Don't crash when a tensor isn't quantized We don't yet support hardware that can deal with floats, but it is better not to crash. Reviewed-by: Philipp Zabel Part-of: --- src/gallium/frontends/teflon/tfl_device.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/gallium/frontends/teflon/tfl_device.c b/src/gallium/frontends/teflon/tfl_device.c index 863344227ad..e6b26197ec7 100644 --- a/src/gallium/frontends/teflon/tfl_device.c +++ b/src/gallium/frontends/teflon/tfl_device.c @@ -165,7 +165,6 @@ fill_tensor(struct teflon_delegate *delegate, TfLiteContext *tf_context, struct { struct pipe_context *context = delegate->context; TfLiteTensor tf_tensor = tf_context->tensors[index]; - const TfLiteAffineQuantization *quant = (const TfLiteAffineQuantization *)tf_tensor.quantization.params; if (tf_tensor.type == kTfLiteNoType) return; /* Placeholder tensor */ @@ -175,8 +174,12 @@ fill_tensor(struct teflon_delegate *delegate, TfLiteContext *tf_context, struct tensor->index = index; memcpy(tensor->dims, tf_tensor.dims->data, tf_tensor.dims->size * sizeof(*tensor->dims)); - tensor->scale = quant->scale->data[0]; - tensor->zero_point = quant->zero_point->data[0]; + + if (tf_tensor.quantization.type == kTfLiteAffineQuantization) { + const TfLiteAffineQuantization *quant = (const TfLiteAffineQuantization *)tf_tensor.quantization.params; + tensor->scale = quant->scale->data[0]; + tensor->zero_point = quant->zero_point->data[0]; + } switch(tf_tensor.type) { case kTfLiteUInt8: