teflon/tests: Remove dependency on xtensor

Upstream has been moving headers around and breaking users.

Because we don't use it for much right now, drop the dependency
altogether by open coding some rand() helpers.

Issue: https://gitlab.freedesktop.org/mesa/mesa/-/issues/13681
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37220>
This commit is contained in:
Tomeu Vizoso 2025-09-02 16:51:46 +02:00
parent b148d47c3e
commit 5eab4f06d5
3 changed files with 37 additions and 24 deletions

View file

@ -2193,7 +2193,6 @@ endif
with_teflon = get_option('teflon')
if with_teflon and with_tests
dep_xtensor = dependency('xtensor')
dep_flatbuffers = dependency('flatbuffers')
prog_flatc = find_program('flatc')
endif

View file

@ -10,9 +10,6 @@
#include <stdio.h>
#include <vector>
#include <gtest/gtest.h>
#include <xtensor/xrandom.hpp>
#include "util/macros.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/common.h"
@ -27,6 +24,28 @@ randf(float min, float max)
return ((max - min) * ((float)rand() / (float)RAND_MAX)) + min;
}
/* Fill a tensor of the given shape with pseudo-random values in [min, max].
 *
 * Replacement for xt::random::randint / xt::random::rand: the element count
 * is the product of all dimensions in `shape`, and values are drawn with the
 * C library rand() so the sequence is reproducible via srand().
 *
 * shape: tensor dimensions (an empty shape yields a single element).
 * min/max: inclusive value range; honored for both integral and
 *          floating-point T (the previous version ignored them for floats).
 */
template<typename T>
std::vector<T> rand(const std::vector<int>& shape, T min, T max) {
   size_t size = 1;
   for (int dim : shape)
      size *= dim;

   /* Single allocation; the branches below only fill it. */
   std::vector<T> result(size);
   if constexpr (std::is_integral<T>::value) {
      /* max - min + 1 is computed after integer promotion, so e.g.
       * uint8_t {0, 255} correctly yields a modulus of 256. */
      std::generate(result.begin(), result.end(),
                    [&]() { return ::rand() % (max - min + 1) + min; });
   } else if constexpr (std::is_floating_point<T>::value) {
      /* Same formula as randf(), but honoring the requested range. */
      std::generate(result.begin(), result.end(), [&]() {
         return ((max - min) * ((float)::rand() / (float)RAND_MAX)) + min;
      });
   }
   return result;
}
static void
read_model(const char *file_name, tflite::ModelT &model)
{
@ -111,7 +130,7 @@ patch_conv2d(unsigned operation_index,
bias_tensor->shape.data()[0] = output_channels;
auto bias_data = &model->buffers[bias_buffer_index]->data;
xt::xarray<int32_t> bias_array = xt::random::randint<int32_t>({output_channels}, -20000, 20000);
std::vector<int32_t> bias_array = rand<int32_t>({output_channels}, -20000, 20000);
bias_data->resize(bias_array.size() * sizeof(int32_t));
memcpy(bias_data->data(), bias_array.data(), bias_array.size() * sizeof(int32_t));
@ -140,7 +159,7 @@ patch_conv2d(unsigned operation_index,
else
weight_shape = {output_channels, weight_size, weight_size, input_channels};
xt::xarray<uint8_t> weights_array = xt::random::randint<uint8_t>(weight_shape, 0, 255);
std::vector<uint8_t> weights_array = rand<uint8_t>(weight_shape, 0, 255);
weights_data->resize(weights_array.size());
memcpy(weights_data->data(), weights_array.data(), weights_array.size());
@ -250,6 +269,8 @@ add_generate_model(int input_size,
return buf;
}
static void
patch_fully_connected(unsigned operation_index,
tflite::ModelT *model,
@ -287,7 +308,7 @@ patch_fully_connected(unsigned operation_index,
bias_tensor->shape.data()[0] = output_channels;
auto bias_data = &model->buffers[bias_buffer_index]->data;
xt::xarray<int32_t> bias_array = xt::random::randint<int32_t>({output_channels}, -20000, 20000);
std::vector<int32_t> bias_array = rand<int32_t>({output_channels}, -20000, 20000);
bias_data->resize(bias_array.size() * sizeof(int32_t));
memcpy(bias_data->data(), bias_array.data(), bias_array.size() * sizeof(int32_t));
@ -302,7 +323,7 @@ patch_fully_connected(unsigned operation_index,
std::vector<int> weight_shape;
weight_shape = {output_channels, input_size};
xt::xarray<uint8_t> weights_array = xt::random::randint<uint8_t>(weight_shape, 0, 255);
std::vector<uint8_t> weights_array = rand<uint8_t>(weight_shape, 0, 255);
weights_data->resize(weights_array.size());
memcpy(weights_data->data(), weights_array.data(), weights_array.size());
@ -432,7 +453,7 @@ run_model(TfLiteModel *model, enum executor executor, void ***input, size_t *num
if ((*input)[i] == NULL) {
(*input)[i] = malloc(input_tensor->bytes);
std::vector<size_t> shape;
std::vector<int> shape;
shape.resize(input_tensor->dims->size);
for (int j = 0; j < input_tensor->dims->size; j++)
@ -440,12 +461,12 @@ run_model(TfLiteModel *model, enum executor executor, void ***input, size_t *num
switch (input_tensor->type) {
case kTfLiteFloat32: {
xt::xarray<float_t> a = xt::random::rand<float_t>(shape);
std::vector<float> a = rand<float>(shape, -1.0, 1.0);
memcpy((*input)[i], a.data(), input_tensor->bytes);
break;
}
default: {
xt::xarray<uint8_t> a = xt::random::randint<uint8_t>(shape, 0, 255);
std::vector<uint8_t> a = rand<uint8_t>(shape, 0, 255);
memcpy((*input)[i], a.data(), input_tensor->bytes);
break;
}

View file

@ -3,13 +3,13 @@
* SPDX-License-Identifier: MIT
*/
#include <algorithm>
#include <cstdio>
#include <fcntl.h>
#include <filesystem>
#include <fstream>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <xtensor/xrandom.hpp>
#include <iostream>
#include <sstream>
@ -38,13 +38,6 @@ std::vector<int> input_size{3, 5, 8, 80, 112};
std::vector<int> fc_channels{23, 46, 128, 256, 512};
std::vector<int> fc_size{128, 1280, 25088, 62720};
static void
set_seed(unsigned seed)
{
srand(seed);
xt::random::seed(seed);
}
static void
test_model(void *buf, size_t buf_size, std::string cache_dir, unsigned tolerance)
{
@ -175,7 +168,7 @@ test_model_file(std::string file_name, unsigned tolerance, bool use_cache)
cache_dir << path.stem().string();
}
set_seed(4);
srand(4);
struct stat sb;
int model_fd = open(file_name.c_str(), O_RDONLY);
@ -200,7 +193,7 @@ test_conv(int input_size, int weight_size, int input_channels, int output_channe
if (weight_size > input_size)
GTEST_SKIP();
set_seed(seed);
srand(seed);
if (cache_is_enabled()) {
if (access(model_cache.str().c_str(), F_OK) == 0) {
@ -245,7 +238,7 @@ test_add(int input_size, int weight_size, int input_channels, int output_channel
if (weight_size > input_size)
GTEST_SKIP();
set_seed(seed);
srand(seed);
if (cache_is_enabled()) {
if (access(model_cache.str().c_str(), F_OK) == 0) {
@ -284,7 +277,7 @@ test_fully_connected(int input_size, int output_channels, bool is_signed, int se
model_cache << cache_dir.str() << "/"
<< "model.tflite";
set_seed(seed);
srand(seed);
if (cache_is_enabled()) {
if (access(model_cache.str().c_str(), F_OK) == 0) {
@ -579,7 +572,7 @@ main(int argc, char **argv)
int depthwise = atoi(argv[n++]);
int seed = atoi(argv[n++]);
set_seed(seed);
srand(seed);
buf = conv2d_generate_model(input_size, weight_size,
input_channels, output_channels,