ci: Add and use find_s3_project_artifact (and curl-with-retry) helpers

Introduce find_s3_project_artifact (and curl-with-retry) helpers to
simplify locating S3 artifacts across upstream mesa/mesa and forks.

Use these helpers in the Fluster build process and for fetching LAVA
rootfs artifacts.
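
The intended call pattern, mirrored by the build-fluster.sh change below,
looks roughly like this (the artifact path and tarball name here are
placeholders, not paths used by any real job):

    # Probe upstream mesa/mesa first, then the current fork.
    ARTIFACT_PATH="some/storage/path/artifact.tar.zst"
    if FOUND_ARTIFACT_URL="$(find_s3_project_artifact "${ARTIFACT_PATH}")"; then
        # Cache hit: download from whichever project published the artifact.
        curl-with-retry "${FOUND_ARTIFACT_URL}" | tar --zstd -x -C /
    else
        # Cache miss: rebuild, then upload under the current project's path.
        ci-fairy s3cp --token-file "${S3_JWT_FILE}" "artifact.tar.zst" \
            "https://${S3_BASE_PATH}/${CI_PROJECT_PATH}/${ARTIFACT_PATH}"
    fi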

Signed-off-by: Valentine Burley <valentine.burley@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35167>
Authored by Valentine Burley on 2025-05-26 11:43:54 +02:00, committed by Marge Bot
parent 7537f0f42b
commit 0ccfa323bc
7 changed files with 50 additions and 63 deletions


@@ -102,6 +102,8 @@ variables:
   S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
   S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
   S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
+  # Base path used for various artifacts
+  S3_BASE_PATH: "${S3_HOST}/${S3_KERNEL_BUCKET}"
   # per-pipeline artifact storage on MinIO
   PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
   # per-job artifact storage on MinIO


@@ -1,5 +1,5 @@
 variables:
   CONDITIONAL_BUILD_ANGLE_TAG: 30422b1b4732b9f6de6d3e7fe329b569
   CONDITIONAL_BUILD_CROSVM_TAG: bb7f75d912ef43be8c9204385d79820a
-  CONDITIONAL_BUILD_FLUSTER_TAG: f1af06c34ccdbb5b44f78adaeb63603c
+  CONDITIONAL_BUILD_FLUSTER_TAG: f42cd5fa50a120b580b6de838260d5d2
   CONDITIONAL_BUILD_PIGLIT_TAG: 009be6e9949c1121f67c8a3985b08505


@@ -22,28 +22,18 @@ FLUSTER_REVISION="e997402978f62428fffc8e5a4a709690d9ca9bc5"
 git clone https://github.com/fluendo/fluster.git --single-branch --no-checkout
-
-export SKIP_UPDATE_FLUSTER_VECTORS=false
-
-check_fluster()
-{
-  S3_FLUSTER_TAR="${S3_HOST}/${S3_KERNEL_BUCKET}/$1/${DATA_STORAGE_PATH}/fluster/${FLUSTER_TAG}/vectors.tar.zst"
-  if curl -L --retry 4 -f --retry-connrefused --retry-delay 30 -s --head \
-    "https://${S3_FLUSTER_TAR}"; then
-    echo "Fluster vectors are up-to-date, skip rebuilding them."
-    export SKIP_UPDATE_FLUSTER_VECTORS=true
-  fi
-}
-
-check_fluster "${FDO_UPSTREAM_REPO}"
-
-if ! $SKIP_UPDATE_FLUSTER_VECTORS; then
-  check_fluster "${CI_PROJECT_PATH}"
-fi
-
 pushd fluster || exit
 git checkout "${FLUSTER_REVISION}"
 popd || exit
 
-if ! $SKIP_UPDATE_FLUSTER_VECTORS; then
+ARTIFACT_PATH="${DATA_STORAGE_PATH}/fluster/${FLUSTER_TAG}/vectors.tar.zst"
+
+if FOUND_ARTIFACT_URL="$(find_s3_project_artifact "${ARTIFACT_PATH}")"; then
+  echo "Found fluster vectors at: ${FOUND_ARTIFACT_URL}"
+  mv fluster/ /
+  curl-with-retry "${FOUND_ARTIFACT_URL}" | tar --zstd -x -C /
+else
+  echo "No cached vectors found, rebuilding..."
   # Download the necessary vectors: H264, H265 and VP9
   # When updating FLUSTER_REVISION, make sure to update the vectors if necessary or
   # fluster-runner will report Missing results.
@@ -54,14 +44,9 @@ if ! $SKIP_UPDATE_FLUSTER_VECTORS; then
   # Build fluster vectors archive and upload it
   tar --zstd -cf "vectors.tar.zst" fluster/resources/
-  ci-fairy s3cp --token-file "${S3_JWT_FILE}" "vectors.tar.zst" "https://${S3_FLUSTER_TAR}"
-fi
-if $SKIP_UPDATE_FLUSTER_VECTORS; then
-  curl -L --retry 4 -f --retry-connrefused --retry-delay 30 \
-    "${FDO_HTTP_CACHE_URI:-}https://${S3_FLUSTER_TAR}" | tar --zstd -x -C /
+  ci-fairy s3cp --token-file "${S3_JWT_FILE}" "vectors.tar.zst" \
+    "https://${S3_BASE_PATH}/${CI_PROJECT_PATH}/${ARTIFACT_PATH}"
+  mv fluster/ /
 fi
 
 section_end fluster


@@ -25,12 +25,12 @@ variables:
   DEBIAN_TEST_ANDROID_TAG: "20250603-curl-O-2"
   DEBIAN_TEST_GL_TAG: "20250603-vkcts-main"
-  DEBIAN_TEST_VIDEO_TAG: "20250503-fluster"
+  DEBIAN_TEST_VIDEO_TAG: "20250609-helper"
   DEBIAN_TEST_VK_TAG: "20250603-vkcts-main"
   ALPINE_X86_64_BUILD_TAG: "20250423-rootfs"
   ALPINE_X86_64_LAVA_SSH_TAG: "20250423-rootfs"
-  ALPINE_X86_64_LAVA_TRIGGER_TAG: "20250604-time-drift"
+  ALPINE_X86_64_LAVA_TRIGGER_TAG: "20250609-helper"
   FEDORA_X86_64_BUILD_TAG: "20250423-rootfs"


@@ -35,11 +35,7 @@ variables:
   FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions
   # the dispatchers use this to cache data locally
   LAVA_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
-  # base system generated by the container build job, shared between many pipelines
-  BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/${S3_KERNEL_BUCKET}"
-  BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
-  BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
+  # path to the LAVA rootfs
+  LAVA_ROOTFS_PATH: "${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}/lava-rootfs.tar.zst"
   # per-job build artifacts
   JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst"
   S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized"


@@ -6,27 +6,17 @@
 # .gitlab-ci/image-tags.yml tags:
 # ALPINE_X86_64_LAVA_TRIGGER_TAG
-# If we run in the fork (not from mesa or Marge-bot), reuse mainline kernel and rootfs, if exist.
-_check_artifact_path() {
-  _url="https://${1}/${2}"
-  if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
-    echo -n "${_url}"
-  fi
-}
-
-get_path_to_artifact() {
-  _mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
-  if [ -n "${_mainline_artifact}" ]; then
-    echo -n "${_mainline_artifact}"
-    return
-  fi
-  _fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
-  if [ -n "${_fork_artifact}" ]; then
-    echo -n "${_fork_artifact}"
-    return
-  fi
+. "${SCRIPTS_DIR}/setup-test-env.sh"
+
+section_start prepare_rootfs "Preparing root filesystem"
+
+set -ex
+
+# If we run in the fork (not from mesa or Marge-bot), reuse mainline kernel and rootfs, if exist.
+ROOTFS_URL="$(find_s3_project_artifact "$LAVA_ROOTFS_PATH")" ||
+{
   set +x
-  error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
+  error "Sorry, I couldn't find a viable built path for ${LAVA_ROOTFS_PATH} in either mainline or a fork." >&2
   echo "" >&2
   echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
   echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
@@ -38,15 +28,6 @@ get_path_to_artifact() {
   exit 1
 }
-
-. "${SCRIPTS_DIR}/setup-test-env.sh"
-
-section_start prepare_rootfs "Preparing root filesystem"
-
-set -ex
-
-ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
-[ $? != 1 ] || exit 1
 
 rm -rf results
 mkdir results
@@ -73,7 +54,7 @@ if [ -n "${LAVA_FIRMWARE:-}" ]; then
   LAVA_EXTRA_OVERLAYS+=(
     - append-overlay
     --name=linux-firmware
-    --url="https://${BASE_SYSTEM_HOST_PREFIX}/${FIRMWARE_REPO}/${fw}-${FIRMWARE_TAG}.tar"
+    --url="https://${S3_BASE_PATH}/${FIRMWARE_REPO}/${fw}-${FIRMWARE_TAG}.tar"
     --path="/"
     --format=tar
   )


@@ -291,6 +291,29 @@ export -f get_tag_file
 # Structured tagging ------
 
+curl-with-retry() {
+  curl --fail --location --retry-connrefused --retry 4 --retry-delay 15 "$@"
+}
+export -f curl-with-retry
+
+function find_s3_project_artifact() {
+  x_off
+  local artifact_path="$1"
+
+  for project in "${FDO_UPSTREAM_REPO}" "${CI_PROJECT_PATH}"; do
+    local full_path="${FDO_HTTP_CACHE_URI:-}${S3_BASE_PATH}/${project}/${artifact_path}"
+    if curl-with-retry -s --head "https://${full_path}" >/dev/null; then
+      echo "https://${full_path}"
+      x_restore
+      return 0
+    fi
+  done
+
+  x_restore
+  return 1
+}
+export -f find_s3_project_artifact
+
 export -f error
 export -f trap_err
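
find_s3_project_artifact probes ${FDO_UPSTREAM_REPO} before ${CI_PROJECT_PATH},
so fork pipelines keep reusing mainline artifacts when they exist and only fall
back to artifacts the fork uploaded itself. A caller that needs the artifact or
a hard failure can follow the rootfs pattern above, roughly like this (the
artifact path is hypothetical):

    ROOTFS_URL="$(find_s3_project_artifact "example/lava-rootfs.tar.zst")" || exit 1
    curl-with-retry "${ROOTFS_URL}" | tar --zstd -x -C /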