mesa: import virtgpu_kumquat_ffi

This adds a C FFI frontend to VirtGpuKumquat, to be used
with gfxstream.  This allows testing gfxstream without a virtual
machine.

Reviewed-by: Aaron Ruby <aruby@qnx.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35210>
This commit is contained in:
Gurchetan Singh 2025-05-27 08:16:54 -07:00 committed by Marge Bot
parent c00027b46f
commit f2b07903ab
4 changed files with 750 additions and 0 deletions

View file

@ -0,0 +1,14 @@
# Copyright 2025 Google
# SPDX-License-Identifier: MIT

# clang-format configuration for the virtgpu_kumquat FFI C sources.
# 4-space indent, 100-column limit, Linux-style braces on top of LLVM defaults.
BasedOnStyle: LLVM
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
BreakBeforeBraces: Linux
ColumnLimit: 100
IndentWidth: 4
TabWidth: 4
UseTab: Never
Cpp11BracedListStyle: false
IndentCaseLabels: false
View file

@ -0,0 +1,285 @@
/*
* Copyright 2025 Google
* SPDX-License-Identifier: MIT
*/
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#ifndef VIRTGPU_KUMQUAT_FFI_H
#define VIRTGPU_KUMQUAT_FFI_H
#ifdef __cplusplus
extern "C" {
#endif
struct virtgpu_kumquat;
/* Argument for virtgpu_kumquat_resource_map(): maps a buffer object into the
 * caller's address space. */
struct drm_kumquat_map {
    uint32_t bo_handle; /* in: buffer object to map */
    // out
    void *ptr;     /* out: address of the mapping */
    uint64_t size; /* out: size of the mapping in bytes */
};
#define VIRTGPU_KUMQUAT_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_KUMQUAT_EXECBUF_SYNCOBJ_FLAGS (VIRTGPU_KUMQUAT_EXECBUF_SYNCOBJ_RESET | 0)

/* A syncobj reference in the in_syncobjs/out_syncobjs arrays of
 * drm_kumquat_execbuffer. */
struct drm_kumquat_execbuffer_syncobj {
    uint32_t handle; /* syncobj handle */
    uint32_t flags;  /* VIRTGPU_KUMQUAT_EXECBUF_SYNCOBJ_* */
    uint64_t point;  /* timeline point for timeline syncobjs */
};

#define VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_IN 0x01
#define VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_OUT 0x02
#define VIRTGPU_KUMQUAT_EXECBUF_RING_IDX 0x04
#define VIRTGPU_KUMQUAT_EXECBUF_SHAREABLE_IN 0x08
#define VIRTGPU_KUMQUAT_EXECBUF_SHAREABLE_OUT 0x10
/* Fix: the mask must reference the VIRTGPU_KUMQUAT_* flags defined above.
 * The previous unprefixed VIRTGPU_EXECBUF_* names are not defined in this
 * header, so any use of this macro failed to compile. */
#define VIRTGPU_KUMQUAT_EXECBUF_FLAGS                                                              \
    (VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_IN | VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_OUT |          \
     VIRTGPU_KUMQUAT_EXECBUF_RING_IDX | VIRTGPU_KUMQUAT_EXECBUF_SHAREABLE_IN |                     \
     VIRTGPU_KUMQUAT_EXECBUF_SHAREABLE_OUT | 0)

/* fence_handle is modified on success if VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_OUT flag is set. */
struct drm_kumquat_execbuffer {
    uint32_t flags; /* VIRTGPU_KUMQUAT_EXECBUF_* */
    uint32_t size;  /* size of the command buffer in bytes */
    uint64_t command; /* void* */
    uint64_t bo_handles;     /* pointer to array of num_bo_handles uint32_t handles */
    uint32_t num_bo_handles;
    int64_t fence_handle; /* in/out fence fd (see VIRTGPU_KUMQUAT_EXECBUF_FENCE_HANDLE_IN/OUT) */
    uint32_t ring_idx; /* command ring index (see VIRTGPU_KUMQUAT_EXECBUF_RING_IDX) */
    uint32_t syncobj_stride; /* size of @drm_kumquat_execbuffer_syncobj */
    uint32_t num_in_syncobjs;
    uint32_t num_out_syncobjs;
    uint64_t in_syncobjs;
    uint64_t out_syncobjs;
};
/* Parameter ids accepted by virtgpu_kumquat_get_param(). */
#define VIRTGPU_KUMQUAT_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_KUMQUAT_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_KUMQUAT_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_KUMQUAT_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_KUMQUAT_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_KUMQUAT_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_KUMQUAT_CONTEXT_INIT */
#define VIRTGPU_KUMQUAT_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_KUMQUAT_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
#define VIRTGPU_KUMQUAT_PARAM_FENCE_PASSING 9 /* Host shareable fences */
#define VIRTGPU_KUMQUAT_PARAM_CREATE_GUEST_HANDLE 10

/* Argument for virtgpu_kumquat_get_param(). */
struct drm_kumquat_getparam {
    uint64_t param; /* in: one of VIRTGPU_KUMQUAT_PARAM_* */
    uint64_t value; /* out: value of the queried parameter */
};
/* Argument for virtgpu_kumquat_resource_create_3d(); modeled after the
 * virtio-gpu DRM resource-create uapi. */
struct drm_kumquat_resource_create_3d {
    uint32_t target;
    uint32_t format;
    uint32_t bind;
    uint32_t width;
    uint32_t height;
    uint32_t depth;
    uint32_t array_size;
    uint32_t last_level;
    uint32_t nr_samples;
    uint32_t flags;
    uint32_t bo_handle;  /* presumably out — written back by create; confirm against impl */
    uint32_t res_handle; /* presumably out — written back by create; confirm against impl */
    uint32_t size;
    uint32_t stride;
};

/* 16-byte Vulkan device and driver UUIDs identifying the device a resource
 * belongs to. */
struct drm_kumquat_vk_device_id {
    uint8_t device_uuid[16];
    uint8_t driver_uuid[16];
};

struct drm_kumquat_vk_info {
    uint32_t memory_idx; /* NOTE(review): looks like a Vulkan memory type index — confirm */
    struct drm_kumquat_vk_device_id device_id;
};

/* Argument for virtgpu_kumquat_resource_info(). */
struct drm_kumquat_resource_info {
    uint32_t bo_handle; /* in: buffer object to query */
    struct drm_kumquat_vk_info vulkan_info; /* out: filled in on success */
};
/* A 3D region within a resource: (x, y, z) offset plus (w, h, d) extent. */
struct drm_kumquat_3d_box {
    uint32_t x;
    uint32_t y;
    uint32_t z;
    uint32_t w;
    uint32_t h;
    uint32_t d;
};

/* Argument for virtgpu_kumquat_transfer_to_host(). */
struct drm_kumquat_transfer_to_host {
    uint32_t bo_handle;
    struct drm_kumquat_3d_box box; /* region of the resource to transfer */
    uint32_t level;
    uint32_t offset;
    uint32_t stride;
    uint32_t layer_stride;
};

/* Argument for virtgpu_kumquat_transfer_from_host(); same layout as the
 * to-host direction. */
struct drm_kumquat_transfer_from_host {
    uint32_t bo_handle;
    struct drm_kumquat_3d_box box;
    uint32_t level;
    uint32_t offset;
    uint32_t stride;
    uint32_t layer_stride;
};

/* Argument for virtgpu_kumquat_wait(). */
struct drm_kumquat_wait {
    uint32_t handle; /* 0 is an invalid handle */
    uint32_t flags;
};

/* Argument for virtgpu_kumquat_get_caps(): the capset data is copied into
 * the caller-provided buffer at addr. */
struct drm_kumquat_get_caps {
    uint32_t cap_set_id;
    uint32_t cap_set_ver;
    uint64_t addr; /* pointer to a buffer of at least `size` bytes */
    uint32_t size;
    uint32_t pad;
};
/* Argument for virtgpu_kumquat_resource_create_blob(). */
struct drm_kumquat_resource_create_blob {
/* blob_mem values: where the blob's backing memory lives. */
#define VIRTGPU_KUMQUAT_MEM_GUEST 0x0001
#define VIRTGPU_KUMQUAT_MEM_HOST3D 0x0002
#define VIRTGPU_KUMQUAT_MEM_HOST3D_GUEST 0x0003
/* blob_flags values: intended usage of the blob. */
#define VIRTGPU_KUMQUAT_FLAG_USE_MAPPABLE 0x0001
#define VIRTGPU_KUMQUAT_FLAG_USE_SHAREABLE 0x0002
#define VIRTGPU_KUMQUAT_FLAG_USE_CROSS_DEVICE 0x0004
    /* zero is invalid blob_mem */
    uint32_t blob_mem;
    uint32_t blob_flags;
    uint32_t bo_handle;
    uint32_t res_handle;
    uint64_t size;
    /*
     * for 3D contexts with VIRTGPU_KUMQUAT_MEM_HOST3D_GUEST and
     * VIRTGPU_KUMQUAT_MEM_HOST3D otherwise, must be zero.
     */
    uint32_t pad;
    uint32_t cmd_size; /* size in bytes of the optional command buffer at cmd */
    uint64_t cmd;      /* pointer to an optional command buffer */
    uint64_t blob_id;
};

/* Argument for virtgpu_kumquat_resource_unref(). */
struct drm_kumquat_resource_unref {
    uint32_t bo_handle;
    uint32_t pad;
};

/* Parameter ids for drm_kumquat_context_set_param. */
#define VIRTGPU_KUMQUAT_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_KUMQUAT_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_KUMQUAT_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_KUMQUAT_CONTEXT_PARAM_DEBUG_NAME 0x0004

/* A single (param, value) pair consumed at context initialization. */
struct drm_kumquat_context_set_param {
    uint64_t param; /* one of VIRTGPU_KUMQUAT_CONTEXT_PARAM_* */
    uint64_t value;
};

/* Argument for virtgpu_kumquat_context_init(). */
struct drm_kumquat_context_init {
    uint32_t num_params;
    uint32_t pad;
    /* pointer to drm_kumquat_context_set_param array */
    uint64_t ctx_set_params;
};
/*
 * Without VIRTGPU_KUMQUAT_EMULATED_EXPORT, the server side descriptor will
 * be provided.
 *
 * With VIRTGPU_KUMQUAT_EMULATED_EXPORT, a shared memory descriptor embedded
 * with resource will be provided.
 */
#define VIRTGPU_KUMQUAT_EMULATED_EXPORT 0x0001

/* Memory handle types (low nibble). */
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_MEM_OPAQUE_FD 0x1
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_MEM_DMABUF 0x2
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_MEM_OPAQUE_WIN32 0x3
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_MEM_SHM 0x4
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_MEM_ZIRCON 0x5

/* Signal (fence) handle types (high nibble). */
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_SIGNAL_OPAQUE_FD 0x10
/* Fix: was the malformed literal "0x0x20", which fails to compile wherever
 * the macro is used; the intended value in this sequence is 0x20. */
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_SIGNAL_SYNC_FD 0x20
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_SIGNAL_OPAQUE_WIN32 0x30
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_SIGNAL_ZIRCON 0x40
#define VIRTGPU_KUMQUAT_HANDLE_TYPE_SIGNAL_EVENT_FD 0x50

/* Argument for virtgpu_kumquat_resource_export(); ownership of os_handle is
 * transferred to the caller on success. */
struct drm_kumquat_resource_export {
    uint32_t bo_handle; /* in: buffer object to export */
    uint32_t flags;     /* in: VIRTGPU_KUMQUAT_EMULATED_EXPORT or zero */
    int64_t os_handle;    /* out: exported OS descriptor */
    uint32_t handle_type; /* out: VIRTGPU_KUMQUAT_HANDLE_TYPE_* */
};

/* Argument for virtgpu_kumquat_resource_import(); ownership of os_handle is
 * transferred to the implementation. */
struct drm_kumquat_resource_import {
    int64_t os_handle;    /* in: OS descriptor to import */
    uint32_t handle_type; /* in: VIRTGPU_KUMQUAT_HANDLE_TYPE_* */
    uint32_t bo_handle;  /* out */
    uint32_t res_handle; /* out */
    uint64_t size;       /* out */
};
/*
 * All entry points return 0 on success or a negative errno-style value on
 * failure (-EINVAL for reported errors, -ESRCH if the implementation caught
 * a panic).
 */

/* Lifecycle: create/destroy an instance; gpu_socket may be NULL for the
 * default socket path. */
int32_t virtgpu_kumquat_init(struct virtgpu_kumquat **ptr, const char *gpu_socket);
int32_t virtgpu_kumquat_finish(struct virtgpu_kumquat **ptr);

/* Queries. */
int32_t virtgpu_kumquat_get_param(struct virtgpu_kumquat *ptr, struct drm_kumquat_getparam *cmd);
int32_t virtgpu_kumquat_get_caps(struct virtgpu_kumquat *ptr, struct drm_kumquat_get_caps *cmd);

/* Context and resource management. */
int32_t virtgpu_kumquat_context_init(struct virtgpu_kumquat *ptr,
                                     struct drm_kumquat_context_init *cmd);
int32_t virtgpu_kumquat_resource_create_3d(struct virtgpu_kumquat *ptr,
                                           struct drm_kumquat_resource_create_3d *cmd);
int32_t virtgpu_kumquat_resource_create_blob(struct virtgpu_kumquat *ptr,
                                             struct drm_kumquat_resource_create_blob *cmd);
int32_t virtgpu_kumquat_resource_unref(struct virtgpu_kumquat *ptr,
                                       struct drm_kumquat_resource_unref *cmd);
int32_t virtgpu_kumquat_resource_map(struct virtgpu_kumquat *ptr, struct drm_kumquat_map *cmd);
int32_t virtgpu_kumquat_resource_unmap(struct virtgpu_kumquat *ptr, uint32_t bo_handle);

/* Transfers, submission, synchronization. */
int32_t virtgpu_kumquat_transfer_to_host(struct virtgpu_kumquat *ptr,
                                         struct drm_kumquat_transfer_to_host *cmd);
int32_t virtgpu_kumquat_transfer_from_host(struct virtgpu_kumquat *ptr,
                                           struct drm_kumquat_transfer_from_host *cmd);
int32_t virtgpu_kumquat_execbuffer(struct virtgpu_kumquat *ptr, struct drm_kumquat_execbuffer *cmd);
int32_t virtgpu_kumquat_wait(struct virtgpu_kumquat *ptr, struct drm_kumquat_wait *cmd);

// The following commands are more emulated than the rest.
int32_t virtgpu_kumquat_resource_export(struct virtgpu_kumquat *ptr,
                                        struct drm_kumquat_resource_export *cmd);
int32_t virtgpu_kumquat_resource_import(struct virtgpu_kumquat *ptr,
                                        struct drm_kumquat_resource_import *cmd);
int32_t virtgpu_kumquat_resource_info(struct virtgpu_kumquat *ptr,
                                      struct drm_kumquat_resource_info *cmd);
int32_t virtgpu_kumquat_snapshot_save(struct virtgpu_kumquat *ptr);
int32_t virtgpu_kumquat_snapshot_restore(struct virtgpu_kumquat *ptr);
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,424 @@
// Copyright 2025 Google
// SPDX-License-Identifier: MIT
use std::ffi::c_char;
use std::ffi::c_void;
use std::ffi::CStr;
use std::panic::catch_unwind;
use std::panic::AssertUnwindSafe;
use std::ptr::null_mut;
use std::slice::from_raw_parts;
use std::slice::from_raw_parts_mut;
use std::sync::Mutex;
use libc::EINVAL;
use libc::ESRCH;
use log::error;
use mesa3d_util::FromRawDescriptor;
use mesa3d_util::IntoRawDescriptor;
use mesa3d_util::MesaHandle;
use mesa3d_util::MesaResult;
use mesa3d_util::OwnedDescriptor;
use mesa3d_util::RawDescriptor;
use mesa3d_util::DEFAULT_RAW_DESCRIPTOR;
use virtgpu_kumquat::defines::*;
use virtgpu_kumquat::VirtGpuKumquat;
const NO_ERROR: i32 = 0;

/// Collapses a `MesaResult` into a C status code: 0 on success, `-EINVAL`
/// (after logging the error) on failure.
fn return_result<T>(result: MesaResult<T>) -> i32 {
    match result {
        Ok(_) => NO_ERROR,
        Err(e) => {
            error!("An error occurred: {}", e);
            -EINVAL
        }
    }
}
// Evaluates `$result` to its Ok value; on Err, logs the error and makes the
// enclosing function return -EINVAL (unlike `return_result`, this
// short-circuits so the caller can keep using the unwrapped value).
macro_rules! return_on_error {
    ($result:expr) => {
        match $result {
            Ok(t) => t,
            Err(e) => {
                error!("An error occurred: {}", e);
                return -EINVAL;
            }
        }
    };
}
// The opaque instance handed across the FFI boundary: the Rust implementation
// guarded by a mutex so concurrent C callers serialize.
#[allow(non_camel_case_types)]
type virtgpu_kumquat_ffi = Mutex<VirtGpuKumquat>;

// The following structs (in define.rs) must be ABI-compatible with FFI header
// (virtgpu_kumquat_ffi.h).  Each C-named alias below maps a header struct to
// its Rust definition.
#[allow(non_camel_case_types)]
type drm_kumquat_getparam = VirtGpuParam;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_unref = VirtGpuResourceUnref;
#[allow(non_camel_case_types)]
type drm_kumquat_get_caps = VirtGpuGetCaps;
#[allow(non_camel_case_types)]
type drm_kumquat_context_init = VirtGpuContextInit;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_create_3d = VirtGpuResourceCreate3D;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_create_blob = VirtGpuResourceCreateBlob;
// Both transfer directions share one Rust representation.
#[allow(non_camel_case_types)]
type drm_kumquat_transfer_to_host = VirtGpuTransfer;
#[allow(non_camel_case_types)]
type drm_kumquat_transfer_from_host = VirtGpuTransfer;
#[allow(non_camel_case_types)]
type drm_kumquat_execbuffer = VirtGpuExecBuffer;
#[allow(non_camel_case_types)]
type drm_kumquat_wait = VirtGpuWait;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_map = VirtGpuResourceMap;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_export = VirtGpuResourceExport;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_import = VirtGpuResourceImport;
#[allow(non_camel_case_types)]
type drm_kumquat_resource_info = VirtGpuResourceInfo;
/// C entry point: connects to the kumquat GPU server socket (defaulting to
/// "/tmp/kumquat-gpu-0" when `gpu_socket` is NULL) and stores a heap-owned,
/// mutex-wrapped instance in `*ptr`.
///
/// Returns 0 on success, -EINVAL on error, -ESRCH if a panic was caught.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_init(
    ptr: &mut *mut virtgpu_kumquat_ffi,
    // Nullable-pointer pattern: `None` corresponds to a NULL C string.
    gpu_socket: Option<&c_char>,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let gpu_socket_str = match gpu_socket {
            Some(value) => {
                // SAFETY:
                // The API user must pass in a valid C-string.
                let c_str_slice = unsafe { CStr::from_ptr(value) };
                let result = c_str_slice.to_str();
                return_on_error!(result)
            }
            None => "/tmp/kumquat-gpu-0",
        };
        let result = VirtGpuKumquat::new(gpu_socket_str);
        let kmqt = return_on_error!(result);
        // Ownership leaks to the caller here; reclaimed by virtgpu_kumquat_finish().
        *ptr = Box::into_raw(Box::new(Mutex::new(kmqt))) as _;
        NO_ERROR
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: tears down an instance created by `virtgpu_kumquat_init()`
/// and nulls out the caller's pointer.  Returns 0, or -ESRCH if a panic was
/// caught during teardown.
#[no_mangle]
pub extern "C" fn virtgpu_kumquat_finish(ptr: &mut *mut virtgpu_kumquat_ffi) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        // Guard against double-finish or a never-initialized pointer:
        // Box::from_raw on a null pointer is undefined behavior.
        if !(*ptr).is_null() {
            // SAFETY: a non-null *ptr was produced by Box::into_raw in
            // virtgpu_kumquat_init, so reconstituting the Box (and dropping
            // it) is sound.
            let _ = unsafe { Box::from_raw(*ptr) };
        }
        *ptr = null_mut();
        NO_ERROR
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: queries a single device parameter through `cmd`.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_get_param(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_getparam,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().get_param(cmd))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: copies capset `cmd.cap_set_id` into the caller-provided
/// buffer at `cmd.addr` (up to `cmd.size` bytes).
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_get_caps(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &drm_kumquat_get_caps,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let caps_slice = if cmd.size != 0 {
            // SAFETY:
            // The API user must pass in a valid array to hold capset data.
            unsafe { from_raw_parts_mut(cmd.addr as *mut u8, cmd.size as usize) }
        } else {
            // Zero-sized query: hand the implementation an empty slice.
            &mut []
        };
        let result = ptr.lock().unwrap().get_caps(cmd.cap_set_id, caps_slice);
        return_result(result)
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: creates a context from the parameter array in `cmd`.
/// Only VIRTGPU_KUMQUAT_CONTEXT_PARAM_CAPSET_ID is consumed here; all other
/// parameter ids are currently ignored, and the debug name is always empty.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_context_init(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &drm_kumquat_context_init,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let context_params: &[VirtGpuParam] = if cmd.num_params != 0 {
            // SAFETY:
            // The API user must pass in a valid array of context parameters.
            unsafe {
                from_raw_parts(
                    cmd.ctx_set_params as *const VirtGpuParam,
                    cmd.num_params as usize,
                )
            }
        } else {
            &[]
        };
        let mut capset_id: u64 = 0;
        // Last CAPSET_ID entry wins if the caller supplies duplicates.
        for param in context_params {
            match param.param {
                VIRTGPU_KUMQUAT_CONTEXT_PARAM_CAPSET_ID => {
                    capset_id = param.value;
                }
                _ => (),
            }
        }
        let result = ptr.lock().unwrap().context_create(capset_id, "");
        return_result(result)
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: creates a classic 3D resource described by `cmd`.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_create_3d(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_create_3d,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().resource_create_3d(cmd))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: creates a blob resource, optionally forwarding the command
/// buffer at `cmd.cmd` (`cmd.cmd_size` bytes) to the implementation.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_create_blob(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_create_blob,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let blob_cmd = if cmd.cmd_size != 0 {
            // SAFETY:
            // The API user must pass in a valid command buffer with correct size.
            unsafe { from_raw_parts(cmd.cmd as *const u8, cmd.cmd_size as usize) }
        } else {
            &[]
        };
        let result = ptr.lock().unwrap().resource_create_blob(cmd, blob_cmd);
        return_result(result)
    }))
    .unwrap_or(-ESRCH)
}
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_unref(
ptr: &mut virtgpu_kumquat_ffi,
cmd: &mut drm_kumquat_resource_unref,
) -> i32 {
catch_unwind(AssertUnwindSafe(|| {
let result = ptr.lock().unwrap().resource_unref(cmd.bo_handle);
return_result(result)
}))
.unwrap_or(-ESRCH)
}
/// C entry point: maps `cmd.bo_handle` and reports the mapping's address and
/// size back through `cmd`.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_map(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_map,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let mapping = return_on_error!(ptr.lock().unwrap().map(cmd.bo_handle));
        cmd.ptr = mapping.ptr as *mut c_void;
        cmd.size = mapping.size;
        NO_ERROR
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: unmaps a previously mapped buffer object.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_unmap(
    ptr: &mut virtgpu_kumquat_ffi,
    bo_handle: u32,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().unmap(bo_handle))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: forwards a transfer-to-host request to the instance.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_transfer_to_host(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_transfer_to_host,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().transfer_to_host(cmd))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: forwards a transfer-from-host request to the instance.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_transfer_from_host(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_transfer_from_host,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().transfer_from_host(cmd))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: submits the command buffer in `cmd` along with its
/// bo_handles array.  On return, `cmd.fence_handle` holds the raw descriptor
/// produced by the submission (the implementation's default descriptor when
/// no out-fence was requested).
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_execbuffer(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_execbuffer,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let bo_handles = if cmd.num_bo_handles != 0 {
            // SAFETY:
            // The API user must pass in a valid array of bo_handles with correct size.
            unsafe { from_raw_parts(cmd.bo_handles as *const u32, cmd.num_bo_handles as usize) }
        } else {
            &[]
        };
        let cmd_buf = if cmd.size != 0 {
            // SAFETY:
            // The API user must pass in a valid command buffer with correct size.
            unsafe { from_raw_parts(cmd.command as *const u8, cmd.size as usize) }
        } else {
            &[]
        };
        // TODO: in-fences are not plumbed through yet; the in_syncobjs array
        // in `cmd` is currently ignored.
        let in_fences: &[u64] = &[0; 0];
        let mut descriptor: RawDescriptor = DEFAULT_RAW_DESCRIPTOR;
        let result = ptr.lock().unwrap().submit_command(
            cmd.flags,
            bo_handles,
            cmd_buf,
            cmd.ring_idx,
            in_fences,
            &mut descriptor,
        );
        // Hand the raw descriptor back to the C caller as an i64.
        cmd.fence_handle = descriptor as i64;
        return_result(result)
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: waits on the buffer object named by `cmd.bo_handle`.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_wait(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_wait,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        return_result(ptr.lock().unwrap().wait(cmd.bo_handle))
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: exports `cmd.bo_handle` as an OS handle.  On success,
/// ownership of the raw descriptor is transferred to the C caller via
/// `cmd.os_handle`, and `cmd.handle_type` identifies its kind.
#[no_mangle]
pub extern "C" fn virtgpu_kumquat_resource_export(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_export,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let result = ptr
            .lock()
            .unwrap()
            .resource_export(cmd.bo_handle, cmd.flags);
        let hnd = return_on_error!(result);
        (*cmd).handle_type = hnd.handle_type;
        // into_raw_descriptor() relinquishes ownership; the caller must close it.
        (*cmd).os_handle = hnd.os_handle.into_raw_descriptor() as i64;
        NO_ERROR
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: imports the OS handle in `cmd.os_handle` (whose ownership
/// is taken over by this call) and writes the resulting bo_handle,
/// res_handle, and size back through `cmd`.
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_resource_import(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_import,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        let handle = MesaHandle {
            // SAFETY:
            // The API user must transfer ownership of a valid OS handle.
            os_handle: unsafe {
                OwnedDescriptor::from_raw_descriptor((*cmd).os_handle.into_raw_descriptor())
            },
            handle_type: (*cmd).handle_type,
        };
        let result = ptr.lock().unwrap().resource_import(
            handle,
            &mut cmd.bo_handle,
            &mut cmd.res_handle,
            &mut cmd.size,
        );
        return_result(result)
    }))
    .unwrap_or(-ESRCH)
}
/// C entry point: queries Vulkan-related info for `cmd.bo_handle` and stores
/// it in `cmd.vulkan_info`.
#[no_mangle]
pub extern "C" fn virtgpu_kumquat_resource_info(
    ptr: &mut virtgpu_kumquat_ffi,
    cmd: &mut drm_kumquat_resource_info,
) -> i32 {
    catch_unwind(AssertUnwindSafe(|| {
        cmd.vulkan_info = return_on_error!(ptr.lock().unwrap().resource_info(cmd.bo_handle));
        NO_ERROR
    }))
    .unwrap_or(-ESRCH)
}
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_snapshot_save(ptr: &mut virtgpu_kumquat_ffi) -> i32 {
catch_unwind(AssertUnwindSafe(|| {
let result = ptr.lock().unwrap().snapshot();
return_result(result)
}))
.unwrap_or(-ESRCH)
}
#[no_mangle]
pub unsafe extern "C" fn virtgpu_kumquat_snapshot_restore(ptr: &mut virtgpu_kumquat_ffi) -> i32 {
catch_unwind(AssertUnwindSafe(|| {
let result = ptr.lock().unwrap().restore();
return_result(result)
}))
.unwrap_or(-ESRCH)
}

View file

@ -0,0 +1,27 @@
# Copyright © 2025 Google
# SPDX-License-Identifier: MIT

# Builds the C-ABI static library that wraps virtgpu_kumquat (lib.rs) for
# consumers such as gfxstream.  The public header lives under include/.
inc_virtgpu_kumquat_ffi = include_directories('include')

# Rust `log` crate, used by lib.rs for error reporting.
dep_log = dependency('log',
    version: '>= 0.4.22',
    fallback: ['log', 'dep_log'],
    required: true,
)

virtgpu_kumquat_ffi_args = [
    # we want unsafe blocks inside unsafe functions
    '-Dunsafe_op_in_unsafe_fn',
]

libvirtgpu_kumquat_ffi = static_library(
    'virtgpu_kumquat_ffi',
    'lib.rs',
    gnu_symbol_visibility : 'hidden',
    rust_abi : 'c',
    rust_args : [
        virtgpu_kumquat_ffi_args,
    ],
    link_with: [libmesa_protocols, libmesa_rust_util, libvirtgpu_kumquat],
    dependencies: [dep_mesa3d_util, dep_log]
)