rusticl: remove unused interfaces to simplify code

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30082>
This commit is contained in:
Karol Herbst 2024-07-09 10:19:31 +02:00 committed by Marge Bot
parent 7b22bc617b
commit 91cd3295d8
5 changed files with 32 additions and 228 deletions

View file

@ -13,7 +13,6 @@ use mesa_rust::pipe::device::load_screens;
use mesa_rust::pipe::fence::*;
use mesa_rust::pipe::resource::*;
use mesa_rust::pipe::screen::*;
use mesa_rust::pipe::transfer::*;
use mesa_rust_gen::*;
use mesa_rust_util::math::SetBitIndices;
use mesa_rust_util::static_assert;
@ -98,43 +97,11 @@ pub trait HelperContextWrapper {
where
F: Fn(&HelperContext);
fn buffer_map_directly(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
) -> Option<PipeTransfer>;
fn buffer_map_coherent(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
) -> Option<PipeTransfer>;
fn texture_map_directly(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
) -> Option<PipeTransfer>;
fn texture_map_coherent(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
) -> Option<PipeTransfer>;
fn create_compute_state(&self, nir: &NirShader, static_local_mem: u32) -> *mut c_void;
fn delete_compute_state(&self, cso: *mut c_void);
fn compute_state_info(&self, state: *mut c_void) -> pipe_compute_state_object_info;
fn compute_state_subgroup_size(&self, state: *mut c_void, block: &[u32; 3]) -> u32;
fn unmap(&self, tx: PipeTransfer);
fn is_create_fence_fd_supported(&self) -> bool;
fn import_fence(&self, fence_fd: &FenceFd) -> PipeFence;
}
@ -186,46 +153,6 @@ impl<'a> HelperContextWrapper for HelperContext<'a> {
self.lock.flush()
}
fn buffer_map_directly(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
) -> Option<PipeTransfer> {
self.lock.buffer_map_directly(res, offset, size, rw)
}
fn buffer_map_coherent(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
) -> Option<PipeTransfer> {
self.lock
.buffer_map(res, offset, size, rw, ResourceMapType::Coherent)
}
fn texture_map_directly(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
) -> Option<PipeTransfer> {
self.lock.texture_map_directly(res, bx, rw)
}
fn texture_map_coherent(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
) -> Option<PipeTransfer> {
self.lock
.texture_map(res, bx, rw, ResourceMapType::Coherent)
}
fn create_compute_state(&self, nir: &NirShader, static_local_mem: u32) -> *mut c_void {
self.lock.create_compute_state(nir, static_local_mem)
}
@ -242,10 +169,6 @@ impl<'a> HelperContextWrapper for HelperContext<'a> {
self.lock.compute_state_subgroup_size(state, block)
}
fn unmap(&self, tx: PipeTransfer) {
tx.with_ctx(&self.lock);
}
fn is_create_fence_fd_supported(&self) -> bool {
self.lock.is_create_fence_fd_supported()
}

View file

@ -11,7 +11,6 @@ use mesa_rust::compiler::clc::*;
use mesa_rust::compiler::nir::*;
use mesa_rust::nir_pass;
use mesa_rust::pipe::context::RWFlags;
use mesa_rust::pipe::context::ResourceMapType;
use mesa_rust::pipe::resource::*;
use mesa_rust::pipe::screen::ResourceType;
use mesa_rust_gen::*;
@ -1253,15 +1252,8 @@ impl Kernel {
if let Some(printf_buf) = &printf_buf {
let tx = ctx
.buffer_map(
printf_buf,
0,
printf_size as i32,
RWFlags::RD,
ResourceMapType::Normal,
)
.ok_or(CL_OUT_OF_RESOURCES)?
.with_ctx(ctx);
.buffer_map(printf_buf, 0, printf_size as i32, RWFlags::RD)
.ok_or(CL_OUT_OF_RESOURCES)?;
let mut buf: &[u8] =
unsafe { slice::from_raw_parts(tx.ptr().cast(), printf_size as usize) };
let length = u32::from_ne_bytes(*extract(&mut buf));

View file

@ -366,17 +366,6 @@ fn sw_copy(
}
}
/// helper function to determine if we can just map the resource in question or if we have to go
/// through a shadow buffer to let the CPU access the resource's memory
fn can_map_directly(dev: &Device, res: &PipeResource) -> bool {
// there are two parts to this check:
// 1. is the resource located in system RAM
// 2. has the resource a linear memory layout
// we do not want to map memory over the PCIe bus as this generally leads to bad performance.
(dev.unified_memory() || res.is_staging() || res.is_user)
&& (res.is_buffer() || res.is_linear())
}
impl MemBase {
pub fn new_buffer(
context: Arc<Context>,
@ -702,11 +691,6 @@ impl MemBase {
}
}
fn has_user_shadow_buffer(&self, d: &Device) -> CLResult<bool> {
let r = self.get_res_of_dev(d)?;
Ok(!r.is_user && bit_check(self.flags, CL_MEM_USE_HOST_PTR))
}
pub fn host_ptr(&self) -> *mut c_void {
self.host_ptr as *mut c_void
}
@ -1001,20 +985,17 @@ impl Buffer {
offset: usize,
size: usize,
rw: RWFlags,
) -> CLResult<GuardedPipeTransfer<'a>> {
) -> CLResult<PipeTransfer<'a>> {
let offset = self.apply_offset(offset)?;
let r = self.get_res_of_dev(q.device)?;
Ok(ctx
.buffer_map(
r,
offset.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
size.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
rw,
ResourceMapType::Normal,
)
.ok_or(CL_OUT_OF_RESOURCES)?
.with_ctx(ctx))
ctx.buffer_map(
r,
offset.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
size.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
rw,
)
.ok_or(CL_OUT_OF_RESOURCES)
}
// TODO: only sync on unmap when the memory is not mapped for writing
@ -1428,12 +1409,9 @@ impl Image {
ctx: &'a PipeContext,
bx: &pipe_box,
rw: RWFlags,
) -> CLResult<GuardedPipeTransfer<'a>> {
) -> CLResult<PipeTransfer<'a>> {
let r = self.get_res_of_dev(q.device)?;
Ok(ctx
.texture_map(r, bx, rw, ResourceMapType::Normal)
.ok_or(CL_OUT_OF_RESOURCES)?
.with_ctx(ctx))
ctx.texture_map(r, bx, rw).ok_or(CL_OUT_OF_RESOURCES)
}
// TODO: only sync on unmap when the memory is not mapped for writing

View file

@ -36,26 +36,6 @@ impl From<RWFlags> for pipe_map_flags {
}
}
pub enum ResourceMapType {
Normal,
Async,
Coherent,
}
impl From<ResourceMapType> for pipe_map_flags {
fn from(map_type: ResourceMapType) -> Self {
match map_type {
ResourceMapType::Normal => pipe_map_flags(0),
ResourceMapType::Async => pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED,
ResourceMapType::Coherent => {
pipe_map_flags::PIPE_MAP_COHERENT
| pipe_map_flags::PIPE_MAP_PERSISTENT
| pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED
}
}
}
}
impl PipeContext {
pub(super) fn new(context: *mut pipe_context, screen: &Arc<PipeScreen>) -> Option<Self> {
let s = Self {
@ -221,16 +201,16 @@ impl PipeContext {
if ptr.is_null() {
None
} else {
Some(PipeTransfer::new(is_buffer, out, ptr))
Some(PipeTransfer::new(self, is_buffer, out, ptr))
}
}
fn _buffer_map(
pub fn buffer_map(
&self,
res: &PipeResource,
offset: i32,
size: i32,
flags: pipe_map_flags,
rw: RWFlags,
) -> Option<PipeTransfer> {
let b = pipe_box {
x: offset,
@ -240,68 +220,20 @@ impl PipeContext {
..Default::default()
};
self.resource_map(res, &b, flags, true)
}
pub fn buffer_map(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
map_type: ResourceMapType,
) -> Option<PipeTransfer> {
let mut flags: pipe_map_flags = map_type.into();
flags |= rw.into();
self._buffer_map(res, offset, size, flags)
}
pub fn buffer_map_directly(
&self,
res: &PipeResource,
offset: i32,
size: i32,
rw: RWFlags,
) -> Option<PipeTransfer> {
let flags =
pipe_map_flags::PIPE_MAP_DIRECTLY | pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED | rw.into();
self._buffer_map(res, offset, size, flags)
self.resource_map(res, &b, rw.into(), true)
}
pub(super) fn buffer_unmap(&self, tx: *mut pipe_transfer) {
unsafe { self.pipe.as_ref().buffer_unmap.unwrap()(self.pipe.as_ptr(), tx) };
}
pub fn _texture_map(
&self,
res: &PipeResource,
bx: &pipe_box,
flags: pipe_map_flags,
) -> Option<PipeTransfer> {
self.resource_map(res, bx, flags, false)
}
pub fn texture_map(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
map_type: ResourceMapType,
) -> Option<PipeTransfer> {
let mut flags: pipe_map_flags = map_type.into();
flags |= rw.into();
self._texture_map(res, bx, flags)
}
pub fn texture_map_directly(
&self,
res: &PipeResource,
bx: &pipe_box,
rw: RWFlags,
) -> Option<PipeTransfer> {
let flags =
pipe_map_flags::PIPE_MAP_DIRECTLY | pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED | rw.into();
self.resource_map(res, bx, flags, false)
self.resource_map(res, bx, rw.into(), false)
}
pub(super) fn texture_unmap(&self, tx: *mut pipe_transfer) {

View file

@ -2,46 +2,38 @@ use crate::pipe::context::*;
use mesa_rust_gen::*;
use std::ops::Deref;
use std::os::raw::c_void;
use std::ptr;
pub struct PipeTransfer {
pub struct PipeTransfer<'a> {
pipe: *mut pipe_transfer,
res: *mut pipe_resource,
ptr: *mut c_void,
is_buffer: bool,
}
// SAFETY: Transfers are safe to send between threads
unsafe impl Send for PipeTransfer {}
pub struct GuardedPipeTransfer<'a> {
inner: PipeTransfer,
ctx: &'a PipeContext,
}
impl<'a> Deref for GuardedPipeTransfer<'a> {
type Target = PipeTransfer;
// SAFETY: Transfers are safe to send between threads
unsafe impl Send for PipeTransfer<'_> {}
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a> Drop for GuardedPipeTransfer<'a> {
impl<'a> Drop for PipeTransfer<'a> {
fn drop(&mut self) {
if self.is_buffer {
self.ctx.buffer_unmap(self.inner.pipe);
self.ctx.buffer_unmap(self.pipe);
} else {
self.ctx.texture_unmap(self.inner.pipe);
self.ctx.texture_unmap(self.pipe);
}
unsafe { pipe_resource_reference(&mut self.inner.res, ptr::null_mut()) };
unsafe { pipe_resource_reference(&mut self.res, ptr::null_mut()) };
}
}
impl PipeTransfer {
pub(super) fn new(is_buffer: bool, pipe: *mut pipe_transfer, ptr: *mut c_void) -> Self {
impl<'a> PipeTransfer<'a> {
pub(super) fn new(
ctx: &'a PipeContext,
is_buffer: bool,
pipe: *mut pipe_transfer,
ptr: *mut c_void,
) -> Self {
let mut res: *mut pipe_resource = ptr::null_mut();
unsafe { pipe_resource_reference(&mut res, (*pipe).resource) }
@ -50,6 +42,7 @@ impl PipeTransfer {
res: res,
ptr: ptr,
is_buffer: is_buffer,
ctx: ctx,
}
}
@ -68,18 +61,4 @@ impl PipeTransfer {
pub fn bx(&self) -> &pipe_box {
unsafe { &(*self.pipe).box_ }
}
pub fn with_ctx(self, ctx: &PipeContext) -> GuardedPipeTransfer {
GuardedPipeTransfer {
inner: self,
ctx: ctx,
}
}
}
// use set_ctx before operating on the PipeTransfer inside a block where it gets dropped
impl Drop for PipeTransfer {
fn drop(&mut self) {
assert_eq!(ptr::null_mut(), self.res);
}
}