mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-07 00:38:48 +02:00
rusticl/event: change Queue argument to Context in EventSig
Will allow us to drop the strong reference to the queue in events.

Reviewed-by: Adam Jackson <ajax@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35672>
This commit is contained in:
parent
f03f79d9c6
commit
815373fe62
3 changed files with 36 additions and 37 deletions
|
|
@@ -2449,17 +2449,17 @@ fn enqueue_svm_free_impl(
|
|||
let cb_opt = unsafe { SVMFreeCb::new(pfn_free_func, user_data) }.ok();
|
||||
|
||||
create_and_queue(
|
||||
q,
|
||||
Arc::clone(&q),
|
||||
cmd_type,
|
||||
evs,
|
||||
event,
|
||||
false,
|
||||
Box::new(move |q, _| {
|
||||
Box::new(move |cl_ctx, _| {
|
||||
if let Some(cb) = cb_opt {
|
||||
cb.call(q, &mut svm_pointers);
|
||||
cb.call(&q, &mut svm_pointers);
|
||||
} else {
|
||||
for ptr in svm_pointers {
|
||||
q.context.remove_svm_ptr(ptr);
|
||||
cl_ctx.remove_svm_ptr(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -2558,7 +2558,7 @@ fn enqueue_svm_memcpy_impl(
|
|||
evs,
|
||||
event,
|
||||
block,
|
||||
Box::new(move |q, ctx| q.context.copy_svm(ctx, src_ptr, dst_ptr, size)),
|
||||
Box::new(move |cl_ctx, ctx| cl_ctx.copy_svm(ctx, src_ptr, dst_ptr, size)),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@@ -2702,7 +2702,7 @@ fn enqueue_svm_mem_fill_impl(
|
|||
let pattern = unsafe { pattern_ptr.read_unaligned() };
|
||||
let svm_ptr = svm_ptr as usize;
|
||||
|
||||
Box::new(move |q, ctx| q.context.clear_svm(ctx, svm_ptr, size, pattern.0))
|
||||
Box::new(move |cl_ctx, ctx| cl_ctx.clear_svm(ctx, svm_ptr, size, pattern.0))
|
||||
}};
|
||||
}
|
||||
|
||||
|
|
@@ -2814,7 +2814,7 @@ fn enqueue_svm_map_impl(
|
|||
evs,
|
||||
event,
|
||||
block,
|
||||
Box::new(move |q, ctx| q.context.copy_svm_to_host(ctx, svm_ptr, flags)),
|
||||
Box::new(move |cl_ctx, ctx| cl_ctx.copy_svm_to_host(ctx, svm_ptr, flags)),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@@ -2999,9 +2999,8 @@ fn enqueue_svm_migrate_mem(
|
|||
evs,
|
||||
event,
|
||||
false,
|
||||
Box::new(move |q, ctx| {
|
||||
q.context
|
||||
.migrate_svm(ctx, svm_pointers, sizes, to_device, content_undefined)
|
||||
Box::new(move |cl_ctx, ctx| {
|
||||
cl_ctx.migrate_svm(ctx, svm_pointers, sizes, to_device, content_undefined)
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,7 @@ static_assert!(CL_RUNNING == 1);
|
|||
static_assert!(CL_SUBMITTED == 2);
|
||||
static_assert!(CL_QUEUED == 3);
|
||||
|
||||
pub type EventSig = Box<dyn FnOnce(&Arc<Queue>, &QueueContext) -> CLResult<()> + Send + Sync>;
|
||||
pub type EventSig = Box<dyn FnOnce(&Context, &QueueContext) -> CLResult<()> + Send + Sync>;
|
||||
|
||||
pub enum EventTimes {
|
||||
Queued = CL_PROFILING_COMMAND_QUEUED as isize,
|
||||
|
|
@@ -240,7 +240,7 @@ impl Event {
|
|||
PipeQueryGen::<{ pipe_query_type::PIPE_QUERY_TIMESTAMP }>::new(ctx);
|
||||
}
|
||||
|
||||
let res = w(queue, ctx).err().map_or(
|
||||
let res = w(&self.context, ctx).err().map_or(
|
||||
// return the error if there is one
|
||||
CL_SUBMITTED as cl_int,
|
||||
|e| e,
|
||||
|
|
|
|||
|
|
@@ -1365,8 +1365,8 @@ impl Kernel {
|
|||
|
||||
self.optimize_local_size(q.device, &mut grid, &mut block);
|
||||
|
||||
Ok(Box::new(move |q, ctx| {
|
||||
let hw_max_grid = q.device.max_grid_size();
|
||||
Ok(Box::new(move |cl_ctx, ctx| {
|
||||
let hw_max_grid = ctx.dev.max_grid_size();
|
||||
|
||||
let variant = if offsets == [0; 3]
|
||||
&& grid[0] <= hw_max_grid[0]
|
||||
|
|
@@ -1386,7 +1386,7 @@ impl Kernel {
|
|||
// Set it once so we get the alignment padding right
|
||||
let static_local_size: u64 = nir_kernel_build.shared_size;
|
||||
let mut variable_local_size: u64 = static_local_size;
|
||||
let printf_size = q.device.printf_buffer_size() as u32;
|
||||
let printf_size = ctx.dev.printf_buffer_size() as u32;
|
||||
let mut samplers = Vec::new();
|
||||
let mut iviews = Vec::new();
|
||||
let mut sviews = Vec::new();
|
||||
|
|
@@ -1397,7 +1397,7 @@ impl Kernel {
|
|||
|
||||
let null_ptr;
|
||||
let null_ptr_v3;
|
||||
if q.device.address_bits() == 64 {
|
||||
if ctx.dev.address_bits() == 64 {
|
||||
null_ptr = [0u8; 8].as_slice();
|
||||
null_ptr_v3 = [0u8; 24].as_slice();
|
||||
} else {
|
||||
|
|
@@ -1406,8 +1406,8 @@ impl Kernel {
|
|||
};
|
||||
|
||||
let mut resource_info = Vec::new();
|
||||
fn add_pointer(q: &Queue, input: &mut Vec<u8>, address: u64) {
|
||||
if q.device.address_bits() == 64 {
|
||||
fn add_pointer(ctx: &QueueContext, input: &mut Vec<u8>, address: u64) {
|
||||
if ctx.dev.address_bits() == 64 {
|
||||
let address: u64 = address;
|
||||
input.extend_from_slice(&address.to_ne_bytes());
|
||||
} else {
|
||||
|
|
@@ -1417,18 +1417,18 @@ impl Kernel {
|
|||
}
|
||||
|
||||
fn add_global<'a>(
|
||||
q: &Queue,
|
||||
ctx: &QueueContext,
|
||||
input: &mut Vec<u8>,
|
||||
resource_info: &mut Vec<(&'a PipeResource, usize)>,
|
||||
res: &'a PipeResource,
|
||||
offset: usize,
|
||||
) {
|
||||
resource_info.push((res, input.len()));
|
||||
add_pointer(q, input, offset as u64);
|
||||
add_pointer(ctx, input, offset as u64);
|
||||
}
|
||||
|
||||
fn add_sysval(q: &Queue, input: &mut Vec<u8>, vals: &[usize; 3]) {
|
||||
if q.device.address_bits() == 64 {
|
||||
fn add_sysval(ctx: &QueueContext, input: &mut Vec<u8>, vals: &[usize; 3]) {
|
||||
if ctx.dev.address_bits() == 64 {
|
||||
input.extend_from_slice(unsafe { as_byte_slice(&vals.map(|v| v as u64)) });
|
||||
} else {
|
||||
input.extend_from_slice(unsafe { as_byte_slice(&vals.map(|v| v as u32)) });
|
||||
|
|
@@ -1437,8 +1437,8 @@ impl Kernel {
|
|||
|
||||
let mut printf_buf = None;
|
||||
if nir_kernel_build.printf_info.is_some() {
|
||||
let buf = q
|
||||
.device
|
||||
let buf = ctx
|
||||
.dev
|
||||
.screen
|
||||
.resource_create_buffer(printf_size, ResourceType::Staging, PIPE_BIND_GLOBAL, 0)
|
||||
.unwrap();
|
||||
|
|
@@ -1452,7 +1452,7 @@ impl Kernel {
|
|||
// translate SVM pointers to their base first
|
||||
let mut svms: HashSet<_> = svms
|
||||
.into_iter()
|
||||
.filter_map(|svm_pointer| Some(q.context.find_svm_alloc(svm_pointer)?.0 as usize))
|
||||
.filter_map(|svm_pointer| Some(cl_ctx.find_svm_alloc(svm_pointer)?.0 as usize))
|
||||
.collect();
|
||||
|
||||
for arg in &nir_kernel_build.compiled_args {
|
||||
|
|
@@ -1478,7 +1478,7 @@ impl Kernel {
|
|||
KernelArgValue::BDA(address) => {
|
||||
bdas.push(*address);
|
||||
if !api_arg.dead {
|
||||
add_pointer(q, &mut input, *address);
|
||||
add_pointer(ctx, &mut input, *address);
|
||||
}
|
||||
}
|
||||
KernelArgValue::Buffer(buffer) => {
|
||||
|
|
@@ -1505,7 +1505,7 @@ impl Kernel {
|
|||
} else {
|
||||
let res = buffer.get_res_for_access(ctx, rw)?;
|
||||
add_global(
|
||||
q,
|
||||
ctx,
|
||||
&mut input,
|
||||
&mut resource_info,
|
||||
res,
|
||||
|
|
@@ -1515,12 +1515,12 @@ impl Kernel {
|
|||
}
|
||||
&KernelArgValue::SVM(handle) => {
|
||||
// get the base address so we deduplicate properly
|
||||
if let Some((base, _)) = q.context.find_svm_alloc(handle) {
|
||||
if let Some((base, _)) = cl_ctx.find_svm_alloc(handle) {
|
||||
svms.insert(base as usize);
|
||||
}
|
||||
|
||||
if !api_arg.dead {
|
||||
add_pointer(q, &mut input, handle as u64);
|
||||
add_pointer(ctx, &mut input, handle as u64);
|
||||
}
|
||||
}
|
||||
KernelArgValue::Image(image) => {
|
||||
|
|
@@ -1549,7 +1549,7 @@ impl Kernel {
|
|||
let pot = cmp::min(*size, 0x80);
|
||||
variable_local_size = variable_local_size
|
||||
.next_multiple_of(pot.next_power_of_two() as u64);
|
||||
if q.device.address_bits() == 64 {
|
||||
if ctx.dev.address_bits() == 64 {
|
||||
let variable_local_size: [u8; 8] =
|
||||
variable_local_size.to_ne_bytes();
|
||||
input.extend_from_slice(&variable_local_size);
|
||||
|
|
@@ -1578,21 +1578,21 @@ impl Kernel {
|
|||
CompiledKernelArgType::ConstantBuffer => {
|
||||
assert!(nir_kernel_build.constant_buffer.is_some());
|
||||
let res = nir_kernel_build.constant_buffer.as_ref().unwrap();
|
||||
add_global(q, &mut input, &mut resource_info, res, 0);
|
||||
add_global(ctx, &mut input, &mut resource_info, res, 0);
|
||||
}
|
||||
CompiledKernelArgType::GlobalWorkOffsets => {
|
||||
add_sysval(q, &mut input, &offsets);
|
||||
add_sysval(ctx, &mut input, &offsets);
|
||||
}
|
||||
CompiledKernelArgType::WorkGroupOffsets => {
|
||||
workgroup_id_offset_loc = Some(input.len());
|
||||
input.extend_from_slice(null_ptr_v3);
|
||||
}
|
||||
CompiledKernelArgType::GlobalWorkSize => {
|
||||
add_sysval(q, &mut input, &api_grid);
|
||||
add_sysval(ctx, &mut input, &api_grid);
|
||||
}
|
||||
CompiledKernelArgType::PrintfBuffer => {
|
||||
let res = printf_buf.as_ref().unwrap();
|
||||
add_global(q, &mut input, &mut resource_info, res, 0);
|
||||
add_global(ctx, &mut input, &mut resource_info, res, 0);
|
||||
}
|
||||
CompiledKernelArgType::InlineSampler(cl) => {
|
||||
samplers.push(Sampler::cl_to_pipe(cl));
|
||||
|
|
@@ -1621,7 +1621,7 @@ impl Kernel {
|
|||
.into_iter()
|
||||
// Ignore invalid pointers as they are legal to be passed in, but illegal to
|
||||
// dereference.
|
||||
.filter_map(|address| q.context.find_bda_alloc(q.device, address))
|
||||
.filter_map(|address| cl_ctx.find_bda_alloc(ctx.dev, address))
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let mut bdas: Vec<_> = bdas
|
||||
|
|
@@ -1631,7 +1631,7 @@ impl Kernel {
|
|||
|
||||
let svms_new = svms
|
||||
.into_iter()
|
||||
.filter_map(|svm| q.context.copy_svm_to_dev(ctx, svm).transpose())
|
||||
.filter_map(|svm| cl_ctx.copy_svm_to_dev(ctx, svm).transpose())
|
||||
.collect::<CLResult<Vec<_>>>()?;
|
||||
|
||||
// uhhh
|
||||
|
|
@@ -1668,7 +1668,7 @@ impl Kernel {
|
|||
let this_offsets =
|
||||
[x * hw_max_grid[0], y * hw_max_grid[1], z * hw_max_grid[2]];
|
||||
|
||||
if q.device.address_bits() == 64 {
|
||||
if ctx.dev.address_bits() == 64 {
|
||||
let val = this_offsets.map(|v| v as u64);
|
||||
input[workgroup_id_offset_loc..workgroup_id_offset_loc + 24]
|
||||
.copy_from_slice(unsafe { as_byte_slice(&val) });
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue