diff --git a/src/gallium/frontends/rusticl/api/queue.rs b/src/gallium/frontends/rusticl/api/queue.rs
index 01e8e115639..ff1e09a00d7 100644
--- a/src/gallium/frontends/rusticl/api/queue.rs
+++ b/src/gallium/frontends/rusticl/api/queue.rs
@@ -212,13 +212,7 @@ fn flush(command_queue: cl_command_queue) -> CLResult<()> {
 #[cl_entrypoint]
 fn finish(command_queue: cl_command_queue) -> CLResult<()> {
     // CL_INVALID_COMMAND_QUEUE if command_queue is not a valid host command-queue.
-    let q = command_queue.get_ref()?;
-
-    for q in q.dependencies_for_pending_events() {
-        q.flush(false)?;
-    }
-
-    q.flush(true)
+    command_queue.get_ref()?.flush(true)
 }
 
 #[cl_entrypoint]
diff --git a/src/gallium/frontends/rusticl/core/queue.rs b/src/gallium/frontends/rusticl/core/queue.rs
index 8fa9e5fcd53..bd58247b7d9 100644
--- a/src/gallium/frontends/rusticl/core/queue.rs
+++ b/src/gallium/frontends/rusticl/core/queue.rs
@@ -9,7 +9,6 @@ use mesa_rust::pipe::context::PipeContext;
 use mesa_rust_util::properties::*;
 use rusticl_opencl_gen::*;
 
-use std::collections::HashSet;
 use std::mem;
 use std::sync::mpsc;
 use std::sync::Arc;
@@ -133,6 +132,7 @@ impl Queue {
     pub fn flush(&self, wait: bool) -> CLResult<()> {
         let mut state = self.state.lock().unwrap();
         let events = mem::take(&mut state.pending);
+        let mut queues = Event::deep_unflushed_queues(&events);
 
         // Update last if and only if we get new events, this prevents breaking application code
         // doing things like `clFlush(q); clFinish(q);`
@@ -146,23 +146,28 @@ impl Queue {
                 .map_err(|_| CL_OUT_OF_HOST_MEMORY)?;
         }
 
-        if wait {
+        let last = wait.then(|| state.last.clone());
+
+        // We have to unlock before actually flushing otherwise we'll run into dead locks when a
+        // queue gets flushed concurrently.
+        drop(state);
+
+        // We need to flush out other queues implicitly and this _has_ to happen after taking the
+        // pending events, otherwise we'll risk dead locks when waiting on events.
+        queues.remove(self);
+        for q in queues {
+            q.flush(false)?;
+        }
+
+        if let Some(last) = last {
             // Waiting on the last event is good enough here as the queue will process it in order
             // It's not a problem if the weak ref is invalid as that means the work is already done
            // and waiting isn't necessary anymore.
-            state.last.upgrade().map(|e| e.wait());
+            last.upgrade().map(|e| e.wait());
         }
         Ok(())
     }
 
-    pub fn dependencies_for_pending_events(&self) -> HashSet<Arc<Queue>> {
-        let state = self.state.lock().unwrap();
-
-        let mut queues = Event::deep_unflushed_queues(&state.pending);
-        queues.remove(self);
-        queues
-    }
-
     pub fn is_profiling_enabled(&self) -> bool {
         (self.props & (CL_QUEUE_PROFILING_ENABLE as u64)) != 0
     }