mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 07:18:17 +02:00
Revert "rusticl/event: use Weak refs for dependencies"
I didn't like the solution and I _think_ it even introduced a potential regression involving releasing failed events, which caused dependents to run and succeed regardless. This reverts commit a45f199086. Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29190> (cherry picked from commit 2f1f98e846)
This commit is contained in:
parent
eac0e52cdb
commit
11f595f5e7
3 changed files with 11 additions and 19 deletions
|
|
@ -2274,7 +2274,7 @@
|
|||
"description": "Revert \"rusticl/event: use Weak refs for dependencies\"",
|
||||
"nominated": false,
|
||||
"nomination_type": 2,
|
||||
"resolution": 4,
|
||||
"resolution": 1,
|
||||
"main_sha": null,
|
||||
"because_sha": "a45f1990860db3a8da6d7251bb627a314dfb8423",
|
||||
"notes": null
|
||||
|
|
|
|||
|
|
@ -15,7 +15,6 @@ use std::sync::Arc;
|
|||
use std::sync::Condvar;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::MutexGuard;
|
||||
use std::sync::Weak;
|
||||
use std::time::Duration;
|
||||
|
||||
// we assert that those are a continuous range of numbers so we won't have to use HashMaps
|
||||
|
|
@ -49,8 +48,7 @@ pub struct Event {
|
|||
pub context: Arc<Context>,
|
||||
pub queue: Option<Arc<Queue>>,
|
||||
pub cmd_type: cl_command_type,
|
||||
// using a Weak ref so we don't cause stack overflows in the `drop` impl
|
||||
pub deps: Vec<Weak<Event>>,
|
||||
pub deps: Vec<Arc<Event>>,
|
||||
state: Mutex<EventMutState>,
|
||||
cv: Condvar,
|
||||
}
|
||||
|
|
@ -68,7 +66,6 @@ impl Event {
|
|||
deps: Vec<Arc<Event>>,
|
||||
work: EventSig,
|
||||
) -> Arc<Event> {
|
||||
let deps = deps.iter().map(Arc::downgrade).collect();
|
||||
Arc::new(Self {
|
||||
base: CLObjectBase::new(),
|
||||
context: queue.context.clone(),
|
||||
|
|
@ -241,18 +238,14 @@ impl Event {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn deps(&self) -> impl Iterator<Item = Arc<Self>> + Clone + '_ {
|
||||
self.deps.iter().filter_map(Weak::upgrade)
|
||||
}
|
||||
|
||||
fn deep_unflushed_deps_impl(self: &Arc<Self>, result: &mut HashSet<Arc<Event>>) {
|
||||
fn deep_unflushed_deps_impl<'a>(&'a self, result: &mut HashSet<&'a Event>) {
|
||||
if self.status() <= CL_SUBMITTED as i32 {
|
||||
return;
|
||||
}
|
||||
|
||||
// only scan dependencies if it's a new one
|
||||
if result.insert(Arc::clone(self)) {
|
||||
for e in self.deps() {
|
||||
if result.insert(self) {
|
||||
for e in &self.deps {
|
||||
e.deep_unflushed_deps_impl(result);
|
||||
}
|
||||
}
|
||||
|
|
@ -260,7 +253,7 @@ impl Event {
|
|||
|
||||
/// does a deep search and returns a list of all dependencies including `events` which haven't
|
||||
/// been flushed out yet
|
||||
pub fn deep_unflushed_deps(events: &[Arc<Event>]) -> HashSet<Arc<Event>> {
|
||||
pub fn deep_unflushed_deps(events: &[Arc<Event>]) -> HashSet<&Event> {
|
||||
let mut result = HashSet::new();
|
||||
|
||||
for e in events {
|
||||
|
|
|
|||
|
|
@ -127,17 +127,16 @@ impl Queue {
|
|||
let mut flushed = Vec::new();
|
||||
|
||||
for e in new_events {
|
||||
let deps_iter = e.deps();
|
||||
|
||||
// If we hit any deps from another queue, flush so we don't risk a dead
|
||||
// lock. Also clone the iter here as we'll iterate again later
|
||||
if deps_iter.clone().any(|ev| ev.queue != e.queue) {
|
||||
// this flush _has_ to happen before we wait on any of the deps
|
||||
// lock.
|
||||
if e.deps.iter().any(|ev| ev.queue != e.queue) {
|
||||
flush_events(&mut flushed, &ctx);
|
||||
}
|
||||
|
||||
// We have to wait on user events or events from other queues.
|
||||
let err = deps_iter
|
||||
let err = e
|
||||
.deps
|
||||
.iter()
|
||||
.filter(|ev| ev.is_user() || ev.queue != e.queue)
|
||||
.map(|e| e.wait())
|
||||
.find(|s| *s < 0);
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue