memory: fix CAtomicSharedPointer mutex unlock and lifetime issues (#68)

This commit is contained in:
Maximilian Seidler 2025-07-21 21:09:54 +02:00 committed by GitHub
parent bcabcbada9
commit b074d4abc7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@ -35,6 +35,11 @@ namespace Hyprutils::Memory {
// Acquires the control block's recursive mutex for the caller's scope (RAII);
// released automatically when the returned guard is destroyed.
std::lock_guard<std::recursive_mutex> lockGuard() {
return std::lock_guard<std::recursive_mutex>(m_mutex);
}
// Exposes the raw mutex for callers that must control unlock order manually —
// needed when unlock order or mutex lifetime matters, e.g. when the control
// block (and this mutex with it) may be destroyed before a scoped guard
// would release the lock.
std::recursive_mutex& getMutex() {
return m_mutex;
}
};
}
@ -87,19 +92,12 @@ namespace Hyprutils::Memory {
}
~CAtomicSharedPointer() {
if (!m_ptr.impl_)
return;
auto lg = implLockGuard();
m_ptr.reset();
reset();
}
template <typename U>
validHierarchy<const CAtomicSharedPointer<U>&> operator=(const CAtomicSharedPointer<U>& rhs) {
if (m_ptr.impl_) {
auto lg = implLockGuard();
m_ptr.reset();
}
reset();
if (!rhs.m_ptr.impl_)
return *this;
@ -113,10 +111,7 @@ namespace Hyprutils::Memory {
if (this == &rhs)
return *this;
if (m_ptr.impl_) {
auto lg = implLockGuard();
m_ptr.reset();
}
reset();
if (!rhs.m_ptr.impl_)
return *this;
@ -144,8 +139,43 @@ namespace Hyprutils::Memory {
if (!m_ptr.impl_)
return;
auto lg = implLockGuard();
// last ref and last wref?
// -> must unlock BEFORE reset
// not last ref?
// -> must unlock AFTER reset
auto& mutex = ((Atomic_::impl<T>*)m_ptr.impl_)->getMutex();
mutex.lock();
if (m_ptr.impl_->ref() > 1) {
m_ptr.reset();
mutex.unlock();
return;
}
if (m_ptr.impl_->wref() == 0) {
mutex.unlock(); // Don't hold the mutex when destroying it
m_ptr.reset();
// mutex invalid
return;
} else {
// When the control block gets destroyed, the mutex is destroyed with it.
// Thus we must avoid attempting an unlock after impl_ has been destroyed.
// Without this workaround there is no safe way of checking whether it has been destroyed or not.
//
// To avoid this altogether, keep a weak pointer here.
// This guarantees that impl_ is still valid after the reset.
CWeakPointer<T> guard = m_ptr;
m_ptr.reset();
// Now we can safely check if guard is the last wref.
if (guard.impl_->wref() == 1) {
mutex.unlock();
return; // ~guard destroys impl_ and mutex
}
guard.reset();
mutex.unlock();
}
}
T& operator*() const {
@ -241,19 +271,12 @@ namespace Hyprutils::Memory {
}
~CAtomicWeakPointer() {
if (!m_ptr.impl_)
return;
auto lg = implLockGuard();
m_ptr.reset();
reset();
}
template <typename U>
validHierarchy<const CAtomicWeakPointer<U>&> operator=(const CAtomicWeakPointer<U>& rhs) {
if (m_ptr.impl_) {
auto lg = implLockGuard();
m_ptr.reset();
}
reset();
auto lg = rhs.implLockGuard();
m_ptr = rhs.m_ptr;
@ -264,10 +287,7 @@ namespace Hyprutils::Memory {
if (this == &rhs)
return *this;
if (m_ptr.impl_) {
auto lg = implLockGuard();
m_ptr.reset();
}
reset();
auto lg = rhs.implLockGuard();
m_ptr = rhs.m_ptr;
@ -292,8 +312,20 @@ namespace Hyprutils::Memory {
if (!m_ptr.impl_)
return;
auto lg = implLockGuard();
// last ref and last wref?
// -> must unlock BEFORE reset
// not last ref?
// -> must unlock AFTER reset
auto& mutex = ((Atomic_::impl<T>*)m_ptr.impl_)->getMutex();
if (m_ptr.impl_->ref() == 0 && m_ptr.impl_->wref() == 1) {
mutex.unlock();
m_ptr.reset();
// mutex invalid
return;
}
m_ptr.reset();
mutex.unlock();
}
T& operator*() const {