sys_lwmutex/mutex: Fix race on lock timeout

commit b844aecb9e
parent f57c8c1c35
Author: Eladash
Date: 2023-03-18 11:36:55 +02:00
Committed by: Ivan

5 changed files with 95 additions and 29 deletions


@@ -236,12 +236,38 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			std::lock_guard lock(mutex->mutex);
 
-			if (!mutex->unqueue(mutex->lv2_control.raw().sq, &ppu))
+			bool success = false;
+
+			mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t& data)
 			{
-				break;
+				success = false;
+
+				ppu_thread* sq = static_cast<ppu_thread*>(data.sq);
+
+				const bool retval = &ppu == sq;
+
+				if (!mutex->unqueue<false>(sq, &ppu))
+				{
+					return false;
+				}
+
+				success = true;
+
+				if (!retval)
+				{
+					return false;
+				}
+
+				data.sq = sq;
+				return true;
+			});
+
+			if (success)
+			{
+				ppu.next_cpu = nullptr;
+				ppu.gpr[3] = CELL_ETIMEDOUT;
 			}
 
-			ppu.gpr[3] = CELL_ETIMEDOUT;
 			break;
 		}
 	}
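
The dequeue now happens inside lv2_control.fetch_op, whose callback can run more than once if the underlying compare-and-swap loses a race; hence success is reset to false at the top of every attempt, and all externally visible writes (ppu.next_cpu, ppu.gpr[3]) are deferred until the operation has committed. A minimal sketch of that retry discipline over std::atomic (fetch_op_like is an illustrative stand-in, not RPCS3's atomic API):

    #include <atomic>
    #include <utility>

    // Minimal stand-in for a fetch_op-style helper: apply `op` to a copy of
    // the current value and try to publish it with compare_exchange. `op` may
    // run several times, so it must not leak side effects from lost attempts.
    template <typename T, typename F>
    std::pair<T, bool> fetch_op_like(std::atomic<T>& var, F op)
    {
        T old = var.load();

        while (true)
        {
            T copy = old;

            if (!op(copy))
            {
                return {old, false}; // op declined to store a new value
            }

            if (var.compare_exchange_weak(old, copy))
            {
                return {old, true}; // published exactly once
            }
            // CAS lost a race: `old` was refreshed, rerun op on fresh data
        }
    }

Without the re-initialization, a success = true left over from a lost attempt could report CELL_ETIMEDOUT for a waiter that the unlocker had in fact already dequeued, which is the kind of timeout/unlock race this commit closes.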


@@ -152,21 +152,15 @@ struct lv2_lwmutex final : lv2_obj
 	template <typename T>
 	T* reown(bool unlock2 = false)
 	{
-		T* res{};
-		T* restore_next{};
+		T* res = nullptr;
 
 		lv2_control.fetch_op([&](control_data_t& data)
 		{
-			if (res)
-			{
-				res->next_cpu = restore_next;
-				res = nullptr;
-			}
+			res = nullptr;
 
 			if (auto sq = static_cast<T*>(data.sq))
 			{
-				restore_next = sq->next_cpu;
-				res = schedule<T>(data.sq, protocol);
+				res = schedule<T>(data.sq, protocol, false);
 
 				if (sq == data.sq)
 				{
@@ -182,6 +176,12 @@ struct lv2_lwmutex final : lv2_obj
 			}
 		});
 
+		if (res && cpu_flag::again - res->state)
+		{
+			// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
+			res->next_cpu = nullptr;
+		}
+
 		return res;
 	}
 };
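
Previously reown() wrote into the scheduled node during the loop (restore_next) and undid that write at the top of the next attempt if the compare-and-swap failed; now schedule<T>(data.sq, protocol, false) leaves the node untouched and the single detach happens after fetch_op returns. A sketch of that "mutate only after the CAS wins" shape (node and pop_head are illustrative names, and the sketch assumes some outer synchronization keeps the popped node alive while the loop races):

    #include <atomic>

    struct node
    {
        node* next = nullptr;
    };

    node* pop_head(std::atomic<node*>& head)
    {
        node* h = head.load();

        // While the exchange keeps losing, `h` is refreshed and no node is
        // written, so the list stays walkable for every retry.
        while (h && !head.compare_exchange_weak(h, h->next))
        {
        }

        if (h)
        {
            h->next = nullptr; // detach once, with exclusive ownership
        }

        return h;
    }

Moving the single res->next_cpu = nullptr write after fetch_op removes the undo dance along with its window where other walkers could observe a half-patched queue.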


@@ -245,12 +245,38 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 			std::lock_guard lock(mutex->mutex);
 
-			if (!mutex->unqueue(mutex->control.raw().sq, &ppu))
+			bool success = false;
+
+			mutex->control.fetch_op([&](lv2_mutex::control_data_t& data)
 			{
-				break;
+				success = false;
+
+				ppu_thread* sq = static_cast<ppu_thread*>(data.sq);
+
+				const bool retval = &ppu == sq;
+
+				if (!mutex->unqueue<false>(sq, &ppu))
+				{
+					return false;
+				}
+
+				success = true;
+
+				if (!retval)
+				{
+					return false;
+				}
+
+				data.sq = sq;
+				return true;
+			});
+
+			if (success)
+			{
+				ppu.next_cpu = nullptr;
+				ppu.gpr[3] = CELL_ETIMEDOUT;
 			}
 
-			ppu.gpr[3] = CELL_ETIMEDOUT;
 			break;
 		}
 	}
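
As in the lwmutex path, the lambda distinguishes three outcomes: the waiter was already gone (success stays false, so ETIMEDOUT is suppressed and the wakeup wins), it sat mid-list (success is set but retval is false: the unlink went through a node pointer, so no new control value needs storing), or it was the head (retval is true: the advanced sq is written back through data.sq and the lambda returns true to commit it). A single-threaded simplification of that classification (waiter and try_cancel_wait are illustrative names, not RPCS3 code; the real code layers this inside fetch_op and the object's lock):

    struct waiter
    {
        waiter* next = nullptr;
    };

    // Returns true if the waiter was still queued (timeout may be reported),
    // false if an unlocker already dequeued it (the wakeup wins the race).
    bool try_cancel_wait(waiter*& head, waiter& self)
    {
        if (head == &self)
        {
            // Head case: the new head value must be published through the
            // control word (hence return true from the fetch_op lambda).
            head = self.next;
            return true;
        }

        for (waiter* it = head; it; it = it->next)
        {
            if (it->next == &self)
            {
                // Mid-list case: only a node link changes; the control word
                // keeps its value (return false, but success stays true).
                it->next = self.next;
                return true;
            }
        }

        return false; // not found: lost the race against the unlock path
    }

In both true cases the caller clears its own forward pointer afterwards, matching the deferred ppu.next_cpu = nullptr above.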


@@ -157,20 +157,14 @@ struct lv2_mutex final : lv2_obj
 	T* reown()
 	{
 		T* res{};
-		T* restore_next{};
 
 		control.fetch_op([&](control_data_t& data)
 		{
-			if (res)
-			{
-				res->next_cpu = restore_next;
-				res = nullptr;
-			}
+			res = nullptr;
 
 			if (auto sq = static_cast<T*>(data.sq))
 			{
-				restore_next = sq->next_cpu;
-				res = schedule<T>(data.sq, protocol);
+				res = schedule<T>(data.sq, protocol, false);
 
 				if (sq == data.sq)
 				{
@@ -188,6 +182,12 @@ struct lv2_mutex final : lv2_obj
 			}
 		});
 
+		if (res && cpu_flag::again - res->state)
+		{
+			// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
+			res->next_cpu = nullptr;
+		}
+
 		return res;
 	}
 };


@@ -106,7 +106,7 @@ public:
 	}
 
 	// Find and remove the object from the linked list
-	template <typename T>
+	template <bool ModifyNode = true, typename T>
 	static T* unqueue(T*& first, T* object, T* T::* mem_ptr = &T::next_cpu)
 	{
 		auto it = +first;
@@ -114,7 +114,12 @@ public:
 		if (it == object)
 		{
 			atomic_storage<T*>::release(first, it->*mem_ptr);
-			atomic_storage<T*>::release(it->*mem_ptr, nullptr);
+
+			if constexpr (ModifyNode)
+			{
+				atomic_storage<T*>::release(it->*mem_ptr, nullptr);
+			}
+
 			return it;
 		}
@@ -125,7 +130,12 @@ public:
 			if (next == object)
 			{
 				atomic_storage<T*>::release(it->*mem_ptr, next->*mem_ptr);
-				atomic_storage<T*>::release(next->*mem_ptr, nullptr);
+
+				if constexpr (ModifyNode)
+				{
+					atomic_storage<T*>::release(next->*mem_ptr, nullptr);
+				}
+
 				return next;
 			}
@@ -137,7 +147,7 @@ public:
 	// Remove an object from the linked set according to the protocol
 	template <typename E, typename T>
-	static E* schedule(T& first, u32 protocol)
+	static E* schedule(T& first, u32 protocol, bool modify_node = true)
 	{
 		auto it = static_cast<E*>(first);
@@ -161,7 +171,7 @@ public:
 				continue;
 			}
 
-			if (it && cpu_flag::again - it->state)
+			if (cpu_flag::again - it->state)
 			{
 				atomic_storage<T>::release(*parent_found, nullptr);
 			}
@@ -199,7 +209,11 @@ public:
 		if (cpu_flag::again - found->state)
 		{
 			atomic_storage<T>::release(*parent_found, found->next_cpu);
-			atomic_storage<T>::release(found->next_cpu, nullptr);
+
+			if (modify_node)
+			{
+				atomic_storage<T>::release(found->next_cpu, nullptr);
+			}
 		}
 
 		return found;
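
unqueue gains a ModifyNode template flag and schedule a modify_node runtime flag so callers inside a fetch_op retry loop can unlink a node without clearing its forward pointer; the caller detaches it after the operation commits. A self-contained, non-atomic sketch of the same idea (unqueue_sketch and the next member are illustrative stand-ins):

    // Intrusive singly-linked list removal with a compile-time flag that
    // controls whether the removed node's own link is cleared.
    template <bool ModifyNode = true, typename T>
    T* unqueue_sketch(T*& first, T* object, T* T::* mem_ptr = &T::next)
    {
        for (T** it = &first; *it; it = &((*it)->*mem_ptr))
        {
            if (*it == object)
            {
                *it = object->*mem_ptr; // unlink from predecessor (or head)

                if constexpr (ModifyNode)
                {
                    object->*mem_ptr = nullptr; // eagerly detach the node
                }
                // With ModifyNode = false the node keeps its forward link,
                // so a caller inside a CAS retry loop can still re-walk it.

                return object;
            }
        }

        return nullptr;
    }

unqueue_sketch<false>(head, &node) removes node but leaves node.next intact, mirroring how the lock paths above call mutex->unqueue<false>(sq, &ppu) and only execute ppu.next_cpu = nullptr once fetch_op has succeeded.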