mirror of
https://github.com/skyline-emu/skyline.git
synced 2024-11-30 03:24:13 +01:00
Restructure ConditionalVariableSignal to avoid potential deadlock
Since InsertThread can block when the target thread is paused, we need to ensure syncWaiterMutex is unlocked before calling it.
This commit is contained in:
parent
f4a8328cef
commit
388245789f
@ -230,14 +230,15 @@ namespace skyline::kernel::type {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (timeout > 0 && !state.scheduler->TimedWaitSchedule(std::chrono::nanoseconds(timeout))) {
|
if (timeout > 0 && !state.scheduler->TimedWaitSchedule(std::chrono::nanoseconds(timeout))) {
|
||||||
std::unique_lock lock(syncWaiterMutex);
|
{
|
||||||
auto queue{syncWaiters.equal_range(key)};
|
std::unique_lock lock(syncWaiterMutex);
|
||||||
auto iterator{std::find(queue.first, queue.second, SyncWaiters::value_type{key, state.thread})};
|
auto queue{syncWaiters.equal_range(key)};
|
||||||
if (iterator != queue.second)
|
auto iterator{std::find(queue.first, queue.second, SyncWaiters::value_type{key, state.thread})};
|
||||||
if (syncWaiters.erase(iterator) == queue.second)
|
if (iterator != queue.second)
|
||||||
__atomic_store_n(key, false, __ATOMIC_SEQ_CST);
|
if (syncWaiters.erase(iterator) == queue.second)
|
||||||
|
__atomic_store_n(key, false, __ATOMIC_SEQ_CST);
|
||||||
|
|
||||||
lock.unlock();
|
}
|
||||||
state.scheduler->InsertThread(state.thread);
|
state.scheduler->InsertThread(state.thread);
|
||||||
state.scheduler->WaitSchedule();
|
state.scheduler->WaitSchedule();
|
||||||
|
|
||||||
@ -259,15 +260,25 @@ namespace skyline::kernel::type {
|
|||||||
/**
 * @brief Signals threads waiting on the conditional variable keyed by `key`
 * @param key Pointer to the 32-bit conditional-variable key; cleared once no waiters remain on it
 * @param amount The maximum number of waiting threads to signal; a non-positive amount signals all waiters
 * @note syncWaiterMutex is deliberately dropped before every InsertThread call — InsertThread can
 * block on a paused thread, and holding the mutex across that call could deadlock (TODO confirm
 * against Scheduler::InsertThread's locking contract)
 */
void KProcess::ConditionalVariableSignal(u32 *key, i32 amount) {
    TRACE_EVENT_FMT("kernel", "ConditionalVariableSignal 0x{:X}", key);

    i32 waiterCount{amount}; // Remaining signal budget; only consulted when amount > 0
    std::shared_ptr<type::KThread> thread;
    do {
        if (thread) {
            // Insert the thread dequeued on the previous iteration; the scoped_lock from that
            // iteration has already been destroyed, so syncWaiterMutex is not held here
            state.scheduler->InsertThread(thread);
            thread = {};
        }

        std::scoped_lock lock{syncWaiterMutex};
        auto queue{syncWaiters.equal_range(key)}; // Re-queried every iteration as the lock was dropped

        if (queue.first != queue.second && (amount <= 0 || waiterCount)) {
            // Dequeue one waiter under the lock; it's inserted into the scheduler on the next pass
            thread = queue.first->second;
            syncWaiters.erase(queue.first);
            waiterCount--;
        } else if (queue.first == queue.second) {
            __atomic_store_n(key, false, __ATOMIC_SEQ_CST); // We need to update the boolean flag denoting that there are no more threads waiting on this conditional variable
        }
    } while (thread); // An empty thread means either the queue is drained or the budget is exhausted
}
|
||||||
|
|
||||||
Result KProcess::WaitForAddress(u32 *address, u32 value, i64 timeout, ArbitrationType type) {
|
Result KProcess::WaitForAddress(u32 *address, u32 value, i64 timeout, ArbitrationType type) {
|
||||||
|
Loading…
Reference in New Issue
Block a user