coreinit: Tweak JD2019 workaround to avoid XCX softlock

Exzap 2024-05-27 01:24:24 +02:00
parent aadd2f4a1a
commit 1ee9d5c78c
4 changed files with 13 additions and 12 deletions

View File

@@ -310,7 +310,7 @@ namespace coreinit
currentThread->mutexQueue.removeMutex(mutex);
mutex->owner = nullptr;
if (!mutex->threadQueue.isEmpty())
-mutex->threadQueue.wakeupSingleThreadWaitQueue(true);
+mutex->threadQueue.wakeupSingleThreadWaitQueue(true, true);
}
// currentThread->cancelState = currentThread->cancelState & ~0x10000;
}
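
This hunk is the contended branch of the mutex unlock path: the releasing thread clears ownership and wakes one waiter, now passing true for the new workaround flag in addition to reschedule. For context, a hedged guest-side sketch of the situation this targets (per the Just Dance 2019 comment in the next file); OSMutex, OSInitMutex, OSLockMutex and OSUnlockMutex are real coreinit names, while the two thread functions are invented and thread creation, affinity and priority setup are omitted:

// Two guest threads pinned to the same core with equal priority (setup omitted).
// Without a forced reschedule on unlock, the first thread can keep re-acquiring the
// mutex within its timeslice and the second thread never enters its critical section.
OSMutex workMutex; // assumed to be initialized elsewhere via OSInitMutex(&workMutex)

void busyThreadLoop() // hypothetical
{
    for (;;)
    {
        OSLockMutex(&workMutex);
        // ... short piece of work ...
        OSUnlockMutex(&workMutex); // after this commit, waking the waiter here may switch to it immediately
    }
}

void starvedThread() // hypothetical
{
    OSLockMutex(&workMutex); // previously could block indefinitely when sharing core and priority
    // ... work the game is waiting on ...
    OSUnlockMutex(&workMutex);
}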

View File

@@ -758,14 +758,14 @@ namespace coreinit
}
// returns true if thread runs on same core and has higher priority
-bool __OSCoreShouldSwitchToThread(OSThread_t* currentThread, OSThread_t* newThread)
+bool __OSCoreShouldSwitchToThread(OSThread_t* currentThread, OSThread_t* newThread, bool sharedPriorityAndAffinityWorkaround)
{
uint32 coreIndex = OSGetCoreId();
if (!newThread->context.hasCoreAffinitySet(coreIndex))
return false;
// special case: if current and new thread are running only on the same core then reschedule even if priority is equal
// this resolves a deadlock in Just Dance 2019 where one thread would always reacquire the same mutex within its timeslice, blocking another thread on the same core from acquiring it
-if ((1<<coreIndex) == newThread->context.affinity && currentThread->context.affinity == newThread->context.affinity && currentThread->effectivePriority == newThread->effectivePriority)
+if (sharedPriorityAndAffinityWorkaround && (1<<coreIndex) == newThread->context.affinity && currentThread->context.affinity == newThread->context.affinity && currentThread->effectivePriority == newThread->effectivePriority)
return true;
// otherwise reschedule if new thread has higher priority
return newThread->effectivePriority < currentThread->effectivePriority;
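
To make the effect of the new parameter explicit, here is a minimal standalone model of the decision above; ThreadModel and shouldSwitchTo are invented names, and it assumes (as the surrounding code suggests) that affinity is a bitmask of allowed cores and that a lower effectivePriority value means higher priority:

#include <cstdint>

struct ThreadModel
{
    uint32_t affinity;          // bitmask of cores the thread may run on
    int      effectivePriority; // lower value = higher priority
};

bool shouldSwitchTo(const ThreadModel& current, const ThreadModel& candidate,
                    uint32_t coreIndex, bool sharedPriorityAndAffinityWorkaround)
{
    if ((candidate.affinity & (1u << coreIndex)) == 0)
        return false; // candidate cannot run on this core
    // JD2019 workaround: both threads are pinned to exactly this core at equal
    // priority -> force a switch, but only when the caller opts in via the flag
    if (sharedPriorityAndAffinityWorkaround &&
        candidate.affinity == (1u << coreIndex) &&
        current.affinity == candidate.affinity &&
        current.effectivePriority == candidate.effectivePriority)
        return true;
    // default rule: switch only if the woken thread has strictly higher priority
    return candidate.effectivePriority < current.effectivePriority;
}

With the flag set to false the helper reduces to the plain priority comparison, which is exactly what the resume/suspend call sites in the following hunks now request.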
@@ -791,7 +791,7 @@ namespace coreinit
// todo - only set this once?
thread->wakeUpTime = PPCInterpreter_getMainCoreCycleCounter();
// reschedule if thread has higher priority
-if (PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread))
+if (PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread, false))
PPCCore_switchToSchedulerWithLock();
}
return previousSuspendCount;
@@ -948,7 +948,7 @@ namespace coreinit
OSThread_t* currentThread = OSGetCurrentThread();
if (currentThread && currentThread != thread)
{
-if (__OSCoreShouldSwitchToThread(currentThread, thread))
+if (__OSCoreShouldSwitchToThread(currentThread, thread, false))
PPCCore_switchToSchedulerWithLock();
}
__OSUnlockScheduler();

View File

@@ -126,8 +126,8 @@ namespace coreinit
// counterparts for queueAndWait
void cancelWait(OSThread_t* thread);
-void wakeupEntireWaitQueue(bool reschedule);
-void wakeupSingleThreadWaitQueue(bool reschedule);
+void wakeupEntireWaitQueue(bool reschedule, bool sharedPriorityAndAffinityWorkaround = false);
+void wakeupSingleThreadWaitQueue(bool reschedule, bool sharedPriorityAndAffinityWorkaround = false);
private:
OSThread_t* takeFirstFromQueue(size_t linkOffset)
@@ -611,7 +611,7 @@ namespace coreinit
// internal
void __OSAddReadyThreadToRunQueue(OSThread_t* thread);
-bool __OSCoreShouldSwitchToThread(OSThread_t* currentThread, OSThread_t* newThread);
+bool __OSCoreShouldSwitchToThread(OSThread_t* currentThread, OSThread_t* newThread, bool sharedPriorityAndAffinityWorkaround);
void __OSQueueThreadDeallocation(OSThread_t* thread);
bool __OSIsThreadActive(OSThread_t* thread);

View File

@@ -128,7 +128,8 @@ namespace coreinit
// counterpart for queueAndWait
// if reschedule is true then scheduler will switch to woken up thread (if it is runnable on the same core)
-void OSThreadQueueInternal::wakeupEntireWaitQueue(bool reschedule)
+// sharedPriorityAndAffinityWorkaround is currently a hack/placeholder for some special cases. A proper fix likely involves handling all the nuances of thread effective priority
+void OSThreadQueueInternal::wakeupEntireWaitQueue(bool reschedule, bool sharedPriorityAndAffinityWorkaround)
{
cemu_assert_debug(__OSHasSchedulerLock());
bool shouldReschedule = false;
@@ -139,7 +140,7 @@ namespace coreinit
thread->state = OSThread_t::THREAD_STATE::STATE_READY;
thread->currentWaitQueue = nullptr;
coreinit::__OSAddReadyThreadToRunQueue(thread);
-if (reschedule && thread->suspendCounter == 0 && PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread))
+if (reschedule && thread->suspendCounter == 0 && PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread, sharedPriorityAndAffinityWorkaround))
shouldReschedule = true;
}
if (shouldReschedule)
@@ -148,7 +149,7 @@ namespace coreinit
// counterpart for queueAndWait
// if reschedule is true then scheduler will switch to woken up thread (if it is runnable on the same core)
-void OSThreadQueueInternal::wakeupSingleThreadWaitQueue(bool reschedule)
+void OSThreadQueueInternal::wakeupSingleThreadWaitQueue(bool reschedule, bool sharedPriorityAndAffinityWorkaround)
{
cemu_assert_debug(__OSHasSchedulerLock());
OSThread_t* thread = takeFirstFromQueue(offsetof(OSThread_t, waitQueueLink));
@@ -159,7 +160,7 @@ namespace coreinit
thread->state = OSThread_t::THREAD_STATE::STATE_READY;
thread->currentWaitQueue = nullptr;
coreinit::__OSAddReadyThreadToRunQueue(thread);
-if (reschedule && thread->suspendCounter == 0 && PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread))
+if (reschedule && thread->suspendCounter == 0 && PPCInterpreter_getCurrentInstance() && __OSCoreShouldSwitchToThread(coreinit::OSGetCurrentThread(), thread, sharedPriorityAndAffinityWorkaround))
shouldReschedule = true;
}
if (shouldReschedule)
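
A design note on the header change above: because the new parameter defaults to false, every wakeupEntireWaitQueue / wakeupSingleThreadWaitQueue call site that this commit does not touch compiles unchanged and keeps the previous scheduling behaviour; the contended mutex unlock in the first file is, as of this commit, the only opt-in. A sketch of the two call shapes (someOtherQueue is a hypothetical stand-in for any untouched wait queue):

someOtherQueue.wakeupEntireWaitQueue(true);                 // flag omitted, defaults to false -> old behaviour
mutex->threadQueue.wakeupSingleThreadWaitQueue(true, true); // unlock path: reschedule + equal-priority workaround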