Implement PI-Mutexes + Optimize InsertThread
parent 7ba7cd2394
commit 7079f11add
@@ -30,7 +30,6 @@ namespace skyline::kernel {
         if (!currentCore->queue.empty() && thread->affinityMask.count() != 1) {
             // Select core where the current thread will be scheduled the earliest based off average timeslice durations for resident threads
             // There's a preference for the current core as migration isn't free
-
             size_t minTimeslice{};
             CoreContext *optimalCore{};
             for (auto &candidateCore : cores) {
@@ -63,7 +62,7 @@ namespace skyline::kernel {
             if (optimalCore != currentCore) {
                 std::unique_lock coreLock(currentCore->mutex);
                 currentCore->queue.erase(std::remove(currentCore->queue.begin(), currentCore->queue.end(), thread), currentCore->queue.end());
-                currentCore->mutateCondition.notify_all();
+                currentCore->frontCondition.notify_all();

                 thread->coreId = optimalCore->id;

@@ -80,11 +79,14 @@ namespace skyline::kernel {
         return *currentCore;
     }

-    void Scheduler::InsertThread(bool loadBalance) {
-        auto &thread{state.thread};
+    void Scheduler::InsertThread(const std::shared_ptr<type::KThread> &thread, bool loadBalance) {
         auto &core{loadBalance ? LoadBalance() : cores.at(thread->coreId)};

-        signal::SetSignalHandler({YieldSignal}, SignalHandler);
+        thread_local bool signalHandlerSetup{};
+        if (!signalHandlerSetup) {
+            signal::SetSignalHandler({YieldSignal}, SignalHandler);
+            signalHandlerSetup = true;
+        }

         if (!thread->preemptionTimer) {
             struct sigevent event{
@@ -92,19 +94,34 @@ namespace skyline::kernel {
                 .sigev_notify = SIGEV_THREAD_ID,
                 .sigev_notify_thread_id = gettid(),
             };
-            timer_create(CLOCK_THREAD_CPUTIME_ID, &event, &*thread->preemptionTimer);
+            timer_t timer;
+            if (timer_create(CLOCK_THREAD_CPUTIME_ID, &event, &timer))
+                throw exception("timer_create has failed with '{}'", strerror(errno));
+            thread->preemptionTimer.emplace(timer);
         }

         std::unique_lock lock(core.mutex);
-        auto nextThread{std::find_if(core.queue.begin(), core.queue.end(), [&](const std::shared_ptr<type::KThread> &it) { return it->priority > thread->priority; })};
-        if (nextThread == core.queue.begin() && nextThread != core.queue.end()) {
-            // If the inserted thread has a higher priority than the currently running thread (and the queue isn't empty)
-            core.queue.front()->SendSignal(YieldSignal);
-            core.queue.insert(std::next(core.queue.begin()), thread);
+        auto nextThread{std::upper_bound(core.queue.begin(), core.queue.end(), thread->priority.load(), type::KThread::IsHigherPriority)};
+        if (nextThread == core.queue.begin()) {
+            if (nextThread != core.queue.end()) {
+                // If the inserted thread has a higher priority than the currently running thread (and the queue isn't empty)
+                // We need to interrupt the currently scheduled thread and put this thread at the front instead
+                core.queue.insert(std::next(core.queue.begin()), thread);
+                if (state.thread != core.queue.front())
+                    core.queue.front()->SendSignal(YieldSignal);
+                else
+                    YieldPending = true;
+            } else {
+                core.queue.push_front(thread);
+            }
+            if (thread != state.thread) {
+                lock.unlock(); // We should unlock this prior to waking all threads to not cause contention on the core lock
+                core.frontCondition.notify_all(); // We only want to trigger the conditional variable if the current thread isn't inserting itself
+            }
         } else {
             core.queue.insert(nextThread, thread);
-            core.mutateCondition.notify_all(); // We only want to trigger the conditional variable if the current thread isn't going to be scheduled next
         }

         thread->needsReorder = true; // We need to reorder the thread from back to align it with other threads of it's priority and ensure strict ordering amongst priorities
     }

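Editor's note (not part of the commit): the preemption path above hinges on a per-thread POSIX CPU-time timer that delivers a signal to the owning thread once its timeslice is spent. Below is a minimal standalone sketch of that timer_create/timer_settime pattern; the helper names and the exception type are illustrative assumptions rather than Skyline's API, and SIGEV_THREAD_ID/gettid() are Linux-specific (they may require _GNU_SOURCE and a recent libc).

// Illustrative sketch only; not part of this commit.
#include <cerrno>
#include <csignal>
#include <cstring>
#include <ctime>
#include <stdexcept>
#include <string>
#include <unistd.h>

// Create a one-shot timer that measures this thread's CPU time and delivers
// `yieldSignal` to this thread when it fires, mirroring the sigevent setup above.
timer_t CreatePreemptionTimer(int yieldSignal) {
    struct sigevent event{};
    event.sigev_signo = yieldSignal;
    event.sigev_notify = SIGEV_THREAD_ID;
    event.sigev_notify_thread_id = gettid();

    timer_t timer;
    if (timer_create(CLOCK_THREAD_CPUTIME_ID, &event, &timer)) // non-zero return means failure
        throw std::runtime_error(std::string("timer_create failed: ") + std::strerror(errno));
    return timer;
}

// Arm the timer for a single expiry after `timesliceNs` nanoseconds of CPU time.
void ArmPreemptionTimer(timer_t timer, long timesliceNs) {
    struct itimerspec spec{};
    spec.it_value.tv_nsec = timesliceNs;
    if (timer_settime(timer, 0, &spec, nullptr))
        throw std::runtime_error(std::string("timer_settime failed: ") + std::strerror(errno));
}

ArmPreemptionTimer would be called when a thread enters a preemptive priority level, mirroring the itimerspec arming that appears later in the UpdatePriority hunk.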
@@ -115,13 +132,13 @@ namespace skyline::kernel {
         std::shared_lock lock(core->mutex);
         if (thread->affinityMask.count() > 1) {
             std::chrono::milliseconds loadBalanceThreshold{PreemptiveTimeslice * 2}; //!< The amount of time that needs to pass unscheduled for a thread to attempt load balancing
-            while (!core->mutateCondition.wait_for(lock, loadBalanceThreshold, [&]() { return core->queue.front() == thread; })) {
+            while (!core->frontCondition.wait_for(lock, loadBalanceThreshold, [&]() { return !core->queue.empty() && core->queue.front() == thread; })) {
                 lock.unlock();
                 LoadBalance();
                 if (thread->coreId == core->id) {
                     lock.lock();
                 } else {
-                    InsertThread(false);
+                    InsertThread(state.thread, false);
                     core = &cores.at(thread->coreId);
                     lock = std::shared_lock(core->mutex);
                 }
@@ -129,7 +146,7 @@ namespace skyline::kernel {
                 loadBalanceThreshold *= 2; // We double the duration required for future load balancing for this invocation to minimize pointless load balancing
             }
         } else {
-            core->mutateCondition.wait(lock, [&]() { return core->queue.front() == thread; });
+            core->frontCondition.wait(lock, [&]() { return !core->queue.empty() && core->queue.front() == thread; });
         }

         if (thread->priority == core->preemptionPriority) {
@@ -160,14 +177,15 @@ namespace skyline::kernel {
             auto foldingPoint{std::find_if(std::next(core.queue.begin()), core.queue.end(), [&](const std::shared_ptr<type::KThread> &it) {
                 return lastPriority > it->priority ? true : lastPriority = it->priority, false;
             })};
-            core.queue.insert(std::find_if(foldingPoint, core.queue.end(), [&](const std::shared_ptr<type::KThread> &it) { return it->priority > thread->priority; }), thread);
+            core.queue.insert(std::upper_bound(foldingPoint, core.queue.end(), thread->priority.load(), type::KThread::IsHigherPriority), thread);
             thread->needsReorder = false;
         } else {
             core.queue.push_back(thread);
             thread->needsReorder = false;
         }

-        core.mutateCondition.notify_all();
+        lock.unlock();
+        core.frontCondition.notify_all();

         if (cooperative && thread->isPreempted) {
             // If a preemptive thread did a cooperative yield then we need to disarm the preemptive timer
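Editor's note (not part of the commit): both InsertThread and the rotation path above swap a linear std::find_if scan for std::upper_bound over a queue that is kept sorted by priority, using KThread::IsHigherPriority as the comparator. A simplified, self-contained sketch of that pattern follows; the Thread struct and function names are stand-ins, with a lower numeric priority meaning a higher scheduling priority as on the Switch.

// Illustrative sketch only; not part of this commit.
#include <algorithm>
#include <deque>
#include <memory>

struct Thread {
    int priority; // lower value = higher scheduling priority
};

// Shaped like KThread::IsHigherPriority: is `priority` strictly higher
// (numerically lower) than the candidate element's priority?
bool IsHigherPriority(int priority, const std::shared_ptr<Thread> &it) {
    return priority < it->priority;
}

// Insert while keeping the queue sorted: std::upper_bound returns the first
// position whose scheduling priority is strictly lower than the new thread's,
// so the new thread lands *after* any existing threads of equal priority
// (FIFO within a priority level) using O(log n) comparisons instead of a scan.
void Insert(std::deque<std::shared_ptr<Thread>> &queue, const std::shared_ptr<Thread> &thread) {
    queue.insert(std::upper_bound(queue.begin(), queue.end(), thread->priority, IsHigherPriority), thread);
}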
@@ -178,7 +196,7 @@ namespace skyline::kernel {
             }
         }
     }

-    void Scheduler::UpdatePriority(const std::shared_ptr<type::KThread>& thread) {
+    void Scheduler::UpdatePriority(const std::shared_ptr<type::KThread> &thread) {
         std::lock_guard migrationLock(thread->coreMigrationMutex);
         auto *core{&cores.at(thread->coreId)};
         std::unique_lock coreLock(core->mutex);
@@ -188,12 +206,18 @@ namespace skyline::kernel {
             // If the thread isn't in the queue then the new priority will be handled automatically on insertion
             return;
         if (currentIt == core->queue.begin()) {
-            // Alternatively, if it's currently running then we'd just want to update after it rotates
+            // Alternatively, if it's currently running then we'd just want to reorder after it rotates
+            if (!thread->isPreempted && thread->priority == core->preemptionPriority) {
+                // If the thread needs to be preempted due to the new priority then arm it's preemption timer
+                struct itimerspec spec{.it_value = {.tv_nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(PreemptiveTimeslice).count()}};
+                timer_settime(*thread->preemptionTimer, 0, &spec, nullptr);
+                thread->isPreempted = true;
+            }
             thread->needsReorder = true;
             return;
         }

-        auto targetIt{std::find_if(core->queue.begin(), core->queue.end(), [&](const std::shared_ptr<type::KThread> &it) { return it->priority > thread->priority; })};
+        auto targetIt{std::upper_bound(core->queue.begin(), core->queue.end(), thread->priority.load(), type::KThread::IsHigherPriority)};
         if (currentIt == targetIt)
             // If this thread's position isn't affected by the priority change then we have nothing to do
             return;
@@ -206,13 +230,12 @@ namespace skyline::kernel {
             thread->isPreempted = false;
         }

-        targetIt = std::find_if(core->queue.begin(), core->queue.end(), [&](const std::shared_ptr<type::KThread> &it) { return it->priority > thread->priority; }); // Iterator invalidation
+        targetIt = std::upper_bound(core->queue.begin(), core->queue.end(), thread->priority.load(), type::KThread::IsHigherPriority); // Iterator invalidation
         if (targetIt == core->queue.begin() && targetIt != core->queue.end()) {
-            core->queue.front()->SendSignal(YieldSignal);
             core->queue.insert(std::next(core->queue.begin()), thread);
+            core->queue.front()->SendSignal(YieldSignal);
         } else {
             core->queue.insert(targetIt, thread);
-            core->mutateCondition.notify_all();
         }
         thread->needsReorder = true;
     }
@@ -221,8 +244,11 @@ namespace skyline::kernel {
         auto &thread{state.thread};
         auto &core{cores.at(thread->coreId)};

-        std::unique_lock lock(core.mutex);
-        core.queue.erase(std::remove(core.queue.begin(), core.queue.end(), thread), core.queue.end());
+        {
+            std::unique_lock lock(core.mutex);
+            core.queue.erase(std::remove(core.queue.begin(), core.queue.end(), thread), core.queue.end());
+        }
+        core.frontCondition.notify_all(); // We need to notify any threads in line to be scheduled

         if (thread->isPreempted) {
             struct itimerspec spec{};
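Editor's note (not part of the commit): RemoveThread above now scopes the queue mutation so the core mutex is dropped before frontCondition.notify_all(), and InsertThread does the same when waking other threads; notifying while still holding the lock would wake waiters straight into contention on it. A minimal sketch of that unlock-before-notify pattern, using stand-in names rather than Skyline's CoreContext:

// Illustrative sketch only; not part of this commit.
#include <algorithm>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <shared_mutex>

struct Core {
    std::shared_mutex mutex;                    // guards `queue`
    std::condition_variable_any frontCondition; // signalled when the queue front may have changed
    std::deque<int> queue;                      // stand-in for the per-core thread queue

    void Remove(int thread) {
        {
            std::unique_lock lock(mutex); // mutate the queue under the lock...
            queue.erase(std::remove(queue.begin(), queue.end(), thread), queue.end());
        } // ...and release it here
        frontCondition.notify_all(); // notify without holding the lock so woken waiters can make progress
    }
};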
@@ -46,7 +46,7 @@ namespace skyline {
             u8 id;
             u8 preemptionPriority; //!< The priority at which this core becomes preemptive as opposed to cooperative
             std::shared_mutex mutex; //!< Synchronizes all operations on the queue
-            std::condition_variable_any mutateCondition; //!< A conditional variable which is signalled on every mutation of the queue
+            std::condition_variable_any frontCondition; //!< A conditional variable which is signalled when the front of the queue has changed
             std::deque<std::shared_ptr<type::KThread>> queue; //!< A queue of threads which are running or to be run on this core

             CoreContext(u8 id, u8 preemptionPriority);
@@ -74,10 +74,11 @@ namespace skyline {
             CoreContext& LoadBalance();

             /**
-             * @brief Inserts the calling thread into the scheduler queue at the appropriate location based on it's priority
+             * @brief Inserts the specified thread into the scheduler queue at the appropriate location based on it's priority
              * @param loadBalance If to load balance or use the thread's current core (KThread::coreId)
+             * @note It isn't supported to load balance if the supplied thread isn't the calling thread, it'll lead to UB
              */
-            void InsertThread(bool loadBalance = true);
+            void InsertThread(const std::shared_ptr<type::KThread>& thread, bool loadBalance = true);

             /**
              * @brief Wait for the current thread to be scheduled on it's resident core
@@ -276,7 +276,7 @@ namespace skyline::kernel::svc {
     }

     void ExitThread(const DeviceState &state) {
-        state.logger->Debug("svcExitThread: Exiting current thread: {}", state.thread->id);
+        state.logger->Debug("svcExitThread: Exiting current thread");
         std::longjmp(state.thread->originalCtx, true);
     }

@@ -330,9 +330,11 @@ namespace skyline::kernel::svc {
         }
         try {
             auto thread{state.process->GetHandle<type::KThread>(handle)};
-            state.logger->Debug("svcSetThreadPriority: Setting thread priority to {}", thread->id, priority);
-            thread->priority = priority;
-            state.scheduler->UpdatePriority(thread);
+            state.logger->Debug("svcSetThreadPriority: Setting thread priority to {}", priority);
+            if (thread->priority != priority) {
+                thread->priority = priority;
+                state.scheduler->UpdatePriority(thread);
+            }
             state.ctx->gpr.w0 = Result{};
         } catch (const std::out_of_range &) {
             state.logger->Warn("svcSetThreadPriority: 'handle' invalid: 0x{:X}", handle);
@@ -346,7 +348,7 @@ namespace skyline::kernel::svc {
         auto thread{state.process->GetHandle<type::KThread>(handle)};
         auto idealCore{thread->idealCore};
         auto affinityMask{thread->affinityMask};
-        state.logger->Debug("svcGetThreadCoreMask: Writing thread #{}'s Ideal Core ({}) + Affinity Mask ({})", thread->id, idealCore, affinityMask);
+        state.logger->Debug("svcGetThreadCoreMask: Setting Ideal Core ({}) + Affinity Mask ({})", idealCore, affinityMask);

         state.ctx->gpr.x2 = affinityMask.to_ullong();
         state.ctx->gpr.w1 = idealCore;
@@ -386,7 +388,7 @@ namespace skyline::kernel::svc {
             return;
         }

-        state.logger->Debug("svcSetThreadCoreMask: Setting thread #{}'s Ideal Core ({}) + Affinity Mask ({})", thread->id, idealCore, affinityMask);
+        state.logger->Debug("svcSetThreadCoreMask: Setting Ideal Core ({}) + Affinity Mask ({})", idealCore, affinityMask);

         thread->idealCore = idealCore;
         thread->affinityMask = affinityMask;
@@ -396,7 +398,8 @@ namespace skyline::kernel::svc {

             state.scheduler->RemoveThread();
             thread->coreId = idealCore;
-            state.scheduler->InsertThread(false);
+            state.scheduler->InsertThread(state.thread, false);
+            state.scheduler->WaitSchedule();
         }

         state.ctx->gpr.w0 = Result{};
@@ -407,8 +410,9 @@ namespace skyline::kernel::svc {
     }

     void GetCurrentProcessorNumber(const DeviceState &state) {
-        state.logger->Debug("svcGetCurrentProcessorNumber: Writing current core for thread #{}: {}", state.thread->id, state.thread->coreId);
-        state.ctx->gpr.w0 = state.thread->coreId;
+        auto coreId{state.thread->coreId};
+        state.logger->Debug("svcGetCurrentProcessorNumber: Writing current core: {}", coreId);
+        state.ctx->gpr.w0 = coreId;
     }

     void ClearEvent(const DeviceState &state) {
@@ -611,12 +615,15 @@ namespace skyline::kernel::svc {

         state.logger->Debug("svcArbitrateLock: Locking mutex at 0x{:X}", pointer);

-        if (state.process->MutexLock(pointer, ownerHandle))
+        auto result{state.process->MutexLock(pointer, ownerHandle)};
+        if (result == Result{})
             state.logger->Debug("svcArbitrateLock: Locked mutex at 0x{:X}", pointer);
-        else
+        else if (result == result::InvalidHandle)
+            state.logger->Warn("svcArbitrateLock: 'handle' invalid: 0x{:X}", ownerHandle);
+        else if (result == result::InvalidCurrentMemory)
             state.logger->Debug("svcArbitrateLock: Owner handle did not match current owner for mutex or didn't have waiter flag at 0x{:X}", pointer);

-        state.ctx->gpr.w0 = Result{};
+        state.ctx->gpr.w0 = result;
     }

     void ArbitrateUnlock(const DeviceState &state) {
@@ -629,13 +636,10 @@ namespace skyline::kernel::svc {

         state.logger->Debug("svcArbitrateUnlock: Unlocking mutex at 0x{:X}", mutex);

-        if (state.process->MutexUnlock(mutex)) {
-            state.logger->Debug("svcArbitrateUnlock: Unlocked mutex at 0x{:X}", mutex);
-            state.ctx->gpr.w0 = Result{};
-        } else {
-            state.logger->Debug("svcArbitrateUnlock: A non-owner thread tried to release a mutex at 0x{:X}", mutex);
-            state.ctx->gpr.w0 = result::InvalidAddress;
-        }
+        state.process->MutexUnlock(mutex);
+        state.logger->Debug("svcArbitrateUnlock: Unlocked mutex at 0x{:X}", mutex);
+        state.ctx->gpr.w0 = Result{};
     }

     void WaitProcessWideKeyAtomic(const DeviceState &state) {
@@ -651,11 +655,7 @@ namespace skyline::kernel::svc {
         if (handle != state.thread->handle)
             throw exception("svcWaitProcessWideKeyAtomic: Handle doesn't match current thread: 0x{:X} for thread 0x{:X}", handle, state.thread->handle);

-        if (!state.process->MutexUnlock(mutex)) {
-            state.logger->Debug("WaitProcessWideKeyAtomic: A non-owner thread tried to release a mutex at 0x{:X}", mutex);
-            state.ctx->gpr.w0 = result::InvalidAddress;
-            return;
-        }
+        state.process->MutexUnlock(mutex);

         u64 timeout{state.ctx->gpr.x3};
         state.logger->Debug("svcWaitProcessWideKeyAtomic: Mutex: 0x{:X}, Conditional-Variable: 0x{:X}, Timeout: {} ns", mutex, conditional, timeout);
@@ -731,7 +731,7 @@ namespace skyline::kernel::svc {
         if (debug.back() == '\n')
             debug.remove_suffix(1);

-        state.logger->Info("Debug Output: {}", debug);
+        state.logger->Info("svcOutputDebugString: {}", debug);
         state.ctx->gpr.w0 = Result{};
     }

@@ -3,13 +3,10 @@

 #include <nce.h>
 #include <os.h>
+#include <kernel/results.h>
 #include "KProcess.h"

 namespace skyline::kernel::type {
-    KProcess::WaitStatus::WaitStatus(u8 priority, KHandle handle) : priority(priority), handle(handle) {}
-
-    KProcess::WaitStatus::WaitStatus(u8 priority, KHandle handle, u32 *mutex) : priority(priority), handle(handle), mutex(mutex) {}
-
     KProcess::TlsPage::TlsPage(const std::shared_ptr<KPrivateMemory> &memory) : memory(memory) {}

     u8 *KProcess::TlsPage::ReserveSlot() {
@@ -112,72 +109,135 @@ namespace skyline::kernel::type {
         return std::nullopt;
     }

-    bool KProcess::MutexLock(u32 *mutex, KHandle owner) {
-        std::unique_lock lock(mutexLock);
+    constexpr u32 HandleWaitMask{0x40000000}; //!< A mask of a bit which denotes if the handle has waiters or not

-        auto &mtxWaiters{mutexes[reinterpret_cast<u64>(mutex)]};
-        if (mtxWaiters.empty()) {
-            u32 mtxExpected{};
-            if (__atomic_compare_exchange_n(mutex, &mtxExpected, (constant::MtxOwnerMask & state.thread->handle), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
-                return true;
+    Result KProcess::MutexLock(u32 *mutex, KHandle ownerHandle) {
+        std::shared_ptr<KThread> owner;
+        try {
+            owner = GetHandle<KThread>(ownerHandle & ~HandleWaitMask);
+        } catch (const std::out_of_range &) {
+            return result::InvalidHandle;
         }

-        if (__atomic_load_n(mutex, __ATOMIC_SEQ_CST) != (owner | ~constant::MtxOwnerMask))
-            return false;
+        bool isHighestPriority;
+        {
+            std::lock_guard lock(owner->waiterMutex);

-        std::shared_ptr<WaitStatus> status;
-        for (auto it{mtxWaiters.begin()};; it++) {
-            if (it != mtxWaiters.end() && (*it)->priority >= state.thread->priority)
-                continue;
+            u32 value{};
+            if (__atomic_compare_exchange_n(mutex, &value, (HandleWaitMask & state.thread->handle), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+                // We try to do a CAS to get ownership of the mutex in the case that it's unoccupied
+                return {};
+            if (value != (ownerHandle | HandleWaitMask))
+                // We ensure that the mutex's value is the handle with the waiter bit set
+                return result::InvalidCurrentMemory;

-            status = std::make_shared<WaitStatus>(state.thread->priority, state.thread->handle);
-            mtxWaiters.insert(it, status);
-            break;
+            auto &waiters{owner->waiters};
+            isHighestPriority = waiters.insert(std::upper_bound(waiters.begin(), waiters.end(), state.thread->priority.load(), KThread::IsHigherPriority), state.thread) == waiters.begin();
+            state.scheduler->RemoveThread();

+            std::atomic_store(&state.thread->waitThread, owner);
+            state.thread->waitKey = mutex;
         }

-        lock.unlock();
-        while (!status->flag);
-        lock.lock();
-        status->flag = false;
+        if (isHighestPriority) {
+            // If we were the highest priority thread then we need to inherit priorities for all threads we're waiting on recursively
+            do {
+                u8 priority, ownerPriority;
+                do {
+                    // Try to CAS the priority of the owner with the current thread
+                    // If they're equivalent then we don't need to CAS as the priority won't be inherited
+                    ownerPriority = owner->priority.load();
+                    priority = std::min(ownerPriority, state.thread->priority.load());
+                } while (ownerPriority != priority && owner->priority.compare_exchange_strong(ownerPriority, priority));

-        for (auto it{mtxWaiters.begin()}; it != mtxWaiters.end(); it++) {
-            if ((*it)->handle == state.thread->handle) {
-                mtxWaiters.erase(it);
-                break;
-            }
+                if (ownerPriority != priority) {
+                    auto waitThread{std::atomic_load(&owner->waitThread)};
+                    while (waitThread) {
+                        std::lock_guard lock(waitThread->waiterMutex);

+                        auto currentWaitThread{std::atomic_load(&owner->waitThread)};
+                        if (waitThread != currentWaitThread) {
+                            waitThread = currentWaitThread;
+                            continue;
+                        }

+                        // We need to update the location of the owner thread in the waiter queue of the thread it's waiting on
+                        auto &waiters{waitThread->waiters};
+                        waiters.erase(std::find(waiters.begin(), waiters.end(), waitThread));
+                        waiters.insert(std::upper_bound(waiters.begin(), waiters.end(), state.thread->priority.load(), KThread::IsHigherPriority), owner);
+                        break;
+                    }

+                    state.scheduler->UpdatePriority(owner);

+                    owner = waitThread;
+                } else {
+                    break;
+                }
+            } while (owner);
         }

-        return true;
+        state.scheduler->WaitSchedule();

+        return {};
     }

-    bool KProcess::MutexUnlock(u32 *mutex) {
-        std::unique_lock lock(mutexLock);
+    void KProcess::MutexUnlock(u32 *mutex) {
+        std::lock_guard lock(state.thread->waiterMutex);
+        auto &waiters{state.thread->waiters};
+        auto nextOwnerIt{std::find_if(waiters.begin(), waiters.end(), [mutex](const std::shared_ptr<KThread> &thread) { return thread->waitKey == mutex; })};
+        if (nextOwnerIt != waiters.end()) {
+            auto nextOwner{*nextOwnerIt};
+            std::lock_guard nextLock(nextOwner->waiterMutex);
+            std::atomic_store(&nextOwner->waitThread, std::shared_ptr<KThread>{nullptr});
+            nextOwner->waitKey = nullptr;

-        auto &mtxWaiters{mutexes[reinterpret_cast<u64>(mutex)]};
-        u32 mtxDesired{};
-        if (!mtxWaiters.empty())
-            mtxDesired = (*mtxWaiters.begin())->handle | ((mtxWaiters.size() > 1) ? ~constant::MtxOwnerMask : 0);
+            // Move all threads waiting on this key to the next owner's waiter list
+            std::shared_ptr<KThread> nextWaiter{};
+            for (auto it{waiters.erase(nextOwnerIt)}; it != waiters.end(); it++) {
+                if ((*it)->waitKey == mutex) {
+                    nextOwner->waiters.splice(std::upper_bound(nextOwner->waiters.begin(), nextOwner->waiters.end(), (*it)->priority.load(), KThread::IsHigherPriority), waiters, it);
+                    std::atomic_store(&(*it)->waitThread, nextOwner);
+                    if (!nextWaiter)
+                        nextWaiter = *it;
+                }
+            }

-        u32 mtxExpected{(constant::MtxOwnerMask & state.thread->handle) | ~constant::MtxOwnerMask};
-        if (!__atomic_compare_exchange_n(mutex, &mtxExpected, mtxDesired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-            mtxExpected &= constant::MtxOwnerMask;
+            if (!waiters.empty()) {
+                // If there are threads still waiting on us then try to inherit their priority
+                auto highestPriority{waiters.front()};
+                u8 priority, ownerPriority;
+                do {
+                    ownerPriority = state.thread->priority.load();
+                    priority = std::min(ownerPriority, highestPriority->priority.load());
+                } while (ownerPriority != priority && nextOwner->priority.compare_exchange_strong(ownerPriority, priority));
+                state.scheduler->UpdatePriority(state.thread);
+            }

-            if (!__atomic_compare_exchange_n(mutex, &mtxExpected, mtxDesired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
-                return false;
+            if (nextWaiter) {
+                // If there is a waiter on the new owner then try to inherit it's priority
+                u8 priority, ownerPriority;
+                do {
+                    ownerPriority = nextOwner->priority.load();
+                    priority = std::min(ownerPriority, nextWaiter->priority.load());
+                } while (ownerPriority != priority && nextOwner->priority.compare_exchange_strong(ownerPriority, priority));

+                __atomic_store_n(mutex, nextOwner->handle | HandleWaitMask, __ATOMIC_SEQ_CST);
+            } else {
+                __atomic_store_n(mutex, nextOwner->handle, __ATOMIC_SEQ_CST);
+            }

+            // Finally, schedule the next owner accordingly
+            state.scheduler->InsertThread(nextOwner, false);
+        } else {
+            __atomic_store_n(mutex, 0, __ATOMIC_SEQ_CST);
+            return;
         }

-        if (mtxDesired) {
-            auto status{(*mtxWaiters.begin())};
-            status->flag = true;
-            lock.unlock();
-            while (status->flag);
-            lock.lock();
-        }

-        return true;
     }

     bool KProcess::ConditionalVariableWait(void *conditional, u32 *mutex, u64 timeout) {
+        /* FIXME
         std::unique_lock lock(conditionalLock);

         auto &condWaiters{conditionals[reinterpret_cast<u64>(conditional)]};
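Editor's note (not part of the commit): the inheritance step in MutexLock above reduces to clamping the owner's atomic priority down to the waiter's value (lower value = higher priority) with a compare-and-swap loop; the commit then walks the same clamp along the chain of waitThread links under each waiterMutex so the boost propagates through nested waits. A condensed, self-contained sketch of just that clamp follows; the function name and parameter types are illustrative, not Skyline's API.

// Illustrative sketch only; not part of this commit.
#include <algorithm>
#include <atomic>
#include <cstdint>

// Raise (never lower) the owner's effective priority to the waiter's level.
// Lower numeric value = higher priority. Retries if the owner's priority is
// changed concurrently by another waiter.
void InheritPriority(std::atomic<std::uint8_t> &ownerPriority, std::uint8_t waiterPriority) {
    std::uint8_t current{ownerPriority.load()};
    std::uint8_t desired{std::min(current, waiterPriority)};
    while (current != desired && !ownerPriority.compare_exchange_weak(current, desired))
        desired = std::min(current, waiterPriority); // `current` was refreshed by the failed CAS
}

Once the clamp takes effect, the commit calls UpdatePriority on the boosted thread so the per-core scheduler queue is re-sorted to match the inherited priority.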
@@ -216,9 +276,12 @@ namespace skyline::kernel::type {
         lock.unlock();

         return !timedOut;
+        */
+        return false;
     }

     void KProcess::ConditionalVariableSignal(void *conditional, u64 amount) {
+        /* FIXME
         std::unique_lock condLock(conditionalLock);

         auto &condWaiters{conditionals[reinterpret_cast<u64>(conditional)]};
@@ -282,5 +345,6 @@ namespace skyline::kernel::type {
             while (thread->flag);
             condLock.lock();
         }
+        */
     }
 }
@@ -15,7 +15,6 @@ namespace skyline {
         constexpr u16 TlsSlotSize{0x200}; //!< The size of a single TLS slot
         constexpr u8 TlsSlots{PAGE_SIZE / TlsSlotSize}; //!< The amount of TLS slots in a single page
         constexpr KHandle BaseHandleIndex{0xD000}; //!< The index of the base handle
-        constexpr u32 MtxOwnerMask{0xBFFFFFFF}; //!< The mask of values which contain the owner of a mutex
     }

     namespace kernel::type {
@@ -27,22 +26,6 @@ namespace skyline {
             MemoryManager memory;

           private:
-            struct WaitStatus {
-                std::atomic_bool flag{false};
-                u8 priority;
-                KHandle handle;
-                u32 *mutex{};
-
-                WaitStatus(u8 priority, KHandle handle);
-
-                WaitStatus(u8 priority, KHandle handle, u32 *mutex);
-            };
-
-            std::unordered_map<u64, std::vector<std::shared_ptr<WaitStatus>>> mutexes; //!< A map from a mutex's address to a vector of Mutex objects for threads waiting on it
-            std::unordered_map<u64, std::list<std::shared_ptr<WaitStatus>>> conditionals; //!< A map from a conditional variable's address to a vector of threads waiting on it
-            std::mutex mutexLock;
-            std::mutex conditionalLock;
-
             std::mutex threadMutex; //!< Synchronizes thread creation to prevent a race between thread creation and thread killing
             bool disableThreadCreation{}; //!< If to disable thread creation, we use this to prevent thread creation after all threads have been killed
             std::vector<std::shared_ptr<KThread>> threads;
@@ -208,17 +191,15 @@ namespace skyline {
             }

             /**
-             * @brief Locks the Mutex at the specified address
-             * @param owner The handle of the current mutex owner
-             * @return If the mutex was successfully locked
+             * @brief Locks the mutex at the specified address
+             * @param ownerHandle The psuedo-handle of the current mutex owner
              */
-            bool MutexLock(u32 *mutex, KHandle owner);
+            Result MutexLock(u32 *mutex, KHandle ownerHandle);

             /**
-             * @brief Unlocks the Mutex at the specified address
-             * @return If the mutex was successfully unlocked
+             * @brief Unlocks the mutex at the specified address
              */
-            bool MutexUnlock(u32 *mutex);
+            void MutexUnlock(u32 *mutex);

             /**
              * @param timeout The amount of time to wait for the conditional variable
@@ -58,7 +58,7 @@ namespace skyline::kernel::type {
         signal::SetSignalHandler({SIGINT, SIGILL, SIGTRAP, SIGBUS, SIGFPE, SIGSEGV}, nce::NCE::SignalHandler);

         try {
-            state.scheduler->InsertThread();
+            state.scheduler->InsertThread(state.thread);
             state.scheduler->WaitSchedule();

             asm volatile(
@@ -3,6 +3,7 @@

 #pragma once

+#include <list>
 #include <csetjmp>
 #include <nce/guest.h>
 #include <kernel/scheduler.h>
@@ -38,16 +39,21 @@ namespace skyline {
             u64 entryArgument;
             void *stackTop;

+            std::atomic<u8> basePriority; //!< The priority of the thread for the scheduler without any priority-inheritance
             std::atomic<u8> priority; //!< The priority of the thread for the scheduler
             i8 idealCore; //!< The ideal CPU core for this thread to run on
             i8 coreId; //!< The CPU core on which this thread is running
             CoreMask affinityMask{}; //!< A mask of CPU cores this thread is allowed to run on
+            std::mutex coreMigrationMutex; //!< Synchronizes operations which depend on which core the thread is running on
             u64 timesliceStart{}; //!< Start of the scheduler timeslice
             u64 averageTimeslice{}; //!< A weighted average of the timeslice duration for this thread
             std::optional<timer_t> preemptionTimer{}; //!< A kernel timer used for preemption interrupts
             bool isPreempted{}; //!< If the preemption timer has been armed and will fire
             bool needsReorder{}; //!< If the thread needs to reorder itself during scheduler rotation
-            std::mutex coreMigrationMutex; //!< Synchronizes operations which depend on which core the thread is running on
+            std::mutex waiterMutex; //!< Synchronizes operations on mutation of the waiter members
+            u32* waitKey; //!< The key on which this thread is waiting on
+            std::shared_ptr<KThread> waitThread; //!< The thread which this thread is waiting on
+            std::list<std::shared_ptr<type::KThread>> waiters; //!< A queue of threads waiting on this thread sorted by priority

             KThread(const DeviceState &state, KHandle handle, KProcess *parent, size_t id, void *entry, u64 argument, void *stackTop, u8 priority, i8 idealCore);

@@ -69,6 +75,13 @@ namespace skyline {
              * @brief Sends a host OS signal to the thread which is running this KThread
              */
             void SendSignal(int signal);

+            /**
+             * @return If the supplied priority value is higher than the current thread
+             */
+            static constexpr bool IsHigherPriority(const i8 priority, const std::shared_ptr<type::KThread> &it) {
+                return priority < it->priority;
+            }
         };
     }
 }