// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once

#include <array>
#include <bitset>
#include <chrono>
#include <condition_variable>
#include <csignal>
#include <limits>
#include <list>
#include <memory>
#include <mutex>
#include <common.h>
namespace skyline {
namespace constant {
2020-12-08 11:43:54 +01:00
constexpr u8 CoreCount { 4 } ; //!< The amount of cores an HOS process can be scheduled onto (User applications can only be on the first 3 cores, the last one is reserved for the system)
2020-12-22 14:38:37 +01:00
constexpr u8 ParkedCoreId { CoreCount } ; //!< // An invalid core ID, representing that a thread has been parked
2020-11-20 19:27:53 +01:00
}
namespace kernel {
using CoreMask = std : : bitset < constant : : CoreCount > ;
2020-11-21 16:25:57 +01:00
/**
* @ brief Priority on HOS determines scheduling behavior relative to other threads
* @ note Lower priority values result in a higher priority , similar to niceness on Linux
*/
2020-11-20 19:27:53 +01:00
struct Priority {
2020-11-21 16:25:57 +01:00
u8 min ; //!< Numerically lowest priority, highest scheduler priority
u8 max ; //!< Numerically highest priority, lowest scheduler priority
2020-11-20 19:27:53 +01:00
2020-11-21 16:25:57 +01:00
/**
* @ return A bitmask with each bit corresponding to if scheduler priority with the same index is valid
*/
2020-11-20 19:27:53 +01:00
constexpr u64 Mask ( ) const {
2020-11-21 16:25:57 +01:00
return ( std : : numeric_limits < u64 > : : max ( ) > > ( ( std : : numeric_limits < u64 > : : digits - 1 + min ) - max ) ) < < min ;
2020-11-20 19:27:53 +01:00
}
2021-01-11 20:41:21 +01:00
constexpr bool Valid ( u8 value ) const {
2020-11-20 19:27:53 +01:00
return ( value > = min ) & & ( value < = max ) ;
}
} ;
2020-12-05 18:41:52 +01:00
/*
* @ brief The Scheduler is responsible for determining which threads should run on which virtual cores and when they should be scheduled
* @ note We tend to stray a lot from HOS in our scheduler design as we ' ve designed it around our 1 host thread per guest thread which leads to scheduling from the perspective of threads while the HOS scheduler deals with scheduling from the perspective of cores , not doing this would lead to missing out on key optimizations and serialization of scheduling
*/
class Scheduler {
private :
const DeviceState & state ;
struct CoreContext {
u8 id ;
2020-12-08 11:43:54 +01:00
u8 preemptionPriority ; //!< The priority at which this core becomes preemptive as opposed to cooperative
2021-01-15 22:15:06 +01:00
std : : mutex mutex ; //!< Synchronizes all operations on the queue
2020-12-22 14:38:37 +01:00
std : : list < std : : shared_ptr < type : : KThread > > queue ; //!< A queue of threads which are running or to be run on this core
2020-12-05 18:41:52 +01:00
2020-12-08 11:43:54 +01:00
CoreContext ( u8 id , u8 preemptionPriority ) ;
2020-12-05 18:41:52 +01:00
} ;
2020-12-08 11:43:54 +01:00
std : : array < CoreContext , constant : : CoreCount > cores { CoreContext ( 0 , 59 ) , CoreContext ( 1 , 59 ) , CoreContext ( 2 , 59 ) , CoreContext ( 3 , 63 ) } ;
2020-12-05 18:41:52 +01:00
2020-12-22 14:38:37 +01:00
std : : mutex parkedMutex ; //!< Synchronizes all operations on the queue of parked threads
std : : list < std : : shared_ptr < type : : KThread > > parkedQueue ; //!< A queue of threads which are parked and waiting on core migration
2020-12-05 18:41:52 +01:00
public :
2020-12-08 11:43:54 +01:00
static constexpr std : : chrono : : milliseconds PreemptiveTimeslice { 10 } ; //!< The duration of time a preemptive thread can run before yielding
inline static int YieldSignal { SIGRTMIN } ; //!< The signal used to cause a yield in running threads
inline static thread_local bool YieldPending { } ; //!< A flag denoting if a yield is pending on this thread, it's checked at SVC exit
2020-12-05 18:41:52 +01:00
Scheduler ( const DeviceState & state ) ;
2020-12-08 11:43:54 +01:00
/**
* @ brief A signal handler designed to cause a non - cooperative yield for preemption and higher priority threads being inserted
*/
static void SignalHandler ( int signal , siginfo * info , ucontext * ctx , void * * tls ) ;
2020-12-05 18:41:52 +01:00
/**
2020-12-20 15:09:36 +01:00
* @ brief Checks all cores and migrates the specified thread to the core where the calling thread should be scheduled the earliest
2020-12-22 14:38:37 +01:00
* @ param alwaysInsert If to insert the thread even if it hasn ' t migrated cores , this is used during thread creation
2020-12-05 18:41:52 +01:00
* @ return A reference to the CoreContext of the core which the calling thread is running on after load balancing
2020-12-22 14:38:37 +01:00
* @ note This inserts the thread into the migrated process ' s queue after load balancing , there is no need to call it redundantly
2021-01-11 20:41:21 +01:00
* @ note alwaysInsert makes the assumption that the thread isn ' t inserted in any core ' s queue currently
2020-12-05 18:41:52 +01:00
*/
2020-12-22 14:38:37 +01:00
CoreContext & LoadBalance ( const std : : shared_ptr < type : : KThread > & thread , bool alwaysInsert = false ) ;
2020-12-05 18:41:52 +01:00
/**
2020-12-15 19:54:08 +01:00
* @ brief Inserts the specified thread into the scheduler queue at the appropriate location based on it ' s priority
2020-12-05 18:41:52 +01:00
*/
2020-12-20 15:09:36 +01:00
void InsertThread ( const std : : shared_ptr < type : : KThread > & thread ) ;
2020-12-05 18:41:52 +01:00
/**
* @ brief Wait for the current thread to be scheduled on it ' s resident core
2020-12-20 15:09:36 +01:00
* @ param loadBalance If the thread is appropriate for load balancing then if to load balance it occassionally or not
2020-12-05 18:41:52 +01:00
* @ note There is an assumption of the thread being on it ' s resident core queue , if it ' s not this ' ll never return
*/
2020-12-20 15:09:36 +01:00
void WaitSchedule ( bool loadBalance = true ) ;
/**
* @ brief Wait for the current thread to be scheduled on it ' s resident core or for the timeout to expire
* @ return If the thread has been scheduled ( true ) or if the timer expired before it could be ( false )
* @ note This will never load balance as it uses the timeout itself as a result this shouldn ' t be used as a replacement for regular waits
*/
bool TimedWaitSchedule ( std : : chrono : : nanoseconds timeout ) ;
2020-12-05 18:41:52 +01:00
/**
* @ brief Rotates the calling thread ' s resident core queue , if it is at the front of it
2020-12-08 11:43:54 +01:00
* @ param cooperative If this was triggered by a cooperative yield as opposed to a preemptive one
2020-12-05 18:41:52 +01:00
*/
2020-12-08 11:43:54 +01:00
void Rotate ( bool cooperative = true ) ;
2020-12-05 18:41:52 +01:00
2021-02-18 15:13:29 +01:00
/**
* @ brief Removes the calling thread from it ' s resident core queue
*/
void RemoveThread ( ) ;
2020-12-10 20:36:00 +01:00
/**
* @ brief Updates the placement of the supplied thread in it ' s resident core ' s queue according to it ' s new priority
*/
void UpdatePriority ( const std : : shared_ptr < type : : KThread > & thread ) ;
2021-02-18 15:13:29 +01:00
/**
* @ brief Updates the core that the supplied thread is resident to according to it ' s new affinity mask and ideal core
* @ note This supports changing the core of a thread which is currently running
*/
void UpdateCore ( const std : : shared_ptr < type : : KThread > & thread ) ;
2020-12-22 14:38:37 +01:00
/**
* @ brief Parks the calling thread after removing it from it ' s resident core ' s queue and inserts it on the core it ' s been awoken on
* @ note This will not handle waiting for the thread to be scheduled , this should be followed with a call to WaitSchedule / TimedWaitSchedule
*/
void ParkThread ( ) ;
/**
* @ brief Wakes a single parked thread which may be appropriate for running next on this core
* @ note We will only wake a thread if it is determined to be a better pick than the thread which would be run on this core next
*/
void WakeParkedThread ( ) ;
2020-12-05 18:41:52 +01:00
} ;
2020-12-22 14:38:37 +01:00
/**
* @ brief A lock which removes the calling thread from it ' s resident core ' s scheduler queue and adds it back when being destroyed
* @ note It also blocks till the thread has been rescheduled in it ' s destructor , this behavior might not be preferable in some cases
* @ note This is not an analogue to KScopedSchedulerLock on HOS , it is for handling thread state changes which we handle with Scheduler : : YieldPending
*/
struct SchedulerScopedLock {
private :
const DeviceState & state ;
public :
inline SchedulerScopedLock ( const DeviceState & state ) : state ( state ) {
state . scheduler - > RemoveThread ( ) ;
}
inline ~ SchedulerScopedLock ( ) {
state . scheduler - > InsertThread ( state . thread ) ;
state . scheduler - > WaitSchedule ( ) ;
}
} ;
}
}