// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
#pragma once

#include <common.h>
#include <condition_variable>
#include <array>
#include <bitset>
#include <chrono>
#include <csignal>
#include <limits>
#include <list>
#include <memory>
#include <mutex>

namespace skyline {
    namespace constant {
        constexpr u8 CoreCount{4}; //!< The number of cores that an HOS process can be scheduled onto (user applications can only run on the first 3 cores, the last one is reserved for the system)
        constexpr u8 ParkedCoreId{CoreCount}; //!< An invalid core ID, representing that a thread has been parked
    }

    namespace kernel {
        using CoreMask = std::bitset<constant::CoreCount>;

        /**
         * @brief Priority on HOS determines scheduling behavior relative to other threads
         * @note Lower priority values result in a higher priority, similar to niceness on Linux
         */
        struct Priority {
            u8 min; //!< Numerically lowest priority, highest scheduler priority
            u8 max; //!< Numerically highest priority, lowest scheduler priority

            /**
             * @return A bitmask where each set bit marks a valid scheduler priority at the corresponding bit index
             */
            constexpr u64 Mask() const {
                return (std::numeric_limits<u64>::max() >> ((std::numeric_limits<u64>::digits - 1 + min) - max)) << min;
            }

            constexpr bool Valid(u8 value) const {
                return (value >= min) && (value <= max);
            }
        };
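
        // A few illustrative compile-time checks of Priority::Mask(), added for documentation; the specific min/max values are examples, not ranges taken from this codebase
        static_assert(Priority{0, 3}.Mask() == 0b1111); // Bits 0-3 set, one per valid priority
        static_assert(Priority{2, 3}.Mask() == 0b1100); // min shifts the start of the mask up to bit 2
        static_assert(Priority{0, 63}.Mask() == std::numeric_limits<u64>::max()); // A full 0-63 range covers all 64 bits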

        /**
         * @brief The Scheduler is responsible for determining which threads should run on which virtual cores and when they should be scheduled
         * @note We tend to stray a lot from HOS in our scheduler design as we've designed it around our model of one host thread per guest thread; this leads to scheduling from the perspective of threads, while the HOS scheduler schedules from the perspective of cores, and not doing this would lead to missing out on key optimizations and would serialize scheduling
         */
        class Scheduler {
          private:
            const DeviceState &state;

            struct CoreContext {
                u8 id;
                u8 preemptionPriority; //!< The priority at which this core becomes preemptive as opposed to cooperative
                std::mutex mutex; //!< Synchronizes all operations on the queue
                std::list<std::shared_ptr<type::KThread>> queue; //!< A queue of threads which are running or are to be run on this core

                CoreContext(u8 id, u8 preemptionPriority);
            };

            std::array<CoreContext, constant::CoreCount> cores{CoreContext(0, 59), CoreContext(1, 59), CoreContext(2, 59), CoreContext(3, 63)};

            std::mutex parkedMutex; //!< Synchronizes all operations on the queue of parked threads
            std::list<std::shared_ptr<type::KThread>> parkedQueue; //!< A queue of threads which are parked and waiting on core migration

            /**
             * @brief Migrates a thread from its resident core to its ideal core
             * @note 'KThread::coreMigrationMutex' **must** be locked by the calling thread prior to calling this
             * @note This is used to handle non-cooperative core affinity mask changes where the resident core is not in the new affinity mask
             */
            void MigrateToCore(const std::shared_ptr<type::KThread> &thread, CoreContext *&currentCore, CoreContext *targetCore, std::unique_lock<std::mutex> &lock);

          public:
            static constexpr std::chrono::milliseconds PreemptiveTimeslice{10}; //!< The duration of time a preemptive thread can run before yielding
            inline static int YieldSignal{SIGRTMIN}; //!< The signal used to cause a yield in running threads
            inline static thread_local bool YieldPending{}; //!< A flag denoting if a yield is pending on this thread, it's checked at SVC exit

            Scheduler(const DeviceState &state);

            /**
             * @brief A signal handler designed to cause a non-cooperative yield for preemption and for when higher priority threads are inserted
             */
            static void SignalHandler(int signal, siginfo *info, ucontext *ctx, void **tls);
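
            // A hypothetical sketch of how a non-cooperative yield could be requested from another host thread; the 'pthread' member holding the target's host pthread_t is an illustrative assumption, not a field declared in this codebase:
            //   pthread_kill(thread->pthread, Scheduler::YieldSignal); // SignalHandler runs on the target thread and performs the yield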

            /**
             * @brief Checks all cores and determines the core on which the supplied thread can be scheduled the earliest
             * @note 'KThread::coreMigrationMutex' **must** be locked by the calling thread prior to calling this
             * @note No core mutexes should be held by the calling thread, as that would cause a recursive lock and lead to a deadlock
             * @return A reference to the CoreContext of the optimal core
             */
            CoreContext &GetOptimalCoreForThread(const std::shared_ptr<type::KThread> &thread);

            /**
             * @brief Inserts the specified thread into the scheduler queue at the appropriate location based on its priority
             */
            void InsertThread(const std::shared_ptr<type::KThread> &thread);

            /**
             * @brief Waits for the calling thread to be scheduled on its resident core
             * @param loadBalance If the thread is eligible for load balancing, whether to occasionally load balance it while waiting
             * @note This assumes the thread is in its resident core's queue; if it isn't, this will never return
             */
            void WaitSchedule(bool loadBalance = true);

            /**
             * @brief Waits for the calling thread to be scheduled on its resident core or for the timeout to expire
             * @return true if the thread has been scheduled, false if the timer expired before it could be
             * @note This will never load balance as it uses the timeout itself, as a result it shouldn't be used as a replacement for regular waits
             */
            bool TimedWaitSchedule(std::chrono::nanoseconds timeout);

            /**
             * @brief Rotates the calling thread's resident core queue, if the thread is at the front of it
             * @param cooperative If this was triggered by a cooperative yield as opposed to a preemptive one
             */
            void Rotate(bool cooperative = true);

            /**
             * @brief Removes the calling thread from its resident core queue
             */
            void RemoveThread();

            /**
             * @brief Updates the placement of the supplied thread in its resident core's queue according to its new priority
             */
            void UpdatePriority(const std::shared_ptr<type::KThread> &thread);

            /**
             * @brief Updates the core that the supplied thread is resident on according to its new affinity mask and ideal core
             * @note This supports changing the core of a thread which is currently running
             */
            void UpdateCore(const std::shared_ptr<type::KThread> &thread);

            /**
             * @brief Parks the calling thread after removing it from its resident core's queue; the thread is reinserted on the core it's eventually awoken on
             * @note This will not handle waiting for the thread to be scheduled, it should be followed by a call to WaitSchedule/TimedWaitSchedule
             */
            void ParkThread();

            /**
             * @brief Wakes a single parked thread which may be appropriate for running next on this core
             * @note We will only wake a thread if it's determined to be a better pick than the thread which would otherwise run next on this core
             */
            void WakeParkedThread();
        };
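
        // A hypothetical sketch of the per-guest-thread scheduling loop this API implies; 'threadRunning' and 'RunGuestUntilSvcExit' are illustrative assumptions, not symbols from this codebase:
        //
        //   state.scheduler->InsertThread(state.thread);  // Queue the thread on its resident core based on priority
        //   while (threadRunning) {
        //       state.scheduler->WaitSchedule();          // Block until the thread reaches the front of its core's queue
        //       RunGuestUntilSvcExit();                   // Run guest code, SignalHandler may set YieldPending during this
        //       if (Scheduler::YieldPending)
        //           state.scheduler->Rotate(false);       // Service a pending non-cooperative yield at SVC exit
        //   }
        //   state.scheduler->RemoveThread();              // Drop out of the resident core's queue once the thread exits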

        /**
         * @brief A lock which removes the calling thread from its resident core's scheduler queue and adds it back when being destroyed
         * @note Its destructor also blocks until the thread has been rescheduled, this behavior might not be preferable in some cases
         * @note This is not an analogue to KScopedSchedulerLock on HOS, that's for handling thread state changes which we instead handle with Scheduler::YieldPending
         */
        struct SchedulerScopedLock {
          private:
            const DeviceState &state;

          public:
            inline SchedulerScopedLock(const DeviceState &state) : state(state) {
                state.scheduler->RemoveThread();
            }

            inline ~SchedulerScopedLock() {
                state.scheduler->InsertThread(state.thread);
                state.scheduler->WaitSchedule();
            }
        };
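
        // An illustrative usage sketch: scoping a SchedulerScopedLock around a blocking host-side operation so the guest core isn't held while we block; 'hostConditionVariable.wait(lock)' is a hypothetical stand-in for any blocking call:
        //
        //   {
        //       SchedulerScopedLock schedulerLock(state); // The constructor removes the calling thread from its core's queue
        //       hostConditionVariable.wait(lock);         // Block on the host without occupying a spot in the scheduler
        //   }                                             // The destructor reinserts the thread and waits for it to be rescheduled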
    }
}