2020-04-19 23:04:05 +02:00
// SPDX-License-Identifier: MPL-2.0
2020-03-27 20:36:02 +01:00
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
2020-03-26 15:33:19 +01:00
# include <nce.h>
# include <os.h>
2020-12-15 19:54:08 +01:00
# include <kernel/results.h>
2020-03-26 15:33:19 +01:00
# include "KProcess.h"
2019-09-24 22:54:27 +02:00
namespace skyline : : kernel : : type {
2020-10-07 17:41:13 +02:00
KProcess : : TlsPage : : TlsPage ( const std : : shared_ptr < KPrivateMemory > & memory ) : memory ( memory ) { }
2020-03-25 19:57:05 +01:00
2020-10-07 17:41:13 +02:00
u8 * KProcess : : TlsPage : : ReserveSlot ( ) {
2021-01-11 20:17:06 +01:00
if ( index = = constant : : TlsSlots )
return nullptr ;
return memory - > ptr + ( constant : : TlsSlotSize * index + + ) ;
2020-10-07 17:41:13 +02:00
}
// The base class initializer is listed first: base subobjects are constructed before members
// regardless of mem-init list order, so listing 'memory' ahead of KSyncObject only triggered
// -Wreorder and misstated the actual initialization order
KProcess::KProcess(const DeviceState &state) : KSyncObject(state, KType::KProcess), memory(state) {}
2020-11-17 01:48:41 +01:00
KProcess : : ~ KProcess ( ) {
std : : lock_guard guard ( threadMutex ) ;
disableThreadCreation = true ;
for ( const auto & thread : threads )
thread - > Kill ( true ) ;
}
void KProcess : : Kill ( bool join , bool all , bool disableCreation ) {
std : : lock_guard guard ( threadMutex ) ;
if ( disableCreation )
disableThreadCreation = true ;
if ( all ) {
for ( const auto & thread : threads )
thread - > Kill ( join ) ;
} else {
std : : shared_ptr < KThread > thread ;
try {
thread = threads . at ( 0 ) ;
} catch ( const std : : out_of_range & ) {
return ;
}
thread - > Kill ( join ) ;
}
}
2021-01-11 20:17:06 +01:00
void KProcess : : InitializeHeapTls ( ) {
2020-10-07 17:41:13 +02:00
constexpr size_t DefaultHeapSize { 0x200000 } ;
2021-01-22 04:02:01 +01:00
heap = std : : make_shared < KPrivateMemory > ( state , reinterpret_cast < u8 * > ( state . process - > memory . heap . address ) , DefaultHeapSize , memory : : Permission { true , true , false } , memory : : states : : Heap ) ;
2020-10-13 17:41:49 +02:00
InsertItem ( heap ) ; // Insert it into the handle table so GetMemoryObject will contain it
2021-01-11 20:17:06 +01:00
tlsExceptionContext = AllocateTlsSlot ( ) ;
2019-09-24 22:54:27 +02:00
}
2020-10-07 17:41:13 +02:00
u8 * KProcess : : AllocateTlsSlot ( ) {
2021-01-11 20:17:06 +01:00
std : : lock_guard lock ( tlsMutex ) ;
u8 * slot ;
2020-01-21 08:16:57 +01:00
for ( auto & tlsPage : tlsPages )
2021-01-11 20:17:06 +01:00
if ( ( slot = tlsPage - > ReserveSlot ( ) ) )
return slot ;
2020-03-25 19:57:05 +01:00
2021-01-11 20:17:06 +01:00
slot = tlsPages . empty ( ) ? reinterpret_cast < u8 * > ( memory . tlsIo . address ) : ( ( * ( tlsPages . end ( ) - 1 ) ) - > memory - > ptr + PAGE_SIZE ) ;
auto tlsPage { std : : make_shared < TlsPage > ( std : : make_shared < KPrivateMemory > ( state , slot , PAGE_SIZE , memory : : Permission ( true , true , false ) , memory : : states : : ThreadLocal ) ) } ;
2020-10-07 17:41:13 +02:00
tlsPages . push_back ( tlsPage ) ;
2019-09-24 22:54:27 +02:00
return tlsPage - > ReserveSlot ( ) ;
}
2020-12-10 20:36:00 +01:00
std : : shared_ptr < KThread > KProcess : : CreateThread ( void * entry , u64 argument , void * stackTop , std : : optional < u8 > priority , std : : optional < u8 > idealCore ) {
2020-11-17 01:48:41 +01:00
std : : lock_guard guard ( threadMutex ) ;
if ( disableThreadCreation )
return nullptr ;
if ( ! stackTop & & threads . empty ( ) ) { //!< Main thread stack is created by the kernel and owned by the process
2021-01-22 04:02:01 +01:00
mainThreadStack = std : : make_shared < KPrivateMemory > ( state , reinterpret_cast < u8 * > ( state . process - > memory . stack . address ) , state . process - > npdm . meta . mainThreadStackSize , memory : : Permission { true , true , false } , memory : : states : : Stack ) ;
2020-10-17 13:38:27 +02:00
if ( mprotect ( mainThreadStack - > ptr , PAGE_SIZE , PROT_NONE ) )
throw exception ( " Failed to create guard page for thread stack at 0x{:X} " , mainThreadStack - > ptr ) ;
stackTop = mainThreadStack - > ptr + mainThreadStack - > size ;
}
2020-12-10 20:36:00 +01:00
auto thread { NewHandle < KThread > ( this , threads . size ( ) , entry , argument , stackTop , priority ? * priority : state . process - > npdm . meta . mainThreadPriority , idealCore ? * idealCore : state . process - > npdm . meta . idealCore ) . item } ;
2020-11-17 01:48:41 +01:00
threads . push_back ( thread ) ;
2020-10-04 18:40:52 +02:00
return thread ;
2020-02-03 03:12:24 +01:00
}
2020-10-07 17:41:13 +02:00
std : : optional < KProcess : : HandleOut < KMemory > > KProcess : : GetMemoryObject ( u8 * ptr ) {
std : : shared_lock lock ( handleMutex ) ;
2020-09-25 16:35:10 +02:00
for ( KHandle index { } ; index < handles . size ( ) ; index + + ) {
2020-10-07 17:41:13 +02:00
auto & object { handles [ index ] } ;
2020-10-13 17:41:49 +02:00
if ( object ) {
switch ( object - > objectType ) {
case type : : KType : : KPrivateMemory :
case type : : KType : : KSharedMemory :
case type : : KType : : KTransferMemory : {
auto mem { std : : static_pointer_cast < type : : KMemory > ( object ) } ;
if ( mem - > IsInside ( ptr ) )
return std : : make_optional < KProcess : : HandleOut < KMemory > > ( { mem , constant : : BaseHandleIndex + index } ) ;
}
default :
break ;
2020-01-21 08:16:57 +01:00
}
}
}
2020-02-01 16:51:32 +01:00
return std : : nullopt ;
}
constexpr u32 HandleWaitersBit{1UL << 30}; //!< A bit which denotes if a mutex pseudo-handle has waiters or not
2020-12-20 15:09:36 +01:00
Result KProcess : : MutexLock ( u32 * mutex , KHandle ownerHandle , KHandle tag ) {
2020-12-15 19:54:08 +01:00
std : : shared_ptr < KThread > owner ;
try {
2020-12-20 15:09:36 +01:00
owner = GetHandle < KThread > ( ownerHandle ) ;
2020-12-15 19:54:08 +01:00
} catch ( const std : : out_of_range & ) {
return result : : InvalidHandle ;
2020-04-02 08:39:24 +02:00
}
2020-12-15 19:54:08 +01:00
bool isHighestPriority ;
{
std : : lock_guard lock ( owner - > waiterMutex ) ;
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
u32 value { } ;
2020-12-20 15:09:36 +01:00
if ( __atomic_compare_exchange_n ( mutex , & value , tag , false , __ATOMIC_SEQ_CST , __ATOMIC_SEQ_CST ) )
2020-12-15 19:54:08 +01:00
// We try to do a CAS to get ownership of the mutex in the case that it's unoccupied
return { } ;
2020-12-20 15:09:36 +01:00
if ( value ! = ( ownerHandle | HandleWaitersBit ) )
2020-12-15 19:54:08 +01:00
// We ensure that the mutex's value is the handle with the waiter bit set
return result : : InvalidCurrentMemory ;
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
auto & waiters { owner - > waiters } ;
isHighestPriority = waiters . insert ( std : : upper_bound ( waiters . begin ( ) , waiters . end ( ) , state . thread - > priority . load ( ) , KThread : : IsHigherPriority ) , state . thread ) = = waiters . begin ( ) ;
state . scheduler - > RemoveThread ( ) ;
2021-03-04 14:30:14 +01:00
state . thread - > waitThread = owner ;
2020-12-15 19:54:08 +01:00
state . thread - > waitKey = mutex ;
2020-12-20 15:09:36 +01:00
state . thread - > waitTag = tag ;
2020-02-01 16:51:32 +01:00
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
if ( isHighestPriority ) {
// If we were the highest priority thread then we need to inherit priorities for all threads we're waiting on recursively
do {
u8 priority , ownerPriority ;
do {
// Try to CAS the priority of the owner with the current thread
2021-01-11 20:41:21 +01:00
// If the new priority is equivalent to the current priority then we don't need to CAS
2020-12-15 19:54:08 +01:00
ownerPriority = owner - > priority . load ( ) ;
priority = std : : min ( ownerPriority , state . thread - > priority . load ( ) ) ;
} while ( ownerPriority ! = priority & & owner - > priority . compare_exchange_strong ( ownerPriority , priority ) ) ;
if ( ownerPriority ! = priority ) {
2021-03-04 14:30:14 +01:00
std : : shared_ptr < KThread > waitThread ;
{
2020-12-15 19:54:08 +01:00
std : : lock_guard lock ( waitThread - > waiterMutex ) ;
2021-03-04 14:30:14 +01:00
waitThread = owner - > waitThread ;
2020-12-15 19:54:08 +01:00
// We need to update the location of the owner thread in the waiter queue of the thread it's waiting on
auto & waiters { waitThread - > waiters } ;
waiters . erase ( std : : find ( waiters . begin ( ) , waiters . end ( ) , waitThread ) ) ;
waiters . insert ( std : : upper_bound ( waiters . begin ( ) , waiters . end ( ) , state . thread - > priority . load ( ) , KThread : : IsHigherPriority ) , owner ) ;
break ;
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
state . scheduler - > UpdatePriority ( owner ) ;
owner = waitThread ;
} else {
break ;
}
} while ( owner ) ;
2020-02-15 10:38:17 +01:00
}
2020-03-25 19:57:05 +01:00
2020-12-20 15:09:36 +01:00
state . scheduler - > WaitSchedule ( false ) ;
2020-02-01 16:51:32 +01:00
2020-12-15 19:54:08 +01:00
return { } ;
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
void KProcess : : MutexUnlock ( u32 * mutex ) {
std : : lock_guard lock ( state . thread - > waiterMutex ) ;
auto & waiters { state . thread - > waiters } ;
auto nextOwnerIt { std : : find_if ( waiters . begin ( ) , waiters . end ( ) , [ mutex ] ( const std : : shared_ptr < KThread > & thread ) { return thread - > waitKey = = mutex ; } ) } ;
if ( nextOwnerIt ! = waiters . end ( ) ) {
auto nextOwner { * nextOwnerIt } ;
std : : lock_guard nextLock ( nextOwner - > waiterMutex ) ;
2021-03-04 14:30:14 +01:00
nextOwner - > waitThread = std : : shared_ptr < KThread > { nullptr } ;
2020-12-15 19:54:08 +01:00
nextOwner - > waitKey = nullptr ;
// Move all threads waiting on this key to the next owner's waiter list
std : : shared_ptr < KThread > nextWaiter { } ;
2020-12-20 15:09:36 +01:00
for ( auto it { waiters . erase ( nextOwnerIt ) } , nextIt { std : : next ( it ) } ; it ! = waiters . end ( ) ; it = nextIt + + ) {
auto thread { * it } ;
if ( thread - > waitKey = = mutex ) {
2020-12-15 19:54:08 +01:00
nextOwner - > waiters . splice ( std : : upper_bound ( nextOwner - > waiters . begin ( ) , nextOwner - > waiters . end ( ) , ( * it ) - > priority . load ( ) , KThread : : IsHigherPriority ) , waiters , it ) ;
2021-03-04 14:30:14 +01:00
thread - > waitThread = nextOwner ;
2020-12-15 19:54:08 +01:00
if ( ! nextWaiter )
2020-12-20 15:09:36 +01:00
nextWaiter = thread ;
2020-12-15 19:54:08 +01:00
}
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
if ( ! waiters . empty ( ) ) {
// If there are threads still waiting on us then try to inherit their priority
2021-01-11 20:41:21 +01:00
auto highestPriorityThread { waiters . front ( ) } ;
u8 newPriority , basePriority ;
2020-12-15 19:54:08 +01:00
do {
2021-01-11 20:41:21 +01:00
basePriority = state . thread - > basePriority . load ( ) ;
newPriority = std : : min ( basePriority , highestPriorityThread - > priority . load ( ) ) ;
} while ( basePriority ! = newPriority & & state . thread - > priority . compare_exchange_strong ( basePriority , newPriority ) ) ;
2020-12-15 19:54:08 +01:00
state . scheduler - > UpdatePriority ( state . thread ) ;
2021-01-11 20:41:21 +01:00
} else {
u8 priority , basePriority ;
do {
basePriority = state . thread - > basePriority . load ( ) ;
priority = state . thread - > priority . load ( ) ;
} while ( priority ! = basePriority & & ! state . thread - > priority . compare_exchange_strong ( priority , basePriority ) ) ;
if ( priority ! = basePriority )
state . scheduler - > UpdatePriority ( state . thread ) ;
2020-12-15 19:54:08 +01:00
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
if ( nextWaiter ) {
2021-03-04 14:30:14 +01:00
// If there is a waiter on the new owner then try to inherit its priority
2020-12-15 19:54:08 +01:00
u8 priority , ownerPriority ;
do {
ownerPriority = nextOwner - > priority . load ( ) ;
priority = std : : min ( ownerPriority , nextWaiter - > priority . load ( ) ) ;
} while ( ownerPriority ! = priority & & nextOwner - > priority . compare_exchange_strong ( ownerPriority , priority ) ) ;
2020-12-20 15:09:36 +01:00
__atomic_store_n ( mutex , nextOwner - > waitTag | HandleWaitersBit , __ATOMIC_SEQ_CST ) ;
2020-12-15 19:54:08 +01:00
} else {
2020-12-20 15:09:36 +01:00
__atomic_store_n ( mutex , nextOwner - > waitTag , __ATOMIC_SEQ_CST ) ;
2020-12-15 19:54:08 +01:00
}
2020-03-25 19:57:05 +01:00
2020-12-15 19:54:08 +01:00
// Finally, schedule the next owner accordingly
2020-12-20 15:09:36 +01:00
state . scheduler - > InsertThread ( nextOwner ) ;
2020-12-15 19:54:08 +01:00
} else {
__atomic_store_n ( mutex , 0 , __ATOMIC_SEQ_CST ) ;
2020-02-05 07:42:53 +01:00
}
2020-12-20 15:09:36 +01:00
}
/**
 * @brief Blocks the calling thread on a conditional variable key after releasing the associated mutex,
 * then re-acquires the mutex once signalled or timed out
 * @param key Pointer to the userspace conditional variable key
 * @param mutex Pointer to the userspace mutex word guarded by the conditional variable
 * @param tag The handle-value written into the mutex when it is re-acquired
 * @param timeout Wait timeout in nanoseconds; a non-positive value waits indefinitely
 * @return An empty Result on success or result::TimedOut when the timeout expired
 */
Result KProcess::ConditionalVariableWait(u32 *key, u32 *mutex, KHandle tag, i64 timeout) {
    {
        std::lock_guard lock(syncWaiterMutex);
        // Queue ourselves on the key, keeping the per-key waiter list sorted by priority
        auto queue{syncWaiters.equal_range(key)};
        syncWaiters.insert(std::upper_bound(queue.first, queue.second, state.thread->priority.load(), [](const i8 priority, const SyncWaiters::value_type &it) { return it.second->priority > priority; }), {key, state.thread});

        __atomic_store_n(key, true, __ATOMIC_SEQ_CST); // We need to notify any userspace threads that there are waiters on this conditional variable by writing back a boolean flag denoting it

        // Deschedule before releasing the mutex so a signaller cannot race us back onto the run queue
        state.scheduler->RemoveThread();
        MutexUnlock(mutex);
    }

    if (timeout > 0 && !state.scheduler->TimedWaitSchedule(std::chrono::nanoseconds(timeout))) {
        // Timed out: remove ourselves from the waiter queue if we weren't already signalled
        std::unique_lock lock(syncWaiterMutex);
        auto queue{syncWaiters.equal_range(key)};
        auto iterator{std::find(queue.first, queue.second, SyncWaiters::value_type{key, state.thread})};
        if (iterator != queue.second)
            // If we were the last waiter on the key then clear the userspace waiter flag
            if (syncWaiters.erase(iterator) == queue.second)
                __atomic_store_n(key, false, __ATOMIC_SEQ_CST);

        lock.unlock();
        state.scheduler->InsertThread(state.thread);
        state.scheduler->WaitSchedule();

        return result::TimedOut;
    } else {
        state.scheduler->WaitSchedule(false);
    }

    // Re-acquire the mutex: fast-path CAS on an unowned mutex, otherwise loop on MutexLock while
    // setting the waiter bit; the inner CAS grabs the mutex if it was released in the interim
    KHandle value{};
    if (!__atomic_compare_exchange_n(mutex, &value, tag, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        while (MutexLock(mutex, value & ~HandleWaitersBit, tag) != Result{})
            if ((value = __atomic_or_fetch(mutex, HandleWaitersBit, __ATOMIC_SEQ_CST)) == HandleWaitersBit)
                // Only the waiter bit is set, so the mutex is free and we can claim it
                if (__atomic_compare_exchange_n(mutex, &value, tag, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                    break;
    return {};
}
2021-02-21 19:40:18 +01:00
void KProcess : : ConditionalVariableSignal ( u32 * key , i32 amount ) {
2020-12-22 14:38:37 +01:00
std : : lock_guard lock ( syncWaiterMutex ) ;
2020-12-20 15:09:36 +01:00
auto queue { syncWaiters . equal_range ( key ) } ;
2021-02-21 19:40:18 +01:00
2020-12-20 15:09:36 +01:00
auto it { queue . first } ;
2021-03-04 14:30:14 +01:00
for ( i32 waiterCount { amount } ; it ! = queue . second & & ( amount < = 0 | | waiterCount ) ; it = syncWaiters . erase ( it ) , waiterCount - - )
state . scheduler - > InsertThread ( it - > second ) ;
2021-02-21 19:40:18 +01:00
2020-12-20 15:09:36 +01:00
if ( it = = queue . second )
2020-12-22 14:38:37 +01:00
__atomic_store_n ( key , false , __ATOMIC_SEQ_CST ) ; // We need to update the boolean flag denoting that there are no more threads waiting on this conditional variable
Framebuffer and NativeActivity
What was added:
* Framebuffer
* NativeActivity
* NV Services
* IOCTL Handler
* NV Devices:
* * /dev/nvmap - 0xC0080101, 0xC0080103, 0xC0200104, 0xC0180105, 0xC00C0109, 0xC008010E
* * /dev/nvhost-as-gpu
* * /dev/nvhost-channel - 0x40044801, 0xC0104809, 0xC010480B, 0xC018480C, 0x4004480D, 0xC020481A, 0x40084714
* * /dev/nvhost-ctrl
* * /dev/nvhost-ctrl-gpu - 0x80044701, 0x80284702, 0xC0184706, 0xC0B04705, 0x80084714
* SVCs:
* * SetMemoryAttribute
* * CreateTransferMemory
* * ResetSignal
* * GetSystemTick
* Addition of Compact Logger
What was fixed:
* SVCs:
* * SetHeapSize
* * SetMemoryAttribute
* * QueryMemory
* A release build would not set CMAKE_BUILD_TYPE to "RELEASE"
* The logger code was simplified
2019-11-13 21:09:31 +01:00
}
/**
 * @brief Blocks the calling thread on an address until it is signalled, provided the arbitration
 * predicate holds for the current value at the address
 * @param address Pointer to the userspace word being arbitrated
 * @param value The value the arbitration predicate compares against
 * @param timeout Wait timeout in nanoseconds; a non-positive value waits indefinitely
 * @param arbitrationFunction Predicate deciding whether the thread should wait given (*address, value)
 * @return An empty Result on success, result::InvalidState when the predicate fails, or result::TimedOut
 */
Result KProcess::WaitForAddress(u32 *address, u32 value, i64 timeout, bool (*arbitrationFunction)(u32 *, u32)) {
    {
        std::lock_guard lock(syncWaiterMutex);
        if (!arbitrationFunction(address, value)) [[unlikely]]
            return result::InvalidState;

        // Queue ourselves on the address, keeping the per-address waiter list sorted by priority
        auto queue{syncWaiters.equal_range(address)};
        syncWaiters.insert(std::upper_bound(queue.first, queue.second, state.thread->priority.load(), [](const i8 priority, const SyncWaiters::value_type &it) { return it.second->priority > priority; }), {address, state.thread});

        state.scheduler->RemoveThread();
    }

    if (timeout > 0 && !state.scheduler->TimedWaitSchedule(std::chrono::nanoseconds(timeout))) {
        {
            // Timed out: remove ourselves from the waiter queue if we weren't already signalled
            std::lock_guard lock(syncWaiterMutex);
            auto queue{syncWaiters.equal_range(address)};
            auto iterator{std::find(queue.first, queue.second, SyncWaiters::value_type{address, state.thread})};
            if (iterator != queue.second)
                if (syncWaiters.erase(iterator) == queue.second)
                    // NOTE(review): clearing the word to false mirrors the condvar-key path but address
                    // arbitration does not use a waiter flag — confirm this store is intended here
                    __atomic_store_n(address, false, __ATOMIC_SEQ_CST);
        }

        state.scheduler->InsertThread(state.thread);
        state.scheduler->WaitSchedule();
        return result::TimedOut;
    } else {
        state.scheduler->WaitSchedule(false);
    }

    return {};
}
/**
 * @brief Signals an arbitrated address, optionally mutating its value first, and wakes up to 'amount' waiters
 * @param address Pointer to the userspace word being arbitrated
 * @param value The value passed through to the mutate function
 * @param amount The maximum number of waiters to wake; a non-positive value wakes every waiter
 * @param mutateFunction Optional mutation applied to the address given the count of waiters that will remain
 * @return An empty Result on success or result::InvalidState when the mutation rejects the operation
 */
Result KProcess::SignalToAddress(u32 *address, u32 value, i32 amount, bool (*mutateFunction)(u32 *address, u32 value, u32 waiterCount)) {
    std::lock_guard lock(syncWaiterMutex);
    auto queue{syncWaiters.equal_range(address)};

    if (mutateFunction)
        // FIX: pass the number of waiters that will remain *after* signalling, clamped at zero;
        // the original's std::min(static_cast<u32>(distance - amount), 0U) was unconditionally 0
        // (an unsigned min with 0), so the mutation always saw a waiter count of zero
        if (!mutateFunction(address, value, (amount <= 0) ? 0 : static_cast<u32>(std::max<i64>(static_cast<i64>(std::distance(queue.first, queue.second)) - amount, 0)))) [[unlikely]]
            return result::InvalidState;

    i32 waiterCount{amount};
    for (auto it{queue.first}; it != queue.second && (amount <= 0 || waiterCount); it = syncWaiters.erase(it), waiterCount--)
        state.scheduler->InsertThread(it->second);

    return {};
}
2019-09-24 22:54:27 +02:00
}