// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
# include "memory.h"
# include "types/KProcess.h"
namespace skyline : : kernel {
    // Only stores a reference to the emulator state; address space setup is deferred to InitializeVmm()/InitializeRegions()
    MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}
2020-11-03 10:44:09 +01:00
MemoryManager : : ~ MemoryManager ( ) {
if ( base . address & & base . size )
munmap ( reinterpret_cast < void * > ( base . address ) , base . size ) ;
}
    constexpr size_t RegionAlignment{1ULL << 21}; //!< The minimum alignment of a HOS memory region (2MiB)
2020-10-07 17:41:13 +02:00
void MemoryManager : : InitializeVmm ( memory : : AddressSpaceType type ) {
2020-02-05 19:37:45 +01:00
switch ( type ) {
2020-01-21 08:16:57 +01:00
case memory : : AddressSpaceType : : AddressSpace32Bit :
2020-10-21 19:09:35 +02:00
case memory : : AddressSpaceType : : AddressSpace32BitNoReserved :
2020-01-21 08:16:57 +01:00
throw exception ( " 32-bit address spaces are not supported " ) ;
2020-03-25 19:57:05 +01:00
2020-01-21 08:16:57 +01:00
case memory : : AddressSpaceType : : AddressSpace36Bit : {
2020-07-05 22:21:08 +02:00
addressSpace . address = 0 ;
addressSpace . size = 1UL < < 36 ;
2020-10-20 11:22:15 +02:00
base . size = 0x78000000 + 0x180000000 + 0x78000000 + 0x180000000 ;
throw exception ( " 36-bit address spaces are not supported " ) ; // Due to VMM base being forced at 0x800000 and it being used by ART
2020-10-07 17:41:13 +02:00
}
case memory : : AddressSpaceType : : AddressSpace39Bit : {
addressSpace . address = 0 ;
addressSpace . size = 1UL < < 39 ;
2020-10-15 10:10:13 +02:00
base . size = 0x78000000 + 0x1000000000 + 0x180000000 + 0x80000000 + 0x1000000000 ; // Code region size is an assumed maximum here
2020-10-07 17:41:13 +02:00
break ;
}
default :
throw exception ( " VMM initialization with unknown address space " ) ;
}
2020-10-28 17:00:39 +01:00
// Search for a suitable carveout in host AS to fit the guest AS inside of
2020-10-13 22:43:52 +02:00
std : : ifstream mapsFile ( " /proc/self/maps " ) ;
std : : string maps ( ( std : : istreambuf_iterator < char > ( mapsFile ) ) , std : : istreambuf_iterator < char > ( ) ) ;
2020-10-28 17:00:39 +01:00
size_t line { } , start { 1ULL < < 35 } , alignedStart { 1ULL < < 35 } ; // 1 << 35 is where QC KGSL (Kernel Graphic Support Layer) maps down from, we skip over this or KGSL goes OOM
2020-10-13 22:43:52 +02:00
do {
auto end { util : : HexStringToInt < u64 > ( std : : string_view ( maps . data ( ) + line , sizeof ( u64 ) * 2 ) ) } ;
2020-10-28 17:00:39 +01:00
if ( end < start )
continue ;
2020-10-13 22:43:52 +02:00
if ( end - start > base . size + ( alignedStart - start ) ) { // We don't want to overflow if alignedStart > start
base . address = alignedStart ;
break ;
}
start = util : : HexStringToInt < u64 > ( std : : string_view ( maps . data ( ) + maps . find_first_of ( ' - ' , line ) + 1 , sizeof ( u64 ) * 2 ) ) ;
2020-10-15 10:10:13 +02:00
alignedStart = util : : AlignUp ( start , RegionAlignment ) ;
2020-10-13 22:43:52 +02:00
if ( alignedStart + base . size > addressSpace . size )
break ;
} while ( ( line = maps . find_first_of ( ' \n ' , line ) ) ! = std : : string : : npos & & line + + ) ;
if ( ! base . address )
throw exception ( " Cannot find a suitable carveout for the guest address space " ) ;
2020-10-15 10:10:13 +02:00
mmap ( reinterpret_cast < void * > ( base . address ) , base . size , PROT_NONE , MAP_ANONYMOUS | MAP_PRIVATE , - 1 , 0 ) ;
2020-10-13 22:43:52 +02:00
2020-10-28 17:00:39 +01:00
chunks = {
ChunkDescriptor {
. ptr = reinterpret_cast < u8 * > ( addressSpace . address ) ,
. size = base . address - addressSpace . address ,
. state = memory : : states : : Reserved ,
} ,
ChunkDescriptor {
. ptr = reinterpret_cast < u8 * > ( base . address ) ,
. size = base . size ,
. state = memory : : states : : Unmapped ,
} ,
ChunkDescriptor {
. ptr = reinterpret_cast < u8 * > ( base . address + base . size ) ,
. size = addressSpace . size - ( base . address + base . size ) ,
. state = memory : : states : : Reserved ,
} } ;
2020-10-07 17:41:13 +02:00
}
void MemoryManager : : InitializeRegions ( u8 * codeStart , u64 size ) {
u64 address { reinterpret_cast < u64 > ( codeStart ) } ;
2020-10-15 10:10:13 +02:00
if ( ! util : : IsAligned ( address , RegionAlignment ) )
throw exception ( " Non-aligned code region was used to initialize regions: 0x{:X} - 0x{:X} " , codeStart , codeStart + size ) ;
2020-10-07 17:41:13 +02:00
switch ( addressSpace . size ) {
case 1UL < < 36 : {
2020-10-20 11:22:15 +02:00
code . address = 0x800000 ;
2020-01-21 08:16:57 +01:00
code . size = 0x78000000 ;
2020-02-05 19:37:45 +01:00
if ( code . address > address | | ( code . size - ( address - code . address ) ) < size )
2020-01-21 08:16:57 +01:00
throw exception ( " Code mapping larger than 36-bit code region " ) ;
alias . address = code . address + code . size ;
alias . size = 0x180000000 ;
2020-10-20 11:22:15 +02:00
stack . address = alias . address + alias . size ;
stack . size = 0x78000000 ;
tlsIo = stack ; //!< TLS/IO is shared with Stack on 36-bit
heap . address = stack . address + stack . size ;
2020-01-21 08:16:57 +01:00
heap . size = 0x180000000 ;
break ;
}
2020-03-25 19:57:05 +01:00
2020-10-07 17:41:13 +02:00
case 1UL < < 39 : {
2020-10-13 22:43:52 +02:00
code . address = base . address ;
2020-10-15 10:10:13 +02:00
code . size = util : : AlignUp ( size , RegionAlignment ) ;
2020-01-21 08:16:57 +01:00
alias . address = code . address + code . size ;
alias . size = 0x1000000000 ;
heap . address = alias . address + alias . size ;
heap . size = 0x180000000 ;
stack . address = heap . address + heap . size ;
stack . size = 0x80000000 ;
tlsIo . address = stack . address + stack . size ;
tlsIo . size = 0x1000000000 ;
break ;
}
2020-03-25 19:57:05 +01:00
2020-10-07 17:41:13 +02:00
default :
throw exception ( " Regions initialized without VMM initialization " ) ;
}
2020-10-04 18:40:52 +02:00
2020-10-20 11:22:15 +02:00
auto newSize { code . size + alias . size + stack . size + heap . size + ( ( addressSpace . size = = 1UL < < 39 ) ? tlsIo . size : 0 ) } ;
2020-10-15 10:10:13 +02:00
if ( newSize > base . size )
throw exception ( " Region size has exceeded pre-allocated area: 0x{:X}/0x{:X} " , newSize , base . size ) ;
if ( newSize ! = base . size )
munmap ( reinterpret_cast < u8 * > ( base . address ) + base . size , newSize - base . size ) ;
2020-10-13 22:43:52 +02:00
if ( size > code . size )
throw exception ( " Code region ({}) is smaller than mapped code size ( { } ) " , code.size, size);
state . logger - > Debug ( " Region Map: \n VMM Base: 0x{:X} \n Code Region: 0x{:X} - 0x{:X} (Size: 0x{:X}) \n Alias Region: 0x{:X} - 0x{:X} (Size: 0x{:X}) \n Heap Region: 0x{:X} - 0x{:X} (Size: 0x{:X}) \n Stack Region: 0x{:X} - 0x{:X} (Size: 0x{:X}) \n TLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X}) " , base . address , code . address , code . address + code . size , code . size , alias . address , alias . address + alias . size , alias . size , heap . address , heap
2020-02-05 19:37:45 +01:00
. address + heap . size , heap . size , stack . address , stack . address + stack . size , stack . size , tlsIo . address , tlsIo . address + tlsIo . size , tlsIo . size ) ;
2020-01-21 08:16:57 +01:00
}
    /**
     * @brief Inserts a chunk descriptor into the sorted chunk list, splitting and merging neighbors so the list stays sorted and non-overlapping
     */
    void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
        std::unique_lock lock(mutex);

        // Find the first existing chunk that starts strictly after the new chunk's start
        auto upper{std::upper_bound(chunks.begin(), chunks.end(), chunk.ptr, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
        if (upper == chunks.begin())
            throw exception("InsertChunk: Chunk inserted outside address space: 0x{:X} - 0x{:X} and 0x{:X} - 0x{:X}", upper->ptr, upper->ptr + upper->size, chunk.ptr, chunk.ptr + chunk.size);

        // Erase every following chunk whose end lies within the new chunk's span
        upper = chunks.erase(upper, std::upper_bound(upper, chunks.end(), chunk.ptr + chunk.size, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr + chunk.size; }));
        // If the next surviving chunk overlaps the new chunk's tail, trim its front to start where the new chunk ends
        if (upper != chunks.end() && upper->ptr < chunk.ptr + chunk.size) {
            auto end{upper->ptr + upper->size};
            upper->ptr = chunk.ptr + chunk.size;
            upper->size = end - upper->ptr;
        }

        auto lower{std::prev(upper)};
        if (lower->ptr == chunk.ptr && lower->size == chunk.size) {
            // Exact span match: update the existing chunk's attributes in place
            lower->state = chunk.state;
            lower->permission = chunk.permission;
            lower->attributes = chunk.attributes;
        } else if (lower->ptr + lower->size > chunk.ptr + chunk.size) {
            // The new chunk is strictly inside 'lower': split 'lower' around it
            auto lowerExtension{*lower};
            lowerExtension.ptr = chunk.ptr + chunk.size;
            lowerExtension.size = (lower->ptr + lower->size) - lowerExtension.ptr;

            lower->size = chunk.ptr - lower->ptr;
            if (lower->size) {
                // 'lower' keeps its head; insert the tail extension then the new chunk before it
                upper = chunks.insert(upper, lowerExtension);
                chunks.insert(upper, chunk);
            } else {
                // 'lower' starts exactly at the new chunk; try merging the new chunk into its predecessor
                auto lower2{std::prev(lower)};
                if (chunk.IsCompatible(*lower2) && lower2->ptr + lower2->size >= chunk.ptr) {
                    lower2->size = chunk.ptr + chunk.size - lower2->ptr;
                    upper = chunks.erase(lower);
                } else {
                    *lower = chunk;
                }
                upper = chunks.insert(upper, lowerExtension);
            }
        } else if (chunk.IsCompatible(*lower) && lower->ptr + lower->size >= chunk.ptr) {
            // Extend a compatible adjacent/overlapping predecessor over the new chunk's span
            lower->size = chunk.ptr + chunk.size - lower->ptr;
        } else {
            // Trim the predecessor if it overlaps the new chunk's front
            if (lower->ptr + lower->size > chunk.ptr)
                lower->size = chunk.ptr - lower->ptr;
            if (upper != chunks.end() && chunk.IsCompatible(*upper) && chunk.ptr + chunk.size >= upper->ptr) {
                // Extend a compatible adjacent/overlapping successor backwards over the new chunk's span
                upper->ptr = chunk.ptr;
                upper->size = chunk.size + upper->size;
            } else {
                chunks.insert(upper, chunk);
            }
        }
    }
2020-07-05 22:21:08 +02:00
2020-10-07 17:41:13 +02:00
std : : optional < ChunkDescriptor > MemoryManager : : Get ( void * ptr ) {
2020-10-04 18:40:52 +02:00
std : : shared_lock lock ( mutex ) ;
2020-07-05 22:21:08 +02:00
2020-10-04 18:40:52 +02:00
auto chunk { std : : upper_bound ( chunks . begin ( ) , chunks . end ( ) , reinterpret_cast < u8 * > ( ptr ) , [ ] ( const u8 * ptr , const ChunkDescriptor & chunk ) - > bool { return ptr < chunk . ptr ; } ) } ;
if ( chunk - - ! = chunks . begin ( ) )
if ( ( chunk - > ptr + chunk - > size ) > ptr )
return std : : make_optional ( * chunk ) ;
2020-07-05 22:21:08 +02:00
2020-01-21 08:16:57 +01:00
return std : : nullopt ;
}
2020-10-17 13:38:27 +02:00
size_t MemoryManager : : GetMemoryUsage ( ) {
2020-10-20 11:22:15 +02:00
std : : shared_lock lock ( mutex ) ;
2020-09-26 07:17:57 +02:00
size_t size { } ;
2020-09-28 12:05:17 +02:00
for ( const auto & chunk : chunks )
2020-10-10 17:53:14 +02:00
if ( chunk . state ! = memory : : states : : Unmapped )
size + = chunk . size ;
2020-01-21 08:16:57 +01:00
return size ;
}
2020-10-20 11:22:15 +02:00
size_t MemoryManager : : GetKMemoryBlockSize ( ) {
std : : shared_lock lock ( mutex ) ;
constexpr size_t KMemoryBlockSize { 0x40 } ;
return util : : AlignUp ( chunks . size ( ) * KMemoryBlockSize , PAGE_SIZE ) ;
}
2020-01-21 08:16:57 +01:00
}