2020-05-29 19:25:05 +02:00
# include "memory_mapping.h"
# include <coreinit/cache.h>
# include <coreinit/memexpheap.h>
2022-02-02 18:34:27 +01:00
# include <coreinit/memorymap.h>
2020-05-29 19:25:05 +02:00
# include <coreinit/thread.h>
# include "CThread.h"
2022-02-02 18:34:27 +01:00
# include "logger.h"
2022-02-11 20:32:39 +01:00
# include <coreinit/mutex.h>
2021-09-24 16:51:11 +02:00
# include <cstring>
2022-02-02 18:34:27 +01:00
# include <vector>
2020-05-29 19:25:05 +02:00
// #define DEBUG_FUNCTION_LINE(x,...)
2024-05-03 22:22:55 +02:00
//OSMutex allocMutex;
2022-02-11 20:32:39 +01:00
2020-05-29 19:25:05 +02:00
void runOnAllCores ( CThread : : Callback callback , void * callbackArg , int32_t iAttr = 0 , int32_t iPriority = 16 , int32_t iStackSize = 0x8000 ) {
2020-06-03 18:36:02 +02:00
int32_t aff [ ] = { CThread : : eAttributeAffCore2 , CThread : : eAttributeAffCore1 , CThread : : eAttributeAffCore0 } ;
2020-05-29 19:25:05 +02:00
2022-02-02 18:34:27 +01:00
for ( int i : aff ) {
2022-01-21 19:52:35 +01:00
CThread thread ( iAttr | i , iPriority , iStackSize , callback , callbackArg ) ;
thread . resumeThread ( ) ;
2020-05-29 19:25:05 +02:00
}
}
2024-04-19 18:24:51 +02:00
/**
 * Writes a single 32-bit value to the given effective address through a
 * kernel physical-to-physical copy (works even for memory the current
 * process cannot write directly).
 *
 * @param addr  effective (virtual) target address
 * @param value value to write
 */
void KernelWriteU32(uint32_t addr, uint32_t value) {
    // Push the stack copy of "value" out to main memory so the physical copy
    // below reads the current data instead of a stale cache line.
    ICInvalidateRange(&value, 4);
    DCFlushRange(&value, 4);

    // Translate both source (our stack variable) and destination to physical
    // addresses for the kernel copy.
    auto dst = (uint32_t) OSEffectiveToPhysical(addr);
    auto src = (uint32_t) OSEffectiveToPhysical((uint32_t) &value);

    KernelCopyData(dst, src, 4);

    // Make the write visible: flush data cache and invalidate the instruction
    // cache at the target, since this is also used to patch executable code.
    DCFlushRange((void *) addr, 4);
    ICInvalidateRange((void *) addr, 4);
}
/**
 * Writes `length` bytes from `data` to effective address `addr`, one 32-bit
 * word at a time via KernelWriteU32.
 *
 * NOTE(review): if `length` is not a multiple of 4, the final KernelWriteU32
 * writes up to 3 bytes past `addr + length` — callers appear to always pass
 * 4-byte-aligned sizes; confirm before relying on odd lengths.
 */
void KernelWrite(uint32_t addr, const void *data, uint32_t length) {
    // This is a hacky workaround, but currently it only works this way. ("data" is always on the stack, so maybe a problem with mapping values from the JIT area?)
    // further testing required.
    for (uint32_t i = 0; i < length; i += 4) {
        KernelWriteU32(addr + i, *(uint32_t *) (((uint32_t) data) + i));
    }
}
/*
static void SCSetupIBAT4DBAT5 ( ) {
asm volatile ( " sync; eieio; isync " ) ;
// Give our and the kernel full execution rights.
// 00800000-01000000 => 30800000-31000000 (read/write, user/supervisor)
unsigned int ibat4u = 0x008000FF ;
unsigned int ibat4l = 0x30800012 ;
asm volatile ( " mtspr 560, %0 " : : " r " ( ibat4u ) ) ;
asm volatile ( " mtspr 561, %0 " : : " r " ( ibat4l ) ) ;
// Give our and the kernel full data access rights.
// 00800000-01000000 => 30800000-31000000 (read/write, user/supervisor)
unsigned int dbat5u = ibat4u ;
unsigned int dbat5l = ibat4l ;
asm volatile ( " mtspr 570, %0 " : : " r " ( dbat5u ) ) ;
asm volatile ( " mtspr 571, %0 " : : " r " ( dbat5l ) ) ;
asm volatile ( " eieio; isync " ) ;
}
*/
// Pre-assembled machine code of the commented-out SCSetupIBAT4DBAT5() above:
// maps 00800000-01000000 => 30800000-31000000 (read/write, user/supervisor)
// through IBAT4 (SPR 560/561) and DBAT5 (SPR 570/571).
const uint32_t sSCSetupIBAT4DBAT5Buffer[] = {0x7c0004ac,  // sync
                                             0x7c0006ac,  // eieio
                                             0x4c00012c,  // isync
                                             0x3d400080,  // lis r10, 0x0080
                                             0x614a00ff,  // ori r10, r10, 0x00FF  -> 0x008000FF (BAT upper value)
                                             0x7d508ba6,  // mtspr 560 (IBAT4U), r10
                                             0x3d203080,  // lis r9, 0x3080
                                             0x61290012,  // ori r9, r9, 0x0012   -> 0x30800012 (BAT lower value)
                                             0x7d318ba6,  // mtspr 561 (IBAT4L), r9
                                             0x7d5a8ba6,  // mtspr 570 (DBAT5U), r10
                                             0x7d3b8ba6,  // mtspr 571 (DBAT5L), r9
                                             0x7c0006ac,  // eieio
                                             0x4c00012c,  // isync
                                             0x4e800020}; // blr
# define TARGET_ADDRESS_EXECUTABLE_MEM 0x017FF000
# define SCSetupIBAT4DBAT5_ADDRESS TARGET_ADDRESS_EXECUTABLE_MEM
// Body of the custom 0x51 syscall: sets the two low bits of SPR 528 and 530
// (presumably the Vs/Vp valid bits of IBAT0U/IBAT1U — verify against the PEM),
// then tail-jumps into the BAT setup code copied to SCSetupIBAT4DBAT5_ADDRESS.
const uint32_t sSC0x51Buffer[] = {
        0x7c7082a6,                                      // mfspr r3, 528
        0x60630003,                                      // ori r3, r3, 0x03
        0x7c7083a6,                                      // mtspr 528, r3
        0x7c7282a6,                                      // mfspr r3, 530
        0x60630003,                                      // ori r3, r3, 0x03
        0x7c7283a6,                                      // mtspr 530, r3
        0x7c0006ac,                                      // eieio
        0x4c00012c,                                      // isync
        0x3c600000 | (SCSetupIBAT4DBAT5_ADDRESS >> 16),  // lis r3, SCSetupIBAT4DBAT5@h
        0x60630000 | (SCSetupIBAT4DBAT5_ADDRESS & 0xFFFF), // ori r3, r3, SCSetupIBAT4DBAT5@l
        0x7c6903a6,                                      // mtctr r3
        0x4e800420,                                      // bctr
};
# define SC0x51Buffer_ADDRESS (SCSetupIBAT4DBAT5_ADDRESS + sizeof(sSCSetupIBAT4DBAT5Buffer))
# define SC0x51Call_ADDRESS (SC0x51Buffer_ADDRESS + sizeof(sSC0x51Buffer))
// User-mode stub used as thread entry point on each core: invokes the 0x51
// syscall installed by SetupIBAT4DBAT5OnAllCores and returns.
// (0x5100 in r0 selects syscall 0x51; exact id encoding is kernel-specific.)
const uint32_t sSC0x51CallBuffer[] = {
        0x38005100, // li %r0, 0x5100
        0x44000002, // sc
        0x4e800020  // blr
};
/**
 * Installs the IBAT4/DBAT5 mapping (0x00800000-0x01000000 -> 0x30800000-0x31000000)
 * on all three cores.
 *
 * Strategy: back up a small region that is executable on every core, copy three
 * code stubs into it (the BAT setup routine, the 0x51 syscall body, and a tiny
 * user-mode stub that issues the syscall), register the setup routine as
 * syscall 0x51, then run a short-lived thread pinned to each core whose entry
 * point is the syscall stub. Finally the clobbered bytes are restored.
 */
void SetupIBAT4DBAT5OnAllCores() {
    unsigned char backupBuffer[0x74];
    // Save the bytes we are about to overwrite (KernelWrite copies FROM the
    // target region INTO our stack buffer here).
    KernelWrite((uint32_t) backupBuffer, (void *) TARGET_ADDRESS_EXECUTABLE_MEM, sizeof(backupBuffer));

    // Compile-time layout checks: all three stubs must fit inside (and stay
    // within) the backed-up region, and must not overlap each other.
    static_assert(sizeof(backupBuffer) >= (sizeof(sSC0x51Buffer) + sizeof(sSCSetupIBAT4DBAT5Buffer) + sizeof(sSC0x51CallBuffer)), "Not enough memory in backup buffer");
    static_assert(SCSetupIBAT4DBAT5_ADDRESS >= TARGET_ADDRESS_EXECUTABLE_MEM && SCSetupIBAT4DBAT5_ADDRESS < (TARGET_ADDRESS_EXECUTABLE_MEM + sizeof(backupBuffer)), "buffer in wrong memory region");
    static_assert(SC0x51Buffer_ADDRESS >= TARGET_ADDRESS_EXECUTABLE_MEM && SC0x51Buffer_ADDRESS < (TARGET_ADDRESS_EXECUTABLE_MEM + sizeof(backupBuffer)), "buffer in wrong memory region");
    static_assert(SC0x51Call_ADDRESS >= TARGET_ADDRESS_EXECUTABLE_MEM && SC0x51Call_ADDRESS < (TARGET_ADDRESS_EXECUTABLE_MEM + sizeof(backupBuffer)), "buffer in wrong memory region");
    static_assert(SCSetupIBAT4DBAT5_ADDRESS != SC0x51Buffer_ADDRESS && SCSetupIBAT4DBAT5_ADDRESS != SC0x51Call_ADDRESS && SC0x51Buffer_ADDRESS != SC0x51Call_ADDRESS, "buffer are not different");

    // We need copy the functions to a memory region which is executable on all 3 cores
    KernelWrite(SCSetupIBAT4DBAT5_ADDRESS, sSCSetupIBAT4DBAT5Buffer, sizeof(sSCSetupIBAT4DBAT5Buffer)); // Sets IBAT4 and DBAT5 to map the memory region
    KernelWrite(SC0x51Buffer_ADDRESS, sSC0x51Buffer, sizeof(sSC0x51Buffer));                            // Implementation of 0x51 syscall
    KernelWrite(SC0x51Call_ADDRESS, sSC0x51CallBuffer, sizeof(sSC0x51CallBuffer));                      // Call of 0x51 syscall

    /* set our setup syscall to an unused position */
    KernelPatchSyscall(0x51, SCSetupIBAT4DBAT5_ADDRESS);

    // We want to run this on all 3 cores.
    {
        int32_t aff[] = {CThread::eAttributeAffCore2, CThread::eAttributeAffCore1, CThread::eAttributeAffCore0};
        int iStackSize = 0x200;
        //! allocate the thread and stack on the default Cafe OS heap
        auto *pThread = (OSThread *) gMEMAllocFromDefaultHeapExForThreads(sizeof(OSThread), 0x10);
        auto *pThreadStack = (uint8_t *) gMEMAllocFromDefaultHeapExForThreads(iStackSize, 0x20);
        //! create the thread
        if (pThread && pThreadStack) {
            // Reuse the same OSThread/stack for each core, sequentially.
            for (int i : aff) {
                *pThread = {};
                memset(pThreadStack, 0, iStackSize);
                // Entry point is the raw syscall stub copied above.
                OSCreateThread(pThread, reinterpret_cast<OSThreadEntryPointFn>(SC0x51Call_ADDRESS), 0, nullptr, (void *) (pThreadStack + iStackSize), iStackSize, 16, (OSThreadAttributes) i);
                OSResumeThread(pThread);
                // Keep resuming until the thread is actually running, then wait
                // for it to finish before moving on to the next core.
                while (OSIsThreadSuspended(pThread)) {
                    OSResumeThread(pThread);
                }
                OSJoinThread(pThread, nullptr);
            }
        }
        //! free the thread stack buffer
        if (pThreadStack) {
            memset(pThreadStack, 0, iStackSize);
            gMEMFreeToDefaultHeapForThreads(pThreadStack);
        }
        if (pThread) {
            memset(pThread, 0, sizeof(OSThread));
            gMEMFreeToDefaultHeapForThreads(pThread);
        }
    }
    /* repair data */
    KernelWrite(TARGET_ADDRESS_EXECUTABLE_MEM, backupBuffer, sizeof(backupBuffer));
    DCFlushRange((void *) TARGET_ADDRESS_EXECUTABLE_MEM, sizeof(backupBuffer));
}
2020-06-06 22:35:48 +02:00
/**
 * CThread callback: NOPs out kernel instructions on the current core.
 *
 * Patches out the kernel's own writes to the segment register we claim
 * (derived from MEMORY_START_BASE) and silences several branches to app
 * panics that would otherwise trigger once the custom mapping is active.
 *
 * NOTE(review): all 0xfff.../0xffe... values are hardcoded physical kernel
 * addresses — presumably valid only for a specific firmware version; verify
 * when porting.
 *
 * @param thread the CThread running this callback (unused)
 * @param arg    unused
 */
void writeKernelNOPs(CThread *thread, void *arg) {
    DEBUG_FUNCTION_LINE_VERBOSE("Writing kernel NOPs on core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    // Patch out any writes to SR
    int sr = MEMORY_START_BASE >> 28;
    KernelNOPAtPhysicalAddress(0xfff1d734 + 0x4 * sr);
    // The SR write table is split; entries 0-6 and 7-15 live at different addresses.
    if (sr < 7) {
        KernelNOPAtPhysicalAddress(0xfff1d604 + 0x4 * sr);
    } else {
        KernelNOPAtPhysicalAddress(0xfff1d648 + 0x4 * (sr - 7));
    }
    KernelNOPAtPhysicalAddress(0xffe00618 + 0x4 * sr);

    // nop out branches to app panic 0x17
    KernelNOPAtPhysicalAddress(0xfff01db0);
    KernelNOPAtPhysicalAddress(0xfff01e90);
    KernelNOPAtPhysicalAddress(0xfff01ea0);
    KernelNOPAtPhysicalAddress(0xfff01ea4);

    // nop out branches to app panic 0x12
    KernelNOPAtPhysicalAddress(0xfff01a00);
    KernelNOPAtPhysicalAddress(0xfff01b68);
    KernelNOPAtPhysicalAddress(0xfff01b70);
    KernelNOPAtPhysicalAddress(0xfff01b7c);
    KernelNOPAtPhysicalAddress(0xfff01b80);

    // nop out branches to app panic 0x16
    KernelNOPAtPhysicalAddress(0xfff0db24);
    KernelNOPAtPhysicalAddress(0xfff0dbb4);
    KernelNOPAtPhysicalAddress(0xfff0dbbc);
    KernelNOPAtPhysicalAddress(0xfff0dbc8);
    KernelNOPAtPhysicalAddress(0xfff0dbcc);

    // nop out branches to app panic 0x14
    KernelNOPAtPhysicalAddress(0xfff01cfc);
    KernelNOPAtPhysicalAddress(0xfff01d4c);
    KernelNOPAtPhysicalAddress(0xfff01d54);
    KernelNOPAtPhysicalAddress(0xfff01d60);
    KernelNOPAtPhysicalAddress(0xfff01d64);
}
2020-05-29 19:25:05 +02:00
/**
 * CThread callback: installs the given segment register table on the current core.
 *
 * @param thread the CThread running this callback (unused)
 * @param arg    pointer to an sr_table_t with the values to write
 */
void writeSegmentRegister(CThread *thread, void *arg) {
    auto *srTable = (sr_table_t *) arg;
    DEBUG_FUNCTION_LINE_VERBOSE("Writing segment register to core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    // Flush the table to main memory before handing it to the kernel.
    DCFlushRange(srTable, sizeof(*srTable));
    KernelWriteSRs(srTable);
}
/**
 * Debug helper (CThread callback): dumps the segment registers and the page
 * table translation of the current core.
 *
 * WARNING: `pageTable` is a 128 KiB (0x8000 * 4 bytes) stack array — any
 * thread running this callback needs a correspondingly large stack.
 *
 * @param thread the CThread running this callback (unused)
 * @param arg    unused
 */
void readAndPrintSegmentRegister(CThread *thread, void *arg) {
    DEBUG_FUNCTION_LINE_VERBOSE("Reading segment register and page table from core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    sr_table_t srTable;
    memset(&srTable, 0, sizeof(srTable));
    KernelReadSRs(&srTable);
    DCFlushRange(&srTable, sizeof(srTable));

    for (int32_t i = 0; i < 16; i++) {
        DEBUG_FUNCTION_LINE_VERBOSE("[%d] SR[%d]=%08X", OSGetThreadAffinity(OSGetCurrentThread()) / 2, i, srTable.value[i]);
    }

    // 128 KiB buffer for the hashed page table copy.
    uint32_t pageTable[0x8000];
    memset(pageTable, 0, sizeof(pageTable));
    DEBUG_FUNCTION_LINE_VERBOSE("Reading pageTable now.");
    KernelReadPTE((uint32_t) pageTable, sizeof(pageTable));
    DCFlushRange(pageTable, sizeof(pageTable));
    DEBUG_FUNCTION_LINE_VERBOSE("Reading pageTable done");

    MemoryMapping_printPageTableTranslation(srTable, pageTable);

    DEBUG_FUNCTION_LINE_VERBOSE("-----------------------------");
}
2020-06-03 18:51:24 +02:00
bool MemoryMapping_isMemoryMapped ( ) {
2020-05-29 19:25:05 +02:00
sr_table_t srTable ;
2020-06-03 18:36:02 +02:00
memset ( & srTable , 0 , sizeof ( srTable ) ) ;
2020-05-29 19:25:05 +02:00
KernelReadSRs ( & srTable ) ;
2020-06-03 18:36:02 +02:00
if ( ( srTable . value [ MEMORY_START_BASE > > 28 ] & 0x00FFFFFF ) = = SEGMENT_UNIQUE_ID ) {
2020-05-29 19:25:05 +02:00
return true ;
}
return false ;
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: scans every mapped region for zero-filled ("empty") memory and
 * logs free/used spans. Compiled to a no-op unless DEBUG is defined.
 *
 * A word that is non-zero marks the start of a "fail" (in-use) range; when one
 * is found the scan skips ahead to the next 128 KiB-of-words boundary
 * (j is advanced to a multiple of 0x8000 words). Spans are reported aligned
 * down/up to 0x20000-byte boundaries.
 */
void MemoryMapping_searchEmptyMemoryRegions() {
#ifdef DEBUG
    DEBUG_FUNCTION_LINE("Searching for empty memory.");

    // mem_mapping is a nullptr-terminated table of mapped regions.
    for (int32_t i = 0;; i++) {
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t ea_start_address = mem_mapping[i].effective_start_address;
        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;

        // Total effective size = sum of all physical chunks (list is zero-terminated).
        uint32_t ea_size = 0;
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            ea_size += pa_end_address - pa_start_address;
        }

        auto *flush_start = (uint32_t *) ea_start_address;
        uint32_t flush_size = ea_size;

        DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X.", flush_size, flush_size / 1024, flush_start);
        DCFlushRange(flush_start, flush_size);

        DEBUG_FUNCTION_LINE("Searching in memory region %d. 0x%08X - 0x%08X. Size 0x%08X (%d KBytes).", i + 1, ea_start_address, ea_start_address + ea_size, ea_size, ea_size / 1024);

        bool success = true;
        auto *memory_ptr = (uint32_t *) ea_start_address;
        bool inFailRange = false;
        uint32_t startFailing = 0;
        uint32_t startGood = ea_start_address;
        for (uint32_t j = 0; j < ea_size / 4; j++) {
            if (memory_ptr[j] != 0) {
                success = false;
                if (!inFailRange) {
                    // Only report free spans larger than 512 kB.
                    if ((((uint32_t) &memory_ptr[j]) - (uint32_t) startGood) / 1024 > 512) {
                        // Align the reported span inward to 0x20000-byte boundaries.
                        uint32_t start_addr = startGood & 0xFFFE0000;
                        if (start_addr != startGood) {
                            start_addr += 0x20000;
                        }
                        uint32_t end_addr = ((uint32_t) &memory_ptr[j]) - MEMORY_START_BASE;
                        end_addr = (end_addr & 0xFFFE0000);
                        DEBUG_FUNCTION_LINE("+ Free between 0x%08X and 0x%08X size: %u kB", start_addr - MEMORY_START_BASE, end_addr,
                                            (((uint32_t) end_addr) - ((uint32_t) startGood - MEMORY_START_BASE)) / 1024);
                    }
                    startFailing = (uint32_t) &memory_ptr[j];
                    inFailRange = true;
                    startGood = 0;
                    // Skip ahead to the next 0x8000-word boundary (-1 because
                    // the loop increment adds one back).
                    j = ((j & 0xFFFF8000) + 0x00008000) - 1;
                }
                //break;
            } else {
                if (inFailRange) {
                    //DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB",startFailing,&memory_ptr[j],(((uint32_t)&memory_ptr[j])-(uint32_t)startFailing)/1024);
                    startFailing = 0;
                    startGood = (uint32_t) &memory_ptr[j];
                    inFailRange = false;
                }
            }
        }
        // Report whatever span was still open when the region ended.
        if (startGood != 0 && (startGood != ea_start_address + ea_size)) {
            DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood - MEMORY_START_BASE, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) MEMORY_START_BASE),
                                ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startGood) / 1024);
        } else if (inFailRange) {
            DEBUG_FUNCTION_LINE("- Used between 0x%08X and 0x%08X size: %u kB", startFailing, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startFailing) / 1024);
        }
        if (success) {
            DEBUG_FUNCTION_LINE("Test %d was successful!", i + 1);
        }
    }
    DEBUG_FUNCTION_LINE("All tests done.");
#endif
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: fills every physical chunk of every mapped region with an
 * incrementing 32-bit counter, written through KernelCopyData.
 *
 * Values are staged in a 16 KiB stack buffer (chunk_size words) and copied to
 * the target physical address whenever the buffer fills up, then the effective
 * range is flushed so the data is visible through the mapping.
 */
void MemoryMapping_writeTestValuesToMemory() {
    //don't smash the stack.
    uint32_t chunk_size = 0x1000;
    // NOTE(review): this is a VLA (chunk_size is not a compile-time constant),
    // 0x1000 * 4 = 16 KiB of stack.
    uint32_t testBuffer[chunk_size];

    for (int32_t i = 0;; i++) {
        // mem_mapping table is nullptr-terminated.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t cur_ea_start_address = mem_mapping[i].effective_start_address;

        DEBUG_FUNCTION_LINE("Preparing memory test for region %d. Region start at effective address %08X.", i + 1, cur_ea_start_address);

        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;
        uint32_t counter = 0;
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            uint32_t pa_size = pa_end_address - pa_start_address;
            DEBUG_FUNCTION_LINE("Writing region %d of mapping %d. From %08X to %08X Size: %d KBytes...", j + 1, i + 1, pa_start_address, pa_end_address, pa_size / 1024);
            // One extra iteration (<=) so the final full buffer gets flushed out.
            for (uint32_t k = 0; k <= pa_size / 4; k++) {
                // Every chunk_size words, push the staged buffer to the
                // physical destination via a kernel copy.
                if (k > 0 && (k % chunk_size) == 0) {
                    DCFlushRange(&testBuffer, sizeof(testBuffer));
                    DCInvalidateRange(&testBuffer, sizeof(testBuffer));
                    uint32_t destination = pa_start_address + ((k * 4) - sizeof(testBuffer));
                    KernelCopyData(destination, (uint32_t) OSEffectiveToPhysical((uint32_t) testBuffer), sizeof(testBuffer));
                    //DEBUG_FUNCTION_LINE("Copy testBuffer into %08X",destination);
                }
                if (k != pa_size / 4) {
                    testBuffer[k % chunk_size] = counter++;
                }
                //DEBUG_FUNCTION_LINE("testBuffer[%d] = %d",i % chunk_size,i);
            }
            auto *flush_start = (uint32_t *) cur_ea_start_address;
            uint32_t flush_size = pa_size;
            cur_ea_start_address += pa_size;
            DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X to map memory.", flush_size, flush_size / 1024, flush_start);
            DCFlushRange(flush_start, flush_size);
        }

        DEBUG_FUNCTION_LINE("Done writing region %d", i + 1);
    }
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: verifies the pattern written by
 * MemoryMapping_writeTestValuesToMemory by reading each mapped region through
 * its effective address and checking memory_ptr[j] == j.
 * Compiled to a no-op unless DEBUG is defined.
 *
 * On a mismatch the scan enters a "fail" range and skips ahead to the next
 * 0x8000-word boundary; good/error spans are logged as they close.
 */
void MemoryMapping_readTestValuesFromMemory() {
#ifdef DEBUG
    DEBUG_FUNCTION_LINE("Testing reading the written values.");

    for (int32_t i = 0;; i++) {
        // mem_mapping table is nullptr-terminated.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t ea_start_address = mem_mapping[i].effective_start_address;
        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;
        //uint32_t counter = 0;

        // Total effective size = sum of all physical chunks (list is zero-terminated).
        uint32_t ea_size = 0;
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            ea_size += pa_end_address - pa_start_address;
        }

        auto *flush_start = (uint32_t *) ea_start_address;
        uint32_t flush_size = ea_size;

        DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X to map memory.", flush_size, flush_size / 1024, flush_start);
        DCFlushRange(flush_start, flush_size);

        DEBUG_FUNCTION_LINE("Testing memory region %d. 0x%08X - 0x%08X. Size 0x%08X (%d KBytes).", i + 1, ea_start_address, ea_start_address + ea_size, ea_size, ea_size / 1024);

        bool success = true;
        auto *memory_ptr = (uint32_t *) ea_start_address;
        bool inFailRange = false;
        uint32_t startFailing = 0;
        uint32_t startGood = ea_start_address;
        for (uint32_t j = 0; j < ea_size / 4; j++) {
            if (memory_ptr[j] != j) {
                success = false;
                if (!inFailRange) {
                    DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood, &memory_ptr[j], (((uint32_t) &memory_ptr[j]) - (uint32_t) startGood) / 1024);
                    startFailing = (uint32_t) &memory_ptr[j];
                    inFailRange = true;
                    startGood = 0;
                    // Skip to the next 0x8000-word boundary (-1 because the
                    // loop increment adds one back).
                    j = ((j & 0xFFFF8000) + 0x00008000) - 1;
                }
                //break;
            } else {
                if (inFailRange) {
                    DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB", startFailing, &memory_ptr[j], (((uint32_t) &memory_ptr[j]) - (uint32_t) startFailing) / 1024);
                    startFailing = 0;
                    startGood = (uint32_t) &memory_ptr[j];
                    inFailRange = false;
                }
            }
        }
        // Report whatever span was still open when the region ended.
        if (startGood != 0 && (startGood != ea_start_address + ea_size)) {
            DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startGood) / 1024);
        } else if (inFailRange) {
            DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB", startFailing, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startFailing) / 1024);
        }
        if (success) {
            DEBUG_FUNCTION_LINE("Test %d was successful!", i + 1);
        }
    }
    DEBUG_FUNCTION_LINE("All tests done.");
#endif
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_memoryMappingForRegions ( const memory_mapping_t * memory_mapping , sr_table_t SRTable , uint32_t * translation_table ) {
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("In loop %d",i);
2021-09-24 16:51:11 +02:00
if ( memory_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("break %d",i);
2020-05-29 19:25:05 +02:00
break ;
}
uint32_t cur_ea_start_address = memory_mapping [ i ] . effective_start_address ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Mapping area %d. effective address %08X... " , i + 1 , cur_ea_start_address ) ;
2020-06-03 18:36:02 +02:00
const memory_values_t * mem_vals = memory_mapping [ i ] . physical_addresses ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
for ( uint32_t j = 0 ; ; j + + ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("In inner loop %d",j);
2020-06-03 18:36:02 +02:00
uint32_t pa_start_address = mem_vals [ j ] . start_address ;
2022-02-03 15:51:47 +01:00
uint32_t pa_end_address = mem_vals [ j ] . end_address ;
2020-06-03 18:36:02 +02:00
if ( pa_end_address = = 0 & & pa_start_address = = 0 ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("inner break %d",j);
2020-05-29 19:25:05 +02:00
// Break if entry was empty.
break ;
}
2020-06-03 18:36:02 +02:00
uint32_t pa_size = pa_end_address - pa_start_address ;
2021-09-24 16:51:11 +02:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Adding page table entry %d for mapping area %d. %08X-%08X => %08X-%08X... " , j + 1 , i + 1 , cur_ea_start_address ,
memory_mapping [ i ] . effective_start_address + pa_size , pa_start_address , pa_end_address ) ;
2020-06-03 18:51:24 +02:00
if ( ! MemoryMapping_mapMemory ( pa_start_address , pa_end_address , cur_ea_start_address , SRTable , translation_table ) ) {
2020-05-29 19:25:05 +02:00
//log_print("error =(");
DEBUG_FUNCTION_LINE ( " Failed to map memory. " ) ;
//OSFatal("Failed to map memory.");
return ;
break ;
}
cur_ea_start_address + = pa_size ;
//log_print("done");
}
}
}
2020-06-03 18:51:24 +02:00
/**
 * Sets up the custom memory mapping on all three cores:
 *  1. Installs the IBAT4/DBAT5 executable mapping everywhere.
 *  2. NOPs out kernel code that would overwrite our segment register or panic.
 *  3. Claims an unused segment register (SR for MEMORY_START_BASE) with a
 *     unique VSID and writes it on every core.
 *  4. Adds page table entries for all regions in mem_mapping and writes the
 *     updated page table back to the kernel.
 */
void MemoryMapping_setupMemoryMapping() {
    /*
     * We need to make sure that we have full access to the 0x00800000 - 0x01000000 region on all 3 cores.
     */
    SetupIBAT4DBAT5OnAllCores();

    // Override all writes to SR8 with nops.
    // Override some memory region checks inside the kernel
    runOnAllCores(writeKernelNOPs, nullptr);

    //runOnAllCores(readAndPrintSegmentRegister,nullptr,0,16,0x80000);

    sr_table_t srTableCpy;
    // 128 KiB copy of the hashed page table; heap-allocated to spare the stack.
    uint32_t sizePageTable = sizeof(uint32_t) * 0x8000;
    auto *pageTableCpy = (uint32_t *) gMEMAllocFromDefaultHeapExForThreads(sizePageTable, 0x10);
    if (!pageTableCpy) {
        OSFatal("MemoryMappingModule: Failed to alloc memory for page table");
    }

    KernelReadSRs(&srTableCpy);
    KernelReadPTE((uint32_t) pageTableCpy, sizePageTable);

    DCFlushRange(&srTableCpy, sizeof(srTableCpy));
    DCFlushRange(pageTableCpy, sizePageTable);

    for (int32_t i = 0; i < 16; i++) {
        DEBUG_FUNCTION_LINE_VERBOSE("SR[%d]=%08X", i, srTableCpy.value[i]);
    }
    //printPageTableTranslation(srTableCpy,pageTableCpy);
    // According to
    // http://wiiubrew.org/wiki/Cafe_OS#Virtual_Memory_Map 0x80000000
    // is currently unmapped.
    // This is nice because it leads to SR[8] which also seems to be unused (was set to 0x30FFFFFF)
    // The content of the segment was chosen randomly.
    uint32_t segment_index = MEMORY_START_BASE >> 28;
    uint32_t segment_content = 0x00000000 | SEGMENT_UNIQUE_ID;
    DEBUG_FUNCTION_LINE_VERBOSE("Setting SR[%d] to %08X", segment_index, segment_content);
    srTableCpy.value[segment_index] = segment_content;
    DCFlushRange(&srTableCpy, sizeof(srTableCpy));

    // NOTE(review): the extra segment_index/segment_content args below have no
    // matching format specifiers and are simply ignored by the logger.
    DEBUG_FUNCTION_LINE_VERBOSE("Writing segment registers...", segment_index, segment_content);
    // Writing the segment registers to ALL cores.
    //
    //writeSegmentRegister(nullptr, &srTableCpy);

    runOnAllCores(writeSegmentRegister, &srTableCpy);

    MemoryMapping_memoryMappingForRegions(mem_mapping, srTableCpy, pageTableCpy);
    //printPageTableTranslation(srTableCpy,pageTableCpy);

    DEBUG_FUNCTION_LINE_VERBOSE("Writing PageTable...");
    DCFlushRange(pageTableCpy, sizePageTable);
    KernelWritePTE((uint32_t) pageTableCpy, sizePageTable);
    DCFlushRange(pageTableCpy, sizePageTable);
    DEBUG_FUNCTION_LINE_VERBOSE("done");
    //printPageTableTranslation(srTableCpy,pageTableCpy);

    //runOnAllCores(readAndPrintSegmentRegister,nullptr,0,16,0x80000);
    //searchEmptyMemoryRegions();
    //writeTestValuesToMemory();
    //readTestValuesFromMemory();
    //runOnAllCores(writeSegmentRegister,&srTableCpy);

    // OSInitMutex(&allocMutex);

    // Scrub and free the temporary page table copy.
    memset(pageTableCpy, 0, sizePageTable);
    gMEMFreeToDefaultHeapForThreads(pageTableCpy);
}
2021-09-28 17:58:20 +02:00
/**
 * Allocates memory from the first mapped heap that can satisfy the request.
 *
 * @param size      requested size in bytes
 * @param align     alignment passed through to MEMAllocFromExpHeapEx; its
 *                  absolute value is used to round `size` up. (A negative
 *                  align presumably selects allocation from the heap's end —
 *                  MEMAllocFromExpHeapEx semantics; confirm against wut docs.)
 * @param videoOnly if true, only heaps inside [MEMORY_START_VIDEO,
 *                  MEMORY_END_VIDEO] are considered.
 * @return pointer to the allocation, or nullptr if no heap had room.
 */
void *MemoryMapping_allocEx(uint32_t size, int32_t align, bool videoOnly) {
    //OSLockMutex(&allocMutex);
    void *res = nullptr;
    for (int32_t i = 0; /* waiting for a break */; i++) {
        // mem_mapping table is nullptr-terminated.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        // Each mapped region starts with a MEM heap; its effective start
        // address doubles as the heap handle.
        uint32_t effectiveAddress = mem_mapping[i].effective_start_address;
        auto heapHandle = (MEMHeapHandle) effectiveAddress;

        // Skip non-video memory
        if (videoOnly && ((effectiveAddress < MEMORY_START_VIDEO) || (effectiveAddress > MEMORY_END_VIDEO))) {
            continue;
        }

        // Round the size up to a multiple of the (absolute) alignment.
        // NOTE(review): align == 0 would yield allocSize == 0 here — assumed
        // never to happen; verify callers.
        uint32_t allocSize;
        if (align > 0) {
            allocSize = (size + align - 1) & ~(align - 1);
        } else {
            uint32_t alignAbs = -align;
            allocSize = (size + alignAbs - 1) & ~(alignAbs - 1);
        }
        res = MEMAllocFromExpHeapEx(heapHandle, allocSize, align);
        if (res != nullptr) {
            break;
        }
    }
    OSMemoryBarrier();
    //OSUnlockMutex(&allocMutex);
    return res;
}
2024-04-26 23:32:39 +02:00
/**
 * Validates one block list (used or free) of an expanded heap.
 *
 * Walks the doubly-linked list checking back-pointers, bounds against the
 * heap's data area, and the per-block tag (0x5544 'UD' for used, 0x4652 'FR'
 * for free). Accumulates the total footprint of all visited blocks
 * (payload + alignment padding from attribs bits 8..30 + header) into
 * `totalSizeOut`.
 *
 * @param heap         heap the list belongs to
 * @param block        the list to check
 * @param tag          expected tag of every block in the list
 * @param listName     name used in error logs ("used"/"free")
 * @param totalSizeOut accumulator for the byte footprint of visited blocks
 * @return true if the list is consistent.
 */
bool CheckMemExpHeapBlock(MEMExpHeap *heap, MEMExpHeapBlockList *block, uint32_t tag, const char *listName, uint32_t &totalSizeOut) {
    MEMExpHeapBlock *prevBlock = nullptr;
    for (auto *cur = block->head; cur != nullptr; cur = cur->next) {
        // Doubly-linked: each node must point back at its predecessor.
        if (cur->prev != prevBlock) {
            DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] \"%s\" prev is invalid. expected %08X actual %08X", listName, prevBlock, cur->prev);
            return false;
        }
        // The block header and its payload must lie entirely inside the heap's data area.
        if (cur < heap->header.dataStart || cur > heap->header.dataEnd || ((uint32_t) cur + sizeof(MEMExpHeapBlock) + cur->blockSize) > (uint32_t) heap->header.dataEnd) {
            DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] Block is not inside heap. block: %08X size %d; heap start %08X heap end %08X", cur, sizeof(MEMExpHeapBlock) + cur->blockSize, heap->header.dataStart, heap->header.dataEnd);
            return false;
        }
        if (cur->tag != tag) {
            DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] Invalid block tag expected %04X, actual %04X", tag, cur->tag);
            return false;
        }
        totalSizeOut = totalSizeOut + cur->blockSize + (cur->attribs >> 8 & 0x7fffff) + sizeof(MEMExpHeapBlock);
        prevBlock = cur;
    }
    if (prevBlock != block->tail) {
        // BUGFIX: report the tail of the list actually being checked
        // (block->tail) — the old code always printed heap->usedList.tail,
        // which was wrong when validating the free list.
        DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] \"%s\" tail is unexpected! expected %08X, actual %08X", listName, block->tail, prevBlock);
        return false;
    }
    return true;
}
/**
 * Checks both block lists of an expanded heap and verifies that the used and
 * free blocks together exactly cover the heap's data area.
 *
 * Caller must hold the heap's lock if it uses one (see CheckMemExpHeap).
 *
 * @return true if the heap's internal structures look consistent.
 */
bool CheckMemExpHeapCore(MEMExpHeap *heap) {
    uint32_t totalSize = 0;
// The wut heap structs are packed; silence the packed-member warning for the list pointers.
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    // 0x5544 == ASCII "UD" — tag of used blocks.
    if (!CheckMemExpHeapBlock(heap, &heap->usedList, 0x5544, "used", totalSize)) {
        return false;
    }
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
    // 0x4652 == ASCII "FR" — tag of free blocks.
    if (!CheckMemExpHeapBlock(heap, &heap->freeList, 0x4652, "free", totalSize)) {
        return false;
    }
    // All blocks together (payload + padding + headers) must account for the
    // entire data area; anything else indicates corruption.
    if (totalSize != (uint32_t) heap->header.dataEnd - (uint32_t) heap->header.dataStart) {
        DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] heap size is unexpected! expected %08X, actual %08X", (uint32_t) heap->header.dataEnd - (uint32_t) heap->header.dataStart, totalSize);
        return false;
    }
    return true;
}
/**
 * Entry point of the expanded-heap consistency check.
 *
 * Validates the heap tag, then runs CheckMemExpHeapCore while holding the
 * heap's spin lock if the heap was created with MEM_HEAP_FLAG_USE_LOCK.
 *
 * @return true if the heap looks consistent.
 */
bool CheckMemExpHeap(MEMExpHeap *heap) {
    // Make sure we observe other cores' writes before inspecting the heap.
    OSMemoryBarrier();
    if (heap->header.tag != MEM_EXPANDED_HEAP_TAG) {
        DEBUG_FUNCTION_LINE_ERR("[Exp Heap Check] Invalid heap handle. - %08X", heap->header.tag);
        return false;
    }
    if (heap->header.flags & MEM_HEAP_FLAG_USE_LOCK) {
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
        OSUninterruptibleSpinLock_Acquire(&(heap->header).lock);
    }
    auto result = CheckMemExpHeapCore(heap);
    if (heap->header.flags & MEM_HEAP_FLAG_USE_LOCK) {
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
        OSUninterruptibleSpinLock_Release(&(heap->header).lock);
    }
    return result;
}
2024-04-19 18:24:51 +02:00
void MemoryMapping_checkHeaps ( ) {
2024-05-03 22:22:55 +02:00
//OSLockMutex(&allocMutex);
2024-04-19 18:24:51 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
break ;
}
auto heapHandle = ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address ;
2024-04-26 23:32:39 +02:00
if ( ! CheckMemExpHeap ( reinterpret_cast < MEMExpHeap * > ( heapHandle ) ) ) {
2024-04-19 18:24:51 +02:00
DEBUG_FUNCTION_LINE_ERR ( " MemoryMapping heap %08X (index %d) is corrupted. " , heapHandle , i ) ;
# ifdef DEBUG
OSFatal ( " MemoryMappingModule: Heap is corrupted " ) ;
# endif
}
}
2024-05-03 22:22:55 +02:00
//OSUnlockMutex(&allocMutex);
2024-04-19 18:24:51 +02:00
}
2021-09-28 17:58:20 +02:00
// Allocates from the mapped heaps. Convenience wrapper for the non-video
// variant of MemoryMapping_allocEx.
void *MemoryMapping_alloc(uint32_t size, int32_t align) {
    void *result = MemoryMapping_allocEx(size, align, false);
    return result;
}
2021-09-28 17:58:20 +02:00
// Allocates from the mapped heaps, requesting video-memory placement via
// MemoryMapping_allocEx.
void *MemoryMapping_allocVideoMemory(uint32_t size, int32_t align) {
    void *result = MemoryMapping_allocEx(size, align, true);
    return result;
}
2020-06-27 11:17:38 +02:00
2022-02-13 13:11:07 +01:00
// clang-format off
// Function pointer to an unexported CafeOS helper that walks a heap's child
// list to find the (sub-)heap containing a block.
// NOTE(review): hard-coded absolute address (base 0x101C400 + offset 0x2f2d8)
// — presumably tied to a specific OS/firmware version; verify before reuse.
#define FindHeapContainingBlock ((MEMHeapHandle (*) (MEMMemoryList *, void *) )(0x101C400 + 0x2f2d8))
// clang-format on
// Returns the heap (or child heap) that contains `block`, or nullptr if the
// block lies in none of the mapped regions. Each mapped region hosts an exp
// heap at its effective start address; the block is located by comparing
// against that heap's data range.
MEMHeapHandle MemoryMapping_MEMFindContainHeap(void *block) {
    for (int32_t i = 0; /* waiting for a break */; i++) {
        if (mem_mapping[i].physical_addresses == nullptr) {
            break; // nullptr-terminated mapping table
        }
        uint32_t effectiveAddress = mem_mapping[i].effective_start_address;
        auto heapHandle           = (MEMHeapHandle) effectiveAddress;
        auto *heap                = (MEMExpHeap *) heapHandle;
        // Does the block fall inside this heap's data area?
        if (block >= heap->header.dataStart &&
            block < heap->header.dataEnd) {
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
            // Prefer a child heap if the OS helper finds one; otherwise the
            // region's own heap is the container.
            auto child = FindHeapContainingBlock(&heap->header.list, block);
            return child ? child : heapHandle;
        }
    }
    return nullptr;
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_free ( void * ptr ) {
2021-09-24 16:51:11 +02:00
if ( ptr = = nullptr ) {
2020-05-29 19:25:05 +02:00
return ;
}
2024-05-03 22:22:55 +02:00
//OSLockMutex(&allocMutex);
2021-09-24 16:51:11 +02:00
auto ptr_val = ( uint32_t ) ptr ;
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
2020-06-03 18:36:02 +02:00
if ( ptr_val > mem_mapping [ i ] . effective_start_address & & ptr_val < mem_mapping [ i ] . effective_end_address ) {
2021-09-24 16:51:11 +02:00
auto heapHandle = ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address ;
2022-05-08 20:52:23 +02:00
MEMFreeToExpHeap ( heapHandle , ptr ) ;
2020-05-29 19:25:05 +02:00
break ;
}
}
2022-02-11 20:33:19 +01:00
OSMemoryBarrier ( ) ;
2024-05-03 22:22:55 +02:00
//OSUnlockMutex(&allocMutex);
2020-05-29 19:25:05 +02:00
}
2021-01-01 01:56:24 +01:00
// Largest single allocatable block across all mapped heaps, using the
// default 4-byte alignment.
uint32_t MemoryMapping_MEMGetAllocatableSize() {
    const uint32_t defaultAlignment = 4;
    return MemoryMapping_MEMGetAllocatableSizeEx(defaultAlignment);
}
// Returns the size of the largest block that could currently be allocated
// from any of the mapped heaps with the given alignment (the maximum over
// all heaps, not the sum).
uint32_t MemoryMapping_MEMGetAllocatableSizeEx(uint32_t align) {
    //OSLockMutex(&allocMutex);
    uint32_t biggest = 0;
    for (int32_t i = 0; mem_mapping[i].physical_addresses != nullptr; i++) {
        const uint32_t candidate = MEMGetAllocatableSizeForExpHeapEx((MEMHeapHandle) mem_mapping[i].effective_start_address, align);
        DEBUG_FUNCTION_LINE_VERBOSE("heap at %08X MEMGetAllocatableSizeForExpHeapEx: %d KiB", mem_mapping[i].effective_start_address, candidate / 1024);
        if (candidate > biggest) {
            biggest = candidate;
        }
    }
    //OSUnlockMutex(&allocMutex);
    return biggest;
}
2020-06-03 18:51:24 +02:00
// Total free space summed over all mapped heaps. Note this is the aggregate
// of possibly fragmented free blocks, not a single allocatable size.
uint32_t MemoryMapping_GetFreeSpace() {
    //OSLockMutex(&allocMutex);
    uint32_t total = 0;
    for (int32_t i = 0; mem_mapping[i].physical_addresses != nullptr; i++) {
        const uint32_t heapFree = MEMGetTotalFreeSizeForExpHeap((MEMHeapHandle) mem_mapping[i].effective_start_address);
        DEBUG_FUNCTION_LINE_VERBOSE("heap at %08X MEMGetTotalFreeSizeForExpHeap: %d KiB", mem_mapping[i].effective_start_address, heapFree / 1024);
        total += heapFree;
    }
    //OSUnlockMutex(&allocMutex);
    return total;
}
2020-06-03 18:51:24 +02:00
// Creates one expanded heap per mapped region: the region is zeroed first,
// then an exp heap with internal locking (MEM_HEAP_FLAG_USE_LOCK) is created
// spanning the whole effective address range.
void MemoryMapping_CreateHeaps() {
    //OSLockMutex(&allocMutex);
    for (int32_t i = 0; /* waiting for a break */; i++) {
        if (mem_mapping[i].physical_addresses == nullptr) {
            break; // nullptr-terminated mapping table
        }
        void *address = (void *) (mem_mapping[i].effective_start_address);
        uint32_t size = mem_mapping[i].effective_end_address - mem_mapping[i].effective_start_address;
        // Clear the region before handing it to the heap implementation.
        memset(reinterpret_cast<void *>(mem_mapping[i].effective_start_address), 0, size);
        // "heap" is only consumed by the debug print below; declaring it only
        // on DEBUG builds avoids an unused-variable warning on release builds.
#ifdef DEBUG
        auto heap =
#endif
                MEMCreateExpHeapEx(address, size, MEM_HEAP_FLAG_USE_LOCK);
#ifdef DEBUG
        DEBUG_FUNCTION_LINE("Created heap @%08X, size %d KiB", heap, size / 1024);
#endif
    }
    //OSUnlockMutex(&allocMutex);
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_DestroyHeaps ( ) {
2024-05-03 22:22:55 +02:00
//OSLockMutex(&allocMutex);
2020-05-30 21:48:50 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:48:50 +02:00
break ;
}
void * address = ( void * ) ( mem_mapping [ i ] . effective_start_address ) ;
uint32_t size = mem_mapping [ i ] . effective_end_address - mem_mapping [ i ] . effective_start_address ;
2020-06-03 18:51:24 +02:00
2020-05-30 21:48:50 +02:00
MEMDestroyExpHeap ( ( MEMHeapHandle ) address ) ;
memset ( address , 0 , size ) ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Destroyed heap @%08X " , address ) ;
2020-05-30 21:48:50 +02:00
}
2024-05-03 22:22:55 +02:00
//OSUnlockMutex(&allocMutex);
2020-05-30 21:48:50 +02:00
}
2020-05-29 19:25:05 +02:00
2020-06-03 18:51:24 +02:00
// Measures how many bytes starting at `start` (up to `maxSize`) are backed by
// valid page-table entries. Reads the segment registers and a copy of the
// hashed page table from the kernel, then probes page by page until the first
// unmapped page or the end of the range.
// NOTE(review): pageTable is a 0x8000-word (128 KiB) stack buffer — confirm
// the calling thread's stack is large enough for this.
uint32_t MemoryMapping_getAreaSizeFromPageTable(uint32_t start, uint32_t maxSize) {
    sr_table_t srTable;
    uint32_t pageTable[0x8000];
    KernelReadSRs(&srTable);
    KernelReadPTE((uint32_t) pageTable, sizeof(pageTable));
    // A 256 MiB segment register covers addresses by their top nibble.
    uint32_t sr_start = start >> 28;
    uint32_t sr_end   = (start + maxSize) >> 28;
    if (sr_end < sr_start) {
        return 0;
    }
    uint32_t cur_address = start;
    uint32_t end_address = start + maxSize;
    uint32_t memSize     = 0;
    for (uint32_t segment = sr_start; segment <= sr_end; segment++) {
        uint32_t sr = srTable.value[segment];
        if (sr >> 31) {
            // T bit set: direct-store segment, not handled here.
            DEBUG_FUNCTION_LINE("Direct access not supported");
        } else {
            uint32_t vsid         = sr & 0xFFFFFF;
            uint32_t pageSize     = 1 << PAGE_INDEX_SHIFT;
            uint32_t cur_end_addr = 0;
            // Clamp the scan to the requested range within the last segment.
            if (segment == sr_end) {
                cur_end_addr = end_address;
            } else {
                cur_end_addr = (segment + 1) * 0x10000000;
            }
            // Segments after the first start at their own base address.
            if (segment != sr_start) {
                cur_address = (segment) *0x10000000;
            }
            bool success = true;
            for (uint32_t addr = cur_address; addr < cur_end_addr; addr += pageSize) {
                uint32_t PTEH = 0;
                uint32_t PTEL = 0;
                // Count contiguously-mapped pages; stop at the first hole.
                if (MemoryMapping_getPageEntryForAddress(srTable.sdr1, addr, vsid, pageTable, &PTEH, &PTEL, false)) {
                    memSize += pageSize;
                } else {
                    success = false;
                    break;
                }
            }
            if (!success) {
                break;
            }
        }
    }
    return memSize;
}
2020-06-03 18:51:24 +02:00
bool MemoryMapping_getPageEntryForAddress ( uint32_t SDR1 , uint32_t addr , uint32_t vsid , uint32_t * translation_table , uint32_t * oPTEH , uint32_t * oPTEL , bool checkSecondHash ) {
2022-02-03 15:51:47 +01:00
uint32_t pageMask = SDR1 & 0x1FF ;
uint32_t pageIndex = ( addr > > PAGE_INDEX_SHIFT ) & PAGE_INDEX_MASK ;
2021-09-24 16:51:11 +02:00
uint32_t primaryHash = ( vsid & 0x7FFFF ) ^ pageIndex ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:51:24 +02:00
if ( MemoryMapping_getPageEntryForAddressEx ( SDR1 , addr , vsid , primaryHash , translation_table , oPTEH , oPTEL , 0 ) ) {
2020-05-29 19:25:05 +02:00
return true ;
}
2020-06-03 18:36:02 +02:00
if ( checkSecondHash ) {
2020-06-03 18:51:24 +02:00
if ( MemoryMapping_getPageEntryForAddressEx ( pageMask , addr , vsid , ~ primaryHash , translation_table , oPTEH , oPTEL , 1 ) ) {
2020-05-29 19:25:05 +02:00
return true ;
}
}
return false ;
}
2020-06-03 18:51:24 +02:00
// Probes one PTE group (8 entries) for a PTE that matches the given VSID,
// abbreviated page index, and hash-function selector H. The group is located
// by masking the supplied hash with the page-table size mask. On a match the
// PTE pair is written to *oPTEH / *oPTEL and true is returned.
bool MemoryMapping_getPageEntryForAddressEx(uint32_t pageMask, uint32_t addr, uint32_t vsid, uint32_t primaryHash, uint32_t *translation_table, uint32_t *oPTEH, uint32_t *oPTEL, uint32_t H) {
    const uint32_t maskedHash = primaryHash & ((pageMask << 10) | 0x3FF);
    // Abbreviated Page Index from the effective address.
    const uint32_t api = (addr >> 22) & 0x3F;
    // Each group is 64 bytes (8 PTEs x 8 bytes); index in 32-bit words.
    uint32_t wordIndex = (maskedHash << 6) / 4;
    for (int32_t slot = 0; slot < 8; slot++, wordIndex += 2) {
        const uint32_t PTEH = translation_table[wordIndex];
        const uint32_t PTEL = translation_table[wordIndex + 1];
        // V bit: entry must be valid.
        const bool valid = (PTEH >> 31) != 0;
        // H bit: entry must have been placed with the same hash function.
        const bool hashSelMatches = ((PTEH >> 6) & 1) == H;
        // VSID match guards against collisions with other segments' PTEs.
        const bool vsidMatches = ((PTEH >> 7) & 0xFFFFFF) == vsid;
        const bool apiMatches  = (PTEH & 0x3F) == api;
        if (valid && hashSelMatches && vsidMatches && apiMatches) {
            *oPTEH = PTEH;
            *oPTEL = PTEL;
            return true;
        }
    }
    return false;
}
2020-06-03 18:51:24 +02:00
// Dumps the effective-to-physical translations found in the given page table
// (VERBOSE_DEBUG builds only). Scans all 16 segments page by page and
// coalesces adjacent pages with identical protection bits and contiguous
// physical backing into single ranges before printing them.
void MemoryMapping_printPageTableTranslation(sr_table_t srTable, uint32_t *translation_table) {
    uint32_t SDR1 = srTable.sdr1;
    // "current" accumulates the run of contiguous pages being coalesced.
    pageInformation current;
    memset(&current, 0, sizeof(current));
    std::vector<pageInformation> pageInfos;
    for (uint32_t segment = 0; segment < 16; segment++) {
        uint32_t sr = srTable.value[segment];
        if (sr >> 31) {
            // T bit set: direct-store segment, not handled here.
            DEBUG_FUNCTION_LINE_VERBOSE("Direct access not supported");
        } else {
            // Segment register fields: supervisor key, user key, no-execute, VSID.
            uint32_t ks   = (sr >> 30) & 1;
            uint32_t kp   = (sr >> 29) & 1;
            uint32_t nx   = (sr >> 28) & 1;
            uint32_t vsid = sr & 0xFFFFFF;
            DEBUG_FUNCTION_LINE_VERBOSE("ks %08X kp %08X nx %08X vsid %08X", ks, kp, nx, vsid);
            uint32_t pageSize = 1 << PAGE_INDEX_SHIFT;
            for (uint32_t addr = segment * 0x10000000; addr < (segment + 1) * 0x10000000; addr += pageSize) {
                uint32_t PTEH = 0;
                uint32_t PTEL = 0;
                if (MemoryMapping_getPageEntryForAddress(SDR1, addr, vsid, translation_table, &PTEH, &PTEL, false)) {
                    uint32_t pp   = PTEL & 3;
                    uint32_t phys = PTEL & 0xFFFFF000;
                    // Same protection bits and physically contiguous with the
                    // current run? Then just grow the run.
                    if (current.ks == ks &&
                        current.kp == kp &&
                        current.nx == nx &&
                        current.pp == pp &&
                        current.phys == phys - current.size) {
                        current.size += pageSize;
                    } else {
                        // Flush the finished run (if any) and start a new one.
                        if (current.addr != 0 && current.size != 0) {
                            pageInfos.push_back(current);
                            memset(&current, 0, sizeof(current));
                        }
                        current.addr = addr;
                        current.size = pageSize;
                        current.kp   = kp;
                        current.ks   = ks;
                        current.nx   = nx;
                        current.pp   = pp;
                        current.phys = phys;
                    }
                } else {
                    // Unmapped page terminates the current run.
                    if (current.addr != 0 && current.size != 0) {
                        pageInfos.push_back(current);
                        memset(&current, 0, sizeof(current));
                    }
                }
            }
        }
    }
#ifdef VERBOSE_DEBUG
    // PP access rights tables: index by pp bits; access1 is used when the
    // relevant key bit is 0, access2 when it is 1.
    const char *access1[] = {"read/write", "read/write", "read/write", "read only"};
    const char *access2[] = {"no access", "read only", "read/write", "read only"};
    for (auto cur : pageInfos) {
        DEBUG_FUNCTION_LINE_VERBOSE("%08X %08X -> %08X %08X. user access %s. supervisor access %s. %s", cur.addr, cur.addr + cur.size, cur.phys, cur.phys + cur.size,
                                    cur.kp ? access2[cur.pp] : access1[cur.pp],
                                    cur.ks ? access2[cur.pp] : access1[cur.pp], cur.nx ? "not executable" : "executable");
    }
#endif
}
2020-06-03 18:51:24 +02:00
// Inserts hashed-page-table entries that map the physical range
// [pa_start_address, pa_end_address) at effective address ea_start_address,
// one PTE per page. Returns false if any page cannot be placed in either its
// primary or secondary PTE group.
bool MemoryMapping_mapMemory(uint32_t pa_start_address, uint32_t pa_end_address, uint32_t ea_start_address, sr_table_t SRTable, uint32_t *translation_table) {
    // Based on code from dimok. Thanks!
    // Information on page 5.
    // https://www.nxp.com/docs/en/application-note/AN2794.pdf
    uint32_t HTABORG  = SRTable.sdr1 >> 16;
    uint32_t HTABMASK = SRTable.sdr1 & 0x1FF;
    // Iterate to all possible pages. Each page is 1<<(PAGE_INDEX_SHIFT) big.
    uint32_t pageSize = 1 << (PAGE_INDEX_SHIFT);
    for (uint32_t i = 0; i < pa_end_address - pa_start_address; i += pageSize) {
        // Calculate the current effective address.
        uint32_t ea_addr = ea_start_address + i;
        // Calculate the segment.
        uint32_t segment = SRTable.value[ea_addr >> 28];
        // Unique ID from the segment which is the input for the hash function.
        // Change it to prevent collisions.
        uint32_t VSID = segment & 0x00FFFFFF;
        uint32_t V    = 1; // valid bit
        // Indicates if the second hash is used.
        uint32_t H = 0;
        // Real page number of the backing physical page.
        uint32_t RPN = (pa_start_address + i) >> 12;
        uint32_t RC  = 3;    // referenced + changed bits preset
        uint32_t WIMG = 0x02; // caching-inhibited? no; see AN2794 for WIMG bits
        uint32_t PP   = 0x02; // read/write access
        uint32_t page_index = (ea_addr >> PAGE_INDEX_SHIFT) & PAGE_INDEX_MASK;
        // Abbreviated Page Index.
        uint32_t API  = (ea_addr >> 22) & 0x3F;
        uint32_t PTEH = (V << 31) | (VSID << 7) | (H << 6) | API;
        uint32_t PTEL = (RPN << 12) | (RC << 7) | (WIMG << 3) | PP;
        uint32_t primary_hash = (VSID & 0x7FFFF);
        uint32_t hashvalue1   = primary_hash ^ page_index;
        // hashvalue 2 is the complement of the first hash.
        uint32_t hashvalue2 = ~hashvalue1;
        // calculate the address of the PTE groups.
        // PTEs are saved in a group of 8 PTEs
        // When PTEGaddr1 is full (all 8 PTEs set), PTEGaddr2 is used.
        // Then H in PTEH needs to be set to 1.
        uint32_t PTEGaddr1 = (HTABORG << 16) | (((hashvalue1 >> 10) & HTABMASK) << 16) | ((hashvalue1 & 0x3FF) << 6);
        uint32_t PTEGaddr2 = (HTABORG << 16) | (((hashvalue2 >> 10) & HTABMASK) << 16) | ((hashvalue2 & 0x3FF) << 6);
        // offset of the group inside the PTE Table.
        uint32_t PTEGoffset  = PTEGaddr1 - (HTABORG << 16);
        bool setSuccessfully = false;
        PTEGoffset += 7 * 8;
        // Scan the primary PTE group from the last slot downwards for a free
        // entry (pteh == 0).
        // NOTE(review): j starts at 7 and the loop runs while j > 0, so only
        // 7 of the 8 slots (byte offsets 8..56) are probed; the group's first
        // slot (offset 0) is never used — confirm whether that is intentional.
        for (int32_t j = 7; j > 0; PTEGoffset -= 8) {
            int32_t index = (PTEGoffset / 4);
            uint32_t pteh = translation_table[index];
            // Check if it's already taken. The first bit indicates if the
            // PTE-slot inside this group is already taken.
            if (pteh == 0) {
                // Free slot found: store the PTE pair.
                translation_table[index]     = PTEH;
                translation_table[index + 1] = PTEL;
                setSuccessfully              = true;
                break;
            } else {
                // slot taken, keep scanning
            }
            j--;
        }
        // Check if we already found a slot.
        if (!setSuccessfully) {
            // We still have a chance to find a slot in the PTEGaddr2 using the
            // complement of the hash. We need to set the H flag in PTEH and
            // use PTEGaddr2. (Not well tested)
            H    = 1;
            PTEH = (V << 31) | (VSID << 7) | (H << 6) | API;
            PTEGoffset = PTEGaddr2 - (HTABORG << 16);
            PTEGoffset += 7 * 8;
            // Same scan as before, over the secondary group.
            for (int32_t j = 7; j > 0; PTEGoffset -= 8) {
                int32_t index = (PTEGoffset / 4);
                uint32_t pteh = translation_table[index];
                // Check if it's already taken.
                if (pteh == 0) {
                    translation_table[index]     = PTEH;
                    translation_table[index + 1] = PTEL;
                    setSuccessfully              = true;
                    break;
                } else {
                    // slot taken, keep scanning
                }
                j--;
            }
            if (!setSuccessfully) {
                // Fail if we couldn't find a free slot.
                DEBUG_FUNCTION_LINE("-------------- No more free PTE -----------------------");
                return false;
            }
        }
    }
    return true;
}
2020-06-03 18:51:24 +02:00
// Translates a physical address back to its effective address. Handles the
// fixed 0x30800000-0x31000000 window directly, otherwise searches the
// (possibly fragmented) physical chunks of every mapped region. Returns 0 if
// the address is not covered by any mapping.
uint32_t MemoryMapping_PhysicalToEffective(uint32_t physicalAddress) {
    if (physicalAddress >= 0x30800000 && physicalAddress < 0x31000000) {
        // Fixed window: shifted 1:1 onto 0x00800000-0x01000000.
        return physicalAddress - (0x30800000 - 0x00800000);
    }
    // Iterate through all own mapped memory regions.
    for (int32_t i = 0; mem_mapping[i].physical_addresses != nullptr; i++) {
        const memory_values_t *chunks = mem_mapping[i].physical_addresses;
        uint32_t offsetInEA           = 0;
        // Iterate through all physical chunks of this region; the chunks map
        // back-to-back into the region's effective range.
        for (int32_t j = 0; chunks[j].end_address != 0; j++) {
            const bool hit = physicalAddress >= chunks[j].start_address &&
                             physicalAddress < chunks[j].end_address;
            if (hit) {
                // Calculate the EA from the chunk-relative offset.
                return (physicalAddress - chunks[j].start_address) + (mem_mapping[i].effective_start_address + offsetInEA);
            }
            offsetInEA += chunks[j].end_address - chunks[j].start_address;
        }
    }
    return 0;
}
2020-06-03 18:51:24 +02:00
// Translates an effective address in the mapped ranges to its physical
// address. Handles the fixed 0x00800000-0x01000000 window directly, otherwise
// locates the containing region and walks its physical chunks. Returns 0 if
// the address is not covered by any mapping.
uint32_t MemoryMapping_EffectiveToPhysical(uint32_t effectiveAddress) {
    if (effectiveAddress >= 0x00800000 && effectiveAddress < 0x01000000) {
        // Fixed window: shifted 1:1 onto 0x30800000-0x31000000.
        return effectiveAddress + (0x30800000 - 0x00800000);
    }
    // CAUTION: The data may be fragmented between multiple areas in PA.
    const memory_values_t *chunks = nullptr;
    uint32_t runningOffset        = 0;
    // Find the region whose effective range contains the address.
    for (int32_t i = 0; mem_mapping[i].physical_addresses != nullptr; i++) {
        if (effectiveAddress >= mem_mapping[i].effective_start_address && effectiveAddress < mem_mapping[i].effective_end_address) {
            chunks        = mem_mapping[i].physical_addresses;
            runningOffset = mem_mapping[i].effective_start_address;
            break;
        }
    }
    uint32_t result = 0;
    if (chunks != nullptr) {
        // Walk the chunks, advancing the effective-address cursor chunk by
        // chunk until the one covering the address is reached.
        for (int32_t i = 0; chunks[i].end_address != 0; i++) {
            const uint32_t chunkSize = chunks[i].end_address - chunks[i].start_address;
            if (effectiveAddress < (runningOffset + chunkSize)) {
                result = (effectiveAddress - runningOffset) + chunks[i].start_address;
                break;
            }
            runningOffset += chunkSize;
        }
    }
    return result;
}