2020-05-29 19:25:05 +02:00
# include "memory_mapping.h"
# include <coreinit/cache.h>
2022-02-02 18:34:27 +01:00
# include <coreinit/memdefaultheap.h>
2020-05-29 19:25:05 +02:00
# include <coreinit/memexpheap.h>
2022-02-02 18:34:27 +01:00
# include <coreinit/memorymap.h>
2020-05-29 19:25:05 +02:00
# include <coreinit/thread.h>
# include "CThread.h"
2022-02-02 18:34:27 +01:00
# include "logger.h"
# include "memory.h"
2022-02-11 20:32:39 +01:00
# include <coreinit/mutex.h>
2021-09-24 16:51:11 +02:00
# include <cstring>
2022-02-02 18:34:27 +01:00
# include <vector>
2020-05-29 19:25:05 +02:00
// #define DEBUG_FUNCTION_LINE(x,...)
2022-02-11 20:32:39 +01:00
// Guards every exp-heap operation on the mapped regions (alloc/free/size
// queries, heap creation/destruction). Initialized in MemoryMapping_setupMemoryMapping().
OSMutex allocMutex;
2020-05-29 19:25:05 +02:00
void runOnAllCores ( CThread : : Callback callback , void * callbackArg , int32_t iAttr = 0 , int32_t iPriority = 16 , int32_t iStackSize = 0x8000 ) {
2020-06-03 18:36:02 +02:00
int32_t aff [ ] = { CThread : : eAttributeAffCore2 , CThread : : eAttributeAffCore1 , CThread : : eAttributeAffCore0 } ;
2020-05-29 19:25:05 +02:00
2022-02-02 18:34:27 +01:00
for ( int i : aff ) {
2022-01-21 19:52:35 +01:00
CThread thread ( iAttr | i , iPriority , iStackSize , callback , callbackArg ) ;
thread . resumeThread ( ) ;
2020-05-29 19:25:05 +02:00
}
}
2020-06-06 22:35:48 +02:00
/**
 * Thread entry: NOPs out kernel instructions on the current core so the
 * custom memory mapping survives (SR8 writes and app-panic branches).
 */
void writeKernelNOPs(CThread *thread, void *arg) {
    DEBUG_FUNCTION_LINE_VERBOSE("Writing kernel NOPs on core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    static const uint32_t patchAddresses[] = {
            // Patch out any writes to SR8
            0xFFF1D754,
            0xFFF1D64C,
            0xFFE00638,
            // nop out branches to app panic 0x17
            0xfff01db0,
            0xfff01e90,
            0xfff01ea0,
            0xfff01ea4,
            // nop out branches to app panic 0x12
            0xfff01a00,
            0xfff01b68,
            0xfff01b70,
            0xfff01b7c,
            0xfff01b80,
            // nop out branches to app panic 0x16
            0xfff0db24,
            0xfff0dbb4,
            0xfff0dbbc,
            0xfff0dbc8,
            0xfff0dbcc,
            // nop out branches to app panic 0x14
            0xfff01cfc,
            0xfff01d4c,
            0xfff01d54,
            0xfff01d60,
            0xfff01d64};

    for (uint32_t physAddr : patchAddresses) {
        KernelNOPAtPhysicalAddress(physAddr);
    }
}
2020-05-29 19:25:05 +02:00
/**
 * Thread entry: installs the given segment-register table on the current core.
 * `arg` must point at an sr_table_t; it is flushed from the data cache first
 * so the kernel sees the up-to-date values.
 */
void writeSegmentRegister(CThread *thread, void *arg) {
    auto *srTable = (sr_table_t *) arg;
    DEBUG_FUNCTION_LINE_VERBOSE("Writing segment register to core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    DCFlushRange(srTable, sizeof(*srTable));
    KernelWriteSRs(srTable);
}
/**
 * Thread entry: dumps the current core's segment registers and page table
 * for debugging. NOTE(review): `pageTable` is 128 KiB of stack — callers must
 * provide a big stack (the commented-out caller uses 0x80000).
 */
void readAndPrintSegmentRegister(CThread *thread, void *arg) {
    DEBUG_FUNCTION_LINE_VERBOSE("Reading segment register and page table from core %d", OSGetThreadAffinity(OSGetCurrentThread()) / 2);

    sr_table_t srTable = {};
    KernelReadSRs(&srTable);
    DCFlushRange(&srTable, sizeof(srTable));

    for (int32_t idx = 0; idx < 16; idx++) {
        DEBUG_FUNCTION_LINE_VERBOSE("[%d] SR[%d]=%08X", OSGetThreadAffinity(OSGetCurrentThread()) / 2, idx, srTable.value[idx]);
    }

    uint32_t pageTable[0x8000] = {};
    DEBUG_FUNCTION_LINE_VERBOSE("Reading pageTable now.");
    KernelReadPTE((uint32_t) pageTable, sizeof(pageTable));
    DCFlushRange(pageTable, sizeof(pageTable));
    DEBUG_FUNCTION_LINE_VERBOSE("Reading pageTable done");

    MemoryMapping_printPageTableTranslation(srTable, pageTable);

    DEBUG_FUNCTION_LINE_VERBOSE("-----------------------------");
}
2020-06-03 18:51:24 +02:00
bool MemoryMapping_isMemoryMapped ( ) {
2020-05-29 19:25:05 +02:00
sr_table_t srTable ;
2020-06-03 18:36:02 +02:00
memset ( & srTable , 0 , sizeof ( srTable ) ) ;
2020-05-29 19:25:05 +02:00
KernelReadSRs ( & srTable ) ;
2020-06-03 18:36:02 +02:00
if ( ( srTable . value [ MEMORY_START_BASE > > 28 ] & 0x00FFFFFF ) = = SEGMENT_UNIQUE_ID ) {
2020-05-29 19:25:05 +02:00
return true ;
}
return false ;
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: scans every mapped region for zeroed ("empty") memory and
 * logs large free/used spans. Only reads memory; does not modify it.
 */
void MemoryMapping_searchEmptyMemoryRegions() {
    DEBUG_FUNCTION_LINE("Searching for empty memory.");

    for (int32_t i = 0;; i++) {
        // The mapping table is terminated by an entry without physical ranges.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t ea_start_address       = mem_mapping[i].effective_start_address;
        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;
        uint32_t ea_size                = 0;
        // Total effective size = sum of all physical ranges (zero/zero entry terminates).
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address   = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            ea_size += pa_end_address - pa_start_address;
        }

        auto *flush_start   = (uint32_t *) ea_start_address;
        uint32_t flush_size = ea_size;
        DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X.", flush_size, flush_size / 1024, flush_start);
        DCFlushRange(flush_start, flush_size);

        DEBUG_FUNCTION_LINE("Searching in memory region %d. 0x%08X - 0x%08X. Size 0x%08X (%d KBytes).", i + 1, ea_start_address, ea_start_address + ea_size, ea_size, ea_size / 1024);
        bool success          = true;
        auto *memory_ptr      = (uint32_t *) ea_start_address;
        bool inFailRange      = false;
        uint32_t startFailing = 0;
        uint32_t startGood    = ea_start_address;
        for (uint32_t j = 0; j < ea_size / 4; j++) {
            if (memory_ptr[j] != 0) {
                // Non-zero word: the region is in use here.
                success = false;
                if (!inFailRange) {
                    // Only report "free" stretches larger than 512 KiB, aligned to 128 KiB.
                    if ((((uint32_t) &memory_ptr[j]) - (uint32_t) startGood) / 1024 > 512) {
                        uint32_t start_addr = startGood & 0xFFFE0000;
                        if (start_addr != startGood) {
                            start_addr += 0x20000;
                        }
                        uint32_t end_addr = ((uint32_t) &memory_ptr[j]) - MEMORY_START_BASE;
                        end_addr          = (end_addr & 0xFFFE0000);
                        DEBUG_FUNCTION_LINE("+ Free between 0x%08X and 0x%08X size: %u kB", start_addr - MEMORY_START_BASE, end_addr,
                                            (((uint32_t) end_addr) - ((uint32_t) startGood - MEMORY_START_BASE)) / 1024);
                    }
                    startFailing = (uint32_t) &memory_ptr[j];
                    inFailRange  = true;
                    startGood    = 0;
                    // Skip ahead to the next 128 KiB boundary (0x8000 words); -1 because the loop increments.
                    j = ((j & 0xFFFF8000) + 0x00008000) - 1;
                }
                //break;
            } else {
                if (inFailRange) {
                    //DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB",startFailing,&memory_ptr[j],(((uint32_t)&memory_ptr[j])-(uint32_t)startFailing)/1024);
                    startFailing = 0;
                    startGood    = (uint32_t) &memory_ptr[j];
                    inFailRange  = false;
                }
            }
        }
        // Report the state of the trailing span after the scan.
        if (startGood != 0 && (startGood != ea_start_address + ea_size)) {
            DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood - MEMORY_START_BASE, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) MEMORY_START_BASE),
                                ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startGood) / 1024);
        } else if (inFailRange) {
            DEBUG_FUNCTION_LINE("- Used between 0x%08X and 0x%08X size: %u kB", startFailing, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startFailing) / 1024);
        }
        if (success) {
            DEBUG_FUNCTION_LINE("Test %d was successful!", i + 1);
        }
    }
    DEBUG_FUNCTION_LINE("All tests done.");
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: fills every physical range of every mapping with an
 * incrementing counter, writing through the kernel (KernelCopyData) in
 * chunk_size-word batches, then flushes the effective-address view.
 */
void MemoryMapping_writeTestValuesToMemory() {
    //don't smash the stack.
    uint32_t chunk_size = 0x1000;
    // 16 KiB scratch buffer. NOTE(review): runtime-sized array (VLA, a GCC
    // extension in C++) — chunk_size is effectively a constant here.
    uint32_t testBuffer[chunk_size];
    for (int32_t i = 0;; i++) {
        // Mapping table is terminated by an entry without physical ranges.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t cur_ea_start_address = mem_mapping[i].effective_start_address;
        DEBUG_FUNCTION_LINE("Preparing memory test for region %d. Region start at effective address %08X.", i + 1, cur_ea_start_address);

        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;
        uint32_t counter                = 0;
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address   = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            uint32_t pa_size = pa_end_address - pa_start_address;
            DEBUG_FUNCTION_LINE("Writing region %d of mapping %d. From %08X to %08X Size: %d KBytes...", j + 1, i + 1, pa_start_address, pa_end_address, pa_size / 1024);
            // k runs one past the last word so the final full chunk is flushed too.
            for (uint32_t k = 0; k <= pa_size / 4; k++) {
                // Every chunk_size words, push the filled buffer into physical memory.
                if (k > 0 && (k % chunk_size) == 0) {
                    DCFlushRange(&testBuffer, sizeof(testBuffer));
                    DCInvalidateRange(&testBuffer, sizeof(testBuffer));
                    // Destination is the start of the chunk we just finished.
                    uint32_t destination = pa_start_address + ((k * 4) - sizeof(testBuffer));
                    KernelCopyData(destination, (uint32_t) OSEffectiveToPhysical((uint32_t) testBuffer), sizeof(testBuffer));
                    //DEBUG_FUNCTION_LINE("Copy testBuffer into %08X",destination);
                }
                // The extra trailing iteration only flushes; it must not write.
                if (k != pa_size / 4) {
                    testBuffer[k % chunk_size] = counter++;
                }
                //DEBUG_FUNCTION_LINE("testBuffer[%d] = %d",i % chunk_size,i);
            }
            auto *flush_start   = (uint32_t *) cur_ea_start_address;
            uint32_t flush_size = pa_size;
            cur_ea_start_address += pa_size;
            DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X to map memory.", flush_size, flush_size / 1024, flush_start);
            DCFlushRange(flush_start, flush_size);
        }
        DEBUG_FUNCTION_LINE("Done writing region %d", i + 1);
    }
}
2020-06-03 18:51:24 +02:00
/**
 * Debug helper: verifies the pattern written by
 * MemoryMapping_writeTestValuesToMemory() by reading each mapped region
 * through its effective address and logging good/bad spans.
 */
void MemoryMapping_readTestValuesFromMemory() {
    DEBUG_FUNCTION_LINE("Testing reading the written values.");
    for (int32_t i = 0;; i++) {
        // Mapping table is terminated by an entry without physical ranges.
        if (mem_mapping[i].physical_addresses == nullptr) {
            break;
        }
        uint32_t ea_start_address       = mem_mapping[i].effective_start_address;
        const memory_values_t *mem_vals = mem_mapping[i].physical_addresses;
        //uint32_t counter = 0;
        uint32_t ea_size = 0;
        // Total effective size = sum of all physical ranges.
        for (uint32_t j = 0;; j++) {
            uint32_t pa_start_address = mem_vals[j].start_address;
            uint32_t pa_end_address   = mem_vals[j].end_address;
            if (pa_end_address == 0 && pa_start_address == 0) {
                break;
            }
            ea_size += pa_end_address - pa_start_address;
        }
        auto *flush_start   = (uint32_t *) ea_start_address;
        uint32_t flush_size = ea_size;
        DEBUG_FUNCTION_LINE("Flushing %08X (%d kB) at %08X to map memory.", flush_size, flush_size / 1024, flush_start);
        DCFlushRange(flush_start, flush_size);

        DEBUG_FUNCTION_LINE("Testing memory region %d. 0x%08X - 0x%08X. Size 0x%08X (%d KBytes).", i + 1, ea_start_address, ea_start_address + ea_size, ea_size, ea_size / 1024);
        bool success          = true;
        auto *memory_ptr      = (uint32_t *) ea_start_address;
        bool inFailRange      = false;
        uint32_t startFailing = 0;
        uint32_t startGood    = ea_start_address;
        for (uint32_t j = 0; j < ea_size / 4; j++) {
            // Expected pattern: word j holds the value j (modulo how the writer
            // chunks the counter across regions — TODO confirm across regions).
            if (memory_ptr[j] != j) {
                success = false;
                if (!inFailRange) {
                    DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood, &memory_ptr[j], (((uint32_t) &memory_ptr[j]) - (uint32_t) startGood) / 1024);
                    startFailing = (uint32_t) &memory_ptr[j];
                    inFailRange  = true;
                    startGood    = 0;
                    // Skip ahead to the next 0x8000-word boundary; -1 because the loop increments.
                    j = ((j & 0xFFFF8000) + 0x00008000) - 1;
                }
                //break;
            } else {
                if (inFailRange) {
                    DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB", startFailing, &memory_ptr[j], (((uint32_t) &memory_ptr[j]) - (uint32_t) startFailing) / 1024);
                    startFailing = 0;
                    startGood    = (uint32_t) &memory_ptr[j];
                    inFailRange  = false;
                }
            }
        }
        // Report the trailing span after the scan.
        if (startGood != 0 && (startGood != ea_start_address + ea_size)) {
            DEBUG_FUNCTION_LINE("+ Good between 0x%08X and 0x%08X size: %u kB", startGood, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startGood) / 1024);
        } else if (inFailRange) {
            DEBUG_FUNCTION_LINE("- Error between 0x%08X and 0x%08X size: %u kB", startFailing, ea_start_address + ea_size, ((uint32_t) (ea_start_address + ea_size) - (uint32_t) startFailing) / 1024);
        }
        if (success) {
            DEBUG_FUNCTION_LINE("Test %d was successful!", i + 1);
        }
    }
    DEBUG_FUNCTION_LINE("All tests done.");
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_memoryMappingForRegions ( const memory_mapping_t * memory_mapping , sr_table_t SRTable , uint32_t * translation_table ) {
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("In loop %d",i);
2021-09-24 16:51:11 +02:00
if ( memory_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("break %d",i);
2020-05-29 19:25:05 +02:00
break ;
}
uint32_t cur_ea_start_address = memory_mapping [ i ] . effective_start_address ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Mapping area %d. effective address %08X... " , i + 1 , cur_ea_start_address ) ;
2020-06-03 18:36:02 +02:00
const memory_values_t * mem_vals = memory_mapping [ i ] . physical_addresses ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
for ( uint32_t j = 0 ; ; j + + ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("In inner loop %d",j);
2020-06-03 18:36:02 +02:00
uint32_t pa_start_address = mem_vals [ j ] . start_address ;
2022-02-03 15:51:47 +01:00
uint32_t pa_end_address = mem_vals [ j ] . end_address ;
2020-06-03 18:36:02 +02:00
if ( pa_end_address = = 0 & & pa_start_address = = 0 ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("inner break %d",j);
2020-05-29 19:25:05 +02:00
// Break if entry was empty.
break ;
}
2020-06-03 18:36:02 +02:00
uint32_t pa_size = pa_end_address - pa_start_address ;
2021-09-24 16:51:11 +02:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Adding page table entry %d for mapping area %d. %08X-%08X => %08X-%08X... " , j + 1 , i + 1 , cur_ea_start_address ,
memory_mapping [ i ] . effective_start_address + pa_size , pa_start_address , pa_end_address ) ;
2020-06-03 18:51:24 +02:00
if ( ! MemoryMapping_mapMemory ( pa_start_address , pa_end_address , cur_ea_start_address , SRTable , translation_table ) ) {
2020-05-29 19:25:05 +02:00
//log_print("error =(");
DEBUG_FUNCTION_LINE ( " Failed to map memory. " ) ;
//OSFatal("Failed to map memory.");
return ;
break ;
}
cur_ea_start_address + = pa_size ;
//log_print("done");
}
}
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_setupMemoryMapping ( ) {
2020-05-29 19:25:05 +02:00
// Override all writes to SR8 with nops.
2020-06-06 22:35:48 +02:00
// Override some memory region checks inside the kernel
2021-09-24 16:51:11 +02:00
runOnAllCores ( writeKernelNOPs , nullptr ) ;
2020-05-29 19:25:05 +02:00
2021-09-24 16:51:11 +02:00
//runOnAllCores(readAndPrintSegmentRegister,nullptr,0,16,0x80000);
2020-05-29 19:25:05 +02:00
sr_table_t srTableCpy ;
uint32_t pageTableCpy [ 0x8000 ] ;
KernelReadSRs ( & srTableCpy ) ;
2020-06-03 18:36:02 +02:00
KernelReadPTE ( ( uint32_t ) pageTableCpy , sizeof ( pageTableCpy ) ) ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
DCFlushRange ( & srTableCpy , sizeof ( srTableCpy ) ) ;
DCFlushRange ( pageTableCpy , sizeof ( pageTableCpy ) ) ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; i < 16 ; i + + ) {
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " SR[%d]=%08X " , i , srTableCpy . value [ i ] ) ;
2020-05-29 19:25:05 +02:00
}
//printPageTableTranslation(srTableCpy,pageTableCpy);
// According to
// http://wiiubrew.org/wiki/Cafe_OS#Virtual_Memory_Map 0x80000000
// is currently unmapped.
// This is nice because it leads to SR[8] which also seems to be unused (was set to 0x30FFFFFF)
// The content of the segment was chosen randomly.
2022-02-03 15:51:47 +01:00
uint32_t segment_index = MEMORY_START_BASE > > 28 ;
2020-05-29 19:25:05 +02:00
uint32_t segment_content = 0x00000000 | SEGMENT_UNIQUE_ID ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Setting SR[%d] to %08X " , segment_index , segment_content ) ;
2020-05-29 19:25:05 +02:00
srTableCpy . value [ segment_index ] = segment_content ;
2020-06-03 18:36:02 +02:00
DCFlushRange ( & srTableCpy , sizeof ( srTableCpy ) ) ;
2020-05-29 19:25:05 +02:00
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Writing segment registers... " , segment_index , segment_content ) ;
2020-05-29 19:25:05 +02:00
// Writing the segment registers to ALL cores.
//
2021-09-24 16:51:11 +02:00
//writeSegmentRegister(nullptr, &srTableCpy);
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
runOnAllCores ( writeSegmentRegister , & srTableCpy ) ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:51:24 +02:00
MemoryMapping_memoryMappingForRegions ( mem_mapping , srTableCpy , pageTableCpy ) ;
2020-05-29 19:25:05 +02:00
//printPageTableTranslation(srTableCpy,pageTableCpy);
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Writing PageTable... " ) ;
2020-06-03 18:36:02 +02:00
DCFlushRange ( pageTableCpy , sizeof ( pageTableCpy ) ) ;
KernelWritePTE ( ( uint32_t ) pageTableCpy , sizeof ( pageTableCpy ) ) ;
DCFlushRange ( pageTableCpy , sizeof ( pageTableCpy ) ) ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " done " ) ;
2020-05-29 19:25:05 +02:00
//printPageTableTranslation(srTableCpy,pageTableCpy);
2021-09-24 16:51:11 +02:00
//runOnAllCores(readAndPrintSegmentRegister,nullptr,0,16,0x80000);
2020-05-29 19:25:05 +02:00
//searchEmptyMemoryRegions();
//writeTestValuesToMemory();
//readTestValuesFromMemory();
//runOnAllCores(writeSegmentRegister,&srTableCpy);
2022-02-11 20:32:39 +01:00
OSInitMutex ( & allocMutex ) ;
2020-05-29 19:25:05 +02:00
}
2022-02-11 20:36:37 +01:00
// Round `val` down/up to a multiple of `align` (align must be a power of two).
// Fix: `align` is now fully parenthesized — the original `~(align - 1)`
// mis-expanded for expression arguments, e.g. ROUNDDOWN(x, 1 << 6) became
// `x & ~(1 << (6 - 1))`.
#define ROUNDDOWN(val, align) ((val) & ~((align) -1))
#define ROUNDUP(val, align)   ROUNDDOWN(((val) + ((align) -1)), align)
2021-09-28 17:58:20 +02:00
void * MemoryMapping_allocEx ( uint32_t size , int32_t align , bool videoOnly ) {
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2021-09-24 16:51:11 +02:00
void * res = nullptr ;
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
2021-09-28 17:58:20 +02:00
uint32_t effectiveAddress = mem_mapping [ i ] . effective_start_address ;
2022-02-03 15:51:47 +01:00
auto heapHandle = ( MEMHeapHandle ) effectiveAddress ;
auto * heap = ( MEMExpHeap * ) heapHandle ;
2021-09-24 16:51:11 +02:00
2021-09-28 17:58:20 +02:00
// Skip non-video memory
if ( videoOnly & & ( ( effectiveAddress < MEMORY_START_VIDEO ) | | ( effectiveAddress > MEMORY_END_VIDEO ) ) ) {
continue ;
}
2022-02-11 20:36:37 +01:00
// We round up the size to avoid heap corruption.
// FSReadFile expects the buffer size to be a multiple of 0x40
// This can remove once all modules/plugins have been updated :)
res = MEMAllocFromExpHeapEx ( heapHandle , ROUNDUP ( size , 0x40 ) , align ) ;
2021-01-01 01:56:54 +01:00
if ( res ! = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
}
2022-02-11 20:33:19 +01:00
OSMemoryBarrier ( ) ;
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2020-05-29 19:25:05 +02:00
return res ;
}
2021-09-28 17:58:20 +02:00
// Convenience wrapper: allocate from any mapped region (video memory not required).
void *MemoryMapping_alloc(uint32_t size, int32_t align) {
    return MemoryMapping_allocEx(size, align, false);
}
2021-09-28 17:58:20 +02:00
// Convenience wrapper: allocate only from regions inside the video-memory window.
void *MemoryMapping_allocVideoMemory(uint32_t size, int32_t align) {
    return MemoryMapping_allocEx(size, align, true);
}
2020-06-27 11:17:38 +02:00
2020-06-03 18:51:24 +02:00
void MemoryMapping_free ( void * ptr ) {
2021-09-24 16:51:11 +02:00
if ( ptr = = nullptr ) {
2020-05-29 19:25:05 +02:00
return ;
}
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2021-09-24 16:51:11 +02:00
auto ptr_val = ( uint32_t ) ptr ;
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
2020-06-03 18:36:02 +02:00
if ( ptr_val > mem_mapping [ i ] . effective_start_address & & ptr_val < mem_mapping [ i ] . effective_end_address ) {
2021-09-24 16:51:11 +02:00
auto heapHandle = ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address ;
2022-02-03 15:51:47 +01:00
auto * heap = ( MEMExpHeap * ) heapHandle ;
2021-09-24 16:51:11 +02:00
2020-05-30 21:45:05 +02:00
MEMFreeToExpHeap ( ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address , ptr ) ;
2021-01-01 01:56:54 +01:00
auto cur = heap - > usedList . head ;
2020-05-29 19:25:05 +02:00
break ;
}
}
2022-02-11 20:33:19 +01:00
OSMemoryBarrier ( ) ;
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2020-05-29 19:25:05 +02:00
}
2021-01-01 01:56:24 +01:00
// Largest single allocation currently possible across all mapped heaps,
// using the default alignment of 4 bytes.
uint32_t MemoryMapping_MEMGetAllocatableSize() {
    return MemoryMapping_MEMGetAllocatableSizeEx(4);
}
uint32_t MemoryMapping_MEMGetAllocatableSizeEx ( uint32_t align ) {
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2021-01-01 01:56:24 +01:00
uint32_t res = 0 ;
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2021-01-01 01:56:24 +01:00
break ;
}
uint32_t curRes = MEMGetAllocatableSizeForExpHeapEx ( ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address , align ) ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " heap at %08X MEMGetAllocatableSizeForExpHeapEx: %d KiB " , mem_mapping [ i ] . effective_start_address , curRes / 1024 ) ;
2021-01-01 01:56:24 +01:00
if ( curRes > res ) {
res = curRes ;
}
}
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2021-01-01 01:56:24 +01:00
return res ;
}
2020-06-03 18:51:24 +02:00
uint32_t MemoryMapping_GetFreeSpace ( ) {
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2020-05-30 21:49:29 +02:00
uint32_t res = 0 ;
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:49:29 +02:00
break ;
}
uint32_t curRes = MEMGetTotalFreeSizeForExpHeap ( ( MEMHeapHandle ) mem_mapping [ i ] . effective_start_address ) ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " heap at %08X MEMGetTotalFreeSizeForExpHeap: %d KiB " , mem_mapping [ i ] . effective_start_address , curRes / 1024 ) ;
2020-06-03 18:36:02 +02:00
res + = curRes ;
2020-05-30 21:49:29 +02:00
}
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2020-05-30 21:49:29 +02:00
return res ;
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_CreateHeaps ( ) {
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2020-05-30 21:48:50 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:48:50 +02:00
break ;
}
void * address = ( void * ) ( mem_mapping [ i ] . effective_start_address ) ;
uint32_t size = mem_mapping [ i ] . effective_end_address - mem_mapping [ i ] . effective_start_address ;
2021-01-01 01:56:54 +01:00
memset ( reinterpret_cast < void * > ( mem_mapping [ i ] . effective_start_address ) , 0 , size ) ;
2021-10-20 23:52:09 +02:00
auto heap = MEMCreateExpHeapEx ( address , size , MEM_HEAP_FLAG_USE_LOCK ) ;
DEBUG_FUNCTION_LINE ( " Created heap @%08X, size %d KiB " , heap , size / 1024 ) ;
2020-05-30 21:48:50 +02:00
}
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2020-05-30 21:48:50 +02:00
}
2020-06-03 18:51:24 +02:00
void MemoryMapping_DestroyHeaps ( ) {
2022-02-11 20:32:39 +01:00
OSLockMutex ( & allocMutex ) ;
2020-05-30 21:48:50 +02:00
for ( int32_t i = 0 ; /* waiting for a break */ ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-30 21:48:50 +02:00
break ;
}
void * address = ( void * ) ( mem_mapping [ i ] . effective_start_address ) ;
uint32_t size = mem_mapping [ i ] . effective_end_address - mem_mapping [ i ] . effective_start_address ;
2020-06-03 18:51:24 +02:00
2020-05-30 21:48:50 +02:00
MEMDestroyExpHeap ( ( MEMHeapHandle ) address ) ;
memset ( address , 0 , size ) ;
2021-03-13 13:56:07 +01:00
DEBUG_FUNCTION_LINE_VERBOSE ( " Destroyed heap @%08X " , address ) ;
2020-05-30 21:48:50 +02:00
}
2022-02-11 20:32:39 +01:00
OSUnlockMutex ( & allocMutex ) ;
2020-05-30 21:48:50 +02:00
}
2020-05-29 19:25:05 +02:00
2020-06-03 18:51:24 +02:00
/**
 * Counts how many bytes starting at effective address `start` are backed by
 * valid page-table entries, walking segment by segment and page by page.
 * Stops at the first page without a PTE, or after `maxSize` bytes.
 *
 * @return number of contiguously mapped bytes from `start` (0 if the range is invalid)
 */
uint32_t MemoryMapping_getAreaSizeFromPageTable(uint32_t start, uint32_t maxSize) {
    sr_table_t srTable;
    uint32_t pageTable[0x8000]; // NOTE(review): 128 KiB on the stack — caller needs a large stack
    KernelReadSRs(&srTable);
    KernelReadPTE((uint32_t) pageTable, sizeof(pageTable));

    // One segment register covers 256 MiB (top 4 address bits).
    uint32_t sr_start = start >> 28;
    uint32_t sr_end   = (start + maxSize) >> 28;

    if (sr_end < sr_start) {
        return 0;
    }
    uint32_t cur_address = start;
    uint32_t end_address = start + maxSize;
    uint32_t memSize     = 0;
    for (uint32_t segment = sr_start; segment <= sr_end; segment++) {
        uint32_t sr = srTable.value[segment];
        if (sr >> 31) {
            // T bit set: direct-store segment, not handled here.
            DEBUG_FUNCTION_LINE("Direct access not supported");
        } else {
            uint32_t vsid = sr & 0xFFFFFF;

            uint32_t pageSize     = 1 << PAGE_INDEX_SHIFT;
            uint32_t cur_end_addr = 0;
            // The last segment is cut off at end_address; the others run to the
            // segment boundary.
            if (segment == sr_end) {
                cur_end_addr = end_address;
            } else {
                cur_end_addr = (segment + 1) * 0x10000000;
            }
            // Segments after the first start at their segment boundary.
            if (segment != sr_start) {
                cur_address = (segment) *0x10000000;
            }
            bool success = true;
            for (uint32_t addr = cur_address; addr < cur_end_addr; addr += pageSize) {
                uint32_t PTEH = 0;
                uint32_t PTEL = 0;
                if (MemoryMapping_getPageEntryForAddress(srTable.sdr1, addr, vsid, pageTable, &PTEH, &PTEL, false)) {
                    memSize += pageSize;
                } else {
                    // First unmapped page ends the contiguous area.
                    success = false;
                    break;
                }
            }
            if (!success) {
                break;
            }
        }
    }
    return memSize;
}
2020-06-03 18:51:24 +02:00
bool MemoryMapping_getPageEntryForAddress ( uint32_t SDR1 , uint32_t addr , uint32_t vsid , uint32_t * translation_table , uint32_t * oPTEH , uint32_t * oPTEL , bool checkSecondHash ) {
2022-02-03 15:51:47 +01:00
uint32_t pageMask = SDR1 & 0x1FF ;
uint32_t pageIndex = ( addr > > PAGE_INDEX_SHIFT ) & PAGE_INDEX_MASK ;
2021-09-24 16:51:11 +02:00
uint32_t primaryHash = ( vsid & 0x7FFFF ) ^ pageIndex ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:51:24 +02:00
if ( MemoryMapping_getPageEntryForAddressEx ( SDR1 , addr , vsid , primaryHash , translation_table , oPTEH , oPTEL , 0 ) ) {
2020-05-29 19:25:05 +02:00
return true ;
}
2020-06-03 18:36:02 +02:00
if ( checkSecondHash ) {
2020-06-03 18:51:24 +02:00
if ( MemoryMapping_getPageEntryForAddressEx ( pageMask , addr , vsid , ~ primaryHash , translation_table , oPTEH , oPTEL , 1 ) ) {
2020-05-29 19:25:05 +02:00
return true ;
}
}
return false ;
}
2020-06-03 18:51:24 +02:00
/**
 * Probes one PTEG (8 PTE slots) of the hashed page table for a PTE matching
 * the given VSID, abbreviated page index and hash-function bit.
 *
 * @param pageMask          HTABMASK (low 9 bits of SDR1)
 * @param primaryHash       hash selecting the PTEG (already complemented for H=1)
 * @param translation_table page table as an array of 32-bit words
 * @param oPTEH/oPTEL       receive the matching PTE pair on success
 * @param H                 which hash function the caller used (0 primary, 1 secondary)
 * @return true if a valid, matching PTE was found in the group
 */
bool MemoryMapping_getPageEntryForAddressEx(uint32_t pageMask, uint32_t addr, uint32_t vsid, uint32_t primaryHash, uint32_t *translation_table, uint32_t *oPTEH, uint32_t *oPTEL, uint32_t H) {
    uint32_t maskedHash = primaryHash & ((pageMask << 10) | 0x3FF);
    uint32_t api        = (addr >> 22) & 0x3F;

    // PTEG starts at byte offset maskedHash << 6; the table is indexed in
    // 32-bit words, so that is word index maskedHash << 4. Each slot is two words.
    uint32_t wordIndex = maskedHash << 4;
    for (int32_t slot = 0; slot < 8; slot++, wordIndex += 2) {
        uint32_t PTEH = translation_table[wordIndex];
        uint32_t PTEL = translation_table[wordIndex + 1];

        // Valid bit (bit 31) must be set.
        if (!(PTEH >> 31)) {
            continue;
        }
        // The H bit indicates which hash function located this PTE.
        if (((PTEH >> 6) & 1) != H) {
            continue;
        }
        // VSID must match, otherwise this PTE belongs to another segment.
        // This is where collisions could happen; hopefully only our SR's PTEs match.
        if (((PTEH >> 7) & 0xFFFFFF) != vsid) {
            continue;
        }
        // Abbreviated Page Index must match too.
        if ((PTEH & 0x3F) != api) {
            continue;
        }
        *oPTEH = PTEH;
        *oPTEL = PTEL;
        return true;
    }
    return false;
}
2020-06-03 18:51:24 +02:00
// Dumps the page table as a list of coalesced, contiguous mappings:
// effective range -> physical range, plus user/supervisor access rights and
// executability. Adjacent pages with identical protection bits and
// contiguous physical backing are merged into one line.
void MemoryMapping_printPageTableTranslation(sr_table_t srTable, uint32_t *translation_table) {
    uint32_t SDR1 = srTable.sdr1;

    // "current" accumulates the run of contiguous pages being merged.
    pageInformation current;
    memset(&current, 0, sizeof(current));

    std::vector<pageInformation> pageInfos;

    // Walk all 16 segment registers; each covers 0x10000000 bytes of EA space.
    for (uint32_t segment = 0; segment < 16; segment++) {
        uint32_t sr = srTable.value[segment];
        if (sr >> 31) {
            // T bit set: direct-store segment, no page table translation.
            DEBUG_FUNCTION_LINE_VERBOSE("Direct access not supported");
        } else {
            uint32_t ks = (sr >> 30) & 1;   // supervisor-state protection key
            uint32_t kp = (sr >> 29) & 1;   // user-state protection key
            uint32_t nx = (sr >> 28) & 1;   // no-execute bit
            uint32_t vsid = sr & 0xFFFFFF;  // virtual segment id

            DEBUG_FUNCTION_LINE_VERBOSE("ks %08X kp %08X nx %08X vsid %08X", ks, kp, nx, vsid);

            uint32_t pageSize = 1 << PAGE_INDEX_SHIFT;
            // Probe every page of this segment (primary hash only).
            for (uint32_t addr = segment * 0x10000000; addr < (segment + 1) * 0x10000000; addr += pageSize) {
                uint32_t PTEH = 0;
                uint32_t PTEL = 0;
                if (MemoryMapping_getPageEntryForAddress(SDR1, addr, vsid, translation_table, &PTEH, &PTEL, false)) {
                    uint32_t pp = PTEL & 3;              // page protection bits
                    uint32_t phys = PTEL & 0xFFFFF000;   // physical page address
                    // Extend the current run when the protection bits match and
                    // the physical pages are contiguous; otherwise flush the
                    // finished run and start a new one at this page.
                    if (current.ks == ks &&
                        current.kp == kp &&
                        current.nx == nx &&
                        current.pp == pp &&
                        current.phys == phys - current.size) {
                        current.size += pageSize;
                    } else {
                        if (current.addr != 0 && current.size != 0) {
                            pageInfos.push_back(current);
                            memset(&current, 0, sizeof(current));
                        }
                        current.addr = addr;
                        current.size = pageSize;
                        current.kp = kp;
                        current.ks = ks;
                        current.nx = nx;
                        current.pp = pp;
                        current.phys = phys;
                    }
                } else {
                    // Unmapped page terminates any run in progress.
                    if (current.addr != 0 && current.size != 0) {
                        pageInfos.push_back(current);
                        memset(&current, 0, sizeof(current));
                    }
                }
            }
        }
    }
    // PP bits are interpreted differently depending on the protection key
    // (Kx = 0 -> access1, Kx = 1 -> access2).
    const char *access1[] = {"read/write", "read/write", "read/write", "read only"};
    const char *access2[] = {"no access", "read only", "read/write", "read only"};

    for (auto cur : pageInfos) {
        DEBUG_FUNCTION_LINE_VERBOSE("%08X %08X -> %08X %08X. user access %s. supervisor access %s. %s", cur.addr, cur.addr + cur.size, cur.phys, cur.phys + cur.size,
                                    cur.kp ? access2[cur.pp] : access1[cur.pp],
                                    cur.ks ? access2[cur.pp] : access1[cur.pp], cur.nx ? "not executable" : "executable");
    }
}
2020-06-03 18:51:24 +02:00
bool MemoryMapping_mapMemory ( uint32_t pa_start_address , uint32_t pa_end_address , uint32_t ea_start_address , sr_table_t SRTable , uint32_t * translation_table ) {
2020-05-29 19:25:05 +02:00
// Based on code from dimok. Thanks!
//uint32_t byteOffsetMask = (1 << PAGE_INDEX_SHIFT) - 1;
//uint32_t apiShift = 22 - PAGE_INDEX_SHIFT;
// Information on page 5.
// https://www.nxp.com/docs/en/application-note/AN2794.pdf
2022-02-03 15:51:47 +01:00
uint32_t HTABORG = SRTable . sdr1 > > 16 ;
2020-05-29 19:25:05 +02:00
uint32_t HTABMASK = SRTable . sdr1 & 0x1FF ;
// Iterate to all possible pages. Each page is 1<<(PAGE_INDEX_SHIFT) big.
2020-06-03 18:36:02 +02:00
uint32_t pageSize = 1 < < ( PAGE_INDEX_SHIFT ) ;
for ( uint32_t i = 0 ; i < pa_end_address - pa_start_address ; i + = pageSize ) {
2020-05-29 19:25:05 +02:00
// Calculate the current effective address.
uint32_t ea_addr = ea_start_address + i ;
// Calculate the segement.
uint32_t segment = SRTable . value [ ea_addr > > 28 ] ;
// Unique ID from the segment which is the input for the hash function.
// Change it to prevent collisions.
uint32_t VSID = segment & 0x00FFFFFF ;
2022-02-03 15:51:47 +01:00
uint32_t V = 1 ;
2020-05-29 19:25:05 +02:00
//Indicated if second hash is used.
uint32_t H = 0 ;
// Abbreviated Page Index
// Real page number
2022-02-03 15:51:47 +01:00
uint32_t RPN = ( pa_start_address + i ) > > 12 ;
uint32_t RC = 3 ;
2020-05-29 19:25:05 +02:00
uint32_t WIMG = 0x02 ;
2022-02-03 15:51:47 +01:00
uint32_t PP = 0x02 ;
2020-05-29 19:25:05 +02:00
uint32_t page_index = ( ea_addr > > PAGE_INDEX_SHIFT ) & PAGE_INDEX_MASK ;
2022-02-03 15:51:47 +01:00
uint32_t API = ( ea_addr > > 22 ) & 0x3F ;
2020-05-29 19:25:05 +02:00
uint32_t PTEH = ( V < < 31 ) | ( VSID < < 7 ) | ( H < < 6 ) | API ;
uint32_t PTEL = ( RPN < < 12 ) | ( RC < < 7 ) | ( WIMG < < 3 ) | PP ;
//unsigned long long virtual_address = ((unsigned long long)VSID << 28UL) | (page_index << PAGE_INDEX_SHIFT) | (ea_addr & 0xFFF);
uint32_t primary_hash = ( VSID & 0x7FFFF ) ;
2021-09-24 16:51:11 +02:00
uint32_t hashvalue1 = primary_hash ^ page_index ;
2020-05-29 19:25:05 +02:00
// hashvalue 2 is the complement of the first hash.
uint32_t hashvalue2 = ~ hashvalue1 ;
//uint32_t pageMask = SRTable.sdr1 & 0x1FF;
// calculate the address of the PTE groups.
// PTEs are saved in a group of 8 PTEs
// When PTEGaddr1 is full (all 8 PTEs set), PTEGaddr2 is used.
// Then H in PTEH needs to be set to 1.
2020-06-03 18:36:02 +02:00
uint32_t PTEGaddr1 = ( HTABORG < < 16 ) | ( ( ( hashvalue1 > > 10 ) & HTABMASK ) < < 16 ) | ( ( hashvalue1 & 0x3FF ) < < 6 ) ;
2020-05-29 19:25:05 +02:00
uint32_t PTEGaddr2 = ( HTABORG < < 16 ) | ( ( ( hashvalue2 > > 10 ) & HTABMASK ) < < 16 ) | ( ( hashvalue2 & 0x3FF ) < < 6 ) ;
//offset of the group inside the PTE Table.
uint32_t PTEGoffset = PTEGaddr1 - ( HTABORG < < 16 ) ;
bool setSuccessfully = false ;
2020-06-03 18:36:02 +02:00
PTEGoffset + = 7 * 8 ;
2020-05-29 19:25:05 +02:00
// Lets iterate through the PTE group where out PTE should be saved.
2020-06-03 18:36:02 +02:00
for ( int32_t j = 7 ; j > 0 ; PTEGoffset - = 8 ) {
int32_t index = ( PTEGoffset / 4 ) ;
2020-05-29 19:25:05 +02:00
uint32_t pteh = translation_table [ index ] ;
// Check if it's already taken. The first bit indicates if the PTE-slot inside
// this group is already taken.
2021-09-24 16:51:11 +02:00
if ( pteh = = 0 ) {
2020-05-29 19:25:05 +02:00
// If we found a free slot, set the PTEH and PTEL value.
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("Used slot %d. PTEGaddr1 %08X addr %08X",j+1,PTEGaddr1 - (HTABORG << 16),PTEGoffset);
2022-02-03 15:51:47 +01:00
translation_table [ index ] = PTEH ;
2020-06-03 18:36:02 +02:00
translation_table [ index + 1 ] = PTEL ;
2022-02-03 15:51:47 +01:00
setSuccessfully = true ;
2020-05-29 19:25:05 +02:00
break ;
} else {
//printf("PTEGoffset %08X was taken",PTEGoffset);
}
j - - ;
}
// Check if we already found a slot.
2020-06-03 18:36:02 +02:00
if ( ! setSuccessfully ) {
2020-05-30 21:45:48 +02:00
//DEBUG_FUNCTION_LINE("-------------- Using second slot -----------------------");
2020-05-29 19:25:05 +02:00
// We still have a chance to find a slot in the PTEGaddr2 using the complement of the hash.
// We need to set the H flag in PTEH and use PTEGaddr2.
// (Not well tested)
2022-02-03 15:51:47 +01:00
H = 1 ;
PTEH = ( V < < 31 ) | ( VSID < < 7 ) | ( H < < 6 ) | API ;
2020-05-29 19:25:05 +02:00
PTEGoffset = PTEGaddr2 - ( HTABORG < < 16 ) ;
2020-06-03 18:36:02 +02:00
PTEGoffset + = 7 * 8 ;
2020-05-29 19:25:05 +02:00
// Same as before.
2020-06-03 18:36:02 +02:00
for ( int32_t j = 7 ; j > 0 ; PTEGoffset - = 8 ) {
int32_t index = ( PTEGoffset / 4 ) ;
2020-05-29 19:25:05 +02:00
uint32_t pteh = translation_table [ index ] ;
//Check if it's already taken.
2021-09-24 16:51:11 +02:00
if ( pteh = = 0 ) {
2022-02-03 15:51:47 +01:00
translation_table [ index ] = PTEH ;
2020-06-03 18:36:02 +02:00
translation_table [ index + 1 ] = PTEL ;
2022-02-03 15:51:47 +01:00
setSuccessfully = true ;
2020-05-29 19:25:05 +02:00
break ;
} else {
//printf("PTEGoffset %08X was taken",PTEGoffset);
}
j - - ;
}
2020-06-03 18:36:02 +02:00
if ( ! setSuccessfully ) {
2020-05-29 19:25:05 +02:00
// Fail if we couldn't find a free slot.
DEBUG_FUNCTION_LINE ( " -------------- No more free PTE ----------------------- " ) ;
return false ;
}
}
}
return true ;
}
2020-06-03 18:51:24 +02:00
uint32_t MemoryMapping_PhysicalToEffective ( uint32_t phyiscalAddress ) {
2020-06-03 18:36:02 +02:00
if ( phyiscalAddress > = 0x30800000 & & phyiscalAddress < 0x31000000 ) {
2020-05-29 19:25:05 +02:00
return phyiscalAddress - ( 0x30800000 - 0x00800000 ) ;
}
2022-02-03 15:51:47 +01:00
uint32_t result = 0 ;
2021-09-24 16:51:11 +02:00
const memory_values_t * curMemValues = nullptr ;
2020-05-29 19:25:05 +02:00
//iterate through all own mapped memory regions
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; true ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
2022-02-03 15:51:47 +01:00
curMemValues = mem_mapping [ i ] . physical_addresses ;
2020-05-29 19:25:05 +02:00
uint32_t curOffsetInEA = 0 ;
// iterate through all memory values of this region
2020-06-03 18:36:02 +02:00
for ( int32_t j = 0 ; true ; j + + ) {
if ( curMemValues [ j ] . end_address = = 0 ) {
2020-05-29 19:25:05 +02:00
break ;
}
2020-06-03 18:36:02 +02:00
if ( phyiscalAddress > = curMemValues [ j ] . start_address & & phyiscalAddress < curMemValues [ j ] . end_address ) {
2020-05-29 19:25:05 +02:00
// calculate the EA
2020-06-03 18:36:02 +02:00
result = ( phyiscalAddress - curMemValues [ j ] . start_address ) + ( mem_mapping [ i ] . effective_start_address + curOffsetInEA ) ;
2020-05-29 19:25:05 +02:00
return result ;
}
curOffsetInEA + = curMemValues [ j ] . end_address - curMemValues [ j ] . start_address ;
}
}
return result ;
}
2020-06-03 18:51:24 +02:00
uint32_t MemoryMapping_EffectiveToPhysical ( uint32_t effectiveAddress ) {
2020-06-03 18:36:02 +02:00
if ( effectiveAddress > = 0x00800000 & & effectiveAddress < 0x01000000 ) {
2020-05-29 19:25:05 +02:00
return effectiveAddress + ( 0x30800000 - 0x00800000 ) ;
}
uint32_t result = 0 ;
// CAUTION: The data may be fragmented between multiple areas in PA.
2021-09-24 16:51:11 +02:00
const memory_values_t * curMemValues = nullptr ;
2022-02-03 15:51:47 +01:00
uint32_t curOffset = 0 ;
2020-05-29 19:25:05 +02:00
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; true ; i + + ) {
2021-09-24 16:51:11 +02:00
if ( mem_mapping [ i ] . physical_addresses = = nullptr ) {
2020-05-29 19:25:05 +02:00
break ;
}
2020-06-03 18:36:02 +02:00
if ( effectiveAddress > = mem_mapping [ i ] . effective_start_address & & effectiveAddress < mem_mapping [ i ] . effective_end_address ) {
2020-05-29 19:25:05 +02:00
curMemValues = mem_mapping [ i ] . physical_addresses ;
2022-02-03 15:51:47 +01:00
curOffset = mem_mapping [ i ] . effective_start_address ;
2020-05-29 19:25:05 +02:00
break ;
}
}
2021-09-24 16:51:11 +02:00
if ( curMemValues = = nullptr ) {
2020-05-29 19:25:05 +02:00
return result ;
}
2020-06-03 18:36:02 +02:00
for ( int32_t i = 0 ; true ; i + + ) {
if ( curMemValues [ i ] . end_address = = 0 ) {
2020-05-29 19:25:05 +02:00
break ;
}
uint32_t curChunkSize = curMemValues [ i ] . end_address - curMemValues [ i ] . start_address ;
2020-06-03 18:36:02 +02:00
if ( effectiveAddress < ( curOffset + curChunkSize ) ) {
2020-05-29 19:25:05 +02:00
result = ( effectiveAddress - curOffset ) + curMemValues [ i ] . start_address ;
break ;
}
curOffset + = curChunkSize ;
}
return result ;
}