diff --git a/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp b/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp
index ffff485d95..81c02aadd0 100644
--- a/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/JitAsm.cpp
@@ -99,8 +99,8 @@ void Jit64AsmRoutineManager::Generate()
   // Fast block number lookup.
   // ((PC >> 2) & mask) * sizeof(JitBlock*) = (PC & (mask << 2)) * 2
   MOV(32, R(RSCRATCH), PPCSTATE(pc));
-  u64 icache = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetICache());
-  AND(32, R(RSCRATCH), Imm32(JitBaseBlockCache::iCache_Mask << 2));
+  u64 icache = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetFastBlockMap());
+  AND(32, R(RSCRATCH), Imm32(JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 2));
   if (icache <= INT_MAX)
   {
     MOV(64, R(RSCRATCH), MScaled(RSCRATCH, SCALE_2, static_cast<s32>(icache)));
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
index 7f323ebe6d..3d2b236739 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp
@@ -74,9 +74,9 @@ void JitArm64::GenerateAsm()
 
   ARM64Reg pc_masked = W25;
   ARM64Reg cache_base = X27;
   ARM64Reg block = X30;
-  ORRI2R(pc_masked, WZR, JitBaseBlockCache::iCache_Mask << 3);
+  ORRI2R(pc_masked, WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3);
   AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ST_LSL, 1));
-  MOVP2R(cache_base, g_jit->GetBlockCache()->GetICache());
+  MOVP2R(cache_base, g_jit->GetBlockCache()->GetFastBlockMap());
   LDR(block, cache_base, EncodeRegTo64(pc_masked));
   FixupBranch not_found = CBZ(block);
diff --git a/Source/Core/Core/PowerPC/JitCommon/JitCache.cpp b/Source/Core/Core/PowerPC/JitCommon/JitCache.cpp
index 134b3ebca4..f5853ee071 100644
--- a/Source/Core/Core/PowerPC/JitCommon/JitCache.cpp
+++ b/Source/Core/Core/PowerPC/JitCommon/JitCache.cpp
@@ -74,7 +74,7 @@ void JitBaseBlockCache::Clear()
 
   valid_block.ClearAll();
 
-  iCache.fill(nullptr);
+  fast_block_map.fill(nullptr);
 }
 
 void JitBaseBlockCache::Reset()
@@ -88,9 +88,9 @@ void JitBaseBlockCache::SchedulateClearCacheThreadSafe()
   CoreTiming::ScheduleEvent(0, s_clear_jit_cache_thread_safe, 0, CoreTiming::FromThread::NON_CPU);
 }
 
-JitBlock** JitBaseBlockCache::GetICache()
+JitBlock** JitBaseBlockCache::GetFastBlockMap()
 {
-  return iCache.data();
+  return fast_block_map.data();
 }
 
 void JitBaseBlockCache::RunOnBlocks(std::function<void(const JitBlock&)> f)
@@ -107,7 +107,7 @@ JitBlock* JitBaseBlockCache::AllocateBlock(u32 em_address)
   b.physicalAddress = physicalAddress;
   b.msrBits = MSR & JIT_CACHE_MSR_MASK;
   b.linkData.clear();
-  b.in_icache = 0;
+  b.fast_block_map_index = 0;
 
   return &b;
 }
@@ -125,9 +125,9 @@ void JitBaseBlockCache::FreeBlock(JitBlock* block)
 }
 
 void JitBaseBlockCache::FinalizeBlock(JitBlock& block, bool block_link, const u8* code_ptr)
 {
-  size_t icache = FastLookupEntryForAddress(block.effectiveAddress);
-  iCache[icache] = &block;
-  block.in_icache = icache;
+  size_t index = FastLookupIndexForAddress(block.effectiveAddress);
+  fast_block_map[index] = &block;
+  block.fast_block_map_index = index;
 
   u32 pAddr = block.physicalAddress;
 
@@ -175,12 +175,12 @@ JitBlock* JitBaseBlockCache::GetBlockFromStartAddress(u32 addr, u32 msr)
 
 const u8* JitBaseBlockCache::Dispatch()
 {
-  JitBlock* block = iCache[FastLookupEntryForAddress(PC)];
+  JitBlock* block = fast_block_map[FastLookupIndexForAddress(PC)];
 
   while (!block || block->effectiveAddress != PC || block->msrBits != (MSR & JIT_CACHE_MSR_MASK))
   {
     MoveBlockIntoFastCache(PC, MSR & JIT_CACHE_MSR_MASK);
-    block = iCache[FastLookupEntryForAddress(PC)];
+    block = fast_block_map[FastLookupIndexForAddress(PC)];
   }
 
   return block->normalEntry;
@@ -299,8 +299,8 @@ void JitBaseBlockCache::UnlinkBlock(const JitBlock& block)
 
 void JitBaseBlockCache::DestroyBlock(JitBlock& block)
 {
-  if (iCache[block.in_icache] == &block)
-    iCache[block.in_icache] = nullptr;
+  if (fast_block_map[block.fast_block_map_index] == &block)
+    fast_block_map[block.fast_block_map_index] = nullptr;
 
   UnlinkBlock(block);
 
@@ -330,19 +330,19 @@ void JitBaseBlockCache::MoveBlockIntoFastCache(u32 addr, u32 msr)
   }
   else
   {
-    // Drop old icache entry
-    if (iCache[block->in_icache] == block)
-      iCache[block->in_icache] = nullptr;
+    // Drop old fast block map entry
+    if (fast_block_map[block->fast_block_map_index] == block)
+      fast_block_map[block->fast_block_map_index] = nullptr;
 
     // And create a new one
-    size_t icache = FastLookupEntryForAddress(addr);
-    iCache[icache] = block;
-    block->in_icache = icache;
+    size_t index = FastLookupIndexForAddress(addr);
+    fast_block_map[index] = block;
+    block->fast_block_map_index = index;
 
     LinkBlock(*block);
   }
 }
 
-size_t JitBaseBlockCache::FastLookupEntryForAddress(u32 address)
+size_t JitBaseBlockCache::FastLookupIndexForAddress(u32 address)
 {
-  return (address >> 2) & iCache_Mask;
+  return (address >> 2) & FAST_BLOCK_MAP_MASK;
 }
diff --git a/Source/Core/Core/PowerPC/JitCommon/JitCache.h b/Source/Core/Core/PowerPC/JitCommon/JitCache.h
index 0f1c36efef..c5aaa8d1e8 100644
--- a/Source/Core/Core/PowerPC/JitCommon/JitCache.h
+++ b/Source/Core/Core/PowerPC/JitCommon/JitCache.h
@@ -63,9 +63,9 @@ struct JitBlock
   u64 ticStop;     // for profiling - time.
   u64 ticCounter;  // for profiling - time.
 
-  // This tracks the position if this block within the icache.
-  // We allow each block to have one icache entry.
-  size_t in_icache;
+  // This tracks the position if this block within the fast block cache.
+  // We allow each block to have only one map entry.
+  size_t fast_block_map_index;
 };
 
 typedef void (*CompiledCode)();
@@ -107,8 +107,8 @@ public:
   // is valid (MSR.IR and MSR.DR, the address translation bits).
   static constexpr u32 JIT_CACHE_MSR_MASK = 0x30;
 
-  static constexpr u32 iCache_Num_Elements = 0x10000;
-  static constexpr u32 iCache_Mask = iCache_Num_Elements - 1;
+  static constexpr u32 FAST_BLOCK_MAP_ELEMENTS = 0x10000;
+  static constexpr u32 FAST_BLOCK_MAP_MASK = FAST_BLOCK_MAP_ELEMENTS - 1;
 
   explicit JitBaseBlockCache(JitBase& jit);
   virtual ~JitBaseBlockCache();
@@ -120,7 +120,7 @@ public:
   void SchedulateClearCacheThreadSafe();
 
   // Code Cache
-  JitBlock** GetICache();
+  JitBlock** GetFastBlockMap();
   void RunOnBlocks(std::function<void(const JitBlock&)> f);
 
   JitBlock* AllocateBlock(u32 em_address);
@@ -128,7 +128,7 @@ public:
   void FinalizeBlock(JitBlock& block, bool block_link, const u8* code_ptr);
 
   // Look for the block in the slow but accurate way.
-  // This function shall be used if FastLookupEntryForAddress() failed.
+  // This function shall be used if FastLookupIndexForAddress() failed.
   // This might return nullptr if there is no such block.
   JitBlock* GetBlockFromStartAddress(u32 em_address, u32 msr);
 
@@ -156,8 +156,8 @@ private:
 
   void MoveBlockIntoFastCache(u32 em_address, u32 msr);
 
-  // Fast but risky block lookup based on iCache.
-  size_t FastLookupEntryForAddress(u32 address);
+  // Fast but risky block lookup based on fast_block_map.
+  size_t FastLookupIndexForAddress(u32 address);
 
   // links_to hold all exit points of all valid blocks in a reverse way.
   // It is used to query all blocks which links to an address.
@@ -178,5 +178,5 @@ private:
 
   // This array is indexed with the masked PC and likely holds the correct block id.
   // This is used as a fast cache of start_block_map used in the assembly dispatcher.
-  std::array<JitBlock*, iCache_Num_Elements> iCache;  // start_addr & mask -> number
+  std::array<JitBlock*, FAST_BLOCK_MAP_ELEMENTS> fast_block_map;  // start_addr & mask -> number
 };