From d00430470b73888896ee30caed5c1a1a72867c30 Mon Sep 17 00:00:00 2001
From: JosJuice
Date: Sun, 24 Jan 2021 14:56:38 +0100
Subject: [PATCH] JitArm64: Update registers last used before start of instruction

Let's reset m_last_used for each register that will be used in an
instruction before we start allocating any of them, so that one of the
earlier allocations doesn't spill a register that we want in a later
allocation. (We must still also increment/reset m_last_used in R and RW,
otherwise we end up in trouble when emulating lmw/stmw, since those
access more guest registers than there are available host registers.)

This should ensure that the asserts added earlier in this pull request
are never triggered.
---
 Source/Core/Core/PowerPC/JitArm64/Jit.cpp              |  9 +++++++++
 .../Core/PowerPC/JitArm64/JitArm64_RegCache.cpp        | 14 +++++++++++++-
 .../Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h     |  2 ++
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp
index 4a22d1c964..8555f2b5f2 100644
--- a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp
@@ -694,6 +694,15 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
     if (!SConfig::GetInstance().bEnableDebugging)
       js.downcountAmount += PatchEngine::GetSpeedhackCycles(js.compilerPC);
 
+    // Skip calling UpdateLastUsed for lmw/stmw - it usually hurts more than it helps
+    if (op.inst.OPCD != 46 && op.inst.OPCD != 47)
+      gpr.UpdateLastUsed(op.regsIn | op.regsOut);
+
+    BitSet32 fpr_used = op.fregsIn;
+    if (op.fregOut >= 0)
+      fpr_used[op.fregOut] = true;
+    fpr.UpdateLastUsed(fpr_used);
+
     // Gather pipe writes using a non-immediate address are discovered by profiling.
     bool gatherPipeIntCheck = js.fifoWriteAddresses.find(op.address) != js.fifoWriteAddresses.end();
 
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
index 4a3f255c71..d0d861d992 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
@@ -41,10 +41,22 @@ ARM64Reg Arm64RegCache::GetReg()
   // Holy cow, how did you run out of registers?
   // We can't return anything reasonable in this case. Return INVALID_REG and watch the failure
   // happen
-  WARN_LOG_FMT(DYNA_REC, "All available registers are locked dumb dumb");
+  ASSERT_MSG(DYNA_REC, 0, "All available registers are locked!");
   return INVALID_REG;
 }
 
+void Arm64RegCache::UpdateLastUsed(BitSet32 regs_used)
+{
+  for (size_t i = 0; i < m_guest_registers.size(); ++i)
+  {
+    OpArg& reg = m_guest_registers[i];
+    if (i < 32 && regs_used[i])
+      reg.ResetLastUsed();
+    else
+      reg.IncrementLastUsed();
+  }
+}
+
 u32 Arm64RegCache::GetUnlockedRegisterCount() const
 {
   u32 unlocked_registers = 0;
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
index 4e37df9528..55c86bd4ab 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
@@ -140,6 +140,8 @@ public:
   // Requires unlocking after done
   Arm64Gen::ARM64Reg GetReg();
 
+  void UpdateLastUsed(BitSet32 regs_used);
+
   // Locks a register so a cache cannot use it
   // Useful for function calls
   template
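
Note: to illustrate the reasoning in the commit message, here is a minimal,
self-contained toy sketch of an aging-based register cache. It is not
Dolphin's actual Arm64RegCache; the names (ToyRegCache, SpillStalest,
HOST_REGS, Allocate) and the tiny host register count are made up for
illustration. It shows why aging all registers and resetting the ones an
instruction is about to touch *before* the first allocation keeps an earlier
allocation from spilling a register needed later in the same instruction.

#include <array>
#include <cstdint>
#include <cstdio>
#include <optional>

constexpr int GUEST_REGS = 32;
constexpr int HOST_REGS = 4;  // deliberately tiny so spills happen quickly

struct GuestReg
{
  std::optional<int> host_reg;  // which host register caches it, if any
  uint32_t last_used = 0;       // "age": instructions since last use

  void ResetLastUsed() { last_used = 0; }
  void IncrementLastUsed() { ++last_used; }
};

struct ToyRegCache
{
  std::array<GuestReg, GUEST_REGS> guest{};
  int free_host_regs = HOST_REGS;

  // Age every guest register and reset the ones this instruction will use.
  // Doing this once, up front, mirrors the idea in the patch: without it,
  // allocating the first input could evict a register that the same
  // instruction needs a moment later.
  void UpdateLastUsed(uint32_t regs_used_mask)
  {
    for (int i = 0; i < GUEST_REGS; ++i)
    {
      if (regs_used_mask & (1u << i))
        guest[i].ResetLastUsed();
      else
        guest[i].IncrementLastUsed();
    }
  }

  // Evict the cached guest register with the largest last_used value.
  void SpillStalest()
  {
    int victim = -1;
    for (int i = 0; i < GUEST_REGS; ++i)
    {
      if (guest[i].host_reg &&
          (victim < 0 || guest[i].last_used > guest[victim].last_used))
        victim = i;
    }
    if (victim >= 0)
    {
      std::printf("spilling guest r%d (age %u)\n", victim, guest[victim].last_used);
      guest[victim].host_reg.reset();
      ++free_host_regs;
    }
  }

  // Bind a guest register to some host register, spilling if necessary.
  void Allocate(int guest_reg)
  {
    if (guest[guest_reg].host_reg)
      return;  // already cached
    if (free_host_regs == 0)
      SpillStalest();
    guest[guest_reg].host_reg = --free_host_regs;
  }
};

int main()
{
  ToyRegCache cache;

  // Warm the cache with r0..r3 so every host register is taken.
  for (int r : {0, 1, 2, 3})
  {
    cache.UpdateLastUsed(1u << r);
    cache.Allocate(r);
  }

  // Emulate an instruction that reads r3 and writes r4. Passing *both*
  // registers to UpdateLastUsed before any allocation keeps r3 "young",
  // so allocating r4 spills the genuinely stalest register (r0) instead
  // of r3, which the instruction still needs.
  cache.UpdateLastUsed((1u << 3) | (1u << 4));
  cache.Allocate(4);  // spills r0
  cache.Allocate(3);  // still cached, no spill needed
  return 0;
}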