diff --git a/Source/Core/Core/PowerPC/Jit64/Jit.cpp b/Source/Core/Core/PowerPC/Jit64/Jit.cpp
index 021ab386c4..1a2729fac4 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/Jit.cpp
@@ -190,7 +190,7 @@ void Jit64::Init()
 
 	// BLR optimization has the same consequences as block linking, as well as
 	// depending on the fault handler to be safe in the event of excessive BL.
-	m_enable_blr_optimization = jo.enableBlocklink && SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem;
+	m_enable_blr_optimization = jo.enableBlocklink && SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem && !SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableDebugging;
 	m_clear_cache_asap = false;
 
 	m_stack = nullptr;
@@ -207,9 +207,7 @@ void Jit64::Init()
 	code_block.m_stats = &js.st;
 	code_block.m_gpa = &js.gpa;
 	code_block.m_fpa = &js.fpa;
-	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_CONDITIONAL_CONTINUE);
-	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_BRANCH_MERGE);
-	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_CARRY_MERGE);
+	EnableOptimization();
 }
 
 void Jit64::ClearCache()
@@ -518,6 +516,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc
 	{
 		// We can link blocks as long as we are not single stepping and there are no breakpoints here
 		EnableBlockLink();
+		EnableOptimization();
 
 		// Comment out the following to disable breakpoints (speed-up)
 		if (!Profiler::g_ProfileBlocks)
@@ -528,6 +527,9 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc
 
 				// Do not link this block to other blocks While single stepping
 				jo.enableBlocklink = false;
+				analyzer.ClearOption(PPCAnalyst::PPCAnalyzer::OPTION_CONDITIONAL_CONTINUE);
+				analyzer.ClearOption(PPCAnalyst::PPCAnalyzer::OPTION_BRANCH_MERGE);
+				analyzer.ClearOption(PPCAnalyst::PPCAnalyzer::OPTION_CARRY_MERGE);
 			}
 			Trace();
 		}
@@ -873,3 +875,10 @@ void Jit64::EnableBlockLink()
 		jo.enableBlocklink = false;
 	}
 }
+
+void Jit64::EnableOptimization()
+{
+	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_CONDITIONAL_CONTINUE);
+	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_BRANCH_MERGE);
+	analyzer.SetOption(PPCAnalyst::PPCAnalyzer::OPTION_CARRY_MERGE);
+}
diff --git a/Source/Core/Core/PowerPC/Jit64/Jit.h b/Source/Core/Core/PowerPC/Jit64/Jit.h
index 0cd48c5f57..7fe8b75506 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit.h
+++ b/Source/Core/Core/PowerPC/Jit64/Jit.h
@@ -65,6 +65,8 @@ public:
 
 	void Init() override;
 
+	void EnableOptimization();
+
 	void EnableBlockLink();
 
 	void Shutdown() override;
diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp
index a2abade26b..bb52cb66e5 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp
@@ -98,9 +98,12 @@ void Jit64::lXXx(UGeckoInstruction inst)
 	if (accessSize == 8 && js.next_inst.OPCD == 31 && js.next_inst.SUBOP10 == 954 &&
 	    js.next_inst.RS == inst.RD && js.next_inst.RA == inst.RD && !js.next_inst.Rc)
 	{
-		js.downcountAmount++;
-		js.skipnext = true;
-		signExtend = true;
+		if (PowerPC::GetState() != PowerPC::CPU_STEPPING)
+		{
+			js.downcountAmount++;
+			js.skipnext = true;
+			signExtend = true;
+		}
 	}
 
 	// TODO(ector): Make it dynamically enable/disable idle skipping where appropriate
@@ -109,6 +112,7 @@ void Jit64::lXXx(UGeckoInstruction inst)
 	// IMHO those Idles should always be skipped and replaced by a more controllable "native" Idle methode
 	// ... maybe the throttle one already do that :p
 	if (SConfig::GetInstance().m_LocalCoreStartupParameter.bSkipIdle &&
+	    PowerPC::GetState() != PowerPC::CPU_STEPPING &&
 	    inst.OPCD == 32 &&
 	    (inst.hex & 0xFFFF0000) == 0x800D0000 &&
 	    (Memory::ReadUnchecked_U32(js.compilerPC + 4) == 0x28000000 ||
diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp
index 2aedc09c8b..73f4630e83 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp
@@ -228,21 +228,24 @@ void Jit64::mfspr(UGeckoInstruction inst)
 		// Be careful; the actual opcode is for mftb (371), not mfspr (339)
 		if (js.next_inst.OPCD == 31 && js.next_inst.SUBOP10 == 371 && (nextIndex == SPR_TU || nextIndex == SPR_TL))
 		{
-			int n = js.next_inst.RD;
-			js.downcountAmount++;
-			js.skipnext = true;
-			gpr.Lock(d, n);
-			gpr.BindToRegister(d, false);
-			gpr.BindToRegister(n, false);
-			if (iIndex == SPR_TL)
-				MOV(32, gpr.R(d), R(RAX));
-			if (nextIndex == SPR_TL)
-				MOV(32, gpr.R(n), R(RAX));
-			SHR(64, R(RAX), Imm8(32));
-			if (iIndex == SPR_TU)
-				MOV(32, gpr.R(d), R(RAX));
-			if (nextIndex == SPR_TU)
-				MOV(32, gpr.R(n), R(RAX));
+			if (PowerPC::GetState() != PowerPC::CPU_STEPPING)
+			{
+				int n = js.next_inst.RD;
+				js.downcountAmount++;
+				js.skipnext = true;
+				gpr.Lock(d, n);
+				gpr.BindToRegister(d, false);
+				gpr.BindToRegister(n, false);
+				if (iIndex == SPR_TL)
+					MOV(32, gpr.R(d), R(RAX));
+				if (nextIndex == SPR_TL)
+					MOV(32, gpr.R(n), R(RAX));
+				SHR(64, R(RAX), Imm8(32));
+				if (iIndex == SPR_TU)
+					MOV(32, gpr.R(d), R(RAX));
+				if (nextIndex == SPR_TU)
+					MOV(32, gpr.R(n), R(RAX));
+			}
 		}
 		else
 		{
diff --git a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp
index 0e20e79f8c..4dbc9aeda5 100644
--- a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp
+++ b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp
@@ -37,6 +37,7 @@ void JitILBase::lXz(UGeckoInstruction inst)
 	// Idle Skipping. This really should be done somewhere else.
 	// Either lower in the IR or higher in PPCAnalyist
 	if (SConfig::GetInstance().m_LocalCoreStartupParameter.bSkipIdle &&
+	    PowerPC::GetState() != PowerPC::CPU_STEPPING &&
 	    inst.OPCD == 32 && // Lwx
 	    (inst.hex & 0xFFFF0000) == 0x800D0000 &&
 	    (Memory::ReadUnchecked_U32(js.compilerPC + 4) == 0x28000000 ||