diff --git a/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStorePaired.cpp b/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStorePaired.cpp
index f86ec46f90..d6cb415d03 100644
--- a/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStorePaired.cpp
+++ b/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStorePaired.cpp
@@ -115,11 +115,39 @@ void Jit64::psq_st(UGeckoInstruction inst)
 		// One value
 		XORPS(XMM0, R(XMM0));  // TODO: See if we can get rid of this cheaply by tweaking the code in the singleStore* functions.
 		CVTSD2SS(XMM0, fpr.R(s));
+#ifdef _M_X64
+#ifdef _WIN32
+		SUB(64, R(RSP), Imm8(0x28)); // Win64 ABI: 0x20 shadow space + 8 (presumably to keep RSP 16-byte aligned at the call -- the JIT's incoming RSP state isn't visible here)
+#else
+		//SUB(64, R(RSP), Imm8(0x8)); // SysV: alignment-only adjustment, currently disabled
+#endif
+#endif
 		CALLptr(MDisp(EDX, (u32)(u64)asm_routines.singleStoreQuantized));
+#ifdef _M_X64
+#ifdef _WIN32
+		ADD(64, R(RSP), Imm8(0x28)); // undo the shadow-space reservation after the call
+#else
+		//ADD(64, R(RSP), Imm8(0x8));
+#endif
+#endif
 	} else {
 		// Pair of values
 		CVTPD2PS(XMM0, fpr.R(s));
+#ifdef _M_X64
+#ifdef _WIN32
+		SUB(64, R(RSP), Imm8(0x28)); // Win64 ABI: shadow space + alignment (same as the single-value path)
+#else
+		//SUB(64, R(RSP), Imm8(0x8));
+#endif
+#endif
 		CALLptr(MDisp(EDX, (u32)(u64)asm_routines.pairedStoreQuantized));
+#ifdef _M_X64
+#ifdef _WIN32
+		ADD(64, R(RSP), Imm8(0x28)); // undo the shadow-space reservation after the call
+#else
+		//ADD(64, R(RSP), Imm8(0x8));
+#endif
+#endif
 	}
 	gpr.UnlockAll();
 	gpr.UnlockAllX();
@@ -164,8 +192,22 @@ void Jit64::psq_l(UGeckoInstruction inst)
 	SHL(32, R(EDX), Imm8(2));
 #else
 	SHL(32, R(EDX), Imm8(3));
 #endif
+#ifdef _M_X64
+#ifdef _WIN32
+	SUB(64, R(RSP), Imm8(0x28)); // Win64 ABI: shadow space + alignment before calling the quantized-load routine
+#else
+	//SUB(64, R(RSP), Imm8(0x8)); // SysV: alignment-only adjustment, currently disabled
+#endif
+#endif
 	CALLptr(MDisp(EDX, (u32)(u64)asm_routines.pairedLoadQuantized));
+#ifdef _M_X64
+#ifdef _WIN32
+	ADD(64, R(RSP), Imm8(0x28)); // undo the shadow-space reservation after the call
+#else
+	//ADD(64, R(RSP), Imm8(0x8));
+#endif
+#endif
 	CVTPS2PD(fpr.RX(inst.RS), R(XMM0));
 	gpr.UnlockAll();
 	gpr.UnlockAllX();