diff --git a/Source/Core/Core/Src/CoreParameter.cpp b/Source/Core/Core/Src/CoreParameter.cpp
index 56d89b5f3d..6af323e689 100644
--- a/Source/Core/Core/Src/CoreParameter.cpp
+++ b/Source/Core/Core/Src/CoreParameter.cpp
@@ -91,7 +91,6 @@ void SCoreStartupParameter::LoadDefaults()
 	// These work fine in 32bit OSX
 	// Since the reason why 64bit OSX fails out is due to casting (u32)(u64)
 	// Since all 64bit applications are above the 32bit memory boundary
-	bJITLoadStoreOff = true;
 	bJITLoadStorePairedOff = true;
 #endif
 //#elif defined(__linux__)
diff --git a/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStore.cpp b/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStore.cpp
index e27fbdeefd..05418a6615 100644
--- a/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStore.cpp
+++ b/Source/Core/Core/Src/PowerPC/Jit64/Jit_LoadStore.cpp
@@ -317,7 +317,9 @@ void Jit64::stX(UGeckoInstruction inst)
 	case 38: accessSize = 8; break;  //stb
 	default: _assert_msg_(DYNA_REC, 0, "AWETKLJASDLKF"); return;
 	}
-
+
+#if !(defined(__APPLE__) && defined(_M_X64))
+	// (Sonicadvance1) This code path fails on OSX; not 100% sure why.
 	if (gpr.R(a).IsImm())
 	{
 		// If we already know the address through constant folding, we can do some
@@ -353,6 +355,7 @@ void Jit64::stX(UGeckoInstruction inst)
 		}
 		// Other IO not worth the trouble.
 	}
+#endif
 
 	// Optimized stack access?
 	if (accessSize == 32 && !gpr.R(a).IsImm() && a == 1 && js.st.isFirstBlockOfFunction && jo.optimizeStack)
diff --git a/Source/Core/Core/Src/PowerPC/JitCommon/JitAsmCommon.cpp b/Source/Core/Core/Src/PowerPC/JitCommon/JitAsmCommon.cpp
index 3d34917bdd..7e38a5431c 100644
--- a/Source/Core/Core/Src/PowerPC/JitCommon/JitAsmCommon.cpp
+++ b/Source/Core/Core/Src/PowerPC/JitCommon/JitAsmCommon.cpp
@@ -37,6 +37,60 @@ using namespace Gen;
 
 static int temp32;
 
+#if defined(__APPLE__) && defined(_M_X64)
+void CommonAsmRoutines::GenFifoWrite(int size)
+{
+	// Assume value in ABI_PARAM1
+	PUSH(RSI);
+	if (size != 32)
+		PUSH(RDX);
+	BSWAP(size, ABI_PARAM1);
+	MOV(64, R(RAX), Imm64((u64)GPFifo::m_gatherPipe));
+	MOV(32, R(ESI), M(&GPFifo::m_gatherPipeCount));
+	if (size != 32) {
+		MOV(32, R(EDX), R(ABI_PARAM1));
+		MOV(size, MComplex(RAX, RSI, 1, 0), R(EDX));
+	} else {
+		MOV(size, MComplex(RAX, RSI, 1, 0), R(ABI_PARAM1));
+	}
+	ADD(32, R(ESI), Imm8(size >> 3));
+	MOV(32, M(&GPFifo::m_gatherPipeCount), R(ESI));
+	if (size != 32)
+		POP(RDX);
+	POP(RSI);
+	RET();
+}
+void CommonAsmRoutines::GenFifoFloatWrite()
+{
+	// Assume value in XMM0
+	PUSH(RSI);
+	PUSH(RDX);
+	MOVSS(M(&temp32), XMM0);
+	MOV(32, R(EDX), M(&temp32));
+	BSWAP(32, EDX);
+	MOV(64, R(RAX), Imm64((u64)GPFifo::m_gatherPipe));
+	MOV(32, R(ESI), M(&GPFifo::m_gatherPipeCount));
+	MOV(32, MComplex(RAX, RSI, 1, 0), R(EDX));
+	ADD(32, R(ESI), Imm8(4));
+	MOV(32, M(&GPFifo::m_gatherPipeCount), R(ESI));
+	POP(RDX);
+	POP(RSI);
+	RET();
+}
+
+void CommonAsmRoutines::GenFifoXmm64Write()
+{
+	// Assume value in XMM0. Assume pre-byteswapped (unlike the others here!)
+	PUSH(RSI);
+	MOV(64, R(RAX), Imm64((u64)GPFifo::m_gatherPipe));
+	MOV(32, R(ESI), M(&GPFifo::m_gatherPipeCount));
+	MOVQ_xmm(MComplex(RAX, RSI, 1, 0), XMM0);
+	ADD(32, R(ESI), Imm8(8));
+	MOV(32, M(&GPFifo::m_gatherPipeCount), R(ESI));
+	POP(RSI);
+	RET();
+}
+#else
 void CommonAsmRoutines::GenFifoWrite(int size)
 {
 	// Assume value in ABI_PARAM1
@@ -59,7 +113,6 @@ void CommonAsmRoutines::GenFifoWrite(int size)
 	POP(ESI);
 	RET();
 }
-
 void CommonAsmRoutines::GenFifoFloatWrite()
 {
 	// Assume value in XMM0
@@ -77,7 +130,6 @@ void CommonAsmRoutines::GenFifoFloatWrite()
 	POP(ESI);
 	RET();
 }
-
 void CommonAsmRoutines::GenFifoXmm64Write()
 {
 	// Assume value in XMM0. Assume pre-byteswapped (unlike the others here!)
@@ -86,10 +138,11 @@ void CommonAsmRoutines::GenFifoXmm64Write()
 	MOV(32, R(ESI), M(&GPFifo::m_gatherPipeCount));
 	MOVQ_xmm(MComplex(RAX, RSI, 1, 0), XMM0);
 	ADD(32, R(ESI), Imm8(8));
-	MOV(32, M(&GPFifo::m_gatherPipeCount), R(ESI));
-	POP(ESI);
-	RET();
+	MOV(32, M(&GPFifo::m_gatherPipeCount), R(ESI));
+	POP(ESI);
+	RET();
 }
+#endif
 
 // Safe + Fast Quantizers, originally from JITIL by magumagu
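
For reference, a minimal C++ sketch of what the emitted 32-bit FIFO write routine does, independent of the emitter. The standalone globals, the buffer size, and the swap32 helper below are illustrative stand-ins for GPFifo::m_gatherPipe, GPFifo::m_gatherPipeCount (a u32 write offset), and Dolphin's own byte-swap helper; they are not Dolphin's actual definitions.

#include <cstdint>
#include <cstring>

// Illustrative stand-ins for GPFifo::m_gatherPipe / m_gatherPipeCount.
static uint8_t  m_gatherPipe[128];
static uint32_t m_gatherPipeCount = 0;

// Local byte-swap helper, standing in for Dolphin's swap routine.
static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
	       ((v << 8) & 0x00FF0000u) | (v << 24);
}

// Plain-C++ equivalent of the code GenFifoWrite(32) emits: byte-swap the
// value, store it at the current gather-pipe offset, advance the count.
static void FifoWrite32(uint32_t value)
{
	const uint32_t swapped = swap32(value);
	std::memcpy(&m_gatherPipe[m_gatherPipeCount], &swapped, sizeof(swapped));
	m_gatherPipeCount += sizeof(swapped);
}

The OSX-specific part of the change is only in how the address of m_gatherPipe is materialized: per the CoreParameter.cpp comments above, 64bit applications sit above the 32bit memory boundary, so the pointer must be loaded as a full 64bit immediate (Imm64 into RAX) rather than truncated through a (u32)(u64) cast, which is what the #if defined(__APPLE__) && defined(_M_X64) routines do.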