diff --git a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
index 81b4b27204..15cda2b043 100644
--- a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
+++ b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
@@ -489,6 +489,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   int size = sizes[type] * (single ? 1 : 2);
   bool isInline = quantize != -1;
+  bool safe_access = m_jit.jo.memcheck || !m_jit.jo.fastmem;
 
   // illegal
   if (type == QUANTIZE_INVALID1 || type == QUANTIZE_INVALID2 || type == QUANTIZE_INVALID3)
@@ -506,7 +507,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   bool extend = single && (type == QUANTIZE_S8 || type == QUANTIZE_S16);
 
-  if (m_jit.jo.memcheck)
+  if (safe_access)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
     int flags = isInline ? 0 :
@@ -632,8 +633,9 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
 {
   int size = single ? 32 : 64;
   bool extend = false;
+  bool safe_access = m_jit.jo.memcheck || !m_jit.jo.fastmem;
 
-  if (m_jit.jo.memcheck)
+  if (safe_access)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
     int flags = isInline ? 0 :
@@ -644,7 +646,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
 
   if (single)
   {
-    if (m_jit.jo.memcheck)
+    if (safe_access)
     {
       MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
     }
@@ -669,7 +671,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
     // for a good reason, or merely because no game does this.
     // If we find something that actually does do this, maybe this should be changed. How
     // much of a performance hit would it be?
-    if (m_jit.jo.memcheck)
+    if (safe_access)
     {
       ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
       MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
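
Note on the pattern: the quantized-load emitters were gating their safe (exception-aware, non-backpatched) path on m_jit.jo.memcheck alone, so a build running with fastmem disabled would still emit fastmem-style loads. The fix hoists both conditions into a single safe_access flag so every call site agrees on when the slow path is required. Below is a minimal standalone sketch of just that decision logic; JitOptions and NeedsSafeAccess are illustrative stand-ins for this note, not Dolphin's API (only the memcheck/fastmem field names mirror m_jit.jo above).

#include <cstdio>

// Hypothetical stand-in for the JIT options struct; the field names mirror
// m_jit.jo.memcheck / m_jit.jo.fastmem, but the struct itself is illustrative.
struct JitOptions
{
  bool memcheck;  // accurate memory-exception checking requested
  bool fastmem;   // backpatched direct loads/stores available
};

// The safe path must be taken when memchecks are on OR fastmem is off.
// The bug fixed by this diff was testing only memcheck, so fastmem-off
// configurations still took the fast path.
static bool NeedsSafeAccess(const JitOptions& jo)
{
  return jo.memcheck || !jo.fastmem;
}

int main()
{
  const JitOptions configs[] = {
      {false, true},   // fast path is fine
      {true, true},    // memcheck on: safe path
      {false, false},  // fastmem off: safe path (the case this diff fixes)
  };
  for (const JitOptions& jo : configs)
    std::printf("memcheck=%d fastmem=%d -> %s\n", jo.memcheck, jo.fastmem,
                NeedsSafeAccess(jo) ? "safe" : "fast");
  return 0;
}

Computing the flag once at the top of each emitter, rather than repeating the compound condition at every branch, also makes it much harder for a future call site to reintroduce the same mismatch.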