From 768273f59b38fb1150b12eb61a467705d2d50d80 Mon Sep 17 00:00:00 2001
From: Fiora
Date: Mon, 3 Nov 2014 17:50:13 -0800
Subject: [PATCH] JIT: revert cmpXX optimization

It seems like this wasn't correct in 100% of cases.
---
 Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
index f28f668bee..68ae3e3ce5 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
@@ -556,16 +556,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
     MOV(64, PPCSTATE(cr_val[crf]), R(input));
     // Place the comparison next to the branch for macro-op fusion
     if (merge_branch)
-    {
-      // We only need to do a 32-bit compare, since the flags set will be the same as a sign-extended
-      // result.
-      // We should also test against gpr.R(a) if it's bound, since that's one less cycle of latency
-      // (the CPU doesn't have to wait for the movsxd to finish to resolve the branch).
-      if (gpr.R(a).IsSimpleReg())
-        TEST(32, gpr.R(a), gpr.R(a));
-      else
-        TEST(32, R(input), R(input));
-    }
+      TEST(64, R(input), R(input));
   }
   else
   {