From 48a7afa448a268bebf0dc967a8bf68eeedf296c8 Mon Sep 17 00:00:00 2001
From: JosJuice
Date: Wed, 25 Dec 2024 15:46:20 +0100
Subject: [PATCH] JitArm64: Use AArch64 imm masks in rlwimix slow case

All valid PPC imm masks (except for all zeroes and all ones) are also
valid AArch64 imm masks. This lets us optimize things a little.

Note that because I'm now ANDing rS before rotating it, its AND mask is
rotated left. All AArch64 imm masks can be rotated by any amount and
still be valid AArch64 imm masks.
---
 Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp
index 76f771abe5..51f6c836fb 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp
@@ -2113,12 +2113,11 @@ void JitArm64::rlwimix(UGeckoInstruction inst)
     gpr.BindToRegister(a, true);
     ARM64Reg RA = gpr.R(a);
     auto WA = gpr.GetScopedReg();
-    auto WB = a == s ? gpr.GetScopedReg() : Arm64GPRCache::ScopedARM64Reg(RA);
+    const u32 inverted_mask = ~mask;
 
-    MOVI2R(WA, mask);
-    BIC(WB, RA, WA);
-    AND(WA, WA, gpr.R(s), ArithOption(gpr.R(s), ShiftType::ROR, rot_dist));
-    ORR(RA, WB, WA);
+    AND(WA, gpr.R(s), LogicalImm(std::rotl(mask, rot_dist), GPRSize::B32));
+    AND(RA, RA, LogicalImm(inverted_mask, GPRSize::B32));
+    ORR(RA, RA, WA, ArithOption(WA, ShiftType::ROR, rot_dist));
   }
 
   if (inst.Rc)
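
Not part of the patch itself: below is a standalone C++20 sketch (the
variable names and mask construction are my own illustrations, not
Dolphin code) that spot-checks the two claims the commit message relies
on. First, every MB..ME mask other than all ones is a single, possibly
wrapping, run of ones, i.e. a rotation of a contiguous run, which is a
shape a 32-bit AArch64 logical immediate can encode and which stays
encodable under any rotation. Second, ANDing rS with the left-rotated
mask before the ROR gives the same result as rotating first and masking
afterwards, since rotation distributes over AND:
rotr(rS & rotl(mask, n), n) == rotr(rS, n) & mask.

#include <bit>
#include <cassert>
#include <cstdint>

// True when x is a single (possibly wrapping) run of ones, i.e. a
// rotation of a contiguous run. That is a shape a 32-bit AArch64
// logical immediate can encode with element size 32; all-zero and
// all-one patterns are not encodable, matching the exception noted
// in the commit message.
static bool IsRotatedOnesRun(uint32_t x)
{
  if (x == 0 || x == 0xFFFFFFFF)
    return false;
  // Rotate any ones touching bit 0 up to the top, making the run contiguous.
  const uint32_t z = std::rotr(x, std::countr_one(x));
  // A contiguous run plus its lowest set bit carries into a single bit
  // above the run, so ANDing the sum with z leaves nothing behind.
  return (z & (z + (z & (0u - z)))) == 0;
}

int main()
{
  const uint32_t rS = 0xDEADBEEF;  // arbitrary sample standing in for rS

  for (int rot_dist = 0; rot_dist < 32; ++rot_dist)
  {
    for (uint32_t mb = 0; mb < 32; ++mb)
    {
      for (uint32_t me = 0; me < 32; ++me)
      {
        // PPC rlwimi mask for MB..ME, with bit 0 as the MSB; the run of
        // ones wraps around when mb > me.
        const uint32_t hi = 0xFFFFFFFFu >> mb;         // big-endian bits mb..31
        const uint32_t lo = 0xFFFFFFFFu << (31 - me);  // big-endian bits 0..me
        const uint32_t mask = mb <= me ? (hi & lo) : (hi | lo);

        // Every PPC mask except all ones is a valid AArch64 imm mask,
        // and so is its left rotation by any distance.
        if (mask != 0xFFFFFFFF)
        {
          assert(IsRotatedOnesRun(mask));
          assert(IsRotatedOnesRun(std::rotl(mask, rot_dist)));
        }

        // Masking rS before the rotate, using the left-rotated mask,
        // matches rotating first and masking with the original mask.
        assert(std::rotr(rS & std::rotl(mask, rot_dist), rot_dist) ==
               (std::rotr(rS, rot_dist) & mask));
      }
    }
  }
}

Build with any C++20 compiler (std::rotl, std::rotr, and
std::countr_one come from <bit>); the program runs silently when all
32 x 32 x 32 cases hold.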