diff --git a/Source/Core/Common/Src/x64Emitter.cpp b/Source/Core/Common/Src/x64Emitter.cpp
index 0d31da7e9a..958e5d7aeb 100644
--- a/Source/Core/Common/Src/x64Emitter.cpp
+++ b/Source/Core/Common/Src/x64Emitter.cpp
@@ -186,7 +186,7 @@ enum NormalSSEOps
 	else if (scale >= 1)
 	{
 		//Ah good, no scaling.
-		if (scale == SCALE_ATREG && !((_offsetOrBaseReg&7) == 4 || (_offsetOrBaseReg&7) == 5))
+		if (scale == SCALE_ATREG && !((_offsetOrBaseReg & 7) == 4 || (_offsetOrBaseReg & 7) == 5))
 		{
 			//Okay, we're good. No SIB necessary.
 			int ioff = (int)offset;
@@ -216,10 +216,10 @@ enum NormalSSEOps
 			SIB = true;
 		}

-		if (scale == SCALE_ATREG && _offsetOrBaseReg == 4)
+		if (scale == SCALE_ATREG && ((_offsetOrBaseReg & 7) == 4))
 		{
 			SIB = true;
-			ireg = 4;
+			ireg = _offsetOrBaseReg;
 		}

 		//Okay, we're fine. Just disp encoding.
diff --git a/Source/Core/Common/Src/x64Emitter.h b/Source/Core/Common/Src/x64Emitter.h
index 5f336968c0..b4b809770c 100644
--- a/Source/Core/Common/Src/x64Emitter.h
+++ b/Source/Core/Common/Src/x64Emitter.h
@@ -114,8 +114,6 @@ struct OpArg
 		operandReg = 0;
 		scale = (u8)_scale;
 		offsetOrBaseReg = (u8)rmReg;
-		if (rmReg == R12)
-			PanicAlert("Codegen for R12 known buggy");
 		indexReg = (u8)scaledReg; //if scale == 0 never mind offseting
 		offset = _offset;
 	}
diff --git a/Source/Core/Core/Src/PowerPC/Jit64/JitRegCache.cpp b/Source/Core/Core/Src/PowerPC/Jit64/JitRegCache.cpp
index 1ccf422d45..2f882d28d8 100644
--- a/Source/Core/Core/Src/PowerPC/Jit64/JitRegCache.cpp
+++ b/Source/Core/Core/Src/PowerPC/Jit64/JitRegCache.cpp
@@ -205,9 +205,9 @@ const int *GPRRegCache::GetAllocationOrder(int &count)
 		// R12, when used as base register, for example in a LEA, can generate bad code! Need to look into this.
 #ifdef _M_X64
 #ifdef _WIN32
-		RSI, RDI, R13, R14, R8, R9, R10, R11 //, RCX
+		RSI, RDI, R13, R14, R8, R9, R10, R11, R12, //, RCX
 #else
-		RBP, R13, R14, R8, R9, R10, R11, //, RCX
+		RBP, R13, R14, R8, R9, R10, R11, R12, //, RCX
 #endif
 #elif _M_IX86
 		ESI, EDI, EBX, EBP, EDX, ECX,
@@ -222,7 +222,7 @@ const int *FPURegCache::GetAllocationOrder(int &count)
 	static const int allocationOrder[] =
 	{
 #ifdef _M_X64
-		XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5
+		XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5
 #elif _M_IX86
 		XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
 #endif
diff --git a/Source/Core/Core/Src/PowerPC/Jit64IL/IR.cpp b/Source/Core/Core/Src/PowerPC/Jit64IL/IR.cpp
index 4987993927..9436226372 100644
--- a/Source/Core/Core/Src/PowerPC/Jit64IL/IR.cpp
+++ b/Source/Core/Core/Src/PowerPC/Jit64IL/IR.cpp
@@ -804,9 +804,9 @@ static void fregSpill(RegInfo& RI, X64Reg reg) {

 // 64-bit - calling conventions differ between linux & windows, so...
 #ifdef _WIN32
-static const X64Reg RegAllocOrder[] = {RSI, RDI, R13, R14, R8, R9, R10, R11};
+static const X64Reg RegAllocOrder[] = {RSI, RDI, R12, R13, R14, R8, R9, R10, R11};
 #else
-static const X64Reg RegAllocOrder[] = {RBP, R13, R14, R8, R9, R10, R11};
+static const X64Reg RegAllocOrder[] = {RBP, R12, R13, R14, R8, R9, R10, R11};
 #endif
 static const int RegAllocSize = sizeof(RegAllocOrder) / sizeof(X64Reg);
 static const X64Reg FRegAllocOrder[] = {XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5};
@@ -1353,7 +1353,7 @@ static void DoWriteCode(IRBuilder* ibuild, Jit64* Jit, bool UseProfile) {
	case StoreLink:
	case StoreCTR:
	case StoreMSR:
-	case StoreGQR:
+	case StoreGQR:
	case StoreSRR:
	case StoreFReg:
		if (!isImm(*getOp1(I)))
@@ -1540,10 +1540,10 @@ static void DoWriteCode(IRBuilder* ibuild, Jit64* Jit, bool UseProfile) {
		regStoreInstToConstLoc(RI, 32, getOp1(I), &GQR(gqr));
		regNormalRegClear(RI, I);
		break;
-	}
+	}
	case StoreSRR: {
		unsigned srr = *I >> 16;
-		regStoreInstToConstLoc(RI, 32, getOp1(I),
+		regStoreInstToConstLoc(RI, 32, getOp1(I),
			&PowerPC::ppcState.spr[SPR_SRR0+srr]);
		regNormalRegClear(RI, I);
		break;
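
Note on the fix (reviewer context, not part of the patch): in ModRM/SIB encoding only the low three bits of a register number fit in the r/m and base fields; the fourth bit travels in the REX prefix. R12 is register 12, so its low three bits are 100b, the same escape value as RSP that means "a SIB byte follows"; R13's low bits are 101b, which with mod=00 does not mean a bare [R13] at all (in 64-bit mode that ModRM combination selects RIP-relative addressing), exactly like RBP. Comparing the full register number against 4 therefore handled RSP but mis-encoded R12, which is what the removed PanicAlert guarded against. A minimal standalone sketch of the masked checks (helper names are illustrative, not from the Dolphin emitter):

    #include <cstdio>

    // RSP=4, RBP=5, R12=12, R13=13 in the usual x86-64 register numbering.
    // A plain [base] operand needs a SIB byte when the low three bits of the
    // base register are 100b, because that r/m value is the SIB escape.
    static bool BaseNeedsSIB(unsigned reg)
    {
        return (reg & 7) == 4;  // true for RSP and R12
    }

    // With mod=00, r/m=101b does not encode a bare [base], so RBP and R13
    // need an explicit displacement (e.g. mod=01 with disp8 = 0) instead.
    static bool BaseNeedsDisp(unsigned reg)
    {
        return (reg & 7) == 5;  // true for RBP and R13
    }

    int main()
    {
        for (unsigned reg : {3u, 4u, 5u, 12u, 13u})
            std::printf("reg %2u: needs SIB=%d, needs disp=%d\n",
                        reg, BaseNeedsSIB(reg), BaseNeedsDisp(reg));
        return 0;
    }

The accompanying `ireg = _offsetOrBaseReg;` change does not alter the emitted index bits: the SIB index field only holds the low three bits, and both 4 and 12 mask to 100b, the "no index" value.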