diff --git a/Common/Arm64Emitter.cpp b/Common/Arm64Emitter.cpp
index 52bf53e1f7..abf9747601 100644
--- a/Common/Arm64Emitter.cpp
+++ b/Common/Arm64Emitter.cpp
@@ -3736,6 +3736,7 @@ void ARM64XEmitter::CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
 }
 
 bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
+	s64 negated = Is64Bit(Rn) ? -(s64)imm : -(s32)(u32)imm;
 	u32 val;
 	bool shift;
 	if (imm == 0) {
@@ -3745,12 +3746,16 @@ bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
 	} else if (IsImmArithmetic(imm, &val, &shift)) {
 		ADD(Rd, Rn, val, shift);
 		return true;
+	} else if (IsImmArithmetic((u64)negated, &val, &shift)) {
+		SUB(Rd, Rn, val, shift);
+		return true;
 	} else {
 		return false;
 	}
 }
 
 bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
+	s64 negated = Is64Bit(Rn) ? -(s64)imm : -(s32)(u32)imm;
 	u32 val;
 	bool shift;
 	if (imm == 0) {
@@ -3760,6 +3765,9 @@ bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
 	} else if (IsImmArithmetic(imm, &val, &shift)) {
 		SUB(Rd, Rn, val, shift);
 		return true;
+	} else if (IsImmArithmetic((u64)negated, &val, &shift)) {
+		ADD(Rd, Rn, val, shift);
+		return true;
 	} else {
 		return false;
 	}
diff --git a/Core/MIPS/ARM64/Arm64CompALU.cpp b/Core/MIPS/ARM64/Arm64CompALU.cpp
index e9400f1efa..48572f3d96 100644
--- a/Core/MIPS/ARM64/Arm64CompALU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompALU.cpp
@@ -90,11 +90,7 @@ void Arm64Jit::Comp_IType(MIPSOpcode op) {
 			ARM64Reg r32 = gpr.RPtr(rs);
 			gpr.MarkDirty(r32);
 			ARM64Reg r = EncodeRegTo64(r32);
-			if (simm > 0) {
-				ADDI2R(r, r, simm);
-			} else {
-				SUBI2R(r, r, -simm);
-			}
+			ADDI2R(r, r, simm);
 		} else {
 			if (simm >= 0) {
 				CompImmLogic(rs, rt, simm, &ARM64XEmitter::ADD, &ARM64XEmitter::TryADDI2R, &EvalAdd);
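
For context on why this works: ARM64's ADD/SUB (immediate) instructions only encode a 12-bit unsigned immediate, optionally shifted left by 12 bits. A negative immediate such as -1 therefore never encodes directly, but its negation usually does, so the emitter can flip `ADD Rd, Rn, #imm` into `SUB Rd, Rn, #-imm` and vice versa. The negation width matters: for a 32-bit register, `0xFFFFFFFF` must be treated as a 32-bit -1, which is what the `Is64Bit(Rn)` check above handles. Below is a minimal standalone sketch of that fallback, where `FitsArithImm` and `DemoAddImm` are simplified hypothetical stand-ins for the emitter's `IsImmArithmetic` check and `TryADDI2R`, not the real implementations:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical, simplified stand-in for ARM64XEmitter::IsImmArithmetic:
// true if imm fits the ADD/SUB (immediate) encoding, i.e. a 12-bit
// unsigned value, optionally shifted left by 12.
static bool FitsArithImm(uint64_t imm, uint32_t *val, bool *shift) {
	if (imm < 0x1000) {
		*val = (uint32_t)imm;
		*shift = false;
		return true;
	}
	if ((imm & 0xFFF) == 0 && imm < 0x1000000) {
		*val = (uint32_t)(imm >> 12);
		*shift = true;
		return true;
	}
	return false;
}

// Sketch of the fallback added to TryADDI2R: if imm itself doesn't encode,
// try its negation (sign-extended at the register's width) and flip the
// ADD into a SUB.
static void DemoAddImm(uint64_t imm, bool is64Bit) {
	int64_t negated = is64Bit ? -(int64_t)imm : -(int32_t)(uint32_t)imm;
	uint32_t val;
	bool shift;
	if (FitsArithImm(imm, &val, &shift))
		printf("ADD Rd, Rn, #%u%s\n", val, shift ? ", LSL #12" : "");
	else if (FitsArithImm((uint64_t)negated, &val, &shift))
		printf("SUB Rd, Rn, #%u%s\n", val, shift ? ", LSL #12" : "");
	else
		printf("doesn't encode either way; needs a scratch register\n");
}

int main() {
	DemoAddImm(1, true);                      // ADD Rd, Rn, #1
	DemoAddImm((uint64_t)(int64_t)-1, true);  // -1 doesn't encode; SUB Rd, Rn, #1
	DemoAddImm(0xFFFFFFFF, false);            // 32-bit -1; SUB Wd, Wn, #1
	DemoAddImm(0x123456, true);               // neither sign encodes
}
```

This is also why the Arm64CompALU.cpp hunk can drop the manual sign branch in `Comp_IType`: with the fallback in place, `ADDI2R(r, r, simm)` handles a negative `simm` by emitting the SUB itself.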