diff --git a/Core/MIPS/ARM64/Arm64CompFPU.cpp b/Core/MIPS/ARM64/Arm64CompFPU.cpp
index c446b1e5b1..80f4d4ee93 100644
--- a/Core/MIPS/ARM64/Arm64CompFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompFPU.cpp
@@ -334,10 +334,7 @@ void Arm64Jit::Comp_mxc1(MIPSOpcode op)
 			} else {
 				gpr.MapDirtyIn(rt, MIPS_REG_FPCOND);
 				LDR(INDEX_UNSIGNED, gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
-				// BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
-				ANDI2R(SCRATCH1, gpr.R(MIPS_REG_FPCOND), 1);  // Just in case
-				ANDI2R(gpr.R(rt), gpr.R(rt), ~(0x1 << 23), SCRATCH2);  // SCRATCHREG2 won't be used, this turns into a simple BIC.
-				ORR(gpr.R(rt), gpr.R(rt), SCRATCH1, ArithOption(gpr.R(rt), ST_LSL, 23));
+				BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
 			}
 		} else if (fs == 0) {
 			gpr.SetImm(rt, MIPSState::FCR0_VALUE);
diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index 8337372185..9209ce4172 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -69,6 +69,13 @@ bool Arm64RegCache::IsMapped(MIPSGPReg mipsReg) {
 	return mr[mipsReg].loc == ML_ARMREG;
 }
 
+bool Arm64RegCache::IsMappedAsPointer(MIPSGPReg mipsReg) {
+	if (IsMapped(mipsReg)) {
+		return ar[mr[mipsReg].reg].pointerified;
+	}
+	return false;
+}
+
 void Arm64RegCache::SetRegImm(ARM64Reg reg, u64 imm) {
 	// On ARM64, at least Cortex A57, good old MOVT/MOVW (MOVK in 64-bit) is really fast.
 	emit_->MOVI2R(reg, imm);
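
For reference, a minimal C++ model of what the single `BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1)` computes: ARM64 BFI copies the low `width` bits of the source register into the destination at bit `lsb`, leaving all other bits untouched, which is why it can stand in for the earlier mask-and-OR sequence. The helper name and test values below are illustrative only, not part of the patch.

```cpp
#include <cassert>
#include <cstdint>

// Sketch of BFI(dst, src, lsb, width) semantics on 32-bit operands:
// insert src<width-1:0> into dst<lsb+width-1:lsb>, keep the rest of dst.
static uint32_t BfiModel(uint32_t dst, uint32_t src, int lsb, int width) {
	const uint32_t field = (width == 32) ? 0xFFFFFFFFu : ((1u << width) - 1u);
	const uint32_t mask = field << lsb;
	return (dst & ~mask) | ((src << lsb) & mask);
}

int main() {
	// Placing a 1-bit condition flag into bit 23 of a cached fcr31 value.
	assert(BfiModel(0x00000000u, 1u, 23, 1) == (1u << 23));
	assert(BfiModel(0x00800000u, 0u, 23, 1) == 0u);
	return 0;
}
```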