diff --git a/Core/MIPS/ARM64/Arm64CompFPU.cpp b/Core/MIPS/ARM64/Arm64CompFPU.cpp
index af52f05e75..f6e6c33a5e 100644
--- a/Core/MIPS/ARM64/Arm64CompFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompFPU.cpp
@@ -86,7 +86,7 @@ void Arm64Jit::Comp_FPULS(MIPSOpcode op)
 	MIPSGPReg rs = _RS;
 	// u32 addr = R(rs) + offset;
 	// logBlocks = 1;
-	bool doCheck = false;
+	std::vector<FixupBranch> skips;
 	switch (op >> 26) {
 	case 49: //FI(ft) = Memory::Read_U32(addr); break; //lwc1
 		if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset <= 16380 && offset >= 0) {
@@ -106,17 +106,12 @@ void Arm64Jit::Comp_FPULS(MIPSOpcode op)
 			if (g_Config.bFastMemory) {
 				SetScratch1ToEffectiveAddress(rs, offset);
 			} else {
-				SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2);
-				doCheck = true;
+				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
 			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 		}
-		FixupBranch skip;
-		if (doCheck) {
-			skip = B(CC_EQ);
-		}
 		LDR(INDEX_UNSIGNED, fpr.R(ft), SCRATCH1_64, 0);
-		if (doCheck) {
+		for (auto skip : skips) {
 			SetJumpTarget(skip);
 		}
 		fpr.ReleaseSpillLocksAndDiscardTemps();
@@ -139,18 +134,13 @@ void Arm64Jit::Comp_FPULS(MIPSOpcode op)
 			if (g_Config.bFastMemory) {
 				SetScratch1ToEffectiveAddress(rs, offset);
 			} else {
-				SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2);
-				doCheck = true;
+				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
 			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 		}
-		FixupBranch skip2;
-		if (doCheck) {
-			skip2 = B(CC_EQ);
-		}
 		STR(INDEX_UNSIGNED, fpr.R(ft), SCRATCH1_64, 0);
-		if (doCheck) {
-			SetJumpTarget(skip2);
+		for (auto skip : skips) {
+			SetJumpTarget(skip);
 		}
 
 		break;
diff --git a/Core/MIPS/ARM64/Arm64CompLoadStore.cpp b/Core/MIPS/ARM64/Arm64CompLoadStore.cpp
index 0aa1a0e68c..df9d12b6fe 100644
--- a/Core/MIPS/ARM64/Arm64CompLoadStore.cpp
+++ b/Core/MIPS/ARM64/Arm64CompLoadStore.cpp
@@ -78,9 +78,46 @@ namespace MIPSComp
 		}
 	}
 
-	void Arm64Jit::SetCCAndSCRATCH1ForSafeAddress(MIPSGPReg rs, s16 offset, ARM64Reg tempReg, bool reverse) {
+	std::vector<FixupBranch> Arm64Jit::SetScratch1ForSafeAddress(MIPSGPReg rs, s16 offset, ARM64Reg tempReg) {
+		std::vector<FixupBranch> skips;
+
 		SetScratch1ToEffectiveAddress(rs, offset);
-		// TODO
+
+		// First off, if it's too high for anything - skip.
+		MOVI2R(tempReg, PSP_GetUserMemoryEnd());
+		CMP(SCRATCH1, tempReg);
+		skips.push_back(B(CC_HS));
+
+		// If it's higher than memory start and we didn't skip yet, it must be good. Hurray.
+		MOVI2R(tempReg, PSP_GetKernelMemoryBase());
+		CMP(SCRATCH1, tempReg);
+		FixupBranch inRAM = B(CC_HS);
+
+		// If we got here and it's higher, then it's between VRAM and RAM - skip.
+		MOVI2R(tempReg, PSP_GetVidMemEnd());
+		CMP(SCRATCH1, tempReg);
+		skips.push_back(B(CC_HS));
+
+		// And if it's higher than the VRAM base and we're still here, it's in VRAM.
+		MOVI2R(tempReg, PSP_GetVidMemBase());
+		CMP(SCRATCH1, tempReg);
+		FixupBranch inVRAM = B(CC_HS);
+
+		// Last gap, this is between SRAM and VRAM. Skip it.
+		MOVI2R(tempReg, PSP_GetScratchpadMemoryEnd());
+		CMP(SCRATCH1, tempReg);
+		skips.push_back(B(CC_HS));
+
+		// And for anything lower than SRAM, we just skip again.
+		MOVI2R(tempReg, PSP_GetScratchpadMemoryBase());
+		CMP(SCRATCH1, tempReg);
+		skips.push_back(B(CC_LO));
+
+		// At this point, we're either in SRAM (fell through) or jumped here from the RAM/VRAM checks.
+		SetJumpTarget(inRAM);
+		SetJumpTarget(inVRAM);
+
+		return skips;
 	}
 
 	void Arm64Jit::Comp_ITypeMemLR(MIPSOpcode op, bool load) {
@@ -107,8 +144,7 @@ namespace MIPSComp
 		}
 		u32 iaddr = gpr.IsImm(rs) ? offset + gpr.GetImm(rs) : 0xFFFFFFFF;
-		bool doCheck = false;
-		FixupBranch skip;
+		std::vector<FixupBranch> skips;
 
 		if (gpr.IsImm(rs) && Memory::IsValidAddress(iaddr)) {
 			u32 addr = iaddr;
 
@@ -173,14 +209,10 @@ namespace MIPSComp
 		}
 
 		if (false && !g_Config.bFastMemory && rs != MIPS_REG_SP) {
-			SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2, true);
-			doCheck = true;
+			skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 		} else {
 			SetScratch1ToEffectiveAddress(rs, offset);
 		}
-		if (doCheck) {
-			skip = B();
-		}
 
 		// Need temp regs. TODO: Get from the regcache?
 		static const ARM64Reg LR_SCRATCH3 = W9;
@@ -254,7 +286,7 @@ namespace MIPSComp
 			POP2(EncodeRegTo64(LR_SCRATCH3), EncodeRegTo64(LR_SCRATCH4));
 		}
 
-		if (doCheck) {
+		for (auto skip : skips) {
 			SetJumpTarget(skip);
 		}
 	}
@@ -273,7 +305,7 @@ namespace MIPSComp
 		}
 
 		u32 iaddr = gpr.IsImm(rs) ? offset + gpr.GetImm(rs) : 0xFFFFFFFF;
-		bool doCheck = false;
+		std::vector<FixupBranch> skips;
 		ARM64Reg addrReg = SCRATCH1;
 
 		switch (o) {
@@ -333,8 +365,7 @@ namespace MIPSComp
 			load ? gpr.MapDirtyIn(rt, rs) : gpr.MapInIn(rt, rs);
 
 			if (!g_Config.bFastMemory && rs != MIPS_REG_SP) {
-				SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2);
-				doCheck = true;
+				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			} else {
 				SetScratch1ToEffectiveAddress(rs, offset);
 			}
@@ -353,14 +384,10 @@ namespace MIPSComp
 			case 41: STRH(gpr.R(rt), MEMBASEREG, addrReg); break;
 			case 40: STRB(gpr.R(rt), MEMBASEREG, addrReg); break;
 			}
-			/*
-			if (doCheck) {
-				if (load) {
-					SetCC(CC_EQ);
-					MOVI2R(gpr.R(rt), 0);
-				}
-				SetCC(CC_AL);
-			}*/
+			for (auto skip : skips) {
+				SetJumpTarget(skip);
+				// TODO: Could clear to zero here on load, if skipping this for good reads.
+			}
 			break;
 		case 34: //lwl
 		case 38: //lwr
diff --git a/Core/MIPS/ARM64/Arm64CompVFPU.cpp b/Core/MIPS/ARM64/Arm64CompVFPU.cpp
index fd4640bceb..d07a2fd03c 100644
--- a/Core/MIPS/ARM64/Arm64CompVFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompVFPU.cpp
@@ -205,7 +205,7 @@ namespace MIPSComp
 		int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
 		MIPSGPReg rs = _RS;
 
-		bool doCheck = false;
+		std::vector<FixupBranch> skips;
 		switch (op >> 26) {
 		case 50: //lv.s  // VI(vt) = Memory::Read_U32(addr);
 			{
@@ -226,18 +226,13 @@
 				if (g_Config.bFastMemory) {
 					SetScratch1ToEffectiveAddress(rs, offset);
 				} else {
-					SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2);
-					doCheck = true;
+					skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 				}
 				// Pointerify
 				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			}
-			FixupBranch skip;
-			if (doCheck) {
-				skip = B(CC_EQ);
-			}
 			fp.LDR(32, INDEX_UNSIGNED, fpr.V(vt), SCRATCH1_64, 0);
-			if (doCheck) {
+			for (auto skip : skips) {
 				SetJumpTarget(skip);
 			}
 		}
@@ -262,17 +257,12 @@
 				if (g_Config.bFastMemory) {
 					SetScratch1ToEffectiveAddress(rs, offset);
 				} else {
-					SetCCAndSCRATCH1ForSafeAddress(rs, offset, SCRATCH2);
-					doCheck = true;
+					skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 				}
 				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			}
-			FixupBranch skip;
-			if (doCheck) {
-				skip = B(CC_EQ);
-			}
 			fp.STR(32, INDEX_UNSIGNED, fpr.V(vt), SCRATCH1_64, 0);
-			if (doCheck) {
+			for (auto skip : skips) {
 				SetJumpTarget(skip);
 			}
 		}
@@ -291,7 +281,7 @@
 		int vt = (((op >> 16) & 0x1f)) | ((op&1) << 5);
 		MIPSGPReg rs = _RS;
 
-		bool doCheck = false;
+		std::vector<FixupBranch> skips;
 
 		switch (op >> 26) {
 		case 54: //lv.q
@@ -309,22 +299,16 @@
 				if (g_Config.bFastMemory) {
 					SetScratch1ToEffectiveAddress(rs, imm);
 				} else {
-					SetCCAndSCRATCH1ForSafeAddress(rs, imm, SCRATCH2);
-					doCheck = true;
+					skips = SetScratch1ForSafeAddress(rs, imm, SCRATCH2);
 				}
 				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			}
-			FixupBranch skip;
-			if (doCheck) {
-				skip = B(CC_EQ);
-			}
-			// Removed consecutive opt for now
 
 			for (int i = 0; i < 4; i++)
 				fp.LDR(32, INDEX_UNSIGNED, fpr.V(vregs[i]), SCRATCH1_64, i * 4);
-			if (doCheck) {
+			for (auto skip : skips) {
 				SetJumpTarget(skip);
 			}
 		}
@@ -345,21 +329,15 @@
 				if (g_Config.bFastMemory) {
 					SetScratch1ToEffectiveAddress(rs, imm);
 				} else {
-					SetCCAndSCRATCH1ForSafeAddress(rs, imm, SCRATCH2);
-					doCheck = true;
+					skips = SetScratch1ForSafeAddress(rs, imm, SCRATCH2);
 				}
 				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			}
-			FixupBranch skip;
-			if (doCheck) {
-				skip = B(CC_EQ);
-			}
 
 			for (int i = 0; i < 4; i++)
 				fp.STR(32, INDEX_UNSIGNED, fpr.V(vregs[i]), SCRATCH1_64, i * 4);
-			if (doCheck) {
+			for (auto skip : skips) {
 				SetJumpTarget(skip);
 			}
 		}
diff --git a/Core/MIPS/ARM64/Arm64Jit.h b/Core/MIPS/ARM64/Arm64Jit.h
index 7966e53662..4eb3aa5b52 100644
--- a/Core/MIPS/ARM64/Arm64Jit.h
+++ b/Core/MIPS/ARM64/Arm64Jit.h
@@ -233,7 +233,7 @@ private:
 
 	// Utils
 	void SetScratch1ToEffectiveAddress(MIPSGPReg rs, s16 offset);
-	void SetCCAndSCRATCH1ForSafeAddress(MIPSGPReg rs, s16 offset, Arm64Gen::ARM64Reg tempReg, bool reverse = false);
+	std::vector<Arm64Gen::FixupBranch> SetScratch1ForSafeAddress(MIPSGPReg rs, s16 offset, Arm64Gen::ARM64Reg tempReg);
 	void Comp_ITypeMemLR(MIPSOpcode op, bool load);
 
 	JitBlockCache blocks;
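
Note (not part of the patch): the check that SetScratch1ForSafeAddress emits is a three-range test. Scratchpad, VRAM, and kernel/user RAM are accepted; any other address branches out via one of the returned skips, so the LDR/STR is simply bypassed. A plain C++ sketch of the same decision logic follows, assuming the PSP_Get* memory-map helpers already used in the diff; IsSafePSPAddress and the include path are illustrative stand-ins, not code from the patch.

	#include <cstdint>
	#include "Core/MemMap.h"  // assumed home of the PSP_Get* memory-map helpers

	// Mirrors the emitted branch structure: ranges are tested from the highest bound down.
	static bool IsSafePSPAddress(uint32_t addr) {
		if (addr >= PSP_GetUserMemoryEnd())        return false;  // above user RAM - skip
		if (addr >= PSP_GetKernelMemoryBase())     return true;   // kernel/user RAM - ok
		if (addr >= PSP_GetVidMemEnd())            return false;  // gap between VRAM and RAM - skip
		if (addr >= PSP_GetVidMemBase())           return true;   // VRAM - ok
		if (addr >= PSP_GetScratchpadMemoryEnd())  return false;  // gap between scratchpad and VRAM - skip
		if (addr >= PSP_GetScratchpadMemoryBase()) return true;   // scratchpad - ok
		return false;                                             // below scratchpad - skip
	}

Because a taken skip jumps straight past the memory access, every call site loops over the returned skips with SetJumpTarget after emitting the LDR/STR; the TODO in Comp_ITypeMem notes that loads could additionally clear the destination register to zero on that skipped path.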