diff --git a/Core/MIPS/ARM/ArmCompFPU.cpp b/Core/MIPS/ARM/ArmCompFPU.cpp
index 971f8cebdc..f4ec293382 100644
--- a/Core/MIPS/ARM/ArmCompFPU.cpp
+++ b/Core/MIPS/ARM/ArmCompFPU.cpp
@@ -93,6 +93,7 @@ extern int logBlocks;
 void ArmJit::Comp_FPULS(MIPSOpcode op)
 {
 	CONDITIONAL_DISABLE;
+	CheckMemoryBreakpoint();
 	s32 offset = (s16)(op & 0xFFFF);
 	int ft = _FT;
diff --git a/Core/MIPS/ARM/ArmCompLoadStore.cpp b/Core/MIPS/ARM/ArmCompLoadStore.cpp
index 2c0baac10b..16a3753fd2 100644
--- a/Core/MIPS/ARM/ArmCompLoadStore.cpp
+++ b/Core/MIPS/ARM/ArmCompLoadStore.cpp
@@ -112,6 +112,7 @@ namespace MIPSComp
 	void ArmJit::Comp_ITypeMemLR(MIPSOpcode op, bool load) {
 		CONDITIONAL_DISABLE;
+		CheckMemoryBreakpoint();
 		int offset = (signed short)(op & 0xFFFF);
 		MIPSGPReg rt = _RT;
 		MIPSGPReg rs = _RS;
@@ -120,6 +121,7 @@ namespace MIPSComp
 		if (!js.inDelaySlot) {
 			// Optimisation: Combine to single unaligned load/store
 			bool isLeft = (o == 34 || o == 42);
+			CheckMemoryBreakpoint(1);
 			MIPSOpcode nextOp = GetOffsetInstruction(1);
 			// Find a matching shift in opposite direction with opposite offset.
 			if (nextOp == (isLeft ? (op.encoding + (4<<26) - 3)
@@ -259,6 +261,7 @@ namespace MIPSComp
 	void ArmJit::Comp_ITypeMem(MIPSOpcode op)
 	{
 		CONDITIONAL_DISABLE;
+		CheckMemoryBreakpoint();
 		int offset = (signed short)(op&0xFFFF);
 		bool load = false;
 		MIPSGPReg rt = _RT;
diff --git a/Core/MIPS/ARM/ArmCompVFPU.cpp b/Core/MIPS/ARM/ArmCompVFPU.cpp
index 3f275c58be..c171424075 100644
--- a/Core/MIPS/ARM/ArmCompVFPU.cpp
+++ b/Core/MIPS/ARM/ArmCompVFPU.cpp
@@ -226,6 +226,7 @@ namespace MIPSComp
 	void ArmJit::Comp_SV(MIPSOpcode op) {
 		NEON_IF_AVAILABLE(CompNEON_SV);
 		CONDITIONAL_DISABLE;
+		CheckMemoryBreakpoint();
 		s32 offset = (signed short)(op & 0xFFFC);
 		int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
@@ -332,6 +333,7 @@ namespace MIPSComp
 	{
 		NEON_IF_AVAILABLE(CompNEON_SVQ);
 		CONDITIONAL_DISABLE;
+		CheckMemoryBreakpoint();
 		int imm = (signed short)(op&0xFFFC);
 		int vt = (((op >> 16) & 0x1f)) | ((op&1) << 5);
diff --git a/Core/MIPS/ARM/ArmCompVFPUNEON.cpp b/Core/MIPS/ARM/ArmCompVFPUNEON.cpp
index fe49e2e509..9f04436bf4 100644
--- a/Core/MIPS/ARM/ArmCompVFPUNEON.cpp
+++ b/Core/MIPS/ARM/ArmCompVFPUNEON.cpp
@@ -159,6 +159,7 @@ void ArmJit::CompNEON_VecDo3(MIPSOpcode op) {
 void ArmJit::CompNEON_SV(MIPSOpcode op) {
 	CONDITIONAL_DISABLE;
+	CheckMemoryBreakpoint();
 
 	// Remember to use single lane stores here and not VLDR/VSTR - switching usage
 	// between NEON and VFPU can be expensive on some chips.
@@ -276,6 +277,7 @@ inline int MIPS_GET_VQVT(u32 op) {
 void ArmJit::CompNEON_SVQ(MIPSOpcode op) {
 	CONDITIONAL_DISABLE;
+	CheckMemoryBreakpoint();
 
 	int offset = (signed short)(op & 0xFFFC);
 	int vt = MIPS_GET_VQVT(op.encoding);
diff --git a/Core/MIPS/ARM/ArmJit.cpp b/Core/MIPS/ARM/ArmJit.cpp
index ea206a6d26..87bb801087 100644
--- a/Core/MIPS/ARM/ArmJit.cpp
+++ b/Core/MIPS/ARM/ArmJit.cpp
@@ -26,6 +26,7 @@
 #include "Core/Config.h"
 #include "Core/Core.h"
 #include "Core/CoreTiming.h"
+#include "Core/Debugger/Breakpoints.h"
 #include "Core/Debugger/SymbolMap.h"
 #include "Core/MemMap.h"
@@ -45,7 +46,7 @@ using namespace ArmJitConstants;
 
-void DisassembleArm(const u8 *data, int size) {
+static void DisassembleArm(const u8 *data, int size) {
 	char temp[256];
 	for (int i = 0; i < size; i += 4) {
 		const u32 *codePtr = (const u32 *)(data + i);
@@ -69,6 +70,33 @@ void DisassembleArm(const u8 *data, int size) {
 	}
 }
 
+static u32 JitBreakpoint() {
+	// Should we skip this breakpoint?
+	if (CBreakPoints::CheckSkipFirst() == currentMIPS->pc)
+		return 0;
+
+	BreakAction result = CBreakPoints::ExecBreakPoint(currentMIPS->pc);
+	if ((result & BREAK_ACTION_PAUSE) == 0)
+		return 0;
+
+	return 1;
+}
+
+static u32 JitMemCheck(u32 pc) {
+	if (CBreakPoints::CheckSkipFirst() == currentMIPS->pc)
+		return 0;
+
+	// Note: pc may be the delay slot.
+	const auto op = Memory::Read_Instruction(pc, true);
+	s32 offset = (s16)(op & 0xFFFF);
+	if (MIPSGetInfo(op) & IS_VFPU)
+		offset &= 0xFFFC;
+	u32 addr = currentMIPS->r[MIPS_GET_RS(op)] + offset;
+
+	CBreakPoints::ExecOpMemCheck(addr, pc);
+	return coreState == CORE_RUNNING || coreState == CORE_NEXTFRAME ? 0 : 1;
+}
+
 namespace MIPSComp
 {
 using namespace ArmGen;
@@ -86,6 +114,10 @@ ArmJit::ArmJit(MIPSState *mips) : blocks(mips, this), gpr(mips, &js, &jo), fpr(m
 	INFO_LOG(JIT, "ARM JIT initialized: %d MB of code space", GetSpaceLeft() / (1024 * 1024));
 
 	js.startDefaultPrefix = mips_->HasDefaultPrefix();
+
+	// The debugger sets this so that "go" on a breakpoint will actually... go.
+	// But if they reset, we can end up hitting it by mistake, since it's based on PC and ticks.
+	CBreakPoints::SetSkipFirst(0);
 }
 
 ArmJit::~ArmJit() {
@@ -104,6 +136,10 @@ void ArmJit::DoState(PointerWrap &p)
 	} else {
 		js.hasSetRounding = 1;
 	}
+
+	// The debugger sets this so that "go" on a breakpoint will actually... go.
+	// But if they reset, we can end up hitting it by mistake, since it's based on PC and ticks.
+	CBreakPoints::SetSkipFirst(0);
 }
 
 void ArmJit::UpdateFCR31() {
@@ -158,13 +194,16 @@ void ArmJit::EatInstruction(MIPSOpcode op) {
 		ERROR_LOG_REPORT_ONCE(ateInDelaySlot, JIT, "Ate an instruction inside a delay slot.");
 	}
 
+	CheckJitBreakpoint(GetCompilerPC() + 4, 0);
 	js.numInstructions++;
 	js.compilerPC += 4;
 	js.downcountAmount += MIPSGetInstructionCycleEstimate(op);
 }
 
-void ArmJit::CompileDelaySlot(int flags)
-{
+void ArmJit::CompileDelaySlot(int flags) {
+	// Need to offset the downcount which was already incremented for the branch + delay slot.
+	CheckJitBreakpoint(GetCompilerPC() + 4, -2);
+
 	// preserve flag around the delay slot! Maybe this is not always necessary on ARM where
 	// we can (mostly) control whether we set the flag or not. Of course, if someone puts an slt in to the
 	// delay slot, we're screwed.
@@ -295,6 +334,9 @@ const u8 *ArmJit::DoJit(u32 em_address, JitBlock *b)
 	while (js.compiling)
 	{
 		gpr.SetCompilerPC(GetCompilerPC()); // Let it know for log messages
+		// Jit breakpoints are quite fast, so let's do them in release too.
+		CheckJitBreakpoint(GetCompilerPC(), 0);
+
 		MIPSOpcode inst = Memory::Read_Opcode_JIT(GetCompilerPC());
 		//MIPSInfo info = MIPSGetInfo(inst);
 		//if (info & IS_VFPU) {
@@ -492,7 +534,18 @@ void ArmJit::Comp_ReplacementFunc(MIPSOpcode op)
 		return;
 	}
 
-	if (entry->flags & REPFLAG_DISABLED) {
+	u32 funcSize = g_symbolMap->GetFunctionSize(GetCompilerPC());
+	bool disabled = (entry->flags & REPFLAG_DISABLED) != 0;
+	if (!disabled && funcSize != SymbolMap::INVALID_ADDRESS && funcSize > sizeof(u32)) {
+		// We don't need to disable hooks, the code will still run.
+		if ((entry->flags & (REPFLAG_HOOKENTER | REPFLAG_HOOKEXIT)) == 0) {
+			// Any breakpoint at the func entry was already tripped, so we can still run the replacement.
+			// That's a common case - just to see how often the replacement hits.
+			disabled = CBreakPoints::RangeContainsBreakPoint(GetCompilerPC() + sizeof(u32), funcSize - sizeof(u32));
+		}
+	}
+
+	if (disabled) {
 		MIPSCompileOp(Memory::Read_Instruction(GetCompilerPC(), true), this);
 	} else if (entry->jitReplaceFunc) {
 		MIPSReplaceFunc repl = entry->jitReplaceFunc;
@@ -686,6 +739,60 @@ void ArmJit::WriteSyscallExit()
 	B((const void *)dispatcherCheckCoreState);
 }
 
+bool ArmJit::CheckJitBreakpoint(u32 addr, int downcountOffset) {
+	if (CBreakPoints::IsAddressBreakPoint(addr)) {
+		MRS(R8);
+		FlushAll();
+		MOVI2R(SCRATCHREG1, GetCompilerPC());
+		MovToPC(SCRATCHREG1);
+		RestoreRoundingMode();
+		QuickCallFunction(SCRATCHREG1, &JitBreakpoint);
+
+		// If 0, the breakpoint wasn't tripped (or doesn't want a pause.)
+		CMPI2R(R0, 0, SCRATCHREG2);
+		FixupBranch skip = B_CC(CC_EQ);
+		WriteDownCount(downcountOffset);
+		ApplyRoundingMode();
+		B((const void *)dispatcherCheckCoreState);
+		SetJumpTarget(skip);
+
+		ApplyRoundingMode();
+		_MSR(true, false, R8);
+		return true;
+	}
+
+	return false;
+}
+
+bool ArmJit::CheckMemoryBreakpoint(int instructionOffset) {
+	if (CBreakPoints::HasMemChecks()) {
+		int off = instructionOffset + (js.inDelaySlot ? 1 : 0);
+
+		MRS(R8);
+		FlushAll();
+		RestoreRoundingMode();
+		MOVI2R(R0, GetCompilerPC());
+		MovToPC(R0);
+		if (off != 0)
+			ADDI2R(R0, R0, off * 4, SCRATCHREG2);
+		QuickCallFunction(SCRATCHREG2, &JitMemCheck);
+
+		// If 0, the breakpoint wasn't tripped.
+		CMPI2R(R0, 0, SCRATCHREG2);
+		FixupBranch skip = B_CC(CC_EQ);
+		WriteDownCount(-1 - off);
+		ApplyRoundingMode();
+		B((const void *)dispatcherCheckCoreState);
+		SetJumpTarget(skip);
+
+		ApplyRoundingMode();
+		_MSR(true, false, R8);
+		return true;
+	}
+
+	return false;
+}
+
 void ArmJit::Comp_DoNothing(MIPSOpcode op) { }
 
 MIPSOpcode ArmJit::GetOriginalOp(MIPSOpcode op) {
diff --git a/Core/MIPS/ARM/ArmJit.h b/Core/MIPS/ARM/ArmJit.h
index af2b6a128d..07b4b0f5a6 100644
--- a/Core/MIPS/ARM/ArmJit.h
+++ b/Core/MIPS/ARM/ArmJit.h
@@ -215,6 +215,8 @@ private:
 	void WriteExit(u32 destination, int exit_num);
 	void WriteExitDestInR(ArmGen::ARMReg Reg);
 	void WriteSyscallExit();
+	bool CheckJitBreakpoint(u32 addr, int downcountOffset);
+	bool CheckMemoryBreakpoint(int instructionOffset = 0);
 
 	// Utility compilation functions
 	void BranchFPFlag(MIPSOpcode op, CCFlags cc, bool likely);
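For readers less familiar with the ARM emitter calls, here is a rough sketch (plain C++, not part of the patch) of the runtime behaviour that CheckJitBreakpoint() inlines in front of an instruction that has a breakpoint: store the compiler PC into the MIPS state, call the JitBreakpoint() helper, and leave the block through dispatcherCheckCoreState only when it asks for a pause. FakeMipsState, JitBreakpointStub and RunBreakpointCheck are invented names for this illustration; the real emitted code additionally saves flags, flushes the register cache, switches rounding modes and settles the cycle downcount, all of which is omitted here.

// Illustrative sketch only - not part of the patch above.
#include <cstdint>

struct FakeMipsState {               // stand-in for the relevant MIPSState field
	uint32_t pc = 0;
};

// Stand-in for JitBreakpoint(): nonzero means "pause the core".
static uint32_t JitBreakpointStub(uint32_t pc) {
	return pc == 0x08804000u ? 1u : 0u;   // pretend a breakpoint is set at this address
}

// Roughly the branch structure produced by MOVI2R/MovToPC, QuickCallFunction,
// CMPI2R and B_CC(CC_EQ) in CheckJitBreakpoint().
static bool RunBreakpointCheck(FakeMipsState *mips, uint32_t compilerPC) {
	mips->pc = compilerPC;                    // MovToPC: expose the exact PC to the debugger
	if (JitBreakpointStub(compilerPC) != 0)   // QuickCallFunction + CMPI2R
		return true;                          // B(dispatcherCheckCoreState): leave the JIT block
	return false;                             // fall through to the original instruction
}

int main() {
	FakeMipsState mips;
	// Exits the "block" because the fake breakpoint at 0x08804000 wants a pause.
	return RunBreakpointCheck(&mips, 0x08804000u) ? 0 : 1;
}

CheckMemoryBreakpoint() follows the same pattern, except that it calls JitMemCheck(pc), which re-decodes the load/store at pc to compute the effective address before running the memcheck, which is why the instruction offset passed to it must point at a valid instruction word.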