irjit: Move more to IRNativeBackend, split.
commit 93e3d35f5d (parent 691799a0ca)
13 changed files with 365 additions and 236 deletions
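In short: this change splits the IR jit in two. A target-neutral driver, IRNativeJit, now lives in IRNativeCommon.cpp/.h and handles the run loop, cache clearing, block-cache debugging, and code-pointer queries; per-architecture code generation moves behind a new IRNativeBackend interface whose fixed entry points (enterDispatcher, dispatcher, dispatchFetch, crashHandler) are published through an IRNativeHooks struct. RiscVJit becomes a thin IRNativeJit subclass that owns a RiscVJitBackend.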
@@ -32,7 +32,7 @@ public:
 	virtual const u8 *GetCodePtr() const = 0;
 
-	u8 *GetBasePtr() {
+	u8 *GetBasePtr() const {
 		return region;
 	}
 
@@ -124,6 +124,13 @@ public:
 			return nullptr;
 		}
 	}
+	const IRBlock *GetBlock(int i) const {
+		if (i >= 0 && i < (int)blocks_.size()) {
+			return &blocks_[i];
+		} else {
+			return nullptr;
+		}
+	}
 
 	int FindPreloadBlock(u32 em_address);
 	int FindByCookie(int cookie);

@@ -180,7 +187,7 @@ public:
 	void UnlinkBlock(u8 *checkedEntry, u32 originalAddress) override;
 
 protected:
-	virtual bool CompileBlock(u32 em_address, std::vector<IRInst> &instructions, u32 &mipsBytes, bool preload);
+	bool CompileBlock(u32 em_address, std::vector<IRInst> &instructions, u32 &mipsBytes, bool preload);
 	virtual bool CompileTargetBlock(IRBlock *block, int block_num, bool preload) { return true; }
 
 	JitOptions jo;

@@ -16,12 +16,87 @@
 // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
 
+#include "Common/Profiler/Profiler.h"
+#include "Common/StringUtils.h"
+#include "Common/TimeUtil.h"
+#include "Core/MIPS/MIPSTables.h"
 #include "Core/MIPS/IR/IRNativeCommon.h"
 
 using namespace MIPSComp;
 
 namespace MIPSComp {
 
+// Compile time flag to enable debug stats for not compiled ops.
+static constexpr bool enableDebugStats = false;
+
+// Used only for debugging when enableDebugStats is true above.
+static std::map<uint8_t, int> debugSeenNotCompiledIR;
+static std::map<const char *, int> debugSeenNotCompiled;
+static double lastDebugStatsLog = 0.0;
+
+static void LogDebugStats() {
+	if (!enableDebugStats)
+		return;
+
+	double now = time_now_d();
+	if (now < lastDebugStatsLog + 1.0)
+		return;
+	lastDebugStatsLog = now;
+
+	int worstIROp = -1;
+	int worstIRVal = 0;
+	for (auto it : debugSeenNotCompiledIR) {
+		if (it.second > worstIRVal) {
+			worstIRVal = it.second;
+			worstIROp = it.first;
+		}
+	}
+	debugSeenNotCompiledIR.clear();
+
+	const char *worstName = nullptr;
+	int worstVal = 0;
+	for (auto it : debugSeenNotCompiled) {
+		if (it.second > worstVal) {
+			worstVal = it.second;
+			worstName = it.first;
+		}
+	}
+	debugSeenNotCompiled.clear();
+
+	if (worstIROp != -1)
+		WARN_LOG(JIT, "Most not compiled IR op: %s (%d)", GetIRMeta((IROp)worstIROp)->name, worstIRVal);
+	if (worstName != nullptr)
+		WARN_LOG(JIT, "Most not compiled op: %s (%d)", worstName, worstVal);
+}
+
+bool IRNativeBackend::DebugStatsEnabled() const {
+	return enableDebugStats;
+}
+
+void IRNativeBackend::NotifyMIPSInterpret(const char *name) {
+	_assert_(enableDebugStats);
+	debugSeenNotCompiled[name]++;
+}
+
+void IRNativeBackend::DoMIPSInst(uint32_t value) {
+	MIPSOpcode op;
+	memcpy(&op, &value, sizeof(op));
+
+	if constexpr (enableDebugStats)
+		debugSeenNotCompiled[MIPSGetName(op)]++;
+
+	MIPSInterpret(op);
+}
+
+uint32_t IRNativeBackend::DoIRInst(uint64_t value) {
+	IRInst inst;
+	memcpy(&inst, &value, sizeof(inst));
+
+	if constexpr (enableDebugStats)
+		debugSeenNotCompiledIR[(uint8_t)inst.op]++;
+
+	return IRInterpret(currentMIPS, &inst, 1);
+}
+
 void IRNativeBackend::CompileIRInst(IRInst inst) {
 	switch (inst.op) {
 	case IROp::Nop:
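A note on the DoMIPSInst()/DoIRInst() signatures above: they take plain integers because an IRInst packs into 8 bytes, so generated code can materialize the whole instruction in a single argument register and the callback memcpy's it back out. A self-contained sketch of that round trip; the IRInstExample layout is an assumption for illustration, not necessarily PPSSPP's exact IRInst:

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for IRInst: 8 bytes, so it fits one uint64_t register.
struct IRInstExample {
	uint8_t op;
	uint8_t dest;
	uint8_t src1;
	uint8_t src2;
	uint32_t constant;
};
static_assert(sizeof(IRInstExample) == sizeof(uint64_t), "must fit one register");

// What the JIT-side caller does: pack the instruction into an integer argument.
uint64_t PackForCall(IRInstExample inst) {
	uint64_t value;
	std::memcpy(&value, &inst, sizeof(value));  // memcpy, not a cast: no aliasing UB
	return value;
}

// What DoIRInst() does on entry: unpack and hand to the IR interpreter.
IRInstExample UnpackOnEntry(uint64_t value) {
	IRInstExample inst;
	std::memcpy(&inst, &value, sizeof(inst));
	return inst;
}

int main() {
	IRInstExample in{1, 2, 3, 4, 0x1234};
	IRInstExample out = UnpackOnEntry(PackForCall(in));
	return out.constant == in.constant ? 0 : 1;
}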
@@ -312,10 +387,127 @@ void IRNativeBackend::CompileIRInst(IRInst inst) {
 	}
 }
 
+IRNativeJit::IRNativeJit(MIPSState *mipsState)
+	: IRJit(mipsState), debugInterface_(blocks_) {}
+
+void IRNativeJit::Init(IRNativeBackend &backend) {
+	backend_ = &backend;
+	debugInterface_.Init(&backend_->CodeBlock());
+	backend_->GenerateFixedCode();
+
+	// Wanted this to be a reference, but vtbls get in the way. Shouldn't change.
+	hooks_ = backend.GetNativeHooks();
+}
+
+bool IRNativeJit::CompileTargetBlock(IRBlock *block, int block_num, bool preload) {
+	return backend_->CompileBlock(block, block_num, preload);
+}
+
+void IRNativeJit::RunLoopUntil(u64 globalticks) {
+	if constexpr (enableDebugStats) {
+		LogDebugStats();
+	}
+
+	PROFILE_THIS_SCOPE("jit");
+	hooks_.enterDispatcher();
+}
+
+void IRNativeJit::ClearCache() {
+	IRJit::ClearCache();
+	backend_->ClearAllBlocks();
+}
+
+bool IRNativeJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
+	if (ptr != nullptr && backend_->DescribeCodePtr(ptr, name))
+		return true;
+
+	int offset = backend_->OffsetFromCodePtr(ptr);
+	if (offset == -1)
+		return false;
+
+	int block_num = -1;
+	for (int i = 0; i < blocks_.GetNumBlocks(); ++i) {
+		const auto &b = blocks_.GetBlock(i);
+		// We allocate linearly.
+		if (b->GetTargetOffset() <= offset)
+			block_num = i;
+		if (b->GetTargetOffset() > offset)
+			break;
+	}
+
+	if (block_num == -1) {
+		name = "(unknown or deleted block)";
+		return true;
+	}
+
+	const IRBlock *block = blocks_.GetBlock(block_num);
+	if (block) {
+		u32 start = 0, size = 0;
+		block->GetRange(start, size);
+		name = StringFromFormat("(block %d at %08x)", block_num, start);
+		return true;
+	}
+	return false;
+}
+
+bool IRNativeJit::CodeInRange(const u8 *ptr) const {
+	return backend_->CodeInRange(ptr);
+}
+
+bool IRNativeJit::IsAtDispatchFetch(const u8 *ptr) const {
+	return ptr == backend_->GetNativeHooks().dispatchFetch;
+}
+
+const u8 *IRNativeJit::GetDispatcher() const {
+	return backend_->GetNativeHooks().dispatcher;
+}
+
+const u8 *IRNativeJit::GetCrashHandler() const {
+	return backend_->GetNativeHooks().crashHandler;
+}
+
+JitBlockCacheDebugInterface *IRNativeJit::GetBlockCacheDebugInterface() {
+	return &debugInterface_;
+}
+
+bool IRNativeBackend::CodeInRange(const u8 *ptr) const {
+	return CodeBlock().IsInSpace(ptr);
+}
+
+bool IRNativeBackend::DescribeCodePtr(const u8 *ptr, std::string &name) const {
+	if (!CodeBlock().IsInSpace(ptr))
+		return false;
+
+	// Used in disassembly viewer.
+	if (ptr == (const uint8_t *)hooks_.enterDispatcher) {
+		name = "enterDispatcher";
+	} else if (ptr == hooks_.dispatcher) {
+		name = "dispatcher";
+	} else if (ptr == hooks_.dispatchFetch) {
+		name = "dispatchFetch";
+	} else if (ptr == hooks_.crashHandler) {
+		name = "crashHandler";
+	} else {
+		return false;
+	}
+	return true;
+}
+
+int IRNativeBackend::OffsetFromCodePtr(const u8 *ptr) {
+	auto &codeBlock = CodeBlock();
+	if (!codeBlock.IsInSpace(ptr))
+		return -1;
+	return (int)codeBlock.GetOffset(ptr);
+}
+
 } // namespace MIPSComp
 
-IRNativeBlockCacheDebugInterface::IRNativeBlockCacheDebugInterface(IRBlockCache &irBlocks, CodeBlockCommon &codeBlock)
-	: irBlocks_(irBlocks), codeBlock_(codeBlock) {}
+IRNativeBlockCacheDebugInterface::IRNativeBlockCacheDebugInterface(const IRBlockCache &irBlocks)
+	: irBlocks_(irBlocks) {}
+
+void IRNativeBlockCacheDebugInterface::Init(const CodeBlockCommon *codeBlock) {
+	codeBlock_ = codeBlock;
+}
 
 int IRNativeBlockCacheDebugInterface::GetNumBlocks() const {
 	return irBlocks_.GetNumBlocks();
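The block search in IRNativeJit::DescribeCodePtr() above leans on the invariant the inline comment states: blocks are allocated linearly, so their target offsets are ascending. Under that same assumption the linear scan could also be a binary search; a hypothetical sketch (BlockInfo and FindBlockByOffset are illustrative names, not part of this commit):

#include <algorithm>
#include <vector>

struct BlockInfo {
	int targetOffset;  // start of the block's native code, ascending across blocks
};

// Returns the index of the last block starting at or before offset, or -1.
int FindBlockByOffset(const std::vector<BlockInfo> &blocks, int offset) {
	auto it = std::upper_bound(blocks.begin(), blocks.end(), offset,
		[](int off, const BlockInfo &b) { return off < b.targetOffset; });
	if (it == blocks.begin())
		return -1;  // offset lies before the first block
	return (int)(it - blocks.begin()) - 1;
}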
@@ -331,7 +523,7 @@ void IRNativeBlockCacheDebugInterface::GetBlockCodeRange(int blockNum, int *star
 	// We assume linear allocation. Maybe a bit dangerous, should always be right.
 	if (blockNum + 1 >= GetNumBlocks()) {
 		// Last block, get from current code pointer.
-		endOffset = (int)codeBlock_.GetOffset(codeBlock_.GetCodePtr());
+		endOffset = (int)codeBlock_->GetOffset(codeBlock_->GetCodePtr());
 	} else {
 		endOffset = irBlocks_.GetBlock(blockNum + 1)->GetTargetOffset();
 		_assert_msg_(endOffset >= blockOffset, "Next block not sequential, block=%d/%08x, next=%d/%08x", blockNum, blockOffset, blockNum + 1, endOffset);

@@ -348,7 +540,7 @@ JitBlockDebugInfo IRNativeBlockCacheDebugInterface::GetBlockDebugInfo(int blockN
 	GetBlockCodeRange(blockNum, &blockOffset, &codeSize);
 
 	// TODO: Normal entry?
-	const u8 *blockStart = codeBlock_.GetBasePtr() + blockOffset;
+	const u8 *blockStart = codeBlock_->GetBasePtr() + blockOffset;
 #if PPSSPP_ARCH(ARM)
 	debugInfo.targetDisasm = DisassembleArm2(blockStart, codeSize);
 #elif PPSSPP_ARCH(ARM64)

@@ -20,7 +20,8 @@
 
 class IRNativeBlockCacheDebugInterface : public JitBlockCacheDebugInterface {
 public:
-	IRNativeBlockCacheDebugInterface(MIPSComp::IRBlockCache &irBlocks, CodeBlockCommon &codeBlock);
+	IRNativeBlockCacheDebugInterface(const MIPSComp::IRBlockCache &irBlocks);
+	void Init(const CodeBlockCommon *codeBlock);
 	int GetNumBlocks() const;
 	int GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly = true) const;
 	JitBlockDebugInfo GetBlockDebugInfo(int blockNum) const;

@@ -29,18 +30,42 @@ public:
 private:
 	void GetBlockCodeRange(int blockNum, int *startOffset, int *size) const;
 
-	MIPSComp::IRBlockCache &irBlocks_;
-	CodeBlockCommon &codeBlock_;
+	const MIPSComp::IRBlockCache &irBlocks_;
+	const CodeBlockCommon *codeBlock_ = nullptr;
 };
 
 namespace MIPSComp {
 
+typedef void (*IRNativeFuncNoArg)();
+
+struct IRNativeHooks {
+	IRNativeFuncNoArg enterDispatcher = nullptr;
+
+	const uint8_t *dispatcher = nullptr;
+	const uint8_t *dispatchFetch = nullptr;
+	const uint8_t *crashHandler = nullptr;
+};
+
 class IRNativeBackend {
 public:
 	virtual ~IRNativeBackend() {}
 
 	void CompileIRInst(IRInst inst);
 
+	virtual bool DescribeCodePtr(const u8 *ptr, std::string &name) const;
+	bool CodeInRange(const u8 *ptr) const;
+	int OffsetFromCodePtr(const u8 *ptr);
+
+	virtual void GenerateFixedCode() = 0;
+	virtual bool CompileBlock(IRBlock *block, int block_num, bool preload) = 0;
+	virtual void ClearAllBlocks() = 0;
+
+	const IRNativeHooks &GetNativeHooks() const {
+		return hooks_;
+	}
+
+	virtual const CodeBlockCommon &CodeBlock() const = 0;
+
 protected:
 	virtual void CompIR_Arith(IRInst inst) = 0;
 	virtual void CompIR_Assign(IRInst inst) = 0;

@@ -84,11 +109,46 @@ protected:
 	virtual void CompIR_VecPack(IRInst inst) = 0;
 	virtual void CompIR_VecStore(IRInst inst) = 0;
 	virtual void CompIR_ValidateAddress(IRInst inst) = 0;
 
+	// Returns true when debugging statistics should be compiled in.
+	bool DebugStatsEnabled() const;
+
+	// Callback (compile when DebugStatsEnabled()) to log a base interpreter hit.
+	// Call the func returned by MIPSGetInterpretFunc(op) directly for interpret.
+	static void NotifyMIPSInterpret(const char *name);
+
+	// Callback to log AND perform a base interpreter op. Alternative to NotifyMIPSInterpret().
+	static void DoMIPSInst(uint32_t op);
+
+	// Callback to log AND perform an IR interpreter inst. Returns 0 or a PC to jump to.
+	static uint32_t DoIRInst(uint64_t inst);
+
+	IRNativeHooks hooks_;
 };
 
 class IRNativeJit : public IRJit {
 public:
-	IRNativeJit(MIPSState *mipsState) : IRJit(mipsState) {}
+	IRNativeJit(MIPSState *mipsState);
+
+	void RunLoopUntil(u64 globalticks) override;
+
+	void ClearCache() override;
+
+	bool DescribeCodePtr(const u8 *ptr, std::string &name) override;
+	bool CodeInRange(const u8 *ptr) const override;
+	bool IsAtDispatchFetch(const u8 *ptr) const override;
+	const u8 *GetDispatcher() const override;
+	const u8 *GetCrashHandler() const override;
+
+	JitBlockCacheDebugInterface *GetBlockCacheDebugInterface() override;
+
+protected:
+	void Init(IRNativeBackend &backend);
+	bool CompileTargetBlock(IRBlock *block, int block_num, bool preload) override;
+
+	IRNativeBackend *backend_ = nullptr;
+	IRNativeHooks hooks_;
+	IRNativeBlockCacheDebugInterface debugInterface_;
 };
 
 } // namespace MIPSComp
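Taken together, the header above fixes the contract: IRNativeJit drives compilation and dispatch purely through IRNativeBackend, and the only things it needs from generated code are the entry points in IRNativeHooks (copied by value in Init(), as the .cpp comment notes). A self-contained toy mirroring that shape, with all names illustrative rather than PPSSPP's:

#include <cstdio>

using FuncNoArg = void (*)();

struct Hooks {                      // stands in for IRNativeHooks
	FuncNoArg enterDispatcher = nullptr;
};

class Backend {                     // stands in for IRNativeBackend
public:
	virtual ~Backend() {}
	virtual void GenerateFixedCode() = 0;
	const Hooks &GetHooks() const { return hooks_; }
protected:
	Hooks hooks_;
};

class Frontend {                    // stands in for IRNativeJit
public:
	void Init(Backend &backend) {
		backend_ = &backend;
		backend_->GenerateFixedCode();
		hooks_ = backend.GetHooks();  // copied by value, like hooks_ in Init()
	}
	void RunLoop() { hooks_.enterDispatcher(); }
private:
	Backend *backend_ = nullptr;
	Hooks hooks_;
};

class ToyBackend : public Backend { // a real backend would emit machine code
public:
	void GenerateFixedCode() override {
		hooks_.enterDispatcher = [] { std::puts("entered dispatcher"); };
	}
};

int main() {
	ToyBackend backend;
	Frontend jit;
	jit.Init(backend);
	jit.RunLoop();  // prints "entered dispatcher"
}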
@@ -44,17 +44,7 @@ static void ShowPC(u32 downcount, void *membase, void *jitbase) {
 	count++;
 }
 
-static void ShowBlockError(int type) {
-	if (type == 1) {
-		ERROR_LOG(JIT, "[%08x] ShowBlockError: block num was out of range in emuhack", currentMIPS->pc);
-	} else if (type == 2) {
-		ERROR_LOG(JIT, "[%08x] ShowBlockError: block num pointed to null jitblock", currentMIPS->pc);
-	} else {
-		ERROR_LOG(JIT, "[%08x] ShowBlockError: invalid error type", currentMIPS->pc);
-	}
-}
-
-void RiscVJit::GenerateFixedCode(const JitOptions &jo) {
+void RiscVJitBackend::GenerateFixedCode() {
 	BeginWrite(GetMemoryProtectPageSize());
 	const u8 *start = AlignCodePage();
 

@@ -104,7 +94,7 @@ void RiscVJit::GenerateFixedCode(const JitOptions &jo) {
 		RET();
 	}
 
-	enterDispatcher_ = AlignCode16();
+	hooks_.enterDispatcher = (IRNativeFuncNoArg)AlignCode16();
 
 	// Start by saving some regs on the stack. There are 12 GPs and 12 FPs we want.
 	// Note: we leave R_SP as, well, SP, so it doesn't need to be saved.

@@ -157,7 +147,7 @@ void RiscVJit::GenerateFixedCode(const JitOptions &jo) {
 	dispatcherPCInSCRATCH1_ = GetCodePtr();
 	MovToPC(SCRATCH1);
 
-	dispatcher_ = GetCodePtr();
+	hooks_.dispatcher = GetCodePtr();
 	FixupBranch bail = BLT(DOWNCOUNTREG, R_ZERO);
 	SetJumpTarget(skipToRealDispatch);
 

@@ -177,7 +167,7 @@ void RiscVJit::GenerateFixedCode(const JitOptions &jo) {
 	AND(SCRATCH1, SCRATCH1, SCRATCH2);
 #endif
 	ADD(SCRATCH1, SCRATCH1, MEMBASEREG);
-	dispatcherFetch_ = GetCodePtr();
+	hooks_.dispatchFetch = GetCodePtr();
 	LWU(SCRATCH1, SCRATCH1, 0);
 	SRLI(SCRATCH2, SCRATCH1, 24);
 	// We're in other words comparing to the top 8 bits of MIPS_EMUHACK_OPCODE by subtracting.

@@ -224,7 +214,7 @@ void RiscVJit::GenerateFixedCode(const JitOptions &jo) {
 
 	RET();
 
-	crashHandler_ = GetCodePtr();
+	hooks_.crashHandler = GetCodePtr();
 	LI(SCRATCH1, &coreState, SCRATCH2);
 	LI(SCRATCH2, CORE_RUNTIME_ERROR);
 	SW(SCRATCH2, SCRATCH1, 0);

@@ -35,7 +35,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::CompIR_Arith(IRInst inst) {
+void RiscVJitBackend::CompIR_Arith(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	bool allowPtrMath = true;

@@ -96,7 +96,7 @@ void RiscVJit::CompIR_Arith(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Logic(IRInst inst) {
+void RiscVJitBackend::CompIR_Logic(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -187,7 +187,7 @@ void RiscVJit::CompIR_Logic(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Assign(IRInst inst) {
+void RiscVJitBackend::CompIR_Assign(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -227,7 +227,7 @@ void RiscVJit::CompIR_Assign(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Bits(IRInst inst) {
+void RiscVJitBackend::CompIR_Bits(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -269,7 +269,7 @@ void RiscVJit::CompIR_Bits(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Shift(IRInst inst) {
+void RiscVJitBackend::CompIR_Shift(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -367,7 +367,7 @@ void RiscVJit::CompIR_Shift(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Compare(IRInst inst) {
+void RiscVJitBackend::CompIR_Compare(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg lhs = INVALID_REG;

@@ -445,7 +445,7 @@ void RiscVJit::CompIR_Compare(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_CondAssign(IRInst inst) {
+void RiscVJitBackend::CompIR_CondAssign(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg lhs = INVALID_REG;

@@ -519,7 +519,7 @@ void RiscVJit::CompIR_CondAssign(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_HiLo(IRInst inst) {
+void RiscVJitBackend::CompIR_HiLo(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -553,7 +553,7 @@ void RiscVJit::CompIR_HiLo(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Mult(IRInst inst) {
+void RiscVJitBackend::CompIR_Mult(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	auto makeArgsUnsigned = [&](RiscVReg *lhs, RiscVReg *rhs) {

@@ -652,7 +652,7 @@ void RiscVJit::CompIR_Mult(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Div(IRInst inst) {
+void RiscVJitBackend::CompIR_Div(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg numReg, denomReg;

@@ -34,7 +34,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::CompIR_Exit(IRInst inst) {
+void RiscVJitBackend::CompIR_Exit(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg exitReg = INVALID_REG;

@@ -64,7 +64,7 @@ void RiscVJit::CompIR_Exit(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_ExitIf(IRInst inst) {
+void RiscVJitBackend::CompIR_ExitIf(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg lhs = INVALID_REG;

@@ -34,7 +34,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::CompIR_FArith(IRInst inst) {
+void RiscVJitBackend::CompIR_FArith(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -108,7 +108,7 @@ void RiscVJit::CompIR_FArith(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FCondAssign(IRInst inst) {
+void RiscVJitBackend::CompIR_FCondAssign(IRInst inst) {
 	CONDITIONAL_DISABLE;
 	if (inst.op != IROp::FMin && inst.op != IROp::FMax)
 		INVALIDOP;

@@ -174,7 +174,7 @@ void RiscVJit::CompIR_FCondAssign(IRInst inst) {
 	SetJumpTarget(finish);
 }
 
-void RiscVJit::CompIR_FAssign(IRInst inst) {
+void RiscVJitBackend::CompIR_FAssign(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -220,7 +220,7 @@ void RiscVJit::CompIR_FAssign(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FRound(IRInst inst) {
+void RiscVJitBackend::CompIR_FRound(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	// TODO: If this is followed by a GPR transfer, might want to combine.

@@ -251,7 +251,7 @@ void RiscVJit::CompIR_FRound(IRInst inst) {
 	FMV(FMv::W, FMv::X, fpr.R(inst.dest), SCRATCH1);
 }
 
-void RiscVJit::CompIR_FCvt(IRInst inst) {
+void RiscVJitBackend::CompIR_FCvt(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -274,7 +274,7 @@ void RiscVJit::CompIR_FCvt(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FSat(IRInst inst) {
+void RiscVJitBackend::CompIR_FSat(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg tempReg = INVALID_REG;

@@ -334,7 +334,7 @@ void RiscVJit::CompIR_FSat(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FCompare(IRInst inst) {
+void RiscVJitBackend::CompIR_FCompare(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	constexpr IRRegIndex IRREG_VFPUL_CC = IRREG_VFPU_CTRL_BASE + VFPU_CTRL_CC;

@@ -548,7 +548,7 @@ void RiscVJit::CompIR_FCompare(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_RoundingMode(IRInst inst) {
+void RiscVJitBackend::CompIR_RoundingMode(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -570,7 +570,7 @@ void RiscVJit::CompIR_RoundingMode(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FSpecial(IRInst inst) {
+void RiscVJitBackend::CompIR_FSpecial(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 #ifdef __riscv_float_abi_soft

@@ -34,7 +34,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::SetScratch1ToSrc1Address(IRReg src1) {
+void RiscVJitBackend::SetScratch1ToSrc1Address(IRReg src1) {
 	gpr.MapReg(src1);
 #ifdef MASKED_PSP_MEMORY
 	SLLIW(SCRATCH1, gpr.R(src1), 2);

@@ -53,7 +53,7 @@ void RiscVJit::SetScratch1ToSrc1Address(IRReg src1) {
 #endif
 }
 
-int32_t RiscVJit::AdjustForAddressOffset(RiscVGen::RiscVReg *reg, int32_t constant, int32_t range) {
+int32_t RiscVJitBackend::AdjustForAddressOffset(RiscVGen::RiscVReg *reg, int32_t constant, int32_t range) {
 	if (constant < -2048 || constant + range > 2047) {
 		LI(SCRATCH2, constant);
 		ADD(SCRATCH1, *reg, SCRATCH2);

@@ -63,7 +63,7 @@ int RiscVJit::AdjustForAddressOffset(RiscVGen::RiscVReg *reg, int32_t consta
 	return constant;
 }
 
-void RiscVJit::CompIR_Load(IRInst inst) {
+void RiscVJitBackend::CompIR_Load(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	gpr.SpillLock(inst.dest, inst.src1);

@@ -124,7 +124,7 @@ void RiscVJit::CompIR_Load(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_LoadShift(IRInst inst) {
+void RiscVJitBackend::CompIR_LoadShift(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -140,7 +140,7 @@ void RiscVJit::CompIR_LoadShift(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FLoad(IRInst inst) {
+void RiscVJitBackend::CompIR_FLoad(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg addrReg = INVALID_REG;

@@ -173,7 +173,7 @@ void RiscVJit::CompIR_FLoad(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecLoad(IRInst inst) {
+void RiscVJitBackend::CompIR_VecLoad(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg addrReg = INVALID_REG;

@@ -210,7 +210,7 @@ void RiscVJit::CompIR_VecLoad(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Store(IRInst inst) {
+void RiscVJitBackend::CompIR_Store(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	gpr.SpillLock(inst.src3, inst.src1);

@@ -255,7 +255,7 @@ void RiscVJit::CompIR_Store(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_CondStore(IRInst inst) {
+void RiscVJitBackend::CompIR_CondStore(IRInst inst) {
 	CONDITIONAL_DISABLE;
 	if (inst.op != IROp::Store32Conditional)
 		INVALIDOP;

@@ -297,7 +297,7 @@ void RiscVJit::CompIR_CondStore(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_StoreShift(IRInst inst) {
+void RiscVJitBackend::CompIR_StoreShift(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -313,7 +313,7 @@ void RiscVJit::CompIR_StoreShift(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_FStore(IRInst inst) {
+void RiscVJitBackend::CompIR_FStore(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg addrReg = INVALID_REG;

@@ -346,7 +346,7 @@ void RiscVJit::CompIR_FStore(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecStore(IRInst inst) {
+void RiscVJitBackend::CompIR_VecStore(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	RiscVReg addrReg = INVALID_REG;

@@ -38,7 +38,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::CompIR_Basic(IRInst inst) {
+void RiscVJitBackend::CompIR_Basic(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -85,7 +85,7 @@ void RiscVJit::CompIR_Basic(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Transfer(IRInst inst) {
+void RiscVJitBackend::CompIR_Transfer(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -177,7 +177,7 @@ void RiscVJit::CompIR_Transfer(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_System(IRInst inst) {
+void RiscVJitBackend::CompIR_System(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -236,7 +236,7 @@ void RiscVJit::CompIR_System(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_Breakpoint(IRInst inst) {
+void RiscVJitBackend::CompIR_Breakpoint(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -251,7 +251,7 @@ void RiscVJit::CompIR_Breakpoint(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_ValidateAddress(IRInst inst) {
+void RiscVJitBackend::CompIR_ValidateAddress(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -34,7 +34,7 @@ namespace MIPSComp {
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-void RiscVJit::CompIR_VecAssign(IRInst inst) {
+void RiscVJitBackend::CompIR_VecAssign(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -129,7 +129,7 @@ void RiscVJit::CompIR_VecAssign(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecArith(IRInst inst) {
+void RiscVJitBackend::CompIR_VecArith(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -184,7 +184,7 @@ void RiscVJit::CompIR_VecArith(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecHoriz(IRInst inst) {
+void RiscVJitBackend::CompIR_VecHoriz(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -230,7 +230,7 @@ void RiscVJit::CompIR_VecHoriz(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecPack(IRInst inst) {
+void RiscVJitBackend::CompIR_VecPack(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -266,7 +266,7 @@ void RiscVJit::CompIR_VecPack(IRInst inst) {
 	}
 }
 
-void RiscVJit::CompIR_VecClamp(IRInst inst) {
+void RiscVJitBackend::CompIR_VecClamp(IRInst inst) {
 	CONDITIONAL_DISABLE;
 
 	switch (inst.op) {

@@ -15,62 +15,18 @@
 // Official git repository and contact information can be found at
 // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
 
-#include "Common/StringUtils.h"
-#include "Common/TimeUtil.h"
 #include "Core/MemMap.h"
-#include "Core/MIPS/MIPSTables.h"
 #include "Core/MIPS/RiscV/RiscVJit.h"
 #include "Core/MIPS/RiscV/RiscVRegCache.h"
 #include "Common/Profiler/Profiler.h"
 
 namespace MIPSComp {
 
 using namespace RiscVGen;
 using namespace RiscVJitConstants;
 
-static constexpr bool enableDebug = false;
-
-static std::map<uint8_t, int> debugSeenNotCompiledIR;
-static std::map<const char *, int> debugSeenNotCompiled;
-double lastDebugLog = 0.0;
-
-static void LogDebugNotCompiled() {
-	if (!enableDebug)
-		return;
-
-	double now = time_now_d();
-	if (now < lastDebugLog + 1.0)
-		return;
-	lastDebugLog = now;
-
-	int worstIROp = -1;
-	int worstIRVal = 0;
-	for (auto it : debugSeenNotCompiledIR) {
-		if (it.second > worstIRVal) {
-			worstIRVal = it.second;
-			worstIROp = it.first;
-		}
-	}
-	debugSeenNotCompiledIR.clear();
-
-	const char *worstName = nullptr;
-	int worstVal = 0;
-	for (auto it : debugSeenNotCompiled) {
-		if (it.second > worstVal) {
-			worstVal = it.second;
-			worstName = it.first;
-		}
-	}
-	debugSeenNotCompiled.clear();
-
-	if (worstIROp != -1)
-		WARN_LOG(JIT, "Most not compiled IR op: %s (%d)", GetIRMeta((IROp)worstIROp)->name, worstIRVal);
-	if (worstName != nullptr)
-		WARN_LOG(JIT, "Most not compiled op: %s (%d)", worstName, worstVal);
-}
-
-RiscVJit::RiscVJit(MIPSState *mipsState)
-	: IRNativeJit(mipsState), gpr(mipsState, &jo), fpr(mipsState, &jo), debugInterface_(blocks_, *this) {
+RiscVJitBackend::RiscVJitBackend(MIPSState *mipsState, JitOptions &jitopt)
+	: mips_(mipsState), jo(jitopt), gpr(mipsState, &jo), fpr(mipsState, &jo) {
 	// Automatically disable incompatible options.
 	if (((intptr_t)Memory::base & 0x00000000FFFFFFFFUL) != 0) {
 		jo.enablePointerify = false;

@@ -83,31 +39,16 @@ RiscVJit::RiscVJit(MIPSState *mipsState)
 
 	gpr.Init(this);
 	fpr.Init(this);
-
-	GenerateFixedCode(jo);
 }
 
-RiscVJit::~RiscVJit() {
-}
-
-void RiscVJit::RunLoopUntil(u64 globalticks) {
-	if constexpr (enableDebug) {
-		LogDebugNotCompiled();
-	}
-
-	PROFILE_THIS_SCOPE("jit");
-	((void (*)())enterDispatcher_)();
-}
-
-JitBlockCacheDebugInterface *MIPSComp::RiscVJit::GetBlockCacheDebugInterface() {
-	return &debugInterface_;
+RiscVJitBackend::~RiscVJitBackend() {
 }
 
 static void NoBlockExits() {
 	_assert_msg_(false, "Never exited block, invalid IR?");
 }
 
-bool RiscVJit::CompileTargetBlock(IRBlock *block, int block_num, bool preload) {
+bool RiscVJitBackend::CompileBlock(IRBlock *block, int block_num, bool preload) {
 	if (GetSpaceLeft() < 0x800)
 		return false;
 

@@ -138,9 +79,10 @@ bool RiscVJit::CompileTargetBlock(IRBlock *block, int block_num, bool preload) {
 	}
 
 	// We should've written an exit above. If we didn't, bad things will happen.
-	if (enableDebug) {
+	// Only check if debug stats are enabled - needlessly wastes jit space.
+	if (DebugStatsEnabled()) {
 		QuickCallFunction(&NoBlockExits);
-		QuickJ(R_RA, crashHandler_);
+		QuickJ(R_RA, hooks_.crashHandler);
 	}
 
 	FlushIcache();

@@ -148,17 +90,7 @@ bool RiscVJit::CompileTargetBlock(IRBlock *block, int block_num, bool preload) {
 	return true;
 }
 
-static u32 DoIRInst(uint64_t value) {
-	IRInst inst;
-	memcpy(&inst, &value, sizeof(inst));
-
-	if constexpr (enableDebug)
-		debugSeenNotCompiledIR[(uint8_t)inst.op]++;
-
-	return IRInterpret(currentMIPS, &inst, 1);
-}
-
-void RiscVJit::CompIR_Generic(IRInst inst) {
+void RiscVJitBackend::CompIR_Generic(IRInst inst) {
 	// If we got here, we're going the slow way.
 	uint64_t value;
 	memcpy(&value, &inst, sizeof(inst));

@@ -183,36 +115,29 @@ void RiscVJit::CompIR_Generic(IRInst inst) {
 	}
 }
 
-static void DebugInterpretHit(const char *name) {
-	if (enableDebug)
-		debugSeenNotCompiled[name]++;
-}
-
-void RiscVJit::CompIR_Interpret(IRInst inst) {
+void RiscVJitBackend::CompIR_Interpret(IRInst inst) {
 	MIPSOpcode op(inst.constant);
 
 	// IR protects us against this being a branching instruction (well, hopefully.)
 	FlushAll();
 	SaveStaticRegisters();
-	if (enableDebug) {
+	if (DebugStatsEnabled()) {
 		LI(X10, MIPSGetName(op));
-		QuickCallFunction(&DebugInterpretHit);
+		QuickCallFunction(&NotifyMIPSInterpret);
 	}
 	LI(X10, (int32_t)inst.constant);
 	QuickCallFunction((const u8 *)MIPSGetInterpretFunc(op));
 	LoadStaticRegisters();
 }
 
-void RiscVJit::FlushAll() {
+void RiscVJitBackend::FlushAll() {
 	gpr.FlushAll();
 	fpr.FlushAll();
 }
 
-bool RiscVJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
+bool RiscVJitBackend::DescribeCodePtr(const u8 *ptr, std::string &name) const {
 	// Used in disassembly viewer.
-	if (ptr == dispatcher_) {
-		name = "dispatcher";
-	} else if (ptr == dispatcherPCInSCRATCH1_) {
+	if (ptr == dispatcherPCInSCRATCH1_) {
 		name = "dispatcher (PC in SCRATCH1)";
 	} else if (ptr == dispatcherNoCheck_) {
 		name = "dispatcherNoCheck";

@@ -220,81 +145,36 @@ bool RiscVJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
 		name = "saveStaticRegisters";
 	} else if (ptr == loadStaticRegisters_) {
 		name = "loadStaticRegisters";
-	} else if (ptr == enterDispatcher_) {
-		name = "enterDispatcher";
 	} else if (ptr == applyRoundingMode_) {
 		name = "applyRoundingMode";
-	} else if (!IsInSpace(ptr)) {
-		return false;
 	} else {
-		int offset = (int)GetOffset(ptr);
-		int block_num = -1;
-		for (int i = 0; i < blocks_.GetNumBlocks(); ++i) {
-			const auto &b = blocks_.GetBlock(i);
-			// We allocate linearly.
-			if (b->GetTargetOffset() <= offset)
-				block_num = i;
-			if (b->GetTargetOffset() > offset)
-				break;
-		}
-
-		if (block_num == -1) {
-			name = "(unknown or deleted block)";
-			return true;
-		}
-
-		const IRBlock *block = blocks_.GetBlock(block_num);
-		if (block) {
-			u32 start = 0, size = 0;
-			block->GetRange(start, size);
-			name = StringFromFormat("(block %d at %08x)", block_num, start);
-			return true;
-		}
-		return false;
+		return IRNativeBackend::DescribeCodePtr(ptr, name);
 	}
 	return true;
 }
 
-bool RiscVJit::CodeInRange(const u8 *ptr) const {
-	return IsInSpace(ptr);
-}
-
-bool RiscVJit::IsAtDispatchFetch(const u8 *ptr) const {
-	return ptr == dispatcherFetch_;
-}
-
-const u8 *RiscVJit::GetDispatcher() const {
-	return dispatcher_;
-}
-
-const u8 *RiscVJit::GetCrashHandler() const {
-	return crashHandler_;
-}
-
-void RiscVJit::ClearCache() {
-	IRNativeJit::ClearCache();
-
+void RiscVJitBackend::ClearAllBlocks() {
 	ClearCodeSpace(jitStartOffset_);
 	FlushIcacheSection(region + jitStartOffset_, region + region_size - jitStartOffset_);
 }
 
-void RiscVJit::RestoreRoundingMode(bool force) {
+void RiscVJitBackend::RestoreRoundingMode(bool force) {
 	FSRMI(Round::NEAREST_EVEN);
 }
 
-void RiscVJit::ApplyRoundingMode(bool force) {
+void RiscVJitBackend::ApplyRoundingMode(bool force) {
 	QuickCallFunction(applyRoundingMode_);
 }
 
-void RiscVJit::MovFromPC(RiscVReg r) {
+void RiscVJitBackend::MovFromPC(RiscVReg r) {
 	LWU(r, CTXREG, offsetof(MIPSState, pc));
 }
 
-void RiscVJit::MovToPC(RiscVReg r) {
+void RiscVJitBackend::MovToPC(RiscVReg r) {
 	SW(r, CTXREG, offsetof(MIPSState, pc));
 }
 
-void RiscVJit::SaveStaticRegisters() {
+void RiscVJitBackend::SaveStaticRegisters() {
 	if (jo.useStaticAlloc) {
 		QuickCallFunction(saveStaticRegisters_);
 	} else {

@@ -303,7 +183,7 @@ void RiscVJit::SaveStaticRegisters() {
 	}
 }
 
-void RiscVJit::LoadStaticRegisters() {
+void RiscVJitBackend::LoadStaticRegisters() {
 	if (jo.useStaticAlloc) {
 		QuickCallFunction(loadStaticRegisters_);
 	} else {

@@ -311,16 +191,16 @@ void RiscVJit::LoadStaticRegisters() {
 	}
 }
 
-void RiscVJit::NormalizeSrc1(IRInst inst, RiscVReg *reg, RiscVReg tempReg, bool allowOverlap) {
+void RiscVJitBackend::NormalizeSrc1(IRInst inst, RiscVReg *reg, RiscVReg tempReg, bool allowOverlap) {
 	*reg = NormalizeR(inst.src1, allowOverlap ? 0 : inst.dest, tempReg);
 }
 
-void RiscVJit::NormalizeSrc12(IRInst inst, RiscVReg *lhs, RiscVReg *rhs, RiscVReg lhsTempReg, RiscVReg rhsTempReg, bool allowOverlap) {
+void RiscVJitBackend::NormalizeSrc12(IRInst inst, RiscVReg *lhs, RiscVReg *rhs, RiscVReg lhsTempReg, RiscVReg rhsTempReg, bool allowOverlap) {
 	*lhs = NormalizeR(inst.src1, allowOverlap ? 0 : inst.dest, lhsTempReg);
 	*rhs = NormalizeR(inst.src2, allowOverlap ? 0 : inst.dest, rhsTempReg);
 }
 
-RiscVReg RiscVJit::NormalizeR(IRRegIndex rs, IRRegIndex rd, RiscVReg tempReg) {
+RiscVReg RiscVJitBackend::NormalizeR(IRRegIndex rs, IRRegIndex rd, RiscVReg tempReg) {
 	// For proper compare, we must sign extend so they both match or don't match.
 	// But don't change pointers, in case one is SP (happens in LittleBigPlanet.)
 	if (gpr.IsImm(rs) && gpr.GetImm(rs) == 0) {

@@ -29,30 +29,23 @@
 
 namespace MIPSComp {
 
-// TODO: Separate.
-class RiscVJit : public RiscVGen::RiscVCodeBlock, public IRNativeJit, public IRNativeBackend {
+class RiscVJitBackend : public RiscVGen::RiscVCodeBlock, public IRNativeBackend {
 public:
-	RiscVJit(MIPSState *mipsState);
-	~RiscVJit();
+	RiscVJitBackend(MIPSState *mipsState, JitOptions &jo);
+	~RiscVJitBackend();
 
-	void RunLoopUntil(u64 globalticks) override;
+	bool DescribeCodePtr(const u8 *ptr, std::string &name) const override;
 
-	bool DescribeCodePtr(const u8 *ptr, std::string &name) override;
-	bool CodeInRange(const u8 *ptr) const override;
-	bool IsAtDispatchFetch(const u8 *ptr) const override;
-	const u8 *GetDispatcher() const override;
-	const u8 *GetCrashHandler() const override;
-
-	void ClearCache() override;
-
-	JitBlockCacheDebugInterface *GetBlockCacheDebugInterface() override;
+	void GenerateFixedCode() override;
+	bool CompileBlock(IRBlock *block, int block_num, bool preload) override;
+	void ClearAllBlocks() override;
 
 protected:
-	bool CompileTargetBlock(IRBlock *block, int block_num, bool preload) override;
+	const CodeBlockCommon &CodeBlock() const override {
+		return *this;
+	}
 
 private:
-	void GenerateFixedCode(const JitOptions &jo);
-
 	void RestoreRoundingMode(bool force = false);
 	void ApplyRoundingMode(bool force = false);
 	void MovFromPC(RiscVGen::RiscVReg r);

@@ -114,27 +107,34 @@ private:
 	void NormalizeSrc12(IRInst inst, RiscVGen::RiscVReg *lhs, RiscVGen::RiscVReg *rhs, RiscVGen::RiscVReg lhsTempReg, RiscVGen::RiscVReg rhsTempReg, bool allowOverlap);
 	RiscVGen::RiscVReg NormalizeR(IRRegIndex rs, IRRegIndex rd, RiscVGen::RiscVReg tempReg);
 
+	// TODO: Maybe just a param to GenerateFixedCode().
+	MIPSState *mips_;
+	JitOptions &jo;
 	RiscVRegCache gpr;
 	RiscVRegCacheFPU fpr;
-	IRNativeBlockCacheDebugInterface debugInterface_;
-
-	const u8 *enterDispatcher_ = nullptr;
 
 	const u8 *outerLoop_ = nullptr;
 	const u8 *outerLoopPCInSCRATCH1_ = nullptr;
 	const u8 *dispatcherCheckCoreState_ = nullptr;
 	const u8 *dispatcherPCInSCRATCH1_ = nullptr;
-	const u8 *dispatcher_ = nullptr;
 	const u8 *dispatcherNoCheck_ = nullptr;
-	const u8 *dispatcherFetch_ = nullptr;
 	const u8 *applyRoundingMode_ = nullptr;
 
 	const u8 *saveStaticRegisters_ = nullptr;
 	const u8 *loadStaticRegisters_ = nullptr;
 
-	const u8 *crashHandler_ = nullptr;
-
 	int jitStartOffset_ = 0;
 };
 
+class RiscVJit : public IRNativeJit {
+public:
+	RiscVJit(MIPSState *mipsState)
+		: IRNativeJit(mipsState), rvBackend_(mipsState, jo) {
+		Init(rvBackend_);
+	}
+
+private:
+	RiscVJitBackend rvBackend_;
+};
+
 } // namespace MIPSComp
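The net effect of the header change above: the old RiscVJit used multiple inheritance (RiscVCodeBlock plus IRNativeJit plus IRNativeBackend, with the removed "// TODO: Separate." comment admitting the tangle); the new shape is composition, where RiscVJit is just an IRNativeJit that owns a RiscVJitBackend and hands it to Init(). That keeps codegen state (register caches, dispatcher labels) out of the shared driver and should let other IR targets reuse IRNativeJit unchanged.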