arm64jit: Allow disabling pointerification.
For platforms where we can't get base aligned.
parent 4657397b57
commit cccf448ae0

4 changed files with 64 additions and 5 deletions
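Roughly, the two strategies involved (a minimal standalone C++ sketch, not PPSSPP code; memoryBase, the function names, and the alignment constant are illustrative assumptions): a pointerified register keeps the guest address in its low 32 bits and has the upper bits of the host memory base merged in (the MOVK in the diff), which only works when the base's low 32 bits are zero, i.e. the base is 4GB-aligned. When that alignment can't be obtained, the register instead has to be converted into a real host pointer by masking the guest address and adding the base, which is what the new ML_ARMREG_AS_PTR state and the enablePointerify option model.

#include <cstdint>

// Illustrative sketch only: two ways a JIT can turn a 32-bit emulated address
// held in a register into a host pointer. Names here are assumptions.
static uint8_t *memoryBase = nullptr;  // host allocation backing emulated RAM

// "Pointerified" form: merge the high half of the base into the register.
// Only valid when the base's low 32 bits are zero (a 4GB-aligned base).
uint64_t PointerifiedReg(uint32_t guestAddr) {
	return ((uint64_t)(uintptr_t)memoryBase & 0xFFFFFFFF00000000ULL) | guestAddr;
}

// Pointer form: mask the guest address and add the base explicitly.
// Works for any base alignment, at the cost of an extra AND/ADD.
uint8_t *PointerReg(uint32_t guestAddr) {
	return memoryBase + (guestAddr & 0x3FFFFFFF);
}

int main() {
	// Pretend the host base landed at a 4GB-aligned address (the good case).
	memoryBase = (uint8_t *)(uintptr_t)0x100000000ULL;
	uint32_t addr = 0x08804000;  // a typical emulated RAM address
	// Both forms produce the same host address when the base is aligned.
	return (PointerifiedReg(addr) == (uint64_t)(uintptr_t)PointerReg(addr)) ? 0 : 1;
}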
@@ -150,6 +150,8 @@ bool Arm64RegCache::IsMappedAsPointer(MIPSGPReg mipsReg) {
 		if (ar[mr[mipsReg].reg].pointerified) {
 			ELOG("Really shouldn't be pointerified here");
 		}
+	} else if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		return true;
 	}
 	return false;
 }

@@ -315,6 +317,21 @@ ARM64Reg Arm64RegCache::MapReg(MIPSGPReg mipsReg, int mapFlags) {
 		}

 		return mr[mipsReg].reg;
+	} else if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		// Was mapped as pointer, now we want it mapped as a value, presumably to
+		// add or subtract stuff to it.
+		if ((mapFlags & MAP_NOINIT) != MAP_NOINIT) {
+			ARM64Reg loadReg = armReg;
+			if (mipsReg == MIPS_REG_LO) {
+				loadReg = EncodeRegTo64(loadReg);
+			}
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(mipsReg));
+		}
+		mr[mipsReg].loc = ML_ARMREG;
+		if (mapFlags & MAP_DIRTY) {
+			ar[armReg].isDirty = true;
+		}
+		return (ARM64Reg)mr[mipsReg].reg;
 	}

 	// Okay, not mapped, so we need to allocate an ARM register.

@@ -358,6 +375,11 @@ allocate:
 	}

 Arm64Gen::ARM64Reg Arm64RegCache::MapRegAsPointer(MIPSGPReg reg) {
+	// Already mapped.
+	if (mr[reg].loc == ML_ARMREG_AS_PTR) {
+		return mr[reg].reg;
+	}
+
 	ARM64Reg retval = INVALID_REG;
 	if (mr[reg].loc != ML_ARMREG && mr[reg].loc != ML_ARMREG_IMM) {
 		retval = MapReg(reg);

@@ -368,9 +390,23 @@ Arm64Gen::ARM64Reg Arm64RegCache::MapRegAsPointer(MIPSGPReg reg) {
 	if (mr[reg].loc == ML_ARMREG || mr[reg].loc == ML_ARMREG_IMM) {
 		// If there was an imm attached, discard it.
 		mr[reg].loc = ML_ARMREG;
-		int a = DecodeReg(mr[reg].reg);
-		if (!ar[a].pointerified) {
-			emit_->MOVK(ARM64Reg(X0 + a), ((uint64_t)Memory::base) >> 32, SHIFT_32);
+		ARM64Reg a = DecodeReg(mr[reg].reg);
+		if (!jo_->enablePointerify) {
+			// First, flush the value.
+			if (ar[a].isDirty) {
+				ARM64Reg storeReg = ARM64RegForFlush(ar[a].mipsReg);
+				if (storeReg != INVALID_REG)
+					emit_->STR(INDEX_UNSIGNED, storeReg, CTXREG, GetMipsRegOffset(ar[a].mipsReg));
+				ar[a].isDirty = false;
+			}
+
+			// Convert to a pointer by adding the base and clearing off the top bits.
+			// If SP, we can probably avoid the top bit clear, let's play with that later.
+			emit_->ANDI2R(a, a, 0x3FFFFFFF, INVALID_REG);
+			emit_->ADD(ARM64Reg(X0 + (int)a), ARM64Reg(X0 + (int)a), MEMBASEREG);
+			mr[reg].loc = ML_ARMREG_AS_PTR;
+		} else if (!ar[a].pointerified) {
+			emit_->MOVK(ARM64Reg(X0 + (int)a), ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			ar[a].pointerified = true;
 		}
 	} else {

@@ -450,6 +486,7 @@ void Arm64RegCache::FlushArmReg(ARM64Reg r) {
 		mreg.loc = ML_IMM;
 		mreg.reg = INVALID_REG;
 	} else {
+		_assert_msg_(JIT, mreg.loc != ML_ARMREG_AS_PTR, "Cannot flush reg as pointer");
 		// Note: may be a 64-bit reg.
 		ARM64Reg storeReg = ARM64RegForFlush(ar[r].mipsReg);
 		if (storeReg != INVALID_REG)

@@ -476,7 +513,7 @@ void Arm64RegCache::DiscardR(MIPSGPReg mipsReg) {
 		return;
 	}
 	const RegMIPSLoc prevLoc = mr[mipsReg].loc;
-	if (prevLoc == ML_ARMREG || prevLoc == ML_ARMREG_IMM) {
+	if (prevLoc == ML_ARMREG || prevLoc == ML_ARMREG_IMM || prevLoc == ML_ARMREG_AS_PTR) {
 		ARM64Reg armReg = mr[mipsReg].reg;
 		ar[armReg].isDirty = false;
 		ar[armReg].mipsReg = MIPS_REG_INVALID;

@@ -532,6 +569,9 @@ ARM64Reg Arm64RegCache::ARM64RegForFlush(MIPSGPReg r) {
 		}
 		return mr[r].reg;

+	case ML_ARMREG_AS_PTR:
+		return INVALID_REG;
+
 	case ML_MEM:
 		return INVALID_REG;

@@ -578,6 +618,14 @@ void Arm64RegCache::FlushR(MIPSGPReg r) {
 		ar[mr[r].reg].pointerified = false;
 		break;

+	case ML_ARMREG_AS_PTR:
+		// Never dirty.
+		if (ar[mr[r].reg].isDirty) {
+			ERROR_LOG_REPORT(JIT, "ARMREG_AS_PTR cannot be dirty (yet)");
+		}
+		ar[mr[r].reg].mipsReg = MIPS_REG_INVALID;
+		break;
+
 	case ML_MEM:
 		// Already there, nothing to do.
 		break;

@@ -792,7 +840,9 @@ ARM64Reg Arm64RegCache::R(MIPSGPReg mipsReg) {
 }

 ARM64Reg Arm64RegCache::RPtr(MIPSGPReg mipsReg) {
-	if (mr[mipsReg].loc == ML_ARMREG || mr[mipsReg].loc == ML_ARMREG_IMM) {
+	if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		return (ARM64Reg)mr[mipsReg].reg;
+	} else if (mr[mipsReg].loc == ML_ARMREG || mr[mipsReg].loc == ML_ARMREG_IMM) {
 		int a = mr[mipsReg].reg;
 		if (ar[a].pointerified) {
 			return (ARM64Reg)mr[mipsReg].reg;

@@ -41,6 +41,8 @@ enum {
 enum RegMIPSLoc {
 	ML_IMM,
 	ML_ARMREG,
+	// In an arm reg, but an adjusted pointer (not pointerified - unaligned.)
+	ML_ARMREG_AS_PTR,
 	// In an arm reg, but also has a known immediate value.
 	ML_ARMREG_IMM,
 	ML_MEM,

@@ -51,8 +51,14 @@ namespace MIPSComp {
 		continueMaxInstructions = 300;

 		useStaticAlloc = false;
+		enablePointerify = false;
 #if PPSSPP_ARCH(ARM64)
 		useStaticAlloc = true;
+		enablePointerify = true;
 #endif
+#if PPSSPP_PLATFORM(IOS)
+		useStaticAlloc = false;
+		enablePointerify = false;
+#endif
 	}
 }

@@ -198,6 +198,7 @@ namespace MIPSComp {
 		// ARM64 only
 		bool useASIMDVFPU;
 		bool useStaticAlloc;
+		bool enablePointerify;

 		// Common
 		bool enableBlocklink;