Some casting cleanup, misc

Henrik Rydgard 2015-03-08 00:56:15 +01:00
parent 34ab532971
commit 58b059ca14
10 changed files with 26 additions and 27 deletions
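
Most of the cleanup swaps (u32)/(u64)/(intptr_t) pointer casts for (uintptr_t), which is wide enough for a pointer on both 32-bit and 64-bit hosts (unlike u32) and unsigned (unlike intptr_t). A minimal standalone illustration of why the old 32-bit casts are unsafe on 64-bit (not code from this commit):

#include <cstdint>
#include <cstdio>

int main() {
    int value = 42;
    int *p = &value;
    // Casting through a 32-bit integer throws away the high half of the address on a 64-bit host.
    uint32_t truncated = (uint32_t)(uintptr_t)p;
    // uintptr_t is wide enough for any data pointer on every supported host.
    uintptr_t full = (uintptr_t)p;
    printf("truncated: 0x%08x  full: 0x%llx\n", truncated, (unsigned long long)full);
    return 0;
}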

View file

@@ -949,8 +949,14 @@ void ARM64XEmitter::BL(const void* ptr)
}
void ARM64XEmitter::QuickCallFunction(ARM64Reg scratchreg, const void *func) {
// TODO: Add special code to use the scratch reg if the call distance is too great.
BL(func);
s64 distance = (s64)func - (s64)m_code;
if (distance >= -0x3FFFFFF && distance < 0x3FFFFFF) {
BL(func);
} else {
WARN_LOG(DYNA_REC, "Distance too far in function call! Using scratch.");
MOVI2R(scratchreg, (uintptr_t)func);
BLR(scratchreg);
}
}
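
For context on the new range check: BL encodes a signed 26-bit word offset, so it reaches roughly ±128 MB from the call site; the ±0x3FFFFFF byte bound above is a conservative subset of that. A hedged sketch of the exact encodable range (the helper is hypothetical, not the emitter's API):

#include <cstdint>

// Hypothetical helper: can a BL emitted at 'from' reach 'to'?
bool CanUseBL(const void *from, const void *to) {
    int64_t distance = (int64_t)(intptr_t)to - (int64_t)(intptr_t)from;
    if (distance & 3)
        return false;                  // branch targets must be 4-byte aligned
    int64_t words = distance >> 2;     // the word offset actually encoded in the instruction
    return words >= -0x2000000 && words <= 0x1FFFFFF;   // signed 26-bit range
}

int main() {
    char buf[64];
    return CanUseBL(&buf[0], &buf[32]) ? 0 : 1;
}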

View file

@@ -675,8 +675,8 @@ public:
ARM64Reg ABI_SetupLambda(const std::function<T(Args...)>* f)
{
auto trampoline = &ARM64XEmitter::CallLambdaTrampoline<T, Args...>;
MOVI2R(X30, (u64)trampoline);
MOVI2R(X0, (u64)const_cast<void*>((const void*)f));
MOVI2R(X30, (uintptr_t)trampoline);
MOVI2R(X0, (uintptr_t)const_cast<void*>((const void*)f));
return X30;
}
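
How the trampoline works, shown as a standalone sketch (illustrative only; the real ABI_SetupLambda just emits the two loads above and the caller branches through the returned register):

#include <cstdio>
#include <functional>

// JIT code can only branch to a plain function pointer, so a static template function
// takes the std::function object as its first argument and forwards the call.
template <typename T, typename... Args>
static T CallLambdaTrampolineSketch(const std::function<T(Args...)> *f, Args... args) {
    return (*f)(args...);
}

int main() {
    std::function<int(int, int)> add = [](int a, int b) { return a + b; };
    // The emitter would load the trampoline address into X30 and &add into X0, then branch;
    // here we simply call through an equivalent function pointer.
    int (*trampoline)(const std::function<int(int, int)> *, int, int) =
        &CallLambdaTrampolineSketch<int, int, int>;
    printf("%d\n", trampoline(&add, 2, 3));   // prints 5
    return 0;
}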

View file

@@ -854,7 +854,7 @@ public:
// Load pointers without casting
template <class T> void MOVP2R(ARMReg reg, T *val) {
MOVI2R(reg, (u32)(intptr_t)(void *)val);
MOVI2R(reg, (uintptr_t)(void *)val);
}
void MOVIU2F(ARMReg dest, u32 val, ARMReg tempReg, bool negate = false) {

View file

@@ -423,7 +423,7 @@ bool ArmJit::ReplaceJalTo(u32 dest) {
if (BLInRange((const void *)(entry->replaceFunc))) {
BL((const void *)(entry->replaceFunc));
} else {
MOVI2R(R0, (u32)entry->replaceFunc);
MOVI2R(R0, (uintptr_t)entry->replaceFunc);
BL(R0);
}
ApplyRoundingMode();
@@ -482,7 +482,7 @@ void ArmJit::Comp_ReplacementFunc(MIPSOpcode op)
if (BLInRange((const void *)(entry->replaceFunc))) {
BL((const void *)(entry->replaceFunc));
} else {
MOVI2R(R0, (u32)entry->replaceFunc);
MOVI2R(R0, (uintptr_t)entry->replaceFunc);
BL(R0);
}

View file

@@ -73,7 +73,7 @@ static const bool enableDebug = true;
// x26 : JIT base reg
// x27 : MIPS state (Could eliminate by placing the MIPS state right at the memory base)
// x28 : Memory base pointer.
// x29 : Down counter
// x24 : Down counter
extern volatile CoreState coreState;
@@ -174,14 +174,7 @@ void Arm64Jit::GenerateFixedCode() {
LSR(SCRATCH2, SCRATCH2, 24);
CMP(SCRATCH2, MIPS_EMUHACK_OPCODE>>24);
FixupBranch skipJump = B(CC_NEQ);
// IDEA - we have 26 bits, why not just use offsets from base of code?
// Another idea: Shift the block number left by two in the op; this would let us do
// LDR(R0, R9, R0); here, replacing the next instructions.
#ifdef IOS
// On iOS, R9 (JITBASEREG) is volatile. We have to reload it.
MOVI2R(JITBASEREG, (uintptr_t)GetBasePtr());
#endif
ADD(SCRATCH1_64, SCRATCH1_64, JITBASEREG);
ADD(SCRATCH1_64, JITBASEREG, SCRATCH1_64);
BR(SCRATCH1_64);
SetJumpTarget(skipJump);
// No block found, let's jit
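
Roughly what the emitted dispatch above does, as plain C++ (a sketch; the helper names and the exact offset width are assumptions, not taken from the source):

#include <cstdint>

typedef void (*JitBlockFn)();

void DispatchSketch(uint32_t opAtPC, const uint8_t *jitBase, uint32_t emuhackTopByte) {
    if ((opAtPC >> 24) == emuhackTopByte) {
        // The emuhack op carries an offset into the emitted-code region in its low bits.
        uintptr_t offset = opAtPC & 0x00FFFFFF;
        JitBlockFn target = (JitBlockFn)(uintptr_t)(jitBase + offset);
        target();   // BR SCRATCH1_64 in the emitted sequence
    } else {
        // skipJump lands here: no compiled block yet, hand the PC to the compiler ("let's jit").
    }
}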

View file

@@ -498,7 +498,7 @@ void Arm64Jit::Comp_JumpReg(MIPSOpcode op)
delaySlotIsNice = false;
CONDITIONAL_NICE_DELAYSLOT;
ARM64Reg destReg = X18;
ARM64Reg destReg = OTHERTEMPREG;
if (IsSyscall(delaySlotOp)) {
gpr.MapReg(rs);
MovToPC(gpr.R(rs)); // For syscall to be able to return.
@@ -539,7 +539,7 @@ void Arm64Jit::Comp_JumpReg(MIPSOpcode op)
} else {
// Delay slot - this case is very rare, might be able to free up R8.
gpr.MapReg(rs);
MOV(X18, gpr.R(rs));
MOV(destReg, gpr.R(rs));
if (andLink)
gpr.SetImm(rd, js.compilerPC + 8);
CompileDelaySlot(DELAYSLOT_NICE);
@@ -593,7 +593,7 @@ void Arm64Jit::Comp_Syscall(MIPSOpcode op)
// Skip the CallSyscall where possible.
void *quickFunc = GetQuickSyscallFunc(op);
if (quickFunc) {
MOVI2R(X0, (intptr_t)GetSyscallInfo(op));
MOVI2R(X0, (uintptr_t)GetSyscallInfo(op));
// Already flushed, so X1 is safe.
QuickCallFunction(X1, quickFunc);
} else {

View file

@@ -170,7 +170,7 @@ void Arm64Jit::CompileDelaySlot(int flags)
// we can (mostly) control whether we set the flag or not. Of course, if someone puts an slt into the
// delay slot, we're screwed.
if (flags & DELAYSLOT_SAFE)
MRS(FLAGTEMPREG, FIELD_NZCV); // Save flags register. X18 is preserved through function calls and is not allocated.
MRS(FLAGTEMPREG, FIELD_NZCV); // Save flags register. FLAGTEMPREG is preserved through function calls and is not allocated.
js.inDelaySlot = true;
MIPSOpcode op = Memory::Read_Opcode_JIT(js.compilerPC + 4);
@@ -239,8 +239,6 @@ const u8 *Arm64Jit::DoJit(u32 em_address, JitBlock *b)
js.inDelaySlot = false;
js.PrefixStart();
logBlocks = 1;
// We add a downcount flag check before the block, used when entering from a linked block.
// The last block decremented the downcounter, and the flag should still be available.
// Got three variants here of where we position the code, needs detailed benchmarking.
@@ -369,6 +367,7 @@ bool Arm64Jit::ReplaceJalTo(u32 dest) {
void Arm64Jit::Comp_ReplacementFunc(MIPSOpcode op)
{
ERROR_LOG(JIT, "Comp_ReplacementFunc not implemented");
// TODO ARM64
}
@@ -413,7 +412,8 @@ void Arm64Jit::RestoreDowncount() {
}
void Arm64Jit::WriteDownCount(int offset) {
SUBSI2R(DOWNCOUNTREG, DOWNCOUNTREG, offset, SCRATCH1);
int theDowncount = js.downcountAmount + offset;
SUBSI2R(DOWNCOUNTREG, DOWNCOUNTREG, theDowncount, SCRATCH1);
}
void Arm64Jit::WriteDownCountR(ARM64Reg reg) {
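
For context on WriteDownCount: the downcount is the JIT's timeslice budget; each block subtracts its estimated cycle cost, and using a flag-setting subtract (SUBS) lets the emitted code branch straight to the timeslice-expired exit. A rough sketch of the general pattern, not PPSSPP's actual dispatcher:

#include <cstdint>

struct CpuStateSketch {
    int32_t downcount;   // cycles left in the current timeslice
};

void RunOneBlockSketch(CpuStateSketch *cpu, int blockCycleCost) {
    // ... execute the translated block ...
    cpu->downcount -= blockCycleCost;   // SUBSI2R(DOWNCOUNTREG, DOWNCOUNTREG, cost) in emitted code
    if (cpu->downcount <= 0) {
        // Timeslice over: leave the dispatcher, run timers/events, refill the downcount.
    }
}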

View file

@@ -53,7 +53,7 @@ const ARM64Reg *Arm64RegCache::GetMIPSAllocationOrder(int &count) {
// See register alloc remarks in Arm64Asm.cpp
// TODO: Add static allocation of top MIPS registers like SP
static const ARM64Reg allocationOrder[] = {
W19, W20, W21, W22, W23, W24, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, W0, W1,
W19, W20, W21, W22, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, W0, W1,
};
count = sizeof(allocationOrder) / sizeof(const int);
return allocationOrder;
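
A note on the count computation above: dividing by sizeof(const int) only stays correct while ARM64Reg's underlying type is int-sized. A hypothetical helper showing the usual element-count idiom (not part of the codebase):

#include <cstddef>

// Deduces the element count at compile time, independent of the element type's size.
template <typename T, size_t N>
constexpr size_t ArraySizeSketch(const T (&)[N]) {
    return N;
}

// e.g. count = (int)ArraySizeSketch(allocationOrder);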

View file

@@ -23,12 +23,12 @@
namespace Arm64JitConstants {
// Bogus mappings, TODO ARM64
const Arm64Gen::ARM64Reg DOWNCOUNTREG = Arm64Gen::W23;
const Arm64Gen::ARM64Reg OTHERTEMPREG = Arm64Gen::X24;
const Arm64Gen::ARM64Reg FLAGTEMPREG = Arm64Gen::X25;
const Arm64Gen::ARM64Reg JITBASEREG = Arm64Gen::X26;
const Arm64Gen::ARM64Reg CTXREG = Arm64Gen::X27;
const Arm64Gen::ARM64Reg MEMBASEREG = Arm64Gen::X28;
const Arm64Gen::ARM64Reg DOWNCOUNTREG = Arm64Gen::W29; // no need to use the full register width
const Arm64Gen::ARM64Reg SCRATCH1_64 = Arm64Gen::X16;
const Arm64Gen::ARM64Reg SCRATCH2_64 = Arm64Gen::X17;
const Arm64Gen::ARM64Reg SCRATCH1 = Arm64Gen::W16;
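
For reference, the JIT-reserved registers above are drawn from the range AAPCS64 requires callees to preserve, which is why values parked there survive calls into C helpers, while x16/x17 (SCRATCH1/SCRATCH2) are the intra-procedure-call temporaries nobody expects to keep. A minimal reference sketch of that rule (not emitter code):

// x19-x28 : callee-saved, safe for long-lived JIT state across calls into C helpers
// x29/x30 : frame pointer / link register (x29 is also preserved by callees)
// x16/x17 : intra-procedure-call scratch (IP0/IP1)
bool SurvivesCCallSketch(int xreg) {
    return xreg >= 19 && xreg <= 29;   // x30 holds the return address, so exclude it
}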

View file

@@ -78,7 +78,7 @@ void TestCode::Generate()
// c will later be logged.
/*
MOVI2R(R11, (u32)&abc[0]);
MOVP2R(R11, &abc[0]);
MOVI2R(R1, 0x3f800000);
STR(R11, R1, 4 * (32 + 31));
VLDR(S0, R11, 0);