Switch to compile-time ARMV7 define.

Sacha 2013-11-15 11:20:39 +10:00
parent adde016338
commit 20e8a81268
6 changed files with 57 additions and 52 deletions

View file

@@ -268,13 +268,6 @@ void CPUInfo::Detect()
      bASIMD = CheckCPUFeature("asimd");
      num_cores = GetCoreCount();
  #endif
-     // Since we can do this at compile-time (separate libraries) for every platform,
-     // maybe we can replace the bArmV7 check with #if like we do for x86 and x86_64
- #if defined(__ARM_ARCH_7A__)
-     bArmV7 = true;
- #else
-     bArmV7 = false;
- #endif
  }
  // Turn the cpu info into a string we can show
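
Note that the removed block implements what its own comment asked for: GCC and Clang predefine __ARM_ARCH_7A__ (and __ARM_ARCH_7S__ on Swift-core targets) whenever the build targets those architectures, so the ARM level is already known at compile time and the runtime bArmV7 flag is redundant. A minimal sketch of the equivalent compile-time constant (kArmV7 is a hypothetical name, not part of the codebase):

    // The compiler, not runtime detection, fixes the ARM level.
    // __ARM_ARCH_7A__ is predefined by GCC/Clang under e.g. -march=armv7-a.
    #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7S__)
    constexpr bool kArmV7 = true;   // hypothetical mirror of the old bArmV7
    #else
    constexpr bool kArmV7 = false;
    #endif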

View file

@@ -202,7 +202,7 @@ void ARMXEmitter::ANDI2R(ARMReg rd, ARMReg rs, u32 val, ARMReg scratch)
      }
      // The worst case is 4 (e.g. 0x55555555.)
-     if (ops <= 3 || !cpu_info.bArmV7) {
+     if (ops <= 3) {
          bool first = true;
          for (int i = 0; i < 32; i += 2) {
              u8 bits = RotR(val, i) & 0xFF;
@@ -272,7 +272,7 @@ void ARMXEmitter::ORI2R(ARMReg rd, ARMReg rs, u32 val, ARMReg scratch)
      if (TryMakeOperand2_AllowInverse(val, op2, &inversed) && ops >= 3) {
          MVN(scratch, op2);
          ORR(rd, rs, scratch);
-     } else if (ops <= 3 || !cpu_info.bArmV7) {
+     } else if (ops <= 3) {
          bool first = true;
          for (int i = 0; i < 32; i += 2) {
              u8 bits = RotR(val, i) & 0xFF;
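
For context on the ops <= 3 tests above: an ARM data-processing immediate (Operand2) is an 8-bit value rotated right by an even amount, and ANDI2R/ORI2R estimate how many such chunks a constant needs before choosing a strategy; the comment's worst case of four is hit by patterns like 0x55555555. A standalone sketch of that estimate, under the assumption that it mirrors the loop above (a valid but not always minimal count; CountOperand2Chunks is an illustrative name):

    #include <cstdint>

    static uint32_t RotR(uint32_t v, int r) {
        r &= 31;
        return r ? (v >> r) | (v << (32 - r)) : v;
    }

    // Count the 8-bit rotated-immediate chunks needed to cover val.
    int CountOperand2Chunks(uint32_t val) {
        int ops = 0;
        for (int i = 0; i < 32; i += 2) {
            uint8_t bits = RotR(val, i) & 0xFF; // 8-bit window at even rotation i
            if (bits & 3) {                     // window begins on a set bit
                ops++;
                i += 8 - 2;                     // skip past the byte just covered
            }
        }
        return ops;
    }
    // CountOperand2Chunks(0x55555555) == 4, matching the comment above.
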
@@ -333,26 +333,32 @@ void ARMXEmitter::MOVI2R(ARMReg reg, u32 val, bool optimize)
      Operand2 op2;
      bool inverse;
-     if (cpu_info.bArmV7 && !optimize)
+ #ifdef HAVE_ARMV7
+     // Unused
+     if (!optimize)
      {
          // For backpatching on ARMv7
          MOVW(reg, val & 0xFFFF);
          MOVT(reg, val, true);
          return;
      }
-     else if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
+ #endif
+     if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
          inverse ? MVN(reg, op2) : MOV(reg, op2);
      } else {
-         if (cpu_info.bArmV7)
-         {
-             // Use MOVW+MOVT for ARMv7+
-             MOVW(reg, val & 0xFFFF);
-             if(val & 0xFFFF0000)
-                 MOVT(reg, val, true);
-         } else if (!TrySetValue_TwoOp(reg,val)) {
+ #ifdef HAVE_ARMV7
+         // Use MOVW+MOVT for ARMv7+
+         MOVW(reg, val & 0xFFFF);
+         if(val & 0xFFFF0000)
+             MOVT(reg, val, true);
+ #else
+         if (!TrySetValue_TwoOp(reg,val)) {
              // Use literal pool for ARMv6.
              AddNewLit(val);
              LDR(reg, _PC); // To be backpatched later
          }
+ #endif
      }
  }
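
The restructured MOVI2R now decides its constant-loading strategy at build time: with HAVE_ARMV7, any 32-bit value takes at most two instructions (MOVW writes the low halfword and zeroes the rest; MOVT fills the high halfword and is skipped when the top 16 bits are zero), while without it, values that fail the cheaper encodings come from a PC-relative literal pool. A sketch of the halfword split (SplitConstant and the struct are illustrative names):

    #include <cstdint>

    struct MovwMovt {
        uint16_t low;   // MOVW reg, #low  : writes bits 0-15, zeroes bits 16-31
        uint16_t high;  // MOVT reg, #high : writes bits 16-31, keeps bits 0-15
    };

    constexpr MovwMovt SplitConstant(uint32_t val) {
        return { static_cast<uint16_t>(val & 0xFFFF),
                 static_cast<uint16_t>(val >> 16) };
    }
    // e.g. SplitConstant(0x08049D30) yields { 0x9D30, 0x0804 };
    // when high == 0 the MOVT is omitted, as the emitter does above.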

View file

@@ -19,6 +19,11 @@
  #ifndef _CPUDETECT_H_
  #define _CPUDETECT_H_
+ // Every architecture has its own define. This needs to be added to.
+ #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7S__)
+ #define HAVE_ARMV7 1
+ #endif
  #include <string>
  enum CPUVendor {
@@ -70,7 +75,6 @@ struct CPUInfo {
      bool bVFPv4;
      bool bIDIVa;
      bool bIDIVt;
-     bool bArmV7; // enable MOVT, MOVW etc
      // ARMv8 specific
      bool bFP;

View file

@@ -543,12 +543,12 @@ namespace MIPSComp
          }
          gpr.MapDirtyIn(rt, rs);
-         if (cpu_info.bArmV7) {
-             UBFX(gpr.R(rt), gpr.R(rs), pos, size);
-         } else {
-             MOV(gpr.R(rt), Operand2(gpr.R(rs), ST_LSR, pos));
-             ANDI2R(gpr.R(rt), gpr.R(rt), mask, R0);
-         }
+ #ifdef HAVE_ARMV7
+         UBFX(gpr.R(rt), gpr.R(rs), pos, size);
+ #else
+         MOV(gpr.R(rt), Operand2(gpr.R(rs), ST_LSR, pos));
+         ANDI2R(gpr.R(rt), gpr.R(rt), mask, R0);
+ #endif
          break;
      case 0x4: //ins
@@ -567,13 +567,13 @@ namespace MIPSComp
              ORI2R(gpr.R(rt), gpr.R(rt), inserted, R0);
          } else {
              gpr.MapDirtyIn(rt, rs, false);
-             if (cpu_info.bArmV7) {
-                 BFI(gpr.R(rt), gpr.R(rs), pos, size-pos);
-             } else {
-                 ANDI2R(R0, gpr.R(rs), sourcemask, R1);
-                 ANDI2R(gpr.R(rt), gpr.R(rt), destmask, R1);
-                 ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, pos));
-             }
+ #ifdef HAVE_ARMV7
+             BFI(gpr.R(rt), gpr.R(rs), pos, size-pos);
+ #else
+             ANDI2R(R0, gpr.R(rs), sourcemask, R1);
+             ANDI2R(gpr.R(rt), gpr.R(rt), destmask, R1);
+             ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, pos));
+ #endif
          }
      }
      break;
@@ -621,12 +621,12 @@ namespace MIPSComp
              return;
          }
-         if (cpu_info.bArmV7) {
-             gpr.MapDirtyIn(rd, rt);
-             RBIT(gpr.R(rd), gpr.R(rt));
-         } else {
-             Comp_Generic(op);
-         }
+ #ifdef HAVE_ARMV7
+         gpr.MapDirtyIn(rd, rt);
+         RBIT(gpr.R(rd), gpr.R(rt));
+ #else
+         Comp_Generic(op);
+ #endif
          break;
      default:
          Comp_Generic(op);
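
The ext and ins cases map directly onto the ARMv7 bitfield instructions, while the ARMv6 fallback rebuilds them from shifts and masks; bitrev has no cheap pre-v7 equivalent, so it falls back to Comp_Generic. In plain C++ the two bitfield operations amount to the following (a sketch; Ext and Ins are illustrative names):

    #include <cstdint>

    // ext: what UBFX rd, rs, pos, size computes: extract `size` bits at `pos`.
    uint32_t Ext(uint32_t rs, int pos, int size) {
        uint32_t mask = (size >= 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
        return (rs >> pos) & mask;
    }

    // ins: what BFI rt, rs, pos, width computes: insert the low `width`
    // bits of rs into rt at bit `pos`, leaving the rest of rt untouched.
    uint32_t Ins(uint32_t rt, uint32_t rs, int pos, int width) {
        uint32_t mask = ((width >= 32) ? 0xFFFFFFFFu : ((1u << width) - 1)) << pos;
        return (rt & ~mask) | ((rs << pos) & mask);
    }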

View file

@@ -343,13 +343,13 @@ void Jit::Comp_mxc1(MIPSOpcode op)
      {
          gpr.MapDirtyIn(rt, MIPS_REG_FPCOND);
          LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
-         if (cpu_info.bArmV7) {
-             BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
-         } else {
-             AND(R0, gpr.R(MIPS_REG_FPCOND), Operand2(1)); // Just in case
-             ANDI2R(gpr.R(rt), gpr.R(rt), ~(0x1 << 23), R1); // R1 won't be used, this turns into a simple BIC.
-             ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, 23));
-         }
+ #ifdef HAVE_ARMV7
+         BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
+ #else
+         AND(R0, gpr.R(MIPS_REG_FPCOND), Operand2(1)); // Just in case
+         ANDI2R(gpr.R(rt), gpr.R(rt), ~(0x1 << 23), R1); // R1 won't be used, this turns into a simple BIC.
+         ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, 23));
+ #endif
      }
      else if (fs == 0)
      {
@@ -387,12 +387,12 @@ void Jit::Comp_mxc1(MIPSOpcode op)
          */
          // Update MIPS state
          STR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
-         if (cpu_info.bArmV7) {
-             UBFX(gpr.R(MIPS_REG_FPCOND), gpr.R(rt), 23, 1);
-         } else {
-             MOV(R0, Operand2(gpr.R(rt), ST_LSR, 23));
-             AND(gpr.R(MIPS_REG_FPCOND), R0, Operand2(1));
-         }
+ #ifdef HAVE_ARMV7
+         UBFX(gpr.R(MIPS_REG_FPCOND), gpr.R(rt), 23, 1);
+ #else
+         MOV(R0, Operand2(gpr.R(rt), ST_LSR, 23));
+         AND(gpr.R(MIPS_REG_FPCOND), R0, Operand2(1));
+ #endif
      }
      return;
  }
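
Both halves of Comp_mxc1 move the same bit: MIPS keeps the FPU condition flag (FPCOND) in bit 23 of fcr31, so a write to the control register must fold FPCOND in and a read must extract it. As plain C++ (a sketch with illustrative names):

    #include <cstdint>

    constexpr int kFpcondBit = 23;  // FPU condition flag position in fcr31

    uint32_t FoldFpcondIntoFcr31(uint32_t fcr31, uint32_t fpcond) {
        return (fcr31 & ~(1u << kFpcondBit)) | ((fpcond & 1u) << kFpcondBit);
    }

    uint32_t ExtractFpcond(uint32_t fcr31) {
        return (fcr31 >> kFpcondBit) & 1u;
    }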

View file

@@ -277,7 +277,8 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
          js.compilerPC += 4;
          js.numInstructions++;
-         if (!cpu_info.bArmV7 && (GetCodePtr() - b->checkedEntry - partialFlushOffset) > 3200)
+ #ifndef HAVE_ARMV7
+         if ((GetCodePtr() - b->checkedEntry - partialFlushOffset) > 3200)
          {
              // We need to prematurely flush as we are out of range
              FixupBranch skip = B_CC(CC_AL);
@@ -285,6 +286,7 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
              SetJumpTarget(skip);
              partialFlushOffset = GetCodePtr() - b->checkedEntry;
          }
+ #endif
          // Safety check, in case we get a bunch of really large jit ops without a lot of branching.
          if (GetSpaceLeft() < 0x800)
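
The premature flush only matters on the pre-v7 path: without MOVW/MOVT, constants are loaded with PC-relative LDRs, and an ARM-mode LDR literal offset is a 12-bit immediate, so the pool must land within 4095 bytes of each load; flushing once the emitted block passes 3200 bytes presumably keeps every pending literal inside that window, with headroom for the flush code itself. A sketch of the range rule (LiteralInRange is an illustrative name):

    #include <cstddef>
    #include <cstdint>

    // ARM-mode "LDR reg, [PC, #imm12]" reaches +/-4095 bytes, and PC reads
    // as the instruction's own address plus 8 in ARM state.
    bool LiteralInRange(const uint8_t *ldr_addr, const uint8_t *literal) {
        std::ptrdiff_t offset = literal - (ldr_addr + 8);
        return offset >= -4095 && offset <= 4095;
    }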