From 20e8a812686d593fd2ac2a14209692e803e3d059 Mon Sep 17 00:00:00 2001
From: Sacha
Date: Fri, 15 Nov 2013 11:20:39 +1000
Subject: [PATCH] Switch to compile-time ARMV7 define.

---
 Common/ArmCPUDetect.cpp      |  7 -------
 Common/ArmEmitter.cpp        | 28 +++++++++++++++-----------
 Common/CPUDetect.h           |  6 +++++-
 Core/MIPS/ARM/ArmCompALU.cpp | 38 ++++++++++++++++++------------------
 Core/MIPS/ARM/ArmCompFPU.cpp | 26 ++++++++++++------------
 Core/MIPS/ARM/ArmJit.cpp     |  4 +++-
 6 files changed, 57 insertions(+), 52 deletions(-)

diff --git a/Common/ArmCPUDetect.cpp b/Common/ArmCPUDetect.cpp
index 1fcd1f35a0..e7cc4308fe 100644
--- a/Common/ArmCPUDetect.cpp
+++ b/Common/ArmCPUDetect.cpp
@@ -268,13 +268,6 @@ void CPUInfo::Detect()
 	bASIMD = CheckCPUFeature("asimd");
 	num_cores = GetCoreCount();
 #endif
-// Since we can do this at compile-time (separate libraries) for every platform,
-// maybe we can replace the bArmV7 check with #if like we do for x86 and x86_64
-#if defined(__ARM_ARCH_7A__)
-	bArmV7 = true;
-#else
-	bArmV7 = false;
-#endif
 }
 
 // Turn the cpu info into a string we can show
diff --git a/Common/ArmEmitter.cpp b/Common/ArmEmitter.cpp
index d90a36d87d..34a814c3f4 100644
--- a/Common/ArmEmitter.cpp
+++ b/Common/ArmEmitter.cpp
@@ -202,7 +202,7 @@ void ARMXEmitter::ANDI2R(ARMReg rd, ARMReg rs, u32 val, ARMReg scratch)
 	}
 
 	// The worst case is 4 (e.g. 0x55555555.)
-	if (ops <= 3 || !cpu_info.bArmV7) {
+	if (ops <= 3) {
 		bool first = true;
 		for (int i = 0; i < 32; i += 2) {
 			u8 bits = RotR(val, i) & 0xFF;
@@ -272,7 +272,7 @@ void ARMXEmitter::ORI2R(ARMReg rd, ARMReg rs, u32 val, ARMReg scratch)
 	if (TryMakeOperand2_AllowInverse(val, op2, &inversed) && ops >= 3) {
 		MVN(scratch, op2);
 		ORR(rd, rs, scratch);
-	} else if (ops <= 3 || !cpu_info.bArmV7) {
+	} else if (ops <= 3) {
 		bool first = true;
 		for (int i = 0; i < 32; i += 2) {
 			u8 bits = RotR(val, i) & 0xFF;
@@ -333,26 +333,32 @@ void ARMXEmitter::MOVI2R(ARMReg reg, u32 val, bool optimize)
 	Operand2 op2;
 	bool inverse;
 
-	if (cpu_info.bArmV7 && !optimize)
+#ifdef HAVE_ARMV7
+	// Unused
+	if (!optimize)
 	{
 		// For backpatching on ARMv7
 		MOVW(reg, val & 0xFFFF);
 		MOVT(reg, val, true);
+		return;
 	}
-	else if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
+#endif
+
+	if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
 		inverse ? MVN(reg, op2) : MOV(reg, op2);
 	} else {
-		if (cpu_info.bArmV7)
-		{
-			// Use MOVW+MOVT for ARMv7+
-			MOVW(reg, val & 0xFFFF);
-			if(val & 0xFFFF0000)
-				MOVT(reg, val, true);
-		} else if (!TrySetValue_TwoOp(reg,val)) {
+#ifdef HAVE_ARMV7
+		// Use MOVW+MOVT for ARMv7+
+		MOVW(reg, val & 0xFFFF);
+		if(val & 0xFFFF0000)
+			MOVT(reg, val, true);
+#else
+		if (!TrySetValue_TwoOp(reg,val)) {
 			// Use literal pool for ARMv6.
 			AddNewLit(val);
 			LDR(reg, _PC); // To be backpatched later
 		}
+#endif
 	}
 }
diff --git a/Common/CPUDetect.h b/Common/CPUDetect.h
index 2027d2d17f..04c615b412 100644
--- a/Common/CPUDetect.h
+++ b/Common/CPUDetect.h
@@ -19,6 +19,11 @@
 #ifndef _CPUDETECT_H_
 #define _CPUDETECT_H_
 
+// Every architecture has its own define. Extend this as new architectures are added.
+#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7S__)
+#define HAVE_ARMV7 1
+#endif
+
 #include <string>
 
 enum CPUVendor {
@@ -70,7 +75,6 @@ struct CPUInfo {
 	bool bVFPv4;
 	bool bIDIVa;
 	bool bIDIVt;
-	bool bArmV7; // enable MOVT, MOVW etc
 
 	// ARMv8 specific
 	bool bFP;
diff --git a/Core/MIPS/ARM/ArmCompALU.cpp b/Core/MIPS/ARM/ArmCompALU.cpp
index e45020e662..cdbbcbd576 100644
--- a/Core/MIPS/ARM/ArmCompALU.cpp
+++ b/Core/MIPS/ARM/ArmCompALU.cpp
@@ -543,12 +543,12 @@ namespace MIPSComp
 			}
 
 			gpr.MapDirtyIn(rt, rs);
-			if (cpu_info.bArmV7) {
-				UBFX(gpr.R(rt), gpr.R(rs), pos, size);
-			} else {
-				MOV(gpr.R(rt), Operand2(gpr.R(rs), ST_LSR, pos));
-				ANDI2R(gpr.R(rt), gpr.R(rt), mask, R0);
-			}
+#ifdef HAVE_ARMV7
+			UBFX(gpr.R(rt), gpr.R(rs), pos, size);
+#else
+			MOV(gpr.R(rt), Operand2(gpr.R(rs), ST_LSR, pos));
+			ANDI2R(gpr.R(rt), gpr.R(rt), mask, R0);
+#endif
 			break;
 
 		case 0x4: //ins
@@ -567,13 +567,13 @@ namespace MIPSComp
 				ORI2R(gpr.R(rt), gpr.R(rt), inserted, R0);
 			} else {
 				gpr.MapDirtyIn(rt, rs, false);
-				if (cpu_info.bArmV7) {
-					BFI(gpr.R(rt), gpr.R(rs), pos, size-pos);
-				} else {
-					ANDI2R(R0, gpr.R(rs), sourcemask, R1);
-					ANDI2R(gpr.R(rt), gpr.R(rt), destmask, R1);
-					ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, pos));
-				}
+#ifdef HAVE_ARMV7
+				BFI(gpr.R(rt), gpr.R(rs), pos, size-pos);
+#else
+				ANDI2R(R0, gpr.R(rs), sourcemask, R1);
+				ANDI2R(gpr.R(rt), gpr.R(rt), destmask, R1);
+				ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, pos));
+#endif
 			}
 		}
 		break;
@@ -621,12 +621,12 @@ namespace MIPSComp
 				return;
 			}
 
-			if (cpu_info.bArmV7) {
-				gpr.MapDirtyIn(rd, rt);
-				RBIT(gpr.R(rd), gpr.R(rt));
-			} else {
-				Comp_Generic(op);
-			}
+#ifdef HAVE_ARMV7
+			gpr.MapDirtyIn(rd, rt);
+			RBIT(gpr.R(rd), gpr.R(rt));
+#else
+			Comp_Generic(op);
+#endif
 			break;
 
 		default:
 			Comp_Generic(op);
diff --git a/Core/MIPS/ARM/ArmCompFPU.cpp b/Core/MIPS/ARM/ArmCompFPU.cpp
index d72d85852c..1a5381cd7a 100644
--- a/Core/MIPS/ARM/ArmCompFPU.cpp
+++ b/Core/MIPS/ARM/ArmCompFPU.cpp
@@ -343,13 +343,13 @@ void Jit::Comp_mxc1(MIPSOpcode op)
 		{
 			gpr.MapDirtyIn(rt, MIPS_REG_FPCOND);
 			LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
-			if (cpu_info.bArmV7) {
-				BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
-			} else {
-				AND(R0, gpr.R(MIPS_REG_FPCOND), Operand2(1)); // Just in case
-				ANDI2R(gpr.R(rt), gpr.R(rt), ~(0x1 << 23), R1); // R1 won't be used, this turns into a simple BIC.
-				ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, 23));
-			}
+#ifdef HAVE_ARMV7
+			BFI(gpr.R(rt), gpr.R(MIPS_REG_FPCOND), 23, 1);
+#else
+			AND(R0, gpr.R(MIPS_REG_FPCOND), Operand2(1)); // Just in case
+			ANDI2R(gpr.R(rt), gpr.R(rt), ~(0x1 << 23), R1); // R1 won't be used, this turns into a simple BIC.
+			ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, 23));
+#endif
 		}
 		else if (fs == 0)
 		{
@@ -387,12 +387,12 @@ void Jit::Comp_mxc1(MIPSOpcode op)
 			*/
 			// Update MIPS state
 			STR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
-			if (cpu_info.bArmV7) {
-				UBFX(gpr.R(MIPS_REG_FPCOND), gpr.R(rt), 23, 1);
-			} else {
-				MOV(R0, Operand2(gpr.R(rt), ST_LSR, 23));
-				AND(gpr.R(MIPS_REG_FPCOND), R0, Operand2(1));
-			}
+#ifdef HAVE_ARMV7
+			UBFX(gpr.R(MIPS_REG_FPCOND), gpr.R(rt), 23, 1);
+#else
+			MOV(R0, Operand2(gpr.R(rt), ST_LSR, 23));
+			AND(gpr.R(MIPS_REG_FPCOND), R0, Operand2(1));
+#endif
 		}
 		return;
 	}
diff --git a/Core/MIPS/ARM/ArmJit.cpp b/Core/MIPS/ARM/ArmJit.cpp
index dd9eb7e015..c2411f9246 100644
--- a/Core/MIPS/ARM/ArmJit.cpp
+++ b/Core/MIPS/ARM/ArmJit.cpp
@@ -277,7 +277,8 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
 		js.compilerPC += 4;
 		js.numInstructions++;
 
-		if (!cpu_info.bArmV7 && (GetCodePtr() - b->checkedEntry - partialFlushOffset) > 3200)
+#ifndef HAVE_ARMV7
+		if ((GetCodePtr() - b->checkedEntry - partialFlushOffset) > 3200)
 		{
 			// We need to prematurely flush as we are out of range
 			FixupBranch skip = B_CC(CC_AL);
@@ -285,6 +286,7 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b)
 			SetJumpTarget(skip);
 			partialFlushOffset = GetCodePtr() - b->checkedEntry;
 		}
+#endif
 
 		// Safety check, in case we get a bunch of really large jit ops without a lot of branching.
 		if (GetSpaceLeft() < 0x800)
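
Note (not part of the patch): a minimal standalone sketch of the pattern this
commit adopts, replacing the runtime cpu_info.bArmV7 flag with a compile-time
HAVE_ARMV7 define. EmitLoad32 and main are hypothetical stand-ins, not PPSSPP
code; EmitLoad32 just prints the instructions ARMXEmitter::MOVI2R would emit,
and only the selection logic (MOVW/MOVT on ARMv7+, literal-pool load on
ARMv6) mirrors the diff above.

	// armv7_sketch.cpp -- illustrative only; EmitLoad32 is hypothetical.
	#include <cstdint>
	#include <cstdio>

	// Same compile-time detection this patch adds to Common/CPUDetect.h:
	// the compiler predefines __ARM_ARCH_7A__ / __ARM_ARCH_7S__ when
	// targeting an ARMv7 core, so the check moves out of CPUInfo::Detect().
	#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7S__)
	#define HAVE_ARMV7 1
	#endif

	void EmitLoad32(int reg, uint32_t val) {
	#ifdef HAVE_ARMV7
		// ARMv7+: MOVW writes the low halfword, MOVT the high halfword.
		std::printf("MOVW r%d, #0x%04x\n", reg, (unsigned)(val & 0xFFFF));
		if (val & 0xFFFF0000)
			std::printf("MOVT r%d, #0x%04x\n", reg, (unsigned)(val >> 16));
	#else
		// Pre-ARMv7 has no MOVW/MOVT, so fall back to a PC-relative load
		// from a literal pool, as MOVI2R does with AddNewLit()/LDR(reg, _PC).
		std::printf("LDR r%d, [pc, #<lit_offset>]\n", reg);
	#endif
	}

	int main() {
		EmitLoad32(0, 0xDEADBEEF); // MOVW+MOVT on ARMv7, one LDR otherwise
		return 0;
	}

Because the decision is made by the preprocessor, each build contains exactly
one code path, and the emitter no longer tests cpu_info.bArmV7 every time it
is asked to load a constant.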