diff --git a/Common/Common.h b/Common/Common.h
index 5dafca92bc..7e5597aead 100644
--- a/Common/Common.h
+++ b/Common/Common.h
@@ -96,7 +96,7 @@ private:
 #define CHECK_HEAP_INTEGRITY()
 
 // Alignment
-	#define GC_ALIGNED16(x) __declspec(align(16)) x
+	#define MEMORY_ALIGNED16(x) __declspec(align(16)) x
 	#define GC_ALIGNED32(x) __declspec(align(32)) x
 	#define GC_ALIGNED64(x) __declspec(align(64)) x
 	#define GC_ALIGNED128(x) __declspec(align(128)) x
@@ -137,7 +137,7 @@ private:
 #endif
 
 #define __forceinline inline __attribute__((always_inline))
-#define GC_ALIGNED16(x) __attribute__((aligned(16))) x
+#define MEMORY_ALIGNED16(x) __attribute__((aligned(16))) x
 #define GC_ALIGNED32(x) __attribute__((aligned(32))) x
 #define GC_ALIGNED64(x) __attribute__((aligned(64))) x
 #define GC_ALIGNED128(x) __attribute__((aligned(128))) x
diff --git a/Core/MIPS/x86/CompFPU.cpp b/Core/MIPS/x86/CompFPU.cpp
index 09598743dd..336c864565 100644
--- a/Core/MIPS/x86/CompFPU.cpp
+++ b/Core/MIPS/x86/CompFPU.cpp
@@ -87,7 +87,7 @@ void Jit::Comp_FPU3op(u32 op)
 	}
 }
 
-static u32 GC_ALIGNED16(ssLoadStoreTemp);
+static u32 MEMORY_ALIGNED16(ssLoadStoreTemp);
 
 void Jit::Comp_FPULS(u32 op)
 {
@@ -147,9 +147,9 @@
 	}
 }
 
-static const u64 GC_ALIGNED16(ssOneBits[2]) = {0x0000000100000001ULL, 0x0000000100000001ULL};
-static const u64 GC_ALIGNED16(ssSignBits2[2]) = {0x8000000080000000ULL, 0x8000000080000000ULL};
-static const u64 GC_ALIGNED16(ssNoSignMask[2]) = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
+static const u64 MEMORY_ALIGNED16(ssOneBits[2]) = {0x0000000100000001ULL, 0x0000000100000001ULL};
+static const u64 MEMORY_ALIGNED16(ssSignBits2[2]) = {0x8000000080000000ULL, 0x8000000080000000ULL};
+static const u64 MEMORY_ALIGNED16(ssNoSignMask[2]) = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
 
 static u32 ssCompareTemp;
 
diff --git a/Core/MIPS/x86/CompVFPU.cpp b/Core/MIPS/x86/CompVFPU.cpp
index 9ba153051e..93201d1fe6 100644
--- a/Core/MIPS/x86/CompVFPU.cpp
+++ b/Core/MIPS/x86/CompVFPU.cpp
@@ -57,8 +57,9 @@
 static const float one = 1.0f;
 static const float minus_one = -1.0f;
 static const float zero = 0.0f;
 
-const u32 GC_ALIGNED16( noSignMask[4] ) = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
-const u32 GC_ALIGNED16( signBitLower[4] ) = {0x80000000, 0, 0, 0};
+const u32 MEMORY_ALIGNED16( noSignMask[4] ) = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
+const u32 MEMORY_ALIGNED16( signBitLower[4] ) = {0x80000000, 0, 0, 0};
+const float MEMORY_ALIGNED16( oneOneOneOne[4] ) = {1.0f, 1.0f, 1.0f, 1.0f};
 
 void Jit::Comp_VPFX(u32 op) {
@@ -195,7 +196,7 @@ bool IsOverlapSafe(int dreg, int di, int sn, u8 sregs[], int tn = 0, u8 tregs[]
 	return IsOverlapSafeAllowS(dreg, di, sn, sregs, tn, tregs) && sregs[di] != dreg;
 }
 
-static u32 GC_ALIGNED16(ssLoadStoreTemp);
+static u32 MEMORY_ALIGNED16(ssLoadStoreTemp);
 
 void Jit::Comp_SV(u32 op) {
 	CONDITIONAL_DISABLE;
@@ -685,7 +686,6 @@ void Jit::Comp_VecDo3(u32 op) {
 			break;
 		case 6: // vsge
 		case 7: // vslt
-			MOVSS(XMM0, M((void *)&one));
 			break;
 		default:
 			DISABLE;
@@ -768,11 +768,11 @@
 			break;
 		case 6: // vsge
 			CMPNLTSS(tempxregs[i], fpr.V(tregs[i]));
-			ANDPS(tempxregs[i], R(XMM0));
+			ANDPS(tempxregs[i], M((void *)&oneOneOneOne));
 			break;
 		case 7: // vslt
 			CMPLTSS(tempxregs[i], fpr.V(tregs[i]));
-			ANDPS(tempxregs[i], R(XMM0));
+			ANDPS(tempxregs[i], M((void *)&oneOneOneOne));
 			break;
 		}
 		break;