From f6fcc9e9a714cc565eb2bc39847d4b91a23dcafa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrik=20Rydg=C3=A5rd?= Date: Thu, 10 Nov 2022 21:05:08 +0100 Subject: [PATCH] Add a way to view the "GPU_USE_" flags at runtime. Useful for sanity checking on-device. --- Common/GPU/DataFormat.h | 3 +++ Common/GPU/OpenGL/thin3d_gl.cpp | 1 - Common/GPU/thin3d.cpp | 21 ++++++++++++++++ GPU/GLES/GPU_GLES.cpp | 3 +++ GPU/GPUState.cpp | 43 +++++++++++++++++++++++++++++++++ GPU/GPUState.h | 4 +++ UI/DevScreens.cpp | 14 +++++++++++ 7 files changed, 88 insertions(+), 1 deletion(-) diff --git a/Common/GPU/DataFormat.h b/Common/GPU/DataFormat.h index 0c8a93e627..b116ac9ab1 100644 --- a/Common/GPU/DataFormat.h +++ b/Common/GPU/DataFormat.h @@ -76,6 +76,9 @@ inline bool DataFormatIsColor(DataFormat fmt) { return !DataFormatIsDepthStencil(fmt); } +// Limited format support for now. +const char *DataFormatToString(DataFormat fmt); + void ConvertFromRGBA8888(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format); void ConvertFromBGRA8888(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format); void ConvertToD32F(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format); diff --git a/Common/GPU/OpenGL/thin3d_gl.cpp b/Common/GPU/OpenGL/thin3d_gl.cpp index f004cbdc08..9878886fd0 100644 --- a/Common/GPU/OpenGL/thin3d_gl.cpp +++ b/Common/GPU/OpenGL/thin3d_gl.cpp @@ -525,7 +525,6 @@ static bool HasIntelDualSrcBug(int versions[4]) { } OpenGLContext::OpenGLContext() { - // TODO: Detect more caps if (gl_extensions.IsGLES) { if (gl_extensions.OES_packed_depth_stencil || gl_extensions.OES_depth24) { caps_.preferredDepthBufferFormat = DataFormat::D24_S8; diff --git a/Common/GPU/thin3d.cpp b/Common/GPU/thin3d.cpp index 3aad02abc6..fab4e665d1 100644 --- a/Common/GPU/thin3d.cpp +++ 
b/Common/GPU/thin3d.cpp @@ -56,6 +56,27 @@ size_t DataFormatSizeInBytes(DataFormat fmt) { } } +const char *DataFormatToString(DataFormat fmt) { + switch (fmt) { + case DataFormat::R8_UNORM: return "R8_UNORM"; + case DataFormat::R8G8_UNORM: return "R8G8_UNORM"; + case DataFormat::R8G8B8A8_UNORM: return "R8G8B8A8_UNORM"; + case DataFormat::B8G8R8A8_UNORM: return "B8G8R8A8_UNORM"; + case DataFormat::R16_UNORM: return "R16_UNORM"; + case DataFormat::R16_FLOAT: return "R16_FLOAT"; + case DataFormat::R32_FLOAT: return "R32_FLOAT"; + + case DataFormat::S8: return "S8"; + case DataFormat::D16: return "D16"; + case DataFormat::D24_S8: return "D24_S8"; + case DataFormat::D32F: return "D32F"; + case DataFormat::D32F_S8: return "D32F_S8"; + + default: + return "(N/A)"; + } +} + bool DataFormatIsDepthStencil(DataFormat fmt) { switch (fmt) { case DataFormat::D16: diff --git a/GPU/GLES/GPU_GLES.cpp b/GPU/GLES/GPU_GLES.cpp index cec35c21d7..a43cdd2467 100644 --- a/GPU/GLES/GPU_GLES.cpp +++ b/GPU/GLES/GPU_GLES.cpp @@ -144,6 +144,9 @@ GPU_GLES::~GPU_GLES() { shaderManagerGL_ = nullptr; delete framebufferManagerGL_; delete textureCacheGL_; + + // Clear features so they're not visible in system info. + gstate_c.useFlags = 0; } // Take the raw GL extension and versioning data and turn into feature flags. 
diff --git a/GPU/GPUState.cpp b/GPU/GPUState.cpp index bae93b34b1..95568e1e93 100644 --- a/GPU/GPUState.cpp +++ b/GPU/GPUState.cpp @@ -367,3 +367,46 @@ void GPUStateCache::DoState(PointerWrap &p) { Do(p, savedContextVersion); } } + +static const char *const gpuUseFlagNames[32] = { + "GPU_USE_DUALSOURCE_BLEND", + "GPU_USE_LIGHT_UBERSHADER", + "GPU_USE_FRAGMENT_TEST_CACHE", + "GPU_USE_VS_RANGE_CULLING", + "GPU_USE_BLEND_MINMAX", + "GPU_USE_LOGIC_OP", + "GPU_USE_DEPTH_RANGE_HACK", + "GPU_USE_TEXTURE_NPOT", + "GPU_USE_ANISOTROPY", + "GPU_USE_CLEAR_RAM_HACK", + "GPU_USE_INSTANCE_RENDERING", + "GPU_USE_VERTEX_TEXTURE_FETCH", + "GPU_USE_TEXTURE_FLOAT", + "GPU_USE_16BIT_FORMATS", + "GPU_USE_DEPTH_CLAMP", + "GPU_USE_TEXTURE_LOD_CONTROL", + "GPU_USE_DEPTH_TEXTURE", + "GPU_USE_ACCURATE_DEPTH", + "GPU_USE_GS_CULLING", + "GPU_USE_REVERSE_COLOR_ORDER", + "GPU_USE_FRAMEBUFFER_FETCH", + "GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT", + "GPU_ROUND_FRAGMENT_DEPTH_TO_16BIT", + "GPU_ROUND_DEPTH_TO_16BIT", + "GPU_USE_CLIP_DISTANCE", + "GPU_USE_CULL_DISTANCE", + "N/A", // bit 26 + "N/A", // bit 27 + "N/A", // bit 28 + "GPU_USE_VIRTUAL_REALITY", + "GPU_USE_SINGLE_PASS_STEREO", + "GPU_USE_SIMPLE_STEREO_PERSPECTIVE", +}; + +const char *GpuUseFlagToString(int useFlag) { + if ((u32)useFlag < 32) { + return gpuUseFlagNames[useFlag]; + } else { + return "N/A"; + } +} \ No newline at end of file diff --git a/GPU/GPUState.h b/GPU/GPUState.h index d7b25ac911..02fba9c309 100644 --- a/GPU/GPUState.h +++ b/GPU/GPUState.h @@ -470,6 +470,7 @@ struct UVScale { // location. Sometimes we need to take things into account in multiple places, it helps // to centralize into flags like this. They're also fast to check since the cache line // will be hot. +// NOTE: Do not forget to update the string array at the end of GPUState.cpp! 
enum { GPU_USE_DUALSOURCE_BLEND = FLAG_BIT(0), GPU_USE_LIGHT_UBERSHADER = FLAG_BIT(1), @@ -504,6 +505,9 @@ enum { GPU_USE_SIMPLE_STEREO_PERSPECTIVE = FLAG_BIT(31), }; +// Note that this takes a flag index, not the bit value. +const char *GpuUseFlagToString(int useFlag); + struct KnownVertexBounds { u16 minU; u16 minV; diff --git a/UI/DevScreens.cpp b/UI/DevScreens.cpp index 9d219eb1c8..9e6830e682 100644 --- a/UI/DevScreens.cpp +++ b/UI/DevScreens.cpp @@ -553,6 +553,7 @@ void SystemInfoScreen::CreateViews() { deviceSpecs->Add(new InfoItem(si->T("High precision float range"), temp)); } } + deviceSpecs->Add(new InfoItem(si->T("Depth buffer format"), DataFormatToString(draw->GetDeviceCaps().preferredDepthBufferFormat))); deviceSpecs->Add(new ItemHeader(si->T("OS Information"))); deviceSpecs->Add(new InfoItem(si->T("Memory Page Size"), StringFromFormat(si->T("%d bytes"), GetMemoryProtectPageSize()))); deviceSpecs->Add(new InfoItem(si->T("RW/RX exclusive"), PlatformIsWXExclusive() ? di->T("Active") : di->T("Inactive"))); @@ -617,6 +618,19 @@ void SystemInfoScreen::CreateViews() { deviceSpecs->Add(new InfoItem("Moga", moga)); #endif + if (gstate_c.useFlags != 0) { + // We're in-game, and can determine these. + // TODO: Call a static version of GPUCommon::CheckGPUFeatures() and derive them here directly. + + deviceSpecs->Add(new ItemHeader(si->T("GPU Flags"))); + + for (int i = 0; i < 32; i++) { + if (gstate_c.Use((1 << i))) { + deviceSpecs->Add(new TextView(GpuUseFlagToString(i), new LayoutParams(FILL_PARENT, WRAP_CONTENT)))->SetFocusable(true); + } + } + } + ViewGroup *storageScroll = new ScrollView(ORIENT_VERTICAL, new LinearLayoutParams(FILL_PARENT, FILL_PARENT)); storageScroll->SetTag("DevSystemInfoBuildConfig"); LinearLayout *storage = new LinearLayout(ORIENT_VERTICAL);