diff --git a/Common/GPU/Vulkan/VulkanRenderManager.cpp b/Common/GPU/Vulkan/VulkanRenderManager.cpp
index a26a58369a..b5dba41183 100644
--- a/Common/GPU/Vulkan/VulkanRenderManager.cpp
+++ b/Common/GPU/Vulkan/VulkanRenderManager.cpp
@@ -511,17 +511,25 @@ void VulkanRenderManager::CompileThreadFunc() {
 }
 
 void VulkanRenderManager::DrainAndBlockCompileQueue() {
-	std::unique_lock<std::mutex> lock(compileMutex_);
 	compileBlocked_ = true;
 	compileCond_.notify_all();
-	while (!compileQueue_.empty()) {
-		queueRunner_.WaitForCompileNotification();
+	while (true) {
+		bool anyInQueue = false;
+		{
+			std::unique_lock<std::mutex> lock(compileMutex_);
+			anyInQueue = !compileQueue_.empty();
+		}
+		if (anyInQueue) {
+			queueRunner_.WaitForCompileNotification();
+		} else {
+			break;
+		}
 	}
+	// At this point, no more tasks can be queued to the threadpool. So wait for them all to go away.
 	CreateMultiPipelinesTask::WaitForAll();
 }
 
 void VulkanRenderManager::ReleaseCompileQueue() {
-	std::unique_lock<std::mutex> lock(compileMutex_);
 	compileBlocked_ = false;
 }
 
@@ -791,11 +799,11 @@ VKRGraphicsPipeline *VulkanRenderManager::CreateGraphicsPipeline(VKRGraphicsPipe
 		VKRRenderPassStoreAction::STORE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::DONT_CARE,
 	};
 	VKRRenderPass *compatibleRenderPass = queueRunner_.GetRenderPass(key);
-	std::lock_guard<std::mutex> lock(compileMutex_);
 	if (compileBlocked_) {
 		delete pipeline;
 		return nullptr;
 	}
+	std::lock_guard<std::mutex> lock(compileMutex_);
 	bool needsCompile = false;
 	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
 		if (!(variantBitmask & (1 << i)))
diff --git a/Common/GPU/Vulkan/VulkanRenderManager.h b/Common/GPU/Vulkan/VulkanRenderManager.h
index 3a2f5f0225..8bd6a22235 100644
--- a/Common/GPU/Vulkan/VulkanRenderManager.h
+++ b/Common/GPU/Vulkan/VulkanRenderManager.h
@@ -601,7 +601,7 @@ private:
 	std::condition_variable compileCond_;
 	std::mutex compileMutex_;
 	std::vector<CompileQueueEntry> compileQueue_;
-	bool compileBlocked_ = false;
+	std::atomic<bool> compileBlocked_{};
 
 	// Thread for measuring presentation delay.
 	std::thread presentWaitThread_;