Vulkan: Attempt to fix some more shutdown race conditions, simplify.

Henrik Rydgård 2024-01-17 14:49:49 +01:00
parent 2b6bb46a98
commit cbb4236cd8
8 changed files with 91 additions and 145 deletions
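In short: instead of the old NotifyCompileDone()/WaitForCompileNotification() handshake and queue wiping, shutdown now stops the compile thread first and then drains any pipeline-creation work already handed to the thread pool, tracked by an atomic in-flight counter. A minimal standalone sketch of that drain pattern (illustrative names only, not the actual PPSSPP classes):

// Sketch of the shutdown pattern introduced below: each queued compile task
// bumps an atomic counter, decrements it when done, and teardown spins until
// the counter reaches zero so no task can outlive the objects it uses.
#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<int> g_tasksInFlight{0};

struct PipelineCompileTask {
	PipelineCompileTask() { g_tasksInFlight.fetch_add(1); }
	void Run() {
		// ... create the pipelines for this batch ...
		g_tasksInFlight.fetch_sub(1);
	}
};

// Call only after the producer (the compile thread) has been joined, so the
// counter can no longer grow.
static void WaitForAllCompileTasks() {
	while (g_tasksInFlight.load() > 0) {
		std::this_thread::sleep_for(std::chrono::milliseconds(2));
	}
}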


@@ -43,7 +43,7 @@ GLRenderManager::GLRenderManager(HistoryBuffer<FrameTimeData, FRAME_TIME_HISTORY
 }
 GLRenderManager::~GLRenderManager() {
-	_dbg_assert_(!run_);
+	_dbg_assert_(!runCompileThread_);
 	for (int i = 0; i < MAX_INFLIGHT_FRAMES; i++) {
 		_assert_(frameData_[i].deleter.IsEmpty());
@@ -125,7 +125,7 @@ void GLRenderManager::ThreadEnd() {
 //
 // NOTE: If run_ is true, we WILL run a task!
 bool GLRenderManager::ThreadFrame() {
-	if (!run_) {
+	if (!runCompileThread_) {
 		return false;
 	}
@@ -172,8 +172,8 @@ bool GLRenderManager::ThreadFrame() {
 void GLRenderManager::StopThread() {
 	// There's not really a lot to do here anymore.
 	INFO_LOG(G3D, "GLRenderManager::StopThread()");
-	if (run_) {
-		run_ = false;
+	if (runCompileThread_) {
+		runCompileThread_ = false;
 		std::unique_lock<std::mutex> lock(pushMutex_);
 		renderThreadQueue_.push(new GLRRenderThreadTask(GLRRunType::EXIT));
@@ -368,7 +368,7 @@ void GLRenderManager::BeginFrame(bool enableProfiling) {
 		frameData.readyForFence = false;
 	}
-	if (!run_) {
+	if (!runCompileThread_) {
 		WARN_LOG(G3D, "BeginFrame while !run_!");
 	}


@@ -874,7 +874,7 @@ private:
 	FastVec<GLRInitStep> initSteps_;
 	// Execution time state
-	bool run_ = true;
+	bool runCompileThread_ = true;
 	// Thread is managed elsewhere, and should call ThreadFrame.
 	GLQueueRunner queueRunner_;


@@ -266,15 +266,6 @@ public:
 		hacksEnabled_ = hacks;
 	}
-	void NotifyCompileDone() {
-		compileDone_.notify_all();
-	}
-	void WaitForCompileNotification() {
-		std::unique_lock<std::mutex> lock(compileDoneMutex_);
-		compileDone_.wait(lock);
-	}
 private:
 	bool InitBackbufferFramebuffers(int width, int height);
 	bool InitDepthStencilBuffer(VkCommandBuffer cmd, VulkanBarrierBatch *barriers); // Used for non-buffered rendering.
@@ -321,10 +312,6 @@ private:
 	// TODO: Enable based on compat.ini.
 	uint32_t hacksEnabled_ = 0;
-	// Compile done notifications.
-	std::mutex compileDoneMutex_;
-	std::condition_variable compileDone_;
 	// Image barrier helper used during command buffer record (PerformRenderPass etc).
 	// Stored here to help reuse the allocation.


@@ -237,6 +237,54 @@ void VKRGraphicsPipeline::LogCreationFailure() const {
 	ERROR_LOG(G3D, "======== END OF PIPELINE ==========");
 }
+
+struct SinglePipelineTask {
+	VKRGraphicsPipeline *pipeline;
+	VkRenderPass compatibleRenderPass;
+	RenderPassType rpType;
+	VkSampleCountFlagBits sampleCount;
+	double scheduleTime;
+	int countToCompile;
+};
+
+class CreateMultiPipelinesTask : public Task {
+public:
+	CreateMultiPipelinesTask(VulkanContext *vulkan, std::vector<SinglePipelineTask> tasks) : vulkan_(vulkan), tasks_(tasks) {
+		tasksInFlight_.fetch_add(1);
+	}
+	~CreateMultiPipelinesTask() {}
+	TaskType Type() const override {
+		return TaskType::CPU_COMPUTE;
+	}
+	TaskPriority Priority() const override {
+		return TaskPriority::HIGH;
+	}
+	void Run() override {
+		for (auto &task : tasks_) {
+			task.pipeline->Create(vulkan_, task.compatibleRenderPass, task.rpType, task.sampleCount, task.scheduleTime, task.countToCompile);
+		}
+		tasksInFlight_.fetch_sub(1);
+	}
+	VulkanContext *vulkan_;
+	std::vector<SinglePipelineTask> tasks_;
+	// Use during shutdown to make sure there aren't any leftover tasks sitting queued.
+	// Could probably be done more elegantly. Like waiting for all tasks of a type, or saving pointers to them, or something...
+	static void WaitForAll();
+	static std::atomic<int> tasksInFlight_;
+};
+
+void CreateMultiPipelinesTask::WaitForAll() {
+	while (tasksInFlight_.load() > 0) {
+		sleep_ms(2);
+	}
+}
+
+std::atomic<int> CreateMultiPipelinesTask::tasksInFlight_;
+
 VulkanRenderManager::VulkanRenderManager(VulkanContext *vulkan, bool useThread, HistoryBuffer<FrameTimeData, FRAME_TIME_HISTORY_LENGTH> &frameTimeHistory)
 	: vulkan_(vulkan), queueRunner_(vulkan),
 	initTimeMs_("initTimeMs"),
@@ -294,7 +342,8 @@ bool VulkanRenderManager::CreateBackbuffers() {
 	// Start the thread(s).
 	if (HasBackbuffers()) {
-		run_ = true; // For controlling the compiler thread's exit
+		runCompileThread_ = true; // For controlling the compiler thread's exit
+		compileBlocked_ = false;
 		if (useRenderThread_) {
 			INFO_LOG(G3D, "Starting Vulkan submission thread");
@@ -324,7 +373,8 @@ void VulkanRenderManager::StopThread() {
 	}
 	// Compiler and present thread still relies on this.
-	run_ = false;
+	runCompileThread_ = false;
+	compileBlocked_ = true;
 	if (presentWaitThread_.joinable()) {
 		presentWaitThread_.join();
@@ -344,36 +394,17 @@ void VulkanRenderManager::StopThread() {
 	INFO_LOG(G3D, "Vulkan submission thread joined. Frame=%d", vulkan_->GetCurFrame());
 	if (compileThread_.joinable()) {
-		// Lock to avoid race conditions.
-		std::lock_guard<std::mutex> guard(compileMutex_);
-		compileCond_.notify_all();
+		// Lock to avoid race conditions. Not sure if needed.
+		{
+			std::lock_guard<std::mutex> guard(compileMutex_);
+			compileCond_.notify_all();
+		}
+		compileThread_.join();
 	}
-	compileThread_.join();
-	INFO_LOG(G3D, "Vulkan compiler thread joined.");
-	// Eat whatever has been queued up for this frame if anything.
-	Wipe();
-	// Clean out any remaining queued data, which might refer to things that might not be valid
-	// when we restart the thread...
-	// Not sure if this is still needed
-	for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
-		auto &frameData = frameData_[i];
-		if (frameData.hasInitCommands) {
-			// Clear 'em out. This can happen on restart sometimes.
-			vkEndCommandBuffer(frameData.initCmd);
-			frameData.hasInitCommands = false;
-		}
-		if (frameData.hasMainCommands) {
-			vkEndCommandBuffer(frameData.mainCmd);
-			frameData.hasMainCommands = false;
-		}
-		if (frameData.hasPresentCommands) {
-			vkEndCommandBuffer(frameData.presentCmd);
-			frameData.hasPresentCommands = false;
-		}
-	}
+	INFO_LOG(G3D, "Vulkan compiler thread joined. Now wait for any straggling compile tasks.");
+	CreateMultiPipelinesTask::WaitForAll();
+	_dbg_assert_(steps_.empty());
 }
 
 void VulkanRenderManager::DestroyBackbuffers() {
@@ -386,7 +417,7 @@ void VulkanRenderManager::DestroyBackbuffers() {
 VulkanRenderManager::~VulkanRenderManager() {
 	INFO_LOG(G3D, "VulkanRenderManager destructor");
-	_dbg_assert_(!run_); // StopThread should already have been called from DestroyBackbuffers.
+	_dbg_assert_(!runCompileThread_); // StopThread should already have been called from DestroyBackbuffers.
 	vulkan_->WaitUntilQueueIdle();
@@ -400,53 +431,6 @@ VulkanRenderManager::~VulkanRenderManager() {
 	queueRunner_.DestroyDeviceObjects();
 }
-
-struct SinglePipelineTask {
-	VKRGraphicsPipeline *pipeline;
-	VkRenderPass compatibleRenderPass;
-	RenderPassType rpType;
-	VkSampleCountFlagBits sampleCount;
-	double scheduleTime;
-	int countToCompile;
-};
-
-class CreateMultiPipelinesTask : public Task {
-public:
-	CreateMultiPipelinesTask(VulkanContext *vulkan, std::vector<SinglePipelineTask> tasks) : vulkan_(vulkan), tasks_(tasks) {
-		tasksInFlight_.fetch_add(1);
-	}
-	~CreateMultiPipelinesTask() {}
-	TaskType Type() const override {
-		return TaskType::CPU_COMPUTE;
-	}
-	TaskPriority Priority() const override {
-		return TaskPriority::HIGH;
-	}
-	void Run() override {
-		for (auto &task : tasks_) {
-			task.pipeline->Create(vulkan_, task.compatibleRenderPass, task.rpType, task.sampleCount, task.scheduleTime, task.countToCompile);
-		}
-		tasksInFlight_.fetch_sub(1);
-	}
-	VulkanContext *vulkan_;
-	std::vector<SinglePipelineTask> tasks_;
-	// Use during shutdown to make sure there aren't any leftover tasks sitting queued.
-	// Could probably be done more elegantly. Like waiting for all tasks of a type, or saving pointers to them, or something...
-	static void WaitForAll() {
-		while (tasksInFlight_.load() > 0) {
-			sleep_ms(2);
-		}
-	}
-	static std::atomic<int> tasksInFlight_;
-};
-
-std::atomic<int> CreateMultiPipelinesTask::tasksInFlight_;
-
 void VulkanRenderManager::CompileThreadFunc() {
 	SetCurrentThreadName("ShaderCompile");
 	while (true) {
@@ -456,15 +440,12 @@ void VulkanRenderManager::CompileThreadFunc() {
 			// TODO: Should this be while?
 			// It may be beneficial also to unlock and wait a little bit to see if we get some more shaders
 			// so we can do a better job of thread-sorting them.
-			if (compileQueue_.empty() && run_) {
+			if (compileQueue_.empty() && runCompileThread_) {
 				compileCond_.wait(lock);
 			}
 			toCompile = std::move(compileQueue_);
 			compileQueue_.clear();
 		}
-		if (!run_) {
-			break;
-		}
 		int countToCompile = (int)toCompile.size();
@@ -506,31 +487,32 @@ void VulkanRenderManager::CompileThreadFunc() {
 			g_threadManager.EnqueueTask(task);
 		}
-		queueRunner_.NotifyCompileDone();
+		// Hold off just a bit before we check again, to allow bunches of pipelines to collect.
+		sleep_ms(1);
+		if (!runCompileThread_) {
+			break;
+		}
 	}
 }
 
 void VulkanRenderManager::DrainAndBlockCompileQueue() {
 	compileBlocked_ = true;
+	runCompileThread_ = false;
 	compileCond_.notify_all();
-	while (true) {
-		bool anyInQueue = false;
-		{
-			std::unique_lock<std::mutex> lock(compileMutex_);
-			anyInQueue = !compileQueue_.empty();
-		}
-		if (anyInQueue) {
-			queueRunner_.WaitForCompileNotification();
-		} else {
-			break;
-		}
-	}
+	compileThread_.join();
+
+	_assert_(compileQueue_.empty());
 	// At this point, no more tasks can be queued to the threadpool. So wait for them all to go away.
 	CreateMultiPipelinesTask::WaitForAll();
 }
 
 void VulkanRenderManager::ReleaseCompileQueue() {
 	compileBlocked_ = false;
+	runCompileThread_ = true;
+	INFO_LOG(G3D, "Restarting Vulkan compiler thread");
+	compileThread_ = std::thread(&VulkanRenderManager::CompileThreadFunc, this);
 }
 
 void VulkanRenderManager::ThreadFunc() {
@@ -575,7 +557,7 @@ void VulkanRenderManager::PresentWaitThreadFunc() {
 	_dbg_assert_(vkWaitForPresentKHR != nullptr);
 	uint64_t waitedId = frameIdGen_;
-	while (run_) {
+	while (runCompileThread_) {
 		const uint64_t timeout = 1000000000ULL; // 1 sec
 		if (VK_SUCCESS == vkWaitForPresentKHR(vulkan_->GetDevice(), vulkan_->GetSwapchain(), waitedId, timeout)) {
 			frameTimeHistory_[waitedId].actualPresent = time_now_d();
@@ -799,10 +781,7 @@ VKRGraphicsPipeline *VulkanRenderManager::CreateGraphicsPipeline(VKRGraphicsPipe
 		VKRRenderPassStoreAction::STORE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::DONT_CARE,
 	};
 	VKRRenderPass *compatibleRenderPass = queueRunner_.GetRenderPass(key);
-	if (compileBlocked_) {
-		delete pipeline;
-		return nullptr;
-	}
+	_dbg_assert_(!compileBlocked_);
 	std::lock_guard<std::mutex> lock(compileMutex_);
 	bool needsCompile = false;
 	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
@@ -871,9 +850,10 @@ void VulkanRenderManager::EndCurRenderStep() {
 	VkSampleCountFlagBits sampleCount = curRenderStep_->render.framebuffer ? curRenderStep_->render.framebuffer->sampleCount : VK_SAMPLE_COUNT_1_BIT;
 	compileMutex_.lock();
+	_dbg_assert_(!compileBlocked_);
 	bool needsCompile = false;
 	for (VKRGraphicsPipeline *pipeline : pipelinesToCheck_) {
-		if (!pipeline || compileBlocked_) {
+		if (!pipeline) {
 			// Not good, but let's try not to crash.
 			continue;
 		}
@@ -1441,13 +1421,6 @@ void VulkanRenderManager::Present() {
 	insideFrame_ = false;
 }
 
-void VulkanRenderManager::Wipe() {
-	for (auto step : steps_) {
-		delete step;
-	}
-	steps_.clear();
-}
-
 // Called on the render thread.
 //
 // Can be called again after a VKRRunType::SYNC on the same frame.
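The DrainAndBlockCompileQueue()/ReleaseCompileQueue() change above replaces the condition-variable handshake with a plain stop-join-restart of the compile thread. A hedged sketch of that flow with standalone names (not the PPSSPP API), assuming producers are blocked while the worker is down:

// Sketch: a compile worker that can be stopped (drained) and later restarted,
// mirroring the runCompileThread_ flag, the notify+join in DrainAndBlockCompileQueue(),
// and the new std::thread spawned in ReleaseCompileQueue().
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

struct CompileWorker {
	std::mutex mutex;
	std::condition_variable cond;
	std::vector<int> queue;            // stand-in for CompileQueueEntry
	std::atomic<bool> running{false};
	std::thread thread;

	void Start() {
		running = true;
		thread = std::thread([this] { Loop(); });
	}

	// Stop the worker and join it. Callers must ensure nothing enqueues while
	// the worker is down (the compileBlocked_ assert plays that role above).
	void StopAndDrain() {
		{
			std::lock_guard<std::mutex> lock(mutex);
			running = false;
			cond.notify_all();
		}
		thread.join();
	}

	void Loop() {
		while (true) {
			std::vector<int> batch;
			{
				std::unique_lock<std::mutex> lock(mutex);
				if (queue.empty() && running)
					cond.wait(lock);
				batch.swap(queue);
			}
			// ... compile everything in batch ...
			if (!running)
				break;
		}
	}
};

Restarting is just another Start() after StopAndDrain(), which is essentially what ReleaseCompileQueue() now does.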


@@ -229,8 +229,6 @@ public:
 	// These can run on a different thread!
 	void Finish();
 	void Present();
-	// Zaps queued up commands. Use if you know there's a risk you've queued up stuff that has already been deleted. Can happen during in-game shutdown.
-	void Wipe();
 
 	void SetInvalidationCallback(InvalidationCallback callback) {
 		invalidationCallback_ = callback;
@@ -565,7 +563,7 @@ private:
 	int curHeight_ = -1;
 	bool insideFrame_ = false;
-	bool run_ = false;
+	bool runCompileThread_ = false;
 	bool useRenderThread_ = true;
 	bool measurePresentTime_ = false;
@@ -601,7 +599,7 @@ private:
 	std::condition_variable compileCond_;
 	std::mutex compileMutex_;
 	std::vector<CompileQueueEntry> compileQueue_;
-	std::atomic<bool> compileBlocked_{};
+	std::atomic<bool> compileBlocked_{}; // Only for asserting on, now.
 	// Thread for measuring presentation delay.
 	std::thread presentWaitThread_;


@@ -491,8 +491,6 @@ public:
 	void EndFrame() override;
 	void Present(PresentMode presentMode, int vblanks) override;
-	void WipeQueue() override;
 
 	int GetFrameCount() override {
 		return frameCount_;
 	}
@@ -1118,10 +1116,6 @@ void VKContext::Invalidate(InvalidationFlags flags) {
 	}
 }
 
-void VKContext::WipeQueue() {
-	renderManager_.Wipe();
-}
-
 void VKContext::BindDescriptors(VkBuffer buf, PackedDescriptor descriptors[4]) {
 	descriptors[0].buffer.buffer = buf;
 	descriptors[0].buffer.offset = 0; // dynamic


@@ -822,8 +822,6 @@ public:
 	// NOTE: Not all backends support vblanks > 1. Some backends also can't change presentation mode immediately.
 	virtual void Present(PresentMode presentMode, int vblanks) = 0;
-	virtual void WipeQueue() {}
 
 	// This should be avoided as much as possible, in favor of clearing when binding a render target, which is native
 	// on Vulkan.
 	virtual void Clear(int mask, uint32_t colorval, float depthVal, int stencilVal) = 0;


@@ -1642,11 +1642,7 @@ ScreenRenderFlags EmuScreen::render(ScreenRenderMode mode) {
 		SetVRAppMode(screenManager()->topScreen() == this ? VRAppMode::VR_GAME_MODE : VRAppMode::VR_DIALOG_MODE);
 	}
-	if (mode & ScreenRenderMode::TOP) {
-		// TODO: Replace this with something else.
-		if (stopRender_)
-			draw->WipeQueue();
-	} else {
+	if (!(mode & ScreenRenderMode::TOP)) {
 		darken();
 	}
 	return flags;