Mirror of https://github.com/hrydgard/ppsspp.git, synced 2025-04-02 11:01:50 -04:00
Various refactoring
parent 913ba9de88
commit c7322edf7b
4 changed files with 104 additions and 89 deletions
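
Reader's summary (not part of the commit message): the bool triggerFrameFence flag on Submit() becomes a FrameSubmitType enum, FrameData gains GetInitCmd(), QueuePresent() and SubmitPending(), and submission is funneled through SubmitPending(), which ends and submits whichever of the init/main/present command buffers are pending and picks the fence to signal from the submit type. The render-thread call sites change roughly as follows; the old/new pairing is inferred from the adjacent lines in the diff below.

// Reader's sketch of the call-site change, comment form only (not code from the commit):
//   old: Submit(frame, true);    new: Submit(frame, FrameSubmitType::Present);   // EndSubmitFrame
//   old: Submit(frame, false);   new: Submit(frame, FrameSubmitType::Sync);      // EndSyncFrame
//   BeginSubmitFrame now does an early flush via SubmitPending(FrameSubmitType::Pending).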
@@ -62,62 +62,100 @@ void FrameData::AcquireNextImage(VulkanContext *vulkan, FrameDataShared &shared)
	}
}

void FrameData::SubmitInitCommands(VulkanContext *vulkan) {
	if (!hasInitCommands) {
		return;
	}
VkResult FrameData::QueuePresent(VulkanContext *vulkan, FrameDataShared &shared) {
	_dbg_assert_(hasAcquired);
	hasAcquired = false;

	if (profilingEnabled_) {
		// Pre-allocated query ID 1.
		vkCmdWriteTimestamp(initCmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, profile.queryPool, 1);
	}
	VkSwapchainKHR swapchain = vulkan->GetSwapchain();
	VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
	present.swapchainCount = 1;
	present.pSwapchains = &swapchain;
	present.pImageIndices = &curSwapchainImage;
	present.pWaitSemaphores = &shared.renderingCompleteSemaphore;
	present.waitSemaphoreCount = 1;

	VkResult res = vkEndCommandBuffer(initCmd);
	_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (init)! result=%s", VulkanResultToString(res));

	VkCommandBuffer cmdBufs[1];
	int numCmdBufs = 0;

	cmdBufs[numCmdBufs++] = initCmd;
	// Send the init commands off separately, so they can be processed while we're building the rest of the list.
	// (Likely the CPU will be more than a frame ahead anyway, but this will help when we try to work on latency).
	VkSubmitInfo submit_info{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	submit_info.commandBufferCount = (uint32_t)numCmdBufs;
	submit_info.pCommandBuffers = cmdBufs;
	res = vkQueueSubmit(vulkan->GetGraphicsQueue(), 1, &submit_info, VK_NULL_HANDLE);
	if (res == VK_ERROR_DEVICE_LOST) {
		_assert_msg_(false, "Lost the Vulkan device in split submit! If this happens again, switch Graphics Backend away from Vulkan");
	} else {
		_assert_msg_(res == VK_SUCCESS, "vkQueueSubmit failed (init)! result=%s", VulkanResultToString(res));
	}
	numCmdBufs = 0;

	hasInitCommands = false;
	return vkQueuePresentKHR(vulkan->GetGraphicsQueue(), &present);
}

void FrameData::SubmitMainFinal(VulkanContext *vulkan, bool triggerFrameFence, FrameDataShared &sharedData) {
VkCommandBuffer FrameData::GetInitCmd(VulkanContext *vulkan) {
	if (!hasInitCommands) {
		VkCommandBufferBeginInfo begin = {
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
			nullptr,
			VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
		};
		vkResetCommandPool(vulkan->GetDevice(), cmdPoolInit, 0);
		VkResult res = vkBeginCommandBuffer(initCmd, &begin);
		if (res != VK_SUCCESS) {
			return VK_NULL_HANDLE;
		}
		hasInitCommands = true;
	}
	return initCmd;
}

void FrameData::SubmitPending(VulkanContext *vulkan, FrameSubmitType type, FrameDataShared &sharedData) {
	VkCommandBuffer cmdBufs[2];
	int numCmdBufs = 0;

	cmdBufs[numCmdBufs++] = mainCmd;
	VkFence fenceToTrigger = VK_NULL_HANDLE;

	if (hasInitCommands) {
		if (profilingEnabled_) {
			// Pre-allocated query ID 1 - end of init cmdbuf.
			vkCmdWriteTimestamp(initCmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, profile.queryPool, 1);
		}

		VkResult res = vkEndCommandBuffer(initCmd);
		cmdBufs[numCmdBufs++] = initCmd;

		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (init)! result=%s", VulkanResultToString(res));
		hasInitCommands = false;
	}

	if (hasMainCommands) {
		VkResult res = vkEndCommandBuffer(mainCmd);
		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (main)! result=%s", VulkanResultToString(res));

		cmdBufs[numCmdBufs++] = mainCmd;
		hasMainCommands = false;

		if (type == FrameSubmitType::Sync) {
			fenceToTrigger = readbackFence;
		}
	}

	if (hasPresentCommands) {
		VkResult res = vkEndCommandBuffer(presentCmd);
		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (present)! result=%s", VulkanResultToString(res));

		cmdBufs[numCmdBufs++] = presentCmd;
		hasPresentCommands = false;

		if (type == FrameSubmitType::Present) {
			fenceToTrigger = fence;
		}
	}

	if (!numCmdBufs && fenceToTrigger == VK_NULL_HANDLE) {
		// Nothing to do.
		return;
	}

	VkSubmitInfo submit_info{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	VkPipelineStageFlags waitStage[1]{ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
	if (triggerFrameFence && !skipSwap) {
	if (type == FrameSubmitType::Present && !skipSwap) {
		submit_info.waitSemaphoreCount = 1;
		submit_info.pWaitSemaphores = &sharedData.acquireSemaphore;
		submit_info.pWaitDstStageMask = waitStage;
	}
	submit_info.commandBufferCount = (uint32_t)numCmdBufs;
	submit_info.pCommandBuffers = cmdBufs;
	if (triggerFrameFence && !skipSwap) {
	if (type == FrameSubmitType::Present && !skipSwap) {
		submit_info.signalSemaphoreCount = 1;
		submit_info.pSignalSemaphores = &sharedData.renderingCompleteSemaphore;
	}
	VkResult res = vkQueueSubmit(vulkan->GetGraphicsQueue(), 1, &submit_info, triggerFrameFence ? fence : readbackFence);
	VkResult res = vkQueueSubmit(vulkan->GetGraphicsQueue(), 1, &submit_info, fenceToTrigger);
	if (res == VK_ERROR_DEVICE_LOST) {
		_assert_msg_(false, "Lost the Vulkan device in vkQueueSubmit! If this happens again, switch Graphics Backend away from Vulkan");
	} else {
@@ -125,15 +163,12 @@ void FrameData::SubmitMainFinal(VulkanContext *vulkan, bool triggerFrameFence, F
	}

	// When !triggerFence, we notify after syncing with Vulkan.
	if (triggerFrameFence) {
	if (type == FrameSubmitType::Present || type == FrameSubmitType::Sync) {
		VERBOSE_LOG(G3D, "PULL: Frame %d.readyForFence = true", index);
		std::unique_lock<std::mutex> lock(push_mutex);
		readyForFence = true;
		push_condVar.notify_all();
	}

	hasInitCommands = false;
	hasPresentCommands = false;
}

void FrameDataShared::Init(VulkanContext *vulkan) {
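
The fence selection in SubmitPending() above can be condensed as follows; a reader's sketch, not code from the commit, and simplified in that the real code only signals a fence when the corresponding command buffer was actually pending.

// Reader's sketch: which fence SubmitPending() signals for each FrameSubmitType.
static VkFence FenceForSubmitType(const FrameData &frameData, FrameSubmitType type) {
	if (type == FrameSubmitType::Sync)
		return frameData.readbackFence;  // EndSyncFrame waits on this before reading back
	if (type == FrameSubmitType::Present)
		return frameData.fence;          // the per-frame fence, used for frame pacing
	return VK_NULL_HANDLE;               // Pending: early flush, nothing to signal
}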
@@ -35,6 +35,12 @@ struct FrameDataShared {
	void Destroy(VulkanContext *vulkan);
};

enum class FrameSubmitType {
	Pending,
	Sync,
	Present,
};

// Per-frame data, round-robin so we can overlap submission with execution of the previous frame.
struct FrameData {
	std::mutex push_mutex;
@@ -49,8 +55,7 @@ struct FrameData {
	VKRRunType type = VKRRunType::END;

	VkFence fence;
	VkFence readbackFence; // Strictly speaking we might only need one of these.
	bool readbackFenceUsed = false;
	VkFence readbackFence; // Strictly speaking we might only need one global of these.

	// These are on different threads so need separate pools.
	VkCommandPool cmdPoolInit; // Written to from main thread
@@ -61,7 +66,9 @@ struct FrameData {
	VkCommandBuffer presentCmd;

	bool hasInitCommands = false;
	bool hasMainCommands = false;
	bool hasPresentCommands = false;

	bool hasAcquired = false;

	std::vector<VKRStep *> steps;
@@ -81,8 +88,9 @@ struct FrameData {
	void Destroy(VulkanContext *vulkan);

	void AcquireNextImage(VulkanContext *vulkan, FrameDataShared &shared);
	VkResult QueuePresent(VulkanContext *vulkan, FrameDataShared &shared);
	VkCommandBuffer GetInitCmd(VulkanContext *vulkan);

	// This will only submit if we are actually recording init commands.
	void SubmitInitCommands(VulkanContext *vulkan);
	void SubmitMainFinal(VulkanContext *vulkan, bool triggerFrameFence, FrameDataShared &shared);
	void SubmitPending(VulkanContext *vulkan, FrameSubmitType type, FrameDataShared &shared);
};
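
A reader's sketch (not part of the commit) of the per-frame flow these FrameData methods imply, using only the public members declared above. The wrapper function and its call order are illustrative; the real sequencing lives in VulkanRenderManager::BeginSubmitFrame / Submit / EndSubmitFrame further down.

// Hypothetical driver for one frame, assuming the header above is included.
void FrameFlowSketch(VulkanContext *vulkan, FrameData &frame, FrameDataShared &shared) {
	// Early flush: send off any init commands (uploads etc.) recorded so far,
	// so the GPU can start on them while the rest of the frame is being built.
	frame.SubmitPending(vulkan, FrameSubmitType::Pending, shared);

	// ... record render passes into frame.mainCmd / frame.presentCmd ...

	// Final submit: ends whatever command buffers are still pending, signals the
	// per-frame fence, then presents the acquired swapchain image.
	frame.SubmitPending(vulkan, FrameSubmitType::Present, shared);
	frame.QueuePresent(vulkan, shared);
}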
@@ -535,7 +535,6 @@ void VulkanRenderManager::BeginFrame(bool enableProfiling, bool enableLogProfile

	// Can't set this until after the fence.
	frameData.profilingEnabled_ = enableProfiling;
	frameData.readbackFenceUsed = false;

	uint64_t queryResults[MAX_TIMESTAMP_QUERIES];
@@ -600,21 +599,7 @@ void VulkanRenderManager::BeginFrame(bool enableProfiling, bool enableLogProfile

VkCommandBuffer VulkanRenderManager::GetInitCmd() {
	int curFrame = vulkan_->GetCurFrame();
	FrameData &frameData = frameData_[curFrame];
	if (!frameData.hasInitCommands) {
		VkCommandBufferBeginInfo begin = {
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
			nullptr,
			VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
		};
		vkResetCommandPool(vulkan_->GetDevice(), frameData.cmdPoolInit, 0);
		VkResult res = vkBeginCommandBuffer(frameData.initCmd, &begin);
		if (res != VK_SUCCESS) {
			return VK_NULL_HANDLE;
		}
		frameData.hasInitCommands = true;
	}
	return frameData_[curFrame].initCmd;
	return frameData_[curFrame].GetInitCmd(vulkan_);
}

VKRGraphicsPipeline *VulkanRenderManager::CreateGraphicsPipeline(VKRGraphicsPipelineDesc *desc, uint32_t variantBitmask, const char *tag) {
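
For context, a hypothetical example of the kind of work the init command buffer carries; a reader's sketch, not code from the commit. Only GetInitCmd() comes from the code above; the staging/destination buffers, the size, and the helper name are made up for illustration.

// Recording a buffer upload into the per-frame init cmdbuf from the main thread.
// It will be submitted ahead of the frame's main command buffer.
void UploadExampleSketch(VulkanRenderManager *renderManager, VkBuffer staging, VkBuffer dst, VkDeviceSize size) {
	VkCommandBuffer initCmd = renderManager->GetInitCmd();  // lazily begins the init cmdbuf
	if (initCmd == VK_NULL_HANDLE)
		return;  // vkBeginCommandBuffer failed
	VkBufferCopy region{};
	region.size = size;
	vkCmdCopyBuffer(initCmd, staging, dst, 1, &region);
}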
@@ -1210,13 +1195,18 @@ void VulkanRenderManager::Wipe() {
	steps_.clear();
}

// Called on the render thread.
//
// Can be called multiple times with no bad side effects. This is so that we can either begin a frame the normal way,
// or stop it in the middle for a synchronous readback, then start over again mostly normally but without repeating
// the backbuffer image acquisition.
void VulkanRenderManager::BeginSubmitFrame(int frame) {
	FrameData &frameData = frameData_[frame];

	frameData.SubmitInitCommands(vulkan_);
	// Should only have at most the init command buffer pending here (that one came from the other thread).
	_dbg_assert_(!frameData.hasMainCommands);
	_dbg_assert_(!frameData.hasPresentCommands);
	frameData.SubmitPending(vulkan_, FrameSubmitType::Pending, frameDataShared_);

	if (!frameData.hasBegun) {
		// Effectively resets both main and present command buffers, since they both live in this pool.
@@ -1225,48 +1215,31 @@ void VulkanRenderManager::BeginSubmitFrame(int frame) {
		VkCommandBufferBeginInfo begin{ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
		begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
		VkResult res = vkBeginCommandBuffer(frameData.mainCmd, &begin);
		frameData.hasMainCommands = true;
		_assert_msg_(res == VK_SUCCESS, "vkBeginCommandBuffer failed! result=%s", VulkanResultToString(res));

		frameData.hasBegun = true;
	}
}

void VulkanRenderManager::Submit(int frame, bool triggerFrameFence) {
// Called on the render thread.
void VulkanRenderManager::Submit(int frame, FrameSubmitType submitType) {
	FrameData &frameData = frameData_[frame];

	VkResult res = vkEndCommandBuffer(frameData.mainCmd);
	_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (main)! result=%s", VulkanResultToString(res));

	if (frameData.hasPresentCommands) {
		VkResult res = vkEndCommandBuffer(frameData.presentCmd);
		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (present)! result=%s", VulkanResultToString(res));
	}

	// If any init commands left unsubmitted (like by a frame sync etc), submit it first.
	frameData.SubmitInitCommands(vulkan_);

	// Submit the main and final cmdbuf, ending by signalling the fence.
	frameData.SubmitMainFinal(vulkan_, triggerFrameFence, frameDataShared_);
	// If any init commands left unsubmitted (like by a frame sync etc), they'll also tag along.
	frameData.SubmitPending(vulkan_, submitType, frameDataShared_);
}

// Called on the render thread.
void VulkanRenderManager::EndSubmitFrame(int frame) {
	FrameData &frameData = frameData_[frame];
	frameData.hasBegun = false;

	Submit(frame, true);
	Submit(frame, FrameSubmitType::Present);

	if (!frameData.skipSwap) {
		VkSwapchainKHR swapchain = vulkan_->GetSwapchain();
		VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
		present.swapchainCount = 1;
		present.pSwapchains = &swapchain;
		present.pImageIndices = &frameData.curSwapchainImage;
		present.pWaitSemaphores = &frameDataShared_.renderingCompleteSemaphore;
		present.waitSemaphoreCount = 1;

		_dbg_assert_(frameData.hasAcquired);

		VkResult res = vkQueuePresentKHR(vulkan_->GetGraphicsQueue(), &present);
		VkResult res = frameData.QueuePresent(vulkan_, frameDataShared_);
		if (res == VK_ERROR_OUT_OF_DATE_KHR) {
			// We clearly didn't get this in vkAcquireNextImageKHR because of the skipSwap check above.
			// Do the increment.
@@ -1279,7 +1252,6 @@ void VulkanRenderManager::EndSubmitFrame(int frame) {
			// Success
			outOfDateFrames_ = 0;
		}
		frameData.hasAcquired = false;
	} else {
		// We only get here if vkAcquireNextImage returned VK_ERROR_OUT_OF_DATE.
		outOfDateFrames_++;
@@ -1314,10 +1286,8 @@ void VulkanRenderManager::Run(int frame) {
void VulkanRenderManager::EndSyncFrame(int frame) {
	FrameData &frameData = frameData_[frame];

	frameData.readbackFenceUsed = true;

	// The submit will trigger the readbackFence.
	Submit(frame, false);
	Submit(frame, FrameSubmitType::Sync);

	// Hard stall of the GPU, not ideal, but necessary so the CPU has the contents of the readback.
	vkWaitForFences(vulkan_->GetDevice(), 1, &frameData.readbackFence, true, UINT64_MAX);
@@ -1331,8 +1301,10 @@ void VulkanRenderManager::EndSyncFrame(int frame) {
		nullptr,
		VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
	};
	_dbg_assert_(!frameData.hasPresentCommands); // Readbacks should happen before we try to submit any present commands.
	vkResetCommandPool(vulkan_->GetDevice(), frameData.cmdPoolMain, 0);
	VkResult res = vkBeginCommandBuffer(frameData.mainCmd, &begin);
	frameData.hasMainCommands = true;
	_assert_(res == VK_SUCCESS);

	std::unique_lock<std::mutex> lock(frameData.push_mutex);
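
A reader's condensation (not code from the commit) of the synchronous-readback ordering shown in the two hunks above. The parameters stand in for the FrameData members of the same names; the submit itself, the fence reset, and the actual readback copy are omitted.

// Order of operations in the Sync path, per EndSyncFrame above.
void SyncReadbackSketch(VkDevice device, VkFence readbackFence, VkCommandPool cmdPoolMain, VkCommandBuffer mainCmd) {
	// 1. Submit(frame, FrameSubmitType::Sync) has ended mainCmd and submitted it, signalling readbackFence.
	// 2. Hard stall so the CPU can see the readback contents.
	vkWaitForFences(device, 1, &readbackFence, VK_TRUE, UINT64_MAX);
	// 3. Re-open the main command buffer so recording can continue within the same frame.
	VkCommandBufferBeginInfo begin{ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
	begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
	vkResetCommandPool(device, cmdPoolMain, 0);
	vkBeginCommandBuffer(mainCmd, &begin);
}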
@@ -463,7 +463,7 @@ private:

	void BeginSubmitFrame(int frame);
	void EndSubmitFrame(int frame);
	void Submit(int frame, bool triggerFence);
	void Submit(int frame, FrameSubmitType submitType);
	void SubmitInitCommands(int frame);

	// Bad for performance but sometimes necessary for synchronous CPU readbacks (screenshots and whatnot).