Merge pull request #11447 from hrydgard/android-gl-shutdown-fixes

Avoid making any GL calls during shutdown on Android. Should help #11063
Unknown W. Brackets 2018-10-06 12:52:43 -07:00 committed by GitHub
commit 8a74e6f7b2
7 changed files with 140 additions and 43 deletions
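
The change follows one pattern throughout: once Android has already torn down the EGL context, the render thread still drains all queued work so CPU-side allocations get freed, but a one-way flag suppresses every actual GL call. A minimal, self-contained sketch of that pattern (Step and QueueRunner here are hypothetical stand-ins, not the real PPSSPP classes):

#include <cstdio>
#include <vector>

struct Step {
    char *cpuData = nullptr;   // CPU-side allocation that must be freed either way
    unsigned glName = 0;       // hypothetical GL object name this step would touch
};

class QueueRunner {
public:
    void SetSkipGLCalls() { skipGLCalls_ = true; }

    void RunSteps(std::vector<Step> &steps) {
        for (Step &s : steps) {
            if (!skipGLCalls_ && s.glName) {
                // With a live context, the GL call for this step would be issued here.
                printf("GL call for object %u\n", s.glName);
            }
            delete[] s.cpuData;   // CPU-side bookkeeping runs regardless of the flag
            s.cpuData = nullptr;
        }
        steps.clear();
    }

private:
    bool skipGLCalls_ = false;
};

int main() {
    QueueRunner runner;
    std::vector<Step> steps;
    steps.push_back({new char[16], 1u});
    steps.push_back({new char[16], 2u});
    runner.SetSkipGLCalls();   // the EGL context is already gone
    runner.RunSteps(steps);    // frees the CPU data, touches no GL
}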

View file

@ -23,6 +23,7 @@ public:
// Android (EGL, Vulkan) we do have all this info on the render thread.
virtual bool InitFromRenderThread(ANativeWindow *wnd, int desiredBackbufferSizeX, int desiredBackbufferSizeY, int backbufferFormat, int androidVersion) = 0;
virtual bool Initialized() = 0;
virtual void BeginAndroidShutdown() {}
private:
using GraphicsContext::InitFromRenderThread;

View file

@ -41,6 +41,10 @@ public:
return renderManager_->ThreadFrame();
}
void BeginAndroidShutdown() override {
renderManager_->SetSkipGLCalls();
}
void ThreadEnd() override {
renderManager_->ThreadEnd();
}
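
The base context class gains a BeginAndroidShutdown() hook with an empty default, so only the EGL/GL backend has to react; Vulkan simply inherits the no-op. A small sketch of that dispatch, with hypothetical class names standing in for the real headers:

#include <cstdio>

class GraphicsContextBase {
public:
    virtual ~GraphicsContextBase() {}
    // Default is a no-op so backends without the problem (e.g. Vulkan) need no change.
    virtual void BeginAndroidShutdown() {}
};

class EglGraphicsContext : public GraphicsContextBase {
public:
    void BeginAndroidShutdown() override {
        // In the real class this forwards to renderManager_->SetSkipGLCalls().
        printf("GL backend: stop issuing GL calls from now on\n");
    }
};

int main() {
    EglGraphicsContext egl;
    GraphicsContextBase *ctx = &egl;   // the JNI glue only sees the base type
    ctx->BeginAndroidShutdown();       // virtual dispatch reaches the GL override
}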

View file

@ -528,13 +528,18 @@ extern "C" void Java_org_ppsspp_ppsspp_NativeApp_shutdown(JNIEnv *, jclass) {
if (renderer_inited && useCPUThread && graphicsContext) {
// Only used in Java EGL path.
EmuThreadStop();
graphicsContext->BeginAndroidShutdown();
// Skipping GL calls, the old context is gone.
while (graphicsContext->ThreadFrame()) {
ILOG("graphicsContext->ThreadFrame executed to clear buffers");
continue;
}
ILOG("Joining emuthread");
EmuThreadJoin();
graphicsContext->ThreadEnd();
graphicsContext->ShutdownFromRenderThread();
ILOG("Graphics context now shut down from NativeApp_shutdown");
}
ILOG("NativeApp.shutdown() -- begin");
@ -565,6 +570,8 @@ extern "C" void Java_org_ppsspp_ppsspp_NativeRenderer_displayInit(JNIEnv * env,
ILOG("NativeApp.displayInit() restoring");
if (useCPUThread) {
EmuThreadStop();
graphicsContext->BeginAndroidShutdown();
// Skipping GL calls here because the old context is lost.
while (graphicsContext->ThreadFrame()) {
continue;
}
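
Both paths above follow the same ordering: stop the emulation thread, mark the context as shutting down, keep pumping ThreadFrame() until the render thread has drained its queue, and only then join and tear down. A toy, single-threaded model of that ordering (FakeContext is a stand-in, not the real API):

#include <cstdio>

struct FakeContext {
    int pendingFrames = 3;     // models work already queued for the render thread
    bool skipGL = false;

    void BeginAndroidShutdown() { skipGL = true; }

    // Returns true while there is still queued work to drain.
    bool ThreadFrame() {
        if (pendingFrames <= 0)
            return false;
        printf("frame drained, GL calls %s\n", skipGL ? "skipped" : "issued");
        return --pendingFrames > 0;
    }
};

int main() {
    FakeContext ctx;
    // EmuThreadStop() would come first in the real code.
    ctx.BeginAndroidShutdown();      // 1. never touch GL again
    while (ctx.ThreadFrame()) {      // 2. drain queued frames, bookkeeping only
    }
    // 3. EmuThreadJoin(), ThreadEnd(), ShutdownFromRenderThread() follow in the real path.
}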

View file

@ -90,7 +90,45 @@ static std::string GetInfoLog(GLuint name, Getiv getiv, GetLog getLog) {
return infoLog;
}
void GLQueueRunner::RunInitSteps(const std::vector<GLRInitStep> &steps) {
void GLQueueRunner::RunInitSteps(const std::vector<GLRInitStep> &steps, bool skipGLCalls) {
if (skipGLCalls) {
// Some bookkeeping still needs to be done.
for (size_t i = 0; i < steps.size(); i++) {
const GLRInitStep &step = steps[i];
switch (step.stepType) {
case GLRInitStepType::BUFFER_SUBDATA:
{
if (step.buffer_subdata.deleteData)
delete[] step.buffer_subdata.data;
break;
}
case GLRInitStepType::TEXTURE_IMAGE:
{
GLRTexture *tex = step.texture_image.texture;
if (step.texture_image.allocType == GLRAllocType::ALIGNED) {
FreeAlignedMemory(step.texture_image.data);
} else if (step.texture_image.allocType == GLRAllocType::NEW) {
delete[] step.texture_image.data;
}
break;
}
case GLRInitStepType::CREATE_PROGRAM:
{
WARN_LOG(G3D, "CREATE_PROGRAM found with skipGLCalls, not good");
break;
}
case GLRInitStepType::CREATE_SHADER:
{
WARN_LOG(G3D, "CREATE_SHADER found with skipGLCalls, not good");
break;
}
default:
break;
}
}
return;
}
CHECK_GL_ERROR_IF_DEBUG();
glActiveTexture(GL_TEXTURE0);
GLuint boundTexture = (GLuint)-1;
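
Even with GL calls skipped, queued uploads still own their CPU-side data, and it must be released with the deallocator matching how it was allocated (aligned vs. new[]), which is what the bookkeeping loop above does for BUFFER_SUBDATA and TEXTURE_IMAGE. A hypothetical sketch of that rule (FreeUploadData and AllocType are illustrative names only):

#include <cstdint>
#include <cstdlib>

enum class AllocType { ALIGNED, NEW, NONE };

// Frees queued upload data with the matching deallocator, exactly as the
// bookkeeping-only path has to, even though the upload itself never happens.
static void FreeUploadData(uint8_t *data, AllocType type) {
    switch (type) {
    case AllocType::ALIGNED:
        std::free(data);     // stand-in for FreeAlignedMemory()
        break;
    case AllocType::NEW:
        delete[] data;
        break;
    case AllocType::NONE:
        break;               // ownership stays with the caller
    }
}

int main() {
    FreeUploadData(new uint8_t[64], AllocType::NEW);
    FreeUploadData(static_cast<uint8_t *>(std::malloc(64)), AllocType::ALIGNED);
}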
@ -111,8 +149,8 @@ void GLQueueRunner::RunInitSteps(const std::vector<GLRInitStep> &steps) {
case GLRInitStepType::CREATE_BUFFER:
{
GLRBuffer *buffer = step.create_buffer.buffer;
glGenBuffers(1, &buffer->buffer);
glBindBuffer(buffer->target_, buffer->buffer);
glGenBuffers(1, &buffer->buffer_);
glBindBuffer(buffer->target_, buffer->buffer_);
glBufferData(buffer->target_, step.create_buffer.size, nullptr, step.create_buffer.usage);
CHECK_GL_ERROR_IF_DEBUG();
break;
@ -120,7 +158,7 @@ void GLQueueRunner::RunInitSteps(const std::vector<GLRInitStep> &steps) {
case GLRInitStepType::BUFFER_SUBDATA:
{
GLRBuffer *buffer = step.buffer_subdata.buffer;
glBindBuffer(GL_ARRAY_BUFFER, buffer->buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer->buffer_);
glBufferSubData(GL_ARRAY_BUFFER, step.buffer_subdata.offset, step.buffer_subdata.size, step.buffer_subdata.data);
if (step.buffer_subdata.deleteData)
delete[] step.buffer_subdata.data;
@ -445,7 +483,21 @@ void GLQueueRunner::InitCreateFramebuffer(const GLRInitStep &step) {
currentReadHandle_ = fbo->handle;
}
void GLQueueRunner::RunSteps(const std::vector<GLRStep *> &steps) {
void GLQueueRunner::RunSteps(const std::vector<GLRStep *> &steps, bool skipGLCalls) {
if (skipGLCalls) {
// Dry run
for (size_t i = 0; i < steps.size(); i++) {
const GLRStep &step = *steps[i];
switch (step.stepType) {
case GLRStepType::RENDER:
// TODO: With #11425 there'll be a case where we should really free spline data here.
break;
}
delete steps[i];
}
return;
}
CHECK_GL_ERROR_IF_DEBUG();
for (size_t i = 0; i < steps.size(); i++) {
const GLRStep &step = *steps[i];
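
RunSteps() gets the same treatment: on the skip path it is a dry run, but the runner still owns the heap-allocated steps and must delete them. A reduced sketch of that ownership rule (StepLike is an illustrative type):

#include <vector>

struct StepLike {
    int type = 0;
};

// Once steps are submitted, the runner owns them: even a dry run must delete them.
void RunStepsLike(std::vector<StepLike *> &steps, bool skipGLCalls) {
    for (StepLike *s : steps) {
        if (!skipGLCalls) {
            // ... execute the GL work described by *s ...
        }
        delete s;            // freed on both paths, so nothing leaks at shutdown
    }
    steps.clear();
}

int main() {
    std::vector<StepLike *> steps = { new StepLike(), new StepLike() };
    RunStepsLike(steps, /*skipGLCalls=*/true);
}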
@ -836,7 +888,7 @@ void GLQueueRunner::PerformRenderPass(const GLRStep &step) {
{
// TODO: Add fast path for glBindVertexBuffer
GLRInputLayout *layout = c.bindVertexBuffer.inputLayout;
GLuint buf = c.bindVertexBuffer.buffer ? c.bindVertexBuffer.buffer->buffer : 0;
GLuint buf = c.bindVertexBuffer.buffer ? c.bindVertexBuffer.buffer->buffer_ : 0;
assert(!c.bindVertexBuffer.buffer->Mapped());
if (buf != curArrayBuffer) {
glBindBuffer(GL_ARRAY_BUFFER, buf);
@ -865,14 +917,14 @@ void GLQueueRunner::PerformRenderPass(const GLRStep &step) {
if (c.bind_buffer.target == GL_ARRAY_BUFFER) {
Crash();
} else if (c.bind_buffer.target == GL_ELEMENT_ARRAY_BUFFER) {
GLuint buf = c.bind_buffer.buffer ? c.bind_buffer.buffer->buffer : 0;
GLuint buf = c.bind_buffer.buffer ? c.bind_buffer.buffer->buffer_ : 0;
assert(!c.bind_buffer.buffer->Mapped());
if (buf != curElemArrayBuffer) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buf);
curElemArrayBuffer = buf;
}
} else {
GLuint buf = c.bind_buffer.buffer ? c.bind_buffer.buffer->buffer : 0;
GLuint buf = c.bind_buffer.buffer ? c.bind_buffer.buffer->buffer_ : 0;
assert(!c.bind_buffer.buffer->Mapped());
glBindBuffer(c.bind_buffer.target, buf);
}
@ -1352,6 +1404,9 @@ void GLQueueRunner::fbo_unbind() {
}
GLRFramebuffer::~GLRFramebuffer() {
if (handle == 0 && z_stencil_buffer == 0 && z_buffer == 0 && stencil_buffer == 0)
return;
CHECK_GL_ERROR_IF_DEBUG();
if (gl_extensions.ARB_framebuffer_object || gl_extensions.IsGLES) {
if (handle) {
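
The destructor guard above means a framebuffer whose GL objects were never created, or whose handles were zeroed by the shutdown deleter, destructs without touching GL at all. A reduced, hypothetical version:

struct FramebufferLike {
    unsigned handle = 0;
    unsigned z_stencil_buffer = 0;   // the real class also has separate z/stencil buffers

    ~FramebufferLike() {
        if (handle == 0 && z_stencil_buffer == 0)
            return;          // never created, or already zeroed by the shutdown path
        // glDeleteFramebuffers()/glDeleteRenderbuffers() would only run past this point.
    }
};

int main() {
    FramebufferLike fb;      // all names zero: destruction makes no GL calls at all
    (void)fb;
}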

View file

@ -322,9 +322,9 @@ class GLQueueRunner {
public:
GLQueueRunner() {}
void RunInitSteps(const std::vector<GLRInitStep> &steps);
void RunInitSteps(const std::vector<GLRInitStep> &steps, bool skipGLCalls);
void RunSteps(const std::vector<GLRStep *> &steps);
void RunSteps(const std::vector<GLRStep *> &steps, bool skipGLCalls);
void LogSteps(const std::vector<GLRStep *> &steps);
void CreateDeviceObjects();

View file

@ -21,33 +21,51 @@ static bool OnRenderThread() {
#endif
// Runs on the GPU thread.
void GLDeleter::Perform(GLRenderManager *renderManager) {
void GLDeleter::Perform(GLRenderManager *renderManager, bool skipGLCalls) {
for (auto pushBuffer : pushBuffers) {
renderManager->UnregisterPushBuffer(pushBuffer);
if (skipGLCalls) {
pushBuffer->Destroy(false);
}
delete pushBuffer;
}
pushBuffers.clear();
for (auto shader : shaders) {
if (skipGLCalls)
shader->shader = 0; // prevent the glDeleteShader
delete shader;
}
shaders.clear();
for (auto program : programs) {
if (skipGLCalls)
program->program = 0; // prevent the glDeleteProgram
delete program;
}
programs.clear();
for (auto buffer : buffers) {
if (skipGLCalls)
buffer->buffer_ = 0;
delete buffer;
}
buffers.clear();
for (auto texture : textures) {
if (skipGLCalls)
texture->texture = 0;
delete texture;
}
textures.clear();
for (auto inputLayout : inputLayouts) {
// No GL objects in an inputLayout yet
delete inputLayout;
}
inputLayouts.clear();
for (auto framebuffer : framebuffers) {
if (skipGLCalls) {
framebuffer->z_buffer = 0;
framebuffer->stencil_buffer = 0;
framebuffer->z_stencil_buffer = 0;
framebuffer->handle = 0;
}
delete framebuffer;
}
framebuffers.clear();
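
GLDeleter::Perform() now neutralizes the wrapped objects before deleting them: zeroing the GL name turns each destructor into a no-op while the C++ memory is still reclaimed. A compact model of that idea (ShaderLike and PerformDelete are illustrative, not the real types):

#include <cstdio>

struct ShaderLike {
    unsigned shader = 0;
    ~ShaderLike() {
        if (shader) {
            // glDeleteShader(shader) would run here when a context is current.
            printf("glDeleteShader(%u)\n", shader);
        }
    }
};

void PerformDelete(ShaderLike *s, bool skipGLCalls) {
    if (skipGLCalls)
        s->shader = 0;       // the name is meaningless once the context is gone
    delete s;                // the C++ memory is always reclaimed
}

int main() {
    ShaderLike *s = new ShaderLike();
    s->shader = 42;
    PerformDelete(s, /*skipGLCalls=*/true);   // prints nothing, leaks nothing
}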
@ -65,7 +83,7 @@ GLRenderManager::~GLRenderManager() {
_assert_(frameData_[i].deleter_prev.IsEmpty());
}
// Was anything deleted during shutdown?
deleter_.Perform(this);
deleter_.Perform(this, false);
_assert_(deleter_.IsEmpty());
}
@ -112,15 +130,15 @@ void GLRenderManager::ThreadEnd() {
// Good point to run all the deleters to get rid of leftover objects.
for (int i = 0; i < MAX_INFLIGHT_FRAMES; i++) {
frameData_[i].deleter.Perform(this);
frameData_[i].deleter_prev.Perform(this);
frameData_[i].deleter.Perform(this, false);
frameData_[i].deleter_prev.Perform(this, false);
for (int j = 0; j < (int)frameData_[i].steps.size(); j++) {
delete frameData_[i].steps[j];
}
frameData_[i].steps.clear();
frameData_[i].initSteps.clear();
}
deleter_.Perform(this);
deleter_.Perform(this, false);
for (int i = 0; i < (int)steps_.size(); i++) {
delete steps_[i];
@ -154,7 +172,7 @@ bool GLRenderManager::ThreadFrame() {
}
VLOG("PULL: Setting frame[%d].readyForRun = false", threadFrame_);
frameData.readyForRun = false;
frameData.deleter_prev.Perform(this);
frameData.deleter_prev.Perform(this, skipGLCalls_);
frameData.deleter_prev.Take(frameData.deleter);
// Previously we had a quick exit here that avoided calling Run() if run_ was suddenly false,
// but that created a race condition where frames could end up not finished properly on resize etc.
@ -481,21 +499,25 @@ void GLRenderManager::Run(int frame) {
auto &stepsOnThread = frameData_[frame].steps;
auto &initStepsOnThread = frameData_[frame].initSteps;
// queueRunner_.LogSteps(stepsOnThread);
queueRunner_.RunInitSteps(initStepsOnThread);
queueRunner_.RunInitSteps(initStepsOnThread, skipGLCalls_);
initStepsOnThread.clear();
// Run this after RunInitSteps so any fresh GLRBuffers for the pushbuffers can get created.
if (!skipGLCalls_) {
for (auto iter : frameData.activePushBuffers) {
iter->Flush();
iter->UnmapDevice();
}
}
queueRunner_.RunSteps(stepsOnThread);
queueRunner_.RunSteps(stepsOnThread, skipGLCalls_);
stepsOnThread.clear();
if (!skipGLCalls_) {
for (auto iter : frameData.activePushBuffers) {
iter->MapDevice(bufferStrategy_);
}
}
switch (frameData.type) {
case GLRRunType::END:
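
The push-buffer Flush()/MapDevice() calls are wrapped in !skipGLCalls_ because both talk to the driver, while the staging memory itself can be managed without a context. A rough, self-contained model of that split (PushBufferLike is not the real GLPushBuffer):

#include <cstddef>
#include <cstring>
#include <vector>

struct PushBufferLike {
    std::vector<unsigned char> local;   // CPU staging, always safe to touch
    std::size_t writeOffset = 0;

    unsigned char *Allocate(std::size_t size) {
        if (writeOffset + size > local.size())
            local.resize(writeOffset + size);
        unsigned char *p = local.data() + writeOffset;
        writeOffset += size;
        return p;
    }

    void Flush(bool skipGLCalls) {
        if (!skipGLCalls) {
            // glBindBuffer(...) + glBufferSubData(..., writeOffset, local.data()) go here.
        }
        writeOffset = 0;                // CPU-side bookkeeping runs either way
    }
};

int main() {
    PushBufferLike pb;
    std::memcpy(pb.Allocate(4), "abcd", 4);
    pb.Flush(/*skipGLCalls=*/true);     // nothing reaches the driver
}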
@ -628,8 +650,8 @@ void GLPushBuffer::Flush() {
if (!buffers_[buf_].deviceMemory && writePtr_) {
auto &info = buffers_[buf_];
if (info.flushOffset != 0) {
assert(info.buffer->buffer);
glBindBuffer(target_, info.buffer->buffer);
assert(info.buffer->buffer_);
glBindBuffer(target_, info.buffer->buffer_);
glBufferSubData(target_, 0, info.flushOffset, info.localMemory);
}
@ -646,7 +668,7 @@ void GLPushBuffer::Flush() {
if (info.flushOffset == 0 || !info.deviceMemory)
continue;
glBindBuffer(target_, info.buffer->buffer);
glBindBuffer(target_, info.buffer->buffer_);
glFlushMappedBufferRange(target_, 0, info.flushOffset);
info.flushOffset = 0;
}
@ -664,16 +686,15 @@ bool GLPushBuffer::AddBuffer() {
return true;
}
// Executed on the render thread!
void GLPushBuffer::Destroy(bool onRenderThread) {
if (buf_ == -1)
return; // Already destroyed
for (BufInfo &info : buffers_) {
// This will automatically unmap device memory, if needed.
// NOTE: We immediately delete the buffer, don't go through the deleter, if we're on the render thread.
if (onRenderThread) {
_dbg_assert_(G3D, OnRenderThread());
delete info.buffer;
} else {
_dbg_assert_(G3D, !OnRenderThread());
render_->DeleteBuffer(info.buffer);
}
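
Destroy(onRenderThread) encodes a thread-affinity rule: GL objects may only be destroyed on the render thread, so other threads enqueue the deletion instead of performing it. A toy illustration (illustrative types; a real queue would also need locking):

#include <functional>
#include <queue>

struct BufferLike {
    unsigned name = 0;
};

struct RenderThreadQueue {
    std::queue<std::function<void()>> work;

    void DeleteBuffer(BufferLike *b) {      // may be called from other threads
        work.push([b] { delete b; });       // the delete itself runs later...
    }
    void Drain() {                          // ...on the render thread, here
        while (!work.empty()) {
            work.front()();
            work.pop();
        }
    }
};

void DestroyBuffer(BufferLike *b, bool onRenderThread, RenderThreadQueue &rt) {
    if (onRenderThread)
        delete b;                           // safe: the GL context is current here
    else
        rt.DeleteBuffer(b);                 // defer the GL-affine deletion
}

int main() {
    RenderThreadQueue rt;
    DestroyBuffer(new BufferLike(), /*onRenderThread=*/false, rt);
    rt.Drain();
}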
@ -708,7 +729,7 @@ void GLPushBuffer::NextBuffer(size_t minSize) {
}
void GLPushBuffer::Defragment() {
_dbg_assert_(G3D, !OnRenderThread());
_dbg_assert_msg_(G3D, !OnRenderThread(), "Defragment must not run on the render thread");
if (buffers_.size() <= 1) {
// Let's take this chance to jettison localMemory we don't need.
@ -728,7 +749,7 @@ void GLPushBuffer::Defragment() {
size_ = newSize;
bool res = AddBuffer();
_assert_(res);
_dbg_assert_msg_(G3D, res, "AddBuffer failed");
}
size_t GLPushBuffer::GetTotalSize() const {
@ -740,7 +761,7 @@ size_t GLPushBuffer::GetTotalSize() const {
}
void GLPushBuffer::MapDevice(GLBufferStrategy strategy) {
_dbg_assert_(G3D, OnRenderThread());
_dbg_assert_msg_(G3D, OnRenderThread(), "MapDevice must run on render thread");
strategy_ = strategy;
if (strategy_ == GLBufferStrategy::SUBDATA) {
@ -749,7 +770,7 @@ void GLPushBuffer::MapDevice(GLBufferStrategy strategy) {
bool mapChanged = false;
for (auto &info : buffers_) {
if (!info.buffer->buffer || info.deviceMemory) {
if (!info.buffer->buffer_ || info.deviceMemory) {
// Can't map - no device buffer associated yet or already mapped.
continue;
}
@ -763,7 +784,7 @@ void GLPushBuffer::MapDevice(GLBufferStrategy strategy) {
mapChanged = true;
}
assert(info.localMemory || info.deviceMemory);
_dbg_assert_msg_(G3D, info.localMemory || info.deviceMemory, "Local or device memory must succeed");
}
if (writePtr_ && mapChanged) {
@ -774,7 +795,7 @@ void GLPushBuffer::MapDevice(GLBufferStrategy strategy) {
}
void GLPushBuffer::UnmapDevice() {
_dbg_assert_(G3D, OnRenderThread());
_dbg_assert_msg_(G3D, OnRenderThread(), "UnmapDevice must run on render thread");
for (auto &info : buffers_) {
if (info.deviceMemory) {
@ -786,7 +807,7 @@ void GLPushBuffer::UnmapDevice() {
}
void *GLRBuffer::Map(GLBufferStrategy strategy) {
assert(buffer != 0);
assert(buffer_ != 0);
GLbitfield access = GL_MAP_WRITE_BIT;
if ((strategy & GLBufferStrategy::MASK_FLUSH) != 0) {
@ -799,7 +820,7 @@ void *GLRBuffer::Map(GLBufferStrategy strategy) {
void *p = nullptr;
bool allowNativeBuffer = strategy != GLBufferStrategy::SUBDATA;
if (allowNativeBuffer) {
glBindBuffer(target_, buffer);
glBindBuffer(target_, buffer_);
if (gl_extensions.ARB_buffer_storage || gl_extensions.EXT_buffer_storage) {
#ifndef IOS
@ -831,7 +852,7 @@ void *GLRBuffer::Map(GLBufferStrategy strategy) {
}
bool GLRBuffer::Unmap() {
glBindBuffer(target_, buffer);
glBindBuffer(target_, buffer_);
mapped_ = false;
return glUnmapBuffer(target_) == GL_TRUE;
}
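
The assert(!...Mapped()) checks earlier in the queue runner rely on the mapped_ flag maintained here: a buffer still mapped for CPU writes must not be bound as a draw source. A minimal model of that bookkeeping (BufferWrapper is a stand-in):

#include <cassert>

class BufferWrapper {
public:
    void *Map()   { mapped_ = true;  return staging_; }
    void  Unmap() { mapped_ = false; }
    bool  Mapped() const { return mapped_; }
private:
    bool mapped_ = false;
    char staging_[256] = {};
};

void BindForDraw(const BufferWrapper &b) {
    assert(!b.Mapped());     // drawing from a non-persistently mapped buffer is invalid
    // glBindBuffer(GL_ARRAY_BUFFER, ...) would follow with a live context.
}

int main() {
    BufferWrapper b;
    b.Map();
    b.Unmap();               // unmap (or flush) before the buffer feeds a draw call
    BindForDraw(b);
}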

View file

@ -73,6 +73,7 @@ public:
glDeleteShader(shader);
}
}
GLuint shader = 0;
bool valid = false;
// Warning: Won't know until a future frame.
@ -156,8 +157,8 @@ class GLRBuffer {
public:
GLRBuffer(GLuint target, size_t size) : target_(target), size_((int)size) {}
~GLRBuffer() {
if (buffer) {
glDeleteBuffers(1, &buffer);
if (buffer_) {
glDeleteBuffers(1, &buffer_);
}
}
@ -168,7 +169,7 @@ public:
return mapped_;
}
GLuint buffer = 0;
GLuint buffer_ = 0;
GLuint target_;
int size_;
@ -278,6 +279,7 @@ public:
size_t GetTotalSize() const;
void Destroy(bool onRenderThread);
void Flush();
protected:
@ -288,7 +290,6 @@ private:
bool AddBuffer();
void NextBuffer(size_t minSize);
void Defragment();
void Destroy(bool onRenderThread);
GLRenderManager *render_;
std::vector<BufInfo> buffers_;
@ -307,11 +308,12 @@ enum class GLRRunType {
class GLDeleter {
public:
void Perform(GLRenderManager *renderManager);
void Perform(GLRenderManager *renderManager, bool skipGLCalls);
bool IsEmpty() const {
return shaders.empty() && programs.empty() && buffers.empty() && textures.empty() && inputLayouts.empty() && framebuffers.empty() && pushBuffers.empty();
}
void Take(GLDeleter &other) {
_assert_msg_(G3D, IsEmpty(), "Deleter already has stuff");
shaders = std::move(other.shaders);
@ -885,6 +887,12 @@ public:
return queueRunner_.GetGLString(name);
}
// Used during Android-style ugly shutdown. No need to have a way to set it back because we'll be
// destroyed.
void SetSkipGLCalls() {
skipGLCalls_ = true;
}
private:
void BeginSubmitFrame(int frame);
void EndSubmitFrame(int frame);
@ -949,6 +957,7 @@ private:
bool firstFrame = true;
GLDeleter deleter_;
bool skipGLCalls_ = false;
int curFrame_ = 0;