Merge pull request #10481 from KentuckyCompass/ios-audio-fix

iOS: handle audio session interruptions and reduce micro-stutter
commit e82237bb0d
Henrik Rydgård, 2017-12-31 10:11:44 +01:00 (committed by GitHub)
6 changed files with 267 additions and 153 deletions
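In short: the app now tears audio down when the AVAudioSession is interrupted (phone call, alarm, Siri) and rebuilds it when the interruption ends or the app becomes active again. A minimal sketch of the notification pattern involved, using the standard AVFoundation API (condensed from the AppDelegate changes below):

#import <AVFoundation/AVFoundation.h>

// Register once at startup, e.g. in application:didFinishLaunchingWithOptions:
[[NSNotificationCenter defaultCenter]
    addObserver:self
       selector:@selector(handleAudioSessionInterruption:)
           name:AVAudioSessionInterruptionNotification
         object:[AVAudioSession sharedInstance]];

// The handler inspects AVAudioSessionInterruptionTypeKey in userInfo:
//   AVAudioSessionInterruptionTypeBegan -> shut the audio unit down
//   AVAudioSessionInterruptionTypeEnded -> reinitialize it (if the app is active)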

CMakeLists.txt

@@ -672,7 +672,7 @@ elseif(IOS)
ios/AppDelegate.h
ios/ViewController.mm
ios/ViewController.h
ios/iOSCoreAudio.cpp
ios/iOSCoreAudio.mm
ios/iOSCoreAudio.h
ios/PPSSPPUIApplication.h
ios/PPSSPPUIApplication.mm
@@ -681,7 +681,7 @@ elseif(IOS)
ios/iCade/iCadeReaderView.h
ios/iCade/iCadeReaderView.m
ios/iCade/iCadeState.h)
set(nativeExtraLibs ${nativeExtraLibs} "-framework Foundation -framework AudioToolbox -framework CoreGraphics -framework QuartzCore -framework UIKit -framework GLKit -framework OpenAL")
set(nativeExtraLibs ${nativeExtraLibs} "-framework Foundation -framework AudioToolbox -framework CoreGraphics -framework QuartzCore -framework UIKit -framework GLKit -framework OpenAL -framework AVFoundation")
if(EXISTS "${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks/GameController.framework")
set(nativeExtraLibs ${nativeExtraLibs} "-weak_framework GameController")
endif()

Core/System.cpp

@@ -134,6 +134,13 @@ void Audio_Init() {
}
}
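// Safe to call more than once: the audioInitialized flag makes a repeated
// shutdown (e.g. the interruption handler followed by CPU_Shutdown) a no-op.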
void Audio_Shutdown() {
if (audioInitialized) {
audioInitialized = false;
host->ShutdownSound();
}
}
bool IsOnSeparateCPUThread() {
if (cpuThread != nullptr) {
return cpuThreadID == std::this_thread::get_id();
@@ -290,8 +297,7 @@ void CPU_Shutdown() {
__KernelShutdown();
HLEShutdown();
if (coreParameter.enableSound) {
host->ShutdownSound();
audioInitialized = false; // deleted in ShutdownSound
Audio_Shutdown();
}
pspFileSystem.Shutdown();
mipsr4k.Shutdown();

Core/System.h

@@ -77,6 +77,7 @@ void PSP_RunLoopFor(int cycles);
void Core_UpdateDebugStats(bool collectStats);
void Audio_Init();
void Audio_Shutdown();
bool IsOnSeparateCPUThread();
bool IsAudioInitialised();

ios/AppDelegate.m

@@ -1,21 +1,108 @@
#import "AppDelegate.h"
#import "ViewController.h"
#import "base/NativeApp.h"
#import "Core/System.h"
#import "Core/Config.h"
#import "Common/Log.h"
#import <AVFoundation/AVFoundation.h>
@implementation AppDelegate
// This will be called when the user receives and dismisses a phone call
// or other interruption to the audio session
// Registered in application:didFinishLaunchingWithOptions:
// for AVAudioSessionInterruptionNotification
-(void) handleAudioSessionInterruption:(NSNotification *)notification {
NSNumber *interruptionType = notification.userInfo[AVAudioSessionInterruptionTypeKey];
// Sanity check in case it's somehow not an NSNumber
if (![interruptionType respondsToSelector:@selector(unsignedIntegerValue)]) {
return; // Let's not crash
}
switch ([interruptionType unsignedIntegerValue]) {
case AVAudioSessionInterruptionTypeBegan:
INFO_LOG(SYSTEM, "ios audio session interruption beginning");
if (g_Config.bEnableSound) {
Audio_Shutdown();
}
break;
case AVAudioSessionInterruptionTypeEnded:
INFO_LOG(SYSTEM, "ios audio session interruption ending");
if (g_Config.bEnableSound) {
/*
* Only try to reinit audio if in the foreground, otherwise
* it may fail. Instead, trust that applicationDidBecomeActive
* will do it later.
*/
if ([UIApplication sharedApplication].applicationState == UIApplicationStateActive) {
Audio_Init();
}
}
break;
default:
break;
}
}
// This will be called when iOS's shared media services are reset
// Registered in application:didFinishLaunchingWithOptions:
// for AVAudioSessionMediaServicesWereResetNotification
-(void) handleMediaServicesWereReset:(NSNotification *)notification {
INFO_LOG(SYSTEM, "ios media services were reset - reinitializing audio");
/*
When media services are reset, Apple recommends:
1) Dispose of orphaned audio objects (such as players, recorders,
converters, or audio queues) and create new ones
2) Reset any internal audio states being tracked, including all
properties of AVAudioSession
3) When appropriate, reactivate the AVAudioSession instance using the
setActive:error: method
We accomplish this by shutting down and reinitializing audio
*/
if (g_Config.bEnableSound) {
Audio_Shutdown();
Audio_Init();
}
}
-(BOOL) application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
self.viewController = [[ViewController alloc] init];
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handleAudioSessionInterruption:) name:AVAudioSessionInterruptionNotification object:[AVAudioSession sharedInstance]];
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handleMediaServicesWereReset:) name:AVAudioSessionMediaServicesWereResetNotification object:nil];
self.window.rootViewController = self.viewController;
[self.window makeKeyAndVisible];
return YES;
}
-(void) dealloc {
[[NSNotificationCenter defaultCenter] removeObserver:self];
}
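// Going inactive (home button, incoming call UI, app switcher): stop audio
// now; applicationDidBecomeActive below starts it again.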
-(void) applicationWillResignActive:(UIApplication *)application {
if (g_Config.bEnableSound) {
Audio_Shutdown();
}
NativeMessageReceived("lost_focus", "");
}
-(void) applicationDidBecomeActive:(UIApplication *)application {
if (g_Config.bEnableSound) {
Audio_Init();
}
NativeMessageReceived("got_focus", "");
}
@end

ios/iOSCoreAudio.cpp (deleted)

@@ -1,148 +0,0 @@
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
// This code implements the emulated audio using CoreAudio for iOS
// Originally written by jtraynham
#include "iOSCoreAudio.h"
#include <AudioToolbox/AudioToolbox.h>
#define SAMPLE_RATE 44100
#define STREAM_MAX_FRAME_COUNT 2048
static short stream[STREAM_MAX_FRAME_COUNT * 2 * 2]; // frames * sample size * number of channels
AudioComponentInstance audioInstance = nil;
int NativeMix(short *audio, int num_samples);
OSStatus iOSCoreAudioCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// see if we have any sound to play
UInt32 frames = (inNumberFrames > STREAM_MAX_FRAME_COUNT ? STREAM_MAX_FRAME_COUNT : inNumberFrames);
UInt32 framesReady = NativeMix(stream, frames);
if (framesReady == 0) {
// oops, we don't currently have any sound, so return silence
*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
return noErr;
}
// grab the output buffer and copy data into it
AudioSampleType *output = (AudioSampleType *)ioData->mBuffers[0].mData;
UInt32 bytesReady = framesReady * sizeof(short) * 2;
memcpy(output, stream, bytesReady);
// make sure and tell it how much audio data is there
ioData->mBuffers[0].mDataByteSize = bytesReady;
return noErr;
}
void iOSCoreAudioInit()
{
if (!audioInstance) {
OSErr err;
// first, grab the default output
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
// create our instance
err = AudioComponentInstanceNew(defaultOutput, &audioInstance);
if (err != noErr) {
audioInstance = nil;
return;
}
// create our callback so we can give it the audio data
AURenderCallbackStruct input;
input.inputProc = iOSCoreAudioCallback;
input.inputProcRefCon = NULL;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// setup the audio format we'll be using (stereo pcm)
AudioStreamBasicDescription streamFormat;
memset(&streamFormat, 0, sizeof(streamFormat));
streamFormat.mSampleRate = SAMPLE_RATE;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8;
streamFormat.mChannelsPerFrame = 2;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame;
streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// k, all setup, so init
err = AudioUnitInitialize(audioInstance);
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// finally start playback
err = AudioOutputUnitStart(audioInstance);
if (err != noErr) {
AudioUnitUninitialize(audioInstance);
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// we're good to go
}
}
void iOSCoreAudioShutdown()
{
if (audioInstance) {
AudioOutputUnitStop(audioInstance);
AudioUnitUninitialize(audioInstance);
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
}
}

ios/iOSCoreAudio.mm (new file, 168 lines)

@@ -0,0 +1,168 @@
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
// This code implements the emulated audio using CoreAudio for iOS
// Originally written by jtraynham
#include "iOSCoreAudio.h"
#include "Common/Log.h"
#include <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#define SAMPLE_RATE 44100
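// The emulator mixes to 44.1 kHz stereo, so the output unit is fed at that rate.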
AudioComponentInstance audioInstance = nil;
int NativeMix(short *audio, int num_samples);
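// NativeMix is implemented by the app core: it writes up to num_samples
// stereo frames into 'audio' and returns the number of frames produced.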
OSStatus iOSCoreAudioCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// see if we have any sound to play
short *output = (short *)ioData->mBuffers[0].mData;
UInt32 framesReady = NativeMix(output, inNumberFrames);
if (framesReady == 0) {
// oops, we don't currently have any sound, so return silence
*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
}
/*
* You'd think iOS would want to know how many frames were
* actually generated in case it was less than asked for, but
* apparently that causes micro-stuttering and everything just
* works better if we lie and say we successfully generated as
* many frames as it wanted... weird. We still get micro-stuttering
* but it's less noticeable this way.
*/
//UInt32 bytesReady = framesReady * sizeof(short) * 2;
UInt32 bytesReady = inNumberFrames * sizeof(short) * 2;
ioData->mBuffers[0].mDataByteSize = bytesReady;
return noErr;
}
void iOSCoreAudioInit()
{
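	// (Re)activate the shared session first: after an interruption or a media
	// services reset, the output unit cannot start until the session is active.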
NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
if (![session setActive:YES error:&error]) {
ERROR_LOG(SYSTEM, "Failed to activate AVFoundation audio session");
if (error.localizedDescription) {
NSLog(@"%@", error.localizedDescription);
}
if (error.localizedFailureReason) {
NSLog(@"%@", error.localizedFailureReason);
}
}
if (!audioInstance) {
OSErr err;
// first, grab the default output
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
// create our instance
err = AudioComponentInstanceNew(defaultOutput, &audioInstance);
if (err != noErr) {
audioInstance = nil;
return;
}
// create our callback so we can give it the audio data
AURenderCallbackStruct input;
input.inputProc = iOSCoreAudioCallback;
input.inputProcRefCon = NULL;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// setup the audio format we'll be using (stereo pcm)
AudioStreamBasicDescription streamFormat;
memset(&streamFormat, 0, sizeof(streamFormat));
streamFormat.mSampleRate = SAMPLE_RATE;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mBitsPerChannel = sizeof(short) * 8;
streamFormat.mChannelsPerFrame = 2;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame;
streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// k, all setup, so init
err = AudioUnitInitialize(audioInstance);
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// finally start playback
err = AudioOutputUnitStart(audioInstance);
if (err != noErr) {
AudioUnitUninitialize(audioInstance);
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// we're good to go
}
}
void iOSCoreAudioShutdown()
{
if (audioInstance) {
AudioOutputUnitStop(audioInstance);
AudioUnitUninitialize(audioInstance);
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
}
}
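For reference, a sketch of how this module is driven (the call sites are not in this diff; in PPSSPP the host's InitSound/ShutdownSound, which Audio_Init/Audio_Shutdown above invoke, are assumed to route here):

iOSCoreAudioInit();      // activates the session, builds and starts the RemoteIO unit
// ... while running, Core Audio pulls frames through iOSCoreAudioCallback/NativeMix ...
iOSCoreAudioShutdown();  // stops, uninitializes and disposes the unit; a no-op if not running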