mirror of
https://github.com/hrydgard/ppsspp.git
synced 2025-04-02 11:01:50 -04:00
iOS Audio fixes
This commit is contained in:
parent
ffff60c73c
commit
24961e28ef
6 changed files with 192 additions and 8 deletions
|
@ -173,8 +173,9 @@ if(NOT MSVC)
|
|||
|
||||
if(IOS)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
|
||||
# jtraynham: re-enabling default/"standard" architectures (which is a fat armv7/armv7s binary)
|
||||
#set(CMAKE_OSX_ARCHITECTURES "armv7")
|
||||
# armv7s (without resorting to FastMemory) is still a work in progress
|
||||
# comment out the next line to enable default/"standard" architectures (which is a fat armv7/armv7s binary)
|
||||
set(CMAKE_OSX_ARCHITECTURES "armv7")
|
||||
elseif(APPLE)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -U__STRICT_ANSI__")
|
||||
# Karen/angelXwind: --macosx-version-min=10.7 is needed in order to produce binaries that OS X 10.7 Lion can execute. However, it seems that PPSSPP won't support 10.6 or lower without getting rid of -stdlib=libc++ ...which probably won't end well. So I guess PPSSPP will strictly be a 10.7+ app.
|
||||
|
@ -1144,9 +1145,12 @@ if(USE_FFMPEG)
|
|||
endif()
|
||||
|
||||
if(APPLE)
|
||||
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} bz2 "-framework VideoDecodeAcceleration" "-framework CoreVideo")
|
||||
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} bz2 "-framework CoreVideo")
|
||||
if (NOT IOS)
|
||||
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} "-framework VideoDecodeAcceleration")
|
||||
endif()
|
||||
endif(APPLE)
|
||||
|
||||
|
||||
set(LinkCommon ${LinkCommon} ${FFMPEG_LIBRARIES})
|
||||
add_definitions(-DUSE_FFMPEG)
|
||||
endif(USE_FFMPEG)
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
#include "base/NativeApp.h"
|
||||
#include "file/vfs.h"
|
||||
#include "file/zip_read.h"
|
||||
#include "ext/jpge/jpge.h"
|
||||
#include "native/ext/jpge/jpge.h"
|
||||
#include "gfx_es2/gl_state.h"
|
||||
#include "gfx/gl_lost_manager.h"
|
||||
#include "gfx/texture.h"
|
||||
|
@ -79,6 +79,10 @@ static UI::Theme ui_theme;
|
|||
#include <mach-o/dyld.h>
|
||||
#endif
|
||||
|
||||
#ifdef IOS
|
||||
#include "ios/iOSCoreAudio.h"
|
||||
#endif
|
||||
|
||||
Texture *uiTexture;
|
||||
|
||||
ScreenManager *screenManager;
|
||||
|
@ -171,9 +175,17 @@ std::string boot_filename = "";
|
|||
|
||||
void NativeHost::InitSound(PMixer *mixer) {
|
||||
g_mixer = mixer;
|
||||
|
||||
#ifdef IOS
|
||||
iOSCoreAudioInit();
|
||||
#endif
|
||||
}
|
||||
|
||||
void NativeHost::ShutdownSound() {
|
||||
#ifdef IOS
|
||||
iOSCoreAudioShutdown();
|
||||
#endif
|
||||
|
||||
g_mixer = 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -131,8 +131,6 @@ ViewController* sharedViewController;
|
|||
dp_xscale = (float)dp_xres / (float)pixel_xres;
|
||||
dp_yscale = (float)dp_yres / (float)pixel_yres;
|
||||
|
||||
if (g_Config.bEnableSound)
|
||||
self.audioEngine = [[[AudioEngine alloc] init] autorelease];
|
||||
/*
|
||||
UISwipeGestureRecognizer* gesture = [[[UISwipeGestureRecognizer alloc] initWithTarget:self action:@selector(swipeGesture:)] autorelease];
|
||||
[self.view addGestureRecognizer:gesture];
|
||||
|
|
148
ios/iOSCoreAudio.cpp
Normal file
148
ios/iOSCoreAudio.cpp
Normal file
|
@ -0,0 +1,148 @@
|
|||
// Copyright (c) 2012- PPSSPP Project.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, version 2.0 or later versions.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License 2.0 for more details.
|
||||
|
||||
// A copy of the GPL 2.0 should have been included with the program.
|
||||
// If not, see http://www.gnu.org/licenses/
|
||||
|
||||
// Official git repository and contact information can be found at
|
||||
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
|
||||
|
||||
// This code implements the emulated audio using CoreAudio for iOS
|
||||
// Originally written by jtraynham
|
||||
|
||||
#include "iOSCoreAudio.h"
|
||||
#include <AudioToolbox/AudioToolbox.h>
|
||||
|
||||
#define SAMPLE_RATE 44100
|
||||
|
||||
#define STREAM_MAX_FRAME_COUNT 2048
|
||||
static short stream[STREAM_MAX_FRAME_COUNT * 2 * 2]; // frames * sample size * number of channels
|
||||
|
||||
AudioComponentInstance audioInstance = nil;
|
||||
|
||||
int NativeMix(short *audio, int num_samples);
|
||||
|
||||
// Render callback invoked by the RemoteIO audio unit whenever the hardware
// needs more sample data.  Pulls interleaved signed 16-bit stereo PCM from
// the emulator mixer (NativeMix) into the static staging buffer `stream`,
// then copies it into the unit's output buffer.
//
// inRefCon, inTimeStamp and inBusNumber are unused.  ioActionFlags is used
// to signal silence; ioData receives the rendered audio.
OSStatus iOSCoreAudioCallback(void *inRefCon,
                              AudioUnitRenderActionFlags *ioActionFlags,
                              const AudioTimeStamp *inTimeStamp,
                              UInt32 inBusNumber,
                              UInt32 inNumberFrames,
                              AudioBufferList *ioData)
{
	// Clamp the request to the staging buffer's capacity.
	UInt32 frames = (inNumberFrames > STREAM_MAX_FRAME_COUNT ? STREAM_MAX_FRAME_COUNT : inNumberFrames);
	UInt32 framesReady = (UInt32)NativeMix(stream, frames);
	if (framesReady == 0) {
		// No sound available right now.  The OutputIsSilence flag is only a
		// hint to downstream units — the callback must still write actual
		// zeros into the buffer, otherwise stale/garbage data may be played.
		*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
		memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
		return noErr;
	}

	// Copy the mixed audio into the unit's output buffer and report how many
	// bytes are valid: framesReady frames * 2 bytes/sample * 2 channels.
	AudioSampleType *output = (AudioSampleType *)ioData->mBuffers[0].mData;
	UInt32 bytesReady = framesReady * sizeof(short) * 2;
	memcpy(output, stream, bytesReady);
	ioData->mBuffers[0].mDataByteSize = bytesReady;

	return noErr;
}
|
||||
|
||||
void iOSCoreAudioInit()
|
||||
{
|
||||
if (!audioInstance) {
|
||||
OSErr err;
|
||||
|
||||
// first, grab the default output
|
||||
AudioComponentDescription defaultOutputDescription;
|
||||
defaultOutputDescription.componentType = kAudioUnitType_Output;
|
||||
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
|
||||
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
|
||||
defaultOutputDescription.componentFlags = 0;
|
||||
defaultOutputDescription.componentFlagsMask = 0;
|
||||
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
|
||||
|
||||
// create our instance
|
||||
err = AudioComponentInstanceNew(defaultOutput, &audioInstance);
|
||||
if (err != noErr) {
|
||||
audioInstance = nil;
|
||||
return;
|
||||
}
|
||||
|
||||
// create our callback so we can give it the audio data
|
||||
AURenderCallbackStruct input;
|
||||
input.inputProc = iOSCoreAudioCallback;
|
||||
input.inputProcRefCon = NULL;
|
||||
err = AudioUnitSetProperty(audioInstance,
|
||||
kAudioUnitProperty_SetRenderCallback,
|
||||
kAudioUnitScope_Input,
|
||||
0,
|
||||
&input,
|
||||
sizeof(input));
|
||||
if (err != noErr) {
|
||||
AudioComponentInstanceDispose(audioInstance);
|
||||
audioInstance = nil;
|
||||
return;
|
||||
}
|
||||
|
||||
// setup the audio format we'll be using (stereo pcm)
|
||||
AudioStreamBasicDescription streamFormat;
|
||||
memset(&streamFormat, 0, sizeof(streamFormat));
|
||||
streamFormat.mSampleRate = SAMPLE_RATE;
|
||||
streamFormat.mFormatID = kAudioFormatLinearPCM;
|
||||
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
|
||||
streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8;
|
||||
streamFormat.mChannelsPerFrame = 2;
|
||||
streamFormat.mFramesPerPacket = 1;
|
||||
streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame;
|
||||
streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket;
|
||||
err = AudioUnitSetProperty(audioInstance,
|
||||
kAudioUnitProperty_StreamFormat,
|
||||
kAudioUnitScope_Input,
|
||||
0,
|
||||
&streamFormat,
|
||||
sizeof(AudioStreamBasicDescription));
|
||||
if (err != noErr) {
|
||||
AudioComponentInstanceDispose(audioInstance);
|
||||
audioInstance = nil;
|
||||
return;
|
||||
}
|
||||
|
||||
// k, all setup, so init
|
||||
err = AudioUnitInitialize(audioInstance);
|
||||
if (err != noErr) {
|
||||
AudioComponentInstanceDispose(audioInstance);
|
||||
audioInstance = nil;
|
||||
return;
|
||||
}
|
||||
|
||||
// finally start playback
|
||||
err = AudioOutputUnitStart(audioInstance);
|
||||
if (err != noErr) {
|
||||
AudioUnitUninitialize(audioInstance);
|
||||
AudioComponentInstanceDispose(audioInstance);
|
||||
audioInstance = nil;
|
||||
return;
|
||||
}
|
||||
|
||||
// we're good to go
|
||||
}
|
||||
}
|
||||
|
||||
void iOSCoreAudioShutdown()
|
||||
{
|
||||
if (audioInstance) {
|
||||
AudioOutputUnitStop(audioInstance);
|
||||
AudioUnitUninitialize(audioInstance);
|
||||
AudioComponentInstanceDispose(audioInstance);
|
||||
audioInstance = nil;
|
||||
}
|
||||
}
|
22
ios/iOSCoreAudio.h
Normal file
22
ios/iOSCoreAudio.h
Normal file
|
@ -0,0 +1,22 @@
|
|||
// Copyright (c) 2012- PPSSPP Project.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, version 2.0 or later versions.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License 2.0 for more details.
|
||||
|
||||
// A copy of the GPL 2.0 should have been included with the program.
|
||||
// If not, see http://www.gnu.org/licenses/
|
||||
|
||||
// Official git repository and contact information can be found at
|
||||
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
|
||||
|
||||
// This code implements the emulated audio using CoreAudio for iOS
|
||||
// Originally written by jtraynham
|
||||
|
||||
void iOSCoreAudioInit();
|
||||
void iOSCoreAudioShutdown();
|
2
native
2
native
|
@ -1 +1 @@
|
|||
Subproject commit deb4ba83235a86491d073db73a1e5bab2a5ce523
|
||||
Subproject commit 80c85b1c604ccead734d595dd7f67656f3fd9d85
|
Loading…
Add table
Reference in a new issue