![](/img/trans.png)
[英]Calling AudioUnitInitialize() when no audio hardware is available
[英]AudioUnitInitialize throws error while initializing AudioComponentInstance
我使用下面的代碼來初始化我的音頻組件。
// Configures the shared audio session, creates a VoiceProcessingIO audio
// unit for 16 kHz mono 16-bit PCM capture, installs the input callback,
// initializes/starts the unit and routes output to the speaker.
// Call stopListeningWithCoreAudio to tear this down.
-(void) startListeningWithCoreAudio
{
    NSError *error = nil;
    // Check the BOOL return value, not the error pointer: the error object
    // is only guaranteed meaningful when the call reports failure.
    if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:&error])
        NSLog(@"error setting up audio session: %@", [error localizedDescription]);
    [[AVAudioSession sharedInstance] setDelegate:self];

    OSStatus status = AudioSessionSetActive(YES);
    checkStatus(status);

    // Find the Apple voice-processing I/O unit (mic in + speaker out).
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent inputComponent = AudioComponentFindNext( NULL, &desc );
    if (inputComponent == NULL)
    {
        // No matching component — e.g. no audio hardware is available.
        // Bail out instead of passing NULL to AudioComponentInstanceNew.
        NSLog(@"error: voice-processing audio component not found");
        return;
    }
    status = AudioComponentInstanceNew( inputComponent, &kAudioUnit );
    checkStatus( status );

    // Enable recording on the input scope of the input bus.
    UInt32 flag = 1;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag) );
    checkStatus(status);

    // Capture format: 16 kHz, mono, 16-bit signed integer, packed PCM.
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = 16000.0;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel = 16;
    audioFormat.mBytesPerPacket = 2;
    audioFormat.mBytesPerFrame = 2;
    // The format is applied to the OUTPUT scope of the input bus — the side
    // of bus 1 that hands captured samples to the app.
    status = AudioUnitSetProperty( kAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat) );
    checkStatus(status);

    // Install the render-input callback invoked when mic data is available.
    // NOTE(review): under ARC, inputProcRefCon would need (__bridge void *)self
    // — assumes this file is MRC; confirm build settings.
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct) );
    checkStatus(status);

    // Voice-processing options come from user defaults.
    // Bypass voice processing entirely?
    UInt32 audiobypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing,
                                  kAudioUnitScope_Global, kInputBus, &audiobypassProcessing, sizeof(audiobypassProcessing));
    checkStatus(status);

    // Automatic gain control.
    UInt32 audioAGC = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_AGC];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC,
                                  kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC));
    checkStatus(status);

    // Duck non-voice audio while recording.
    UInt32 audioDucking = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_DUCKING];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio,
                                  kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking));
    checkStatus(status);

    // Voice-processing quality level.
    UInt32 quality = [[NSUserDefaults standardUserDefaults] integerForKey:VOICE_QUALITY];
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality,
                                  kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality));
    checkStatus(status);

    // All properties must be set BEFORE initializing the unit.
    status = AudioUnitInitialize(kAudioUnit);
    checkStatus(status);

    status = AudioOutputUnitStart( kAudioUnit );
    checkStatus(status);

    // Route playback to the loudspeaker instead of the receiver.
    UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker;
    status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof (audioRoute), &audioRoute);
    checkStatus(status);
}
// Stops recording and tears down the audio unit / session.
// Counterpart of startListeningWithCoreAudio.
-(void) stopListeningWithCoreAudio
{
    // Stop the unit BEFORE uninitializing it. The original order
    // (uninitialize first) tears down a *running* audio unit, which is
    // what made the next AudioUnitInitialize/AudioOutputUnitStart fail.
    OSStatus status = AudioOutputUnitStop( kAudioUnit );
    checkStatus( status );

    status = AudioUnitUninitialize( kAudioUnit );
    checkStatus(status);

    // Disposal is deferred to disposeCoreAudio on the main thread;
    // disposing here (from a background thread) caused re-init failures.
    // if(kAudioUnit)
    // {
    //     status = AudioComponentInstanceDispose(kAudioUnit);
    //     checkStatus(status);
    //     kAudioUnit = nil;
    // }

    status = AudioSessionSetActive(NO);
    checkStatus(status);

    NSError *error = nil;
    // Check the BOOL return value, not the error pointer.
    if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategorySoloAmbient error:&error])
        NSLog(@"error setting up audio session: %@", [error localizedDescription]);
}
它第一次工作正常。 我的意思是按下按鈕事件調用startListeningWithCoreAudio
。 它可以很好地記錄/處理音頻。 在其他事件中,我調用stopListeningWithCoreAudio
來停止錄制/處理音頻。
當我再次嘗試調用函數startListeningWithCoreAudio
時,問題就出現了。 它會拋出兩個函數的錯誤。 AudioUnitInitialize
和AudioOutputUnitStart
這是從名為startListeningWithCoreAudio
。
有誰可以幫我解決問題是什么?
我找到了解決方案。 如果我們在後台線程調用下面的函數，則會產生問題。
extern OSStatus AudioUnitUninitialize(AudioUnit inUnit)
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance)
所以,我通過以下方式在主線程上調用dispose方法。
[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];
// Disposes of the audio unit instance. Must run on the main thread
// (dispatched via performSelectorOnMainThread) — disposing from a
// background thread caused the re-initialization failure described above.
-(void) disposeCoreAudio
{
    OSStatus status = AudioComponentInstanceDispose(kAudioUnit);
    // Was silently ignored; report it like every other Core Audio call.
    checkStatus(status);
    kAudioUnit = nil;
}
它解決了這個問題。 因此，正確的順序是：停止錄制，取消初始化錄制器，並在主線程上釋放錄制器。
一個可能的問題是您的代碼在停止之前嘗試取消初始化正在運行的音頻單元。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.