Currently I am using part of Apple's sample code "iPhoneMixerEQGraphTest" to play back audio in my app. However, it does not "buffer": it sends all of the data to the buffer at once, which I believe is causing my app to overload. Memory usage climbs to over 100 MB and then the app crashes; normally it stays under 20 MB. How can I make the audio read buffer only part of the audio at a time instead of the whole file? The crash happens in the callback, at: memcpy(out, &in[sample], ioData->mBuffers[0].mDataByteSize);
.
I load my audio with this method:
// Loads the audio for `item` by reading the ENTIRE file into an in-memory
// sample buffer (mUserData.soundBuffer[i]) converted to the client format.
// NOTE(review): for long files this allocates numFrames * channels samples up
// front, which is the source of the memory spike described above — the real
// fix is to stream the file in chunks from a secondary thread instead.
- (void)setPlayerItem:(PlayerItem *)item
{
    mUserData.frameNum = 0;
    mUserData.maxNumFrames = 0;

    for (int i = 0; i < 1 && i < MAXBUFS; i++) {
        printf("loadFiles, %d\n", i);

        // Release any buffer left over from a previous item so repeated calls
        // to this setter don't leak the old allocation.
        if (mUserData.soundBuffer[i].data) {
            free(mUserData.soundBuffer[i].data);
            mUserData.soundBuffer[i].data = NULL;
        }
        mUserData.soundBuffer[i].numFrames = 0;

        ExtAudioFileRef xafref = 0;
        OSStatus result = ExtAudioFileOpenURL((__bridge CFURLRef)[item url], &xafref);
        if (result || 0 == xafref) { printf("ExtAudioFileOpenURL result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

        // get the file data format, this represents the file's actual data format
        // for informational purposes only -- the client format set on ExtAudioFile is what we really want back
        CAStreamBasicDescription fileFormat;
        UInt32 propSize = sizeof(fileFormat);
        result = ExtAudioFileGetProperty(xafref, kExtAudioFileProperty_FileDataFormat, &propSize, &fileFormat);
        if (result) {
            printf("ExtAudioFileGetProperty kExtAudioFileProperty_FileDataFormat result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result);
            ExtAudioFileDispose(xafref);   // don't leak the file ref on the error path
            return;
        }
        printf("file %d, native file format\n", i);
        fileFormat.Print();

        // set the client format to be what we want back --
        // this is the same format audio we're giving to the mixer input
        result = ExtAudioFileSetProperty(xafref, kExtAudioFileProperty_ClientDataFormat, sizeof(mClientFormat), &mClientFormat);
        if (result) {
            printf("ExtAudioFileSetProperty kExtAudioFileProperty_ClientDataFormat %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result);
            ExtAudioFileDispose(xafref);
            return;
        }

        // get the file's length in sample frames
        UInt64 numFrames = 0;
        propSize = sizeof(numFrames);
        result = ExtAudioFileGetProperty(xafref, kExtAudioFileProperty_FileLengthFrames, &propSize, &numFrames);
        if (result || numFrames == 0) {
            printf("ExtAudioFileGetProperty kExtAudioFileProperty_FileLengthFrames result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result);
            ExtAudioFileDispose(xafref);
            return;
        }

        // keep track of the largest number of source frames
        if (numFrames > mUserData.maxNumFrames) mUserData.maxNumFrames = numFrames;

        // set up our buffer (explicit narrowing: frames*channels must fit in UInt32)
        mUserData.soundBuffer[i].numFrames = numFrames;
        mUserData.soundBuffer[i].asbd = mClientFormat;

        UInt32 samples = (UInt32)(numFrames * mUserData.soundBuffer[i].asbd.mChannelsPerFrame);
        mUserData.soundBuffer[i].data = (AudioSampleType *)calloc(samples, sizeof(AudioSampleType));
        if (NULL == mUserData.soundBuffer[i].data) {
            // calloc of a whole-file buffer can plausibly fail for large files
            printf("calloc of %u samples failed\n", (unsigned int)samples);
            mUserData.soundBuffer[i].numFrames = 0;
            ExtAudioFileDispose(xafref);
            return;
        }

        // set up an AudioBufferList to read data into
        AudioBufferList bufList;
        bufList.mNumberBuffers = 1;
        bufList.mBuffers[0].mNumberChannels = mUserData.soundBuffer[i].asbd.mChannelsPerFrame;
        bufList.mBuffers[0].mData = mUserData.soundBuffer[i].data;
        bufList.mBuffers[0].mDataByteSize = (UInt32)(samples * sizeof(AudioSampleType));

        // perform a synchronous, sequential read of the audio data out of the file into our allocated data buffer
        UInt32 numPackets = (UInt32)numFrames;
        result = ExtAudioFileRead(xafref, &numPackets, &bufList);
        if (result) {
            printf("ExtAudioFileRead result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result);
            free(mUserData.soundBuffer[i].data);
            mUserData.soundBuffer[i].data = 0;
            mUserData.soundBuffer[i].numFrames = 0;
            ExtAudioFileDispose(xafref);
            return;
        }

        // ExtAudioFileRead may legitimately return fewer frames than requested;
        // record what was actually read so the render callback never walks past
        // the valid portion of the buffer.
        mUserData.soundBuffer[i].numFrames = numPackets;

        // close the file and dispose the ExtAudioFileRef
        ExtAudioFileDispose(xafref);
    }
}
It gets played with:
// --- AUGraph construction: render callbacks -> mixer -> iPodEQ -> RemoteIO ---
// NOTE(review): this is a fragment of a larger method; mGraph, outputNode,
// eqNode, mixerNode, mMixer, mEQ, mClientFormat, mOutputFormat,
// mEQPresetsArray, mUserData, mIsPlaying, renderInput and renderNotification
// are declared elsewhere. `result` is declared exactly once below — the
// original pasted fragment re-declared it near the end, which would not
// compile in a single scope.

// create a new AUGraph
OSStatus result = NewAUGraph(&mGraph);
if (result) { printf("NewAUGraph result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

// create three Audio Component Descriptions for the AUs we want in the graph
// using the CAComponentDescription helper class
// output unit
CAComponentDescription output_desc(kAudioUnitType_Output, kAudioUnitSubType_RemoteIO, kAudioUnitManufacturer_Apple);
// iPodEQ unit
CAComponentDescription eq_desc(kAudioUnitType_Effect, kAudioUnitSubType_AUiPodEQ, kAudioUnitManufacturer_Apple);
// multichannel mixer unit
CAComponentDescription mixer_desc(kAudioUnitType_Mixer, kAudioUnitSubType_MultiChannelMixer, kAudioUnitManufacturer_Apple);

printf("add nodes\n");
// create a node in the graph that is an AudioUnit, using the supplied
// AudioComponentDescription to find and open that unit
result = AUGraphAddNode(mGraph, &output_desc, &outputNode);
if (result) { printf("AUGraphNewNode 1 result %d %4.4s\n", (int)result, (char*)&result); return; }
result = AUGraphAddNode(mGraph, &eq_desc, &eqNode);
if (result) { printf("AUGraphNewNode 2 result %d %4.4s\n", (int)result, (char*)&result); return; }
result = AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
if (result) { printf("AUGraphNewNode 3 result %d %4.4s\n", (int)result, (char*)&result); return; }

// connect a node's output to a node's input
// mixer -> eq -> output
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, eqNode, 0);
if (result) { printf("AUGraphConnectNodeInput result %d %4.4s\n", (int)result, (char*)&result); return; }
result = AUGraphConnectNodeInput(mGraph, eqNode, 0, outputNode, 0);
if (result) { printf("AUGraphConnectNodeInput result %d %4.4s\n", (int)result, (char*)&result); return; }

// open the graph -- AudioUnits are open but not initialized (no resource allocation occurs here)
result = AUGraphOpen(mGraph);
if (result) { printf("AUGraphOpen result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

// grab the audio unit instances from the nodes
result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
if (result) { printf("AUGraphNodeInfo result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }
result = AUGraphNodeInfo(mGraph, eqNode, NULL, &mEQ);
if (result) { printf("AUGraphNodeInfo result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

// set bus count
UInt32 numbuses = 2;
printf("set input bus count %u\n", (unsigned int)numbuses);
result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
if (result) { printf("AudioUnitSetProperty result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

for (UInt32 i = 0; i < numbuses; ++i) {
    // setup render callback struct -- renderInput pulls from mUserData's buffers
    AURenderCallbackStruct rcbs;
    rcbs.inputProc = &renderInput;
    rcbs.inputProcRefCon = &mUserData;

    printf("set AUGraphSetNodeInputCallback\n");
    // set a callback for the specified node's specified input
    result = AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &rcbs);
    if (result) { printf("AUGraphSetNodeInputCallback result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

    printf("set input bus %u, client kAudioUnitProperty_StreamFormat\n", (unsigned int)i);
    // set the input stream format, this is the format of the audio for mixer input
    result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, i, &mClientFormat, sizeof(mClientFormat));
    if (result) { printf("AudioUnitSetProperty result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }
}

printf("get EQ kAudioUnitProperty_FactoryPresets\n");
// get the eq's factory preset list -- this is a read-only CFArray of AUPreset structures
// host owns the returned array and should release it when no longer needed
UInt32 size = sizeof(mEQPresetsArray);
result = AudioUnitGetProperty(mEQ, kAudioUnitProperty_FactoryPresets, kAudioUnitScope_Global, 0, &mEQPresetsArray, &size);
if (result) { printf("AudioUnitGetProperty result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

/* this code can be used if you're interested in dumping out the preset list
printf("iPodEQ Factory Preset List:\n");
UInt8 count = CFArrayGetCount(mEQPresetsArray);
for (int i = 0; i < count; ++i) {
    AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, i);
    CFShow(aPreset->presetName);
}*/

printf("set output kAudioUnitProperty_StreamFormat\n");
// set the output stream format of the mixer
result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &mOutputFormat, sizeof(mOutputFormat));
if (result) { printf("AudioUnitSetProperty result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

printf("set render notification\n");
// add a render notification, this is a callback the graph invokes every render cycle:
// once before the graph's render operation, and once after the render operation is complete
result = AUGraphAddRenderNotify(mGraph, renderNotification, &mUserData);
if (result) { printf("AUGraphAddRenderNotify result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }

printf("AUGraphInitialize\n");
// now that we've set everything up we can initialize the graph, this will also validate the connections
result = AUGraphInitialize(mGraph);
if (result) { printf("AUGraphInitialize result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }
CAShow(mGraph);

// start rendering (originally re-declared `result` here; reuse the existing one)
result = AUGraphStart(mGraph);
if (result) { printf("AUGraphStart result %d %08X %4.4s\n", (int)result, (unsigned int)result, (char*)&result); return; }
mIsPlaying = true;
Finally I receive a callback from:
// Render callback for each mixer input bus: copies inNumberFrames worth of
// interleaved samples from the preloaded source buffer into ioData.
// userData->frameNum is not advanced here — presumably that happens in the
// post-render notification (renderNotification); confirm against that code.
static OSStatus renderInput(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
    SourceAudioBufferDataPtr userData = (SourceAudioBufferDataPtr)inRefCon;
    AudioSampleType *in = userData->soundBuffer[inBusNumber].data;
    AudioSampleType *out = (AudioSampleType *)ioData->mBuffers[0].mData;

    // If the source buffer failed to load, `data` was zeroed by the loader and
    // the memcpy below would dereference NULL (the crash reported above).
    // Render silence instead.
    if (NULL == in || 0 == userData->soundBuffer[inBusNumber].numFrames) {
        SilenceData(ioData);
        *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
        return noErr;
    }

    // Current read position in samples (frames * interleaved channels).
    UInt32 sample = userData->frameNum * userData->soundBuffer[inBusNumber].asbd.mChannelsPerFrame;

    // make sure we don't attempt to render more data than we have available in the source buffers
    // if one buffer is larger than the other, just render silence for that bus until we loop around again
    if ((userData->frameNum + inNumberFrames) > userData->soundBuffer[inBusNumber].numFrames) {
        UInt32 offset = (userData->frameNum + inNumberFrames) - userData->soundBuffer[inBusNumber].numFrames;
        if (offset < inNumberFrames) {
            // copy the last bit of source; the silenced remainder pads the buffer
            SilenceData(ioData);
            memcpy(out, &in[sample], ((inNumberFrames - offset) * userData->soundBuffer[inBusNumber].asbd.mBytesPerFrame));
            return noErr;
        } else {
            // we have no source data at all for this slice
            SilenceData(ioData);
            *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
            return noErr;
        }
    }

    // Common case: a full buffer's worth of source remains.
    memcpy(out, &in[sample], ioData->mBuffers[0].mDataByteSize);
    //printf("render input bus %u from sample %u\n", (unsigned int)inBusNumber, (unsigned int)sample);
    return noErr;
}
Don't read the whole audio file at once. Instead, read only a few seconds at a time into a circular buffer or fifo, and keep filling the buffer from the file (in another thread) to roughly keep up with the rate that the audio render callback is emptying it. The memory required for the buffer can be quite small (maybe a few seconds worth, or even less, to be safe).
Another alternative is to memory-map the entire file (mmap), which doesn't dirty memory and thus doesn't count against an app's memory allowance (until it gets close to the gigabyte range).
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.