
Writing buffer of audio samples to aac file using ExtAudioFileWrite for iOS

Update: I've figured this out and have posted my solution as an answer to my own question (below).

I'm trying to write a simple buffer of audio samples to a file in AAC format using ExtAudioFileWrite.

I've managed to do this with the code below for writing a mono buffer to a .wav file. However, I can't get it to work for stereo or for AAC files, which is what I actually want.

Here's what I have so far...

CFStringRef fPath;
fPath = CFStringCreateWithCString(kCFAllocatorDefault,
                                  "/path/to/my/audiofile/audiofile.wav",
                                  kCFStringEncodingMacRoman);


OSStatus err;

int mChannels = 1;
UInt32 totalFramesInFile = 100000;

Float32 *outputBuffer = (Float32 *)malloc(sizeof(Float32) * (totalFramesInFile*mChannels)); 


////////////// Set up Audio Buffer List ////////////

AudioBufferList outputData; 
outputData.mNumberBuffers = 1;
outputData.mBuffers[0].mNumberChannels = mChannels; 
outputData.mBuffers[0].mDataByteSize = 4 * totalFramesInFile * mChannels;
outputData.mBuffers[0].mData = outputBuffer;

Float32 audioFile[totalFramesInFile*mChannels];


for (int i = 0;i < totalFramesInFile*mChannels;i++)
{
    audioFile[i] = ((Float32)(rand() % 100))/100.0;
    audioFile[i] = audioFile[i]*0.2;
}

outputData.mBuffers[0].mData = &audioFile;


CFURLRef fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,fPath,kCFURLPOSIXPathStyle,false);

ExtAudioFileRef audiofileRef;

// WAVE FILES

AudioFileTypeID fileType = kAudioFileWAVEType;
AudioStreamBasicDescription clientFormat;
clientFormat.mSampleRate = 44100.0;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = 12; // = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
clientFormat.mBitsPerChannel = 16;
clientFormat.mChannelsPerFrame = mChannels;
clientFormat.mBytesPerFrame = 2*clientFormat.mChannelsPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerPacket = 2*clientFormat.mChannelsPerFrame;


// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)fileURL, fileType, &clientFormat, NULL, kAudioFileFlags_EraseFile, &audiofileRef);

if (err != noErr)
{
    cout << "Problem when creating audio file: " << err << "\n";
}

// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);

if (err != noErr)
{
    cout << "Problem setting audio format: " << err << "\n";
}

UInt32 rFrames = (UInt32)totalFramesInFile;
// write the data
err = ExtAudioFileWrite(audiofileRef, rFrames, &outputData);

if (err != noErr)
{
    cout << "Problem writing audio file: " << err << "\n";
}

// close the file
ExtAudioFileDispose(audiofileRef);



NSLog(@"Done!");

My specific questions are:

  • How do I set up the AudioStreamBasicDescription for AAC?
  • Why can't I get stereo to work properly here? If I set the number of channels ('mChannels') to 2, I get the left channel correctly but the right channel comes out distorted.

I'd really appreciate any help. I think I've read just about every page I can find on this and am none the wiser; while there are similar questions, they usually derive the AudioStreamBasicDescription parameters from some input audio file, so I can't see the resulting values. The Apple documentation hasn't helped either.
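Incidentally, for anyone who does have an input file to hand, one way to see those values is to open the file with ExtAudioFile and print the ASBD it reports. A minimal sketch (the input path is just a placeholder, and this snippet is separate from the code above):

CFURLRef inURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                               CFSTR("/path/to/some/existing/input.m4a"),
                                               kCFURLPOSIXPathStyle, false);

ExtAudioFileRef inFileRef;
OSStatus openErr = ExtAudioFileOpenURL(inURL, &inFileRef);

if (openErr == noErr)
{
    // ask the ExtAudioFile API for the file's own data format
    AudioStreamBasicDescription fileFormat;
    UInt32 propSize = sizeof(fileFormat);
    ExtAudioFileGetProperty(inFileRef, kExtAudioFileProperty_FileDataFormat, &propSize, &fileFormat);

    cout << "mSampleRate:       " << fileFormat.mSampleRate       << "\n";
    cout << "mFormatID:         " << fileFormat.mFormatID         << "\n";
    cout << "mFormatFlags:      " << fileFormat.mFormatFlags      << "\n";
    cout << "mBytesPerPacket:   " << fileFormat.mBytesPerPacket   << "\n";
    cout << "mFramesPerPacket:  " << fileFormat.mFramesPerPacket  << "\n";
    cout << "mBytesPerFrame:    " << fileFormat.mBytesPerFrame    << "\n";
    cout << "mChannelsPerFrame: " << fileFormat.mChannelsPerFrame << "\n";
    cout << "mBitsPerChannel:   " << fileFormat.mBitsPerChannel   << "\n";

    ExtAudioFileDispose(inFileRef);
}

CFRelease(inURL);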

Many thanks in advance,

Adam

OK, after some exploration I've figured it out. I've wrapped it up as a function that writes random noise to a file. Specifically, it can:

  • write .wav or .m4a files
  • write mono or stereo in either format
  • write the file to a specified path

The function arguments are:

  • the path of the audio file to be created
  • the number of channels (maximum 2)
  • a boolean: compress with m4a (if false, PCM is used)

For a stereo M4A file, the function should be called like this:

writeNoiseToAudioFile("/path/to/my/audiofile.m4a",2,true);

The source of the function follows. I've tried to comment it as much as possible. I hope it's correct; it certainly works for me, but please do say "Adam, you've done this wrong" if there's anything I've missed. Good luck! Here's the code:

void writeNoiseToAudioFile(char *fName,int mChannels,bool compress_with_m4a)
{
OSStatus err; // to record errors from ExtAudioFile API functions

// create file path as CStringRef
CFStringRef fPath;
fPath = CFStringCreateWithCString(kCFAllocatorDefault,
                                  fName,
                                  kCFStringEncodingMacRoman);


// specify total number of samples per channel
UInt32 totalFramesInFile = 100000;      

/////////////////////////////////////////////////////////////////////////////
////////////// Set up Audio Buffer List For Interleaved Audio ///////////////
/////////////////////////////////////////////////////////////////////////////

AudioBufferList outputData; 
outputData.mNumberBuffers = 1;
outputData.mBuffers[0].mNumberChannels = mChannels;    
outputData.mBuffers[0].mDataByteSize = sizeof(AudioUnitSampleType)*totalFramesInFile*mChannels;



/////////////////////////////////////////////////////////////////////////////
//////// Synthesise Noise and Put It In The AudioBufferList /////////////////
/////////////////////////////////////////////////////////////////////////////

// create an array to hold our audio
AudioUnitSampleType audioFile[totalFramesInFile*mChannels];

// fill the array with random numbers (white noise)
for (int i = 0;i < totalFramesInFile*mChannels;i++)
{
    audioFile[i] = ((AudioUnitSampleType)(rand() % 100))/100.0;
    audioFile[i] = audioFile[i]*0.2;
    // (yes, I know this noise has a DC offset, bad)
}

// set the AudioBuffer to point to the array containing the noise
outputData.mBuffers[0].mData = &audioFile;


/////////////////////////////////////////////////////////////////////////////
////////////////// Specify The Output Audio File Format /////////////////////
/////////////////////////////////////////////////////////////////////////////


// the client format will describe the output audio file
AudioStreamBasicDescription clientFormat;

// the file type identifier tells the ExtAudioFile API what kind of file we want created
AudioFileTypeID fileType;

// if compress_with_m4a is true then set up for the m4a file format
if (compress_with_m4a)
{
    // the file type identifier tells the ExtAudioFile API what kind of file we want created
    // this creates a m4a file type
    fileType = kAudioFileM4AType;

    // Here we specify the M4A format
    clientFormat.mSampleRate         = 44100.0;
    clientFormat.mFormatID           = kAudioFormatMPEG4AAC;
    clientFormat.mFormatFlags        = kMPEG4Object_AAC_Main;
    clientFormat.mChannelsPerFrame   = mChannels;
    clientFormat.mBytesPerPacket     = 0;
    clientFormat.mBytesPerFrame      = 0;
    clientFormat.mFramesPerPacket    = 1024;
    clientFormat.mBitsPerChannel     = 0;
    clientFormat.mReserved           = 0;
}
else // else encode as PCM
{
    // this creates a wav file type
    fileType = kAudioFileWAVEType;

    // This function automatically generates the audio format according to certain arguments
    FillOutASBDForLPCM(clientFormat,44100.0,mChannels,32,32,true,false,false);
}



/////////////////////////////////////////////////////////////////////////////
///////////////// Specify The Format of Our Audio Samples ///////////////////
/////////////////////////////////////////////////////////////////////////////

// the local format describes the format the samples we will give to the ExtAudioFile API
AudioStreamBasicDescription localFormat;
FillOutASBDForLPCM (localFormat,44100.0,mChannels,32,32,true,false,false);



/////////////////////////////////////////////////////////////////////////////
///////////////// Create the Audio File and Open It /////////////////////////
/////////////////////////////////////////////////////////////////////////////

// create the audio file reference
ExtAudioFileRef audiofileRef;

// create a fileURL from our path
CFURLRef fileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,fPath,kCFURLPOSIXPathStyle,false);

// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)fileURL, fileType, &clientFormat, NULL, kAudioFileFlags_EraseFile, &audiofileRef);

if (err != noErr)
{
    cout << "Problem when creating audio file: " << err << "\n";
}


/////////////////////////////////////////////////////////////////////////////
///// Tell the ExtAudioFile API what format we'll be sending samples in /////
/////////////////////////////////////////////////////////////////////////////

// Tell the ExtAudioFile API what format we'll be sending samples in 
err = ExtAudioFileSetProperty(audiofileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(localFormat), &localFormat);

if (err != noErr)
{
    cout << "Problem setting audio format: " << err << "\n";
}

/////////////////////////////////////////////////////////////////////////////
///////// Write the Contents of the AudioBufferList to the AudioFile ////////
/////////////////////////////////////////////////////////////////////////////

UInt32 rFrames = (UInt32)totalFramesInFile;
// write the data
err = ExtAudioFileWrite(audiofileRef, rFrames, &outputData);

if (err != noErr)
{
    cout << "Problem writing audio file: " << err << "\n";
}


/////////////////////////////////////////////////////////////////////////////
////////////// Close the Audio File and Get Rid Of The Reference ////////////
/////////////////////////////////////////////////////////////////////////////

// close the file
ExtAudioFileDispose(audiofileRef);


NSLog(@"Done!");
}

Don't forget to import the AudioToolbox framework and include the header file:

#import <AudioToolbox/AudioToolbox.h>
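For reference, a few more example calls covering the other options described above (the paths are just placeholders):

writeNoiseToAudioFile("/path/to/my/mono_noise.wav", 1, false);   // mono PCM .wav
writeNoiseToAudioFile("/path/to/my/stereo_noise.wav", 2, false); // stereo PCM .wav
writeNoiseToAudioFile("/path/to/my/stereo_noise.m4a", 2, true);  // stereo AAC .m4a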
