簡體   English   中英

在 iOS 中播放原始 PCM 數據緩沖區時音頻失真

[英]Distorted audio when playing raw PCM data buffer in iOS

我正在嘗試使用 iOS 中的音頻單元播放音頻緩沖區。 音頻緩沖區來自 C 庫,該庫從 Playstation 4 接收音頻並使用 Opus 對其進行解碼。 音頻緩沖區的格式是 PCM int16_t。

使用 Audio Unit 和 TPCircularBuffer,我已經完成了播放聲音。 然而,它嚴重扭曲且不干凈。

這是我的音頻單元 class 的設置

init() {
    // Size the ring buffer for ~1 second of 48 kHz stereo Int16 PCM:
    // 48000 frames * 2 channels * 2 bytes = 192 000 bytes.
    // The original 960-byte capacity held under 5 ms of audio, so the
    // producer constantly wrapped/underran the ring — a primary cause of
    // the distorted playback.
    let capacityBytes = UInt32(48000 * 2 * MemoryLayout<Int16>.size)
    _TPCircularBufferInit(&buffer, capacityBytes,
                          MemoryLayout<TPCircularBuffer>.size)
    setupAudio()
}

/// Enqueues decoded PCM bytes for the render callback to consume.
/// - Parameter data: interleaved Int16 PCM, 48 kHz stereo (the format set
///   on the audio unit in `setupAudio()`).
func play(data: NSMutableData) {
    // TPCircularBufferProduceBytes returns false when there is not enough
    // free space and the bytes were dropped; surface that instead of
    // silently losing audio.
    if !TPCircularBufferProduceBytes(&buffer, data.bytes, UInt32(data.length)) {
        print("AudioPlayer: ring buffer full, dropped \(data.length) bytes")
    }
}

/// Creates and starts a RemoteIO output unit that pulls 48 kHz stereo
/// Int16 PCM from `performPlayback` via a render callback.
private func setupAudio() {
    do {
        // The playback category must be set before activating the session;
        // log failures instead of swallowing them silently (the original
        // empty `catch` hid session configuration errors).
        try AVAudioSession.sharedInstance().setCategory(.playback)
        try AVAudioSession.sharedInstance().setActive(true, options: [])
    } catch {
        print("AVAudioSession setup error: \(error)")
    }
    
    var audioComponentDesc = AudioComponentDescription(componentType: OSType(kAudioUnitType_Output),
                                                       componentSubType: OSType(kAudioUnitSubType_RemoteIO),
                                                       componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
                                                       componentFlags: 0, componentFlagsMask: 0)
    // Guard instead of force-unwrapping: a nil component would otherwise crash.
    guard let inputComponent = AudioComponentFindNext(nil, &audioComponentDesc) else {
        print("RemoteIO audio component not found")
        return
    }
    status = AudioComponentInstanceNew(inputComponent, &audioUnit)
    
    if status != noErr {
        print("Audio Component Instance New Error \(String(describing: status))")
    }
    
    // Output format the render callback delivers: 48 kHz, stereo,
    // interleaved, packed, signed 16-bit integer PCM.
    var audioDescription = AudioStreamBasicDescription()
    audioDescription.mSampleRate = 48000
    audioDescription.mFormatID = kAudioFormatLinearPCM
    audioDescription.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger
    audioDescription.mChannelsPerFrame = 2
    audioDescription.mFramesPerPacket = 1
    audioDescription.mBitsPerChannel = 16
    audioDescription.mBytesPerFrame = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame
    audioDescription.mBytesPerPacket = audioDescription.mBytesPerFrame * audioDescription.mFramesPerPacket
    audioDescription.mReserved = 0
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input, 0, &audioDescription,
                                  UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
    
    if status != noErr {
        // Original message was a copy-paste of the EnableIO error text.
        print("Set stream format error \(String(describing: status))")
    }
    
    // Disable recording on bus 1 — this unit is playback-only.
    var flag: UInt32 = 0
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input, 1, &flag,
                                  UInt32(MemoryLayout.size(ofValue: flag)))
    
    if status != noErr {
        print("Enable IO for playback error \(String(describing: status))")
    }
    
    // Register the C render callback; refCon carries an unretained `self`
    // (safe because this player outlives the audio unit it owns).
    var outputCallbackStruct = AURenderCallbackStruct()
    outputCallbackStruct.inputProc = performPlayback
    outputCallbackStruct.inputProcRefCon = UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque())
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global, 0, &outputCallbackStruct,
                                  UInt32(MemoryLayout<AURenderCallbackStruct>.size))
    if status != noErr {
        print("Set render callback error \(String(describing: status))")
    }
    status = AudioUnitInitialize(audioUnit)
    if status != noErr {
        print("Failed to initialize audio unit \(String(describing: status))")
    }
    status = AudioOutputUnitStart(audioUnit)
    if status != noErr {
        print("Failed to start output unit \(String(describing: status))")
    }
}

播放 function

/// Render callback: fills the hardware output buffer with PCM pulled from
/// the player's circular buffer.
/// On underrun the unfilled tail of the buffer is zeroed — the original
/// left it uninitialized, so the hardware played stale garbage, which is a
/// direct cause of the distortion described in the question.
private func performPlayback(clientData: UnsafeMutableRawPointer,_ ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>, inTimeStamp: UnsafePointer<AudioTimeStamp>,
                         inBufNumber: UInt32, inNumberFrames: UInt32, ioData: UnsafeMutablePointer<AudioBufferList>?) -> OSStatus {
    // refCon was set with passUnretained(self) in setupAudio().
    let player = Unmanaged<AudioPlayer>.fromOpaque(UnsafeRawPointer(clientData)).takeUnretainedValue()
    guard let bufferList = ioData else { return noErr }
    let outBuffer = bufferList.pointee.mBuffers
    let bytesNeeded = outBuffer.mDataByteSize
    
    var availableBytes: UInt32 = 0
    let tail = TPCircularBufferTail(&player.buffer, &availableBytes)
    let bytesToCopy = min(bytesNeeded, availableBytes)
    
    if let tail = tail, bytesToCopy > 0 {
        memcpy(outBuffer.mData, tail, Int(bytesToCopy))
        TPCircularBufferConsume(&player.buffer, bytesToCopy)
    }
    
    // Underrun: pad the remainder with silence instead of leaving whatever
    // bytes the previous render cycle left behind.
    if bytesToCopy < bytesNeeded, let base = outBuffer.mData {
        memset(base.advanced(by: Int(bytesToCopy)), 0, Int(bytesNeeded - bytesToCopy))
    }
    return noErr
}

這里是我調用該回調 function 的地方

// Bridges decoded Opus output (interleaved Int16 PCM) from the C library
// into the shared AudioPlayer's ring buffer.
// NOTE(review): the closing brace of this function is missing from the
// posted snippet — presumably truncated when the question was pasted.
private func StreamAudioFrameCallback(buffers: UnsafeMutablePointer<Int16>?,
                                  samplesCount: Int, user: UnsafeMutableRawPointer?) {
let decodedData = NSMutableData()
if let buffer = buffers, samplesCount > 0 {
    // samplesCount appears to count Int16 samples (not frames) — TODO
    // confirm against the C library's callback contract.
    let decodedDataSize = samplesCount * MemoryLayout<opus_int16>.size
    decodedData.append(buffer, length: decodedDataSize)
    AudioPlayer.shared.play(data: decodedData)
}

有人熟悉這個嗎? 任何幫助表示贊賞。

您可能不想開始播放,直到循環緩沖區中的內容超過了覆蓋傳入數據速率中的最大時間抖動所需的數量。 嘗試等到循環緩沖區中有半秒鍾的音頻,然后再開始播放。 然后試驗填充量。

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM