
Port Audio and Libsndfile no data?

I might be doing something stupid, but when I use libsndfile in the sound system I'm developing I'm not getting any data at all. sf_readf_float returns a value greater than zero, but looking at the buffer itself, it only contains zeroed data. I'm also using libsamplerate, but for now the audio file I'm testing with and PortAudio are at the same sample rate.
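(For reference, libsndfile can be sanity-checked in isolation, separately from the mixing and playback code. The sketch below is not part of the project; the file path and block size are placeholders. It reads one block with sf_readf_float and counts non-zero samples, which separates a decode problem from a playback problem.)

// Standalone libsndfile check (sketch): "test.wav" and the block size are placeholders.
#include <sndfile.h>
#include <cstdio>
#include <vector>

int main() {
    SF_INFO info{}; // must be zero-initialised before sf_open in read mode
    SNDFILE* snd = sf_open("test.wav", SFM_READ, &info);
    if (!snd) { std::printf("open failed: %s\n", sf_strerror(nullptr)); return 1; }

    const sf_count_t frames = 1024;
    std::vector<float> buff(frames * info.channels); // interleaved samples
    sf_count_t read = sf_readf_float(snd, buff.data(), frames);

    long nonZero = 0;
    for (sf_count_t i = 0; i < read * info.channels; ++i)
        if (buff[i] != 0.0f) ++nonZero;

    std::printf("frames read: %lld, non-zero samples: %ld\n", (long long)read, nonZero);
    sf_close(snd);
    return 0;
}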

PortAudio constructor:

PortAudioSystem::PortAudioSystem(double sampleRate, PaDeviceIndex device, void * hostApiSpecificStreamInfo) {
    this->m_masterVol = this->m_musicVol = this->m_sfxVol = 1.0f;
    this->m_deltaTime = 0.0f;
    this->audioStream = nullptr;
    this->_hasPaError = false;

    this->m_sampleRate = sampleRate;

    PaStreamParameters streamParams;
    streamParams.device = device; //set device to use
    streamParams.hostApiSpecificStreamInfo = hostApiSpecificStreamInfo;
    streamParams.sampleFormat = paFloat32; // 32bit float format
    streamParams.suggestedLatency = 0.2; //200 ms ought to satisfy even the worst sound card
    streamParams.channelCount = 2; //number of channels (1: mono, 2: left/right, etc)

    int err = 0;

    err = Pa_OpenStream(
        &this->audioStream,
        0, // no input
        &streamParams,
        sampleRate,
        paFramesPerBufferUnspecified, // let portaudio choose the buffersize
        paNoFlag,/* no special modes (clip off, dither off) */
        PortAudioSystem::paCallbackCommon,
        this
    );

    if (err != paNoError) {
        pushPaError(err, Pa_GetErrorText(err));
        this->audioStream = nullptr;
    }

    int src_err;
    this->m_srcState = src_new(SRC_SINC_FASTEST,2,&src_err); //create a new sample rate converter

    if (this->m_srcState == NULL) {
        //src_error(this->m_srcState);
        this->pushPaError(src_err, src_strerror(src_err));
    }

    this->m_nextPlayID = 0;
}
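The code that initialises PortAudio and starts the stream is not shown; the callback only runs once the stream has been started. A minimal usage sketch, assuming 44100 Hz, the default output device, and that the stream handle (or a start method wrapping it) is reachable from outside the class:

// Usage sketch; the real project presumably wraps the stream handle in start/stop methods.
Pa_Initialize();                                    // must precede Pa_OpenStream

PortAudioSystem audio(44100.0, Pa_GetDefaultOutputDevice(), nullptr);

PaError err = Pa_StartStream(audio.audioStream);    // the callback starts firing here
if (err != paNoError) std::printf("%s\n", Pa_GetErrorText(err));

// ... queue files and let them play ...

Pa_StopStream(audio.audioStream);
Pa_CloseStream(audio.audioStream);
Pa_Terminate();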

PortAudio callback:

int PortAudioSystem::paCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void *userData) {
    this->m_deltaTime = (float)((double)framesPerBuffer / this->m_sampleRate); //get delta time based on sample rate and number of frames per buffer
    if (this->m_playingAudioFiles.size() == 0) return paContinue; //if we dont have any playing audio files, skip.

    AudioFrame::_2 *outFrame = (AudioFrame::_2*)outputBuffer; //convert the port audio buffer to an audio frame buffer.

    //zero out the output buffer.
    for (unsigned long zeroI = 0; zeroI < framesPerBuffer; zeroI++) {
        outFrame[zeroI].left = 0.0f;
        outFrame[zeroI].right = 0.0f;
    }

    //for (PlayingAudioFile playingfile : this->m_playingAudioFiles) playingfile.audioFile->Seek(playingfile.currentFrame); //seek to the current position of the file

    AudioFrame::_2 *framesOut = new AudioFrame::_2[framesPerBuffer]; //create a buffer for frames out.
    //float * fFramesOut = new float[framesPerBuffer * 2]; //create a float buffer for frames out (*2 for left and right channel)

    //prep common data for sample rate conversion.
    SRC_DATA src_data;
    src_data.output_frames = framesPerBuffer; //set output frames to be the max output frames to have.
    src_data.end_of_input = 0; //we are not at the end of the file (change for specific audio file system [function?]).
    src_data.data_out = (float *)framesOut; //set our output frames to be the buffer that we created.

    flow.lock(); //mutex lock
    for (std::pair<long, PlayingAudioFile> entry : this->m_playingAudioFiles){ //for each playing audio file
        if (entry.second.paused) continue; //if the audio file is paused, skip it.

        PlayingAudioFile& playingfile = entry.second;
        playingfile.audioFile->Seek(playingfile.currentFrame); //seek to the current position of the file

        src_data.src_ratio = this->m_sampleRate / playingfile.audioFile->GetSampleRate(); //get the ratio of the sample rate conversion

        if (src_data.src_ratio == 1) { //if we are 1 to 1, dont do sample rate conversion.
            src_data.input_frames_used = src_data.output_frames_gen = (long)playingfile.audioFile->GetFrames(framesPerBuffer, (float *)framesOut);
        } else { //otherwise convert to port audio system's sample rate.
            //adjust the number of frames to read based on ratio.
            long long framesToRead = (long long)(framesPerBuffer / src_data.src_ratio); //divide as doubles so a ratio below 1.0 does not truncate to zero

            AudioFrame::_2* framesIn = new AudioFrame::_2[framesToRead]; //create a buffer for frames in.
            //float * fFramesIn = new float[framesToRead * 2]; //create a float buffer to read in frames. (*2 for left and right audio channel)
            src_data.data_in = (float *)framesIn; //set the frame in buffer

            src_data.input_frames = (long)playingfile.audioFile->GetFrames(framesToRead, (float *)framesIn); //read the frames; //set the number of frames that were read

            if (src_data.input_frames == 0) {
                delete[] framesIn; //free up the frame in buffer to prevent memory leaks.
                continue; //if we have no data, skip
            }

            src_reset(this->m_srcState); //reset the sample rate conversion state
            int src_err = src_process(this->m_srcState, &src_data); //convert the sample rate

            if (src_err != 0) this->pushPaError(src_err, src_strerror(src_err)); //if we have an error, push it back.

            delete[] framesIn; //free up the frame in buffer
        }

        //AudioFrame::_2* framesOut = (AudioFrame::_2*) fFramesOut; //convert the float buffer to an audio frame buffer for easier access to the channels

        for (unsigned long outFrameI = 0; outFrameI < src_data.output_frames_gen; outFrameI++) { //for each frame (based on how many frames were generated from conversion [possibly under framesPerBuffer])
            //individual frame data;
            AudioFrame::_2 frame = framesOut[outFrameI];

            //adjust volume from master volume
            frame.left *= this->m_masterVol;
            frame.right *= this->m_masterVol;

            //adjust volume
            switch (playingfile.channel) { //based on sound channel
            case eAT_Music: //if we are a music channel
                frame.left *= this->m_musicVol;
                frame.right *= this->m_musicVol;
                break;
            case eAT_SFX: //if we are a sfx channel
                frame.left *= this->m_sfxVol;
                frame.right *= this->m_sfxVol;
                break;
            }

            if (frame.left != 0.0f){ //make sure we have data.
                if (outFrame[outFrameI].left == 0.0f) { //if the output frame has no data
                    outFrame[outFrameI].left = frame.left; //set the audio
                } else { //if the output frame has data
                    outFrame[outFrameI].left += frame.left; //mix the channels
                    outFrame[outFrameI].left /= 2; //ghetto way of making sure we dont clip?
                }
            }
            if (frame.right != 0.0f){ //make sure we have data.
                if (outFrame[outFrameI].right == 0.0f) { //if the output frame has no data
                    outFrame[outFrameI].right = frame.right; //set the audio
                } else { //if the output frame has data 
                    outFrame[outFrameI].right += frame.right; //mix the channels
                    outFrame[outFrameI].right /= 2; //ghetto way of making sure we dont clip?
                }
            }
        }

        playingfile.currentFrame += src_data.input_frames_used; //update the current position time based on how many frames were converted;

        if (playingfile.currentFrame >= playingfile.endFrame && !playingfile.loop) { //if we are finished playing.
            //mark for removal.
            m_stoppedAudioFiles.push_back(entry.first);
        } else {
            //loop the file
            playingfile.currentFrame = playingfile.startFrame;
        }
    }

    delete [] framesOut;

    //removed stopped audio files.
    while (m_stoppedAudioFiles.size() > 0) {
        this->m_playingAudioFiles.erase(m_stoppedAudioFiles.back());
        m_stoppedAudioFiles.pop_back();
    }

    flow.unlock(); //mutex unlock

    return paContinue; //continue playback
}
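paCallbackCommon, the static trampoline passed to Pa_OpenStream, is not shown in the question; it presumably just recovers the instance from userData and forwards to the member callback above, along these lines:

// Presumed shape of the static trampoline (not shown in the question).
int PortAudioSystem::paCallbackCommon(const void* inputBuffer, void* outputBuffer,
                                      unsigned long framesPerBuffer,
                                      const PaStreamCallbackTimeInfo* timeInfo,
                                      PaStreamCallbackFlags statusFlags,
                                      void* userData) {
    // userData is the `this` pointer passed to Pa_OpenStream in the constructor.
    return static_cast<PortAudioSystem*>(userData)->paCallback(
        inputBuffer, outputBuffer, framesPerBuffer, timeInfo, statusFlags, userData);
}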

AudioFile_Libsnd:

bool AudioFile_Libsnd::Seek(long long position){
    if (!this->_hasSfError) {
        if (sf_seek(this->sndFile, position, SF_SEEK_SET) == -1) {
            int err = sf_error(this->sndFile);
            pushSfError(err, sf_error_number(err));
            return false;
        }
        return true;
    }
    return false;
}

long long AudioFile_Libsnd::GetFrames(long long framesToRead, float buff[]){
    if (this->_hasSfError) return 0;

    long long framesRead = sf_readf_float(this->sndFile, buff, framesToRead);

    return framesRead;
}

bool AudioFile_Libsnd::GetFrame(float* frame) {
    return sf_readf_float(this->sndFile, frame, 1) == 1;
}
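How the SNDFILE handle is opened is not shown either. A hypothetical open sketch is below; it is worth including because the callback treats every file as interleaved stereo (AudioFrame::_2), so sfInfo.channels is worth verifying when the output stays silent.

// Hypothetical sketch of how AudioFile_Libsnd might open its file
// (the real constructor is not shown in the question).
bool AudioFile_Libsnd_Open(const char* path, SNDFILE*& sndFile, SF_INFO& sfInfo) {
    sfInfo = SF_INFO{};                    // libsndfile expects a zeroed SF_INFO for reads
    sndFile = sf_open(path, SFM_READ, &sfInfo);
    if (sndFile == nullptr) return false;  // sf_strerror(nullptr) describes the failure
    // sfInfo.samplerate, sfInfo.channels and sfInfo.frames are now valid.
    // The callback above assumes two interleaved channels per frame, so a
    // mono file would need to be handled (or rejected) here.
    return true;
}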

AudioFrame::_2:

    struct _2 {
        float left;
        float right;

        _2() : left(0.0f), right(0.0f) {}
    };

PlayingAudioFile:

struct PlayingAudioFile {
    IAudioFile* audioFile;
    long long currentFrame;
    long long startFrame;
    long long endFrame;
    bool loop;
    bool paused;
    EAudioChannel channel;
};
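The method that adds entries to m_playingAudioFiles is not part of the question; a hypothetical sketch using only the fields above (playFile, GetFrameCount, and reusing the `flow` lock are assumptions) might look like:

// Hypothetical registration sketch; only the struct fields come from the question.
long PortAudioSystem::playFile(IAudioFile* file, EAudioChannel channel, bool loop) {
    PlayingAudioFile entry;
    entry.audioFile    = file;
    entry.startFrame   = 0;
    entry.currentFrame = 0;
    entry.endFrame     = file->GetFrameCount(); // hypothetical accessor
    entry.loop         = loop;
    entry.paused       = false;
    entry.channel      = channel;

    flow.lock();                                 // same lock the callback takes
    long id = this->m_nextPlayID++;
    this->m_playingAudioFiles[id] = entry;
    flow.unlock();
    return id;
}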

I know the whole thing is a bit long, but right now I can't see what I'm doing wrong... I believe I'm constructing the buffers correctly, but I've tried plain float pointers, both as array definitions and allocated with new, and that didn't work. I've also tried processing frame by frame with a for loop, saving into a single float, but that obviously gave me stack errors around the variable... Any help would be greatly appreciated.

Fixed it. I was being stupid. This was the culprit:

    if (playingfile.currentFrame >= playingfile.endFrame && !playingfile.loop) { //if we are finished playing.
        //mark for removal.
        m_stoppedAudioFiles.push_back(entry.first);
    } else {
        //loop the file
        playingfile.currentFrame = playingfile.startFrame;
    }

Because this would reset the current frame to the start frame every single time, I changed the 'else' to 'else if (playingsource.currentFrame >= playingsource.endFrame && playingsource.loop)'.
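Applied to the quoted block (using the variable name from the quoted code), the corrected handling looks like this, so the position is only reset once the file has actually reached its end frame:

    if (playingfile.currentFrame >= playingfile.endFrame && !playingfile.loop) { //finished and not looping
        //mark for removal.
        m_stoppedAudioFiles.push_back(entry.first);
    } else if (playingfile.currentFrame >= playingfile.endFrame && playingfile.loop) {
        //loop the file only once it has actually reached the end
        playingfile.currentFrame = playingfile.startFrame;
    }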
