[英]Trying to create a wav file with "Capturing a Stream" from Win32 WASAPI C++
我在嘗試解釋 getBuffer 函數返回的 (BYTE* pData) 音頻數據時遇到問題。 我試圖將我的麥克風捕獲的音頻寫入一個 wav 文件,我自己正在構建所有這些文件,目的是更好地理解音頻設備、音頻數據和音頻格式。
以下是所有代碼,其中大部分代碼來自 Windows 文檔或引用自 Windows 文檔: https://docs.microsoft.com/en-us/windows/win32/coreaudio/capturing-a-stream 。 試圖讓事情盡可能簡單,沒有什么花哨的。 此代碼捕獲了幾秒鍾的麥克風音頻,但播放時可以聽到失真且充滿雜音的聲音。 失真是由於我將 pData 內容寫入文件的方式造成的嗎?
Main.cpp 注意 - 請忽略整個地方的 'cout',僅用於調試
#pragma once
// NOTE(review): #pragma once has no effect in a .cpp file — harmless, but
// it belongs in headers only.
#include "MyAudioSink.h"
#include <windows.h>
// REFERENCE_TIME time units per second and per millisecond
// (REFERENCE_TIME is expressed in 100-nanosecond units.)
#define REFTIMES_PER_SEC 10000000
#define REFTIMES_PER_MILLISEC 10000
// Jump to the shared cleanup label on any failing HRESULT.
#define EXIT_ON_ERROR(hres) \
if (FAILED(hres)) { goto Exit; }
// Release a COM interface pointer exactly once and null it out so a
// later release is a no-op.
#define SAFE_RELEASE(punk) \
if ((punk) != NULL) \
{ (punk)->Release(); (punk) = NULL; }
// COM class / interface IDs passed to CoCreateInstance, Activate and
// GetService in RecordAudioStream below.
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
// Forward declaration; the definition follows main().
HRESULT RecordAudioStream(MyAudioSink * pMySink);
int main() {
HRESULT hr;
hr = CoInitialize(nullptr);
//declare MyAudioSink object
MyAudioSink pMySink;
hr = RecordAudioStream(&pMySink);
cout << "done";
}
//-----------------------------------------------------------
// Record an audio stream from the default audio capture
// device. The RecordAudioStream function allocates a shared
// buffer big enough to hold one second of PCM audio data.
// The function uses this buffer to stream data from the
// capture device. The main loop runs every 1/2 second.
//-----------------------------------------------------------
// Capture loop: opens the default capture endpoint in shared mode,
// pulls packets from the endpoint buffer roughly every half buffer
// duration, and hands each packet to pMySink->CopyData until the sink
// sets bDone. On any failure, control jumps to Exit for cleanup and
// the partial file is never finalized.
HRESULT RecordAudioStream(MyAudioSink* pMySink)
{
HRESULT hr;
// Ask for a 1-second endpoint buffer (REFERENCE_TIME = 100 ns units).
REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;
REFERENCE_TIME hnsActualDuration;
UINT32 bufferFrameCount;
UINT32 numFramesAvailable;
IMMDeviceEnumerator* pEnumerator = NULL;
IMMDevice* pDevice = NULL;
IAudioClient* pAudioClient = NULL;
IAudioCaptureClient* pCaptureClient = NULL;
WAVEFORMATEX* pwfx = NULL;
UINT32 packetLength = 0;
BOOL bDone = FALSE;
BYTE* pData;
DWORD flags;
cout << "starting...";
// Create the multimedia device enumerator.
hr = CoCreateInstance(
CLSID_MMDeviceEnumerator, NULL,
CLSCTX_ALL, IID_IMMDeviceEnumerator,
(void**)&pEnumerator);
cout << "test1" ;
EXIT_ON_ERROR(hr)
// Default capture device (microphone) for the eConsole role.
hr = pEnumerator->GetDefaultAudioEndpoint(
eCapture, eConsole, &pDevice);
cout << "test2" ;
EXIT_ON_ERROR(hr)
// Activate an IAudioClient on that endpoint.
hr = pDevice->Activate(
IID_IAudioClient, CLSCTX_ALL,
NULL, (void**)&pAudioClient);
cout << "test3" ;
EXIT_ON_ERROR(hr)
// Shared-mode mix format, allocated by COM (freed with CoTaskMemFree
// at Exit). NOTE(review): this is typically WAVE_FORMAT_EXTENSIBLE
// wrapping 32-bit IEEE float, NOT integer PCM — the sink's WAV header
// must reflect that or the recording plays back as loud static.
hr = pAudioClient->GetMixFormat(&pwfx);
cout << "test4" ;
EXIT_ON_ERROR(hr)
// Shared mode, event-less (polling) capture with the requested
// buffer duration, using the mix format unmodified.
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
0,
hnsRequestedDuration,
0,
pwfx,
NULL);
cout << "test5" ;
EXIT_ON_ERROR(hr)
// Get the size of the allocated buffer.
hr = pAudioClient->GetBufferSize(&bufferFrameCount);
cout << "test6" ;
EXIT_ON_ERROR(hr)
// Capture-side service interface used to read packets.
hr = pAudioClient->GetService(
IID_IAudioCaptureClient,
(void**)&pCaptureClient);
cout << "test7" ;
EXIT_ON_ERROR(hr)
// Calculate the actual duration of the allocated buffer.
// (frames / frames-per-second, scaled to 100-ns units.)
hnsActualDuration = (double)REFTIMES_PER_SEC *
bufferFrameCount / pwfx->nSamplesPerSec;
// Notify the audio sink which format to use.
hr = pMySink->SetFormat(pwfx);
cout << "test8" ;
EXIT_ON_ERROR(hr)
//initialize the wav file with the specifications set by SetFormat
hr = pMySink->_Initialize_File();
cout << "test9" ;
EXIT_ON_ERROR(hr)
hr = pAudioClient->Start(); // Start recording.
cout << "test10" ;
EXIT_ON_ERROR(hr)
cout << "about to run while...";
// Each loop fills about half of the shared buffer.
while (bDone == FALSE)
{
// Sleep for half the buffer duration.
Sleep(hnsActualDuration / REFTIMES_PER_MILLISEC / 2);
// Drain every packet currently queued in the endpoint buffer.
hr = pCaptureClient->GetNextPacketSize(&packetLength);
EXIT_ON_ERROR(hr)
while (packetLength != 0)
{
// Get the available data in the shared buffer.
hr = pCaptureClient->GetBuffer(
&pData,
&numFramesAvailable,
&flags, NULL, NULL);
EXIT_ON_ERROR(hr)
if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
{
cout << "silent";
// NOTE(review): the sink's CopyData must treat NULL as "write
// numFramesAvailable frames of silence", not dereference it.
pData = NULL; // Tell CopyData to write silence.
}
// Copy the available capture data to the audio sink.
hr = pMySink->CopyData(
pData, numFramesAvailable, &bDone);
EXIT_ON_ERROR(hr)
// Frames must be released in the same quantity they were read.
hr = pCaptureClient->ReleaseBuffer(numFramesAvailable);
EXIT_ON_ERROR(hr)
hr = pCaptureClient->GetNextPacketSize(&packetLength);
EXIT_ON_ERROR(hr)
}
}
// Patch the RIFF/data chunk sizes and close the file.
// NOTE(review): this runs before Stop(), and is skipped entirely if
// any EXIT_ON_ERROR fired above — consider finalizing in cleanup.
hr = pMySink->_File_WrapUp();
EXIT_ON_ERROR(hr)
hr = pAudioClient->Stop(); // Stop recording.
EXIT_ON_ERROR(hr)
Exit:
// pwfx was allocated by GetMixFormat; CoTaskMemFree(NULL) is a no-op.
CoTaskMemFree(pwfx);
SAFE_RELEASE(pEnumerator)
SAFE_RELEASE(pDevice)
SAFE_RELEASE(pAudioClient)
SAFE_RELEASE(pCaptureClient)
return hr;
}
MyAudioSink.cpp 注意** - 這就是問題所在。 您可能會注意到一個名為“write_word”的 UDF 使用所有音頻格式參數初始化 wav 文件,但是,我一直無法弄清楚如何使用此函數來寫入 pData 內容,因此嘗試使用ostream write 函數產生了迄今為止最好的結果(聽到我的聲音),但它聽起來非常靜態和失真。
#pragma once
#include "MyAudioSink.h"
#include <mmreg.h>
#include <string.h>
namespace little_endian_io
{
    // Serialize the low `size` bytes of `value` to `outs` in
    // little-endian order (least-significant byte first).
    // `size` defaults to the full width of `Word`. Returns the stream
    // so calls can be chained.
    template <typename Word>
    std::ostream& write_word(std::ostream& outs, Word value, unsigned size = sizeof(Word))
    {
        while (size != 0)
        {
            outs.put(static_cast<char>(value & 0xFF));
            value >>= 8;
            --size;
        }
        return outs;
    }
}
using namespace little_endian_io;
// Open example.wav and write the RIFF/fmt/data headers from the format
// cached by SetFormat(). The two "----" size fields are placeholders
// patched by _File_WrapUp(). Returns S_OK, or E_FAIL if the file
// cannot be opened.
HRESULT MyAudioSink::_Initialize_File() {
    cout << "initializing file";
    // ios::binary is required: we emit raw little-endian fields.
    mainFile.open("example.wav", ios::out | ios::binary);
    if (!mainFile.is_open()) return E_FAIL;
    // RIFF header: "RIFF" <overall size, patched later> "WAVE", then
    // the start of the "fmt " chunk.
    mainFile << "RIFF----WAVEfmt "; // (chunk size to be filled in later)
    // Resolve the WAV "audio format" code. The WASAPI shared-mode mix
    // format is usually WAVE_FORMAT_EXTENSIBLE (0xFFFE) wrapping
    // 32-bit IEEE float; a classic 16-byte fmt chunk must carry
    // 1 (PCM) or 3 (IEEE float), never 0xFFFE. The original hardcoded
    // 1, which mislabels float samples as integer PCM — the classic
    // cause of the loud static/distortion described in the question.
    WORD audioFormat = wFormatTag;
    if (wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
        // Assumes a 32-bit extensible mix format is float (true for
        // the WASAPI shared-mode engine) — TODO confirm via SubFormat.
        audioFormat = (wBitsPerSample == 32) ? WAVE_FORMAT_IEEE_FLOAT
                                             : WAVE_FORMAT_PCM;
    }
    write_word(mainFile, 16, 4);              // fmt chunk size: no extension data
    write_word(mainFile, audioFormat, 2);     // 1 = integer PCM, 3 = IEEE float
    write_word(mainFile, nChannels, 2);       // channel count from the mix format
    write_word(mainFile, nSamplesPerSec, 4);  // sample rate (Hz)
    write_word(mainFile, nAvgBytesPerSec, 4); // nSamplesPerSec * nBlockAlign
    write_word(mainFile, nBlockAlign, 2);     // bytes per frame (all channels)
    write_word(mainFile, wBitsPerSample, 2);  // bits per sample
    // Data chunk header; remember where its size field lives so
    // _File_WrapUp can patch it.
    data_chunk_pos = mainFile.tellp();
    mainFile << "data----"; // (chunk size to be filled in later)..
    // Capture loop flips bComplete to true to stop recording.
    bComplete = false;
    // Debug packet counter consumed by CopyData's stop condition.
    test = 0;
    return S_OK;
}
// Cache the capture format fields the WAV writer needs.
// If the engine reports WAVE_FORMAT_EXTENSIBLE (0xFFFE) — the usual
// case for the shared-mode mix format — resolve it to the underlying
// classic tag so the WAV header can be written correctly.
// Returns S_OK, or E_POINTER for a null format.
HRESULT MyAudioSink::SetFormat(WAVEFORMATEX* pwfx) {
    if (pwfx == NULL) return E_POINTER;
    wFormatTag = pwfx->wFormatTag;
    nChannels = pwfx->nChannels;
    nSamplesPerSec = pwfx->nSamplesPerSec;
    nAvgBytesPerSec = pwfx->nAvgBytesPerSec;
    nBlockAlign = pwfx->nBlockAlign;
    wBitsPerSample = pwfx->wBitsPerSample;
    cbSize = pwfx->cbSize;
    // For the standard subformat GUIDs
    // ({tag}-0000-0010-8000-00AA00389B71), Data1 of SubFormat *is* the
    // classic tag (1 = PCM, 3 = IEEE float); reading Data1 avoids a
    // dependency on the KSDATAFORMAT_SUBTYPE_* constants in ksmedia.h.
    // cbSize >= 22 guarantees the extensible fields are present.
    if (wFormatTag == WAVE_FORMAT_EXTENSIBLE && cbSize >= 22) {
        const WAVEFORMATEXTENSIBLE* pEx =
            reinterpret_cast<const WAVEFORMATEXTENSIBLE*>(pwfx);
        wFormatTag = static_cast<WORD>(pEx->SubFormat.Data1);
    }
    return S_OK;
}
// Append one captured packet to the WAV data chunk.
// pData: packet payload, or NULL when the caller saw
//        AUDCLNT_BUFFERFLAGS_SILENT (we must then emit silence to keep
//        the timeline continuous — the original dereferenced NULL here).
// numFramesAvailable: frame count; bytes = frames * nBlockAlign.
// bDone: set to TRUE once the debug duration limit is reached.
HRESULT MyAudioSink::CopyData(BYTE* pData, UINT32 numFramesAvailable, BOOL* bDone) {
    if (bDone == NULL) return E_POINTER;
    const size_t numBytes =
        static_cast<size_t>(numFramesAvailable) * nBlockAlign;
    if (pData == NULL) {
        // Silent packet: write zero bytes (0 == silence for both
        // integer PCM and IEEE float samples) in small chunks.
        static const char zeros[256] = {};
        size_t remaining = numBytes;
        while (remaining > 0) {
            const size_t chunk =
                remaining < sizeof(zeros) ? remaining : sizeof(zeros);
            mainFile.write(zeros, chunk);
            remaining -= chunk;
        }
    } else {
        // One bulk write for the whole packet instead of the original
        // per-frame loop (same bytes, fewer stream calls, and no
        // signed/unsigned loop-index mismatch).
        mainFile.write(reinterpret_cast<const char*>(pData), numBytes);
    }
    // Debug stop condition retained from the original: finish after
    // nBlockAlign * 120 packets (an arbitrary count, not a time unit).
    test++;
    if (test >= nBlockAlign * 120) bComplete = true;
    // Tell the capture loop whether to stop.
    if (bComplete) *bDone = true;
    return S_OK;
}
// Patch the two placeholder size fields written by _Initialize_File
// and close the file, producing a spec-conformant RIFF/WAVE layout.
HRESULT MyAudioSink::_File_WrapUp() {
    // Total bytes written so far; both chunk sizes derive from it.
    file_length = mainFile.tellp();
    // Data chunk size = payload bytes only. The chunk's 8-byte header
    // ("data" + size field) starts at data_chunk_pos, so the payload
    // is file_length - (data_chunk_pos + 8).
    // Two bugs fixed vs. the original:
    //  * it computed file_length - data_chunk_pos + 8 (16 bytes too
    //    large, so players read past the end);
    //  * it omitted the explicit size, letting write_word default to
    //    sizeof(size_t) — 8 bytes on x64 — spilling 4 bytes into the
    //    audio data. The field must be exactly 4 bytes.
    mainFile.seekp(data_chunk_pos + 4);
    write_word(mainFile, file_length - (data_chunk_pos + 8), 4);
    // RIFF chunk size = total file size minus the 8-byte "RIFF"+size
    // header.
    mainFile.seekp(0 + 4);
    write_word(mainFile, file_length - 8, 4);
    mainFile.close();
    cout << "finalized file";
    return S_OK;
}
我的音頻接收器
#pragma once
//
#include <audioclient.h>
#include <Mmdeviceapi.h>
#include <fstream>
#include <iostream>
#include <cmath>
// NOTE(review): `using namespace std` at header scope injects all of
// std into every translation unit that includes this header; prefer
// qualified names (std::ofstream, std::cout) in headers.
using namespace std;
// Audio sink used by RecordAudioStream: receives the capture format,
// opens/writes/finalizes a hand-built WAV file ("example.wav").
class MyAudioSink
{
private:
// Stream offset of the "data" chunk header; its size field is patched
// in _File_WrapUp().
size_t data_chunk_pos;
// Total file size at wrap-up time, used to patch both chunk sizes.
size_t file_length;
// The output WAV file, open between _Initialize_File and _File_WrapUp.
ofstream mainFile;
//sample format
// Fields mirrored from the WAVEFORMATEX handed to SetFormat().
WORD wFormatTag;
WORD nChannels;
DWORD nSamplesPerSec;
DWORD nAvgBytesPerSec;
// Bytes per frame (all channels of one sample).
WORD nBlockAlign;
WORD wBitsPerSample;
WORD cbSize;
// Debug counter incremented per CopyData call; drives the stop
// condition (see CopyData).
int test;
public:
// Set true to ask the capture loop to stop.
bool bComplete;
HRESULT _Initialize_File();
HRESULT SetFormat(WAVEFORMATEX* pwfx);
HRESULT CopyData(BYTE* pData, UINT32 numFramesAvailable, BOOL* bDone);
HRESULT _File_WrapUp();
};
我懷疑問題在於你的程序只按 PCM 格式處理數據,而沒有處理可擴展格式(WAVE_FORMAT_EXTENSIBLE)。這樣寫出的最終文件頭將不符合 WAVE 規範。
添加此代碼以確認:
// Diagnostic fragment: query the shared-mode mix format and report
// which wire format the capture engine will deliver. Shared-mode
// WASAPI usually reports WAVE_FORMAT_EXTENSIBLE wrapping
// KSDATAFORMAT_SUBTYPE_IEEE_FLOAT.
pAudioClient->GetMixFormat(&pwfx);
switch (pwfx->wFormatTag)
{
case WAVE_FORMAT_PCM:
    cout << "WAVE_FORMAT_PCM";
    break;
case WAVE_FORMAT_IEEE_FLOAT:
    cout << "WAVE_FORMAT_IEEE_FLOAT";
    break;
case WAVE_FORMAT_EXTENSIBLE:
{
    // Braces added: the original declared pWaveFormatExtensible
    // directly under the case label, which is ill-formed in C++
    // ("jump to case label crosses initialization of ..."); the
    // declaration needs its own scope.
    cout << "WAVE_FORMAT_EXTENSIBLE";
    WAVEFORMATEXTENSIBLE* pWaveFormatExtensible =
        reinterpret_cast<WAVEFORMATEXTENSIBLE*>(pwfx);
    // NOTE: the KSDATAFORMAT_SUBTYPE_* GUID constants require
    // <ksmedia.h> (and linking the GUID definitions).
    if (pWaveFormatExtensible->SubFormat == KSDATAFORMAT_SUBTYPE_PCM)
    {
        cout << "KSDATAFORMAT_SUBTYPE_PCM";
    }
    else if (pWaveFormatExtensible->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
    {
        cout << "KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
    }
    break;
}
}
我認為最常見的情況是 WAVE_FORMAT_EXTENSIBLE 和 KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ...
編輯
我在這里做了一個快速示例: WasapiCapture
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.