简体   繁体   中英

Should I convert 32 bit float audio samples into a 16 bit PCM data wav file in my AudioWorkletProcessor or should I do it in the worker thread?

So I have written a small js library that will record Audio Samples from the user's microphone/audio input device and convert them to a wav file. Since modern browsers don't support this out of the box, I have used the WebAudio API to achieve this.

Recording.js:

// Module-level recording state.
var blobArray = []; // NOTE(review): never read or written in this file — candidate for removal
var audioContext;   // lazily created by record()

// All recorded samples as plain numbers in [-1, 1]; appended by the
// AudioWorkletNode's port.onmessage handler, consumed by buildWav().
var Float32BitSampleArray = [];

// Starts capturing audio from the user's microphone and accumulates the raw
// Float32 samples (posted by the 'wav-processor' worklet) into
// Float32BitSampleArray for later WAV encoding.
//
// Fixes vs. original: `microphone` was an implicit global (no declaration),
// and each onmessage re-allocated the whole sample array via concat(),
// which is O(total²) over a recording — push(...chunk) appends in place.
var record = async function() {
    audioContext = new AudioContext();

    // NOTE(review): getUserMedia requires a secure context and rejects if
    // the user denies the permission prompt — callers should handle that.
    const microphone = await navigator.mediaDevices.getUserMedia( {
        audio: true
    } );

    const inputNode = audioContext.createMediaStreamSource( microphone );

    await audioContext.audioWorklet.addModule( 'wavprocessor.js' );

    const wavProcessingNode = new AudioWorkletNode( audioContext, 'wav-processor' );

    inputNode.connect( wavProcessingNode );

    // Keeps the graph "pulled" so process() keeps firing; note this also
    // plays the live input back through the speakers (possible feedback).
    wavProcessingNode.connect( audioContext.destination );

    wavProcessingNode.port.onmessage = (e) => {
        // e.data is the plain array of samples shipped by the worklet.
        Float32BitSampleArray.push( ...e.data );
    };
};

// Builds the WAV file from the recorded samples and returns it as a Blob.
// Bug fix: the Blob produced by buildWavSpecification() was previously
// discarded, so callers (e.g. stop()) always received undefined.
function buildWav() {
    return buildWavSpecification();
}

// Converts the accumulated Float32 samples (range [-1, 1]) to 16-bit
// signed PCM and writes them little-endian into `view`, starting at byte
// offset `index` (2 bytes per sample).
function appendData( index, view ) {
    for( var i = 0; i < Float32BitSampleArray.length; i++ ) {
        // Clamp to the valid range, then scale: negatives map onto
        // [-0x8000, 0), non-negatives onto [0, 0x7FFF].
        var s = Math.max( -1, Math.min( 1, Float32BitSampleArray[ i ] ) );
        s = s < 0 ? s * 0x8000 : s * 0x7FFF;

        // setInt16 makes the signed intent explicit; for negative values it
        // stores the same two's-complement bytes setUint16 produced.
        view.setInt16( index + 2 * i, s, true );
    }
}

// Assembles a complete 16-bit PCM WAV file (44-byte RIFF header + sample
// data) from Float32BitSampleArray and returns it as a Blob.
//
// Backward-compatible generalization: sampleRate and numberOfChannels are
// now parameters (defaults reproduce the original hard-coded mono/44100).
// NOTE(review): pass audioContext.sampleRate here — the context is not
// guaranteed to run at 44100 Hz, and a mismatch pitch-shifts playback.
// Also fixes the header math for multi-channel data: the original computed
// blockAlign as channels² * bytesPerSample and bitsPerSample as
// channels * 16 (harmless for mono, wrong otherwise).
function buildWavSpecification( sampleRate = 44100, numberOfChannels = 1 ) {
    const dataTypeSize = 16;                              // bits per sample (16-bit PCM)
    const bytesPerSample = dataTypeSize / 8;              // 2 bytes
    const blockAlign = numberOfChannels * bytesPerSample; // bytes per frame
    const byteRate = sampleRate * blockAlign;
    const totalDataSize = bytesPerSample * Float32BitSampleArray.length;
    const sizeOfFileDescriptor = totalDataSize + 36;      // file size minus the 8-byte RIFF prefix
    const format = 1;                                     // 1 = uncompressed PCM
    const buffer = new ArrayBuffer( 44 + totalDataSize );
    const view = new DataView( buffer );

    // Writes an ASCII chunk id byte-by-byte ('RIFF', 'WAVE', ...).
    function writeStringIntoBuffer( index, str ) {
        for( let i = 0; i < str.length; i++ ) {
            view.setUint8( index + i, str.charCodeAt( i ) );
        }
    }

    // WAV header fields are little-endian.
    function write32BitInt( index, val ) {
        view.setUint32( index, val, true );
    }

    function write16BitInt( index, val ) {
        view.setUint16( index, val, true );
    }

    // --- 44-byte RIFF/WAVE header, per the canonical WAV layout ---
    writeStringIntoBuffer( 0, 'RIFF' );
    write32BitInt( 4, sizeOfFileDescriptor );
    writeStringIntoBuffer( 8, 'WAVE' );
    writeStringIntoBuffer( 12, 'fmt ' );
    write32BitInt( 16, 16 );                 // 'fmt ' sub-chunk size
    write16BitInt( 20, format );
    write16BitInt( 22, numberOfChannels );
    write32BitInt( 24, sampleRate );
    write32BitInt( 28, byteRate );
    write16BitInt( 32, blockAlign );
    write16BitInt( 34, dataTypeSize );       // bits per sample
    writeStringIntoBuffer( 36, 'data' );
    write32BitInt( 40, totalDataSize );

    // --- PCM sample data ---
    appendData( 44, view );

    return new Blob( [ view ], { type: 'audio/wav' } );
}

// Stops the recording and returns the encoded WAV Blob.
// Bug fix: the blob was previously built and then discarded.
var stop = function() {
    // TODO(review): also stop the MediaStream tracks / close audioContext
    // here so the browser's microphone indicator turns off.
    var blob = buildWav();
    return blob;
}

Wavprocessor AudioWorkletProcessor (wavprocessor.js):

// AudioWorkletProcessor that forwards each render quantum's raw Float32
// samples (first channel of the first input) to the main thread via its
// MessagePort. Runs on the realtime audio thread — keep process() cheap.
class WavProcessor extends AudioWorkletProcessor {

    constructor() {
        super();
        this.buffer = [];
    }

    // Called by the audio thread once per render quantum (128 frames).
    process( inputs, outputs, parameters ) {
        var input = inputs[ 0 ];

        // Guard: an input may have zero channels (e.g. before the mic
        // stream is connected); the original indexed inputs[0][0]
        // unconditionally and would throw on undefined.
        if( input && input[ 0 ] ) {
            this.pushData( input[ 0 ] );
        }

        // Only ship non-empty chunks; the main thread concatenates them.
        if( this.buffer.length > 0 ) {
            this.port.postMessage( this.buffer );
            this.buffer = [];
        }

        // Returning true keeps the processor alive.
        return true;
    }

    // Copies the Float32Array samples into a plain, structured-cloneable
    // array for postMessage.
    pushData( samples ) {
        for( var i = 0; i < samples.length; i++ ) {
            this.buffer.push( samples[ i ] );
        }
    }

}

registerProcessor('wav-processor', WavProcessor );

Now this works fine, but my question is: should I build the wav file in the AudioWorkletProcessor, or should I spawn a separate worker thread to build the wav file? Right now the file is being built in the main thread of the browser, and for a 10 second recording it is sending around 700k audio samples, so the Float32BitSampleArray has 700k audio samples inside it. I am asking from the point of view of performance.

Stackoverflow article used to build wav file: link
Wav file specification: link 1 link 2

I would say it depends on how you define performance.

If you want to do it as quickly as possible then doing it on the audio thread is probably the fastest possible way.

But it may interfere with any other audio processing scheduled on the same thread and might cause audible glitches. To avoid that it's better to do the post-processing (aka converting the PCM data into a WAV file) somewhere else. Since it may interfere with any user interaction when done on the main thread I would say it's a good idea to do it in a Web Worker.

I had to develop a similar thing in the past and went for the second approach. The result is available as an npm package. Maybe it comes in handy for you. extendable-media-recorder

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address. For any questions, please contact: yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM