繁体   English   中英

HTML5 录音到文件

[英]HTML5 record audio to file

我最终想做的是从用户的麦克风录音,并在完成后将文件上传到服务器。 到目前为止,我已经成功地使用以下代码把麦克风的音频流接到一个 `<audio>` 元素上进行播放:

var audio = document.getElementById("audio_preview");

// Define the failure handler as a function declaration so it is hoisted and
// fully initialised before getUserMedia is invoked below.
// (The original `var onRecordFail = function (...)` appeared AFTER the
// getUserMedia call, so the error callback was `undefined` at call time.)
function onRecordFail(e) {
  console.log(e);
}

// Normalise the vendor-prefixed getUserMedia implementations.
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

// Request an audio-only stream and preview it through the <audio> element.
navigator.getUserMedia({video: false, audio: true}, function(stream) {
   audio.src = window.URL.createObjectURL(stream);
}, onRecordFail);

我如何在此基础上更进一步,把录音保存为文件?

有一个相当完整的录制演示可在以下网址获得: http://webaudiodemos.appspot.com/AudioRecorder/index.html

它允许您在浏览器中录制音频,然后为您提供导出和下载已录制内容的选项。

您可以查看该页面的源代码以查找指向javascript的链接,但总而言之,有一个包含exportWAV方法和forceDownload方法的Recorder对象。

下面的代码版权归Matt Diamond所有,可在MIT许可下使用。 原始文件在这里:

保存这些文件并使用

 (function(window){ var WORKER_PATH = 'recorderWorker.js'; var Recorder = function(source, cfg){ var config = cfg || {}; var bufferLen = config.bufferLen || 4096; this.context = source.context; this.node = this.context.createScriptProcessor(bufferLen, 2, 2); var worker = new Worker(config.workerPath || WORKER_PATH); worker.postMessage({ command: 'init', config: { sampleRate: this.context.sampleRate } }); var recording = false, currCallback; this.node.onaudioprocess = function(e){ if (!recording) return; worker.postMessage({ command: 'record', buffer: [ e.inputBuffer.getChannelData(0), e.inputBuffer.getChannelData(1) ] }); } this.configure = function(cfg){ for (var prop in cfg){ if (cfg.hasOwnProperty(prop)){ config[prop] = cfg[prop]; } } } this.record = function(){ recording = true; } this.stop = function(){ recording = false; } this.clear = function(){ worker.postMessage({ command: 'clear' }); } this.getBuffer = function(cb) { currCallback = cb || config.callback; worker.postMessage({ command: 'getBuffer' }) } this.exportWAV = function(cb, type){ currCallback = cb || config.callback; type = type || config.type || 'audio/wav'; if (!currCallback) throw new Error('Callback not set'); worker.postMessage({ command: 'exportWAV', type: type }); } worker.onmessage = function(e){ var blob = e.data; currCallback(blob); } source.connect(this.node); this.node.connect(this.context.destination); //this should not be necessary }; Recorder.forceDownload = function(blob, filename){ var url = (window.URL || window.webkitURL).createObjectURL(blob); var link = window.document.createElement('a'); link.href = url; link.download = filename || 'output.wav'; var click = document.createEvent("Event"); click.initEvent("click", true, true); link.dispatchEvent(click); } window.Recorder = Recorder; })(window); //ADDITIONAL JS recorderWorker.js var recLength = 0, recBuffersL = [], recBuffersR = [], sampleRate; this.onmessage = function(e){ switch(e.data.command){ case 'init': 
init(e.data.config); break; case 'record': record(e.data.buffer); break; case 'exportWAV': exportWAV(e.data.type); break; case 'getBuffer': getBuffer(); break; case 'clear': clear(); break; } }; function init(config){ sampleRate = config.sampleRate; } function record(inputBuffer){ recBuffersL.push(inputBuffer[0]); recBuffersR.push(inputBuffer[1]); recLength += inputBuffer[0].length; } function exportWAV(type){ var bufferL = mergeBuffers(recBuffersL, recLength); var bufferR = mergeBuffers(recBuffersR, recLength); var interleaved = interleave(bufferL, bufferR); var dataview = encodeWAV(interleaved); var audioBlob = new Blob([dataview], { type: type }); this.postMessage(audioBlob); } function getBuffer() { var buffers = []; buffers.push( mergeBuffers(recBuffersL, recLength) ); buffers.push( mergeBuffers(recBuffersR, recLength) ); this.postMessage(buffers); } function clear(){ recLength = 0; recBuffersL = []; recBuffersR = []; } function mergeBuffers(recBuffers, recLength){ var result = new Float32Array(recLength); var offset = 0; for (var i = 0; i < recBuffers.length; i++){ result.set(recBuffers[i], offset); offset += recBuffers[i].length; } return result; } function interleave(inputL, inputR){ var length = inputL.length + inputR.length; var result = new Float32Array(length); var index = 0, inputIndex = 0; while (index < length){ result[index++] = inputL[inputIndex]; result[index++] = inputR[inputIndex]; inputIndex++; } return result; } function floatTo16BitPCM(output, offset, input){ for (var i = 0; i < input.length; i++, offset+=2){ var s = Math.max(-1, Math.min(1, input[i])); output.setInt16(offset, s < 0 ? 
s * 0x8000 : s * 0x7FFF, true); } } function writeString(view, offset, string){ for (var i = 0; i < string.length; i++){ view.setUint8(offset + i, string.charCodeAt(i)); } } function encodeWAV(samples){ var buffer = new ArrayBuffer(44 + samples.length * 2); var view = new DataView(buffer); /* RIFF identifier */ writeString(view, 0, 'RIFF'); /* file length */ view.setUint32(4, 32 + samples.length * 2, true); /* RIFF type */ writeString(view, 8, 'WAVE'); /* format chunk identifier */ writeString(view, 12, 'fmt '); /* format chunk length */ view.setUint32(16, 16, true); /* sample format (raw) */ view.setUint16(20, 1, true); /* channel count */ view.setUint16(22, 2, true); /* sample rate */ view.setUint32(24, sampleRate, true); /* byte rate (sample rate * block align) */ view.setUint32(28, sampleRate * 4, true); /* block align (channel count * bytes per sample) */ view.setUint16(32, 4, true); /* bits per sample */ view.setUint16(34, 16, true); /* data chunk identifier */ writeString(view, 36, 'data'); /* data chunk length */ view.setUint32(40, samples.length * 2, true); floatTo16BitPCM(view, 44, samples); return view; } 
 <html> <body> <audio controls autoplay></audio> <script type="text/javascript" src="recorder.js"> </script> <fieldset><legend>RECORD AUDIO</legend> <input onclick="startRecording()" type="button" value="start recording" /> <input onclick="stopRecording()" type="button" value="stop recording and play" /> </fieldset> <script> var onFail = function(e) { console.log('Rejected!', e); }; var onSuccess = function(s) { var context = new webkitAudioContext(); var mediaStreamSource = context.createMediaStreamSource(s); recorder = new Recorder(mediaStreamSource); recorder.record(); // audio loopback // mediaStreamSource.connect(context.destination); } window.URL = window.URL || window.webkitURL; navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia; var recorder; var audio = document.querySelector('audio'); function startRecording() { if (navigator.getUserMedia) { navigator.getUserMedia({audio: true}, onSuccess, onFail); } else { console.log('navigator.getUserMedia not present'); } } function stopRecording() { recorder.stop(); recorder.exportWAV(function(s) { audio.src = window.URL.createObjectURL(s); }); } </script> </body> </html> 

这个问题很老,许多答案在当前版本的浏览器中已不再适用。 我尝试只用简单的 HTML、CSS 和 JS 创建了一个录音机, 并进一步在 Electron 中复用同样的代码,制作了跨平台应用。

 <html>
  <head>
    <title>Recorder App</title>
    
  </head>
  <h2>Recorder App</h2>
  <p>
    <button type="button" id="record">Record</button>
    <button type="button" id="stopRecord" disabled>Stop</button>
  </p>
  <p>
    <audio id="recordedAudio"></audio>        
  </p>

  <script> 
    navigator.mediaDevices.getUserMedia({audio:true})
    .then(stream => {handlerFunction(stream)})

    function handlerFunction(stream) {
      rec = new MediaRecorder(stream);
      rec.ondataavailable = e => {
        audioChunks.push(e.data);
        if (rec.state == "inactive"){
          let blob = new Blob(audioChunks,{type:'audio/mp3'});
          recordedAudio.src = URL.createObjectURL(blob);
          recordedAudio.controls=true;
          recordedAudio.autoplay=true;
          sendData(blob)
          }
        }
      }
    
    function sendData(data) {}
      record.onclick = e => {
        record.disabled = true;
        record.style.backgroundColor = "blue"
        stopRecord.disabled=false;
        audioChunks = [];
        rec.start();
        }
      stopRecord.onclick = e => {
        record.disabled = false;
        stop.disabled=true;
        record.style.backgroundColor = "red"
        rec.stop();
        }
  </script>
</html>

上面的代码适用于 Windows 10,Mac,Linux,显然,谷歌浏览器和 firefox。

这是一个简单的JavaScript录音机和编辑器。 你可以尝试一下。

https://www.danieldemmel.me/JSSoundRecorder/

可以从这里下载

https://github.com/daaain/JSSoundRecorder

更新:Chrome 从 v47 起也支持 MediaRecorder API。 建议直接使用它(原生的录制方式想必比各种变通方案更快),这个 API 非常易用,而且关于如何把 blob 上传到服务器,你也能找到很多现成的答案。

演示 - 可以在Chrome和Firefox中使用,故意将blob推送到服务器...

代码来源


目前,有三种方法可以做到:

  1. 作为wav [所有代码客户端,未压缩录制],您可以查看 - > Recorderjs 问题:文件大小很大,需要更多的上传带宽。
  2. 作为mp3 [所有代码客户端,压缩录制],你可以看看 - > mp3Recorder 问题:就个人而言,我发现质量不好,也存在此许可问题。
  3. 作为ogg [客户端+服务器( node.js )代码,压缩录制,无需浏览器崩溃的无限小时录制],您可以查看 - > recordOpus ,无论是客户端录制还是客户端 - 服务器捆绑,选择权归您所有。

    ogg录制示例(仅限firefox):

     var mediaRecorder = new MediaRecorder(stream); mediaRecorder.start(); // to start recording. ... mediaRecorder.stop(); // to stop recording. mediaRecorder.ondataavailable = function(e) { // do something with the data. } 

    用于 ogg 录音的 jsFiddle 在线演示(原文 "fiddle" 指 jsFiddle,而非"小提琴")

这是一个gitHub项目。

它以mp3格式记录来自浏览器的音频,并自动将其保存到网络服务器。 https://github.com/Audior/Recordmp3js

您还可以查看实现的详细说明: http://audior.ec/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/

您可以使用GitHub中的Recordmp3js来满足您的要求。 您可以从用户的麦克风录制,然后将该文件作为mp3播放。 最后将其上传到您的服务器。

我在演示中使用了这个。 作者在此位置已提供源代码示例: https://github.com/Audior/Recordmp3js

演示在这里: http://audior.ec/recordmp3js/

但目前仅适用于Chrome和Firefox。

似乎工作得很好,非常简单。 希望这可以帮助。

实时流式传输音频而无需等待录制结束: https://github.com/noamtcohen/AudioStreamer

这会传输PCM数据,但您可以修改代码以流式传输mp3或Speex

暂无
暂无

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM