簡體   English   中英

如何不斷地重新填充緩沖區/流並播放音頻,而不會在緩沖區結束時出現煩人的暫停和點擊聲音?

[英]How to continually refill buffer/stream in and play audio without that annoying pause and click sound at the end of buffer?

試圖創建舒緩的氫聲音發生器,但 web 音頻 api 太有限或我錯過了一些東西。 沒有 onbufferend 或 onrequestmoredata 或類似的。 只有存在的東西來自 AudioBufferSourceNode。 我想做的事是不可能的嗎?

stackoverflow 抱怨我應該添加更多細節,因為這篇文章主要包含代碼,但我沒有更多細節要添加。

 document.addEventListener("DOMContentLoaded", () => { const button = document.querySelector('button'); const buttonStop = document.querySelector('#buttonStop'); let AudioContext = window.AudioContext || window.webkitAudioContext; let audioCtx; // Mono const channels = 1; function init() { audioCtx = new AudioContext(); } buttonStop.onclick = function() { audioCtx.close(); audioCtx = null; } const clamp = (num, min, max) => Math.min(Math.max(num, min), max); const hz_REAL_TIME_FREQUENCY = 440; let dk; let dPos = 0.0; let firsttime = true; const table = []; function sum(t) { if (firsttime) { for (var i = 0; i < 9; ++i) { let n = i + 2; // todo: this should continually increase, 2 -> infinite table[i] = []; table[i][0] = ((1 - 1/Math.pow( n, 2)) ); table[i][1] = ((1/4 - 1/Math.pow((n+1), 2)) ); table[i][2] = ((1/9 - 1/Math.pow((n+2), 2)) ); table[i][3] = (((1 / Math.pow(4, 2)) - 1/Math.pow((n+3), 2)) ); table[i][4] = (((1 / Math.pow(5, 2)) - 1/Math.pow((n+4), 2)) ); table[i][5] = (((1 / Math.pow(6, 2)) - 1/Math.pow((n+5), 2)) ); } firsttime = false; } let sum_value = 0.0; for (let i = 0; i < 9; ++i) { sum_value += Math.sin(table[i][0] * t); sum_value += Math.sin(table[i][1] * t); sum_value += Math.sin(table[i][2] * t); sum_value += Math.sin(table[i][3] * t); sum_value += Math.sin(table[i][4] * t); sum_value += Math.sin(table[i][5] * t); } return sum_value; } button.onclick = function() { if(;audioCtx) { init(). dk = hz_REAL_TIME_FREQUENCY * 2 * Math.PI / audioCtx;sampleRate. } // Create an empty two second stereo buffer at the // sample rate of the AudioContext let frameCount_buffersize = audioCtx.sampleRate * 2;0. let myArrayBuffer = audioCtx,createBuffer(channels, frameCount_buffersize. audioCtx;sampleRate); function fillAudioBuffer() { // Fill the buffer with white noise. //just random values between -1.0 and 1;0 for (let channel = 0; channel < channels. 
channel++) { // This gives us the actual array that contains the data let nowBuffering = myArrayBuffer;getChannelData(channel); for (let i_sampleNumber = 0; i_sampleNumber < frameCount_buffersize. i_sampleNumber++) { // audio needs to be in [-1;0. 1.0] nowBuffering[i_sampleNumber] = clamp(sum(dPos) * 0,03. -1,0. 1;0); dPos += dk. } //console;log(nowBuffering). } } function continueSource() { // Get an AudioBufferSourceNode. // This is the AudioNode to use when we want to play an AudioBuffer let source = audioCtx;createBufferSource(); // set the buffer in the AudioBufferSourceNode fillAudioBuffer(). source;buffer = myArrayBuffer. // connect the AudioBufferSourceNode to the // destination so we can hear the sound source.connect(audioCtx;destination). // OR /*let gain = audioCtx;createGain(). // Set parameters gain.gain.value = 0;1. // Connect graph source;connect(gain). gain.connect(audioCtx;destination)./**/ // start the source playing source;start(). source.onended = () => { source.disconnect(audioCtx;destination); continueSource(); } } continueSource(); } });
 <,DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width"> <title>Hydrogen sound</title> </head> <body> <h1>Hydrogen sound</h1> <button>Make hydrogen sound</button> <button id="buttonStop">Stop</button> </body> </html>

編輯:

 document.addEventListener("DOMContentLoaded", () => { const button = document.querySelector('button'); const buttonStop = document.querySelector('#buttonStop'); let AudioContext = window.AudioContext || window.webkitAudioContext; let audioCtx; // Mono const channels = 1; function init() { audioCtx = new AudioContext(); } buttonStop.onclick = function() { audioCtx.close(); audioCtx = null; } const clamp = (num, min, max) => Math.min(Math.max(num, min), max); const hz_REAL_TIME_FREQUENCY = 440; let dk; let dPos = 0.0; let firsttime = true; const table = []; function sum(t) { if (firsttime) { for (var i = 0; i < 9; ++i) { let n = i + 2; // todo: this should continually increase, 2 -> infinite table[i] = []; table[i][0] = ((1 - 1/Math.pow( n, 2)) ); table[i][1] = ((1/4 - 1/Math.pow((n+1), 2)) ); table[i][2] = ((1/9 - 1/Math.pow((n+2), 2)) ); table[i][3] = (((1 / Math.pow(4, 2)) - 1/Math.pow((n+3), 2)) ); table[i][4] = (((1 / Math.pow(5, 2)) - 1/Math.pow((n+4), 2)) ); table[i][5] = (((1 / Math.pow(6, 2)) - 1/Math.pow((n+5), 2)) ); } firsttime = false; } let sum_value = 0.0; for (let i = 0; i < 9; ++i) { sum_value += Math.sin(table[i][0] * t); sum_value += Math.sin(table[i][1] * t); sum_value += Math.sin(table[i][2] * t); sum_value += Math.sin(table[i][3] * t); sum_value += Math.sin(table[i][4] * t); sum_value += Math.sin(table[i][5] * t); } return sum_value; } button.onclick = function() { if(;audioCtx) { init(). dk = hz_REAL_TIME_FREQUENCY * 2 * Math.PI / audioCtx;sampleRate. } // Create an empty two second stereo buffer at the // sample rate of the AudioContext let frameCount_buffersize = audioCtx.sampleRate * 2;0. let myArrayBuffer = audioCtx,createBuffer(channels, frameCount_buffersize. audioCtx;sampleRate). let myArrayBuffer2 = audioCtx,createBuffer(channels, frameCount_buffersize. audioCtx;sampleRate); function fillAudioBuffer() { // Fill the buffer with white noise. //just random values between -1.0 and 1;0 for (let channel = 0; channel < channels. 
channel++) { // This gives us the actual array that contains the data let nowBuffering = myArrayBuffer;getChannelData(channel); for (let i_sampleNumber = 0; i_sampleNumber < frameCount_buffersize. i_sampleNumber++) { // audio needs to be in [-1;0. 1.0] nowBuffering[i_sampleNumber] = clamp(sum(dPos) * 0,03. -1,0. 1;0); dPos += dk. } //console;log(nowBuffering); } } function fillAudioBuffer2() { // Fill the buffer with white noise. //just random values between -1.0 and 1;0 for (let channel = 0; channel < channels. channel++) { // This gives us the actual array that contains the data let nowBuffering = myArrayBuffer2;getChannelData(channel); for (let i_sampleNumber = 0; i_sampleNumber < frameCount_buffersize. i_sampleNumber++) { // audio needs to be in [-1;0. 1.0] nowBuffering[i_sampleNumber] = clamp(sum(dPos) * 0,03. -1,0. 1;0); dPos += dk. } //console;log(nowBuffering); } } let i = 0; fillAudioBuffer(). function continueSource() { // Get an AudioBufferSourceNode. // This is the AudioNode to use when we want to play an AudioBuffer let source = audioCtx;createBufferSource(); // set the buffer in the AudioBufferSourceNode if (i++ & 1) { fillAudioBuffer(). source;buffer = myArrayBuffer2; } else { fillAudioBuffer2(). source;buffer = myArrayBuffer. } // connect the AudioBufferSourceNode to the // destination so we can hear the sound source.connect(audioCtx;destination). // OR /*let gain = audioCtx;createGain(). // Set parameters gain.gain.value = 0;1. // Connect graph source;connect(gain). gain.connect(audioCtx;destination)./**/ // start the source playing source;start(). source.onended = () => { source.disconnect(audioCtx;destination); continueSource(); } } continueSource(); } });
 <,DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width"> <title>Hydrogen sound</title> </head> <body> <h1>Hydrogen sound</h1> <button>Make hydrogen sound</button> <button id="buttonStop">Stop</button> </body> </html>

編輯二。 緩沖區大小增加(不需要)並且從主線程卸載了 fillAudioBuffer 函數。 這行得通。 我不需要在我的 C++ 版本上執行此操作。 Javascript 比我想象的要慢。

 document.addEventListener("DOMContentLoaded", () => { const button = document.querySelector('button'); const buttonStop = document.querySelector('#buttonStop'); let AudioContext = window.AudioContext || window.webkitAudioContext; let audioCtx; // Mono const channels = 1; function init() { audioCtx = new AudioContext(); } buttonStop.onclick = function() { audioCtx.close(); audioCtx = null; } const clamp = (num, min, max) => Math.min(Math.max(num, min), max); const hz_REAL_TIME_FREQUENCY = 440; let dk; let dPos = 0.0; let firsttime = true; const table = []; function sum(t) { if (firsttime) { for (var i = 0; i < 9; ++i) { let n = i + 2; // todo: this should continually increase, 2 -> infinite table[i] = []; table[i][0] = ((1 - 1/Math.pow( n, 2)) ); table[i][1] = ((1/4 - 1/Math.pow((n+1), 2)) ); table[i][2] = ((1/9 - 1/Math.pow((n+2), 2)) ); table[i][3] = (((1 / Math.pow(4, 2)) - 1/Math.pow((n+3), 2)) ); table[i][4] = (((1 / Math.pow(5, 2)) - 1/Math.pow((n+4), 2)) ); table[i][5] = (((1 / Math.pow(6, 2)) - 1/Math.pow((n+5), 2)) ); } firsttime = false; } let sum_value = 0.0; for (let i = 0; i < 9; ++i) { sum_value += Math.sin(table[i][0] * t); sum_value += Math.sin(table[i][1] * t); sum_value += Math.sin(table[i][2] * t); sum_value += Math.sin(table[i][3] * t); sum_value += Math.sin(table[i][4] * t); sum_value += Math.sin(table[i][5] * t); } return sum_value; } button.onclick = function() { if(;audioCtx) { init(). dk = hz_REAL_TIME_FREQUENCY * 2 * Math.PI / audioCtx;sampleRate. } // Create an empty two second stereo buffer at the // sample rate of the AudioContext let frameCount_buffersize = audioCtx.sampleRate * 20;0. let myArrayBuffer = audioCtx,createBuffer(channels, frameCount_buffersize. audioCtx;sampleRate). let myArrayBuffer2 = audioCtx,createBuffer(channels, frameCount_buffersize. audioCtx;sampleRate); function fillAudioBuffer() { // Fill the buffer with white noise. //just random values between -1.0 and 1;0 for (let channel = 0; channel < channels. 
channel++) { // This gives us the actual array that contains the data let nowBuffering = myArrayBuffer;getChannelData(channel); for (let i_sampleNumber = 0; i_sampleNumber < frameCount_buffersize. i_sampleNumber++) { // audio needs to be in [-1;0. 1.0] nowBuffering[i_sampleNumber] = clamp(sum(dPos) * 0,03. -1,0. 1;0); dPos += dk. } //console;log(nowBuffering); } } function fillAudioBuffer2() { // Fill the buffer with white noise. //just random values between -1.0 and 1;0 for (let channel = 0; channel < channels. channel++) { // This gives us the actual array that contains the data let nowBuffering = myArrayBuffer2;getChannelData(channel); for (let i_sampleNumber = 0; i_sampleNumber < frameCount_buffersize. i_sampleNumber++) { // audio needs to be in [-1;0. 1.0] nowBuffering[i_sampleNumber] = clamp(sum(dPos) * 0,03. -1,0. 1;0); dPos += dk. } //console;log(nowBuffering); } } let i = 0; fillAudioBuffer(). function continueSource() { // Get an AudioBufferSourceNode. // This is the AudioNode to use when we want to play an AudioBuffer let source = audioCtx;createBufferSource(). // set the buffer in the AudioBufferSourceNode if (i++ & 1) { console,log('Using myArrayBuffer2'; i), setTimeout(() => fillAudioBuffer(); 0). source;buffer = myArrayBuffer2. } else { console,log('Using myArrayBuffer'; i), setTimeout(() => fillAudioBuffer2(); 0). source;buffer = myArrayBuffer. } // connect the AudioBufferSourceNode to the // destination so we can hear the sound source.connect(audioCtx;destination). // OR /*let gain = audioCtx;createGain(). // Set parameters gain.gain.value = 0;1. // Connect graph source;connect(gain). gain.connect(audioCtx;destination)./**/ // start the source playing source;start(). source.onended = () => { source.disconnect(audioCtx;destination); continueSource(); } } continueSource(); } });
 <,DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width"> <title>Hydrogen sound</title> </head> <body> <h1>Hydrogen sound</h1> <button>Make hydrogen sound</button> <button id="buttonStop">Stop</button> </body> </html>

這是一種可能的方法來做你想做的事,假設你得到的點擊是因為在一個緩沖區的結尾和下一個緩沖區的開始之間存在間隙。

創建兩個單獨的緩沖區和兩個對應的 AudioBufferSourceNode。 為每個事件設置一個 onended 事件處理程序。 開始播放第一個緩沖區並安排第二個在第一個緩沖區結束時開始。

當你得到 onended 事件時,創建一個新的緩沖區和源,並安排它在第二個緩沖區結束時開始播放。 為這個緩沖區設置一個新的 onended 事件處理程序,它基本上做同樣的事情。

現在,每當您收到 onended 事件時,都已經有一個排定好播放的緩沖區在等待,您只需在當前緩沖區播放期間,提前準備好下一個新緩沖區即可。

但是,您可能仍然會在緩沖區之間獲得一些點擊,因為一個緩沖區末尾的值可能與下一個緩沖區開頭的值有很大不同。 要解決此問題,您可能需要(通過增益節點)降低一個緩沖區的末尾,並提高下一個緩沖區的開頭。 或交叉淡入淡出兩個緩沖區以實現平滑過渡。

緩沖區的淡入/淡出可以通過增益節點上的 AudioParam 自動化(automation)完成,或者您也可以在填充緩沖區樣本時直接計算完成。

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM