[英]live audio stream over socket io. alsa shutdown, how to fix it?
我想為socket.io和ionic 4上的實時音頻流創建服務。
對於客戶端,我使用cordova-plugin-audioinput和適用於Angular的ng-socket-io
對於服務器,我使用標准的npm軟件包
節點版本:10.16.0
Npm版本:6.9.0
我收到聲音並播放,但是在收到58-60個音頻數據包(幀)後播放就中斷了。
我的客戶代碼:
/**
 * Ionic home page that captures microphone audio with cordova-plugin-audioinput
 * and streams the raw PCM chunks to the backend over Socket.IO.
 */
export class HomePage {
  // Number of samples per captured audio chunk.
  bufferSize: number = 8192;
  mediaRecorder;

  constructor(public socket: Socket) {}

  ngOnInit() {
    this.socket.connect();

    // The cordova plugin delivers each captured chunk as a global
    // 'audioinput' DOM event.
    window.addEventListener('audioinput', (data) => {
      this.onAudioInput(data, this.socket);
    });

    audioinput.initialize(this.captureConfig(), () => {
      audioinput.checkMicrophonePermission((hasPermission) => {
        if (hasPermission) {
          console.log("Permission already exist");
          return;
        }
        audioinput.getMicrophonePermission((hasPermission, message) => {
          if (hasPermission) {
            console.log("User gived permission.");
          } else {
            console.warn("User denied permission.");
          }
        });
      });
    });
  }

  /** Forwards one captured chunk to the server. */
  onAudioInput(data, socket) {
    socket.emit("audioinput", data.data);
  }

  /** Begins microphone capture and notifies the server. */
  start() {
    audioinput.start(this.captureConfig());
    this.socket.emit("message", "started");
  }

  /** Stops microphone capture and notifies the server. */
  stop() {
    audioinput.stop();
    this.socket.emit("message", "stoped");
  }

  // Capture settings shared by initialize() and start():
  // 8 kHz mono 16-bit PCM from the device microphone.
  private captureConfig() {
    return {
      sampleRate: 8000,
      bufferSize: this.bufferSize,
      channels: 1,
      format: audioinput.FORMAT.PCM_16BIT,
      audioSourceType: audioinput.AUDIOSOURCE_TYPE.MIC
    };
  }
}
我的服務器代碼:
// Socket.IO server: receives raw PCM chunks from the mobile client and loops a
// one-second playback of whatever buffer contents were most recently received.
//
// Fix: the original did `let app = require('express')` and handed the bare
// module function to http.Server(); express must be *called* to create an app.
const app = require('express')();
const http = require('http').Server(app);
const io = require('socket.io')(http);
const audioContext = require('audio-context')({
  sampleRate: 8000,
  latencyHint: 'balanced'
});
const AudioBuffer = require('audio-buffer');
const play = require('audio-play');

// Reusable buffer the incoming chunks are copied into
// (8192 samples, mono, 8 kHz — matches the client's capture settings).
const audioBuffer = new AudioBuffer(audioContext, {
  length: 8192,
  sampleRate: 8000,
  numberOfChannels: 1
});

let array = new Float32Array(); // was an implicit global in the original
let counter = 0;

io.on('connection', (socket) => {
  socket.on('message', (data) => {
    console.log(data);
  });

  socket.on('audioinput', (data) => {
    array = data;
    audioBuffer.copyToChannel(array, 0, 0);

    // Start the playback loop exactly once, on the first received chunk.
    if (counter === 0) {
      setTimeout(function audio() {
        const playback = play(audioBuffer, {
          start: 0,
          end: audioBuffer.duration,
          loop: false,
          detune: 0,
          rate: 8000,
          volume: 1,
          context: audioContext,
          autoplay: false
        }, (result) => {});
        playback.play();
        setTimeout(audio, 1000); // reschedule; plays whatever data is current
      }, 1000);
    }
    counter++;
    console.log(counter);
  });
});

const port = process.env.PORT || 6500;
http.listen(port, () => {
  console.log('listening started');
});
在58-60個音頻數據包關閉后,alsa會執行以下操作:
ALSA lib pulse.c:243:(pulse_connect) PulseAudio: Unable to connect: Connection terminated
[../deps/mpg123/src/output/alsa.c:165] error: cannot open device default
events.js:174
throw er; // Unhandled 'error' event
^
Error: open() failed: -1
at Speaker._open (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/node_modules/speaker/index.js:168:11)
at Speaker._write (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/node_modules/speaker/index.js:242:21)
at doWrite (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/node_modules/speaker/node_modules/readable-stream/lib/_stream_writable.js:428:64)
at writeOrBuffer (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/node_modules/speaker/node_modules/readable-stream/lib/_stream_writable.js:417:5)
at Speaker.Writable.write (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/node_modules/speaker/node_modules/readable-stream/lib/_stream_writable.js:334:11)
at write (/home/azat/ionicFolder/server (копия)/node_modules/audio-speaker/direct.js:67:11)
at loop (/home/azat/ionicFolder/server (копия)/node_modules/audio-play/index.js:93:4)
at Function.play (/home/azat/ionicFolder/server (копия)/node_modules/audio-play/index.js:94:4)
at Timeout.audio [as _onTimeout] (/home/azat/ionicFolder/server (копия)/index.js:54:26)
at ontimeout (timers.js:436:11)
如何修復錯誤?
我解決了這個問題。 我的客戶端通過麥克風進行實時音頻傳輸:
declare var audioinput: any;
import { Component } from '@angular/core';
import { Socket } from 'ng-socket-io';
@Component({
  selector: 'app-home',
  templateUrl: 'home.page.html',
  styleUrls: ['home.page.scss'],
})
export class HomePage {
  // Samples per chunk delivered by cordova-plugin-audioinput.
  bufferSize = 2048;

  constructor(public socket: Socket) {}

  ngOnInit() {
    this.socket.connect();

    // Each captured chunk arrives as a global 'audioinput' DOM event.
    window.addEventListener('audioinput', (data) => {
      this.onAudioInput(data, this.socket);
    });

    audioinput.initialize(this.recorderOptions(), () => {
      audioinput.checkMicrophonePermission((hasPermission) => {
        if (hasPermission) {
          console.log('Разрешение уже получено');
          return;
        }
        audioinput.getMicrophonePermission((hasPermission, message) => {
          if (hasPermission) {
            console.log('Пользователь дал разрешение на запись.');
          } else {
            console.warn('Пользователь запретил записывать.');
          }
        });
      });
    });
  }

  /** Streams one captured chunk to the server and logs it. */
  onAudioInput(data, socket) {
    socket.emit('audioinput', data.data);
    console.log(data.data);
  }

  /** Starts microphone capture. */
  start() {
    audioinput.start(this.recorderOptions());
  }

  /** Stops microphone capture. */
  stop() {
    audioinput.stop();
  }

  // Capture settings shared by initialize() and start():
  // 8192 Hz mono 16-bit PCM, raw-event delivery (no Web Audio streaming).
  private recorderOptions() {
    return {
      sampleRate: 8192,
      bufferSize: this.bufferSize,
      channels: 1,
      format: audioinput.FORMAT.PCM_16BIT,
      audioSourceType: audioinput.AUDIOSOURCE_TYPE.MIC,
      streamToWebAudio: false
    };
  }
}
和我的服務器端,用於通過移動麥克風播放語音
服務器通過socket.io接收到塊,並通過揚聲器播放:
// Socket.IO server for the live-microphone demo: receives Float32 PCM chunks
// from the mobile client and pipes them straight to the system speaker.
//
// Fixes over the posted version:
//  - express must be *called* to create an app; the bare module function was
//    being passed to http.Server().
//  - `counter` was never declared nor incremented, so the Speaker stream was
//    never created and the first write crashed on `speakerVoice` undefined.
//  - `array` was an implicit global.
const app = require('express')();
const http = require('http').Server(app);
const io = require('socket.io')(http);
const AudioBuffer = require('audio-buffer');
const Speaker = require('audio-speaker/stream');

// Reusable buffer matching the client's chunk size
// (2048 samples, mono, 8192 Hz capture rate).
const audioBuffer = new AudioBuffer({
  length: 2048,
  sampleRate: 8192,
  numberOfChannels: 1
});

let array = new Float32Array();
let speakerVoice;
let counter = 0;

io.on('connection', (socket) => {
  socket.on('message', (data) => {
  });

  socket.on('audioinput', (data) => {
    array = data;
    // Lazily open the speaker stream on the first chunk only; reopening it per
    // chunk is what exhausts ALSA/PulseAudio connections.
    if (counter === 0) {
      speakerVoice = new Speaker({
        sampleRate: 4096,
        float: true,
        signed: true,
        bitDepth: 16
      });
    }
    counter++;
    audioBuffer.copyToChannel(array, 0, 0);
    speakerVoice.write(audioBuffer);
  });
});

const port = process.env.PORT || 6500;
http.listen(port, () => {
  console.log('listening started');
});
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.