![](/img/trans.png)
[英]How to play a base64 response from Google Text to Speech as an mp3 audio in browser using Javascript
[英]Send microphone audio recorder from browser to google speech to text - Javascript
將瀏覽器中的麥克風錄音發送到 Google 語音轉文字(Speech-to-Text)服務。不需要流和套接字,也不需要在客戶端(瀏覽器)直接向 Google 服務器發送 HTTP 請求——只需通過 Node.js 服務器轉發即可。
我面臨的問題:
完成了客戶端實現,以及服務器端實現。 兩種實現都彼此獨立工作。 我從麥克風獲取音頻數據,我可以播放它,以及能夠使用Google提供的audio.raw示例測試服務器端實現。
但是,當我嘗試將麥克風數據從瀏覽器發送到我的節點服務器然后再發送到Google服務器時,我收到編碼問題:“從谷歌服務器獲取空響應”。
我的問題是如何更改音頻文件的編碼,然后使用Javascript將其發送到Google Speech to Text服務器。
我已經玩過了,我可以使用Google API和瀏覽器錄音來發表文字。 我想知道配置對象是否是您遇到問題的原因。
我使用的組件是Node.js服務器:server.js和一個簡單的客戶端(index.html和client-app.js)。 全部在同一個文件夾中。
我正在使用Google語音到文本客戶端庫 ,因此您需要添加Google API密鑰文件(APIKey.json)來提供憑據。
如果您運行節點服務器,則將瀏覽器指向http:// localhost:3000 / ,這應該允許您測試代碼。
我從這里開始使用Matt Diamond的Recorder.js代碼繪制了很多客戶端代碼。
server.js
// server.js — Express app that accepts audio uploads from the browser and
// forwards them to Google Cloud Speech-to-Text for transcription.
const express = require('express');
const multer = require('multer');
const fs = require('fs');
// multer() with no storage option keeps uploads in memory (req.files[n].buffer)
const upload = multer();
const app = express();
const port = 3000;
// Serve index.html and client-app.js from the current directory.
app.use(express.static('./'));
/**
 * Sends a recorded audio buffer to the Google Cloud Speech-to-Text
 * `recognize` API and returns the combined transcript.
 *
 * @param {Buffer} audioBuffer - Raw audio bytes as uploaded by the client.
 * @returns {Promise<string>} Top transcript of each result, newline-joined.
 */
async function testGoogleTextToSpeech(audioBuffer) {
  const speech = require('@google-cloud/speech');
  // Credentials are read from the local service-account key file.
  const client = new speech.SpeechClient({ keyFilename: "APIKey.json" });
  const request = {
    audio: { content: audioBuffer.toString('base64') },
    config: { languageCode: 'en-US' },
  };
  const [response] = await client.recognize(request);
  const transcripts = [];
  for (const result of response.results) {
    transcripts.push(result.alternatives[0].transcript);
  }
  return transcripts.join('\n');
}
// POST /upload_sound — accept a multipart audio upload and reply with the
// transcription as plain text.
// Fix: the original `await` had no try/catch, so any recognition error became
// an unhandled promise rejection and the HTTP request hung with no response.
// Now mirrors the error handling of the router version later in this thread.
app.post('/upload_sound', upload.any(), async (req, res) => {
  console.log("Getting text transcription..");
  try {
    const transcription = await testGoogleTextToSpeech(req.files[0].buffer);
    console.log("Text transcription: " + transcription);
    res.status(200).send(transcription);
  } catch (error) {
    console.log(error);
    res.status(400).send(String(error));
  }
});
// Start the HTTP server.
app.listen(port, () =>
  console.log(`Express server listening on port: ${port}...`)
);
index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Speech to text test</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" type="text/css" href="https://bootswatch.com/4/cerulean/bootstrap.min.css">
</head>
<body style="padding:50px;">
<h1>Speech to text test</h1>
<!-- "Record" starts mic capture; "Get transcription" stops it, uploads the WAV,
     and client-app.js renders the server's response into #output below. -->
<div id="controls">
<button id="recordButton">Record</button>
<button id="transcribeButton" disabled>Get transcription</button>
</div>
<div id="output"></div>
<!-- Matt Diamond's Recorder.js: captures mic audio and exports WAV blobs -->
<script src="https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js"></script>
<script src="client-app.js"></script>
</body>
</html>
client-app.js
// client-app.js — records mic audio with Recorder.js and posts it to the server.
let rec = null;          // active Recorder.js instance
let audioStream = null;  // MediaStream from getUserMedia, kept so its track can be stopped
const recordButton = document.getElementById("recordButton");
const transcribeButton = document.getElementById("transcribeButton");
recordButton.addEventListener("click", startRecording);
transcribeButton.addEventListener("click", transcribeText);
// Request mic access and start recording one (mono) channel.
// Fixes: (1) the getUserMedia failure was swallowed silently — now logged;
// (2) fall back to the webkit-prefixed AudioContext on older browsers,
// consistent with the React version later in this thread.
function startRecording() {
  let constraints = { audio: true, video:false }
  recordButton.disabled = true;
  transcribeButton.disabled = false;
  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)();
    audioStream = stream;
    const input = audioContext.createMediaStreamSource(stream);
    rec = new Recorder(input, { numChannels:1 })
    rec.record()
  }).catch(function(err) {
    console.error("getUserMedia failed:", err); // don't swallow the error
    recordButton.disabled = false;
    transcribeButton.disabled = true;
  });
}
// Stop recording, release the microphone track, then export the capture as a
// WAV blob; Recorder.js invokes uploadSoundData with the finished blob.
function transcribeText() {
  transcribeButton.disabled = true;
  recordButton.disabled = false;
  rec.stop();
  const [micTrack] = audioStream.getAudioTracks();
  micTrack.stop();
  rec.exportWAV(uploadSoundData);
}
// Upload the WAV blob to the server and render the transcription into #output.
// Fixes: check the HTTP status so server-side failures are reported as errors
// instead of being rendered as a transcription result; the readyState check
// was redundant because `onload` only fires once the request is DONE.
function uploadSoundData(blob) {
  const filename = new Date().toISOString(); // unique-enough upload name
  const xhr = new XMLHttpRequest();
  xhr.onload = function(e) {
    const output = document.getElementById("output");
    if (this.status === 200) {
      output.innerHTML = `<br><br><strong>Result: </strong>${e.target.responseText}`;
    } else {
      output.innerHTML = `<br><br><strong>Error (${this.status}): </strong>${e.target.responseText}`;
    }
  };
  let formData = new FormData();
  formData.append("audio_data", blob, filename);
  xhr.open("POST", "/upload_sound", true);
  xhr.send(formData);
}
@terry-lennox,非常感謝你清晰明確的回答。
但我使用React作為我的前端,所以得到了一個名為recorder-js的npm包
該代碼僅供參考,以便將來看到這篇文章。
import React, { Component } from 'react';
import Recorder from 'recorder-js';
import micGrey from './mic-grey.svg';
import micWhite from './mic-white.svg';
import './App.css';
var recorder = null;
var audioStream = null;
class App extends Component {
constructor(props) {
super(props);
this.mic = React.createRef();
this.accessMic = this.accessMic.bind(this);
this.handleClick = this.handleClick.bind(this);
this.handleClick = this.handleClick.bind(this);
this.handleSuccess = this.handleSuccess.bind(this);
this.stopAccessingMic = this.stopAccessingMic.bind(this);
this.getTextFromGoogle = this.getTextFromGoogle.bind(this);
this.state = {
isMicActive: false
};
}
accessMic() {
const audioContext = new (window.AudioContext ||
window.webkitAudioContext)();
recorder = new Recorder(audioContext);
navigator.mediaDevices
.getUserMedia({ audio: true })
.then(this.handleSuccess)
.catch(err => console.log('Uh oh... unable to get stream...', err));
}
handleSuccess(stream) {
audioStream = stream;
recorder.init(stream);
recorder.start();
}
getTextFromGoogle(blob) {
let filename = new Date().toISOString();
let xhr = new XMLHttpRequest();
xhr.onload = function(e) {
if (this.readyState === 4) {
console.log(e.target.responseText);
}
};
let formData = new FormData();
formData.append('audio_data', blob, filename);
xhr.open('POST', 'http://localhost:3000/', true);
xhr.send(formData);
}
handleClick() {
const isMicActive = this.state.isMicActive;
this.setState({
isMicActive: !isMicActive
});
if (!isMicActive) {
this.checkPermissions();
this.accessMic();
} else {
this.stopAccessingMic();
}
}
stopAccessingMic() {
audioStream && audioStream.getTracks()[0].stop();
recorder.stop().then(({ blob, buffer }) => {
this.getTextFromGoogle(blob);
});
}
checkPermissions() {
navigator.permissions
.query({ name: 'microphone' })
.then(permissionObj => {
console.log('Permission status - ', permissionObj.state);
})
.catch(error => {
console.log('Permission status - Got error :', error);
});
}
render() {
return (
<div className='App'>
<div
id='mic'
ref={this.mic}
onClick={this.handleClick}
className={
this.state.isMicActive ? 'mic-btn mic-btn-active' : 'mic-btn'
}
>
<img src={this.state.isMicActive ? micWhite : micGrey} alt='mic' />
</div>
</div>
);
}
}
export default App;
以及供參考的後端代碼。我遇到了一個小問題:錯誤提示「必須使用單通道(單聲道)音頻」。為了解決它,我參考了下面的鏈接,需要在 config 中添加 audioChannelCount: 2。
// Router setup for the transcription endpoint.
// Fix: `express` is used on the next line but was never required in this
// snippet — the module would crash with a ReferenceError as posted.
const express = require('express');
var router = express.Router();
const multer = require('multer');
const fs = require('fs');
const upload = multer();
// Point the Google client library at the service-account key file.
// NOTE(review): hard-coded absolute Windows path — replace with your own
// credentials location before deploying.
process.env.GOOGLE_APPLICATION_CREDENTIALS =
'C:/Users/user/Desktop/Speech-to-Text-e851cb3889e5.json';
/* GET home page. */
// POST / — accept a multipart audio upload, transcribe it, and return the text.
router.post('/', upload.any(), async (req, res, next) => {
  console.log('Getting text transcription..');
  const [uploadedFile] = req.files;
  try {
    const transcription = await testGoogleTextToSpeech(uploadedFile.buffer);
    console.log('Text transcription: ' + transcription);
    res.status(200).send(transcription);
  } catch (error) {
    // Report transcription failures to the client instead of hanging.
    console.log(error);
    res.status(400).send(error);
  }
});
/**
 * Transcribes an audio buffer via Google Cloud Speech-to-Text.
 *
 * Fix: the original caught errors and RETURNED the error object, so the
 * route's try/catch never fired and clients received the error text with an
 * HTTP 200 success status. Errors now propagate to the caller, which already
 * handles them and responds with 400.
 *
 * @param {Buffer} audioBuffer - Uploaded audio bytes (stereo WAV from the browser).
 * @returns {Promise<string>} Top transcript of each result, newline-joined.
 */
async function testGoogleTextToSpeech(audioBuffer) {
  const speech = require('@google-cloud/speech');
  // Credentials come from GOOGLE_APPLICATION_CREDENTIALS set above.
  const client = new speech.SpeechClient();
  const request = {
    audio: { content: audioBuffer.toString('base64') },
    config: {
      languageCode: 'en-US',
      // Browser recordings here are 2-channel; without this the API rejects
      // them with "must use single channel (mono) audio".
      audioChannelCount: 2
    }
  };
  const [response] = await client.recognize(request);
  return response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
}
module.exports = router;
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.