[英]Wrong output language Azure Text-To-Speech
我使用 Azure 的 JavaScript 语音 SDK,并修改了 speechSynthesisLanguage 属性以更改输出语言,但它不起作用,输出语言还是英文。我遵循了文档,并使用了他们仓库中提供的演示代码。
(function() {
// <code>
"use strict";
// Pull in the required packages.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var readline = require("readline");
const { exec } = require("child_process");
// Replace with your own subscription key,
// service region (e.g., "westus"), and
// the name of the file you save the synthesized audio.
var subscriptionKey = "xxx";
var serviceRegion = "francecentral"; // e.g., "westus"
var filename = "test1.wav";
// We are done with the setup.
// Now create the audio-config pointing to our output file and
// the speech config specifying the language.
var audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
// BUG FIX: setProperty() with an arbitrary string key ('speechSynthesisLanguage',
// 'SpeechServiceConnection_RecoLanguage', ...) does NOT map to the SDK's real
// connection properties, so synthesis silently stayed on the default English
// voice. Use the dedicated SpeechConfig setters instead.
speechConfig.speechSynthesisLanguage = "fr-FR";
speechConfig.speechSynthesisVoiceName = "fr-FR-AlainNeural";
// Create the speech synthesizer writing to the audio file.
var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
var rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
rl.question("Test à saisir :\n", function (text) {
rl.close();
// Start the synthesizer and wait for a result.
synthesizer.speakTextAsync(text,
function (result) {
if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
console.log("synthesis finished.");
} else {
console.error("Speech synthesis canceled, " + result.errorDetails +
"\nDid you update the subscription info?");
}
synthesizer.close();
synthesizer = undefined;
},
function (err) {
console.trace("err - " + err);
synthesizer.close();
synthesizer = undefined;
});
console.log("Now synthesizing to: " + filename);
// BUG FIX: "$(unknown)" was a garbled placeholder — play the file we are
// writing. NOTE(review): this runs before speakTextAsync's callback fires,
// so the .wav may still be incomplete when VLC opens it — consider moving
// this into the success callback. Interpolating a path into a shell command
// is only safe because `filename` is a local constant, not user input.
exec(`vlc ${filename} vlc://quit`);
});
// </code>
}());
根据文档,输出语言已正确设置为 fr-FR。
console.log(speechConfig.getProperty('speechSynthesisLanguage')) //fr-FR
我还试过将语言更改为德语,或使用 setServiceProperty(string, string, ServicePropertyChannel)
方法,但都没有用。
你有解决方案吗? 谢谢
我假设您想将法语文本转换为法语语音。要做到这一点,我们需要设置两个配置项,即 speechSynthesisLanguage
和 speechSynthesisVoiceName,
它们分别表示所用语言的名称和语音的名称。
在这里,我创建了一个函数,它会提示您输入法语文本,并将合成的音频写入 YourAudioFile.wav
文件中。
代码:
(function () {
"use strict";
// Prompt for French text on stdin and synthesize it to a .wav file
// using the Azure Speech SDK.
const sdk = require("microsoft-cognitiveservices-speech-sdk");
const readline = require("readline");
const audioFile = "YourAudioFile.wav";
// Credentials come from the environment; output is written to audioFile.
const speechConfig = sdk.SpeechConfig.fromSubscription(process.env.SPEECH_KEY, process.env.SPEECH_REGION);
const audioConfig = sdk.AudioConfig.fromAudioFileOutput(audioFile);
// French language + a matching French neural voice.
speechConfig.speechSynthesisLanguage = "fr-FR";
speechConfig.speechSynthesisVoiceName = "fr-FR-BrigitteNeural";
let synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
rl.question("Enter some text that you want to speak >\n> ", (text) => {
rl.close();
synthesizer.speakTextAsync(
text,
(result) => {
// Success path: the SDK reports completion; anything else is a cancel.
if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
console.log("synthesis finished.");
} else {
console.error("Speech synthesis canceled, " + result.errorDetails + "\nDid you set the speech resource key and region values?");
}
synthesizer.close();
synthesizer = null;
},
(err) => {
// Transport/SDK-level failure: trace it and release the synthesizer.
console.trace("err - " + err);
synthesizer.close();
synthesizer = null;
}
);
console.log("Now synthesizing to: " + audioFile);
});
}());
上面的大部分代码来自这个关于文本到语音的MSDOC 。
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.