
Google speech to text API in C#

My program gets the correct response from Google when the flac file is recorded manually with Windows Sound Recorder and converted with a software converter.
However, when I use the file recorded by my program, I get "{"result":[]}" from Google. What should I do? Here is my code:
Sender:

    private static void CopyStream(FileStream fileStream, Stream requestStream)
    {
        var buffer = new byte[32768];
        int read;
        while ((read = fileStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            requestStream.Write(buffer, 0, read);
        }
    }

    private static void ConfigureRequest(HttpWebRequest request)
    {
        request.KeepAlive = true;
        request.SendChunked = true;
        request.ContentType = "audio/x-flac; rate=44100";
        request.UserAgent =
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2";
        request.Headers.Set(HttpRequestHeader.AcceptEncoding, "gzip,deflate,sdch");
        request.Headers.Set(HttpRequestHeader.AcceptLanguage, "en-GB,en-US;q=0.8,en;q=0.6");
        request.Headers.Set(HttpRequestHeader.AcceptCharset, "ISO-8859-1,utf-8;q=0.7,*;q=0.3");
        request.Method = "POST";
    }
    using (var fileStream = new FileStream(@"C:\Users\Ahmad Mustofa\Documents\Visual Studio 2010\Projects\FP\FP\bin\Debug\voice.flac", FileMode.Open))
    {
        const string requestUrl = "https://www.google.com/speech-api/v2/recognize?output=json&lang=ar-sa&key=AIzaSyBJ6VJ326Rpb23msih2wGhXENEwU1TF1PA&client=chromium&maxresults=1&pfilter=2";
        var request = (HttpWebRequest)WebRequest.Create(requestUrl);
        ConfigureRequest(request);
        var requestStream = request.GetRequestStream();
        CopyStream(fileStream, requestStream);

        using (var response = request.GetResponse())
        {
            using (var responseStream = response.GetResponseStream())
            {
                using (var zippedStream = new GZipStream(responseStream, CompressionMode.Decompress))
                {
                     using (var sr = new StreamReader(zippedStream))
                     {
                          var res = sr.ReadToEnd();
                          state.Text = res;
                     }
                }
            }
        }
    }

WAV recorder:

        private void sourceStream_DataAvailable(object sender, NAudio.Wave.WaveInEventArgs e)
        {
             if (waveWriter == null) return;

             waveWriter.WriteData(e.Buffer, 0, e.BytesRecorded);
             waveWriter.Flush();
        }
        fileName = "C:\\Users\\Ahmad Mustofa\\Documents\\Visual Studio 2010\\Projects\\FP\\FP\\bin\\debug\\voice.wav";
        int deviceNumber = hardware.SelectedItems[0].Index;
        try
        {
            sourceStream = new NAudio.Wave.WaveIn();
            sourceStream.DeviceNumber = deviceNumber;
            sourceStream.WaveFormat = new NAudio.Wave.WaveFormat(44100, NAudio.Wave.WaveIn.GetCapabilities(deviceNumber).Channels);

            sourceStream.DataAvailable += new EventHandler<NAudio.Wave.WaveInEventArgs>(sourceStream_DataAvailable);
            waveWriter = new NAudio.Wave.WaveFileWriter(fileName, sourceStream.WaveFormat);

            sourceStream.StartRecording();
        }
        catch (Exception ex)
        {
            state.Text = "disini" + ex.Message;
        }

FLAC converter:

        string inputFile = Path.Combine("wav ", input);
        string outputFile = Path.Combine("flac", Path.ChangeExtension(input, ".flac"));

        if (!File.Exists(inputFile))
            throw new ApplicationException("Input file " + inputFile + " cannot be found!");

        WavReader wav = new WavReader(inputFile);

        using (var flacStream = File.Create(outputFile))
        {
            FlacWriter flac = new FlacWriter(flacStream, wav.BitDepth, wav.Channels, wav.SampleRate);
            // Buffer for 1 second's worth of audio data
            byte[] buffer = new byte[wav.Bitrate / 8];
            int bytesRead;
            do
            {
                bytesRead = wav.InputStream.Read(buffer, 0, buffer.Length);
                flac.Convert(buffer, 0, bytesRead);
            } while (bytesRead > 0);
            flac.Dispose();
            flac = null;
        }

I had the same problem but came up with a neat solution. I used Fiddler ( http://www.telerik.com/fiddler/ ) to see how Chrome performs speech recognition, and then wrote some code to simulate Chrome sending the request. This approach uses a different URI, and it also includes a 16-character value called pair that is different for every request. I used a simple random-value generator function to create one for each request, and I also changed the output value to "json".

Note: the result can sometimes be empty, as in the case above, but the response also contains another json object (see the parsing sketch after the code below).

    private void GoogleSpeechToText()
    {
        string uri = "https://www.google.com/speech-api/full-duplex/v1/up?output=json&key=AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw&pair=" + GenerateUnique(16) + "&lang=en-US&pFilter=2&maxAlternatives=10&client=chromium";
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(uri);
        request.Timeout = 10000;
        request.Method = "POST";
        request.Host = "www.google.com";            
        request.KeepAlive = true;
        request.SendChunked = true;
        request.ContentType = "audio/x-flac; rate=16000";
        request.Headers.Set(HttpRequestHeader.AcceptLanguage, "en-GB,en-US;q=0.8,en;q=0.6");
        request.UserAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36";

        string path = @"C:\TestFolder\test_audio.flac";     
        FileInfo fInfo = new FileInfo(path);
        var numBytes = fInfo.Length;
        byte[] data;

        using (FileStream fStream = new FileStream(path, FileMode.Open, FileAccess.Read))
        {
            data = new Byte[numBytes];
            fStream.Read(data, 0, (int) numBytes);
            fStream.Close();
        }

        using (Stream reqStream = request.GetRequestStream())
            reqStream.Write(data, 0, data.Length);

        try
        {
            WebResponse response = request.GetResponse();
            Stream respStream = response.GetResponseStream();

            if(response.ContentType == "application/json; charset=utf-8")
            {                    
                using (var sr = new StreamReader(respStream))
                {
                    var res = sr.ReadToEnd();
                    textBox1.Text = res;                        
                }
            }
        }
        catch (Exception ex) { MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK); }            
    }

    private string GenerateUnique(int length)
    {
        string[] LETTERS = new string[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" };
        string[] DIGITS = new string[] { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" };
        string buffer = "";
        Random random = new Random();

        for(int i = 0; i < length; i++)
        {                
            int rnd = random.Next(2);
            if (rnd == 1)
                buffer += LETTERS[random.Next(LETTERS.Length)];
            else
                buffer += DIGITS[random.Next(DIGITS.Length)];
        }
        return buffer;
    }
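
As noted above, the body that ends up in res can contain several newline-separated JSON objects, and the first one is often empty. Below is a minimal sketch of pulling a transcript out of that string; Newtonsoft.Json and the property names (result, alternative, transcript) are assumptions based on the response format shown elsewhere in this thread:

    // Sketch: pick the first object whose "result" array is non-empty
    // and return its top alternative's transcript (null if none found).
    private static string ExtractTranscript(string body)
    {
        foreach (var line in body.Split('\n'))
        {
            if (string.IsNullOrWhiteSpace(line))
                continue;
            var obj = Newtonsoft.Json.Linq.JObject.Parse(line);
            var result = obj["result"] as Newtonsoft.Json.Linq.JArray;
            if (result == null || result.Count == 0)
                continue;
            return (string)result[0]["alternative"][0]["transcript"];
        }
        return null;
    }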

This is explicitly mentioned in the Google Cloud API docs, at

https://cloud.google.com/speech-to-text/docs/async-recognize#speech-async-recognize-gcs-protocol

If the operation has not completed, you can poll the endpoint by repeatedly making the GET request until the done property of the response is true.

    {
      "name": "operationname here",
      "metadata": {
        "@type": "type.googleapis.com/google.cloud.speech.v1.LongRunningRecognizeMetadata",
        "progressPercent": 0,
        "startTime": "2018-12-18T10:56:09.425584Z",
        "lastUpdateTime": "2018-12-18T11:10:27.147310Z"
      },
      "done": true
    }

Poll the endpoint by repeatedly making the GET request until the done property of the response is true; alternatively, you can watch "progressPercent" until its value reaches 100. Once it is at 100%, the done property becomes true.

I did the same in my code using the operation name; for reference:

    public async Task<string> TranscribeLongMediaFile(string operationName)
    {
        string bearerToken = GetOAuthToken();
        var baseUrl = new Uri(googleSpeechBaseUrl + operationName);
        string resultContent = string.Empty;
        using (var client = new HttpClient())
        {
            client.DefaultRequestHeaders.Add(HttpRequestHeader.Authorization.ToString(), "Bearer " + bearerToken);
            client.DefaultRequestHeaders.Add(HttpRequestHeader.ContentType.ToString(), "application/json; charset=utf-8");

            client.Timeout = TimeSpan.FromMilliseconds(Timeout.Infinite);

            int currentPercentage = 0;
            bool responseStatus = false;
            while (!responseStatus)
            {
                responseStatus = false;
                // Send request
                using (var result = await client.GetAsync(baseUrl))
                {
                    resultContent = await result.Content.ReadAsStringAsync();

                    ResponseObject responseObject = JsonConvert.DeserializeObject<ResponseObject>(resultContent);
                    currentPercentage = responseObject.metadata.progressPercent;
                    responseStatus = (responseObject.done && currentPercentage == 100);

                    // Delay the next GET request based on the current percentage; keep polling until the done property of the response is true.
                    await Task.Delay(CalculateDealy(currentPercentage));
                }
            }
        };
        return resultContent;
    }
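
The ResponseObject type and the googleSpeechBaseUrl field are not shown in this answer. A minimal sketch of what they might look like, assuming the operations endpoint from the linked docs and covering only the fields the polling loop reads:

    // Assumed operations endpoint from the linked docs (not shown in the answer).
    private const string googleSpeechBaseUrl = "https://speech.googleapis.com/v1/operations/";

    // Minimal DTOs matching the polling JSON shown above.
    public class ResponseObject
    {
        public string name { get; set; }
        public OperationMetadata metadata { get; set; }
        public bool done { get; set; }
    }

    public class OperationMetadata
    {
        public int progressPercent { get; set; }
        public string startTime { get; set; }
        public string lastUpdateTime { get; set; }
    }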

To delay the GET request:

    /// <summary>
    /// Delay the next request by a number of milliseconds based on the current progress
    /// </summary>
    /// <param name="currentPercentage"></param>
    /// <returns></returns>
    private int CalculateDealy(int currentPercentage)
    {
        int x = currentPercentage / 10;
        return (10 - x) * 1500;
    }
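
For example, at progressPercent = 0 this waits (10 - 0) * 1500 = 15000 ms before the next poll, while at 90% it waits only (10 - 9) * 1500 = 1500 ms, so polling becomes more frequent as the operation nears completion.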

To get the auth token:

    /// <summary>
    /// Get OAuth token
    /// </summary>
    /// <returns></returns>
    public string GetOAuthToken()
    {
        return googleCredential.UnderlyingCredential.GetAccessTokenForRequestAsync("https://accounts.google.com/o/oauth2/v2/auth", CancellationToken.None).Result;
    }

Finally, you will get a result like this:

    {
      "name": "operationname here",
      "metadata": {
        "@type": "type.googleapis.com/google.cloud.speech.v1.LongRunningRecognizeMetadata",
        "progressPercent": 100,
        "startTime": "2018-12-18T10:56:09.425584Z",
        "lastUpdateTime": "2018-12-18T11:10:27.147310Z"
      },
      "done": true,
      "response": {
        "@type": "type.googleapis.com/google.cloud.speech.v1.LongRunningRecognizeResponse",
        "results": [
          {
            "alternatives": [
              {
                "transcript": "okay let's get started",
                "confidence": 0.97442055
              }
            ]
          }, and so on .....

Things needed:

  1. an api-key.json file
  2. install the Google.Apis.Auth.OAuth2 package to authorize the HTTP web request (a sketch of wiring these together follows below)
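
A minimal sketch of how the googleCredential used in GetOAuthToken might be built from that api-key.json file, assuming the Google.Apis.Auth.OAuth2 package (the file name and scope here are assumptions):

    using Google.Apis.Auth.OAuth2;

    // Sketch: load the service-account key file and scope it for the Speech-to-Text API.
    GoogleCredential googleCredential = GoogleCredential
        .FromFile("api-key.json")
        .CreateScoped("https://www.googleapis.com/auth/cloud-platform");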

Thanks

Use this to get the utterance (the phrase) and the confidence (%):

        string toParse = (VALUE RETURNED BY GOOGLE);
        var trsc1 = @"transcript"":""";
        var trsc2 = @""",""confidence"":";
        var trsc3 = @"}],""final"":";
        var start = toParse.IndexOf(trsc1) + trsc1.Length;
        var end = toParse.IndexOf(trsc2);
        var end2 = toParse.IndexOf(trsc3);
        var vv1 = toParse.Substring(start, end - start);
        var vv2 = toParse.Substring(end + trsc2.Length, end2 - (end + trsc2.Length));
        vv2 = vv2.Trim().Replace(".", ",");

        float confidence = (float)Math.Round(double.Parse(vv2), 2);
        string utterance = vv1;
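
Note that the Replace(".", ",") call assumes the machine's culture uses a comma as the decimal separator; parsing with double.Parse(vv2, CultureInfo.InvariantCulture) would make that replacement unnecessary.
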
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.Net;
using Newtonsoft.Json;

namespace google_speech_api_trial4
{
    class Program
    {
        public static string ACCESS_GOOGLE_SPEECH_KEY = "AIzaSyDC8nM1S0cLpXvRc8TXrDoey-tqQsoBGnM";

        static void Main(string[] args)
        {
            GoogleSpeechRequest();
            Console.ReadLine();
        }

        public static void GoogleSpeechRequest()
        {
            FileStream fileStream = File.OpenRead("my.flac");
            MemoryStream memoryStream = new MemoryStream();
            memoryStream.SetLength(fileStream.Length);
            fileStream.Read(memoryStream.GetBuffer(), 0, (int)fileStream.Length);
            byte[] BA_AudioFile = memoryStream.GetBuffer();
            HttpWebRequest _HWR_SpeechToText = null;
            _HWR_SpeechToText = (HttpWebRequest)HttpWebRequest.Create("https://www.google.com/speech-api/v2/recognize?output=json&lang=en-us&key=" + ACCESS_GOOGLE_SPEECH_KEY);
            _HWR_SpeechToText.Credentials = CredentialCache.DefaultCredentials;
            _HWR_SpeechToText.Method = "POST";
            _HWR_SpeechToText.ContentType = "audio/x-flac; rate=44100";
            _HWR_SpeechToText.ContentLength = BA_AudioFile.Length;
            Stream stream = _HWR_SpeechToText.GetRequestStream();
            stream.Write(BA_AudioFile, 0, BA_AudioFile.Length);
            stream.Close();
            HttpWebResponse HWR_Response = (HttpWebResponse)_HWR_SpeechToText.GetResponse();

            StreamReader SR_Response = new StreamReader(HWR_Response.GetResponseStream());
            string responseFromServer = (SR_Response.ReadToEnd());

            String[] jsons = responseFromServer.Split('\n');
            String text = "";
            foreach (String j in jsons)
            {
                dynamic jsonObject = JsonConvert.DeserializeObject(j);
                if (jsonObject == null || jsonObject.result.Count <= 0)
                {
                    continue;
                }
                text = jsonObject.result[0].alternative[0].transcript;
            }
            Console.WriteLine(text);
        }
    }
}

I searched for 3 hours, and when printing the text I kept getting {"result":[]}. I thought it wasn't converting the audio. But the JSON response has two lines; the second line contains the audio text. To print it, we need to parse it. Oh, I also had a lot of trouble with imports, references, and using statements. But in the end this code worked.
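
For reference, the raw body returned by the v2 endpoint looks roughly like this (the transcript and confidence values are only illustrative):

    {"result":[]}
    {"result":[{"alternative":[{"transcript":"hello world","confidence":0.95}],"final":true}],"result_index":0}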
