简体   繁体   中英

IBM Watson Speech to text handling of large files

I have been trying to use the BlueMix SpeechToText Java library and in particular the SpeechToText class within com.ibm.watson.developer_cloud.speech_to_text.v1.

I have long wav files that I would like to convert to text. The files are ~70MB. The goal is to use the Java API ( http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/speech-to-text/api/v1/?java#recognize ) to recognize the text. I realize that I will need to check the status of the call every 30 seconds, since after the transcription finishes I only have 30 seconds to retrieve the final results.

In order to do so while using the RESTful API, I need to create a session and then bind my recognition request to that session, so that I can query the status of the job running on it.

I have tried to create a session but the session is never available. I have verified that it seems to work on the provided webapp ( https://stream.watsonplatform.net/speech-to-text/api/v1/sessions?Method=GET ).

Further, I have tried writing my own client where I attempted to set the cookie retrieved from the session create and that did not work either.

I also tried to connect through secure websockets but was unable to achieve successful connection.

Below is some sample code that I have been using.

Any ideas?

/**
 * Sample client that sends a local WAV file to the IBM Watson Speech to Text
 * service, using the session-based HTTP API ({@link #httpClient()}) and,
 * optionally, a secure WebSocket connection ({@link #webSocketClient()}).
 *
 * NOTE(review): relies on SDK/third-party types (WatsonService, SpeechToText,
 * Stopwatch, HttpPost, WebSocketClient, JSONObject, ...) declared outside this
 * file; credentials and file paths are hard-coded placeholders and should come
 * from configuration in real code.
 */
public class Speech2Text extends WatsonService {

    private static final Logger logger = LoggerFactory.getLogger(Speech2Text.class);

    public static void main(String[] args) throws FileNotFoundException,
            UnsupportedEncodingException, InterruptedException {
        Speech2Text s2t = new Speech2Text();
        s2t.httpClient();
    }

    /**
     * Creates a session, lists the available models, recognizes a local WAV
     * file and writes the transcript to disk.
     *
     * @throws FileNotFoundException if the transcript file cannot be created
     * @throws UnsupportedEncodingException if UTF-8 is unsupported (never on a
     *         conforming JVM)
     */
    public void httpClient() throws FileNotFoundException, UnsupportedEncodingException {
        logger.info("Running http client");
        final Stopwatch stopwatch = Stopwatch.createStarted();

        SpeechToText service = new SpeechToText();
        // NOTE(review): credentials belong in configuration, not source code.
        service.setUsernameAndPassword("XXXXXX", "XXXXX");

        for (SpeechModel model : service.getModels()) {
            logger.info(model.getName());
        }

        SpeechSession session = service.createSession("en-US_NarrowbandModel");
        logger.info(session.toString());

        SessionStatus status = service.getRecognizeStatus(session);
        logger.info(status.getModel());
        logger.info(service.getEndPoint());

        File audio = new File("/home/baaron/watson-bluemix/answer_06.wav");
        // Typed map (was a raw Map/HashMap, which compiles with unchecked warnings).
        Map<String, Object> params = new HashMap<>();
        params.put("audio", audio);
        params.put("content_type", "audio/wav");
        params.put("continuous", "true");
        params.put("session_id", session.getSessionId());
        logger.info(service.getEndPoint());

        SpeechResults transcript = service.recognize(params);

        // try-with-resources: previously the PrintWriter leaked whenever any
        // call between its construction and writer.close() threw.
        try (PrintWriter writer = new PrintWriter(
                "/home/baaron/watson-bluemix/PCCJPApart1test.transcript", "UTF-8")) {
            writer.println(transcript.toString());
        }

        SessionStatus status1 = service.getRecognizeStatus(session.getSessionId());
        logger.info(String.valueOf(status1));
        service.deleteSession(session.getSessionId());

        stopwatch.stop();
        logger.info("Processing took: " + stopwatch + ".");
    }

    /**
     * Obtains a Watson authorization token over HTTP, then opens a secure
     * WebSocket to the recognize endpoint and sends the "start" action.
     *
     * @throws URISyntaxException if the WebSocket URI is malformed
     * @throws IOException if the token request fails or TLS cannot be set up
     * @throws InterruptedException if interrupted while connecting
     */
    public void webSocketClient() throws URISyntaxException, IOException,
            InterruptedException {
        logger.info("Running web socket client");

        // Basic-auth header for the token service; placeholder credentials.
        String encoding = Base64.encodeBase64String("XXXXXXXXXX".getBytes());
        HttpPost httppost = new HttpPost(
                "https://stream.watsonplatform.net/authorization/api/v1/token?url=https://stream.watsonplatform.net/speech-to-text/api");
        httppost.setHeader("Authorization", "Basic " + encoding);
        logger.info("executing request " + httppost.getRequestLine());

        DefaultHttpClient httpclient = new DefaultHttpClient();
        String token;
        try {
            HttpResponse response = httpclient.execute(httppost);
            logger.info(response.getStatusLine().getReasonPhrase());
            StringBuilder out = new StringBuilder();
            // try-with-resources: the reader over the entity stream was
            // previously never closed.
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(response.getEntity().getContent()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    out.append(line);
                }
            }
            token = out.toString();
        } finally {
            // Release the HTTP connection pool (was leaked before).
            httpclient.getConnectionManager().shutdown();
        }

        WebSocketImpl.DEBUG = true;
        final WebSocketClient client = new WebSocketClient(
                new URI("wss://stream.watsonplatform.net/speech-to-text-beta/api/v1/recognize?watson-token=" + token)) {
            @Override
            public void onMessage(String message) {
                // Parsing validates the payload; fields are not consumed yet.
                JSONObject obj = new JSONObject(message);
            }

            @Override
            public void onOpen(ServerHandshake handshake) {
                logger.info("opened connection");
            }

            @Override
            public void onClose(int code, String reason, boolean remote) {
                logger.info("closed connection");
            }

            @Override
            public void onError(Exception ex) {
                logger.error("WebSocket error", ex);
            }
        };

        // Fail fast with the cause preserved: the original swallowed these
        // exceptions, leaving sslContext null and causing an NPE below.
        SSLContext sslContext;
        try {
            sslContext = SSLContext.getInstance("TLS");
            sslContext.init(null, null, null);
        } catch (NoSuchAlgorithmException | KeyManagementException e) {
            throw new IOException("Unable to initialise TLS context", e);
        }
        client.setWebSocketFactory(new DefaultSSLWebSocketClientFactory(sslContext));

        logger.info("CONNECTED: " + client.connectBlocking());
        JSONObject start = new JSONObject();
        start.put("action", "start");
        start.put("content-type", "audio/wav");
        client.send(start.toString());
        logger.info("Done");
    }
}

Doing a GET on https://stream.watsonplatform.net/speech-to-text/api/v1/sessions will not list your sessions, even if they were created.

The way to check if you have a session is to do a GET on https://stream.watsonplatform.net/speech-to-text/api/v1/sessions/yourSessionId

If the session is there, you'll get a 200 response, otherwise a 404. Remember to enable cookies for this.

If what you want is to transcribe audio files you can do:

SpeechToText service = new SpeechToText();
// Fixed: the original line had mismatched quote/brace — "{username"} — which
// does not compile.
service.setUsernameAndPassword("{username}", "{password}");

RecognizeOptions options = new RecognizeOptions.Builder()
  .contentType("audio/wav")
  .continuous(true)
  .model("en-US_NarrowbandModel")
  .inactivityTimeout(-1) // Seconds after which the connection is closed if no audio is detected
  .build();

String[] files = {"file1.wav", "file2.wav"};
for (String file : files) {
  SpeechResults results = service.recognize(new File(file), options).execute();
  System.out.println(results); // print results (you could also write them to a file)
}

Make sure you use the newest version of the Java SDK.

Maven

<dependency>
  <groupId>com.ibm.watson.developer_cloud</groupId>
  <artifactId>java-sdk</artifactId>
  <version>3.8.0</version>
</dependency>

Gradle

compile 'com.ibm.watson.developer_cloud:java-sdk:3.8.0'

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM