
firebase.firestore().onSnapshot fires two times

I use Firebase Hosting and would like to build this web app. (I use Windows 10, Windows Subsystem for Linux, Debian 10.3, and the Google Chrome browser.)

  1. push buttons and record audio (index.html + main.js)
  2. upload the audio file to Cloud Storage (main.js)
  3. transcribe the audio file using Cloud Speech to text API (index.js: cloud function)
  4. write the transcription on Cloud Firestore (index.js: cloud function)
  5. get transcription data from Firestore using .onSnapshot . put that data in a textarea (main.js)

Steps 1 to 4 work, but I am having difficulty with step 5. When I access the web app, it shows transcription data before I record any audio.

This data was created the last time I accessed the web app.

When I go through steps 1 to 5, I get another textarea, which is the one I want. Could you tell me how I can avoid the first textarea? Thank you in advance.

This is the browser's console:


This is main.js (client side):

const startRecording = document.getElementById('start-recording');
const stopRecording = document.getElementById('stop-recording');
let recordAudio;
startRecording.disabled = false;

// on start button handler
startRecording.onclick = function() {
    // recording started
    startRecording.disabled = true;

    // make use of HTML 5/WebRTC, JavaScript getUserMedia()
    // to capture the browser microphone stream
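    // NOTE: navigator.getUserMedia is deprecated; in current browsers the
    // equivalent call is navigator.mediaDevices.getUserMedia (Promise-based).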
    navigator.getUserMedia({
        audio: true
    }, function(stream) {
        recordAudio = RecordRTC(stream, {
            type: 'audio',
            mimeType: 'audio/webm',
            sampleRate: 44100, // this sampleRate should be the same in your server code

            // MediaStreamRecorder, StereoAudioRecorder, WebAssemblyRecorder
            // CanvasRecorder, GifRecorder, WhammyRecorder
            recorderType: StereoAudioRecorder,

            numberOfAudioChannels: 1,

            // get intervals based blobs
            // value in milliseconds
            // as you might not want to make detect calls every seconds
            timeSlice: 4000,

            // only for audio track
            // audioBitsPerSecond: 128000,

            // used by StereoAudioRecorder
            // the range 22050 to 96000.
            // let us force 16khz recording:
            desiredSampRate: 16000
        });

        recordAudio.startRecording();
        stopRecording.disabled = false;
    }, function(error) {
        console.error(JSON.stringify(error));
    });
};

// on stop button handler
stopRecording.onclick = function() {
    // recording stopped
    startRecording.disabled = false;
    stopRecording.disabled = true;

    // stop audio recorder
    recordAudio.stopRecording(function() {
      var blob = recordAudio.getBlob();

      // Create a root reference
      var storageRef = firebase.storage().ref();

      // Create the file metadata
      var metadata = {
        contentType: 'audio/wav'
      };

      // Upload the file and metadata to the object 'audio/speech3.wav'
      var uploadTask = storageRef.child('audio/speech3.wav').put(blob, metadata);

      // Listen for state changes, errors, and completion of the upload.
      uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED, // or 'state_changed'
        function(snapshot) {
          // Get task progress, including the number of bytes uploaded and the total number of bytes to be uploaded
          var progress = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
          console.log('Upload is ' + progress + '% done');
          switch (snapshot.state) {
            case firebase.storage.TaskState.PAUSED: // or 'paused'
              console.log('Upload is paused');
              break;
            case firebase.storage.TaskState.RUNNING: // or 'running'
              console.log('Upload is running');
              break;
          }
        }, function(error) {

        // A full list of error codes is available at
        // https://firebase.google.com/docs/storage/web/handle-errors
        switch (error.code) {
          case 'storage/unauthorized':
            // User doesn't have permission to access the object
            break;

          case 'storage/canceled':
            // User canceled the upload
            break;

          case 'storage/unknown':
            // Unknown error occurred, inspect error.serverResponse
            break;
        }
      }, function() {
        // Upload completed successfully, now we can get the download URL
        uploadTask.snapshot.ref.getDownloadURL().then(function(downloadURL) {
          console.log('File available at', downloadURL);
        });
      });                 
    });
};

firebase.firestore().collection('script').orderBy("timestamp", "desc").limit(1)
.onSnapshot(function(querySnapshot) {
  querySnapshot.forEach(function(doc) {
    console.log(doc.data().text);

    const textarea = document.createElement('textarea');
    textarea.value = doc.data().text;
    textarea.rows = '5';
    textarea.cols = '40';
    const parent = document.getElementById('textbox');
    parent.appendChild(textarea);
  });
});

This is index.js (Cloud Functions):

// Import the Firebase SDK for Google Cloud Functions.
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
const speech = require('@google-cloud/speech');

exports.transcribeAudio = functions.runWith({memory: '2GB'}).storage.object().onFinalize(
    async (object) => {
        // Creates a client
        const client = new speech.SpeechClient();

        const gcsUri = `gs://${object.bucket}/${object.name}`;
        const encoding = 'LINEAR16';
        const sampleRateHertz = 16000;
        const languageCode = 'en-US';

        const config = {
            encoding: encoding,
            sampleRateHertz: sampleRateHertz,
            languageCode: languageCode,
            enableAutomaticPunctuation: true
        };
        const audio = {
            uri: gcsUri,
        };

        const request = {
            config: config,
            audio: audio,
        };

        // Detects speech in the audio file. This creates a recognition job that you
        // can wait for now, or get its result later.
        const [operation] = await client.longRunningRecognize(request);
        // Get a Promise representation of the final result of the job
        const [response] = await operation.promise();
        const transcription = response.results
            .map(result => result.alternatives[0].transcript)
            .join('\n');
        console.log(`Transcription: ${transcription}`);

        // Return the write promise so the function instance is not
        // terminated before the document has been saved to Firestore.
        return admin.firestore().collection('script').add({
            text: transcription,
            timestamp: admin.firestore.FieldValue.serverTimestamp()
        });
    });

This is index.html:

<!doctype html>
<html lang="ja">
   <head>
      <meta name="robots" content="noindex">

      <title>音読アプリ アドバンス</title>
      <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
      <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
      <link href="https://fonts.googleapis.com/css?family=M+PLUS+Rounded+1c&display=swap" rel="stylesheet">
      <meta http-equiv="content-type" content="text/html; charset=utf-8" />

      <script src="https://www.WebRTC-Experiment.com/RecordRTC.js"></script>
   </head>

   <body>
      <div>
        <button id="start-recording" disabled>Start Recording</button>
        <button id="stop-recording" disabled>Stop Recording</button>
      </div>
      <div id="textbox"></div>

      <script src="https://code.jquery.com/jquery-3.4.1.slim.min.js" integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n" crossorigin="anonymous"></script>
      <script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script>
      <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js" integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6" crossorigin="anonymous"></script>

      <!-- Import and configure the Firebase SDK -->
      <!-- These scripts are made available when the app is served or deployed on Firebase Hosting -->
      <!-- If you do not want to serve/host your project using Firebase Hosting see https://firebase.google.com/docs/web/setup -->
      <script src="/__/firebase/7.14.3/firebase-app.js"></script>
      <script src="/__/firebase/7.14.3/firebase-auth.js"></script>
      <script src="/__/firebase/7.14.3/firebase-storage.js"></script>
      <script src="/__/firebase/7.14.3/firebase-messaging.js"></script>
      <script src="/__/firebase/7.14.3/firebase-firestore.js"></script>
      <script src="/__/firebase/7.14.3/firebase-performance.js"></script>
      <script src="/__/firebase/7.14.3/firebase-functions.js"></script>
      <script src="/__/firebase/init.js"></script>

      <script src="scripts/main.js"></script>
   </body>
</html>

This is because, as explained in the documentation, when you attach a listener you always get an initial call that delivers the current results of the query.

In your case, this initial call returns the last document that was created in your collection (because your query is defined with orderBy("timestamp", "desc").limit(1)).

You could maintain a counter that indicates whether it is the initial call or a subsequent one, along the following lines:

  let nbrCalls = 0;
  firebase
    .firestore()
    .collection('script')
    .orderBy('timestamp', 'desc')
    .limit(1)
    .onSnapshot(function (querySnapshot) {
      nbrCalls++;
      if (nbrCalls > 1) {
        querySnapshot.forEach(function (doc) {
          console.log(doc.data().text);
        });
      }
    });
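
Alternatively, since the Cloud Function stamps every document with serverTimestamp(), you can keep the initial call from matching anything by only querying for documents written after the page loaded. This is a minimal sketch, not part of the original answer; it also uses docChanges() so that each newly added transcription is processed exactly once:

  // Capture the moment the page was loaded.
  const pageLoadTime = firebase.firestore.Timestamp.now();

  firebase
    .firestore()
    .collection('script')
    // Only match documents whose server timestamp is later than page
    // load, so the initial snapshot is empty.
    .where('timestamp', '>', pageLoadTime)
    .orderBy('timestamp', 'desc')
    .onSnapshot(function (querySnapshot) {
      // docChanges() contains only what changed since the previous
      // snapshot, so old documents are never re-delivered.
      querySnapshot.docChanges().forEach(function (change) {
        if (change.type === 'added') {
          console.log(change.doc.data().text);
        }
      });
    });

Note that this relies on the client clock being reasonably in sync with the server timestamps, so the counter approach above is the more robust of the two.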
