How can I extract the preceding audio (from microphone) as a buffer when silence is detected (JS)?

后悔当初 2020-12-05 11:20

I'm using the Google Cloud API for Speech-to-Text, with a NodeJS back-end. The app needs to be able to listen for voice commands, and transmit them to the back-end as a buffer…

3 Answers
  • 2020-12-05 11:43

    I'm not entirely sure what exactly is being asked in the question, so this answer is only intended to give a way to detect silences in an AudioStream.


    To detect silence in an AudioStream, you can use an AnalyserNode, on which you will call the getByteFrequencyData method at regular intervals and check whether there were sounds higher than your expected level for a given time.

    You can set the threshold level directly with the minDecibels property of the AnalyserNode.

    function detectSilence(
      stream,
      onSoundEnd = _ => {},
      onSoundStart = _ => {},
      silence_delay = 500,
      min_decibels = -80
    ) {
      const ctx = new AudioContext();
      const analyser = ctx.createAnalyser();
      const streamNode = ctx.createMediaStreamSource(stream);
      streamNode.connect(analyser);
      analyser.minDecibels = min_decibels;
    
      const data = new Uint8Array(analyser.frequencyBinCount); // will hold our frequency data
      let silence_start = performance.now();
      let triggered = false; // trigger only once per silence event
    
      function loop(time) {
        requestAnimationFrame(loop); // loop on every animation frame (~60 times per second)
        analyser.getByteFrequencyData(data); // get the current frequency data
        if (data.some(v => v)) { // if there is data above the given dB limit
          if (triggered) {
            triggered = false;
            onSoundStart();
          }
          silence_start = time; // set it to now
        }
        if (!triggered && time - silence_start > silence_delay) {
          onSoundEnd();
          triggered = true;
        }
      }
      loop(performance.now());
    }
    
    function onSilence() {
      console.log('silence');
    }
    function onSpeak() {
      console.log('speaking');
    }
    
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        detectSilence(stream, onSilence, onSpeak);
        // do something else with the stream
      })
      .catch(console.error);

    There is also a fiddle version, since Stack Snippets may block getUserMedia().
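
    If the goal is to also extract the audio that preceded the silence as a buffer, one option (a minimal sketch, not part of the snippet above) is to run a MediaRecorder on the same stream and request its buffered data from the silence callback; the ondataavailable handler then receives everything recorded since the last request as a Blob, which can be read into an ArrayBuffer:

    // Sketch: combine detectSilence() above with a MediaRecorder on the same stream.
    // Assumes the detectSilence(stream, onSoundEnd, onSoundStart) function from the previous snippet.
    navigator.mediaDevices.getUserMedia({ audio: true })
      .then(stream => {
        const recorder = new MediaRecorder(stream);
    
        recorder.ondataavailable = async (e) => {
          // e.data is a Blob holding the audio recorded since the last requestData() call
          const buffer = await e.data.arrayBuffer();
          console.log("chunk preceding silence:", buffer.byteLength, "bytes");
          // this is the point where the buffer could be POSTed to the back-end
        };
    
        recorder.start(); // record continuously; chunks are only emitted on requestData()
    
        detectSilence(
          stream,
          () => { // onSoundEnd: silence detected, flush what was recorded so far
            if (recorder.state === "recording") recorder.requestData();
          },
          () => { /* onSoundStart: speech resumed */ }
        );
      })
      .catch(console.error);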

  • 2020-12-05 11:45

    The simplest approach would be to use the .pause(), .resume() and .stop() methods of MediaRecorder to let the user start, pause, and stop recording the audio captured with navigator.mediaDevices.getUserMedia(), and to convert the resulting Blob to an ArrayBuffer, if that is what the API expects to be POSTed to the server.

    <!DOCTYPE html>
    <html>
    
    <head>
      <title>User Media Recording</title>
    </head>
    
    <body>
      <input type="button" value="Start/resume recording audio" id="start">
      <input type="button" value="Pause recording audio" id="pause">
      <input type="button" value="Stop recording audio" id="stop">
      <script>
        navigator.mediaDevices.getUserMedia({
            audio: true
          })
          .then(stream => {
            const recorder = new MediaRecorder(stream);
    
            recorder.ondataavailable = async(e) => {
              if (stream.active) {
                try {
                  const blobURL = URL.createObjectURL(e.data);
                  const request = await fetch(blobURL);
                  const ab = await request.arrayBuffer();
                  // do stuff with `ArrayBuffer` of recorded audio
                  console.log(blobURL, ab);
                  // we do not need the `Blob URL`, we can revoke the object
                  // URL.revokeObjectURL(blobURL);
                } catch (err) {
                  throw err
                }
              }
            }
            recorder.onpause = e => {
              console.log("recorder " + recorder.state);
              recorder.requestData();
            }
    
            stream.oninactive = () => {
              console.log("stream ended");
            }
    
            document.getElementById("start")
              .onclick = () => {
    
                if (recorder.state === "inactive") {
                  recorder.start();
                } else {
                  recorder.resume();
                }
                console.log("recorder.state:", recorder.state);
              }
    
            document.getElementById("pause")
              .onclick = () => {
    
                if (recorder.state === "recording") {
                  recorder.pause();
                }
                console.log("recorder.state:", recorder.state);
              }
    
            document.getElementById("stop")
              .onclick = () => {
    
                if (recorder.state === "recording" || recorder.state === "paused") {
                  recorder.stop();
                }
    
                for (let track of stream.getTracks()) {
                  track.stop();
                }
    
                document.getElementById("start").onclick = null;
                document.getElementById("pause").onclick = null;
                console.log("recorder.state:", recorder.state
                , "stream.active", stream.active);
              }
    
          })
          .catch(err => {
            console.error(err)
          });
      </script>
    </body>
    
    </html>
    

    plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview
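
    Once the ArrayBuffer is available in ondataavailable, it can be sent to the NodeJS back-end, for example with fetch(). This is only a sketch; the "/upload-audio" endpoint name is an assumption, not something defined by the question or the answer:

    // Sketch: POST the recorded audio to the back-end as a raw binary body.
    // The "/upload-audio" endpoint is hypothetical.
    async function sendToBackend(arrayBuffer) {
      const response = await fetch("/upload-audio", {
        method: "POST",
        headers: { "Content-Type": "application/octet-stream" },
        body: arrayBuffer // fetch() accepts an ArrayBuffer directly as the request body
      });
      return response.ok;
    }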

  • 2020-12-05 11:46

    You can use the SpeechRecognition result event to determine when a word or phrase has been recognized, for example ls, cd, pwd or other commands, then pass the .transcript of the SpeechRecognitionAlternative to speechSynthesis.speak(); in the attached start and end events of the SpeechSynthesisUtterance, call .start() or .resume() on the MediaRecorder object to which the MediaStream is passed, and convert the Blob from the dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().

    We could alternatively use the audiostart or soundstart events together with the audioend or soundend events of SpeechRecognition to record the user's actual voice, though those events may not fire consistently in relation to the actual start and end of the audio captured by a standard system microphone.
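
    As a rough sketch of that alternative (the full example below uses the result event instead), the sound events could drive the recorder directly. How reliably soundstart/soundend fire varies by browser and microphone, and the recorder variable is assumed to be a MediaRecorder created on the getUserMedia() stream, as in the example below:

    // Sketch: drive the MediaRecorder from SpeechRecognition sound events.
    // Assumes `recorder` is a MediaRecorder on the getUserMedia() stream.
    const recognition = new webkitSpeechRecognition();
    recognition.continuous = true;
    recognition.onsoundstart = () => {
      if (recorder.state === "inactive") recorder.start();
      else if (recorder.state === "paused") recorder.resume();
    };
    recognition.onsoundend = () => {
      if (recorder.state === "recording") {
        recorder.pause();
        recorder.requestData(); // emit the audio captured for this utterance
      }
    };
    recognition.start();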

    <!DOCTYPE html>
    <html>
    
    <head>
      <title>Speech Recognition Recording</title>
    </head>
    
    <body>
      <input type="button" value="Stop speech command recognition" id="stop">
      <script>
        navigator.mediaDevices.getUserMedia({
            audio: true
          })
          .then(stream => {
            const recorder = new MediaRecorder(stream);
            const recognition = new webkitSpeechRecognition();
            const synthesis = new SpeechSynthesisUtterance();
            const handleResult = e => {
              recognition.onresult = null;
              console.log(e.results);
              const result = e.results[e.results.length - 1];
    
              if (result.isFinal) {
                const [{transcript}] = result;
                console.log(transcript);
                synthesis.text = transcript;
                window.speechSynthesis.speak(synthesis);
              }
            }
            synthesis.onstart = () => {
              if (recorder.state === "inactive") {
                recorder.start()
              } else {
                if (recorder.state === "paused") {
                  recorder.resume();
                }
              }
            }
            synthesis.onend = () => {
              recorder.pause();
              recorder.requestData();
            }
            recorder.ondataavailable = async(e) => {
              if (stream.active) {
                try {
                  const blobURL = URL.createObjectURL(e.data);
                  const request = await fetch(blobURL);
                  const ab = await request.arrayBuffer();
                  console.log(blobURL, ab);
                  recognition.onresult = handleResult;
                  // URL.revokeObjectURL(blobURL);
                } catch (err) {
                  throw err
                }
              }
            }
            recorder.onpause = e => {
              console.log("recorder " + recorder.state);
            }
            recognition.continuous = true;
            recognition.interimResults = false;
            recognition.maxAlternatives = 1;
            recognition.start();
            recognition.onend = e => {
              console.log("recognition ended, stream.active", stream.active);
    
              if (stream.active) {
                console.log(e);
                // the service disconnects after a period of time
                recognition.start();
              }
            }
            recognition.onresult = handleResult;
    
            stream.oninactive = () => {
              console.log("stream ended");
            }
    
            document.getElementById("stop")
              .onclick = () => {
                console.log("stream.active:", stream.active);
                if (stream && stream.active && recognition) {
                  recognition.abort();
                  recorder.stop();
                  for (let track of stream.getTracks()) {
                    track.stop();
                  }
                  console.log("stream.active:", stream.active);
                }
              }
    
          })
          .catch(err => {
            console.error(err)
          });
      </script>
    </body>
    
    </html>
    

    plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
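
    Since the question mentions Google Cloud Speech-to-Text on a NodeJS back-end, here is a rough sketch of what the receiving side could look like, assuming Express and the @google-cloud/speech client; the route name, the WEBM_OPUS encoding (what Chrome's MediaRecorder typically produces), the sample rate, and the language code are all assumptions, not part of the answers above:

    // Sketch of a NodeJS back-end receiving the raw audio buffer (assumptions noted above).
    const express = require("express");
    const speech = require("@google-cloud/speech");
    
    const app = express();
    const client = new speech.SpeechClient();
    
    // accept a raw binary body, as sent with Content-Type: application/octet-stream
    app.post("/upload-audio",
      express.raw({ type: "application/octet-stream", limit: "10mb" }),
      async (req, res) => {
        try {
          const [response] = await client.recognize({
            config: {
              encoding: "WEBM_OPUS",   // assumed; must match what MediaRecorder produced
              sampleRateHertz: 48000,  // assumed default for WebM/Opus
              languageCode: "en-US"    // assumed
            },
            audio: { content: req.body.toString("base64") } // req.body is a Buffer here
          });
          const transcript = response.results
            .map(r => r.alternatives[0].transcript)
            .join("\n");
          res.json({ transcript });
        } catch (err) {
          res.status(500).json({ error: err.message });
        }
      });
    
    app.listen(3000);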
