Detect silence in audio recording
There are similar questions for Java and iOS, but I'm wondering about detecting silence in javascript for audio recordings via getUserMedia(). So given:
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start();
const audioChunks = [];
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks);
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
audio.play();
});
});
I'm wondering if there is anything that can be checked on the Blob, URL, or Audio objects in the stop event for an absence of audio. In the case of a bad microphone or a virtual device selected - anything along those lines. I was previously checking the blob's size, but silent audio still has a filesize. I can do this on the backend via ffmpeg, but hoping there is a way in pure JS to simplify.
Solution 1:[1]
This code runs a callback for every dialog it detects. It keeps looping until the user stops it:
//tuning constants for the voice-activity loop
//(fix: these were implicit globals, which throws in strict mode / ES modules):
const VOICE_MIN_DECIBELS = -35;       //analyser threshold: anything quieter reads as 0
const DELAY_BETWEEN_DIALOGS = 400;    //ms of silence that ends a dialog
const DIALOG_MAX_LENGTH = 60 * 1000;  //hard cap on a single recording, in ms
//mutable shared state:
let MEDIA_RECORDER = null;
let IS_RECORDING = false;
//startRecording:
function startRecording(){
IS_RECORDING = true;
record();
}
//stopRecording:
function stopRecording(){
IS_RECORDING = false;
if(MEDIA_RECORDER !== null)
MEDIA_RECORDER.stop();
}
//record:
function record(){
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
//start recording:
MEDIA_RECORDER = new MediaRecorder(stream);
MEDIA_RECORDER.start();
//save audio chunks:
const audioChunks = [];
MEDIA_RECORDER.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
//analisys:
const audioContext = new AudioContext();
const audioStreamSource = audioContext.createMediaStreamSource(stream);
const analyser = audioContext.createAnalyser();
analyser.minDecibels = VOICE_MIN_DECIBELS;
audioStreamSource.connect(analyser);
const bufferLength = analyser.frequencyBinCount;
const domainData = new Uint8Array(bufferLength);
//loop:
const time = new Date();
let startTime,
lastDetectedTime = time.getTime();
let anySoundDetected = false;
const detectSound = () => {
//recording stoped by user:
if(IS_RECORDING)
return;
time = new Date();
currentTime = time.getTime();
//time out:
if(currentTime > startTime + DIALOG_MAX_LENGTH){
MEDIA_RECORDER.stop();
return;
}
//a dialog detected:
if( anySoundDetected === true &&
currentTime > lastDetectedTime + DELAY_BETWEEN_DIALOGS
){
MEDIA_RECORDER.stop();
return;
}
//check for detection:
analyser.getByteFrequencyData(domainData);
for(let i = 0; i < bufferLength; i++)
if(domainData[i] > 0){
anySoundDetected = true;
time = new Date();
lastDetectedTime = time.getTime();
}
//continue the loop:
window.requestAnimationFrame(detectSound);
};
window.requestAnimationFrame(detectSound);
//stop event:
MEDIA_RECORDER.addEventListener('stop', () => {
//stop all the tracks:
stream.getTracks().forEach(track => track.stop());
if(!anySoundDetected) return;
//send to server:
const audioBlob = new Blob(audioChunks, {'type': 'audio/mp3'});
doWhateverWithAudio(audioBlob);
//start recording again:
record();
});
});
}
//doWhateverWithAudio:
function doWhateverWithAudio(audioBlob){
//.... send to server, downlod, etc.
}
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
| Solution 1 |
