I am somewhat interested in the possibilities of reading audio input from the web browser. A few days ago I wrote a small audio noise filter based on frame-wise speech / non-speech classification. That filter was based on the research in the paper Quantile based noise estimation for spectral subtraction and Wiener filtering. The paper also contains another approach, a quantile based noise spectrum estimation.
This approach simply keeps a history of the audio input and, for every frequency band, calculates the quantile for a given constant. That quantile becomes the noise value of the frequency band. The JavaScript library quickselect from Vladimir Agafonkin helped me to calculate the quantile quickly. The result looks very promising, but I still have to come up with a strategy to clean up the audio input history after a while (see the sketch after the filter code below).
Here is the JavaScript code behind the quantile based noise spectrum estimation. Please note that this code requires the JavaScript library quickselect, as mentioned above.
function qbNoiseFilter(qValue) {
    // Quantile used for the noise estimate; fall back to 0.55 if none is given.
    this.qValue = (qValue === undefined) ? 0.55 : qValue;
    // One history array per frequency band, created lazily on the first call.
    this.noiseHistory = null;

    this.getNoise = function(input) {
        if (this.noiseHistory === null) {
            this.noiseHistory = [];
            for (var i = 0; i < input.length; i++) {
                this.noiseHistory[i] = [];
            }
        }
        var noise = [];
        for (var i = 0; i < input.length; i++) {
            this.noiseHistory[i].push(input[i]);
            // Index of the q-quantile within the history of this band.
            var k = Math.floor(this.noiseHistory[i].length * this.qValue);
            // quickselect partially sorts the history in place so that the
            // k-th smallest element ends up at position k.
            quickselect(this.noiseHistory[i], k, 0, this.noiseHistory[i].length - 1);
            noise[i] = this.noiseHistory[i][k];
        }
        return noise;
    };
}
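As mentioned above, the noiseHistory arrays grow with every frame. One possible clean up strategy, just a sketch under my own assumptions (the name qbNoiseFilterBounded and the default window of 500 frames are not from the paper), is to keep only the most recent frames per frequency band. Because quickselect reorders its input in place, the sketch selects on a copy so the history stays in time order.
// Sketch only: bounded history per frequency band. The maxHistory
// parameter and its default of 500 frames are my own assumptions.
function qbNoiseFilterBounded(qValue, maxHistory) {
    this.qValue = (qValue === undefined) ? 0.55 : qValue;
    this.maxHistory = (maxHistory === undefined) ? 500 : maxHistory;
    this.noiseHistory = null;

    this.getNoise = function(input) {
        if (this.noiseHistory === null) {
            this.noiseHistory = [];
            for (var i = 0; i < input.length; i++) {
                this.noiseHistory[i] = [];
            }
        }
        var noise = [];
        for (var i = 0; i < input.length; i++) {
            this.noiseHistory[i].push(input[i]);
            // Drop the oldest frame once the window is full.
            if (this.noiseHistory[i].length > this.maxHistory) {
                this.noiseHistory[i].shift();
            }
            // quickselect reorders its input in place, so select on a copy
            // to keep the stored history in time order.
            var scratch = this.noiseHistory[i].slice();
            var k = Math.floor(scratch.length * this.qValue);
            quickselect(scratch, k, 0, scratch.length - 1);
            noise[i] = scratch[k];
        }
        return noise;
    };
}
With a small window the estimate also adapts faster to changing background noise, at the cost of a little extra copying per frame.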
You just have to create an instance of qbNoiseFilter and call getNoise with the audio data from getByteFrequencyData, for example.
// Create analyser and check for navigator.getUserMedia
navigator.getUserMedia = (
    navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia
);
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var stream;
var analyser = audioCtx.createAnalyser();
analyser.minDecibels = -90;
analyser.maxDecibels = -10;
analyser.smoothingTimeConstant = 0.85;
analyser.fftSize = 128;
var bufferLength = analyser.frequencyBinCount;
var distortion = audioCtx.createWaveShaper();
var gainNode = audioCtx.createGain();
var biquadFilter = audioCtx.createBiquadFilter();
var convolver = audioCtx.createConvolver();
var noiseFilter = new qbNoiseFilter(0.55);
// Bind function readAudioData to navigator.getUserMedia
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true},
        function(stream) {
            source = audioCtx.createMediaStreamSource(stream);
            source.connect(analyser);
            analyser.connect(distortion);
            distortion.connect(biquadFilter);
            biquadFilter.connect(convolver);
            convolver.connect(gainNode);
            gainNode.connect(audioCtx.destination);
            readAudioData();
        },
        function(err) {
            console.log(err);
        }
    );
} else {
    console.log('UserMedia not supported on your browser');
}
var readAudioData = function() {
    requestAnimationFrame(readAudioData);
    var dataArray = new Uint8Array(bufferLength);
    analyser.getByteFrequencyData(dataArray);
    var noise = noiseFilter.getNoise(dataArray);
    console.log(noise);
};
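The paper mentioned at the beginning uses such a noise estimate for spectral subtraction. As a rough illustration only, and not the method from the paper, here is a minimal sketch that subtracts the estimated noise magnitude from the current spectrum and clamps at zero; the helper name subtractNoise is my own.
// Sketch only: simple magnitude subtraction with the noise estimate.
// subtractNoise is a hypothetical helper, not taken from the paper.
function subtractNoise(dataArray, noise) {
    var cleaned = [];
    for (var i = 0; i < dataArray.length; i++) {
        // Subtract the estimated noise for this band and clamp at zero.
        cleaned[i] = Math.max(dataArray[i] - noise[i], 0);
    }
    return cleaned;
}

// Possible usage inside readAudioData:
// var noise = noiseFilter.getNoise(dataArray);
// var cleaned = subtractNoise(dataArray, noise);
// console.log(cleaned);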