Recording Audio on the Web
October 27, 2020
I ran a few experiments with audio recording in the browser, using the MediaRecorder and Web Audio APIs.
Project Setup
First, I created a simple project:
mkdir recorder && cd recorder && npm init # init project
npm i parcel --save-dev # install parcel to serve files
npm i react react-dom --save # install react for ui
touch index.html app.js # create files
parcel index.html --open # start server & open in browser
... added some HTML:
<!DOCTYPE html>
<html>
  <head>
    <title>Recorder</title>
  </head>
  <body>
    <div id="app"></div>
    <script src="./app.js"></script>
  </body>
</html>
... and a minimal React setup:
import React from 'react';
import ReactDOM from 'react-dom';

const App = () => {
  return <button>record</button>;
};

const appContainer = document.querySelector('#app');
ReactDOM.render(<App />, appContainer);
Now we're ready to go!
Recording Audio
useUserMedia hook
First, we need access to the user's microphone. We can request it with getUserMedia, wrapped in a hook:
import React, { useState, useRef, useEffect } from 'react';
import ReactDOM from 'react-dom';
import 'regenerator-runtime/runtime'; // https://flaviocopes.com/parcel-regeneratorruntime-not-defined/

function useUserMedia(constraints) {
  const [stream, setStream] = useState();
  // returns the cached stream, or requests a fresh one (triggering the permission prompt)
  function getStream(refresh = false) {
    if (stream && !refresh) {
      return stream;
    }
    return navigator.mediaDevices.getUserMedia(constraints).then((_stream) => {
      setStream(_stream);
      return _stream;
    });
  }
  return { stream, getStream };
}
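On its own, the hook could be used like this (just a sketch; the MicButton component is made up for illustration):

const MicButton = () => {
  const { stream, getStream } = useUserMedia({ audio: true, video: false });
  // the first call to getStream triggers the browser's permission prompt
  return <button onClick={() => getStream()}>{stream ? 'mic ready' : 'enable mic'}</button>;
};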
useMediaRecorder hook
To record audio, we can now pass our stream to the MediaRecorder API, again wrapped in a hook:
export function useMediaRecorder({ constraints = { audio: true, video: false }, onStart, onStop, onData }) {
  const [recorder, setRecorder] = useState();
  const [state, setState] = useState('inactive');
  const { getStream } = useUserMedia(constraints);
  const audioChunks = useRef([]); // this will contain the recorded chunks
  async function start(timeslices, _stream) {
    const stream = _stream || (await getStream(true)); // request stream using our custom hook
    audioChunks.current = [];
    const _recorder = new MediaRecorder(stream);
    onStart && onStart(_recorder);
    _recorder.start(timeslices); // start recording with timeslices
    setRecorder(_recorder);
    setState(_recorder.state);
    // fired every timeslices (ms), or once on stop if no timeslice was given
    _recorder.addEventListener('dataavailable', (event) => {
      audioChunks.current.push(event.data);
      onData && onData(event, audioChunks.current);
    });
    _recorder.addEventListener('stop', () => {
      onStop && onStop(audioChunks.current);
      setState(_recorder.state);
    });
  }
  async function stop() {
    if (recorder) {
      recorder.stop();
      // stop all tracks to release the microphone (turns off the browser's recording indicator)
      (await getStream()).getTracks().forEach((track) => track.stop());
    }
  }
  return { start, stop, state };
}
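A caveat: the encoded format depends on the browser (Chrome and Firefox typically record to webm/opus). If a specific container matters, MediaRecorder.isTypeSupported lets us check support upfront. A minimal sketch, assuming a stream obtained via getUserMedia:

const preferred = 'audio/webm;codecs=opus';
const options = MediaRecorder.isTypeSupported(preferred) ? { mimeType: preferred } : {};
const recorder = new MediaRecorder(stream, options); // otherwise falls back to the browser default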
Usage in the App
We can now use the above hook and play the recorded audio when we press stop:
const App = () => {
  const { start, stop } = useMediaRecorder({
    constraints: { audio: true, video: false }, // audio only
    onStop: (audioChunks) => {
      const audioBlob = new Blob(audioChunks);
      const audioUrl = URL.createObjectURL(audioBlob);
      const audio = new Audio(audioUrl);
      audio.play();
    },
  });
  return (
    <>
      <button onClick={() => start()}>record</button>
      <button onClick={() => stop()}>stop</button>
    </>
  );
};
Result:
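By the way, since start accepts a timeslice, the hook can also deliver chunks while the recording is still running, e.g. for live feedback. A sketch using the onData callback from above:

const { start, stop } = useMediaRecorder({
  onData: (event, audioChunks) => {
    // with start(1000), this fires roughly once per second
    console.log(`chunk ${audioChunks.length}:`, event.data.size, 'bytes');
  },
});
// <button onClick={() => start(1000)}>record</button>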
Displaying an audio waveform
Getting the raw PCM data
First, we need to get the raw PCM data:
function getPCM(blob) {
  return new Promise((resolve, reject) => {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)();
    const fileReader = new FileReader();
    fileReader.onload = () => {
      const arrayBuffer = fileReader.result;
      // Convert array buffer into audio buffer
      audioContext.decodeAudioData(
        arrayBuffer,
        (audioBuffer) => {
          // Do something with audioBuffer
          const pcm = audioBuffer.getChannelData(0);
          resolve(pcm);
        },
        reject
      );
    };
    fileReader.onerror = reject;
    fileReader.readAsArrayBuffer(blob);
  });
}
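getChannelData(0) returns a Float32Array with one value per sample of the first channel, in the range [-1, 1]. A quick sanity check might look like this (sketch):

const pcm = await getPCM(audioBlob);
let peak = 0;
for (const value of pcm) {
  peak = Math.max(peak, Math.abs(value)); // track the loudest sample
}
console.log(`${pcm.length} samples, peak amplitude ${peak}`);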
Drawing the Waveform
Let's set up the canvas:
export function Waveform({ pcm, playhead }) {
  const [canvasRef, setCanvasRef] = useState();
  useEffect(() => {
    if (pcm && canvasRef) {
      drawPCM(pcm, canvasRef, playhead); // defined below
    }
  }, [pcm, canvasRef, playhead]);
  // render the canvas at double resolution for crisp lines on high-DPI screens
  function prettyCanvas(width, height, style) {
    return {
      width: width * 2,
      height: height * 2,
      style: { width, height, ...style },
    };
  }
  return <canvas ref={setCanvasRef} {...prettyCanvas(640, 200, { backgroundColor: '#BFBFBF' })} />;
}
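The hardcoded factor of 2 assumes a high-DPI screen; window.devicePixelRatio would make it adaptive. A possible variant (sketch):

function prettyCanvas(width, height, style) {
  const dpr = window.devicePixelRatio || 1; // 1 on standard screens, 2+ on retina
  return {
    width: width * dpr,
    height: height * dpr,
    style: { width, height, ...style },
  };
}

Note that drawPCM below hardcodes the same scale factor, so it would need the same treatment.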
Now we can draw the waveform like this:
function drawPCM(values, canvas, playhead) {
  const ctx = canvas.getContext('2d');
  let { width: clientWidth, height: clientHeight } = canvas;
  canvas.width = clientWidth; // setting the width clears the canvas
  const scale = 2;
  ctx.scale(scale, scale);
  clientWidth /= scale; // scale down for pretty canvas
  clientHeight /= scale;
  const absoluteValues = true; // if false, we will retain the true waveform
  const valuesPerPixel = values.length / clientWidth;
  const blockSize = 1; // width of one sample block
  let max = 0;
  const averageValues = [];
  // average the samples that fall into each horizontal block
  for (let x = 0; x < clientWidth; x += blockSize) {
    const area = values.slice(Math.floor(x * valuesPerPixel), Math.ceil((x + blockSize) * valuesPerPixel));
    const areaReducer = absoluteValues ? (sum, v) => sum + Math.abs(v) : (sum, v) => sum + v;
    const value = area.reduce(areaReducer, 0) / area.length;
    max = max < value ? value : max;
    averageValues.push(value);
  }
  // draw one vertical bar per block, normalized to the loudest block
  averageValues.forEach((value, index) => {
    const height = (((value / max) * clientHeight) / 2) * 0.9;
    ctx.fillStyle = '#6464D8';
    ctx.fillRect(index * blockSize, clientHeight / 2 - (absoluteValues ? height / 2 : 0), blockSize, height);
  });
  if (playhead) {
    ctx.beginPath();
    const x = playhead * clientWidth;
    ctx.moveTo(x, 0);
    ctx.lineTo(x, clientHeight);
    ctx.stroke();
  }
}
In our app, we can now update the pcm on stop and render the waveform:
const App = () => {
  const [pcm, setPcm] = useState();
  const { start, stop } = useMediaRecorder({
    onStop: async (audioChunks) => {
      const audioBlob = new Blob(audioChunks);
      setPcm(await getPCM(audioBlob));
      /* other stuff */
    },
  });
  return (
    <>
      {pcm && <Waveform pcm={pcm} />}
      {/* other stuff */}
    </>
  );
};
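The playhead prop is still unused here; one way to drive it would be the audio element's timeupdate event. A sketch, assuming the Audio object from the earlier onStop handler:

const [playhead, setPlayhead] = useState(0);
// inside onStop, after creating the Audio element:
audio.addEventListener('timeupdate', () => {
  setPlayhead(audio.currentTime / audio.duration); // playback progress from 0 to 1
});
// ... and in the render: <Waveform pcm={pcm} playhead={playhead} />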
For the "other stuff" I used some Material UI components to make it sweeter:
That's it. I will go into using analyser nodes in a future post!
Links
- https://github.com/bbc/peaks.js + https://waveform.prototyping.bbc.co.uk/
- https://getstream.io/blog/generating-waveforms-for-podcasts-in-winds-2-0/
- https://css-tricks.com/making-an-audio-waveform-visualizer-with-vanilla-javascript/
- https://library-demos.herokuapp.com/react-voice-recorder
- https://github.com/danrouse/react-audio-recorder
- https://hackingbeauty.github.io/react-mic/
- https://ruebel.github.io/waveform-react/
- https://wavesurfer-js.org/
- https://www.freecodecamp.org/news/how-to-make-realtime-soundcloud-waveforms-in-react-native-4df0f4c6b3cc/
- https://github.com/klambycom/react-waveform
- https://github.com/andrewrk/waveform
- https://www.twilio.com/blog/audio-visualisation-web-audio-api--react