react-visual-audio-recorder
Comparing version 1.1.1 to 1.1.2
 {
   "name": "react-visual-audio-recorder",
-  "version": "1.1.1",
+  "version": "1.1.2",
   "private": false,
@@ -5,0 +5,0 @@ "scripts": {
@@ -19,3 +19,3 @@ /* eslint-disable react-hooks/exhaustive-deps */
 }, [_mimeType, _ext]);
-const { stopRecording: onStopRecording, pauseRecording: onPauseRecording, resumeRecording: onResumeRecording, resetRecording: onResetRecording, startRecording: onStartRecording, audioContextAnalyser, } = useMicrophoneRecorder({
+const { stopRecording: onStopRecording, pauseRecording: onPauseRecording, resumeRecording: onResumeRecording, resetRecording: onResetRecording, startRecording: onStartRecording, mediaRecorderApi, } = useMicrophoneRecorder({
 onStart,
@@ -38,4 +38,6 @@ onStop,
 let animationFrame = -1;
-if (record && visualizerRef.current && audioContextAnalyser)
-    animationFrame = Visualizer(visualizerRef.current.getContext("2d"), visualizerRef.current, audioContextAnalyser, width, height, backgroundColor, strokeColor);
+mediaRecorderApi.then((api) => {
+    if (record && visualizerRef.current)
+        animationFrame = Visualizer(visualizerRef.current.getContext("2d"), visualizerRef.current, api.analyser, width, height, backgroundColor, strokeColor);
+});
 return () => {
@@ -45,3 +47,3 @@ if (animationFrame > -1)
 };
-}, [record, visualizerRef.current, audioContextAnalyser, width, height, backgroundColor, strokeColor]);
+}, [record, visualizerRef.current, mediaRecorderApi, width, height, backgroundColor, strokeColor]);
 useEffect(() => {
@@ -80,2 +82,3 @@ if (handleStatus)
 getFileExtension: () => ext,
+mediaRecorderApi,
 }), [record]);
@@ -82,0 +85,0 @@ return React.createElement("canvas", { ref: visualizerRef, height: height, width: width, className: className });
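
The hunk above adds mediaRecorderApi to the object the component exposes through its ref, next to getFileExtension, so a parent can reach the live MediaRecorder and AnalyserNode. A minimal consumer sketch; the default-export name ReactVisualAudioRecorder and the exact ref shape are assumptions inferred from this diff, not documented API:

// Sketch only: component name and ref-handle shape are inferred from the diff above.
import React, { useRef } from "react";
import ReactVisualAudioRecorder from "react-visual-audio-recorder";

function Example() {
    const recorderRef = useRef(null);
    async function inspectRecorder() {
        // Resolves once getUserMedia and the AudioContext are ready.
        const { mediaRecorder, analyser } = await recorderRef.current.mediaRecorderApi;
        console.log(mediaRecorder.state, analyser.frequencyBinCount);
    }
    return React.createElement("div", null,
        React.createElement(ReactVisualAudioRecorder, { ref: recorderRef }),
        React.createElement("button", { onClick: inspectRecorder }, "inspect"));
}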
@@ -126,2 +126,8 @@ import { ForwardedRef } from "react";
 getFileExtension: () => string | void;
+mediaRecorderApi: Promise<{
+    mediaRecorder: MediaRecorder;
+    audioContext: AudioContext;
+    analyser: AnalyserNode;
+    mediaStream: MediaStream;
+}>;
 }
@@ -144,4 +150,8 @@ export interface UseMicrophoneRecorderParams {
 startRecording: () => Promise<void>;
-audioContext: AudioContext | void;
-audioContextAnalyser: AnalyserNode | void;
+mediaRecorderApi: Promise<{
+    mediaRecorder: MediaRecorder;
+    audioContext: AudioContext;
+    analyser: AnalyserNode;
+    mediaStream: MediaStream;
+}>;
 };
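
Per these typings, the hook no longer returns audioContext and audioContextAnalyser as separate, possibly-undefined values; everything now sits behind a single promise. A sketch of consuming the new shape inside a component; the import path is assumed, and the options/soundOptions values are placeholders:

// Sketch: consuming the hook's new return shape (import path assumed).
import { useMicrophoneRecorder } from "react-visual-audio-recorder";

function useRecorderLogger() {
    const { startRecording, mediaRecorderApi } = useMicrophoneRecorder({
        options: {},        // placeholder MediaRecorder options
        soundOptions: {},   // placeholder audio constraints
    });
    async function logPeakLevel() {
        // No undefined checks needed: the promise resolves only once everything exists.
        const { analyser } = await mediaRecorderApi;
        const data = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(data);
        console.log(Math.max(...data));
    }
    return { startRecording, logPeakLevel };
}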
@@ -45,6 +45,2 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 const { onStart, onStop: _onStop, onChange, onData, options, soundOptions } = params;
-const [mediaStream, setMediaStream] = useState();
-const [mediaRecorder, setMediaRecorder] = useState();
-const [audioContext, setAudioContext] = useState();
-const [audioContextAnalyser, setAudioContextAnalyser] = useState();
 const [startTime, setStartTime] = useState();
@@ -56,2 +52,33 @@ const chunks = useRef([]);
 }), [soundOptions]);
+const mediaRecorderApi = useMemo(() => __awaiter(this, void 0, void 0, function* () {
+    if (navigator.mediaDevices) {
+        const mediaStream = yield navigator.mediaDevices.getUserMedia(constraints);
+        let mediaRecorderInstance;
+        if (options.mimeType && MediaRecorder.isTypeSupported(options.mimeType)) {
+            mediaRecorderInstance = new MediaRecorder(mediaStream, options);
+        }
+        else {
+            mediaRecorderInstance = new MediaRecorder(mediaStream, Object.assign(Object.assign({}, options), { mimeType: "" }));
+        }
+        mediaRecorderInstance.addEventListener("dataavailable", onHandleChunks);
+        const audioCtx = createAudioContextCompat();
+        const analyser = audioCtx.createAnalyser();
+        audioCtx.resume().then(() => {
+            const sourceNode = audioCtx.createMediaStreamSource(mediaStream);
+            sourceNode.connect(analyser);
+        });
+        setStartTime(Date.now());
+        if (onStart)
+            onStart(mediaRecorderInstance, audioCtx, mediaStream, analyser);
+        return {
+            mediaRecorder: mediaRecorderInstance,
+            audioContext: audioCtx,
+            analyser,
+            mediaStream,
+        };
+    }
+    else {
+        throw new Error("Your browser does not support audio recording");
+    }
+}), []);
 function onHandleChunks(event) {
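
The added block replaces four pieces of useState with a single promise memoized once per hook instance: getUserMedia, MediaRecorder construction, and AudioContext wiring all run on first render, and every later consumer awaits the same resolved object. One observable consequence is that the browser's microphone permission prompt now fires as soon as the hook first renders rather than on the first startRecording call. A stripped-down sketch of the same memoized-promise pattern outside React; all names here are illustrative, not library code:

// Illustrative sketch of the pattern: build the recording API once,
// hand every caller the same promise.
function makeRecorderApi(constraints) {
    let apiPromise = null; // cached so getUserMedia runs at most once
    return function getApi() {
        if (!apiPromise) {
            apiPromise = (async () => {
                const mediaStream = await navigator.mediaDevices.getUserMedia(constraints);
                const mediaRecorder = new MediaRecorder(mediaStream);
                const audioContext = new AudioContext();
                const analyser = audioContext.createAnalyser();
                audioContext.createMediaStreamSource(mediaStream).connect(analyser);
                return { mediaRecorder, audioContext, analyser, mediaStream };
            })();
        }
        return apiPromise;
    };
}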
@@ -109,30 +136,30 @@ chunks.current.push(event.data);
 onStop();
-if (mediaRecorder) {
-    if (mediaRecorder.state !== "inactive")
-        mediaRecorder.stop();
-    setMediaRecorder();
-}
-if (mediaStream) {
-    mediaStream.getAudioTracks().forEach((track) => {
-        track.stop();
-    });
-    setMediaStream();
-}
-if (audioContext) {
-    audioContext.close();
-    setAudioContext();
-}
-if (audioContextAnalyser) {
-    setAudioContextAnalyser();
-}
-setStartTime();
+mediaRecorderApi.then(({ mediaRecorder, mediaStream, audioContext }) => {
+    if (mediaRecorder) {
+        if (mediaRecorder.state !== "inactive")
+            mediaRecorder.stop();
+    }
+    if (mediaStream) {
+        mediaStream.getAudioTracks().forEach((track) => {
+            track.stop();
+        });
+    }
+    if (audioContext) {
+        audioContext.close();
+    }
+    setStartTime();
+});
 }
 function pauseRecording() {
-onPause();
-audioContext === null || audioContext === void 0 ? void 0 : audioContext.suspend();
-mediaRecorder === null || mediaRecorder === void 0 ? void 0 : mediaRecorder.pause();
+mediaRecorderApi.then(({ audioContext, mediaRecorder }) => {
+    onPause();
+    audioContext === null || audioContext === void 0 ? void 0 : audioContext.suspend();
+    mediaRecorder === null || mediaRecorder === void 0 ? void 0 : mediaRecorder.pause();
+});
 }
 function resumeRecording() {
-audioContext === null || audioContext === void 0 ? void 0 : audioContext.resume();
-mediaRecorder === null || mediaRecorder === void 0 ? void 0 : mediaRecorder.resume();
+mediaRecorderApi.then(({ audioContext, mediaRecorder }) => {
+    audioContext === null || audioContext === void 0 ? void 0 : audioContext.resume();
+    mediaRecorder === null || mediaRecorder === void 0 ? void 0 : mediaRecorder.resume();
+});
 }
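
Stop, pause, and resume now wait for the shared promise before touching the underlying objects, so a call made before the microphone finished initializing is deferred instead of becoming a silent no-op against undefined state. The long conditionals are just TypeScript's compiled form of optional chaining; a de-sugared sketch of the pause body, with names as in the diff:

// De-sugared equivalent of the compiled pauseRecording body above.
async function pauseRecording(mediaRecorderApi, onPause) {
    const { audioContext, mediaRecorder } = await mediaRecorderApi;
    onPause();
    audioContext?.suspend(); // freezes the AnalyserNode feeding the visualizer
    mediaRecorder?.pause();  // stops "dataavailable" chunks until resume()
}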
@@ -144,39 +171,3 @@ function resetRecording() {
 function startRecording() {
-return __awaiter(this, void 0, void 0, function* () {
-    return new Promise((resolve, reject) => {
-        if (navigator.mediaDevices) {
-            navigator.mediaDevices.getUserMedia(constraints).then((mediaStream) => {
-                setMediaStream(mediaStream);
-                let mediaRecorderInstance;
-                if (options.mimeType && MediaRecorder.isTypeSupported(options.mimeType)) {
-                    mediaRecorderInstance = new MediaRecorder(mediaStream, options);
-                }
-                else {
-                    mediaRecorderInstance = new MediaRecorder(mediaStream, Object.assign(Object.assign({}, options), { mimeType: "" }));
-                }
-                mediaRecorderInstance.addEventListener("dataavailable", onHandleChunks);
-                const audioCtx = createAudioContextCompat();
-                audioCtx.resume().then(() => {
-                    const analyser = audioCtx.createAnalyser();
-                    if (mediaRecorderInstance.state !== "recording") {
-                        mediaRecorderInstance.start(10);
-                    }
-                    const sourceNode = audioCtx.createMediaStreamSource(mediaStream);
-                    sourceNode.connect(analyser);
-                    setStartTime(Date.now());
-                    setMediaRecorder(mediaRecorderInstance);
-                    setAudioContext(audioCtx);
-                    setAudioContextAnalyser(analyser);
-                    setMediaStream(mediaStream);
-                    if (onStart)
-                        onStart(mediaRecorderInstance, audioCtx, mediaStream, analyser);
-                    resolve();
-                });
-            });
-        }
-        else {
-            reject(new Error("Your browser does not support audio recording"));
-        }
-    });
-});
+return mediaRecorderApi.then((api) => api.mediaRecorder.start(10));
 }
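
startRecording collapses from the whole acquisition routine above to a one-liner because setup already ran in the memoized initializer; starting is now just MediaRecorder.start(10), where 10 is a timeslice in milliseconds that makes the recorder emit a dataavailable Blob roughly every 10 ms. A plain browser-API sketch of that timeslice behavior, independent of this library:

// Plain MediaRecorder sketch (run inside an async function): the argument
// to start() is a timeslice, so chunks arrive ~every 10 ms instead of once at stop().
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const recorder = new MediaRecorder(stream);
const chunks = [];
recorder.addEventListener("dataavailable", (event) => chunks.push(event.data));
recorder.start(10);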
@@ -196,5 +187,4 @@ useEffect(() => {
 startRecording,
-audioContext,
-audioContextAnalyser,
+mediaRecorderApi,
 };
}