New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

react-text-to-speech

Package Overview
Dependencies
Maintainers
0
Versions
125
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

react-text-to-speech - npm Package Compare versions

Comparing version 1.2.5 to 1.3.0

109

dist/index.js
import { __spreadValues, __objRest, HiVolumeUp, HiVolumeOff, HiMiniStop } from './chunks/chunk-3KTCESQJ.js';
import React2, { useState, useEffect, useRef, useMemo, useCallback, isValidElement, cloneElement } from 'react';
import React2, { useState, useEffect, useMemo, useCallback, isValidElement, cloneElement, useRef } from 'react';
import { createPortal } from 'react-dom';

@@ -17,5 +17,2 @@

var specialSymbol = "\xA0";
var utteranceEvents = ["onstart", "onend", "onerror", "onpause", "onresume", "onmark", "onboundary"];
var utteranceProperties = ["lang", "voice", "volume", "rate", "pitch"];
var utterancePropertiesAndEvents = utteranceProperties.concat(utteranceEvents);

@@ -67,13 +64,8 @@ // src/state.ts

}
function cancel() {
function cancel(stopReason = "manual") {
var _a;
if (typeof window === "undefined") return;
setState({ stopReason: "manual" });
setState({ stopReason });
(_a = window.speechSynthesis) == null ? void 0 : _a.cancel();
}
// Creates a fresh SpeechSynthesisUtterance carrying the given text while copying
// every configurable property and event handler (see utterancePropertiesAndEvents)
// from the source utterance, so the clone speaks with identical settings.
function cloneUtterance(utterance, text) {
  const clone = new SpeechSynthesisUtterance(text);
  for (const key of utterancePropertiesAndEvents) clone[key] = utterance[key];
  return clone;
}
function findCharIndex(words, index) {

@@ -160,3 +152,3 @@ let currentIndex = 0;

function emit(callback) {
const utteranceQueue = queue.map(({ displayUtterance }) => displayUtterance);
const utteranceQueue = queue.map(({ text, utterance: { pitch, rate, volume, lang, voice } }) => ({ text, pitch, rate, volume, lang, voice }));
queueListeners.forEach((listener) => listener(utteranceQueue));

@@ -217,6 +209,10 @@ callback == null ? void 0 : callback(utteranceQueue);

const [speakingWord, speakingWordRef, setSpeakingWord] = useStateRef(null);
const utteranceRef = useRef(null);
const { voices } = useVoices();
const { utteranceRef, updateProps } = useSpeechSynthesisUtterance();
const key = useMemo(() => NodeToKey(text), [text]);
const words = useMemo(() => NodeToWords(text), [key]);
const { words, sanitizedText, chunks, numChunks } = useMemo(() => {
const words2 = NodeToWords(text);
const sanitizedText2 = sanitize(WordsToText(words2));
const chunks2 = TextToChunks(sanitizedText2, maxChunkSize);
return { words: words2, sanitizedText: sanitizedText2, chunks: chunks2, numChunks: chunks2.length };
}, [key]);
const reactContent = useMemo(() => highlightedText(text), [speakingWord, words, highlightText, showOnlyHighlightedText]);

@@ -229,25 +225,10 @@ const Text = useCallback(() => reactContent, [reactContent]);

if (speechStatus === "queued") return;
const sanitizedText = sanitize(WordsToText(words));
const chunks = TextToChunks(sanitizedText, maxChunkSize);
const numChunks = chunks.length;
let currentChunk = 0;
let currentText = chunks[currentChunk] || "";
const utterance = new SpeechSynthesisUtterance(currentText.trimStart());
const utterance = utteranceRef.current;
utterance.text = currentText.trimStart();
let offset = currentText.length - utterance.text.length;
utterance.pitch = pitch;
utterance.rate = rate;
utterance.volume = volume;
if (lang) utterance.lang = lang;
if (voiceURI) {
if (!Array.isArray(voiceURI)) voiceURI = [voiceURI];
for (let i = 0; i < voiceURI.length; i++) {
const uri = voiceURI[i];
const voice = voices.find((voice2) => voice2.voiceURI === uri);
if (voice) {
utterance.voice = voice;
break;
}
}
}
updateProps({ pitch, rate, volume, lang, voiceURI });
const stopEventHandler = (event) => {
var _a;
if (state.stopReason === "auto" && currentChunk < numChunks - 1) {

@@ -260,2 +241,10 @@ offset += utterance.text.length;

}
if (state.stopReason === "change") {
if (speakingWordRef.current) {
let currentLength = utterance.text.length;
utterance.text = utterance.text.slice(((_a = speakingWordRef.current) == null ? void 0 : _a.charIndex) || 0).trimStart();
offset += currentLength - utterance.text.length;
}
return speakFromQueue();
}
if (synth.paused) cancel();

@@ -297,3 +286,3 @@ window.removeEventListener("beforeunload", clearQueueUnload);

if (shouldHighlightNextPart(highlightMode, name, utterance, charIndex) || parent(index) !== parent((_a = speakingWordRef.current) == null ? void 0 : _a.index))
setSpeakingWord({ index, length: isSpecialSymbol || charLength });
setSpeakingWord({ index, charIndex: isSpecialSymbol ? charIndex + charLength + 1 : charIndex, length: isSpecialSymbol || charLength });
if (isSpecialSymbol) offset -= charLength + 1;

@@ -303,10 +292,7 @@ onBoundary == null ? void 0 : onBoundary(event);

if (!preserveUtteranceQueue) clearQueue();
addToQueue({ utterance, displayUtterance: cloneUtterance(utterance, sanitizedText), setSpeechStatus }, onQueueChange);
addToQueue({ text: sanitizedText, utterance, setSpeechStatus }, onQueueChange);
setSpeechStatus("started");
if (synth.speaking) {
if (preserveUtteranceQueue && speechStatus !== "started") {
utteranceRef.current = utterance;
return setSpeechStatus("queued");
} else cancel();
} else speakFromQueue();
if (!synth.speaking) return speakFromQueue();
if (preserveUtteranceQueue && speechStatus !== "started") return setSpeechStatus("queued");
cancel();
}

@@ -318,5 +304,5 @@ function pause() {

}
function stop(status = speechStatus) {
function stop({ status = speechStatus, stopReason } = {}) {
if (status === "stopped") return;
if (status !== "queued") return cancel();
if (status !== "queued") return cancel(stopReason);
removeFromQueue(utteranceRef.current, onQueueChange);

@@ -343,4 +329,13 @@ setSpeechStatus("stopped");

if (autoPlay) start();
return () => stop(speechStatusRef.current);
return () => stop({ status: speechStatusRef.current });
}, [autoPlay, key]);
useEffect(() => {
if (speechStatus !== "started") return;
const timeout = setTimeout(() => {
updateProps({ pitch, rate, volume });
stop({ stopReason: "change" });
emit(onQueueChange);
}, 500);
return () => clearTimeout(timeout);
}, [pitch, rate, volume]);
return {

@@ -355,2 +350,26 @@ Text,

}
// Hook owning a single reusable SpeechSynthesisUtterance instance.
// Returns the ref plus an updater that applies speech props to it in place.
// The ref holds null during SSR or when the Web Speech API is unavailable.
function useSpeechSynthesisUtterance() {
  const utteranceRef = useRef(typeof window !== "undefined" && window.speechSynthesis ? new SpeechSynthesisUtterance() : null);
  const { voices } = useVoices();
  // Applies pitch/rate/volume (always), lang (when provided), and the first
  // voice whose voiceURI matches an entry of voiceURI (string or string[]).
  function updateProps({ pitch, rate, volume, lang, voiceURI }) {
    const utterance = utteranceRef.current;
    if (!utterance) return;
    utterance.pitch = pitch;
    utterance.rate = rate;
    utterance.volume = volume;
    if (lang) utterance.lang = lang;
    if (!voiceURI) return;
    const uris = Array.isArray(voiceURI) ? voiceURI : [voiceURI];
    for (const uri of uris) {
      const match = voices.find((candidate) => candidate.voiceURI === uri);
      if (match) {
        utterance.voice = match;
        break;
      }
    }
  }
  return { utteranceRef, updateProps };
}
function useStateRef(init) {

@@ -357,0 +376,0 @@ const [state2, setState2] = useState(init);

import { DetailedHTMLProps, HTMLAttributes, ReactNode, JSX } from 'react';
type SpeechSynthesisUtteranceKeys = SpeechSynthesisUtteranceKey[];
type HighlightMode = "word" | "sentence" | "line" | "paragraph";

@@ -8,2 +7,3 @@ type SpanProps = DetailedHTMLProps<HTMLAttributes<HTMLSpanElement>, HTMLSpanElement>;

index: string;
charIndex: number;
length: number;

@@ -15,9 +15,11 @@ } | null;

type SpeechSynthesisEventName = "word" | "sentence";
type UseSpeechOptions = {
text: ReactNode;
pitch?: number;
rate?: number;
volume?: number;
type SpeechSynthesisUtteranceProps = {
pitch: number;
rate: number;
volume: number;
lang?: string;
voiceURI?: string | string[];
};
type UseSpeechOptions = Partial<SpeechSynthesisUtteranceProps> & {
text: ReactNode;
autoPlay?: boolean;

@@ -60,4 +62,4 @@ preserveUtteranceQueue?: boolean;

type SpeechQueueItem = {
text: string;
utterance: SpeechSynthesisUtterance;
displayUtterance: SpeechSynthesisUtterance;
setSpeechStatus: SpeechStatusUpdater;

@@ -67,10 +69,9 @@ };

type SpeechStatusUpdater = (newStatus: SpeechStatus) => void;
type SpeechUtterancesQueue = SpeechSynthesisUtterance[];
type SpeechUtterancesQueue = Partial<SpeechSynthesisUtterance>[];
type Index = string | number;
type State = {
stopReason: "auto" | "manual";
stopReason: "auto" | "change" | "manual";
};
type SpeechSynthesisUtteranceKey = keyof SpeechSynthesisUtterance;
type Words = string | Words[];
export type { Button, Children, ChildrenOptions, DivProps, HighlightMode, IconProps, Index, QueueChangeEventHandler, SpanProps, SpeakingWord, SpeechProps, SpeechQueue, SpeechQueueItem, SpeechStatus, SpeechStatusUpdater, SpeechSynthesisErrorHandler, SpeechSynthesisEventHandler, SpeechSynthesisEventName, SpeechSynthesisUtteranceKey, SpeechSynthesisUtteranceKeys, SpeechUtterancesQueue, State, UseSpeechOptions, Words };
export type { Button, Children, ChildrenOptions, DivProps, HighlightMode, IconProps, Index, QueueChangeEventHandler, SpanProps, SpeakingWord, SpeechProps, SpeechQueue, SpeechQueueItem, SpeechStatus, SpeechStatusUpdater, SpeechSynthesisErrorHandler, SpeechSynthesisEventHandler, SpeechSynthesisEventName, SpeechSynthesisUtteranceProps, SpeechUtterancesQueue, State, UseSpeechOptions, Words };
{
"name": "react-text-to-speech",
"version": "1.2.5",
"version": "1.3.0",
"description": "An easy-to-use React.js component that leverages the Web Speech API to convert text to speech.",

@@ -31,3 +31,3 @@ "license": "MIT",

"devDependencies": {
"tsup": "^8.3.6"
"tsup": "^8.4.0"
},

@@ -48,5 +48,5 @@ "keywords": [

"scripts": {
"dev": "tsup --watch",
"compile": "tsup"
"compile": "tsup",
"dev": "tsup --watch"
}
}