web-speech-cognitive-services - npm package version comparison

Comparing version 8.0.0-main.ccf35da to 8.0.0-main.d9ef940


dist/web-speech-cognitive-services.d.ts
import * as memoize_one from 'memoize-one';
import { SpeechRecognizer } from 'microsoft-cognitiveservices-speech-sdk';
import { AudioConfig, SpeechRecognizer } from 'microsoft-cognitiveservices-speech-sdk';

@@ -51,3 +51,3 @@ declare class SpeechSynthesisUtterance {

declare class _default$3 {
declare class _default$2 {
constructor(audioContext: any);

@@ -62,3 +62,3 @@ audioContext: any;

declare class _default$2 {
declare class _default$1 {
constructor({ audioContext, ponyfill }: {

@@ -68,3 +68,3 @@ audioContext: any;

});
consumer: _default$3 | null;
consumer: _default$2 | null;
paused: boolean;

@@ -81,4 +81,39 @@ queue: any[];

declare function createSpeechRecognitionPonyfill(options: any): {};
type Credentials = Readonly<({
authorizationToken: string;
subscriptionKey?: undefined;
} | {
authorizationToken?: undefined;
subscriptionKey: string;
}) & ({
customVoiceHostname?: undefined;
region: string;
speechRecognitionHostname?: undefined;
speechSynthesisHostname?: undefined;
} | {
customVoiceHostname: string;
region?: undefined;
speechRecognitionHostname: string;
speechSynthesisHostname: string;
})>;
type PatchOptionsInit = {
audioConfig: AudioConfig;
credentials?: (() => Credentials | Promise<Credentials>) | Credentials | Promise<Credentials>;
enableTelemetry: boolean;
looseEvent?: boolean | undefined;
looseEvents?: boolean | undefined;
referenceGrammars?: readonly string[] | undefined;
region?: string | undefined;
speechRecognitionEndpointId: string;
textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';
} & ({
authorizationToken: string;
subscriptionKey?: undefined;
} | {
authorizationToken?: undefined;
subscriptionKey: string;
});
declare function createSpeechRecognitionPonyfill(options: PatchOptionsInit): {};
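
The new Credentials and PatchOptionsInit types spell out what the previously untyped options argument accepts: an authorization token or a subscription key, combined with either a region or a full set of hostnames. A minimal region-based call might look like the sketch below (values are placeholders, and not every field listed in the type is shown):

// Sketch only: region + subscription key, the simplest Credentials shape.
import { createSpeechRecognitionPonyfill } from 'web-speech-cognitive-services';

const ponyfill = createSpeechRecognitionPonyfill({
  credentials: {
    region: 'westus',
    subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' // placeholder
  }
});
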
declare class SpeechGrammarList {

@@ -185,3 +220,3 @@ #private;

looseEvents: boolean;
referenceGrammars: [];
referenceGrammars?: readonly string[] | undefined;
textNormalization: 'display' | 'itn' | 'lexical' | 'maskeditn';

@@ -241,3 +276,3 @@ };

declare function _default$1(options: any): {
declare function _default(options: any): {
speechSynthesis?: never;

@@ -262,6 +297,7 @@ SpeechSynthesisEvent?: never;

declare function _default({ region, subscriptionKey }: {
region: any;
subscriptionKey: any;
}): Promise<string>;
type FetchAuthorizationTokenInit = {
region: string;
subscriptionKey: string;
};
declare function fetchAuthorizationToken({ region, subscriptionKey }: FetchAuthorizationTokenInit): Promise<string>;
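
With the new FetchAuthorizationTokenInit type, a call reads as in this sketch (the key is a placeholder; the resolved string is the short-lived token):

// Sketch: exchange a subscription key for an authorization token.
const authorizationToken = await fetchAuthorizationToken({
  region: 'westus',
  subscriptionKey: 'YOUR_SUBSCRIPTION_KEY'
});
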

@@ -274,3 +310,3 @@ declare function createSpeechServicesPonyfill(options?: {}, ...args: any[]): {

speechSynthesis: {
queue: _default$2;
queue: _default$1;
cancel(): void;

@@ -289,2 +325,2 @@ getVoices(): any[];

export { createSpeechRecognitionPonyfill, createSpeechRecognitionPonyfillFromRecognizer, createSpeechServicesPonyfill, _default$1 as createSpeechSynthesisPonyfill, _default as fetchAuthorizationToken };
export { createSpeechRecognitionPonyfill, createSpeechRecognitionPonyfillFromRecognizer, createSpeechServicesPonyfill, _default as createSpeechSynthesisPonyfill, fetchAuthorizationToken };
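
Per the updated export line, fetchAuthorizationToken is now exported under its own name rather than as a renamed default, so named imports along these lines continue to work for the other exports as well (sketch):

// Named imports matching the new export list.
import { createSpeechServicesPonyfill, fetchAuthorizationToken } from 'web-speech-cognitive-services';
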

@@ -37,3 +37,3 @@ "use strict";

createSpeechSynthesisPonyfill: () => TextToSpeech_default,
fetchAuthorizationToken: () => fetchAuthorizationToken_default
fetchAuthorizationToken: () => fetchAuthorizationToken
});

@@ -50,13 +50,17 @@ module.exports = __toCommonJS(src_exports);

// src/SpeechServices/patchOptions.js
// src/SpeechServices/patchOptions.ts
var shouldWarnOnSubscriptionKey = true;
function patchOptions({
authorizationToken,
credentials,
looseEvent,
looseEvents,
region = "westus",
subscriptionKey,
...otherOptions
} = {}) {
function patchOptions(init) {
const {
audioConfig,
authorizationToken,
enableTelemetry,
looseEvent,
referenceGrammars,
region = "westus",
speechRecognitionEndpointId,
subscriptionKey,
textNormalization
} = init;
let { credentials, looseEvents } = init;
if (typeof looseEvent !== "undefined") {

@@ -73,7 +77,8 @@ console.warn('web-speech-cognitive-services: The option "looseEvent" should be named as "looseEvents".');

);
credentials = async () => authorizationToken ? { authorizationToken: await resolveFunctionOrReturnValue(authorizationToken), region } : { region, subscriptionKey: await resolveFunctionOrReturnValue(subscriptionKey) };
credentials = async () => typeof init.authorizationToken !== "undefined" ? { authorizationToken: await resolveFunctionOrReturnValue(init.authorizationToken), region } : { region, subscriptionKey: await resolveFunctionOrReturnValue(init.subscriptionKey) };
}
}
return {
...otherOptions,
return Object.freeze({
audioConfig,
enableTelemetry,
fetchCredentials: async () => {

@@ -113,17 +118,19 @@ const {

}
const resolvedCredentials = authorizationToken2 ? { authorizationToken: authorizationToken2 } : { subscriptionKey: subscriptionKey2 };
if (region2) {
resolvedCredentials.region = region2;
} else {
resolvedCredentials.customVoiceHostname = customVoiceHostname;
resolvedCredentials.speechRecognitionHostname = speechRecognitionHostname;
resolvedCredentials.speechSynthesisHostname = speechSynthesisHostname;
}
return resolvedCredentials;
return {
...typeof authorizationToken2 !== "undefined" ? { authorizationToken: authorizationToken2 } : { subscriptionKey: subscriptionKey2 },
...typeof region2 !== "undefined" ? { region: region2 } : {
customVoiceHostname,
speechRecognitionHostname,
speechSynthesisHostname
}
};
},
looseEvents
};
looseEvents: !!looseEvents,
referenceGrammars: referenceGrammars && Object.freeze([...referenceGrammars]),
speechRecognitionEndpointId,
textNormalization
});
}
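
As the PatchOptionsInit type and the resolveFunctionOrReturnValue calls above suggest, the credentials option may be a plain value, a promise, or a (possibly async) factory, and whichever form is given feeds the returned fetchCredentials callback. A sketch with placeholder values:

// Sketch: three equivalent ways to supply credentials.
const asValue = { region: 'westus', subscriptionKey: 'YOUR_KEY' };
const asPromise = Promise.resolve({ region: 'westus', subscriptionKey: 'YOUR_KEY' });
const asFactory = async () => ({
  region: 'westus',
  // e.g. fetch a fresh token each time credentials are requested
  authorizationToken: await fetchAuthorizationToken({ region: 'westus', subscriptionKey: 'YOUR_KEY' })
});
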
// src/SpeechServices/SpeechSDK.js
// src/SpeechServices/SpeechSDK.ts
var import_microsoft_cognitiveservices_speech = require("microsoft-cognitiveservices-speech-sdk/distrib/lib/microsoft.cognitiveservices.speech.sdk");

@@ -265,78 +272,2 @@ var SpeechSDK_default = {

// src/SpeechServices/SpeechToText/SpeechGrammarList.ts
var SpeechGrammarList = class {
constructor() {
this.#phrases = [];
}
addFromString() {
throw new Error("JSGF is not supported");
}
#phrases;
get phrases() {
return this.#phrases;
}
set phrases(value) {
if (Array.isArray(value)) {
this.#phrases = Object.freeze([...value]);
} else if (typeof value === "string") {
this.#phrases = Object.freeze([value]);
} else {
throw new Error(`The provided value is not an array or of type 'string'`);
}
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
var SpeechRecognitionErrorEvent = class extends Event {
constructor(type, { error, message }) {
super(type);
this.#error = error;
this.#message = message;
}
#error;
#message;
get error() {
return this.#error;
}
get message() {
return this.#message;
}
get type() {
return "error";
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
var SpeechRecognitionResultList = class extends FakeArray {
constructor(result) {
super(result);
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
var SpeechRecognitionEvent = class extends Event {
constructor(type, { data, resultIndex, results } = {}) {
super(type);
this.#data = data;
this.#resultIndex = resultIndex;
this.#results = results || new SpeechRecognitionResultList([]);
}
#data;
// TODO: "resultIndex" should be set.
#resultIndex;
#results;
get data() {
return this.#data;
}
get resultIndex() {
return this.#resultIndex;
}
get results() {
return this.#results;
}
get type() {
return super.type;
}
};
// src/SpeechServices/SpeechToText/private/EventListenerMap.ts

@@ -430,2 +361,78 @@ var EventListenerMap = class {

// src/SpeechServices/SpeechToText/SpeechGrammarList.ts
var SpeechGrammarList = class {
constructor() {
this.#phrases = [];
}
addFromString() {
throw new Error("JSGF is not supported");
}
#phrases;
get phrases() {
return this.#phrases;
}
set phrases(value) {
if (Array.isArray(value)) {
this.#phrases = Object.freeze([...value]);
} else if (typeof value === "string") {
this.#phrases = Object.freeze([value]);
} else {
throw new Error(`The provided value is not an array or of type 'string'`);
}
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
var SpeechRecognitionErrorEvent = class extends Event {
constructor(type, { error, message }) {
super(type);
this.#error = error;
this.#message = message;
}
#error;
#message;
get error() {
return this.#error;
}
get message() {
return this.#message;
}
get type() {
return "error";
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
var SpeechRecognitionResultList = class extends FakeArray {
constructor(result) {
super(result);
}
};
// src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
var SpeechRecognitionEvent = class extends Event {
constructor(type, { data, resultIndex, results } = {}) {
super(type);
this.#data = data;
this.#resultIndex = resultIndex;
this.#results = results || new SpeechRecognitionResultList([]);
}
#data;
// TODO: "resultIndex" should be set.
#resultIndex;
#results;
get data() {
return this.#data;
}
get resultIndex() {
return this.#resultIndex;
}
get results() {
return this.#results;
}
get type() {
return super.type;
}
};
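
These classes appear to have been relocated within the bundle rather than changed (the removed and re-added blocks are identical); they are what the ponyfill exposes in place of the browser's Web Speech API objects. A usage sketch, where recognition stands for a recognizer instance created by the ponyfill and is not taken from the diff:

// Sketch: phrase hints and event handling with the classes above.
const grammars = new SpeechGrammarList();
grammars.phrases = ['Tuen Mun', 'Yuen Long']; // accepts a string or an array; anything else throws

recognition.grammars = grammars;
recognition.addEventListener('result', event => {
  console.log(event.results); // a SpeechRecognitionResultList
});
recognition.addEventListener('error', event => {
  console.error(event.error, event.message);
});
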
// src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfillFromRecognizer.ts

@@ -640,3 +647,3 @@ var { ResultReason: ResultReason2, SpeechRecognizer: SpeechRecognizer2 } = SpeechSDK_default;

const { dynamicGrammar } = recognizer["privReco"];
referenceGrammars && referenceGrammars.length && dynamicGrammar.addReferenceGrammar(referenceGrammars);
referenceGrammars && referenceGrammars.length && dynamicGrammar.addReferenceGrammar([...referenceGrammars]);
phrases && phrases.length && dynamicGrammar.addPhrase([...phrases]);

@@ -810,3 +817,3 @@ await cognitiveServicesAsyncToPromise(recognizer.startContinuousRecognitionAsync, recognizer)();

// src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
// src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.ts
var { AudioConfig: AudioConfig2, OutputFormat: OutputFormat2, SpeechConfig: SpeechConfig2, SpeechRecognizer: SpeechRecognizer3 } = SpeechSDK_default;

@@ -832,14 +839,15 @@ function createSpeechRecognitionPonyfill(options) {

const createRecognizer = async (lang) => {
const { authorizationToken, region, speechRecognitionHostname, subscriptionKey } = await fetchCredentials();
const credentials = await fetchCredentials();
let speechConfig;
if (speechRecognitionHostname) {
const host = { hostname: speechRecognitionHostname, port: 443, protocol: "wss:" };
if (authorizationToken) {
if (typeof credentials.speechRecognitionHostname !== "undefined") {
const host = new URL("wss://hostname:443");
host.hostname = credentials.speechRecognitionHostname;
if (credentials.authorizationToken) {
speechConfig = SpeechConfig2.fromHost(host);
speechConfig.authorizationToken = authorizationToken;
speechConfig.authorizationToken = credentials.authorizationToken;
} else {
speechConfig = SpeechConfig2.fromHost(host, subscriptionKey);
speechConfig = SpeechConfig2.fromHost(host, credentials.subscriptionKey);
}
} else {
speechConfig = authorizationToken ? SpeechConfig2.fromAuthorizationToken(authorizationToken, region) : SpeechConfig2.fromSubscription(subscriptionKey, region);
speechConfig = typeof credentials.authorizationToken !== "undefined" ? SpeechConfig2.fromAuthorizationToken(credentials.authorizationToken, credentials.region) : SpeechConfig2.fromSubscription(credentials.subscriptionKey, credentials.region);
}
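
In other words, when the resolved credentials carry a speechRecognitionHostname, the recognizer is configured via SpeechConfig.fromHost with a wss URL on port 443; otherwise it falls back to region-based fromAuthorizationToken or fromSubscription. Credentials along the lines of this sketch (hostnames are placeholders) would take the hostname path:

// Sketch: hostname-based credentials route through SpeechConfig.fromHost.
createSpeechRecognitionPonyfill({
  credentials: {
    subscriptionKey: 'YOUR_SUBSCRIPTION_KEY',
    customVoiceHostname: 'example.customvoice.invalid',
    speechRecognitionHostname: 'example.stt.speech.invalid',
    speechSynthesisHostname: 'example.tts.speech.invalid'
  }
});
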

@@ -854,3 +862,2 @@ if (speechRecognitionEndpointId) {

return createSpeechRecognitionPonyfillFromRecognizer({
audioConfig,
createRecognizer,

@@ -864,3 +871,3 @@ enableTelemetry,

// src/SpeechServices/SpeechToText.js
// src/SpeechServices/SpeechToText.ts
var SpeechToText_default = createSpeechRecognitionPonyfill;

@@ -1371,5 +1378,8 @@

// src/SpeechServices/fetchAuthorizationToken.js
// src/SpeechServices/fetchAuthorizationToken.ts
var TOKEN_URL_TEMPLATE = "https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken";
async function fetchAuthorizationToken_default({ region, subscriptionKey }) {
async function fetchAuthorizationToken({
region,
subscriptionKey
}) {
const res = await fetch(TOKEN_URL_TEMPLATE.replace(/\{region\}/u, region), {

@@ -1396,3 +1406,3 @@ headers: {

meta.setAttribute("name", "web-speech-cognitive-services");
meta.setAttribute("content", `version=${"8.0.0-main.ccf35da"}`);
meta.setAttribute("content", `version=${"8.0.0-main.d9ef940"}`);
document.head.appendChild(meta);

@@ -1399,0 +1409,0 @@ // Annotate the CommonJS export names for ESM import in node:

package.json
{
"name": "web-speech-cognitive-services",
"version": "8.0.0-main.ccf35da",
"version": "8.0.0-main.d9ef940",
"description": "Polyfill Web Speech API with Cognitive Services Speech-to-Text service",

@@ -103,3 +103,3 @@ "files": [

"simple-update-in": "^2.2.0",
"web-speech-cognitive-services": "^8.0.0-main.ccf35da"
"web-speech-cognitive-services": "^8.0.0-main.d9ef940"
},

@@ -106,0 +106,0 @@ "peerDependencies": {

Diffs of the remaining changed files are not shown: their file types are unsupported or the diffs are too large to display.
