@speechly/browser-client - npm Package Compare versions

Comparing version 1.0.2 to 1.0.3

microphone/audioworklet.d.ts

index.d.ts

@@ -33,3 +33,3 @@

   */
-  initialize(appId: string, deviceId: string, token?: string): Promise<string>;
+  initialize(token: string, sourceSampleRate: number, targetSampleRate: number): Promise<void>;
  /**

@@ -58,3 +58,9 @@ * Closes the client.

   */
-  sendAudio(audioChunk: Int16Array): Error | void;
+  sendAudio(audioChunk: Float32Array): void;
+  /**
+   * Sends message to the Worker.
+   *
+   * @param message - message to send.
+   */
+  postMessage(message: Object): void;
 }
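
The worker's `sendAudio` now takes raw `Float32Array` samples as produced by Web Audio, instead of pre-converted 16-bit PCM, so sample-format conversion and downsampling move off the calling code and into the worker. A minimal sketch of such a conversion, purely for illustration (this helper is not part of the package's API):

// Hypothetical float-to-PCM helper, illustrating the kind of conversion
// that now happens worker-side rather than in the caller.
function floatTo16BitPCM(input: Float32Array): Int16Array {
  const output = new Int16Array(input.length);
  for (let i = 0; i < input.length; i++) {
    const s = Math.max(-1, Math.min(1, input[i])); // clamp to [-1, 1]
    output[i] = s < 0 ? s * 0x8000 : s * 0x7fff;   // scale to the int16 range
  }
  return output;
}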

@@ -79,3 +85,8 @@

 private readonly microphone;
-private readonly websocket;
+private readonly apiClient;
+private readonly loginUrl;
+private readonly isWebkit;
+private readonly audioContext;
+private readonly sampleRate;
+private readonly nativeResamplingSupported;
 private readonly activeContexts;

@@ -167,4 +178,2 @@ private readonly reconnectAttemptCount;

 private reconnectWebsocket;
-private initializeWebsocket;
-private readonly handleMicrophoneAudio;
 private setState;

@@ -384,8 +393,2 @@ }

-/**
- * Registers the callback that is invoked whenever an audio chunk is emitted.
- *
- * @param cb - the callback to invoke.
- */
-onAudio(cb: AudioCallback): void;
 /**
  * Initialises the microphone.

@@ -397,3 +400,3 @@ *

  */
-initialize(): Promise<void>;
+initialize(isWebkit: boolean, opts: MediaStreamConstraints): Promise<void>;
 /**

@@ -400,0 +403,0 @@ * Closes the microphone, tearing down all the infrastructure.

@@ -1,9 +0,15 @@

-import { AudioCallback, Microphone } from './types';
-import { AudioProcessor } from './browser_audio_processor';
+import { Microphone } from './types';
+import { APIClient } from '../websocket';
 export declare class BrowserMicrophone implements Microphone {
-    private readonly audioProcessor;
-    private onAudioCb;
-    constructor(sampleRate: number, audioProcessor?: AudioProcessor);
-    onAudio(cb: AudioCallback): void;
-    initialize(): Promise<void>;
+    private readonly audioContext;
+    private readonly apiClient;
+    private readonly resampleRatio;
+    private readonly sampleRate;
+    private initialized;
+    private muted;
+    private audioTrack?;
+    private mediaStream?;
+    private audioProcessor?;
+    constructor(audioContext: AudioContext, sampleRate: number, apiClient: APIClient);
+    initialize(isWebkit: boolean, opts: MediaStreamConstraints): Promise<void>;
     close(): Promise<void>;

@@ -10,0 +16,0 @@ mute(): void;
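
As the declarations above show, `BrowserMicrophone` no longer owns an audio processor or exposes `onAudio`; it is constructed with a shared `AudioContext` and an `APIClient`, and forwards captured audio to that client. A rough construction sketch, with the surrounding setup assumed rather than documented:

// Sketch only: `apiUrl` and the WebWorkerController wiring are assumptions
// inferred from the diff below, not documented usage.
const audioContext = new AudioContext();
const apiClient: APIClient = new WebWorkerController(apiUrl);
const microphone = new BrowserMicrophone(audioContext, 16000, apiClient);
await microphone.initialize(false, { video: false, audio: true });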

@@ -11,18 +11,106 @@ "use strict";

 };
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-const browser_audio_processor_1 = require("./browser_audio_processor");
 const types_1 = require("./types");
+const audioworklet_1 = __importDefault(require("./audioworklet"));
+const audioProcessEvent = 'audioprocess';
+const baseBufferSize = 4096;
 class BrowserMicrophone {
-    constructor(sampleRate, audioProcessor) {
-        this.onAudioCb = () => { };
-        this.handleAudio = (audioBuffer) => {
-            this.onAudioCb(audioBuffer);
+    constructor(audioContext, sampleRate, apiClient) {
+        this.initialized = false;
+        this.muted = false;
+        this.handleAudio = (array) => {
+            if (this.muted) {
+                return;
+            }
+            this.apiClient.sendAudio(array);
         };
-        this.audioProcessor = audioProcessor !== null && audioProcessor !== void 0 ? audioProcessor : new browser_audio_processor_1.BrowserAudioProcessor(sampleRate, this.handleAudio);
+        this.audioContext = audioContext;
+        this.apiClient = apiClient;
+        this.sampleRate = sampleRate;
+        this.resampleRatio = this.audioContext.sampleRate / this.sampleRate;
     }
-    onAudio(cb) {
-        this.onAudioCb = cb;
-    }
-    initialize() {
+    initialize(isWebkit, opts) {
+        var _a;
         return __awaiter(this, void 0, void 0, function* () {
-            yield this.audioProcessor.initialize();
+            if (((_a = window.navigator) === null || _a === void 0 ? void 0 : _a.mediaDevices) === undefined) {
+                throw types_1.ErrDeviceNotSupported;
+            }
+            // Start the audio context if we are dealing with a WebKit browser.
+            //
+            // WebKit browsers (e.g. Safari) require the context to be resumed first,
+            // before obtaining user media by calling `mediaDevices.getUserMedia`.
+            //
+            // If done in a different order, the audio context will resume successfully,
+            // but will emit empty audio buffers.
+            if (isWebkit) {
+                yield this.audioContext.resume();
+            }
+            try {
+                this.mediaStream = yield window.navigator.mediaDevices.getUserMedia(opts);
+            }
+            catch (_b) {
+                throw types_1.ErrNoAudioConsent;
+            }
+            this.audioTrack = this.mediaStream.getAudioTracks()[0];
+            this.audioTrack.enabled = false;
+            // Start the audio context if we are dealing with a non-WebKit browser.
+            //
+            // Non-WebKit browsers (currently only Chrome on Android)
+            // require that user media is obtained before resuming the audio context.
+            //
+            // If the audio context is resumed before `mediaDevices.getUserMedia`,
+            // `audioContext.resume()` will hang indefinitely, without being resolved or rejected.
+            if (!isWebkit) {
+                yield this.audioContext.resume();
+            }
+            if (window.AudioWorkletNode !== undefined) {
+                const blob = new Blob([audioworklet_1.default], { type: 'text/javascript' });
+                const blobURL = window.URL.createObjectURL(blob);
+                yield this.audioContext.audioWorklet.addModule(blobURL);
+                const speechlyNode = new AudioWorkletNode(this.audioContext, 'speechly-worklet');
+                this.audioContext.createMediaStreamSource(this.mediaStream).connect(speechlyNode);
+                speechlyNode.connect(this.audioContext.destination);
+                if (window.SharedArrayBuffer !== undefined) {
+                    // Chrome, Edge, Firefox, Firefox Android
+                    const controlSAB = new window.SharedArrayBuffer(2 * Int32Array.BYTES_PER_ELEMENT);
+                    const dataSAB = new window.SharedArrayBuffer(2 * 4096 * Float32Array.BYTES_PER_ELEMENT);
+                    this.apiClient.postMessage({
+                        type: 'SET_SHARED_ARRAY_BUFFERS',
+                        controlSAB,
+                        dataSAB,
+                    });
+                    speechlyNode.port.postMessage({
+                        type: 'SET_SHARED_ARRAY_BUFFERS',
+                        controlSAB,
+                        dataSAB,
+                    });
+                }
+                else {
+                    // Opera, Chrome Android, WebView Android
+                    speechlyNode.port.onmessage = (event) => {
+                        this.handleAudio(event.data);
+                    };
+                }
+            }
+            else {
+                // Safari, iOS Safari and Internet Explorer
+                if (isWebkit) {
+                    // Multiply the base buffer size of 4096 samples by the resample ratio,
+                    // rounded up to the next power of 2.
+                    // E.g. for 48 kHz to 16 kHz downsampling this yields 4096 (base) * 4 = 16384.
+                    const bufSize = baseBufferSize * Math.pow(2, Math.ceil(Math.log(this.resampleRatio) / Math.log(2)));
+                    this.audioProcessor = this.audioContext.createScriptProcessor(bufSize, 1, 1);
+                }
+                else {
+                    this.audioProcessor = this.audioContext.createScriptProcessor(undefined, 1, 1);
+                }
+                this.audioContext.createMediaStreamSource(this.mediaStream).connect(this.audioProcessor);
+                this.audioProcessor.connect(this.audioContext.destination);
+                this.audioProcessor.addEventListener(audioProcessEvent, (event) => {
+                    this.handleAudio(event.inputBuffer.getChannelData(0));
+                });
+            }
+            this.initialized = true;
+            this.mute();
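
The ScriptProcessorNode buffer sizing above rounds the resample ratio up to the next power of two before scaling the 4096-sample base. The same arithmetic as a standalone sketch (the helper name is hypothetical):

// Hypothetical helper reproducing the buffer-size arithmetic above.
function scriptProcessorBufferSize(contextRate: number, targetRate: number): number {
  const baseBufferSize = 4096; // samples, matching the constant above
  const resampleRatio = contextRate / targetRate;
  return baseBufferSize * Math.pow(2, Math.ceil(Math.log2(resampleRatio)));
}
scriptProcessorBufferSize(48000, 16000); // ratio 3     -> next power of two is 4 -> 16384
scriptProcessorBufferSize(44100, 16000); // ratio ~2.76 -> next power of two is 4 -> 16384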

@@ -34,10 +122,33 @@ });

             this.mute();
-            return this.audioProcessor.close();
+            if (!this.initialized) {
+                throw types_1.ErrNotInitialized;
+            }
+            // Stop all media tracks
+            const stream = this.mediaStream;
+            stream.getTracks().forEach(t => t.stop());
+            // Disconnect and stop ScriptProcessorNode
+            if (this.audioProcessor != null) {
+                const proc = this.audioProcessor;
+                proc.disconnect();
+            }
+            // Unset all audio infrastructure
+            this.mediaStream = undefined;
+            this.audioTrack = undefined;
+            this.audioProcessor = undefined;
+            this.initialized = false;
         });
     }
     mute() {
-        this.audioProcessor.mute();
+        this.muted = true;
+        if (this.initialized) {
+            const t = this.audioTrack;
+            t.enabled = false;
+        }
     }
     unmute() {
-        this.audioProcessor.unmute();
+        this.muted = false;
+        if (this.initialized) {
+            const t = this.audioTrack;
+            t.enabled = true;
+        }
     }

@@ -44,0 +155,0 @@ }
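
Muting now flips `MediaStreamTrack.enabled` instead of calling into an audio processor; a disabled track keeps the audio graph connected but delivers silence, so nothing has to be torn down. The underlying browser behaviour the new `mute()`/`unmute()` rely on, in isolation:

// Plain getUserMedia behaviour: toggling a track mutes capture
// without tearing down or rebuilding the audio pipeline.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const track = stream.getAudioTracks()[0];
track.enabled = false; // capture continues, but the track emits silence
track.enabled = true;  // audio flows again, no re-initialization needed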

@@ -37,8 +37,2 @@ /**

-/**
- * Registers the callback that is invoked whenever an audio chunk is emitted.
- *
- * @param cb - the callback to invoke.
- */
-onAudio(cb: AudioCallback): void;
 /**
  * Initialises the microphone.

@@ -50,3 +44,3 @@ *

  */
-initialize(): Promise<void>;
+initialize(isWebkit: boolean, opts: MediaStreamConstraints): Promise<void>;
 /**

@@ -53,0 +47,0 @@ * Closes the microphone, tearing down all the infrastructure.

{
"name": "@speechly/browser-client",
"version": "1.0.2",
"version": "1.0.3",
"description": "Browser client for Speechly API",

@@ -5,0 +5,0 @@ "private": false,

@@ -13,3 +13,8 @@ import { ClientOptions, StateChangeCallback, SegmentChangeCallback, TentativeTranscriptCallback, TranscriptCallback, TentativeEntitiesCallback, EntityCallback, IntentCallback } from './types';

 private readonly microphone;
-private readonly websocket;
+private readonly apiClient;
+private readonly loginUrl;
+private readonly isWebkit;
+private readonly audioContext;
+private readonly sampleRate;
+private readonly nativeResamplingSupported;
 private readonly activeContexts;

@@ -101,5 +106,3 @@ private readonly reconnectAttemptCount;

 private reconnectWebsocket;
-private initializeWebsocket;
-private readonly handleMicrophoneAudio;
 private setState;
 }

@@ -17,2 +17,3 @@ "use strict";

 const uuid_1 = require("uuid");
+const token_1 = require("../websocket/token");
 const microphone_1 = require("../microphone");

@@ -135,20 +136,39 @@ const websocket_1 = require("../websocket");

         };
-        this.handleMicrophoneAudio = (audioChunk) => {
-            if (this.state !== types_1.ClientState.Recording) {
-                return;
-            }
+        this.sampleRate = (_a = options.sampleRate) !== null && _a !== void 0 ? _a : microphone_1.DefaultSampleRate;
+        try {
+            const constraints = window.navigator.mediaDevices.getSupportedConstraints();
+            this.nativeResamplingSupported = constraints.sampleRate === true;
+        }
+        catch (_k) {
+            this.nativeResamplingSupported = false;
+        }
+        if (window.AudioContext !== undefined) {
+            const opts = {};
+            if (this.nativeResamplingSupported) {
+                opts.sampleRate = this.sampleRate;
+            }
-            this.websocket.sendAudio(audioChunk);
-        };
-        const language = (_a = options.language) !== null && _a !== void 0 ? _a : defaultLanguage;
+            this.audioContext = new window.AudioContext(opts);
+            this.isWebkit = false;
+        }
+        else if (window.webkitAudioContext !== undefined) {
+            // eslint-disable-next-line new-cap
+            this.audioContext = new window.webkitAudioContext();
+            this.isWebkit = true;
+        }
+        else {
+            throw microphone_1.ErrDeviceNotSupported;
+        }
+        const language = (_b = options.language) !== null && _b !== void 0 ? _b : defaultLanguage;
         if (!locale_code_1.default.validate(language)) {
             throw Error(`[SpeechlyClient] Invalid language "${language}"`);
         }
-        this.debug = (_b = options.debug) !== null && _b !== void 0 ? _b : false;
+        this.debug = (_c = options.debug) !== null && _c !== void 0 ? _c : false;
+        this.loginUrl = (_d = options.loginUrl) !== null && _d !== void 0 ? _d : defaultLoginUrl;
         this.appId = options.appId;
-        this.microphone = (_c = options.microphone) !== null && _c !== void 0 ? _c : new microphone_1.BrowserMicrophone((_d = options.sampleRate) !== null && _d !== void 0 ? _d : microphone_1.DefaultSampleRate);
-        this.websocket = (_e = options.apiClient) !== null && _e !== void 0 ? _e : new websocket_1.WebsocketClient((_f = options.loginUrl) !== null && _f !== void 0 ? _f : defaultLoginUrl, (_g = options.apiUrl) !== null && _g !== void 0 ? _g : defaultApiUrl, language, (_h = options.sampleRate) !== null && _h !== void 0 ? _h : microphone_1.DefaultSampleRate);
+        const apiUrl = generateWsUrl((_e = options.apiUrl) !== null && _e !== void 0 ? _e : defaultApiUrl, language, (_f = options.sampleRate) !== null && _f !== void 0 ? _f : microphone_1.DefaultSampleRate);
+        this.apiClient = (_g = options.apiClient) !== null && _g !== void 0 ? _g : new websocket_1.WebWorkerController(apiUrl);
+        this.microphone = (_h = options.microphone) !== null && _h !== void 0 ? _h : new microphone_1.BrowserMicrophone(this.audioContext, this.sampleRate, this.apiClient);
         this.storage = (_j = options.storage) !== null && _j !== void 0 ? _j : new storage_1.LocalStorage();
-        this.microphone.onAudio(this.handleMicrophoneAudio);
-        this.websocket.onResponse(this.handleWebsocketResponse);
-        this.websocket.onClose(this.handleWebsocketClosure);
+        this.apiClient.onResponse(this.handleWebsocketResponse);
+        this.apiClient.onClose(this.handleWebsocketClosure);
     }
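
The constructor now selects the audio context up front: the standard `AudioContext` (with a native `sampleRate` hint when the browser supports that constraint) or Safari's prefixed `webkitAudioContext`, which accepts no constructor options. The detection pattern in isolation, as a sketch:

// Feature-detection sketch; `webkitAudioContext` is a non-standard global,
// hence the loose typing.
let audioContext: AudioContext;
let isWebkit: boolean;
if (window.AudioContext !== undefined) {
  audioContext = new AudioContext({ sampleRate: 16000 }); // hint only where natively supported
  isWebkit = false;
} else if ((window as any).webkitAudioContext !== undefined) {
  audioContext = new (window as any).webkitAudioContext(); // takes no options
  isWebkit = true;
} else {
  throw new Error('AudioContext is not supported in this browser');
}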

@@ -183,6 +203,30 @@ /**

         }
-            // 2. Initialise the microphone stack.
-            yield this.microphone.initialize();
-            // 3. Initialise websocket.
-            yield this.initializeWebsocket(this.deviceId);
+            if (this.authToken === undefined || !token_1.validateToken(this.authToken, this.appId, this.deviceId)) {
+                this.authToken = yield token_1.fetchToken(defaultLoginUrl, this.appId, this.deviceId);
+                // Cache the auth token in local storage for future use.
+                try {
+                    yield this.storage.set(authTokenKey, this.authToken);
+                }
+                catch (err) {
+                    // No need to fail if the token caching failed, we will just re-fetch it next time.
+                    if (this.debug) {
+                        console.warn('[SpeechlyClient]', 'Error caching auth token in storage:', err);
+                    }
+                }
+            }
+            const opts = {
+                video: false,
+            };
+            if (this.nativeResamplingSupported) {
+                opts.audio = {
+                    sampleRate: this.sampleRate,
+                };
+            }
+            else {
+                opts.audio = true;
+            }
+            // 2. Initialise websocket.
+            yield this.apiClient.initialize(this.authToken, this.audioContext.sampleRate, this.sampleRate);
+            // 3. Initialise the microphone stack.
+            yield this.microphone.initialize(this.isWebkit, opts);
     }
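
Compared to 1.0.2, initialization inverts the old order: the auth token is fetched (and cached) first, the worker is initialized with it, and only then is the microphone brought up. Condensed, with the variable names assumed for illustration:

// Assumed names; mirrors the sequence in the compiled code above.
const token = await fetchToken(loginUrl, appId, deviceId);   // cached via storage when possible
await apiClient.initialize(token, audioContext.sampleRate, targetSampleRate);
await microphone.initialize(isWebkit, { video: false, audio: true });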

@@ -224,3 +268,3 @@ catch (err) {

 try {
-    yield this.websocket.close();
+    yield this.apiClient.close();
 }

@@ -253,3 +297,3 @@ catch (err) {

 try {
-    contextId = yield this.websocket.startContext();
+    contextId = yield this.apiClient.startContext();
 }

@@ -299,3 +343,3 @@ catch (err) {

 try {
-    contextId = yield this.websocket.stopContext();
+    contextId = yield this.apiClient.stopContext();
 }

@@ -371,3 +415,3 @@ catch (err) {

 }
-yield this.initializeWebsocket(deviceId);
+// await this.initializeWebsocket(deviceId)
 }), {

@@ -379,18 +423,2 @@ retries: this.reconnectAttemptCount,

     }
-    initializeWebsocket(deviceId) {
-        return __awaiter(this, void 0, void 0, function* () {
-            // Initialise websocket and save the auth token.
-            this.authToken = yield this.websocket.initialize(this.appId, deviceId, this.authToken);
-            // Cache the auth token in local storage for future use.
-            try {
-                yield this.storage.set(authTokenKey, this.authToken);
-            }
-            catch (err) {
-                // No need to fail if the token caching failed, we will just re-fetch it next time.
-                if (this.debug) {
-                    console.warn('[SpeechlyClient]', 'Error caching auth token in storage:', err);
-                }
-            }
-        });
-    }
     setState(newState) {

@@ -408,2 +436,8 @@ if (this.state === newState) {

 exports.Client = Client;
+function generateWsUrl(baseUrl, languageCode, sampleRate) {
+    const params = new URLSearchParams();
+    params.append('languageCode', languageCode);
+    params.append('sampleRate', sampleRate.toString());
+    return `${baseUrl}?${params.toString()}`;
+}
 //# sourceMappingURL=client.js.map
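
For a concrete idea of the URL `generateWsUrl` produces (the base URL here is illustrative, not the package default):

generateWsUrl('wss://example.invalid/speechly', 'en-US', 16000);
// => 'wss://example.invalid/speechly?languageCode=en-US&sampleRate=16000'
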
"use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
const client_1 = require("./client");
const types_1 = require("./types");
let microphone;
let apiClient;
let storage;
let client;
let stateChangeCb;
describe('Speechly Client', function () {
beforeEach(function () {
return __awaiter(this, void 0, void 0, function* () {
microphone = {
onAudio: jest.fn(),
initialize: jest.fn(),
close: jest.fn(),
mute: jest.fn(() => Date.now()),
unmute: jest.fn(() => Date.now()),
};
apiClient = {
onResponse: jest.fn(),
onClose: jest.fn(),
initialize: jest.fn(),
close: jest.fn(),
startContext: jest.fn(),
stopContext: jest.fn(() => __awaiter(this, void 0, void 0, function* () { return new Promise(resolve => resolve(Date.now().toString())); })),
sendAudio: jest.fn(),
};
storage = {
initialize: jest.fn(),
close: jest.fn(),
get: jest.fn(),
set: jest.fn(),
getOrSet: jest.fn(),
};
client = new client_1.Client({
appId: 'xxxx-xxxx-xxxx',
language: 'en-US',
microphone,
apiClient,
storage,
debug: true,
});
stateChangeCb = jest.fn();
client.onStateChange(stateChangeCb);
yield client.initialize();
});
});
it('set state Connecting and Connected during initialization', function () {
return __awaiter(this, void 0, void 0, function* () {
expect(stateChangeCb.mock.calls.length).toBe(2);
expect(stateChangeCb.mock.calls[0][0]).toBe(types_1.ClientState.Connecting);
expect(stateChangeCb.mock.calls[1][0]).toBe(types_1.ClientState.Connected);
});
});
it('delay stop context after call for 250 ms', function () {
return __awaiter(this, void 0, void 0, function* () {
yield client.startContext();
expect(apiClient.startContext.mock.calls.length).toBe(1);
expect(stateChangeCb.mock.calls[2][0]).toBe(types_1.ClientState.Starting);
expect(stateChangeCb.mock.calls[3][0]).toBe(types_1.ClientState.Recording);
const callStopTime = Date.now();
const stopPromise = client.stopContext();
const realStopTime = yield stopPromise;
expect(apiClient.stopContext.mock.calls.length).toBe(1);
expect(parseInt(realStopTime) - callStopTime).toBeGreaterThanOrEqual(250);
});
});
it('cancel delay stop context on start context', function () {
return __awaiter(this, void 0, void 0, function* () {
yield client.startContext();
const callStopTime = Date.now();
const stopPromise = client.stopContext();
yield client.startContext();
const realStopTime = yield stopPromise;
expect(apiClient.stopContext.mock.calls.length).toBe(1);
expect(parseInt(realStopTime) - callStopTime).toBeLessThan(250);
});
});
test('downsampler', () => {
expect('downsampler').toBe('downsampler');
});
//# sourceMappingURL=client.test.js.map
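
The tests above pin down the stop-delay contract: `stopContext()` resolves only after a roughly 250 ms grace period for trailing audio, and an intervening `startContext()` cancels the pending stop. In usage terms:

// Behaviour asserted by the tests above, shown as client usage.
await client.startContext();
const pendingStop = client.stopContext(); // resolves after the ~250 ms delay
await client.startContext();              // cancels the delayed stop early
await pendingStop;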

@@ -1,2 +0,2 @@

-export * from './client';
+export * from './webWorkerController';
 export * from './types';

@@ -6,4 +6,4 @@ "use strict";

 Object.defineProperty(exports, "__esModule", { value: true });
-__export(require("./client"));
+__export(require("./webWorkerController"));
 __export(require("./types"));
//# sourceMappingURL=index.js.map

@@ -163,3 +163,3 @@ /**

  */
-initialize(appId: string, deviceId: string, token?: string): Promise<string>;
+initialize(token: string, sourceSampleRate: number, targetSampleRate: number): Promise<void>;
 /**

@@ -188,3 +188,9 @@ * Closes the client.

  */
-sendAudio(audioChunk: Int16Array): Error | void;
+sendAudio(audioChunk: Float32Array): void;
+/**
+ * Sends message to the Worker.
+ *
+ * @param message - message to send.
+ */
+postMessage(message: Object): void;
 }

