New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

web-speech-cognitive-services

Package Overview
Dependencies
Maintainers
1
Versions
169
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

web-speech-cognitive-services - npm Package Compare versions

Comparing version 2.1.1-master.feffa73 to 3.0.0-master.1a40b8b

lib/util/createFetchTokenUsingSubscriptionKey.js

6

CHANGELOG.md

@@ -15,2 +15,8 @@ # Changelog

- Updated voice list from [https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/supported-languages](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/supported-languages)
- Reliability around cancelling a playing utterance
- Instead of shutting down the `AudioContext`, we will stop the `AudioBufferSourceNode` for a graceful stop
- Simplify speech token authorization
- `recognition.fetchToken = async () => 'your subscription key';`
- `recognition.fetchToken = createFetchTokenUsingSubscriptionKey('your subscription key');`
- `fetchToken` will be called every time a token is required; the implementer should cache the token as needed

@@ -17,0 +23,0 @@ ## [2.1.0] - 2018-07-09

12

lib/index.js

@@ -6,4 +6,8 @@ 'use strict';

});
exports.SubscriptionKey = exports.SpeechSynthesisUtterance = exports.speechSynthesis = exports.SpeechRecognition = exports.SpeechGrammarList = undefined;
exports.SpeechSynthesisUtterance = exports.speechSynthesis = exports.SpeechRecognition = exports.SpeechGrammarList = exports.createFetchTokenUsingSubscriptionKey = undefined;
var _createFetchTokenUsingSubscriptionKey = require('./util/createFetchTokenUsingSubscriptionKey');
var _createFetchTokenUsingSubscriptionKey2 = _interopRequireDefault(_createFetchTokenUsingSubscriptionKey);
var _SpeechGrammarList = require('./recognition/SpeechGrammarList');

@@ -25,8 +29,5 @@

var _SubscriptionKey = require('./util/SubscriptionKey');
var _SubscriptionKey2 = _interopRequireDefault(_SubscriptionKey);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
exports.createFetchTokenUsingSubscriptionKey = _createFetchTokenUsingSubscriptionKey2.default;
exports.SpeechGrammarList = _SpeechGrammarList2.default;

@@ -36,3 +37,2 @@ exports.SpeechRecognition = _SpeechRecognition2.default;

exports.SpeechSynthesisUtterance = _SpeechSynthesisUtterance2.default;
exports.SubscriptionKey = _SubscriptionKey2.default;
//# sourceMappingURL=index.js.map

@@ -105,4 +105,14 @@ 'use strict';

_this.speechToken.authorized.then(_sink.Resolve, _sink.Reject);
if (!_this.fetchToken) {
console.error('SpeechRecognition: fetchToken must be set');
return _sink.Reject('fetchToken must be set');
} else if (typeof _this.fetchToken !== 'function') {
console.error('SpeechRecognition: fetchToken must be a function that returns a Promise and it will resolve to a string-based token');
return _sink.Reject('fetchToken must be a function that returns a Promise and it will resolve to a string-based token');
}
_this.fetchToken().then(_sink.Resolve, _sink.Reject);
return new CognitiveSpeech.Promise(_sink);

@@ -109,0 +119,0 @@ } catch (err) {

@@ -53,23 +53,27 @@ 'use strict';

if (!(utterance = queue.shift())) {
_context.next = 9;
_context.next = 11;
break;
}
_context.next = 7;
this.playingUtterance = utterance;
_context.next = 8;
return utterance.play(this.audioContext || (this.audioContext = new audioContextClass()));
case 7:
case 8:
this.playingUtterance = null;
_context.next = 4;
break;
case 9:
_context.prev = 9;
_context.next = 12;
case 11:
_context.prev = 11;
_context.next = 14;
return this.audioContext;
case 12:
case 14:
_context.t0 = _context.sent;
if (!_context.t0) {
_context.next = 15;
_context.next = 17;
break;

@@ -80,6 +84,6 @@ }

case 15:
return _context.finish(9);
case 17:
return _context.finish(11);
case 16:
case 18:
case 'end':

@@ -89,3 +93,3 @@ return _context.stop();

}
}, _callee, this, [[3,, 9, 16]]);
}, _callee, this, [[3,, 11, 18]]);
}));

@@ -102,9 +106,3 @@

value: function stop() {
if (this.audioContext) {
var closePromise = this.audioContext.close();
this.audioContext = null;
return closePromise;
}
this.playingUtterance && this.playingUtterance.stop();
}

@@ -111,0 +109,0 @@ }]);

@@ -27,3 +27,4 @@ 'use strict';

var _ref2 = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(_ref) {
var _ref$lang = _ref.lang,
var accessToken = _ref.accessToken,
_ref$lang = _ref.lang,
lang = _ref$lang === undefined ? DEFAULT_LANGUAGE : _ref$lang,

@@ -33,3 +34,2 @@ outputFormat = _ref.outputFormat,

rate = _ref.rate,
speechToken = _ref.speechToken,
text = _ref.text,

@@ -48,3 +48,3 @@ _ref$voice = _ref.voice,

headers: {
Authorization: speechToken,
Authorization: accessToken,
'Content-Type': 'application/ssml+xml',

@@ -51,0 +51,0 @@ 'X-Microsoft-OutputFormat': outputFormat

@@ -69,3 +69,3 @@ 'use strict';

var speechToken;
var accessToken;
return _regenerator2.default.wrap(function _callee$(_context) {

@@ -83,20 +83,28 @@ while (1) {

case 2:
if (this.speechToken) {
_context.next = 4;
if (this.fetchToken) {
_context.next = 6;
break;
}
throw new Error('speechToken must be set');
throw new Error('SpeechSynthesis: fetchToken must be set');
case 4:
_context.next = 6;
return this.speechToken.authorized;
case 6:
if (!(typeof this.fetchToken !== 'function')) {
_context.next = 8;
break;
}
case 6:
speechToken = _context.sent;
throw new Error('SpeechSynthesis: fetchToken must be a function that returns a Promise and it will resolve to a string-based token');
case 8:
_context.next = 10;
return this.fetchToken();
case 10:
accessToken = _context.sent;
return _context.abrupt('return', new _promise2.default(function (resolve, reject) {
utterance.addEventListener('end', resolve);
utterance.addEventListener('error', reject);
utterance.accessToken = accessToken;
utterance.outputFormat = _this.outputFormat;
utterance.speechToken = speechToken;
utterance.preload();

@@ -107,3 +115,3 @@

case 8:
case 12:
case 'end':

@@ -110,0 +118,0 @@ return _context.stop();

@@ -71,3 +71,3 @@ 'use strict';

var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(resolve, reject) {
var audioContextClosed, unsubscribe;
var audioContextClosed, sourceEnded, unsubscribe;
return _regenerator2.default.wrap(function _callee$(_context) {

@@ -78,2 +78,3 @@ while (1) {

audioContextClosed = new _eventAsPromise2.default();
sourceEnded = new _eventAsPromise2.default();
unsubscribe = (0, _subscribeEvent2.default)(audioContext, 'statechange', function (_ref2) {

@@ -83,7 +84,7 @@ var state = _ref2.target.state;

});
_context.prev = 2;
_context.prev = 3;
source.buffer = audioBuffer;
// "ended" may not fire if the underlying AudioContext is closed prematurely
source.onended = resolve;
source.onended = sourceEnded.eventListener;

@@ -93,24 +94,24 @@ source.connect(audioContext.destination);

_context.next = 9;
return audioContextClosed.upcoming();
_context.next = 10;
return _promise2.default.race([audioContextClosed.upcoming(), sourceEnded.upcoming()]);
case 9:
case 10:
resolve();
_context.next = 15;
_context.next = 16;
break;
case 12:
_context.prev = 12;
_context.t0 = _context['catch'](2);
case 13:
_context.prev = 13;
_context.t0 = _context['catch'](3);
reject(_context.t0);
case 15:
_context.prev = 15;
case 16:
_context.prev = 16;
unsubscribe();
return _context.finish(15);
return _context.finish(16);
case 18:
case 19:
case 'end':

@@ -120,3 +121,3 @@ return _context.stop();

}
}, _callee, _this, [[2, 12, 15, 18]]);
}, _callee, _this, [[3, 13, 16, 19]]);
}));

@@ -165,2 +166,3 @@

this.arrayBufferPromise = (0, _fetchSpeechData2.default)({
accessToken: this.accessToken,
lang: this.lang || window.navigator.language,

@@ -170,3 +172,2 @@ outputFormat: this.outputFormat,

rate: this.rate,
speechToken: this.speechToken,
text: this.text,

@@ -222,14 +223,16 @@ voice: this.voice && this.voice.voiceURI,

this.emit('start');
this._playingSource = source;
_context3.next = 13;
_context3.next = 14;
return playDecoded(audioContext, audioBuffer, source);
case 13:
case 14:
this._playingSource = null;
this.emit('end');
_context3.next = 20;
_context3.next = 22;
break;
case 16:
_context3.prev = 16;
case 18:
_context3.prev = 18;
_context3.t3 = _context3['catch'](0);

@@ -241,3 +244,3 @@

case 20:
case 22:
case 'end':

@@ -247,3 +250,3 @@ return _context3.stop();

}
}, _callee3, this, [[0, 16]]);
}, _callee3, this, [[0, 18]]);
}));

@@ -258,2 +261,7 @@

}, {
key: 'stop',
value: function stop() {
this._playingSource && this._playingSource.stop();
}
}, {
key: 'lang',

@@ -260,0 +268,0 @@ get: function get() {

{
"name": "web-speech-cognitive-services",
"version": "2.1.1-master.feffa73",
"version": "3.0.0-master.1a40b8b",
"description": "Polyfill Web Speech API with Cognitive Services Speech-to-Text service",

@@ -31,4 +31,5 @@ "keywords": [

"bootstrap": "lerna bootstrap",
"build": "lerna run build",
"test": "lerna run test"
"build": "lerna run --stream build",
"test": "lerna run --stream test",
"watch": "lerna run --stream watch"
},

@@ -35,0 +36,0 @@ "author": "William Wong <compulim@hotmail.com> (http://compulim.info/)",

@@ -32,3 +32,3 @@ # web-speech-cognitive-services

```jsx
import { SpeechRecognition, SubscriptionKey } from 'web-speech-cognitive-services';
import { createFetchTokenUsingSubscriptionKey, SpeechRecognition } from 'web-speech-cognitive-services';

@@ -38,3 +38,3 @@ const recognition = new SpeechRecognition();

recognition.lang = 'en-US';
recognition.speechToken = new SubscriptionKey('your subscription key');
recognition.fetchToken = createFetchTokenUsingSubscriptionKey('your subscription key');

@@ -55,6 +55,6 @@ recognition.onresult = ({ results }) => {

```jsx
import { SpeechGrammarList, SpeechRecognition, SubscriptionKey } from 'web-speech-cognitive-services';
import { createFetchTokenUsingSubscriptionKey, SpeechGrammarList, SpeechRecognition } from 'web-speech-cognitive-services';
import DictateButton from 'react-dictate-button';
const extra = { subscriptionKey: new SubscriptionKey('your subscription key') };
const extra = { fetchToken: createFetchTokenUsingSubscriptionKey('your subscription key') };

@@ -81,3 +81,3 @@ export default props =>

```jsx
import { SpeechGrammarList, SpeechRecognition, SubscriptionKey } from 'web-speech-cognitive-services';
import { createFetchTokenUsingSubscriptionKey, SpeechGrammarList, SpeechRecognition } from 'web-speech-cognitive-services';

@@ -88,3 +88,3 @@ const recognition = new SpeechRecognition();

recognition.grammars.words = ['Tuen Mun', 'Yuen Long'];
recognition.speechToken = new SubscriptionKey('your subscription key');
recognition.fetchToken = createFetchTokenUsingSubscriptionKey('your subscription key');

@@ -103,11 +103,11 @@ recognition.onresult = ({ results }) => {

```jsx
import { speechSynthesis, SpeechSynthesisUtterance, SubscriptionKey } from 'web-speech-cognitive-services';
import { createFetchTokenUsingSubscriptionKey, speechSynthesis, SpeechSynthesisUtterance } from 'web-speech-cognitive-services';
const subscriptionKey = new SubscriptionKey('your subscription key');
const fetchToken = createFetchTokenUsingSubscriptionKey('your subscription key');
const utterance = new SpeechSynthesisUtterance('Hello, World!');
speechSynthesis.speechToken = subscriptionKey;
speechSynthesis.fetchToken = fetchToken;
// Need to wait until token exchange is complete before speak
await subscriptionKey.authorized;
await fetchToken();
await speechSynthesis.speak(utterance);

@@ -125,3 +125,3 @@ ```

```jsx
import { speechSynthesis, SpeechSynthesisUtterance, SubscriptionKey } from 'web-speech-cognitive-services';
import { createFetchTokenUsingSubscriptionKey, speechSynthesis, SpeechSynthesisUtterance } from 'web-speech-cognitive-services';
import React from 'react';

@@ -134,8 +134,16 @@ import Say from 'react-say';

speechSynthesis.speechToken = new SubscriptionKey('your subscription key');
speechSynthesis.speechToken.authorized.then(() => this.setState(() => ({ ready: true })));
speechSynthesis.fetchToken = createFetchTokenUsingSubscriptionKey('your subscription key');
// We call it here to preload the token, the token is cached
speechSynthesis.fetchToken();
this.state = { ready: false };
}
async componentDidMount() {
await speechSynthesis.fetchToken();
this.setState(() => ({ ready: true }));
}
render() {

@@ -142,0 +150,0 @@ return (

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc