New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

web-speech-cognitive-services

Package Overview
Dependencies
Maintainers
1
Versions
169
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

web-speech-cognitive-services - npm Package Compare versions

Comparing version 1.0.1-master.f1ee18c to 2.0.0-master.7ac198d

lib/util/exchangeToken.js

9

CHANGELOG.md

@@ -8,2 +8,4 @@ # Changelog

## [Unreleased]
## [2.0.0] - 2018-07-09
### Added

@@ -14,4 +16,7 @@ - SpeechSynthesis polyfill with Cognitive Services

- Removed `CognitiveServices` prefix
- Renamed `CognitiveServicesSpeechGrammarList` to `SpeechGrammarList`
- Renamed `CognitiveServicesSpeechRecognition` to `SpeechRecognition`
- Renamed `CognitiveServicesSpeechGrammarList` to `SpeechGrammarList`
- Renamed `CognitiveServicesSpeechRecognition` to `SpeechRecognition`
- Removed default export, now must use `import { SpeechRecognition } from 'web-speech-cognitive-services';`
- Speech Recognition: changed speech token authorization
- `recognition.speechToken = new SubscriptionKey('your subscription key');`

@@ -18,0 +23,0 @@ ## [1.0.0] - 2018-06-29

@@ -6,3 +6,3 @@ 'use strict';

});
exports.SpeechSynthesisUtterance = exports.speechSynthesis = exports.SpeechRecognition = exports.SpeechGrammarList = undefined;
exports.SubscriptionKey = exports.SpeechSynthesisUtterance = exports.speechSynthesis = exports.SpeechRecognition = exports.SpeechGrammarList = undefined;

@@ -27,5 +27,8 @@ require('babel-polyfill');

var _SubscriptionKey = require('./util/SubscriptionKey');
var _SubscriptionKey2 = _interopRequireDefault(_SubscriptionKey);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
exports.default = _SpeechRecognition2.default;
exports.SpeechGrammarList = _SpeechGrammarList2.default;

@@ -35,2 +38,3 @@ exports.SpeechRecognition = _SpeechRecognition2.default;

exports.SpeechSynthesisUtterance = _SpeechSynthesisUtterance2.default;
exports.SubscriptionKey = _SubscriptionKey2.default;
//# sourceMappingURL=index.js.map

@@ -11,8 +11,8 @@ 'use strict';

var CognitiveServicesSpeechGrammarList = function () {
function CognitiveServicesSpeechGrammarList() {
_classCallCheck(this, CognitiveServicesSpeechGrammarList);
var _class = function () {
function _class() {
_classCallCheck(this, _class);
}
_createClass(CognitiveServicesSpeechGrammarList, [{
_createClass(_class, [{
key: 'addFromString',

@@ -24,6 +24,6 @@ value: function addFromString() {

return CognitiveServicesSpeechGrammarList;
return _class;
}();
exports.default = CognitiveServicesSpeechGrammarList;
exports.default = _class;
//# sourceMappingURL=SpeechGrammarList.js.map

@@ -41,7 +41,7 @@ 'use strict';

var CognitiveServicesSpeechRecognition = function () {
function CognitiveServicesSpeechRecognition() {
var _class = function () {
function _class() {
var _this = this;
_classCallCheck(this, CognitiveServicesSpeechRecognition);
_classCallCheck(this, _class);

@@ -64,73 +64,27 @@ this._lang = '';

this.createRecognizer = (0, _memoizeOne2.default)(function (subscriptionKeyOrTokenFetch) {
var lang = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : navigator.language;
var mode = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : CognitiveSpeech.RecognitionMode.Interactive;
var osPlatform = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : window.navigator.userAgent;
var osName = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : window.navigator.appName;
var osVersion = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : window.navigator.appVersion;
var deviceManufacturer = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 'microsoft-speech-browser-sdk';
var deviceModel = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : 'web-speech-cognitive-services';
var deviceVersion = arguments.length > 8 && arguments[8] !== undefined ? arguments[8] : "1.0.1-master.f1ee18c";
this.createRecognizer = (0, _memoizeOne2.default)(function () {
var lang = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : navigator.language;
var mode = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : CognitiveSpeech.RecognitionMode.Interactive;
var osPlatform = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : window.navigator.userAgent;
var osName = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : window.navigator.appName;
var osVersion = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : window.navigator.appVersion;
var deviceManufacturer = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : 'microsoft-speech-browser-sdk';
var deviceModel = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 'web-speech-cognitive-services';
var deviceVersion = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : "2.0.0-master.7ac198d";
var config = new CognitiveSpeech.RecognizerConfig(new CognitiveSpeech.SpeechConfig(new CognitiveSpeech.Context(new CognitiveSpeech.OS(osPlatform, osName, osVersion), new CognitiveSpeech.Device(deviceManufacturer, deviceModel, deviceVersion))), mode, lang, CognitiveSpeech.SpeechResultFormat.Detailed);
var auth = void 0;
var fetchToken = function fetchToken() {
var sink = new CognitiveSpeech.Sink();
if (typeof subscriptionKeyOrTokenFetch === 'function') {
auth = new CognitiveSpeech.CognitiveTokenAuthentication(function () {
var _ref = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee(authFetchEventID) {
return regeneratorRuntime.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.next = 2;
return subscriptionKeyOrTokenFetch(authFetchEventID, false);
_this.speechToken.authorized.then(sink.Resolve, sink.Reject);
case 2:
return _context.abrupt('return', _context.sent);
return new CognitiveSpeech.Promise(sink);
};
case 3:
case 'end':
return _context.stop();
}
}
}, _callee, _this);
}));
return function (_x9) {
return _ref.apply(this, arguments);
};
}(), function () {
var _ref2 = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee2(authFetchEventID) {
return regeneratorRuntime.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
_context2.next = 2;
return subscriptionKeyOrTokenFetch(authFetchEventID, true);
case 2:
return _context2.abrupt('return', _context2.sent);
case 3:
case 'end':
return _context2.stop();
}
}
}, _callee2, _this);
}));
return function (_x10) {
return _ref2.apply(this, arguments);
};
}());
} else {
auth = new CognitiveSpeech.CognitiveSubscriptionKeyAuthentication(subscriptionKeyOrTokenFetch);
}
return CognitiveSpeech.CreateRecognizer(config, auth);
return CognitiveSpeech.CreateRecognizer(config, new CognitiveSpeech.CognitiveTokenAuthentication(fetchToken, fetchToken));
});
}
_createClass(CognitiveServicesSpeechRecognition, [{
_createClass(_class, [{
key: 'abort',

@@ -140,4 +94,4 @@ value: function abort() {

// After abort is called, we should not saw it is a "success", "silent", or "no match"
var _ref3 = this.recognizer || {},
AudioSource = _ref3.AudioSource;
var _ref = this.recognizer || {},
AudioSource = _ref.AudioSource;

@@ -163,10 +117,10 @@ AudioSource && AudioSource.TurnOff();

value: function () {
var _ref4 = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee3() {
var _ref2 = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee() {
var recognizer, _toPromise, eventListener, promises, error, listeningStarted, recognitionStarted, gotFirstHypothesis, speechHypothesis, speechDetailedPhrase, recognitionResult;
return regeneratorRuntime.wrap(function _callee3$(_context3) {
return regeneratorRuntime.wrap(function _callee$(_context) {
while (1) {
switch (_context3.prev = _context3.next) {
switch (_context.prev = _context.next) {
case 0:
recognizer = this.recognizer = this.createRecognizer(this.subscriptionKey || this.tokenFetch, this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "1.0.1-master.f1ee18c");
recognizer = this.recognizer = this.createRecognizer(this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "2.0.0-master.7ac198d");
_toPromise = toPromise(), eventListener = _toPromise.eventListener, promises = _objectWithoutProperties(_toPromise, ['eventListener']);

@@ -178,3 +132,3 @@

_context3.next = 6;
_context.next = 6;
return promises.recognitionTriggered;

@@ -184,10 +138,10 @@

error = void 0;
_context3.next = 9;
_context.next = 9;
return Promise.race([promises.listeningStarted, promises.recognitionEnded]);
case 9:
listeningStarted = _context3.sent;
listeningStarted = _context.sent;
if (!(listeningStarted.Name === 'RecognitionEndedEvent')) {
_context3.next = 14;
_context.next = 14;
break;

@@ -202,3 +156,3 @@ }

}
_context3.next = 51;
_context.next = 51;
break;

@@ -209,11 +163,11 @@

_context3.next = 17;
_context.next = 17;
return promises.connectingToService;
case 17:
_context3.next = 19;
_context.next = 19;
return Promise.race([promises.recognitionStarted, promises.recognitionEnded]);
case 19:
recognitionStarted = _context3.sent;
recognitionStarted = _context.sent;

@@ -224,3 +178,3 @@

if (!(recognitionStarted.Name === 'RecognitionEndedEvent')) {
_context3.next = 25;
_context.next = 25;
break;

@@ -235,3 +189,3 @@ }

}
_context3.next = 36;
_context.next = 36;
break;

@@ -243,14 +197,14 @@

case 26:
_context3.next = 28;
_context.next = 28;
return Promise.race([promises.getSpeechHypothesisPromise(), promises.speechEndDetected]);
case 28:
speechHypothesis = _context3.sent;
speechHypothesis = _context.sent;
if (!(speechHypothesis.Name === 'SpeechEndDetectedEvent')) {
_context3.next = 31;
_context.next = 31;
break;
}
return _context3.abrupt('break', 35);
return _context.abrupt('break', 35);

@@ -268,3 +222,3 @@ case 31:

case 33:
_context3.next = 26;
_context.next = 26;
break;

@@ -284,3 +238,3 @@

if (!this._aborted) {
_context3.next = 43;
_context.next = 43;
break;

@@ -291,18 +245,18 @@ }

_context3.next = 41;
_context.next = 41;
return promises.recognitionEnded;
case 41:
_context3.next = 51;
_context.next = 51;
break;
case 43:
_context3.next = 45;
_context.next = 45;
return Promise.race([promises.speechDetailedPhrase, promises.recognitionEnded]);
case 45:
speechDetailedPhrase = _context3.sent;
speechDetailedPhrase = _context.sent;
if (!(speechDetailedPhrase.Name !== 'RecognitionEndedEvent')) {
_context3.next = 51;
_context.next = 51;
break;

@@ -325,3 +279,3 @@ }

_context3.next = 51;
_context.next = 51;
return promises.recognitionEnded;

@@ -336,10 +290,10 @@

case 'end':
return _context3.stop();
return _context.stop();
}
}
}, _callee3, this);
}, _callee, this);
}));
function start() {
return _ref4.apply(this, arguments);
return _ref2.apply(this, arguments);
}

@@ -401,5 +355,8 @@

return CognitiveServicesSpeechRecognition;
return _class;
}();
exports.default = _class;
function toPromise() {

@@ -445,4 +402,2 @@ var events = {

}
exports.default = CognitiveServicesSpeechRecognition;
//# sourceMappingURL=SpeechRecognition.js.map

@@ -13,6 +13,2 @@ 'use strict';

var _exchangeToken = require('./exchangeToken');
var _exchangeToken2 = _interopRequireDefault(_exchangeToken);
var _fetchVoices = require('./fetchVoices');

@@ -35,7 +31,2 @@

// Token expiration is hardcoded at 10 minutes
// https://docs.microsoft.com/en-us/azure/cognitive-services/Speech/how-to/how-to-authentication?tabs=Powershell#use-an-authorization-token
var TOKEN_EXPIRATION = 600000;
var TOKEN_EARLY_RENEWAL = 60000;
var SpeechSynthesis = function () {

@@ -61,8 +52,7 @@ function SpeechSynthesis() {

}, {
key: 'authorize',
key: 'speak',
value: function () {
var _ref = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee(subscriptionKey) {
var _ref = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee(utterance) {
var _this = this;
var autoRenewal = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : true;
return regeneratorRuntime.wrap(function _callee$(_context) {

@@ -72,51 +62,4 @@ while (1) {

case 0:
clearTimeout(this._renewal);
_context.t0 = subscriptionKey;
if (!_context.t0) {
_context.next = 6;
break;
}
_context.next = 5;
return (0, _exchangeToken2.default)(subscriptionKey);
case 5:
_context.t0 = _context.sent;
case 6:
this.speechToken = _context.t0;
this._renewal = autoRenewal && setTimeout(function () {
_this.fetchToken(subscriptionKey);
}, TOKEN_EXPIRATION - TOKEN_EARLY_RENEWAL);
case 8:
case 'end':
return _context.stop();
}
}
}, _callee, this);
}));
function authorize(_x2) {
return _ref.apply(this, arguments);
}
return authorize;
}()
}, {
key: 'speak',
value: function () {
var _ref2 = _asyncToGenerator( /*#__PURE__*/regeneratorRuntime.mark(function _callee2(utterance) {
var _this2 = this;
return regeneratorRuntime.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
if (utterance instanceof _SpeechSynthesisUtterance2.default) {
_context2.next = 2;
_context.next = 2;
break;

@@ -129,29 +72,37 @@ }

if (this.speechToken) {
_context2.next = 4;
_context.next = 4;
break;
}
throw new Error('authorize() must be called prior speak()');
throw new Error('speechToken must be set');
case 4:
return _context2.abrupt('return', new Promise(function (resolve, reject) {
if (this.speechToken.value) {
_context.next = 6;
break;
}
throw new Error('must wait for token to be authorized prior speak()');
case 6:
return _context.abrupt('return', new Promise(function (resolve, reject) {
utterance.addEventListener('end', resolve);
utterance.addEventListener('error', reject);
utterance.outputFormat = _this2.outputFormat;
utterance.speechToken = _this2.speechToken;
utterance.outputFormat = _this.outputFormat;
utterance.speechToken = _this.speechToken.value;
utterance.preload();
_this2.queue.push(utterance);
_this.queue.push(utterance);
}));
case 5:
case 7:
case 'end':
return _context2.stop();
return _context.stop();
}
}
}, _callee2, this);
}, _callee, this);
}));
function speak(_x3) {
return _ref2.apply(this, arguments);
function speak(_x) {
return _ref.apply(this, arguments);
}

@@ -158,0 +109,0 @@

@@ -6,4 +6,4 @@ "use strict";

});
exports.default = function (target, name, handler) {
exports.default = subscribeEvent;
function subscribeEvent(target, name, handler) {
target.addEventListener(name, handler);

@@ -14,3 +14,3 @@

};
};
}
//# sourceMappingURL=subscribeEvent.js.map

@@ -17,4 +17,4 @@ 'use strict';

var DOMEventEmitter = function () {
function DOMEventEmitter() {
var _class = function () {
function _class() {
var _this = this;

@@ -24,3 +24,3 @@

_classCallCheck(this, DOMEventEmitter);
_classCallCheck(this, _class);

@@ -38,3 +38,3 @@ this._events = new _events2.default();

_createClass(DOMEventEmitter, [{
_createClass(_class, [{
key: 'addEventListener',

@@ -58,6 +58,6 @@ value: function addEventListener(name, listener) {

return DOMEventEmitter;
return _class;
}();
exports.default = DOMEventEmitter;
exports.default = _class;
//# sourceMappingURL=domEventEmitter.js.map
{
"name": "web-speech-cognitive-services",
"version": "1.0.1-master.f1ee18c",
"version": "2.0.0-master.7ac198d",
"description": "Polyfill Web Speech API with Cognitive Services Speech-to-Text service",

@@ -5,0 +5,0 @@ "keywords": [

@@ -19,3 +19,3 @@ # web-speech-cognitive-services

Microsoft Azure [Cognitive Services Bing Speec](https://azure.microsoft.com/en-us/services/cognitive-services/speech/) service provide speech recognition with great accuracy. But unfortunately, the APIs are not based on Web Speech API.
Microsoft Azure [Cognitive Services Bing Speech](https://azure.microsoft.com/en-us/services/cognitive-services/speech/) service provide speech recognition with great accuracy. But unfortunately, the APIs are not based on Web Speech API.

@@ -31,18 +31,9 @@ This package will polyfill Web Speech API by turning Cognitive Services Bing Speech API into Web Speech API. We test this package with popular combination of platforms and browsers.

```jsx
import SpeechRecognition from 'web-speech-cognitive-services';
import { SpeechRecognition, SubscriptionKey } from 'web-speech-cognitive-services';
const recognition = new SpeechRecognition();
// There are two ways to provide your credential:
// 1. Provide a subscription key (good for prototype, not for production)
// 2. Provide a mechanism to obtain/refresh access token
recognition.lang = 'en-US';
recognition.speechToken = new SubscriptionKey('your subscription key');
// If you are using subscription key
recognition.subscriptionKey = 'your subscription key';
// If you are using access token, refreshToken === true, if we are renewing the token, otherwise, false
recognition.tokenFetch = async (authFetchEventID, refreshToken) => {
};
recognition.lang = 'en-US';
recognition.onresult = ({ results }) => {

@@ -55,2 +46,4 @@ console.log(results);

> Note: most browsers requires HTTPS or `localhost` for WebRTC.
### Integrating with React

@@ -61,8 +54,10 @@

```jsx
import { SpeechGrammarList, SpeechRecognition } from 'web-speech-cognitive-services';
import { SpeechGrammarList, SpeechRecognition, SubscriptionKey } from 'web-speech-cognitive-services';
import DictateButton from 'react-dictate-button';
const extra = { subscriptionKey: new SubscriptionKey('your subscription key') };
export default props =>
<DictateButton
extra={{ subscriptionKey: 'your subscription key' }}
extra={ extra }
onDictate={ ({ result }) => alert(result.transcript) }

@@ -81,7 +76,11 @@ speechGrammarList={ SpeechGrammarList }

```jsx
import { speechSynthesis, SpeechSynthesisUtterance } from 'web-speech-cognitive-services';
import { speechSynthesis, SpeechSynthesisUtterance, SubscriptionKey } from 'web-speech-cognitive-services';
const subscriptionKey = new SubscriptionKey('your subscription key');
const utterance = new SpeechSynthesisUtterance('Hello, World!');
await speechSynthesis.authorize('your subscription key');
speechSynthesis.speechToken = subscriptionKey;
// Need to wait until token exchange is complete before speak
await subscriptionKey.authorized;
await speechSynthesis.speak(utterance);

@@ -99,3 +98,3 @@ ```

```jsx
import { speechSynthesis, SpeechSynthesisUtterance } from 'web-speech-cognitive-services';
import { speechSynthesis, SpeechSynthesisUtterance, SubscriptionKey } from 'web-speech-cognitive-services';
import React from 'react';

@@ -108,12 +107,8 @@ import Say from 'react-say';

speechSynthesis.speechToken = new SubscriptionKey('your subscription key');
speechSynthesis.speechToken.authorized.then(() => this.setState(() => ({ ready: true })));
this.state = { ready: false };
}
componentWillMount() {
// Speech synthesis is not ready to use until authorization complete
speechSynthesis.authorize('your subscription key').then(() => ({
this.setState(() => ({ ready: true }));
}));
}
render() {

@@ -149,2 +144,4 @@ return (

* General
* [x] Unified [token exchange mechanism](packages/component/src/util/SubscriptionKey.js)
* Speech recognition

@@ -156,5 +153,10 @@ * [ ] Add grammar list

* Currently, there is a problem with `microsoft-speech-browser-sdk@0.0.12`, tracking on [this issue](https://github.com/Azure-Samples/SpeechToText-WebSockets-Javascript/issues/88)
* [ ] Support custom speech
* [ ] Support new [Speech-to-Text](https://azure.microsoft.com/en-us/services/cognitive-services/speech-to-text/) service
* Point to [new URIs](https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis)
* Speech synthesis
* [ ] Event: add `pause`/`resume` support
* [ ] Properties: add `paused`/`pending`/`speaking` support
* [ ] Support new [Text-to-Speech](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/how-to-text-to-speech) service
* Custom voice fonts

@@ -161,0 +163,0 @@ # Contributions

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc