New Case Study:See how Anthropic automated 95% of dependency reviews with Socket.Learn More
Socket
Sign inDemoInstall
Socket

web-speech-cognitive-services

Package Overview
Dependencies
Maintainers
1
Versions
169
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

web-speech-cognitive-services - npm Package Compare versions

Comparing version

to
3.0.0-master.3bf86f4

5

CHANGELOG.md

@@ -12,3 +12,3 @@ # Changelog

### Changed
- Use `babel-runtime` and `babel-plugin-transform-runtime`, in favor of `babel-polyfill`
- Use `@babel/runtime` and `@babel/plugin-transform-runtime`, in favor of `babel-polyfill`
- Better error handling on `null` token

@@ -22,2 +22,5 @@ - Updated voice list from [https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/supported-languages](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/supported-languages)

- `fetchToken` will be called every time a token is required, implementor should cache the token as needed
- Bump to `@babel/core@7.1.2` and `jest@^23.6.0`
- Bump to `react-scripts@2.0.4`
- Publish `/packages/component/` instead of `/`

@@ -24,0 +27,0 @@ ## [2.1.0] - 2018-07-09

63

lib/index.js

@@ -1,35 +0,48 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.SpeechSynthesisUtterance = exports.speechSynthesis = exports.SpeechRecognition = exports.SpeechGrammarList = exports.createFetchTokenUsingSubscriptionKey = undefined;
Object.defineProperty(exports, "createFetchTokenUsingSubscriptionKey", {
enumerable: true,
get: function get() {
return _createFetchTokenUsingSubscriptionKey.default;
}
});
Object.defineProperty(exports, "SpeechGrammarList", {
enumerable: true,
get: function get() {
return _SpeechGrammarList.default;
}
});
Object.defineProperty(exports, "SpeechRecognition", {
enumerable: true,
get: function get() {
return _SpeechRecognition.default;
}
});
Object.defineProperty(exports, "speechSynthesis", {
enumerable: true,
get: function get() {
return _speechSynthesis.default;
}
});
Object.defineProperty(exports, "SpeechSynthesisUtterance", {
enumerable: true,
get: function get() {
return _SpeechSynthesisUtterance.default;
}
});
var _createFetchTokenUsingSubscriptionKey = require('./util/createFetchTokenUsingSubscriptionKey');
var _createFetchTokenUsingSubscriptionKey = _interopRequireDefault(require("./util/createFetchTokenUsingSubscriptionKey"));
var _createFetchTokenUsingSubscriptionKey2 = _interopRequireDefault(_createFetchTokenUsingSubscriptionKey);
var _SpeechGrammarList = _interopRequireDefault(require("./recognition/SpeechGrammarList"));
var _SpeechGrammarList = require('./recognition/SpeechGrammarList');
var _SpeechRecognition = _interopRequireDefault(require("./recognition/SpeechRecognition"));
var _SpeechGrammarList2 = _interopRequireDefault(_SpeechGrammarList);
var _speechSynthesis = _interopRequireDefault(require("./synthesis/speechSynthesis"));
var _SpeechRecognition = require('./recognition/SpeechRecognition');
var _SpeechRecognition2 = _interopRequireDefault(_SpeechRecognition);
var _speechSynthesis = require('./synthesis/speechSynthesis');
var _speechSynthesis2 = _interopRequireDefault(_speechSynthesis);
var _SpeechSynthesisUtterance = require('./synthesis/SpeechSynthesisUtterance');
var _SpeechSynthesisUtterance2 = _interopRequireDefault(_SpeechSynthesisUtterance);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
exports.createFetchTokenUsingSubscriptionKey = _createFetchTokenUsingSubscriptionKey2.default;
exports.SpeechGrammarList = _SpeechGrammarList2.default;
exports.SpeechRecognition = _SpeechRecognition2.default;
exports.speechSynthesis = _speechSynthesis2.default;
exports.SpeechSynthesisUtterance = _SpeechSynthesisUtterance2.default;
var _SpeechSynthesisUtterance = _interopRequireDefault(require("./synthesis/SpeechSynthesisUtterance"));
//# sourceMappingURL=index.js.map

@@ -1,29 +0,23 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _toConsumableArray2 = require('babel-runtime/helpers/toConsumableArray');
var _toConsumableArray2 = _interopRequireDefault(require("@babel/runtime/helpers/toConsumableArray"));
var _toConsumableArray3 = _interopRequireDefault(_toConsumableArray2);
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _simpleUpdateIn = _interopRequireDefault(require("simple-update-in"));
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _simpleUpdateIn = require('simple-update-in');
var _simpleUpdateIn2 = _interopRequireDefault(_simpleUpdateIn);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _class = function () {
function _class() {
(0, _classCallCheck3.default)(this, _class);
var _default =
/*#__PURE__*/
function () {
function _default() {
(0, _classCallCheck2.default)(this, _default);
this._referenceGrammar = null;

@@ -33,4 +27,4 @@ this._words = [];

(0, _createClass3.default)(_class, [{
key: 'addFromString',
(0, _createClass2.default)(_default, [{
key: "addFromString",
value: function addFromString() {

@@ -40,15 +34,16 @@ throw new Error('JSGF is not supported');

}, {
key: 'createSpeechContext',
key: "createSpeechContext",
value: function createSpeechContext() {
var referenceGrammar = this.referenceGrammar,
words = this.words;
var speechContext;
var speechContext = void 0;
if (referenceGrammar) {
speechContext = (0, _simpleUpdateIn2.default)(speechContext, ['dgi', 'Groups'], function () {
speechContext = (0, _simpleUpdateIn.default)(speechContext, ['dgi', 'Groups'], function () {
var groups = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
return [].concat((0, _toConsumableArray3.default)(groups), [{
return (0, _toConsumableArray2.default)(groups).concat([{
Type: 'Generic',
Hints: { ReferenceGrammar: referenceGrammar }
Hints: {
ReferenceGrammar: referenceGrammar
}
}]);

@@ -59,8 +54,10 @@ });

if (words && words.length) {
speechContext = (0, _simpleUpdateIn2.default)(speechContext, ['dgi', 'Groups'], function () {
speechContext = (0, _simpleUpdateIn.default)(speechContext, ['dgi', 'Groups'], function () {
var groups = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
return [].concat((0, _toConsumableArray3.default)(groups), [{
return (0, _toConsumableArray2.default)(groups).concat([{
Type: 'Generic',
Items: words.map(function (word) {
return { Text: word };
return {
Text: word
};
})

@@ -74,3 +71,3 @@ }]);

}, {
key: 'referenceGrammar',
key: "referenceGrammar",
get: function get() {

@@ -87,3 +84,3 @@ return this._referenceGrammar;

}, {
key: 'words',
key: "words",
get: function get() {

@@ -100,6 +97,6 @@ return this._words;

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=SpeechGrammarList.js.map

@@ -1,77 +0,53 @@

'use strict';
"use strict";
var _interopRequireWildcard = require("@babel/runtime/helpers/interopRequireWildcard");
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _objectWithoutProperties2 = _interopRequireDefault(require("@babel/runtime/helpers/objectWithoutProperties"));
var _promise = require('babel-runtime/core-js/promise');
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _promise2 = _interopRequireDefault(_promise);
var _objectSpread2 = _interopRequireDefault(require("@babel/runtime/helpers/objectSpread"));
var _stringify = require('babel-runtime/core-js/json/stringify');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _stringify2 = _interopRequireDefault(_stringify);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _objectWithoutProperties2 = require('babel-runtime/helpers/objectWithoutProperties');
var CognitiveSpeech = _interopRequireWildcard(require("microsoft-speech-browser-sdk"));
var _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2);
var _eventAsPromise = _interopRequireDefault(require("event-as-promise"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _memoizeOne = _interopRequireDefault(require("memoize-one"));
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _SpeechGrammarList = _interopRequireDefault(require("./SpeechGrammarList"));
var _extends2 = require('babel-runtime/helpers/extends');
var _extends3 = _interopRequireDefault(_extends2);
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _microsoftSpeechBrowserSdk = require('microsoft-speech-browser-sdk');
var CognitiveSpeech = _interopRequireWildcard(_microsoftSpeechBrowserSdk);
var _eventAsPromise = require('event-as-promise');
var _eventAsPromise2 = _interopRequireDefault(_eventAsPromise);
var _memoizeOne = require('memoize-one');
var _memoizeOne2 = _interopRequireDefault(_memoizeOne);
var _SpeechGrammarList = require('./SpeechGrammarList');
var _SpeechGrammarList2 = _interopRequireDefault(_SpeechGrammarList);
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function buildSpeechResult(transcript, confidence, isFinal) {
var result = [{ confidence: confidence, transcript: transcript }];
var result = [{
confidence: confidence,
transcript: transcript
}];
result.isFinal = isFinal;
return { results: [result], type: 'result' };
return {
results: [result],
type: 'result'
};
}
var _class = function () {
function _class() {
var _default =
/*#__PURE__*/
function () {
function _default() {
var _this = this;
(0, _classCallCheck3.default)(this, _class);
(0, _classCallCheck2.default)(this, _default);
this._lang = '';
this.readyState = 0;
this.onaudiostart = null;

@@ -88,4 +64,3 @@ this.onaudioend = null;

this.onstart = null;
this.createRecognizer = (0, _memoizeOne2.default)(function () {
this.createRecognizer = (0, _memoizeOne.default)(function () {
var lang = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : navigator.language;

@@ -98,4 +73,3 @@ var mode = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : CognitiveSpeech.RecognitionMode.Interactive;

var deviceModel = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 'web-speech-cognitive-services';
var deviceVersion = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : "1.0.0";
var deviceVersion = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : "3.0.0-master.3bf86f4";
var config = new CognitiveSpeech.RecognizerConfig(new CognitiveSpeech.SpeechConfig(new CognitiveSpeech.Context(new CognitiveSpeech.OS(osPlatform, osName, osVersion), new CognitiveSpeech.Device(deviceManufacturer, deviceModel, deviceVersion))), mode, lang, CognitiveSpeech.SpeechResultFormat.Detailed);

@@ -109,7 +83,5 @@

console.error('SpeechRecognition: fetchToken must be set');
return _sink.Reject('fetchToken must be set');
} else if (typeof _this.fetchToken !== 'function') {
console.error('SpeechRecognition: fetchToken must be a function that returns a Promise and it will resolve to a string-based token');
return _sink.Reject('fetchToken must be a function that returns a Promise and it will resolve to a string-based token');

@@ -130,4 +102,4 @@ }

(0, _createClass3.default)(_class, [{
key: 'abort',
(0, _createClass2.default)(_default, [{
key: "abort",
value: function abort() {

@@ -140,14 +112,14 @@ // TODO: Should redesign how to stop a recognition session

AudioSource && AudioSource.TurnOff();
this._aborted = true;
}
}, {
key: 'emit',
key: "emit",
value: function emit(name, event) {
var listener = this['on' + name];
listener && listener.call(this, (0, _extends3.default)({}, event, { type: name }));
var listener = this["on".concat(name)];
listener && listener.call(this, (0, _objectSpread2.default)({}, event, {
type: name
}));
}
}, {
key: 'stop',
key: "stop",
value: function stop() {

@@ -158,19 +130,18 @@ // TODO: Support stop

}, {
key: 'start',
key: "start",
value: function () {
var _ref2 = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee() {
var _start = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee() {
var recognizer, _toPromise, eventListener, promises, speechContext, error, listeningStarted, recognitionStarted, gotFirstHypothesis, speechHypothesis, speechDetailedPhrase, recognitionResult;
return _regenerator2.default.wrap(function _callee$(_context) {
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
recognizer = this.recognizer = this.createRecognizer(this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "1.0.0");
_toPromise = toPromise(), eventListener = _toPromise.eventListener, promises = (0, _objectWithoutProperties3.default)(_toPromise, ['eventListener']);
recognizer = this.recognizer = this.createRecognizer(this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "3.0.0-master.3bf86f4");
_toPromise = toPromise(), eventListener = _toPromise.eventListener, promises = (0, _objectWithoutProperties2.default)(_toPromise, ["eventListener"]);
speechContext = this.grammars && this.grammars.createSpeechContext();
recognizer.Recognize(eventListener, speechContext && (0, _stringify2.default)(speechContext));
recognizer.Recognize(eventListener, speechContext && JSON.stringify(speechContext));
this._aborted = false;
_context.next = 7;

@@ -180,11 +151,10 @@ return promises.recognitionTriggered;

case 7:
error = void 0;
_context.next = 10;
return _promise2.default.race([promises.listeningStarted, promises.recognitionEnded]);
_context.next = 9;
return Promise.race([promises.listeningStarted, promises.recognitionEnded]);
case 10:
case 9:
listeningStarted = _context.sent;
if (!(listeningStarted.Name === 'RecognitionEndedEvent')) {
_context.next = 15;
_context.next = 14;
break;

@@ -199,23 +169,21 @@ }

}
_context.next = 52;
_context.next = 50;
break;
case 15:
case 14:
this.emit('start');
_context.next = 18;
_context.next = 17;
return promises.connectingToService;
case 18:
_context.next = 20;
return _promise2.default.race([promises.recognitionStarted, promises.recognitionEnded]);
case 17:
_context.next = 19;
return Promise.race([promises.recognitionStarted, promises.recognitionEnded]);
case 20:
case 19:
recognitionStarted = _context.sent;
this.emit('audiostart');
if (!(recognitionStarted.Name === 'RecognitionEndedEvent')) {
_context.next = 26;
_context.next = 25;
break;

@@ -230,24 +198,21 @@ }

}
_context.next = 37;
_context.next = 35;
break;
case 26:
gotFirstHypothesis = void 0;
case 25:
_context.next = 27;
return Promise.race([promises.getSpeechHypothesisPromise(), promises.speechEndDetected]);
case 27:
_context.next = 29;
return _promise2.default.race([promises.getSpeechHypothesisPromise(), promises.speechEndDetected]);
case 29:
speechHypothesis = _context.sent;
if (!(speechHypothesis.Name === 'SpeechEndDetectedEvent')) {
_context.next = 32;
_context.next = 30;
break;
}
return _context.abrupt('break', 36);
return _context.abrupt("break", 34);
case 32:
case 30:
if (!gotFirstHypothesis) {

@@ -261,8 +226,7 @@ gotFirstHypothesis = true;

case 34:
_context.next = 27;
case 32:
_context.next = 25;
break;
case 36:
case 34:
if (gotFirstHypothesis) {

@@ -273,8 +237,7 @@ this.emit('speechend');

case 37:
case 35:
this.emit('audioend');
if (!this._aborted) {
_context.next = 44;
_context.next = 42;
break;

@@ -284,19 +247,18 @@ }

error = 'aborted';
_context.next = 42;
_context.next = 40;
return promises.recognitionEnded;
case 42:
_context.next = 52;
case 40:
_context.next = 50;
break;
case 42:
_context.next = 44;
return Promise.race([promises.speechDetailedPhrase, promises.recognitionEnded]);
case 44:
_context.next = 46;
return _promise2.default.race([promises.speechDetailedPhrase, promises.recognitionEnded]);
case 46:
speechDetailedPhrase = _context.sent;
if (!(speechDetailedPhrase.Name !== 'RecognitionEndedEvent')) {
_context.next = 52;
_context.next = 50;
break;

@@ -307,3 +269,2 @@ }

if (recognitionResult === CognitiveSpeech.RecognitionStatus.Success) {

@@ -320,12 +281,13 @@ this.emit('result', buildSpeechResult(speechDetailedPhrase.Result.NBest[0].Display, speechDetailedPhrase.Result.NBest[0].Confidence, true));

_context.next = 52;
_context.next = 50;
return promises.recognitionEnded;
case 52:
error && this.emit('error', { error: error });
case 50:
error && this.emit('error', {
error: error
});
this.emit('end');
case 54:
case 'end':
case 52:
case "end":
return _context.stop();

@@ -337,10 +299,8 @@ }

function start() {
return _ref2.apply(this, arguments);
}
return start;
return function start() {
return _start.apply(this, arguments);
};
}()
}, {
key: 'grammars',
key: "grammars",
get: function get() {

@@ -350,3 +310,3 @@ return this._grammars;

set: function set(nextGrammars) {
if (nextGrammars && !(nextGrammars instanceof _SpeechGrammarList2.default)) {
if (nextGrammars && !(nextGrammars instanceof _SpeechGrammarList.default)) {
throw new Error('must be instance of SpeechGrammarList from "web-speech-cognitive-services"');

@@ -358,3 +318,3 @@ }

}, {
key: 'lang',
key: "lang",
get: function get() {

@@ -367,3 +327,3 @@ return this._lang;

}, {
key: 'continuous',
key: "continuous",
get: function get() {

@@ -376,3 +336,3 @@ return false;

}, {
key: 'interimResults',
key: "interimResults",
get: function get() {

@@ -387,3 +347,3 @@ return true;

}, {
key: 'maxAlternatives',
key: "maxAlternatives",
get: function get() {

@@ -396,3 +356,3 @@ return 1;

}, {
key: 'serviceURI',
key: "serviceURI",
get: function get() {

@@ -405,22 +365,20 @@ return null;

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
function toPromise() {
var events = {
ConnectingToServiceEvent: new _eventAsPromise2.default(),
ListeningStartedEvent: new _eventAsPromise2.default(),
RecognitionEndedEvent: new _eventAsPromise2.default(),
RecognitionStartedEvent: new _eventAsPromise2.default(),
RecognitionTriggeredEvent: new _eventAsPromise2.default(),
SpeechDetailedPhraseEvent: new _eventAsPromise2.default(),
SpeechEndDetectedEvent: new _eventAsPromise2.default(),
SpeechHypothesisEvent: new _eventAsPromise2.default(),
SpeechSimplePhraseEvent: new _eventAsPromise2.default(),
SpeechStartDetectedEvent: new _eventAsPromise2.default()
ConnectingToServiceEvent: new _eventAsPromise.default(),
ListeningStartedEvent: new _eventAsPromise.default(),
RecognitionEndedEvent: new _eventAsPromise.default(),
RecognitionStartedEvent: new _eventAsPromise.default(),
RecognitionTriggeredEvent: new _eventAsPromise.default(),
SpeechDetailedPhraseEvent: new _eventAsPromise.default(),
SpeechEndDetectedEvent: new _eventAsPromise.default(),
SpeechHypothesisEvent: new _eventAsPromise.default(),
SpeechSimplePhraseEvent: new _eventAsPromise.default(),
SpeechStartDetectedEvent: new _eventAsPromise.default()
};
return {

@@ -441,3 +399,2 @@ connectingToService: events.ConnectingToServiceEvent.upcoming(),

var name = event.Name;
var eventAsPromise = events[name];

@@ -448,3 +405,3 @@

} else {
console.warn('Unexpected event "' + name + '" from Cognitive Services, please file a bug to https://github.com/compulim/web-speech-cognitive-services');
console.warn("Unexpected event \"".concat(name, "\" from Cognitive Services, please file a bug to https://github.com/compulim/web-speech-cognitive-services"));
}

@@ -451,0 +408,0 @@ }

@@ -1,42 +0,42 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _class = function () {
function _class() {
(0, _classCallCheck3.default)(this, _class);
var _default =
/*#__PURE__*/
function () {
function _default() {
(0, _classCallCheck2.default)(this, _default);
}
(0, _createClass3.default)(_class, [{
key: 'start',
(0, _createClass2.default)(_default, [{
key: "start",
value: function () {
var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(queue) {
var audioContextClass = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : window.AudioContext || window.webkitAudioContext;
var utterance;
return _regenerator2.default.wrap(function _callee$(_context) {
var _start = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee(queue) {
var audioContextClass,
utterance,
_args = arguments;
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
audioContextClass = _args.length > 1 && _args[1] !== undefined ? _args[1] : window.AudioContext || window.webkitAudioContext;
if (!this.audioContext) {
_context.next = 2;
_context.next = 3;
break;

@@ -47,4 +47,3 @@ }

case 2:
utterance = void 0;
case 3:
_context.prev = 3;

@@ -59,3 +58,2 @@

this.playingUtterance = utterance;
_context.next = 8;

@@ -65,3 +63,2 @@ return utterance.play(this.audioContext || (this.audioContext = new audioContextClass()));

case 8:
this.playingUtterance = null;

@@ -90,3 +87,3 @@ _context.next = 4;

case 18:
case 'end':
case "end":
return _context.stop();

@@ -98,10 +95,8 @@ }

function start(_x2) {
return _ref.apply(this, arguments);
}
return start;
return function start(_x) {
return _start.apply(this, arguments);
};
}()
}, {
key: 'stop',
key: "stop",
value: function stop() {

@@ -111,6 +106,6 @@ this.playingUtterance && this.playingUtterance.stop();

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=AudioContextConsumer.js.map

@@ -1,34 +0,26 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _AudioContextConsumer = _interopRequireDefault(require("./AudioContextConsumer"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _AudioContextConsumer = require('./AudioContextConsumer');
var _AudioContextConsumer2 = _interopRequireDefault(_AudioContextConsumer);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _class = function () {
function _class() {
var _default =
/*#__PURE__*/
function () {
function _default() {
var audioContextClass = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : window.AudioContext || window.webkitAudioContext;
(0, _classCallCheck3.default)(this, _class);
(0, _classCallCheck2.default)(this, _default);
this.audioContextClass = audioContextClass;

@@ -39,7 +31,9 @@ this.consumer = null;

(0, _createClass3.default)(_class, [{
key: 'startConsumer',
(0, _createClass2.default)(_default, [{
key: "startConsumer",
value: function () {
var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee() {
return _regenerator2.default.wrap(function _callee$(_context) {
var _startConsumer = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee() {
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {

@@ -53,3 +47,3 @@ switch (_context.prev = _context.next) {

this.consumer = new _AudioContextConsumer2.default();
this.consumer = new _AudioContextConsumer.default();
_context.next = 4;

@@ -64,3 +58,3 @@ return this.consumer.start(this.queue, this.audioContextClass);

case 7:
case 'end':
case "end":
return _context.stop();

@@ -72,10 +66,8 @@ }

function startConsumer() {
return _ref.apply(this, arguments);
}
return startConsumer;
return function startConsumer() {
return _startConsumer.apply(this, arguments);
};
}()
}, {
key: 'push',
key: "push",
value: function push(utterance) {

@@ -86,3 +78,3 @@ this.queue.push(utterance);

}, {
key: 'stop',
key: "stop",
value: function stop() {

@@ -93,6 +85,6 @@ this.queue.splice(0);

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=AudioContextQueue.js.map

@@ -1,2 +0,2 @@

'use strict';
"use strict";

@@ -7,2 +7,3 @@ Object.defineProperty(exports, "__esModule", {

exports.default = buildSSML;
// Cognitive Services does not support unsigned percentage

@@ -24,11 +25,10 @@ // It must be converted into +/- first.

_ref$pitch = _ref.pitch,
pitch = _ref$pitch === undefined ? 1 : _ref$pitch,
pitch = _ref$pitch === void 0 ? 1 : _ref$pitch,
_ref$rate = _ref.rate,
rate = _ref$rate === undefined ? 1 : _ref$rate,
rate = _ref$rate === void 0 ? 1 : _ref$rate,
text = _ref.text,
voice = _ref.voice,
volume = _ref.volume;
return '<speak version="1.0" xml:lang="' + lang + '">\n <voice xml:lang="' + lang + '" xml:gender="' + gender + '" name="' + voice + '">\n <prosody pitch="' + relativePercentage(pitch) + '" rate="' + relativePercentage(rate) + '" volume="' + relativePercentage(volume) + '">\n ' + text + '\n </prosody>\n </voice>\n</speak>';
return "<speak version=\"1.0\" xml:lang=\"".concat(lang, "\">\n <voice xml:lang=\"").concat(lang, "\" xml:gender=\"").concat(gender, "\" name=\"").concat(voice, "\">\n <prosody pitch=\"").concat(relativePercentage(pitch), "\" rate=\"").concat(relativePercentage(rate), "\" volume=\"").concat(relativePercentage(volume), "\">\n ").concat(text, "\n </prosody>\n </voice>\n</speak>");
}
//# sourceMappingURL=buildSSML.js.map

@@ -1,21 +0,16 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = fetchSpeechData;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _buildSSML = _interopRequireDefault(require("./buildSSML"));
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _buildSSML = require('./buildSSML');
var _buildSSML2 = _interopRequireDefault(_buildSSML);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var DEFAULT_LANGUAGE = 'en-US';

@@ -25,21 +20,26 @@ var DEFAULT_VOICE = 'Microsoft Server Speech Text to Speech Voice (en-US, JessaRUS)';

exports.default = function () {
var _ref2 = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(_ref) {
var accessToken = _ref.accessToken,
_ref$lang = _ref.lang,
lang = _ref$lang === undefined ? DEFAULT_LANGUAGE : _ref$lang,
outputFormat = _ref.outputFormat,
pitch = _ref.pitch,
rate = _ref.rate,
text = _ref.text,
_ref$voice = _ref.voice,
voice = _ref$voice === undefined ? DEFAULT_VOICE : _ref$voice,
volume = _ref.volume;
var ssml, res;
return _regenerator2.default.wrap(function _callee$(_context) {
function fetchSpeechData(_x) {
return _fetchSpeechData.apply(this, arguments);
}
function _fetchSpeechData() {
_fetchSpeechData = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee(_ref) {
var accessToken, _ref$lang, lang, outputFormat, pitch, rate, text, _ref$voice, voice, volume, ssml, res;
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
ssml = (0, _buildSSML2.default)({ lang: lang, pitch: pitch, rate: rate, text: text, voice: voice, volume: volume });
_context.next = 3;
accessToken = _ref.accessToken, _ref$lang = _ref.lang, lang = _ref$lang === void 0 ? DEFAULT_LANGUAGE : _ref$lang, outputFormat = _ref.outputFormat, pitch = _ref.pitch, rate = _ref.rate, text = _ref.text, _ref$voice = _ref.voice, voice = _ref$voice === void 0 ? DEFAULT_VOICE : _ref$voice, volume = _ref.volume;
ssml = (0, _buildSSML.default)({
lang: lang,
pitch: pitch,
rate: rate,
text: text,
voice: voice,
volume: volume
});
_context.next = 4;
return fetch(SYNTHESIS_URL, {

@@ -55,17 +55,17 @@ headers: {

case 3:
case 4:
res = _context.sent;
if (!(res.status !== 200)) {
_context.next = 6;
_context.next = 7;
break;
}
throw new Error('Failed to syntheis speech, server returned ' + res.status);
throw new Error("Failed to syntheis speech, server returned ".concat(res.status));
case 6:
return _context.abrupt('return', res.arrayBuffer());
case 7:
return _context.abrupt("return", res.arrayBuffer());
case 7:
case 'end':
case 8:
case "end":
return _context.stop();

@@ -76,9 +76,4 @@ }

}));
function fetchSpeechData(_x) {
return _ref2.apply(this, arguments);
}
return fetchSpeechData;
}();
return _fetchSpeechData.apply(this, arguments);
}
//# sourceMappingURL=fetchSpeechData.js.map

@@ -1,16 +0,335 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = _default;
exports.default = function () {
return [new _SpeechSynthesisVoice2.default({ lang: 'ar-EG', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ar-EG, Hoda)' }), new _SpeechSynthesisVoice2.default({ lang: 'ar-SA', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ar-SA, Naayf)' }), new _SpeechSynthesisVoice2.default({ lang: 'bg-BG', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (bg-BG, Ivan)' }), new _SpeechSynthesisVoice2.default({ lang: 'ca-ES', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ca-ES, HerenaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'cs-CZ', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (cs-CZ, Jakub)' }), new _SpeechSynthesisVoice2.default({ lang: 'da-DK', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (da-DK, HelleRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'de-AT', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-AT, Michael)' }), new _SpeechSynthesisVoice2.default({ lang: 'de-CH', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-CH, Karsten)' }), new _SpeechSynthesisVoice2.default({ lang: 'de-DE', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)' }), new _SpeechSynthesisVoice2.default({ lang: 'de-DE', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, HeddaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'de-DE', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, Stefan, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'el-GR', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (el-GR, Stefanos)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-AU', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-AU, Catherine)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-AU', gender: 'Female', voiceURI: 
'Microsoft Server Speech Text to Speech Voice (en-AU, HayleyRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-CA', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-CA, Linda)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-CA', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-CA, HeatherRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-GB', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, Susan, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-GB', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, HazelRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-GB', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, George, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-IE', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IE, Sean)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-IN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, Heera, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-IN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, PriyaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-IN', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, Ravi, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-US', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-US', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, JessaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-US', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, BenjaminRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-US', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice 
(en-US, Jessa24kRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'en-US', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, Guy24kRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'es-ES', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, Laura, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'es-ES', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, HelenaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'es-ES', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, Pablo, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'es-MX', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-MX, HildaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'es-MX', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-MX, Raul, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'fi-FI', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fi-FI, HeidiRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-CA', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CA, Caroline)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-CA', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CA, HarmonieRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-CH', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CH, Guillaume)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-FR', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, Julie, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-FR', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, HortenseRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'fr-FR', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, Paul, Apollo)' }), new 
_SpeechSynthesisVoice2.default({ lang: 'he-IL', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (he-IL, Asaf)' }), new _SpeechSynthesisVoice2.default({ lang: 'hi-IN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Kalpana, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'hi-IN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Kalpana)' }), new _SpeechSynthesisVoice2.default({ lang: 'hi-IN', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Hemant)' }), new _SpeechSynthesisVoice2.default({ lang: 'hr-HR', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (hr-HR, Matej)' }), new _SpeechSynthesisVoice2.default({ lang: 'hu-HU', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (hu-HU, Szabolcs)' }), new _SpeechSynthesisVoice2.default({ lang: 'id-ID', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (id-ID, Andika)' }), new _SpeechSynthesisVoice2.default({ lang: 'it-IT', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (it-IT, Cosimo, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'it-IT', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (it-IT, LuciaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'ja-JP', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, Ayumi, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'ja-JP', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, Ichiro, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'ja-JP', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, HarukaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'ko-KR', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ko-KR, HeamiRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'ms-MY', gender: 'Male', 
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ms-MY, Rizwan)' }), new _SpeechSynthesisVoice2.default({ lang: 'nb-NO', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (nb-NO, HuldaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'nl-NL', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (nl-NL, HannaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'pl-PL', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (pl-PL, PaulinaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'pt-BR', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-BR, HeloisaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'pt-BR', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'pt-PT', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-PT, HeliaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'ro-RO', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ro-RO, Andrei)' }), new _SpeechSynthesisVoice2.default({ lang: 'ru-RU', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, Irina, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'ru-RU', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, Pavel, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'ru-RU', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, EkaterinaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'sk-SK', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (sk-SK, Filip)' }), new _SpeechSynthesisVoice2.default({ lang: 'sl-SI', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (sl-SI, Lado)' }), new _SpeechSynthesisVoice2.default({ lang: 'sv-SE', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice 
(sv-SE, HedvigRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'ta-IN', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (ta-IN, Valluvar)' }), new _SpeechSynthesisVoice2.default({ lang: 'th-TH', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (th-TH, Pattara)' }), new _SpeechSynthesisVoice2.default({ lang: 'tr-TR', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (tr-TR, SedaRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'vi-VN', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (vi-VN, An)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-CN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-CN', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, Yaoyao, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-CN', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, Kangkang, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-HK', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, Tracy, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-HK', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, TracyRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-HK', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, Danny, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-TW', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, Yating, Apollo)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-TW', gender: 'Female', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, HanHanRUS)' }), new _SpeechSynthesisVoice2.default({ lang: 'zh-TW', gender: 'Male', voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, Zhiwei, Apollo)' })];
};
var _SpeechSynthesisVoice = _interopRequireDefault(require("./SpeechSynthesisVoice"));
var _SpeechSynthesisVoice = require('./SpeechSynthesisVoice');
var _SpeechSynthesisVoice2 = _interopRequireDefault(_SpeechSynthesisVoice);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _default() {
return [new _SpeechSynthesisVoice.default({
lang: 'ar-EG',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ar-EG, Hoda)'
}), new _SpeechSynthesisVoice.default({
lang: 'ar-SA',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ar-SA, Naayf)'
}), new _SpeechSynthesisVoice.default({
lang: 'bg-BG',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (bg-BG, Ivan)'
}), new _SpeechSynthesisVoice.default({
lang: 'ca-ES',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ca-ES, HerenaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'cs-CZ',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (cs-CZ, Jakub)'
}), new _SpeechSynthesisVoice.default({
lang: 'da-DK',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (da-DK, HelleRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'de-AT',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-AT, Michael)'
}), new _SpeechSynthesisVoice.default({
lang: 'de-CH',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-CH, Karsten)'
}), new _SpeechSynthesisVoice.default({
lang: 'de-DE',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)'
}), new _SpeechSynthesisVoice.default({
lang: 'de-DE',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, HeddaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'de-DE',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (de-DE, Stefan, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'el-GR',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (el-GR, Stefanos)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-AU',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-AU, Catherine)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-AU',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-AU, HayleyRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-CA',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-CA, Linda)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-CA',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-CA, HeatherRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-GB',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, Susan, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-GB',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, HazelRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-GB',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-GB, George, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-IE',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IE, Sean)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-IN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, Heera, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-IN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, PriyaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-IN',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-IN, Ravi, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-US',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-US',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, JessaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-US',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, BenjaminRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-US',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, Jessa24kRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'en-US',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (en-US, Guy24kRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'es-ES',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, Laura, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'es-ES',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, HelenaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'es-ES',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-ES, Pablo, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'es-MX',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-MX, HildaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'es-MX',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (es-MX, Raul, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'fi-FI',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fi-FI, HeidiRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-CA',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CA, Caroline)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-CA',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CA, HarmonieRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-CH',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-CH, Guillaume)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-FR',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, Julie, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-FR',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, HortenseRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'fr-FR',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (fr-FR, Paul, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'he-IL',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (he-IL, Asaf)'
}), new _SpeechSynthesisVoice.default({
lang: 'hi-IN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Kalpana, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'hi-IN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Kalpana)'
}), new _SpeechSynthesisVoice.default({
lang: 'hi-IN',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (hi-IN, Hemant)'
}), new _SpeechSynthesisVoice.default({
lang: 'hr-HR',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (hr-HR, Matej)'
}), new _SpeechSynthesisVoice.default({
lang: 'hu-HU',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (hu-HU, Szabolcs)'
}), new _SpeechSynthesisVoice.default({
lang: 'id-ID',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (id-ID, Andika)'
}), new _SpeechSynthesisVoice.default({
lang: 'it-IT',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (it-IT, Cosimo, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'it-IT',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (it-IT, LuciaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'ja-JP',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, Ayumi, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'ja-JP',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, Ichiro, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'ja-JP',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ja-JP, HarukaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'ko-KR',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ko-KR, HeamiRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'ms-MY',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ms-MY, Rizwan)'
}), new _SpeechSynthesisVoice.default({
lang: 'nb-NO',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (nb-NO, HuldaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'nl-NL',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (nl-NL, HannaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'pl-PL',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (pl-PL, PaulinaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'pt-BR',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-BR, HeloisaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'pt-BR',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'pt-PT',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (pt-PT, HeliaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'ro-RO',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ro-RO, Andrei)'
}), new _SpeechSynthesisVoice.default({
lang: 'ru-RU',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, Irina, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'ru-RU',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, Pavel, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'ru-RU',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ru-RU, EkaterinaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'sk-SK',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (sk-SK, Filip)'
}), new _SpeechSynthesisVoice.default({
lang: 'sl-SI',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (sl-SI, Lado)'
}), new _SpeechSynthesisVoice.default({
lang: 'sv-SE',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (sv-SE, HedvigRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'ta-IN',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (ta-IN, Valluvar)'
}), new _SpeechSynthesisVoice.default({
lang: 'th-TH',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (th-TH, Pattara)'
}), new _SpeechSynthesisVoice.default({
lang: 'tr-TR',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (tr-TR, SedaRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'vi-VN',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (vi-VN, An)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-CN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-CN',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, Yaoyao, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-CN',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-CN, Kangkang, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-HK',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, Tracy, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-HK',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, TracyRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-HK',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-HK, Danny, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-TW',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, Yating, Apollo)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-TW',
gender: 'Female',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, HanHanRUS)'
}), new _SpeechSynthesisVoice.default({
lang: 'zh-TW',
gender: 'Male',
voiceURI: 'Microsoft Server Speech Text to Speech Voice (zh-TW, Zhiwei, Apollo)'
})];
}
//# sourceMappingURL=fetchVoices.js.map

@@ -1,55 +0,39 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _promise = require('babel-runtime/core-js/promise');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _promise2 = _interopRequireDefault(_promise);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _AudioContextQueue = _interopRequireDefault(require("./AudioContextQueue"));
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _fetchVoices = _interopRequireDefault(require("./fetchVoices"));
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _SpeechSynthesisUtterance = _interopRequireDefault(require("./SpeechSynthesisUtterance"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _AudioContextQueue = require('./AudioContextQueue');
var _AudioContextQueue2 = _interopRequireDefault(_AudioContextQueue);
var _fetchVoices = require('./fetchVoices');
var _fetchVoices2 = _interopRequireDefault(_fetchVoices);
var _SpeechSynthesisUtterance = require('./SpeechSynthesisUtterance');
var _SpeechSynthesisUtterance2 = _interopRequireDefault(_SpeechSynthesisUtterance);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
// Supported output format can be found at https://docs.microsoft.com/en-us/azure/cognitive-services/Speech/API-Reference-REST/BingVoiceOutput#Subscription
var DEFAULT_OUTPUT_FORMAT = 'audio-16khz-128kbitrate-mono-mp3';
var SpeechSynthesis = function () {
var SpeechSynthesis =
/*#__PURE__*/
function () {
function SpeechSynthesis() {
(0, _classCallCheck3.default)(this, SpeechSynthesis);
(0, _classCallCheck2.default)(this, SpeechSynthesis);
this.onvoiceschanged = null;
this.outputFormat = DEFAULT_OUTPUT_FORMAT;
this.queue = new _AudioContextQueue2.default();
this.queue = new _AudioContextQueue.default();
}
(0, _createClass3.default)(SpeechSynthesis, [{
key: 'cancel',
(0, _createClass2.default)(SpeechSynthesis, [{
key: "cancel",
value: function cancel() {

@@ -59,18 +43,20 @@ this.queue.stop();

}, {
key: 'getVoices',
key: "getVoices",
value: function getVoices() {
return (0, _fetchVoices2.default)();
return (0, _fetchVoices.default)();
}
}, {
key: 'speak',
key: "speak",
value: function () {
var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(utterance) {
var _speak = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee(utterance) {
var _this = this;
var accessToken;
return _regenerator2.default.wrap(function _callee$(_context) {
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
if (utterance instanceof _SpeechSynthesisUtterance2.default) {
if (utterance instanceof _SpeechSynthesisUtterance.default) {
_context.next = 2;

@@ -104,3 +90,3 @@ break;

accessToken = _context.sent;
return _context.abrupt('return', new _promise2.default(function (resolve, reject) {
return _context.abrupt("return", new Promise(function (resolve, reject) {
utterance.addEventListener('end', resolve);

@@ -116,3 +102,3 @@ utterance.addEventListener('error', reject);

case 12:
case 'end':
case "end":
return _context.stop();

@@ -124,7 +110,5 @@ }

function speak(_x) {
return _ref.apply(this, arguments);
}
return speak;
return function speak(_x) {
return _speak.apply(this, arguments);
};
}()

@@ -135,3 +119,5 @@ }]);

exports.default = new SpeechSynthesis();
var _default = new SpeechSynthesis();
exports.default = _default;
//# sourceMappingURL=speechSynthesis.js.map

@@ -1,62 +0,36 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _getPrototypeOf = require('babel-runtime/core-js/object/get-prototype-of');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _possibleConstructorReturn2 = _interopRequireDefault(require("@babel/runtime/helpers/possibleConstructorReturn"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _getPrototypeOf2 = _interopRequireDefault(require("@babel/runtime/helpers/getPrototypeOf"));
var _createClass2 = require('babel-runtime/helpers/createClass');
var _inherits2 = _interopRequireDefault(require("@babel/runtime/helpers/inherits"));
var _createClass3 = _interopRequireDefault(_createClass2);
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn');
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2);
var _domEventEmitter = _interopRequireDefault(require("../util/domEventEmitter"));
var _inherits2 = require('babel-runtime/helpers/inherits');
var _eventAsPromise = _interopRequireDefault(require("event-as-promise"));
var _inherits3 = _interopRequireDefault(_inherits2);
var _fetchSpeechData = _interopRequireDefault(require("./fetchSpeechData"));
var _regenerator = require('babel-runtime/regenerator');
var _subscribeEvent = _interopRequireDefault(require("./subscribeEvent"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
var _promise = require('babel-runtime/core-js/promise');
var _promise2 = _interopRequireDefault(_promise);
var _domEventEmitter = require('../util/domEventEmitter');
var _domEventEmitter2 = _interopRequireDefault(_domEventEmitter);
var _eventAsPromise = require('event-as-promise');
var _eventAsPromise2 = _interopRequireDefault(_eventAsPromise);
var _fetchSpeechData = require('./fetchSpeechData');
var _fetchSpeechData2 = _interopRequireDefault(_fetchSpeechData);
var _subscribeEvent = require('./subscribeEvent');
var _subscribeEvent2 = _interopRequireDefault(_subscribeEvent);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function asyncDecodeAudioData(audioContext, arrayBuffer) {
return new _promise2.default(function (resolve, reject) {
var promise = audioContext.decodeAudioData(arrayBuffer, resolve, reject);
return new Promise(function (resolve, reject) {
var promise = audioContext.decodeAudioData(arrayBuffer, resolve, reject); // Newer implementation of "decodeAudioData" will return a Promise
// Newer implementation of "decodeAudioData" will return a Promise
promise && typeof promise.then === 'function' && resolve(promise);

@@ -67,14 +41,16 @@ });

function playDecoded(audioContext, audioBuffer, source) {
var _this = this;
return new _promise2.default(function () {
var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(resolve, reject) {
return new Promise(
/*#__PURE__*/
function () {
var _ref = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee(resolve, reject) {
var audioContextClosed, sourceEnded, unsubscribe;
return _regenerator2.default.wrap(function _callee$(_context) {
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
audioContextClosed = new _eventAsPromise2.default();
sourceEnded = new _eventAsPromise2.default();
unsubscribe = (0, _subscribeEvent2.default)(audioContext, 'statechange', function (_ref2) {
audioContextClosed = new _eventAsPromise.default();
sourceEnded = new _eventAsPromise.default();
unsubscribe = (0, _subscribeEvent.default)(audioContext, 'statechange', function (_ref2) {
var state = _ref2.target.state;

@@ -84,15 +60,11 @@ return state === 'closed' && audioContextClosed.eventListener();

_context.prev = 3;
source.buffer = audioBuffer; // "ended" may not fire if the underlying AudioContext is closed prematurely
source.buffer = audioBuffer;
// "ended" may not fire if the underlying AudioContext is closed prematurely
source.onended = sourceEnded.eventListener;
source.connect(audioContext.destination);
source.start(0);
_context.next = 10;
return _promise2.default.race([audioContextClosed.upcoming(), sourceEnded.upcoming()]);
return Promise.race([audioContextClosed.upcoming(), sourceEnded.upcoming()]);
case 10:
resolve();

@@ -104,4 +76,3 @@ _context.next = 16;

_context.prev = 13;
_context.t0 = _context['catch'](3);
_context.t0 = _context["catch"](3);
reject(_context.t0);

@@ -111,3 +82,2 @@

_context.prev = 16;
unsubscribe();

@@ -117,7 +87,7 @@ return _context.finish(16);

case 19:
case 'end':
case "end":
return _context.stop();
}
}
}, _callee, _this, [[3, 13, 16, 19]]);
}, _callee, this, [[3, 13, 16, 19]]);
}));

@@ -131,37 +101,39 @@

var _class = function (_DOMEventEmitter) {
(0, _inherits3.default)(_class, _DOMEventEmitter);
var _default =
/*#__PURE__*/
function (_DOMEventEmitter) {
(0, _inherits2.default)(_default, _DOMEventEmitter);
function _class(text) {
(0, _classCallCheck3.default)(this, _class);
function _default(text) {
var _this;
var _this2 = (0, _possibleConstructorReturn3.default)(this, (_class.__proto__ || (0, _getPrototypeOf2.default)(_class)).call(this, ['boundary', 'end', 'error', 'mark', 'pause', 'resume', 'start']));
_this2._lang = null;
_this2._pitch = 1;
_this2._rate = 1;
_this2._voice = null;
_this2._volume = 1;
_this2.text = text;
_this2.onboundary = null;
_this2.onend = null;
_this2.onerror = null;
_this2.onmark = null;
_this2.onpause = null;
_this2.onresume = null;
_this2.onstart = null;
return _this2;
(0, _classCallCheck2.default)(this, _default);
_this = (0, _possibleConstructorReturn2.default)(this, (0, _getPrototypeOf2.default)(_default).call(this, ['boundary', 'end', 'error', 'mark', 'pause', 'resume', 'start']));
_this._lang = null;
_this._pitch = 1;
_this._rate = 1;
_this._voice = null;
_this._volume = 1;
_this.text = text;
_this.onboundary = null;
_this.onend = null;
_this.onerror = null;
_this.onmark = null;
_this.onpause = null;
_this.onresume = null;
_this.onstart = null;
return _this;
}
(0, _createClass3.default)(_class, [{
key: 'preload',
(0, _createClass2.default)(_default, [{
key: "preload",
value: function () {
var _ref3 = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee2() {
return _regenerator2.default.wrap(function _callee2$(_context2) {
var _preload = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee2() {
return _regenerator.default.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
this.arrayBufferPromise = (0, _fetchSpeechData2.default)({
this.arrayBufferPromise = (0, _fetchSpeechData.default)({
accessToken: this.accessToken,

@@ -176,3 +148,2 @@ lang: this.lang || window.navigator.language,

});
_context2.next = 3;

@@ -182,3 +153,3 @@ return this.arrayBufferPromise;

case 3:
case 'end':
case "end":
return _context2.stop();

@@ -190,14 +161,14 @@ }

function preload() {
return _ref3.apply(this, arguments);
}
return preload;
return function preload() {
return _preload.apply(this, arguments);
};
}()
}, {
key: 'play',
key: "play",
value: function () {
var _ref4 = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee3(audioContext) {
var _play = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee3(audioContext) {
var source, audioBuffer;
return _regenerator2.default.wrap(function _callee3$(_context3) {
return _regenerator.default.wrap(function _callee3$(_context3) {
while (1) {

@@ -207,3 +178,2 @@ switch (_context3.prev = _context3.next) {

_context3.prev = 0;
// HACK: iOS requires bufferSourceNode to be constructed before decoding data

@@ -223,7 +193,4 @@ source = audioContext.createBufferSource();

audioBuffer = _context3.sent;
this.emit('start');
this._playingSource = source;
_context3.next = 14;

@@ -233,3 +200,2 @@ return playDecoded(audioContext, audioBuffer, source);

case 14:
this._playingSource = null;

@@ -242,10 +208,11 @@ this.emit('end');

_context3.prev = 18;
_context3.t3 = _context3['catch'](0);
this.emit('error', { error: _context3.t3, type: 'error' });
_context3.t3 = _context3["catch"](0);
this.emit('error', {
error: _context3.t3,
type: 'error'
});
throw _context3.t3;
case 22:
case 'end':
case "end":
return _context3.stop();

@@ -257,10 +224,8 @@ }

function play(_x3) {
return _ref4.apply(this, arguments);
}
return play;
return function play(_x3) {
return _play.apply(this, arguments);
};
}()
}, {
key: 'stop',
key: "stop",
value: function stop() {

@@ -270,3 +235,3 @@ this._playingSource && this._playingSource.stop();

}, {
key: 'lang',
key: "lang",
get: function get() {

@@ -279,3 +244,3 @@ return this._lang;

}, {
key: 'pitch',
key: "pitch",
get: function get() {

@@ -288,3 +253,3 @@ return this._pitch;

}, {
key: 'rate',
key: "rate",
get: function get() {

@@ -297,3 +262,3 @@ return this._rate;

}, {
key: 'voice',
key: "voice",
get: function get() {

@@ -306,3 +271,3 @@ return this._voice;

}, {
key: 'volume',
key: "volume",
get: function get() {

@@ -315,6 +280,6 @@ return this._volume;

}]);
return _class;
}(_domEventEmitter2.default);
return _default;
}(_domEventEmitter.default);
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=SpeechSynthesisUtterance.js.map
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _classCallCheck2 = require("babel-runtime/helpers/classCallCheck");
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _createClass2 = require("babel-runtime/helpers/createClass");
var _createClass3 = _interopRequireDefault(_createClass2);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _class = function () {
function _class(_ref) {
var _default =
/*#__PURE__*/
function () {
function _default(_ref) {
var gender = _ref.gender,
lang = _ref.lang,
voiceURI = _ref.voiceURI;
(0, _classCallCheck3.default)(this, _class);
(0, _classCallCheck2.default)(this, _default);
this._gender = gender;

@@ -30,3 +28,3 @@ this._lang = lang;

(0, _createClass3.default)(_class, [{
(0, _createClass2.default)(_default, [{
key: "default",

@@ -62,6 +60,6 @@ get: function get() {

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=SpeechSynthesisVoice.js.map

@@ -7,5 +7,5 @@ "use strict";

exports.default = subscribeEvent;
function subscribeEvent(target, name, handler) {
target.addEventListener(name, handler);
return function () {

@@ -12,0 +12,0 @@ return target.removeEventListener(name, handler);

@@ -1,17 +0,12 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _promise = require('babel-runtime/core-js/promise');
var _exchangeToken = _interopRequireDefault(require("./exchangeToken"));
var _promise2 = _interopRequireDefault(_promise);
var _exchangeToken = require('./exchangeToken');
var _exchangeToken2 = _interopRequireDefault(_exchangeToken);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
// Token expiration is hardcoded at 10 minutes

@@ -22,6 +17,5 @@ // https://docs.microsoft.com/en-us/azure/cognitive-services/Speech/how-to/how-to-authentication?tabs=Powershell#use-an-authorization-token

exports.default = function (subscriptionKey) {
var _default = function _default(subscriptionKey) {
var lastRenew = 0;
var accessTokenPromise = null;
return function () {

@@ -36,8 +30,6 @@ if (!lastRenew) {

lastRenew = now;
accessTokenPromise = (0, _exchangeToken2.default)(subscriptionKey).catch(function (err) {
accessTokenPromise = (0, _exchangeToken.default)(subscriptionKey).catch(function (err) {
// Force to renew on next fetch
lastRenew = 0;
return _promise2.default.reject(err);
return Promise.reject(err);
});

@@ -49,2 +41,4 @@ }

};
exports.default = _default;
//# sourceMappingURL=createFetchTokenUsingSubscriptionKey.js.map

@@ -1,33 +0,28 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
var _createClass2 = require('babel-runtime/helpers/createClass');
var _events = _interopRequireDefault(require("events"));
var _createClass3 = _interopRequireDefault(_createClass2);
var _events = require('events');
var _events2 = _interopRequireDefault(_events);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var _class = function () {
function _class() {
var _default =
/*#__PURE__*/
function () {
function _default() {
var _this = this;
var events = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
(0, _classCallCheck3.default)(this, _class);
this._events = new _events2.default();
(0, _classCallCheck2.default)(this, _default);
this._events = new _events.default();
events.forEach(function (name) {
_this._events.addListener(name, function (event) {
var handler = _this['on' + name];
var handler = _this["on".concat(name)];

@@ -39,4 +34,4 @@ handler && handler.call(_this, event);

(0, _createClass3.default)(_class, [{
key: 'addEventListener',
(0, _createClass2.default)(_default, [{
key: "addEventListener",
value: function addEventListener(name, listener) {

@@ -46,3 +41,3 @@ this._events.addListener(name, listener);

}, {
key: 'removeEventListener',
key: "removeEventListener",
value: function removeEventListener(name, listener) {

@@ -52,5 +47,7 @@ name ? this._events.removeListener(name, listener) : this._events.removeAllListeners(name);

}, {
key: 'emit',
key: "emit",
value: function emit(name) {
var event = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : { type: name };
var event = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {
type: name
};

@@ -60,6 +57,6 @@ this._events.emit(name, event);

}]);
return _class;
return _default;
}();
exports.default = _class;
exports.default = _default;
//# sourceMappingURL=domEventEmitter.js.map

@@ -1,23 +0,26 @@

'use strict';
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = _default;
var _regenerator = require('babel-runtime/regenerator');
var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
var _regenerator2 = _interopRequireDefault(_regenerator);
var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
var _asyncToGenerator2 = require('babel-runtime/helpers/asyncToGenerator');
var TOKEN_URL = 'https://api.cognitive.microsoft.com/sts/v1.0/issueToken';
var _asyncToGenerator3 = _interopRequireDefault(_asyncToGenerator2);
function _default(_x) {
return _ref.apply(this, arguments);
}
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var TOKEN_URL = 'https://api.cognitive.microsoft.com/sts/v1.0/issueToken';
exports.default = function () {
var _ref = (0, _asyncToGenerator3.default)( /*#__PURE__*/_regenerator2.default.mark(function _callee(subscriptionKey) {
function _ref() {
_ref = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee(subscriptionKey) {
var res;
return _regenerator2.default.wrap(function _callee$(_context) {
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {

@@ -42,9 +45,9 @@ switch (_context.prev = _context.next) {

throw new Error('Failed to fetch speech token, server returned ' + res.status);
throw new Error("Failed to fetch speech token, server returned ".concat(res.status));
case 5:
return _context.abrupt('return', res.text());
return _context.abrupt("return", res.text());
case 6:
case 'end':
case "end":
return _context.stop();

@@ -55,7 +58,4 @@ }

}));
return function (_x) {
return _ref.apply(this, arguments);
};
}();
return _ref.apply(this, arguments);
}
//# sourceMappingURL=exchangeToken.js.map
{
"name": "web-speech-cognitive-services",
"version": "3.0.0-master.1a40b8b",
"version": "3.0.0-master.3bf86f4",
"description": "Polyfill Web Speech API with Cognitive Services Speech-to-Text service",

@@ -30,6 +30,6 @@ "keywords": [

"scripts": {
"bootstrap": "lerna bootstrap",
"build": "lerna run --stream build",
"test": "lerna run --stream test",
"watch": "lerna run --stream watch"
"build": "babel --out-dir lib --ignore **/*.spec.js,**/*.test.js --source-maps true src/",
"clean": "rimraf lib",
"test": "echo No tests defined",
"watch": "npm run build -- --watch"
},

@@ -46,16 +46,28 @@ "author": "William Wong <compulim@hotmail.com> (http://compulim.info/)",

"homepage": "https://github.com/compulim/web-speech-cognitive-services#readme",
"peerDependencies": {
"microsoft-speech-browser-sdk": "^0.0.12",
"react": "^16.4.2"
},
"devDependencies": {
"lerna": "^3.1.4"
"@babel/cli": "^7.1.2",
"@babel/core": "^7.1.2",
"@babel/plugin-proposal-object-rest-spread": "^7.0.0",
"@babel/plugin-transform-runtime": "^7.1.0",
"@babel/preset-env": "^7.1.0",
"@babel/preset-react": "^7.0.0",
"babel-core": "7.0.0-bridge.0",
"babel-jest": "^23.6.0",
"babel-plugin-version-transform": "^1.0.0",
"jest": "^23.6.0",
"microsoft-speech-browser-sdk": "^0.0.12",
"react": "^16.4.2",
"rimraf": "^2.6.2"
},
"dependencies": {
"@babel/runtime": "^7.1.2",
"event-as-promise": "^1.0.3",
"events": "^3.0.0",
"memoize-one": "^4.0.0",
"react-say": "0.0.1-master.ca079e3",
"simple-update-in": "^1.2.0"
},
"peerDependencies": {
"microsoft-speech-browser-sdk": "^0.0.12",
"react": "^16.4.2"
}
}

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet