web-speech-cognitive-services
Comparing version 4.0.0-master.ff12edb to 4.0.0
@@ -8,2 +8,4 @@ # Changelog
## [Unreleased]
## [4.0.0] - 2018-12-10
### Added
@@ -15,2 +17,3 @@ - New playground for better debuggability
- Speech synthesis: Support `pause` and `resume` (with `pause` and `resume` event)
- Speech synthesis: Support `speaking` property
@@ -21,2 +24,5 @@ ### Changed
### Fixed
- Fix [#13](https://github.com/compulim/web-speech-cognitive-services/issues/13) Speech recognition: `SpeechRecognitionResult` should be iterable
## [3.0.0] - 2018-10-31
@@ -23,0 +29,0 @@ ### Added
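The `pause`/`resume` and `speaking` additions mirror the standard `window.speechSynthesis` surface. A minimal usage sketch, assuming `speechSynthesis` and `SpeechSynthesisUtterance` here are the polyfilled objects this package produces (how you obtain them depends on your setup and is outside this diff):

```js
// Minimal sketch against the standard Web Speech API surface; "speechSynthesis" and
// "SpeechSynthesisUtterance" are assumed to be the polyfilled objects from this package.
const utterance = new SpeechSynthesisUtterance('Hello from Cognitive Services.');

utterance.onpause = () => console.log('utterance paused');
utterance.onresume = () => console.log('utterance resumed');

speechSynthesis.speak(utterance);
console.log(speechSynthesis.speaking); // true while an utterance is being consumed

speechSynthesis.pause();  // fires the "pause" event on the active utterance
speechSynthesis.resume(); // fires "resume", or restarts the queue if it was idle
```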
@@ -99,3 +99,3 @@ "use strict";
var deviceModel = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 'web-speech-cognitive-services';
var deviceVersion = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : "4.0.0-master.ff12edb";
var deviceVersion = arguments.length > 7 && arguments[7] !== undefined ? arguments[7] : "4.0.0";
var config = new CognitiveSpeech.RecognizerConfig(new CognitiveSpeech.SpeechConfig(new CognitiveSpeech.Context(new CognitiveSpeech.OS(osPlatform, osName, osVersion), new CognitiveSpeech.Device(deviceManufacturer, deviceModel, deviceVersion))), mode, language, CognitiveSpeech.SpeechResultFormat.Detailed);
@@ -205,3 +205,3 @@ var fetchToken;
case 0:
recognizer = this.recognizer = this.createRecognizer(this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "4.0.0-master.ff12edb");
recognizer = this.recognizer = this.createRecognizer(this.lang, this.osPlatform || window.navigator.userAgent, this.osName || window.navigator.appName, this.osVersion || window.navigator.appVersion, this.deviceManufacturer || 'web-speech-cognitive-services', this.deviceModel || 'web-speech-cognitive-services', this.deviceVersion || "4.0.0");
_toPromise = toPromise(), eventListener = _toPromise.eventListener, promises = (0, _objectWithoutProperties2.default)(_toPromise, ["eventListener"]);
@@ -208,0 +208,0 @@ speechContext = this.grammars && this.grammars.createSpeechContext();
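In readable form, the hunks above only touch the telemetry defaults used to build the Bing Speech recognizer config: the baked-in `deviceVersion` string changes from the prerelease tag to `4.0.0`. A rough sketch of the same construction; `buildRecognizerConfig` and the options-object shape are illustrative (the real module uses positional arguments), and `CognitiveSpeech` is the Bing Speech browser SDK namespace the module imports elsewhere:

```js
// Rough sketch (assumed names) of the telemetry defaults in the hunk above; the
// only functional change in this release is the baked-in deviceVersion string.
// CognitiveSpeech is the Bing Speech browser SDK namespace imported elsewhere.
function buildRecognizerConfig({
  mode,
  language,
  osPlatform = window.navigator.userAgent,
  osName = window.navigator.appName,
  osVersion = window.navigator.appVersion,
  deviceManufacturer = 'web-speech-cognitive-services',
  deviceModel = 'web-speech-cognitive-services',
  deviceVersion = '4.0.0' // was "4.0.0-master.ff12edb" in the prerelease build
} = {}) {
  return new CognitiveSpeech.RecognizerConfig(
    new CognitiveSpeech.SpeechConfig(
      new CognitiveSpeech.Context(
        new CognitiveSpeech.OS(osPlatform, osName, osVersion),
        new CognitiveSpeech.Device(deviceManufacturer, deviceModel, deviceVersion)
      )
    ),
    mode,
    language,
    CognitiveSpeech.SpeechResultFormat.Detailed
  );
}
```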
@@ -32,2 +32,25 @@ "use strict";
(0, _createClass2.default)(_default, [{
key: "pause",
value: function pause() {
this.paused = true;
this.consumer && this.consumer.pause();
}
}, {
key: "push",
value: function push(utterance) {
this.queue.push(utterance);
this.startConsumer();
}
}, {
key: "resume",
value: function resume() {
this.paused = false;
if (this.consumer) {
this.consumer.resume();
} else {
this.startConsumer();
}
}
}, {
key: "startConsumer",
@@ -69,25 +92,2 @@ value: function () {
}, {
key: "pause",
value: function pause() {
this.paused = true;
this.consumer && this.consumer.pause();
}
}, {
key: "push",
value: function push(utterance) {
this.queue.push(utterance);
this.startConsumer();
}
}, {
key: "resume",
value: function resume() {
this.paused = false;
if (this.consumer) {
this.consumer.resume();
} else {
this.startConsumer();
}
}
}, {
key: "stop",
@@ -98,2 +98,7 @@ value: function stop() {
}
}, {
key: "speaking",
get: function get() {
return !!this.consumer;
}
}]);
@@ -100,0 +105,0 @@ return _default;
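For readability, here is an approximate ES2015+ rendering of the queue methods added above. The class name is assumed; the constructor, the `queue` storage, and the `startConsumer()` loop exist elsewhere in the module and are not shown here:

```js
// Approximate readable rendering of the compiled queue methods above; the
// constructor and the startConsumer() draining loop live elsewhere in the module.
class UtteranceQueue {
  pause() {
    this.paused = true;
    this.consumer && this.consumer.pause(); // pause the utterance currently being spoken, if any
  }

  push(utterance) {
    this.queue.push(utterance);
    this.startConsumer(); // kick the consumer loop if it is idle
  }

  resume() {
    this.paused = false;

    if (this.consumer) {
      this.consumer.resume(); // an utterance is mid-flight: resume it
    } else {
      this.startConsumer();   // nothing active: restart draining the queue
    }
  }

  get speaking() {
    return !!this.consumer;   // "speaking" means a consumer is currently active
  }
}
```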
@@ -142,3 +142,3 @@ "use strict";
_context.t0 = _context.sent;
_context.next = 16;
_context.next = 18;
break;
@@ -148,12 +148,16 @@
if (!authorizationToken) {
_context.next = 12;
_context.next = 14;
break;
}
_context.t1 = authorizationToken;
_context.next = 15;
_context.next = 11;
return authorizationToken;
case 11:
_context.t1 = _context.sent;
_context.next = 17;
break;
case 12:
_context.next = 14;
case 14:
_context.next = 16;
return fetchMemoizedAuthorizationToken({
@@ -164,9 +168,9 @@ now: Date.now,
case 14:
case 16:
_context.t1 = _context.sent;
case 15:
case 17:
_context.t0 = _context.t1;
case 16:
case 18:
utterance.authorizationToken = _context.t0;
@@ -178,3 +182,3 @@ utterance.outputFormat = _this2.outputFormat;
case 20:
case 22:
case "end":
@@ -204,2 +208,7 @@ return _context.stop();
}()
}, {
key: "speaking",
get: function get() {
return this.queue.speaking;
}
}]);
@@ -206,0 +215,0 @@ return SpeechSynthesis;
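Stripped of the regenerator bookkeeping, the hunks above make two changes on the text-to-speech side: the `authorizationToken` option is now awaited before being copied onto the utterance (so a Promise works, and the earlier branch handles a function-typed source), and `SpeechSynthesis` gains a `speaking` getter that simply delegates to its queue. A rough async rendering of the token branch; `fetchMemoizedAuthorizationToken` is the module's own helper, and the options spread stands in for arguments elided in the diff:

```js
// Rough rendering of the token-resolution branch above. fetchMemoizedAuthorizationToken
// and the rest of the utterance setup live elsewhere in the module; besides `now`,
// its options are elided in the diff, hence the placeholder spread.
async function resolveAuthorizationToken({ authorizationToken, fetchOptions }) {
  if (authorizationToken) {
    // New in 4.0.0: the supplied token is awaited, so it may be a string or a Promise<string>.
    return await authorizationToken;
  }

  // No explicit token: fetch (and memoize) one via the module's helper.
  return fetchMemoizedAuthorizationToken({ now: Date.now, ...fetchOptions });
}
```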
@@ -10,3 +10,3 @@ "use strict";
var _objectSpread2 = _interopRequireDefault(require("@babel/runtime/helpers/objectSpread"));
var _arrayToMap = _interopRequireDefault(require("../../Util/arrayToMap"));
@@ -19,11 +19,2 @@ var _SpeechSDK = _interopRequireDefault(require("../SpeechSDK"));
function arrayToMap(array, extras) {
return (0, _objectSpread2.default)({}, array.reduce(function (map, value, index) {
map[index] = value;
return map;
}, {}), extras, {
length: array.length
});
}
function _default(result) {
@@ -42,3 +33,3 @@ var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},
} else if (result.reason === RecognizedSpeech) {
var resultList = [(0, _arrayToMap.default)((result.json.NBest || []).slice(0, maxAlternatives).map(function (_ref2) {
var confidence = _ref2.Confidence,
@@ -45,0 +36,0 @@ display = _ref2.Display,
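Here the inline `arrayToMap` helper is removed in favor of the shared `../../Util/arrayToMap`. Per the changelog, this release also fixes #13 by making `SpeechRecognitionResult` iterable, which the shared helper presumably enables; the version removed above only copied numeric keys, extras, and `length`. A hedged sketch of what such a helper could look like (the `Symbol.iterator` part is an assumption inferred from the changelog, not confirmed from the real `Util/arrayToMap`):

```js
// Hedged sketch of a shared arrayToMap helper. The removed inline version only
// produced an array-like object (numeric keys, extras, length); making the result
// iterable (issue #13) additionally needs a Symbol.iterator, assumed here.
function arrayToMap(array, extras) {
  const map = {
    ...array.reduce((map, value, index) => {
      map[index] = value;
      return map;
    }, {}),
    ...extras,
    length: array.length
  };

  // Array-like + iterable: for...of and Array.from now work on the result.
  map[Symbol.iterator] = function* () {
    for (let index = 0; index < array.length; index++) {
      yield array[index];
    }
  };

  return map;
}
```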
@@ -141,70 +141,72 @@ "use strict";
_this._maxAlternatives = 1;
_this.createRecognizer = (0, _memoizeOne.default)(
/*#__PURE__*/
(0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee() {
var _ref5,
language,
speechConfig,
_args = arguments;
return _this;
}
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_ref5 = _args.length > 0 && _args[0] !== undefined ? _args[0] : {}, language = _ref5.language;
(0, _createClass2.default)(SpeechRecognition, [{
key: "createRecognizer",
value: function () {
var _createRecognizer = (0, _asyncToGenerator2.default)(
/*#__PURE__*/
_regenerator.default.mark(function _callee() {
var speechConfig;
return _regenerator.default.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
if (!authorizationToken) {
_context.next = 16;
break;
}
if (!authorizationToken) {
_context.next = 15;
break;
}
_context.t1 = SpeechConfig;
_context.t1 = SpeechConfig;
if (!(typeof authorizationToken === 'function')) {
_context.next = 8;
break;
}
if (!(typeof authorizationToken === 'function')) {
_context.next = 9;
_context.next = 5;
return authorizationToken();
case 5:
_context.t2 = _context.sent;
_context.next = 11;
break;
}
_context.next = 6;
return authorizationToken();
case 8:
_context.next = 10;
return authorizationToken;
case 6:
_context.t2 = _context.sent;
_context.next = 10;
break;
case 10:
_context.t2 = _context.sent;
case 9:
_context.t2 = authorizationToken;
case 11:
_context.t3 = _context.t2;
_context.t4 = region;
_context.t0 = _context.t1.fromAuthorizationToken.call(_context.t1, _context.t3, _context.t4);
_context.next = 17;
break;
case 10:
_context.t3 = _context.t2;
_context.t4 = region;
_context.t0 = _context.t1.fromAuthorizationToken.call(_context.t1, _context.t3, _context.t4);
_context.next = 16;
break;
case 16:
_context.t0 = SpeechConfig.fromSubscription(subscriptionKey, region);
case 15:
_context.t0 = SpeechConfig.fromSubscription(subscriptionKey, region);
case 17:
speechConfig = _context.t0;
speechConfig.outputFormat = OutputFormat.Detailed;
speechConfig.speechRecognitionLanguage = this.lang || 'en-US';
return _context.abrupt("return", new SpeechRecognizer(speechConfig, audioConfig));
case 16:
speechConfig = _context.t0;
speechConfig.outputFormat = OutputFormat.Detailed;
speechConfig.speechRecognitionLanguage = language || 'en-US';
return _context.abrupt("return", new SpeechRecognizer(speechConfig, audioConfig));
case 20:
case "end":
return _context.stop();
case 21:
case "end":
return _context.stop();
}
}
}
}, _callee, this);
})), null, function (recognizer) {
recognizer.dispose();
});
return _this;
}
}, _callee, this);
}));
(0, _createClass2.default)(SpeechRecognition, [{
return function createRecognizer() {
return _createRecognizer.apply(this, arguments);
};
}()
}, {
key: "emitCognitiveServices",
@@ -255,10 +257,10 @@ value: function emitCognitiveServices(type, event) {
case 2:
recognizer = this._recognizer = _context3.sent;
recognizer = _context3.sent;
queue = (0, _createPromiseQueue.default)();
recognizer.canceled = function (_, _ref6) {
var errorDetails = _ref6.errorDetails,
offset = _ref6.offset,
reason = _ref6.reason,
sessionId = _ref6.sessionId;
recognizer.canceled = function (_, _ref4) {
var errorDetails = _ref4.errorDetails,
offset = _ref4.offset,
reason = _ref4.reason,
sessionId = _ref4.sessionId;
queue.push({
@@ -274,6 +276,6 @@ canceled: {
recognizer.recognized = function (_, _ref7) {
var offset = _ref7.offset,
result = _ref7.result,
sessionId = _ref7.sessionId;
recognizer.recognized = function (_, _ref5) {
var offset = _ref5.offset,
result = _ref5.result,
sessionId = _ref5.sessionId;
queue.push({
@@ -288,6 +290,6 @@ recognized: {
recognizer.recognizing = function (_, _ref8) {
var offset = _ref8.offset,
result = _ref8.result,
sessionId = _ref8.sessionId;
recognizer.recognizing = function (_, _ref6) {
var offset = _ref6.offset,
result = _ref6.result,
sessionId = _ref6.sessionId;
queue.push({
@@ -521,4 +523,5 @@ recognizing: {
this.emit('end');
recognizer.dispose();
case 23:
case 24:
case "end":
@@ -537,20 +540,3 @@ return _context3.stop();
key: "stop",
value: function stop() {} // stop() {
// if (!this._recognizer) {
// // TODO: [P3] Should we throw an error or leave it as-is?
// throw new Error('not started');
// }
// if (this.continuous) {
// const onStop = event => {
// console.warn(event);
// this.emit('cognitiveservices', { subType: 'stop' });
// };
// const onError = error => {
// console.warn(error);
// this.emit('cognitiveservices', { error, subType: 'error on stop' });
// };
// this._recognizer.stopContinuousRecognitionAsync(onStop, onError);
// }
// }
value: function stop() {}
}, {
@@ -557,0 +543,0 @@ key: "continuous",
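Reading the interleaved old/new lines above (the prerelease lines come first in each pair, as in the version-string hunks), the 4.0.0 side turns `createRecognizer` from a memoized function assigned in the constructor (taking a `language` argument) into a plain async instance method: it resolves the authorization token (a function source is called and awaited; a plain value is now awaited too, so a Promise works), builds a `SpeechConfig` via `fromAuthorizationToken` or `fromSubscription`, requests `Detailed` output, reads the language from `this.lang`, and returns a `SpeechRecognizer` that is disposed after the `end` event. An approximate async/await rendering; `authorizationToken`, `subscriptionKey`, `region`, and `audioConfig` are closure variables wired up from the ponyfill options outside this hunk:

```js
// Approximate rendering of the 4.0.0 createRecognizer method above. SpeechConfig,
// OutputFormat, and SpeechRecognizer are the Speech SDK classes the compiled code
// calls; authorizationToken, subscriptionKey, region, and audioConfig are captured
// from the ponyfill options elsewhere in the module.
class SpeechRecognition {
  async createRecognizer() {
    let speechConfig;

    if (authorizationToken) {
      // A function-typed token source is called; either way the result is awaited,
      // so a string, a Promise<string>, or a function returning either all work.
      const token = typeof authorizationToken === 'function'
        ? await authorizationToken()
        : await authorizationToken;

      speechConfig = SpeechConfig.fromAuthorizationToken(token, region);
    } else {
      speechConfig = SpeechConfig.fromSubscription(subscriptionKey, region);
    }

    speechConfig.outputFormat = OutputFormat.Detailed;
    speechConfig.speechRecognitionLanguage = this.lang || 'en-US';

    return new SpeechRecognizer(speechConfig, audioConfig);
  }
}
```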
@@ -32,2 +32,25 @@ "use strict";
(0, _createClass2.default)(_default, [{
key: "pause",
value: function pause() {
this.paused = true;
this.consumer && this.consumer.pause();
}
}, {
key: "push",
value: function push(utterance) {
this.queue.push(utterance);
this.startConsumer();
}
}, {
key: "resume",
value: function resume() {
this.paused = false;
if (this.consumer) {
this.consumer.resume();
} else {
this.startConsumer();
}
}
}, {
key: "startConsumer",
@@ -69,25 +92,2 @@ value: function () {
}, {
key: "pause",
value: function pause() {
this.paused = true;
this.consumer && this.consumer.pause();
}
}, {
key: "push",
value: function push(utterance) {
this.queue.push(utterance);
this.startConsumer();
}
}, {
key: "resume",
value: function resume() {
this.paused = false;
if (this.consumer) {
this.consumer.resume();
} else {
this.startConsumer();
}
}
}, {
key: "stop",
@@ -98,2 +98,7 @@ value: function stop() {
}
}, {
key: "speaking",
get: function get() {
return !!this.consumer;
}
}]);
@@ -100,0 +105,0 @@ return _default;
@@ -153,3 +153,3 @@ "use strict";
_context.t0 = _context.sent;
_context.next = 16;
_context.next = 18;
break;
@@ -159,12 +159,16 @@
if (!authorizationToken) {
_context.next = 12;
_context.next = 14;
break;
}
_context.t1 = authorizationToken;
_context.next = 15;
_context.next = 11;
return authorizationToken;
case 11:
_context.t1 = _context.sent;
_context.next = 17;
break;
case 12:
_context.next = 14;
case 14:
_context.next = 16;
return fetchMemoizedAuthorizationToken({
@@ -176,9 +180,9 @@ now: Date.now,
case 14:
case 16:
_context.t1 = _context.sent;
case 15:
case 17:
_context.t0 = _context.t1;
case 16:
case 18:
utterance.authorizationToken = _context.t0;
@@ -191,3 +195,3 @@ utterance.region = region;
case 21:
case 23:
case "end":
@@ -205,2 +209,7 @@ return _context.stop();
}
}, {
key: "speaking",
get: function get() {
return this.queue.speaking;
}
}]);
@@ -207,0 +216,0 @@ return SpeechSynthesis;
{
"name": "web-speech-cognitive-services",
"version": "4.0.0-master.ff12edb",
"version": "4.0.0",
"description": "Polyfill Web Speech API with Cognitive Services Speech-to-Text service",
@@ -5,0 +5,0 @@ "keywords": [
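With the prerelease suffix dropped from `package.json`, a consumer can sanity-check the installed build at runtime. A trivial sketch (CommonJS, relying only on the package shipping its own `package.json`, as published npm packages do):

```js
// Trivial check that the stable release is installed rather than a 4.0.0-master.* build.
const { version } = require('web-speech-cognitive-services/package.json');

if (version !== '4.0.0') {
  console.warn(`Expected web-speech-cognitive-services@4.0.0, found ${version}`);
}
```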
No v1
Quality: Package is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package