assemblyai
Comparing version 4.3.1 to 4.3.2
# Changelog | ||
## [4.3.2] - 2024-03-08 | ||
### Added | ||
- Add `audio_url` property to `TranscribeParams` in addition to the `audio` property. You can use one or the other. `audio_url` only accepts a URL string. | ||
- Add `TranscriptReadyNotification` type for the transcript webhook body. | ||
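For context, a minimal TypeScript sketch of how these two additions could be used; the field names on `TranscriptReadyNotification` (`transcript_id`, `status`) are assumed from AssemblyAI's webhook documentation and are not shown in this diff:
```ts
import { AssemblyAI } from "assemblyai";
import type { TranscribeParams, TranscriptReadyNotification } from "assemblyai";

const client = new AssemblyAI({ apiKey: "<YOUR_API_KEY>" });

// `audio_url` only accepts a URL string; `audio` also accepts a local path,
// stream, or buffer. Use one property or the other, not both.
const params: TranscribeParams = {
  audio_url: "https://storage.googleapis.com/aai-web-samples/espn-bears.m4a",
};
const transcript = await client.transcripts.transcribe(params);

// Assumed shape of the webhook body typed by `TranscriptReadyNotification`.
function onTranscriptReady(body: TranscriptReadyNotification) {
  console.log(body.transcript_id, body.status); // "completed" or "error"
}
```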
### Changed | ||
- Update codebase to use TSDoc | ||
- Update README.md with more samples | ||
## [4.3.0] - 2024-02-15 | ||
@@ -4,0 +16,0 @@ |
@@ -57,3 +57,3 @@ (function (global, factory) { | ||
* Create a new service. | ||
* @param params The parameters to use for the service. | ||
* @param params - The parameters to use for the service. | ||
*/ | ||
@@ -64,4 +64,4 @@ constructor(params) { | ||
fetch(input, init) { | ||
var _a; | ||
return __awaiter(this, void 0, void 0, function* () { | ||
var _a; | ||
init = init !== null && init !== void 0 ? init : {}; | ||
@@ -127,3 +127,3 @@ init.headers = (_a = init.headers) !== null && _a !== void 0 ? _a : {}; | ||
* Delete the data for a previously submitted LeMUR request. | ||
* @param id ID of the LeMUR request | ||
* @param id - ID of the LeMUR request | ||
*/ | ||
@@ -211,4 +211,3 @@ purgeRequestData(id) { | ||
this.encoding = params.encoding; | ||
this.endUtteranceSilenceThreshold = | ||
params.endUtteranceSilenceThreshold; | ||
this.endUtteranceSilenceThreshold = params.endUtteranceSilenceThreshold; | ||
if ("token" in params && params.token) | ||
@@ -340,4 +339,4 @@ this.token = params.token; | ||
* Configure the threshold for how long to wait before ending an utterance. Default is 700ms. | ||
* @param threshold The duration of the end utterance silence threshold in milliseconds | ||
* @format integer | ||
* @param threshold - The duration of the end utterance silence threshold in milliseconds. | ||
* This value must be an integer between 0 and 20_000. | ||
*/ | ||
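For illustration only (not part of this diff), a small sketch of adjusting this threshold on an open real-time session; the 500 ms value is arbitrary:
```ts
// Assumes `rt` is a connected RealtimeTranscriber, e.g.
// const rt = client.realtime.transcriber({ sampleRate: 16_000 });
// The threshold must be an integer between 0 and 20_000 milliseconds.
rt.configureEndUtteranceSilenceThreshold(500);

// A current utterance can also be ended immediately:
rt.forceEndUtterance();
```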
@@ -353,4 +352,4 @@ configureEndUtteranceSilenceThreshold(threshold) { | ||
} | ||
close(waitForSessionTermination = true) { | ||
return __awaiter(this, void 0, void 0, function* () { | ||
close() { | ||
return __awaiter(this, arguments, void 0, function* (waitForSessionTermination = true) { | ||
if (this.socket) { | ||
@@ -437,4 +436,4 @@ if (this.socket.readyState === WebSocket$1.OPEN) { | ||
* Transcribe an audio file. This will create a transcript and wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to transcribe an audio file. | ||
* @param options The options to transcribe an audio file. | ||
* @param params - The parameters to transcribe an audio file. | ||
* @param options - The options to transcribe an audio file. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
@@ -450,3 +449,3 @@ */ | ||
* Submits a transcription job for an audio file. This will not wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to start the transcription of an audio file. | ||
* @param params - The parameters to start the transcription of an audio file. | ||
* @returns A promise that resolves to the queued transcript. | ||
@@ -456,22 +455,29 @@ */ | ||
return __awaiter(this, void 0, void 0, function* () { | ||
const { audio } = params, createParams = __rest(params, ["audio"]); | ||
let audioUrl; | ||
if (typeof audio === "string") { | ||
const path = getPath(audio); | ||
if (path !== null) { | ||
// audio is local path, upload local file | ||
audioUrl = yield this.files.upload(path); | ||
let transcriptParams = undefined; | ||
if ("audio" in params) { | ||
const { audio } = params, audioTranscriptParams = __rest(params, ["audio"]); | ||
if (typeof audio === "string") { | ||
const path = getPath(audio); | ||
if (path !== null) { | ||
// audio is local path, upload local file | ||
audioUrl = yield this.files.upload(path); | ||
} | ||
else { | ||
// audio is not a local path, assume it's a URL | ||
audioUrl = audio; | ||
} | ||
} | ||
else { | ||
// audio is not a local path, assume it's a URL | ||
audioUrl = audio; | ||
// audio is of uploadable type | ||
audioUrl = yield this.files.upload(audio); | ||
} | ||
transcriptParams = Object.assign(Object.assign({}, audioTranscriptParams), { audio_url: audioUrl }); | ||
} | ||
else { | ||
// audio is of uploadable type | ||
audioUrl = yield this.files.upload(audio); | ||
transcriptParams = params; | ||
} | ||
const data = yield this.fetchJson("/v2/transcript", { | ||
method: "POST", | ||
body: JSON.stringify(Object.assign(Object.assign({}, createParams), { audio_url: audioUrl })), | ||
body: JSON.stringify(transcriptParams), | ||
}); | ||
@@ -483,4 +489,4 @@ return data; | ||
* Create a transcript. | ||
* @param params The parameters to create a transcript. | ||
* @param options The options used for creating the new transcript. | ||
* @param params - The parameters to create a transcript. | ||
* @param options - The options used for creating the new transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -490,4 +496,4 @@ * @deprecated Use `transcribe` instead to transcribe an audio file that includes polling, or `submit` to transcribe an audio file without polling. | ||
create(params, options) { | ||
var _a; | ||
return __awaiter(this, void 0, void 0, function* () { | ||
var _a; | ||
const path = getPath(params.audio_url); | ||
@@ -510,9 +516,9 @@ if (path !== null) { | ||
* Wait until the transcript is ready, i.e. the status is "completed" or "error". | ||
* @param transcriptId The ID of the transcript. | ||
* @param options The options to wait until the transcript is ready. | ||
* @param transcriptId - The ID of the transcript. | ||
* @param options - The options to wait until the transcript is ready. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
*/ | ||
waitUntilReady(transcriptId, options) { | ||
var _a, _b; | ||
return __awaiter(this, void 0, void 0, function* () { | ||
var _a, _b; | ||
const pollingInterval = (_a = options === null || options === void 0 ? void 0 : options.pollingInterval) !== null && _a !== void 0 ? _a : 3000; | ||
@@ -539,3 +545,3 @@ const pollingTimeout = (_b = options === null || options === void 0 ? void 0 : options.pollingTimeout) !== null && _b !== void 0 ? _b : -1; | ||
* Retrieve a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -548,16 +554,16 @@ */ | ||
* Retrieves a page of transcript listings. | ||
* @param parameters The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
* @param params - The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
*/ | ||
list(parameters) { | ||
list(params) { | ||
return __awaiter(this, void 0, void 0, function* () { | ||
let url = "/v2/transcript"; | ||
if (typeof parameters === "string") { | ||
url = parameters; | ||
if (typeof params === "string") { | ||
url = params; | ||
} | ||
else if (parameters) { | ||
url = `${url}?${new URLSearchParams(Object.keys(parameters).map((key) => { | ||
else if (params) { | ||
url = `${url}?${new URLSearchParams(Object.keys(params).map((key) => { | ||
var _a; | ||
return [ | ||
key, | ||
((_a = parameters[key]) === null || _a === void 0 ? void 0 : _a.toString()) || "", | ||
((_a = params[key]) === null || _a === void 0 ? void 0 : _a.toString()) || "", | ||
]; | ||
@@ -578,3 +584,3 @@ }))}`; | ||
* Delete a transcript | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -588,5 +594,5 @@ */ | ||
* You can search for individual words, numbers, or phrases containing up to five words or numbers. | ||
* @param id The identifier of the transcript. | ||
* @param words Keywords to search for. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @param words - Keywords to search for. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -599,4 +605,4 @@ wordSearch(id, words) { | ||
* Retrieve all sentences of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -608,4 +614,4 @@ sentences(id) { | ||
* Retrieve all paragraphs of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the paragraphs. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the paragraphs. | ||
*/ | ||
@@ -617,9 +623,9 @@ paragraphs(id) { | ||
* Retrieve subtitles of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param format The format of the subtitles. | ||
* @param chars_per_caption The maximum number of characters per caption. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @param format - The format of the subtitles. | ||
* @param chars_per_caption - The maximum number of characters per caption. | ||
* @returns A promise that resolves to the subtitles text. | ||
*/ | ||
subtitles(id, format = "srt", chars_per_caption) { | ||
return __awaiter(this, void 0, void 0, function* () { | ||
subtitles(id_1) { | ||
return __awaiter(this, arguments, void 0, function* (id, format = "srt", chars_per_caption) { | ||
let url = `/v2/transcript/${id}/${format}`; | ||
@@ -637,4 +643,4 @@ if (chars_per_caption) { | ||
* Retrieve redactions of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the redacted audio response. | ||
*/ | ||
@@ -657,4 +663,4 @@ redactions(id) { | ||
* Upload a local file to AssemblyAI. | ||
* @param input The local file path to upload, or a stream or buffer of the file to upload. | ||
* @return A promise that resolves to the uploaded file URL. | ||
* @param input - The local file path to upload, or a stream or buffer of the file to upload. | ||
* @returns A promise that resolves to the uploaded file URL. | ||
*/ | ||
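As a reference, a hedged sketch of calling `upload` directly (the path is a placeholder and `client` is assumed to be an `AssemblyAI` instance); `transcribe` and `submit` call this for you when they receive local audio:
```ts
// Upload a local file and receive a URL that can be passed as `audio_url`.
const uploadUrl = await client.files.upload("./meeting.wav");
const queued = await client.transcripts.submit({ audio_url: uploadUrl });
```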
@@ -685,3 +691,3 @@ upload(input) { | ||
* Create a new AssemblyAI client. | ||
* @param params The parameters for the service, including the API key and base URL, if any. | ||
* @param params - The parameters for the service, including the API key and base URL, if any. | ||
*/ | ||
@@ -688,0 +694,0 @@ constructor(params) { |
@@ -1,1 +0,1 @@ | ||
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).assemblyai={})}(this,(function(e){"use strict";function t(e,t,s,i){return new(s||(s=Promise))((function(n,o){function r(e){try{c(i.next(e))}catch(e){o(e)}}function a(e){try{c(i.throw(e))}catch(e){o(e)}}function c(e){var t;e.done?n(e.value):(t=e.value,t instanceof s?t:new s((function(e){e(t)}))).then(r,a)}c((i=i.apply(e,t||[])).next())}))}"function"==typeof SuppressedError&&SuppressedError;class s{constructor(e){this.params=e}fetch(e,s){var i;return t(this,void 0,void 0,(function*(){(s=null!=s?s:{}).headers=null!==(i=s.headers)&&void 0!==i?i:{},s.headers=Object.assign({Authorization:this.params.apiKey,"Content-Type":"application/json"},s.headers),e.startsWith("http")||(e=this.params.baseUrl+e);const t=yield fetch(e,s);if(t.status>=400){let e;const s=yield t.text();if(s){try{e=JSON.parse(s)}catch(e){}if(null==e?void 0:e.error)throw new Error(e.error);throw new Error(s)}throw new Error(`HTTP Error: ${t.status} ${t.statusText}`)}return t}))}fetchJson(e,s){return t(this,void 0,void 0,(function*(){return(yield this.fetch(e,s)).json()}))}}class i extends s{summary(e){return this.fetchJson("/lemur/v3/generate/summary",{method:"POST",body:JSON.stringify(e)})}questionAnswer(e){return this.fetchJson("/lemur/v3/generate/question-answer",{method:"POST",body:JSON.stringify(e)})}actionItems(e){return this.fetchJson("/lemur/v3/generate/action-items",{method:"POST",body:JSON.stringify(e)})}task(e){return this.fetchJson("/lemur/v3/generate/task",{method:"POST",body:JSON.stringify(e)})}purgeRequestData(e){return this.fetchJson(`/lemur/v3/${e}`,{method:"DELETE"})}}const{WritableStream:n}="undefined"!=typeof window?window:"undefined"!=typeof global?global:globalThis;var o=null;"undefined"!=typeof WebSocket?o=WebSocket:"undefined"!=typeof MozWebSocket?o=MozWebSocket:"undefined"!=typeof global?o=global.WebSocket||global.MozWebSocket:"undefined"!=typeof window?o=window.WebSocket||window.MozWebSocket:"undefined"!=typeof self&&(o=self.WebSocket||self.MozWebSocket);var r,a=o;!function(e){e[e.BadSampleRate=4e3]="BadSampleRate",e[e.AuthFailed=4001]="AuthFailed",e[e.InsufficientFundsOrFreeAccount=4002]="InsufficientFundsOrFreeAccount",e[e.NonexistentSessionId=4004]="NonexistentSessionId",e[e.SessionExpired=4008]="SessionExpired",e[e.ClosedSession=4010]="ClosedSession",e[e.RateLimited=4029]="RateLimited",e[e.UniqueSessionViolation=4030]="UniqueSessionViolation",e[e.SessionTimeout=4031]="SessionTimeout",e[e.AudioTooShort=4032]="AudioTooShort",e[e.AudioTooLong=4033]="AudioTooLong",e[e.BadJson=4100]="BadJson",e[e.BadSchema=4101]="BadSchema",e[e.TooManyStreams=4102]="TooManyStreams",e[e.Reconnected=4103]="Reconnected",e[e.ReconnectAttemptsExhausted=1013]="ReconnectAttemptsExhausted"}(r||(r={}));const c={[r.BadSampleRate]:"Sample rate must be a positive integer",[r.AuthFailed]:"Not Authorized",[r.InsufficientFundsOrFreeAccount]:"Insufficient funds or you are using a free account. This feature is paid-only and requires you to add a credit card. 
Please visit https://assemblyai.com/dashboard/ to add a credit card to your account.",[r.NonexistentSessionId]:"Session ID does not exist",[r.SessionExpired]:"Session has expired",[r.ClosedSession]:"Session is closed",[r.RateLimited]:"Rate limited",[r.UniqueSessionViolation]:"Unique session violation",[r.SessionTimeout]:"Session Timeout",[r.AudioTooShort]:"Audio too short",[r.AudioTooLong]:"Audio too long",[r.BadJson]:"Bad JSON",[r.BadSchema]:"Bad schema",[r.TooManyStreams]:"Too many streams",[r.Reconnected]:"Reconnected",[r.ReconnectAttemptsExhausted]:"Reconnect attempts exhausted"};class l extends Error{}const d='{"terminate_session":true}';class h{constructor(e){var t,s;if(this.listeners={},this.realtimeUrl=null!==(t=e.realtimeUrl)&&void 0!==t?t:"wss://api.assemblyai.com/v2/realtime/ws",this.sampleRate=null!==(s=e.sampleRate)&&void 0!==s?s:16e3,this.wordBoost=e.wordBoost,this.encoding=e.encoding,this.endUtteranceSilenceThreshold=e.endUtteranceSilenceThreshold,"token"in e&&e.token&&(this.token=e.token),"apiKey"in e&&e.apiKey&&(this.apiKey=e.apiKey),!this.token&&!this.apiKey)throw new Error("API key or temporary token is required.")}connectionUrl(){const e=new URL(this.realtimeUrl);if("wss:"!==e.protocol)throw new Error("Invalid protocol, must be wss");const t=new URLSearchParams;return this.token&&t.set("token",this.token),t.set("sample_rate",this.sampleRate.toString()),this.wordBoost&&this.wordBoost.length>0&&t.set("word_boost",JSON.stringify(this.wordBoost)),this.encoding&&t.set("encoding",this.encoding),e.search=t.toString(),e}on(e,t){this.listeners[e]=t}connect(){return new Promise((e=>{if(this.socket)throw new Error("Already connected");const t=this.connectionUrl();this.token?this.socket=new a(t.toString()):this.socket=new a(t.toString(),{headers:{Authorization:this.apiKey}}),this.socket.binaryType="arraybuffer",this.socket.onopen=()=>{void 0!==this.endUtteranceSilenceThreshold&&null!==this.endUtteranceSilenceThreshold&&this.configureEndUtteranceSilenceThreshold(this.endUtteranceSilenceThreshold)},this.socket.onclose=({code:e,reason:t})=>{var s,i;t||e in r&&(t=c[e]),null===(i=(s=this.listeners).close)||void 0===i||i.call(s,e,t)},this.socket.onerror=e=>{var t,s,i,n;e.error?null===(s=(t=this.listeners).error)||void 0===s||s.call(t,e.error):null===(n=(i=this.listeners).error)||void 0===n||n.call(i,new Error(e.message))},this.socket.onmessage=({data:t})=>{var s,i,n,o,r,a,c,d,h,u,f,p,m;const y=JSON.parse(t.toString());if("error"in y)null===(i=(s=this.listeners).error)||void 0===i||i.call(s,new l(y.error));else switch(y.message_type){case"SessionBegins":{const t={sessionId:y.session_id,expiresAt:new Date(y.expires_at)};e(t),null===(o=(n=this.listeners).open)||void 0===o||o.call(n,t);break}case"PartialTranscript":y.created=new Date(y.created),null===(a=(r=this.listeners).transcript)||void 0===a||a.call(r,y),null===(d=(c=this.listeners)["transcript.partial"])||void 0===d||d.call(c,y);break;case"FinalTranscript":y.created=new Date(y.created),null===(u=(h=this.listeners).transcript)||void 0===u||u.call(h,y),null===(p=(f=this.listeners)["transcript.final"])||void 0===p||p.call(f,y);break;case"SessionTerminated":null===(m=this.sessionTerminatedResolve)||void 0===m||m.call(this)}}}))}sendAudio(e){this.send(e)}stream(){return new n({write:e=>{this.sendAudio(e)}})}forceEndUtterance(){this.send('{"force_end_utterance":true}')}configureEndUtteranceSilenceThreshold(e){this.send(`{"end_utterance_silence_threshold":${e}}`)}send(e){if(!this.socket||this.socket.readyState!==a.OPEN)throw new Error("Socket 
is not open for communication");this.socket.send(e)}close(e=!0){return t(this,void 0,void 0,(function*(){if(this.socket){if(this.socket.readyState===a.OPEN)if(e){const e=new Promise((e=>{this.sessionTerminatedResolve=e}));this.socket.send(d),yield e}else this.socket.send(d);"removeAllListeners"in this.socket&&this.socket.removeAllListeners(),this.socket.close()}this.listeners={},this.socket=void 0}))}}class u extends s{constructor(e){super(e),this.rtFactoryParams=e}createService(e){return this.transcriber(e)}transcriber(e){const t=Object.assign({},e);return t.token||t.apiKey||(t.apiKey=this.rtFactoryParams.apiKey),new h(t)}createTemporaryToken(e){return t(this,void 0,void 0,(function*(){return(yield this.fetchJson("/v2/realtime/token",{method:"POST",body:JSON.stringify(e)})).token}))}}function f(e){return e.startsWith("http")||e.startsWith("https")?null:e.startsWith("file://")?e.substring(7):e.startsWith("file:")?e.substring(5):e}class p extends s{constructor(e,t){super(e),this.files=t}transcribe(e,s){return t(this,void 0,void 0,(function*(){const t=yield this.submit(e);return yield this.waitUntilReady(t.id,s)}))}submit(e){return t(this,void 0,void 0,(function*(){const{audio:t}=e,s=function(e,t){var s={};for(var i in e)Object.prototype.hasOwnProperty.call(e,i)&&t.indexOf(i)<0&&(s[i]=e[i]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var n=0;for(i=Object.getOwnPropertySymbols(e);n<i.length;n++)t.indexOf(i[n])<0&&Object.prototype.propertyIsEnumerable.call(e,i[n])&&(s[i[n]]=e[i[n]])}return s}(e,["audio"]);let i;if("string"==typeof t){const e=f(t);i=null!==e?yield this.files.upload(e):t}else i=yield this.files.upload(t);return yield this.fetchJson("/v2/transcript",{method:"POST",body:JSON.stringify(Object.assign(Object.assign({},s),{audio_url:i}))})}))}create(e,s){var i;return t(this,void 0,void 0,(function*(){const t=f(e.audio_url);if(null!==t){const s=yield this.files.upload(t);e.audio_url=s}const n=yield this.fetchJson("/v2/transcript",{method:"POST",body:JSON.stringify(e)});return null===(i=null==s?void 0:s.poll)||void 0===i||i?yield this.waitUntilReady(n.id,s):n}))}waitUntilReady(e,s){var i,n;return t(this,void 0,void 0,(function*(){const t=null!==(i=null==s?void 0:s.pollingInterval)&&void 0!==i?i:3e3,o=null!==(n=null==s?void 0:s.pollingTimeout)&&void 0!==n?n:-1,r=Date.now();for(;;){const s=yield this.get(e);if("completed"===s.status||"error"===s.status)return s;if(o>0&&Date.now()-r>o)throw new Error("Polling timeout");yield new Promise((e=>setTimeout(e,t)))}}))}get(e){return this.fetchJson(`/v2/transcript/${e}`)}list(e){return t(this,void 0,void 0,(function*(){let t="/v2/transcript";"string"==typeof e?t=e:e&&(t=`${t}?${new URLSearchParams(Object.keys(e).map((t=>{var s;return[t,(null===(s=e[t])||void 0===s?void 0:s.toString())||""]})))}`);const s=yield this.fetchJson(t);for(const e of s.transcripts)e.created=new Date(e.created),e.completed&&(e.completed=new Date(e.completed));return s}))}delete(e){return this.fetchJson(`/v2/transcript/${e}`,{method:"DELETE"})}wordSearch(e,t){const s=new URLSearchParams({words:t.join(",")});return this.fetchJson(`/v2/transcript/${e}/word-search?${s.toString()}`)}sentences(e){return this.fetchJson(`/v2/transcript/${e}/sentences`)}paragraphs(e){return this.fetchJson(`/v2/transcript/${e}/paragraphs`)}subtitles(e,s="srt",i){return t(this,void 0,void 0,(function*(){let t=`/v2/transcript/${e}/${s}`;if(i){const e=new URLSearchParams;e.set("chars_per_caption",i.toString()),t+=`?${e.toString()}`}const n=yield this.fetch(t);return yield 
n.text()}))}redactions(e){return this.fetchJson(`/v2/transcript/${e}/redacted-audio`)}}class m extends s{upload(e){return t(this,void 0,void 0,(function*(){let s;s="string"==typeof e?yield function(e){return t(this,void 0,void 0,(function*(){throw new Error("Interacting with the file system is not supported in this environment.")}))}():e;return(yield this.fetchJson("/v2/upload",{method:"POST",body:s,headers:{"Content-Type":"application/octet-stream"},duplex:"half"})).upload_url}))}}e.AssemblyAI=class{constructor(e){e.baseUrl=e.baseUrl||"https://api.assemblyai.com",e.baseUrl&&e.baseUrl.endsWith("/")&&(e.baseUrl=e.baseUrl.slice(0,-1)),this.files=new m(e),this.transcripts=new p(e,this.files),this.lemur=new i(e),this.realtime=new u(e)}},e.FileService=m,e.LemurService=i,e.RealtimeService=class extends h{},e.RealtimeServiceFactory=class extends u{},e.RealtimeTranscriber=h,e.RealtimeTranscriberFactory=u,e.TranscriptService=p})); | ||
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).assemblyai={})}(this,(function(e){"use strict";function t(e,t,s,i){return new(s||(s=Promise))((function(n,o){function r(e){try{c(i.next(e))}catch(e){o(e)}}function a(e){try{c(i.throw(e))}catch(e){o(e)}}function c(e){var t;e.done?n(e.value):(t=e.value,t instanceof s?t:new s((function(e){e(t)}))).then(r,a)}c((i=i.apply(e,t||[])).next())}))}"function"==typeof SuppressedError&&SuppressedError;class s{constructor(e){this.params=e}fetch(e,s){return t(this,void 0,void 0,(function*(){var t;(s=null!=s?s:{}).headers=null!==(t=s.headers)&&void 0!==t?t:{},s.headers=Object.assign({Authorization:this.params.apiKey,"Content-Type":"application/json"},s.headers),e.startsWith("http")||(e=this.params.baseUrl+e);const i=yield fetch(e,s);if(i.status>=400){let e;const t=yield i.text();if(t){try{e=JSON.parse(t)}catch(e){}if(null==e?void 0:e.error)throw new Error(e.error);throw new Error(t)}throw new Error(`HTTP Error: ${i.status} ${i.statusText}`)}return i}))}fetchJson(e,s){return t(this,void 0,void 0,(function*(){return(yield this.fetch(e,s)).json()}))}}class i extends s{summary(e){return this.fetchJson("/lemur/v3/generate/summary",{method:"POST",body:JSON.stringify(e)})}questionAnswer(e){return this.fetchJson("/lemur/v3/generate/question-answer",{method:"POST",body:JSON.stringify(e)})}actionItems(e){return this.fetchJson("/lemur/v3/generate/action-items",{method:"POST",body:JSON.stringify(e)})}task(e){return this.fetchJson("/lemur/v3/generate/task",{method:"POST",body:JSON.stringify(e)})}purgeRequestData(e){return this.fetchJson(`/lemur/v3/${e}`,{method:"DELETE"})}}const{WritableStream:n}="undefined"!=typeof window?window:"undefined"!=typeof global?global:globalThis;var o=null;"undefined"!=typeof WebSocket?o=WebSocket:"undefined"!=typeof MozWebSocket?o=MozWebSocket:"undefined"!=typeof global?o=global.WebSocket||global.MozWebSocket:"undefined"!=typeof window?o=window.WebSocket||window.MozWebSocket:"undefined"!=typeof self&&(o=self.WebSocket||self.MozWebSocket);var r,a=o;!function(e){e[e.BadSampleRate=4e3]="BadSampleRate",e[e.AuthFailed=4001]="AuthFailed",e[e.InsufficientFundsOrFreeAccount=4002]="InsufficientFundsOrFreeAccount",e[e.NonexistentSessionId=4004]="NonexistentSessionId",e[e.SessionExpired=4008]="SessionExpired",e[e.ClosedSession=4010]="ClosedSession",e[e.RateLimited=4029]="RateLimited",e[e.UniqueSessionViolation=4030]="UniqueSessionViolation",e[e.SessionTimeout=4031]="SessionTimeout",e[e.AudioTooShort=4032]="AudioTooShort",e[e.AudioTooLong=4033]="AudioTooLong",e[e.BadJson=4100]="BadJson",e[e.BadSchema=4101]="BadSchema",e[e.TooManyStreams=4102]="TooManyStreams",e[e.Reconnected=4103]="Reconnected",e[e.ReconnectAttemptsExhausted=1013]="ReconnectAttemptsExhausted"}(r||(r={}));const c={[r.BadSampleRate]:"Sample rate must be a positive integer",[r.AuthFailed]:"Not Authorized",[r.InsufficientFundsOrFreeAccount]:"Insufficient funds or you are using a free account. This feature is paid-only and requires you to add a credit card. 
Please visit https://assemblyai.com/dashboard/ to add a credit card to your account.",[r.NonexistentSessionId]:"Session ID does not exist",[r.SessionExpired]:"Session has expired",[r.ClosedSession]:"Session is closed",[r.RateLimited]:"Rate limited",[r.UniqueSessionViolation]:"Unique session violation",[r.SessionTimeout]:"Session Timeout",[r.AudioTooShort]:"Audio too short",[r.AudioTooLong]:"Audio too long",[r.BadJson]:"Bad JSON",[r.BadSchema]:"Bad schema",[r.TooManyStreams]:"Too many streams",[r.Reconnected]:"Reconnected",[r.ReconnectAttemptsExhausted]:"Reconnect attempts exhausted"};class l extends Error{}const d='{"terminate_session":true}';class h{constructor(e){var t,s;if(this.listeners={},this.realtimeUrl=null!==(t=e.realtimeUrl)&&void 0!==t?t:"wss://api.assemblyai.com/v2/realtime/ws",this.sampleRate=null!==(s=e.sampleRate)&&void 0!==s?s:16e3,this.wordBoost=e.wordBoost,this.encoding=e.encoding,this.endUtteranceSilenceThreshold=e.endUtteranceSilenceThreshold,"token"in e&&e.token&&(this.token=e.token),"apiKey"in e&&e.apiKey&&(this.apiKey=e.apiKey),!this.token&&!this.apiKey)throw new Error("API key or temporary token is required.")}connectionUrl(){const e=new URL(this.realtimeUrl);if("wss:"!==e.protocol)throw new Error("Invalid protocol, must be wss");const t=new URLSearchParams;return this.token&&t.set("token",this.token),t.set("sample_rate",this.sampleRate.toString()),this.wordBoost&&this.wordBoost.length>0&&t.set("word_boost",JSON.stringify(this.wordBoost)),this.encoding&&t.set("encoding",this.encoding),e.search=t.toString(),e}on(e,t){this.listeners[e]=t}connect(){return new Promise((e=>{if(this.socket)throw new Error("Already connected");const t=this.connectionUrl();this.token?this.socket=new a(t.toString()):this.socket=new a(t.toString(),{headers:{Authorization:this.apiKey}}),this.socket.binaryType="arraybuffer",this.socket.onopen=()=>{void 0!==this.endUtteranceSilenceThreshold&&null!==this.endUtteranceSilenceThreshold&&this.configureEndUtteranceSilenceThreshold(this.endUtteranceSilenceThreshold)},this.socket.onclose=({code:e,reason:t})=>{var s,i;t||e in r&&(t=c[e]),null===(i=(s=this.listeners).close)||void 0===i||i.call(s,e,t)},this.socket.onerror=e=>{var t,s,i,n;e.error?null===(s=(t=this.listeners).error)||void 0===s||s.call(t,e.error):null===(n=(i=this.listeners).error)||void 0===n||n.call(i,new Error(e.message))},this.socket.onmessage=({data:t})=>{var s,i,n,o,r,a,c,d,h,u,f,p,m;const y=JSON.parse(t.toString());if("error"in y)null===(i=(s=this.listeners).error)||void 0===i||i.call(s,new l(y.error));else switch(y.message_type){case"SessionBegins":{const t={sessionId:y.session_id,expiresAt:new Date(y.expires_at)};e(t),null===(o=(n=this.listeners).open)||void 0===o||o.call(n,t);break}case"PartialTranscript":y.created=new Date(y.created),null===(a=(r=this.listeners).transcript)||void 0===a||a.call(r,y),null===(d=(c=this.listeners)["transcript.partial"])||void 0===d||d.call(c,y);break;case"FinalTranscript":y.created=new Date(y.created),null===(u=(h=this.listeners).transcript)||void 0===u||u.call(h,y),null===(p=(f=this.listeners)["transcript.final"])||void 0===p||p.call(f,y);break;case"SessionTerminated":null===(m=this.sessionTerminatedResolve)||void 0===m||m.call(this)}}}))}sendAudio(e){this.send(e)}stream(){return new n({write:e=>{this.sendAudio(e)}})}forceEndUtterance(){this.send('{"force_end_utterance":true}')}configureEndUtteranceSilenceThreshold(e){this.send(`{"end_utterance_silence_threshold":${e}}`)}send(e){if(!this.socket||this.socket.readyState!==a.OPEN)throw new Error("Socket 
is not open for communication");this.socket.send(e)}close(){return t(this,arguments,void 0,(function*(e=!0){if(this.socket){if(this.socket.readyState===a.OPEN)if(e){const e=new Promise((e=>{this.sessionTerminatedResolve=e}));this.socket.send(d),yield e}else this.socket.send(d);"removeAllListeners"in this.socket&&this.socket.removeAllListeners(),this.socket.close()}this.listeners={},this.socket=void 0}))}}class u extends s{constructor(e){super(e),this.rtFactoryParams=e}createService(e){return this.transcriber(e)}transcriber(e){const t=Object.assign({},e);return t.token||t.apiKey||(t.apiKey=this.rtFactoryParams.apiKey),new h(t)}createTemporaryToken(e){return t(this,void 0,void 0,(function*(){return(yield this.fetchJson("/v2/realtime/token",{method:"POST",body:JSON.stringify(e)})).token}))}}function f(e){return e.startsWith("http")||e.startsWith("https")?null:e.startsWith("file://")?e.substring(7):e.startsWith("file:")?e.substring(5):e}class p extends s{constructor(e,t){super(e),this.files=t}transcribe(e,s){return t(this,void 0,void 0,(function*(){const t=yield this.submit(e);return yield this.waitUntilReady(t.id,s)}))}submit(e){return t(this,void 0,void 0,(function*(){let t,s;if("audio"in e){const{audio:i}=e,n=function(e,t){var s={};for(var i in e)Object.prototype.hasOwnProperty.call(e,i)&&t.indexOf(i)<0&&(s[i]=e[i]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var n=0;for(i=Object.getOwnPropertySymbols(e);n<i.length;n++)t.indexOf(i[n])<0&&Object.prototype.propertyIsEnumerable.call(e,i[n])&&(s[i[n]]=e[i[n]])}return s}(e,["audio"]);if("string"==typeof i){const e=f(i);t=null!==e?yield this.files.upload(e):i}else t=yield this.files.upload(i);s=Object.assign(Object.assign({},n),{audio_url:t})}else s=e;return yield this.fetchJson("/v2/transcript",{method:"POST",body:JSON.stringify(s)})}))}create(e,s){return t(this,void 0,void 0,(function*(){var t;const i=f(e.audio_url);if(null!==i){const t=yield this.files.upload(i);e.audio_url=t}const n=yield this.fetchJson("/v2/transcript",{method:"POST",body:JSON.stringify(e)});return null===(t=null==s?void 0:s.poll)||void 0===t||t?yield this.waitUntilReady(n.id,s):n}))}waitUntilReady(e,s){return t(this,void 0,void 0,(function*(){var t,i;const n=null!==(t=null==s?void 0:s.pollingInterval)&&void 0!==t?t:3e3,o=null!==(i=null==s?void 0:s.pollingTimeout)&&void 0!==i?i:-1,r=Date.now();for(;;){const t=yield this.get(e);if("completed"===t.status||"error"===t.status)return t;if(o>0&&Date.now()-r>o)throw new Error("Polling timeout");yield new Promise((e=>setTimeout(e,n)))}}))}get(e){return this.fetchJson(`/v2/transcript/${e}`)}list(e){return t(this,void 0,void 0,(function*(){let t="/v2/transcript";"string"==typeof e?t=e:e&&(t=`${t}?${new URLSearchParams(Object.keys(e).map((t=>{var s;return[t,(null===(s=e[t])||void 0===s?void 0:s.toString())||""]})))}`);const s=yield this.fetchJson(t);for(const e of s.transcripts)e.created=new Date(e.created),e.completed&&(e.completed=new Date(e.completed));return s}))}delete(e){return this.fetchJson(`/v2/transcript/${e}`,{method:"DELETE"})}wordSearch(e,t){const s=new URLSearchParams({words:t.join(",")});return this.fetchJson(`/v2/transcript/${e}/word-search?${s.toString()}`)}sentences(e){return this.fetchJson(`/v2/transcript/${e}/sentences`)}paragraphs(e){return this.fetchJson(`/v2/transcript/${e}/paragraphs`)}subtitles(e){return t(this,arguments,void 0,(function*(e,t="srt",s){let i=`/v2/transcript/${e}/${t}`;if(s){const e=new URLSearchParams;e.set("chars_per_caption",s.toString()),i+=`?${e.toString()}`}const n=yield 
this.fetch(i);return yield n.text()}))}redactions(e){return this.fetchJson(`/v2/transcript/${e}/redacted-audio`)}}class m extends s{upload(e){return t(this,void 0,void 0,(function*(){let s;s="string"==typeof e?yield function(e){return t(this,void 0,void 0,(function*(){throw new Error("Interacting with the file system is not supported in this environment.")}))}():e;return(yield this.fetchJson("/v2/upload",{method:"POST",body:s,headers:{"Content-Type":"application/octet-stream"},duplex:"half"})).upload_url}))}}e.AssemblyAI=class{constructor(e){e.baseUrl=e.baseUrl||"https://api.assemblyai.com",e.baseUrl&&e.baseUrl.endsWith("/")&&(e.baseUrl=e.baseUrl.slice(0,-1)),this.files=new m(e),this.transcripts=new p(e,this.files),this.lemur=new i(e),this.realtime=new u(e)}},e.FileService=m,e.LemurService=i,e.RealtimeService=class extends h{},e.RealtimeServiceFactory=class extends u{},e.RealtimeTranscriber=h,e.RealtimeTranscriberFactory=u,e.TranscriptService=p})); |
@@ -9,3 +9,3 @@ import { BaseServiceParams } from ".."; | ||
* Create a new service. | ||
* @param params The parameters to use for the service. | ||
* @param params - The parameters to use for the service. | ||
*/ | ||
@@ -12,0 +12,0 @@ constructor(params: BaseServiceParams); |
@@ -6,6 +6,6 @@ import { BaseService } from "../base"; | ||
* Upload a local file to AssemblyAI. | ||
* @param input The local file path to upload, or a stream or buffer of the file to upload. | ||
* @return A promise that resolves to the uploaded file URL. | ||
* @param input - The local file path to upload, or a stream or buffer of the file to upload. | ||
* @returns A promise that resolves to the uploaded file URL. | ||
*/ | ||
upload(input: FileUploadParams): Promise<string>; | ||
} |
@@ -25,3 +25,3 @@ import { BaseServiceParams } from ".."; | ||
* Create a new AssemblyAI client. | ||
* @param params The parameters for the service, including the API key and base URL, if any. | ||
* @param params - The parameters for the service, including the API key and base URL, if any. | ||
*/ | ||
@@ -28,0 +28,0 @@ constructor(params: BaseServiceParams); |
@@ -10,5 +10,5 @@ import { LemurSummaryParams, LemurActionItemsParams, LemurQuestionAnswerParams, LemurTaskParams, LemurSummaryResponse, LemurQuestionAnswerResponse, LemurActionItemsResponse, LemurTaskResponse, PurgeLemurRequestDataResponse } from "../.."; | ||
* Delete the data for a previously submitted LeMUR request. | ||
* @param id ID of the LeMUR request | ||
* @param id - ID of the LeMUR request | ||
*/ | ||
purgeRequestData(id: string): Promise<PurgeLemurRequestDataResponse>; | ||
} |
@@ -30,4 +30,4 @@ import { RealtimeTranscriberParams, RealtimeTranscript, PartialTranscript, FinalTranscript, SessionBeginsEventData, AudioData } from "../.."; | ||
* Configure the threshold for how long to wait before ending an utterance. Default is 700ms. | ||
* @param threshold The duration of the end utterance silence threshold in milliseconds | ||
* @format integer | ||
* @param threshold - The duration of the end utterance silence threshold in milliseconds. | ||
* This value must be an integer between 0 and 20_000. | ||
*/ | ||
@@ -34,0 +34,0 @@ configureEndUtteranceSilenceThreshold(threshold: number): void; |
import { BaseService } from "../base"; | ||
import { ParagraphsResponse, SentencesResponse, Transcript, TranscriptList, TranscriptParams, CreateTranscriptOptions, Createable, Deletable, Listable, Retrieveable, SubtitleFormat, RedactedAudioResponse, ListTranscriptParams, WordSearchResponse, BaseServiceParams, PollingOptions, TranscribeParams, TranscribeOptions, SubmitParams } from "../.."; | ||
import { ParagraphsResponse, SentencesResponse, Transcript, TranscriptList, TranscriptParams, CreateTranscriptOptions, SubtitleFormat, RedactedAudioResponse, ListTranscriptParams, WordSearchResponse, BaseServiceParams, PollingOptions, TranscribeParams, TranscribeOptions, SubmitParams } from "../.."; | ||
import { FileService } from "../files"; | ||
export declare class TranscriptService extends BaseService implements Createable<Transcript, TranscriptParams, CreateTranscriptOptions>, Retrieveable<Transcript>, Deletable<Transcript>, Listable<TranscriptList> { | ||
export declare class TranscriptService extends BaseService { | ||
private files; | ||
@@ -9,4 +9,4 @@ constructor(params: BaseServiceParams, files: FileService); | ||
* Transcribe an audio file. This will create a transcript and wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to transcribe an audio file. | ||
* @param options The options to transcribe an audio file. | ||
* @param params - The parameters to transcribe an audio file. | ||
* @param options - The options to transcribe an audio file. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
@@ -17,3 +17,3 @@ */ | ||
* Submits a transcription job for an audio file. This will not wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to start the transcription of an audio file. | ||
* @param params - The parameters to start the transcription of an audio file. | ||
* @returns A promise that resolves to the queued transcript. | ||
@@ -24,4 +24,4 @@ */ | ||
* Create a transcript. | ||
* @param params The parameters to create a transcript. | ||
* @param options The options used for creating the new transcript. | ||
* @param params - The parameters to create a transcript. | ||
* @param options - The options used for creating the new transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -33,4 +33,4 @@ * @deprecated Use `transcribe` instead to transcribe an audio file that includes polling, or `submit` to transcribe an audio file without polling. | ||
* Wait until the transcript is ready, i.e. the status is "completed" or "error". | ||
* @param transcriptId The ID of the transcript. | ||
* @param options The options to wait until the transcript is ready. | ||
* @param transcriptId - The ID of the transcript. | ||
* @param options - The options to wait until the transcript is ready. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
@@ -41,3 +41,3 @@ */ | ||
* Retrieve a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -48,8 +48,8 @@ */ | ||
* Retrieves a page of transcript listings. | ||
* @param parameters The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
* @param params - The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
*/ | ||
list(parameters?: ListTranscriptParams | string): Promise<TranscriptList>; | ||
list(params?: ListTranscriptParams | string): Promise<TranscriptList>; | ||
/** | ||
* Delete a transcript | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -61,5 +61,5 @@ */ | ||
* You can search for individual words, numbers, or phrases containing up to five words or numbers. | ||
* @param id The identifier of the transcript. | ||
* @param words Keywords to search for. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @param words - Keywords to search for. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -69,4 +69,4 @@ wordSearch(id: string, words: string[]): Promise<WordSearchResponse>; | ||
* Retrieve all sentences of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -76,4 +76,4 @@ sentences(id: string): Promise<SentencesResponse>; | ||
* Retrieve all paragraphs of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the paragraphs. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the paragraphs. | ||
*/ | ||
@@ -83,6 +83,6 @@ paragraphs(id: string): Promise<ParagraphsResponse>; | ||
* Retrieve subtitles of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param format The format of the subtitles. | ||
* @param chars_per_caption The maximum number of characters per caption. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @param format - The format of the subtitles. | ||
* @param chars_per_caption - The maximum number of characters per caption. | ||
* @returns A promise that resolves to the subtitles text. | ||
*/ | ||
@@ -92,6 +92,6 @@ subtitles(id: string, format?: SubtitleFormat, chars_per_caption?: number): Promise<string>; | ||
* Retrieve redactions of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the redacted audio response. | ||
*/ | ||
redactions(id: string): Promise<RedactedAudioResponse>; | ||
} |
/** | ||
* Format: binary | ||
* @description Binary audio data | ||
* Binary audio data | ||
*/ | ||
export type AudioData = ArrayBufferLike; | ||
/** | ||
* @description The encoding of the audio data | ||
* @default pcm_s16le | ||
* @enum {string} | ||
* The encoding of the audio data | ||
* @defaultValue "pcm_s16"le | ||
*/ | ||
export type AudioEncoding = "pcm_s16le" | "pcm_mulaw"; | ||
/** @description Configure the threshold for how long to wait before ending an utterance. Default is 700ms. */ | ||
/** | ||
* Configure the threshold for how long to wait before ending an utterance. Default is 700ms. | ||
*/ | ||
export type ConfigureEndUtteranceSilenceThreshold = { | ||
/** @description The duration threshold in milliseconds */ | ||
/** | ||
* The duration threshold in milliseconds | ||
*/ | ||
end_utterance_silence_threshold: number; | ||
@@ -19,22 +21,27 @@ }; | ||
/** | ||
* @description Describes the type of message | ||
* @constant | ||
* Describes the type of message | ||
*/ | ||
message_type: "FinalTranscript"; | ||
/** @description Whether the text is punctuated and cased */ | ||
/** | ||
* Whether the text is punctuated and cased | ||
*/ | ||
punctuated: boolean; | ||
/** @description Whether the text is formatted, for example Dollar -> $ */ | ||
/** | ||
* Whether the text is formatted, for example Dollar -> $ | ||
*/ | ||
text_formatted: boolean; | ||
}; | ||
/** @description Manually end an utterance */ | ||
/** | ||
* Manually end an utterance | ||
*/ | ||
export type ForceEndUtterance = { | ||
/** @description A boolean value to communicate that you wish to force the end of the utterance */ | ||
/** | ||
* A boolean value to communicate that you wish to force the end of the utterance | ||
*/ | ||
force_end_utterance: boolean; | ||
}; | ||
/** @enum {string} */ | ||
export type MessageType = "SessionBegins" | "PartialTranscript" | "FinalTranscript" | "SessionTerminated"; | ||
export type PartialTranscript = RealtimeBaseTranscript & { | ||
/** | ||
* @description Describes the type of message | ||
* @constant | ||
* Describes the type of message | ||
*/ | ||
@@ -44,21 +51,30 @@ message_type: "PartialTranscript"; | ||
export type RealtimeBaseMessage = { | ||
/** @description Describes the type of the message */ | ||
/** | ||
* Describes the type of the message | ||
*/ | ||
message_type: MessageType; | ||
}; | ||
export type RealtimeBaseTranscript = { | ||
/** @description End time of audio sample relative to session start, in milliseconds */ | ||
/** | ||
* End time of audio sample relative to session start, in milliseconds | ||
*/ | ||
audio_end: number; | ||
/** @description Start time of audio sample relative to session start, in milliseconds */ | ||
/** | ||
* Start time of audio sample relative to session start, in milliseconds | ||
*/ | ||
audio_start: number; | ||
/** | ||
* Format: double | ||
* @description The confidence score of the entire transcription, between 0 and 1 | ||
* The confidence score of the entire transcription, between 0 and 1 | ||
*/ | ||
confidence: number; | ||
/** @description The timestamp for the partial transcript */ | ||
/** | ||
* The timestamp for the partial transcript | ||
*/ | ||
created: Date; | ||
/** @description The partial transcript for your audio */ | ||
/** | ||
* The partial transcript for your audio | ||
*/ | ||
text: string; | ||
/** | ||
* @description An array of objects, with the information for each word in the transcription text. | ||
* An array of objects, with the information for each word in the transcription text. | ||
* Includes the start and end time of the word in milliseconds, the confidence score of the word, and the text, which is the word itself. | ||
@@ -73,15 +89,14 @@ */ | ||
export type RealtimeTranscript = PartialTranscript | FinalTranscript; | ||
/** @enum {string} */ | ||
export type RealtimeTranscriptType = "PartialTranscript" | "FinalTranscript"; | ||
export type SessionBegins = RealtimeBaseMessage & { | ||
/** @description Timestamp when this session will expire */ | ||
/** | ||
* Timestamp when this session will expire | ||
*/ | ||
expires_at: Date; | ||
/** | ||
* @description Describes the type of the message | ||
* @constant | ||
* Describes the type of the message | ||
*/ | ||
message_type: "SessionBegins"; | ||
/** | ||
* Format: uuid | ||
* @description Unique identifier for the established session | ||
* Unique identifier for the established session | ||
*/ | ||
@@ -92,9 +107,10 @@ session_id: string; | ||
/** | ||
* @description Describes the type of the message | ||
* @constant | ||
* Describes the type of the message | ||
*/ | ||
message_type: "SessionTerminated"; | ||
}; | ||
export type TerminateSession = RealtimeBaseMessage & { | ||
/** @description Set to true to end your real-time session forever */ | ||
export type TerminateSession = { | ||
/** | ||
* Set to true to end your real-time session forever | ||
*/ | ||
terminate_session: boolean; | ||
@@ -104,12 +120,17 @@ }; | ||
/** | ||
* Format: double | ||
* @description Confidence score of the word | ||
* Confidence score of the word | ||
*/ | ||
confidence: number; | ||
/** @description End time of the word in milliseconds */ | ||
/** | ||
* End time of the word in milliseconds | ||
*/ | ||
end: number; | ||
/** @description Start time of the word in milliseconds */ | ||
/** | ||
* Start time of the word in milliseconds | ||
*/ | ||
start: number; | ||
/** @description The word itself */ | ||
/** | ||
* The word itself | ||
*/ | ||
text: string; | ||
}; |
@@ -5,3 +5,2 @@ type BaseServiceParams = { | ||
}; | ||
export * from "./abstractions"; | ||
export type { BaseServiceParams }; |
import { FileUploadParams } from "../files"; | ||
import { TranscriptParams } from "../openapi.generated"; | ||
/** | ||
* Options for polling. | ||
*/ | ||
export type PollingOptions = { | ||
/** | ||
* The amount of time to wait between polling requests. | ||
* @default 3000 or every 3 seconds | ||
* @defaultValue 3000 or every 3 seconds | ||
*/ | ||
@@ -11,10 +14,13 @@ pollingInterval?: number; | ||
* The maximum amount of time to wait for the transcript to be ready. | ||
* @default -1 which means wait forever | ||
* @defaultValue -1 which means wait forever | ||
*/ | ||
pollingTimeout?: number; | ||
}; | ||
/** | ||
* @deprecated Use `TranscriptService.transcribe` with `TranscribeOptions`. | ||
*/ | ||
export type CreateTranscriptOptions = { | ||
/** | ||
* Whether to poll the transcript until it is ready. | ||
* @default true | ||
* @defaultValue true | ||
*/ | ||
@@ -30,5 +36,8 @@ poll?: boolean; | ||
*/ | ||
export type TranscribeParams = { | ||
export type TranscribeParams = ({ | ||
/** | ||
* The audio to transcribe. This can be a public URL, a local file path, a readable file stream, or a file buffer. | ||
*/ | ||
audio: AudioToTranscribe; | ||
} & Omit<TranscriptParams, "audio_url">; | ||
} & Omit<TranscriptParams, "audio_url">) | TranscriptParams; | ||
/** | ||
@@ -35,0 +44,0 @@ * The parameters to start the transcription of an audio file. |
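To make the widened `TranscribeParams` union concrete, a short sketch that should type-check against 4.3.2 (file names and URLs are placeholders):
```ts
import type { TranscribeParams } from "assemblyai";

// Existing form: `audio` accepts a URL, local path, stream, or buffer.
const withAudio: TranscribeParams = { audio: "./meeting.wav" };

// New in 4.3.2: plain `TranscriptParams` with `audio_url` (URL string only).
const withAudioUrl: TranscribeParams = {
  audio_url: "https://example.com/meeting.wav",
};
```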
{ | ||
"name": "assemblyai", | ||
"version": "4.3.1", | ||
"version": "4.3.2", | ||
"description": "The AssemblyAI JavaScript SDK provides an easy-to-use interface for interacting with the AssemblyAI API, which supports async and real-time transcription, as well as the latest LeMUR models.", | ||
@@ -113,2 +113,3 @@ "engines": { | ||
"eslint": "^8.48.0", | ||
"eslint-plugin-tsdoc": "^0.2.17", | ||
"jest": "^29.5.0", | ||
@@ -115,0 +116,0 @@ "jest-cli": "^29.5.0", |
README.md
@@ -19,6 +19,10 @@ <img src="https://github.com/AssemblyAI/assemblyai-node-sdk/blob/main/assemblyai.png?raw=true" width="500"/> | ||
## Installation | ||
## Documentation | ||
You can install the AssemblyAI SDK by running: | ||
Visit the [AssemblyAI documentation](https://www.assemblyai.com/docs) for step-by-step instructions and a lot more details about our AI models and API. | ||
## Quickstart | ||
Install the AssemblyAI SDK using your preferred package manager: | ||
```bash | ||
@@ -40,7 +44,5 @@ npm install assemblyai | ||
# Usage | ||
Then, import the `assemblyai` module and create an AssemblyAI object with your API key: | ||
Import the AssemblyAI package and create an AssemblyAI object with your API key: | ||
```javascript | ||
```js | ||
import { AssemblyAI } from "assemblyai"; | ||
@@ -55,7 +57,12 @@ | ||
## Create a transcript | ||
## Speech-To-Text | ||
### Transcribe audio and video files | ||
<details open> | ||
<summary>Transcribe an audio file with a public URL</summary> | ||
When you create a transcript, you can either pass in a URL to an audio file or upload a file directly. | ||
```javascript | ||
```js | ||
// Transcribe file at remote URL | ||
@@ -65,3 +72,25 @@ let transcript = await client.transcripts.transcribe({ | ||
}); | ||
``` | ||
> **Note** | ||
> You can also pass a local file path, a stream, or a buffer as the `audio` property. | ||
`transcribe` queues a transcription job and polls it until the `status` is `completed` or `error`. | ||
If you don't want to wait until the transcript is ready, you can use `submit`: | ||
```js | ||
let transcript = await client.transcripts.submit({ | ||
audio: "https://storage.googleapis.com/aai-web-samples/espn-bears.m4a", | ||
}); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Transcribe a local audio file</summary> | ||
When you create a transcript, you can either pass in a URL to an audio file or upload a file directly. | ||
```js | ||
// Upload a file via local path and transcribe | ||
@@ -73,41 +102,47 @@ let transcript = await client.transcripts.transcribe({ | ||
> **Note** | ||
> You can also pass streams and buffers to the `audio` property. | ||
> **Note:** | ||
> You can also pass a file URL, a stream, or a buffer as the `audio` property. | ||
`transcribe` queues a transcription job and polls it until the `status` is `completed` or `error`. | ||
You can configure the polling interval and polling timeout using these options: | ||
```javascript | ||
let transcript = await client.transcripts.transcribe( | ||
{ | ||
audio: "https://storage.googleapis.com/aai-web-samples/espn-bears.m4a", | ||
}, | ||
{ | ||
// How frequently the transcript is polled in ms. Defaults to 3000. | ||
pollingInterval: 1000, | ||
// How long to wait in ms until the "Polling timeout" error is thrown. Defaults to infinite (-1). | ||
pollingTimeout: 5000, | ||
} | ||
); | ||
If you don't want to wait until the transcript is ready, you can use `submit`: | ||
```js | ||
let transcript = await client.transcripts.submit({ | ||
audio: "./news.mp4", | ||
}); | ||
``` | ||
If you don't want to wait until the transcript is ready, you can use `submit`: | ||
</details> | ||
```javascript | ||
let transcript = await client.transcripts.submit({ | ||
<details> | ||
<summary>Enable additional AI models</summary> | ||
You can extract even more insights from the audio by enabling any of our [AI models](https://www.assemblyai.com/docs/audio-intelligence) using _transcription options_. | ||
For example, here's how to enable the [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) model to detect who said what. | ||
```js | ||
let transcript = await client.transcripts.transcribe({ | ||
audio: "https://storage.googleapis.com/aai-web-samples/espn-bears.m4a", | ||
speaker_labels: true, | ||
}); | ||
for (let utterance of transcript.utterances) { | ||
console.log(`Speaker ${utterance.speaker}: ${utterance.text}`); | ||
} | ||
``` | ||
## Get a transcript | ||
</details> | ||
<details> | ||
<summary>Get a transcript</summary> | ||
This will return the transcript object in its current state. If the transcript is still processing, the `status` field will be `queued` or `processing`. Once the transcript is complete, the `status` field will be `completed`. | ||
```javascript | ||
```js | ||
const transcript = await client.transcripts.get(transcript.id); | ||
``` | ||
If you created a transcript using `submit`, you can still poll until the transcript `status` is `completed` or `error` using `waitUntilReady`: | ||
If you created a transcript using `.submit()`, you can still poll until the transcript `status` is `completed` or `error` using `.waitUntilReady()`: | ||
```javascript | ||
```js | ||
const transcript = await client.transcripts.waitUntilReady(transcript.id, { | ||
@@ -121,7 +156,32 @@ // How frequently the transcript is polled in ms. Defaults to 3000. | ||
## List transcripts | ||
</details> | ||
<details> | ||
<summary>Get sentences and paragraphs</summary> | ||
```js | ||
const sentences = await client.transcripts.sentences(transcript.id); | ||
const paragraphs = await client.transcripts.paragraphs(transcript.id); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Get subtitles</summary> | ||
```js | ||
const charsPerCaption = 32; | ||
let srt = await client.transcripts.subtitles(transcript.id, "srt"); | ||
srt = await client.transcripts.subtitles(transcript.id, "srt", charsPerCaption); | ||
let vtt = await client.transcripts.subtitles(transcript.id, "vtt"); | ||
vtt = await client.transcripts.subtitles(transcript.id, "vtt", charsPerCaption); | ||
``` | ||
</details> | ||
<details> | ||
<summary>List transcripts</summary> | ||
This will return a page of transcripts you created. | ||
```javascript | ||
```js | ||
const page = await client.transcripts.list(); | ||
@@ -140,57 +200,15 @@ ``` | ||
## Delete a transcript | ||
</details> | ||
```javascript | ||
<details> | ||
<summary>Delete a transcript</summary> | ||
```js | ||
const res = await client.transcripts.delete(transcript.id); | ||
``` | ||
## Use LeMUR | ||
</details> | ||
Call [LeMUR endpoints](https://www.assemblyai.com/docs/API%20reference/lemur) to summarize, ask questions, generate action items, or run a custom task. | ||
### Transcribe in real-time | ||
Custom Summary: | ||
```javascript | ||
const { response } = await client.lemur.summary({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
answer_format: "one sentence", | ||
context: { | ||
speakers: ["Alex", "Bob"], | ||
}, | ||
}); | ||
``` | ||
Question & Answer: | ||
```javascript | ||
const { response } = await client.lemur.questionAnswer({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
questions: [ | ||
{ | ||
question: "What are they discussing?", | ||
answer_format: "text", | ||
}, | ||
], | ||
}); | ||
``` | ||
Action Items: | ||
```javascript | ||
const { response } = await client.lemur.actionItems({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
}); | ||
``` | ||
Custom Task: | ||
```javascript | ||
const { response } = await client.lemur.task({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
prompt: "Write a haiku about this conversation.", | ||
}); | ||
``` | ||
## Transcribe in real-time | ||
Create the real-time transcriber. | ||
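The sample code itself is collapsed by this diff; as a reference, a minimal sketch of the real-time flow using the APIs shown elsewhere in this changeset (sample rate and handlers are illustrative, and `client` is assumed to exist):
```ts
const rt = client.realtime.transcriber({ sampleRate: 16_000 });

rt.on("open", ({ sessionId }) => console.log("Session started:", sessionId));
rt.on("transcript", (transcript) => console.log(transcript.text));
rt.on("error", (error) => console.error(error));

await rt.connect();
// Send audio with rt.sendAudio(buffer) or pipe a recording into rt.stream().
await rt.close(); // waits for session termination by default
```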
@@ -265,9 +283,66 @@ | ||
# Tests | ||
## Apply LLMs to your audio with LeMUR | ||
To run the test suite, first install the dependencies, then run `pnpm test`: | ||
Call [LeMUR endpoints](https://www.assemblyai.com/docs/api-reference/lemur) to apply LLMs to your transcript. | ||
```bash | ||
pnpm install | ||
pnpm test | ||
<details open> | ||
<summary>Prompt your audio with LeMUR</summary> | ||
```js | ||
const { response } = await client.lemur.task({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
prompt: "Write a haiku about this conversation.", | ||
}); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Summarize with LeMUR</summary> | ||
```js | ||
const { response } = await client.lemur.summary({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
answer_format: "one sentence", | ||
context: { | ||
speakers: ["Alex", "Bob"], | ||
}, | ||
}); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Ask questions</summary> | ||
```js | ||
const { response } = await client.lemur.questionAnswer({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
questions: [ | ||
{ | ||
question: "What are they discussing?", | ||
answer_format: "text", | ||
}, | ||
], | ||
}); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Generate action items</summary> | ||
```js | ||
const { response } = await client.lemur.actionItems({ | ||
transcript_ids: ["0d295578-8c75-421a-885a-2c487f188927"], | ||
}); | ||
``` | ||
</details> | ||
<details> | ||
<summary>Delete LeMUR request</summary> | ||
```js | ||
const response = await client.lemur.purgeRequestData(lemurResponse.request_id); | ||
``` | ||
</details> |
@@ -10,3 +10,3 @@ import { BaseServiceParams } from ".."; | ||
* Create a new service. | ||
* @param params The parameters to use for the service. | ||
* @param params - The parameters to use for the service. | ||
*/ | ||
@@ -13,0 +13,0 @@ constructor(private params: BaseServiceParams) {} |
@@ -8,4 +8,4 @@ import { readFile } from "#fs"; | ||
* Upload a local file to AssemblyAI. | ||
* @param input The local file path to upload, or a stream or buffer of the file to upload. | ||
* @return A promise that resolves to the uploaded file URL. | ||
* @param input - The local file path to upload, or a stream or buffer of the file to upload. | ||
* @returns A promise that resolves to the uploaded file URL. | ||
*/ | ||
@@ -12,0 +12,0 @@ async upload(input: FileUploadParams): Promise<string> { |
@@ -37,3 +37,3 @@ import { BaseServiceParams } from ".."; | ||
* Create a new AssemblyAI client. | ||
* @param params The parameters for the service, including the API key and base URL, if any. | ||
* @param params - The parameters for the service, including the API key and base URL, if any. | ||
*/ | ||
@@ -40,0 +40,0 @@ constructor(params: BaseServiceParams) { |
@@ -55,3 +55,3 @@ import { | ||
* Delete the data for a previously submitted LeMUR request. | ||
* @param id ID of the LeMUR request | ||
* @param id - ID of the LeMUR request | ||
*/ | ||
@@ -58,0 +58,0 @@ purgeRequestData(id: string): Promise<PurgeLemurRequestDataResponse> { |
@@ -61,4 +61,3 @@ import { WritableStream } from "#streams"; | ||
this.encoding = params.encoding; | ||
this.endUtteranceSilenceThreshold = | ||
params.endUtteranceSilenceThreshold; | ||
this.endUtteranceSilenceThreshold = params.endUtteranceSilenceThreshold; | ||
if ("token" in params && params.token) this.token = params.token; | ||
@@ -218,4 +217,4 @@ if ("apiKey" in params && params.apiKey) this.apiKey = params.apiKey; | ||
* Configure the threshold for how long to wait before ending an utterance. Default is 700ms. | ||
* @param threshold The duration of the end utterance silence threshold in milliseconds | ||
* @format integer | ||
* @param threshold - The duration of the end utterance silence threshold in milliseconds. | ||
* This value must be an integer between 0 and 20_000. | ||
*/ | ||
@@ -222,0 +221,0 @@ configureEndUtteranceSilenceThreshold(threshold: number) { |
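For reference, calling this on a connected real-time transcriber is a one-liner. This sketch assumes a transcriber instance named `rt`, as in the earlier real-time sketch; the method name and value range come from the docs above.
```js
// Wait 1 second of silence before ending an utterance
// (default is 700 ms; the value must be an integer between 0 and 20,000)
rt.configureEndUtteranceSilenceThreshold(1_000);
```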
@@ -9,6 +9,2 @@ import { BaseService } from "../base"; | ||
CreateTranscriptOptions, | ||
Createable, | ||
Deletable, | ||
Listable, | ||
Retrieveable, | ||
SubtitleFormat, | ||
@@ -27,10 +23,3 @@ RedactedAudioResponse, | ||
export class TranscriptService | ||
extends BaseService | ||
implements | ||
Createable<Transcript, TranscriptParams, CreateTranscriptOptions>, | ||
Retrieveable<Transcript>, | ||
Deletable<Transcript>, | ||
Listable<TranscriptList> | ||
{ | ||
export class TranscriptService extends BaseService { | ||
constructor(params: BaseServiceParams, private files: FileService) { | ||
@@ -42,4 +31,4 @@ super(params); | ||
* Transcribe an audio file. This will create a transcript and wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to transcribe an audio file. | ||
* @param options The options to transcribe an audio file. | ||
* @param params - The parameters to transcribe an audio file. | ||
* @param options - The options to transcribe an audio file. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
@@ -57,20 +46,26 @@ */ | ||
* Submits a transcription job for an audio file. This will not wait until the transcript status is "completed" or "error". | ||
* @param params The parameters to start the transcription of an audio file. | ||
* @param params - The parameters to start the transcription of an audio file. | ||
* @returns A promise that resolves to the queued transcript. | ||
*/ | ||
async submit(params: SubmitParams): Promise<Transcript> { | ||
const { audio, ...createParams } = params; | ||
let audioUrl; | ||
if (typeof audio === "string") { | ||
const path = getPath(audio); | ||
if (path !== null) { | ||
// audio is local path, upload local file | ||
audioUrl = await this.files.upload(path); | ||
let transcriptParams: TranscriptParams | undefined = undefined; | ||
if ("audio" in params) { | ||
const { audio, ...audioTranscriptParams } = params; | ||
if (typeof audio === "string") { | ||
const path = getPath(audio); | ||
if (path !== null) { | ||
// audio is local path, upload local file | ||
audioUrl = await this.files.upload(path); | ||
} else { | ||
// audio is not a local path, assume it's a URL | ||
audioUrl = audio; | ||
} | ||
} else { | ||
// audio is not a local path, assume it's a URL | ||
audioUrl = audio; | ||
// audio is of uploadable type | ||
audioUrl = await this.files.upload(audio); | ||
} | ||
transcriptParams = { ...audioTranscriptParams, audio_url: audioUrl }; | ||
} else { | ||
// audio is of uploadable type | ||
audioUrl = await this.files.upload(audio); | ||
transcriptParams = params; | ||
} | ||
@@ -80,3 +75,3 @@ | ||
method: "POST", | ||
body: JSON.stringify({ ...createParams, audio_url: audioUrl }), | ||
body: JSON.stringify(transcriptParams), | ||
}); | ||
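To make the branching above concrete, here is a hedged usage sketch (file names and URLs are placeholders). It mirrors the three `audio` cases handled by `submit` — a public URL passed through, a local path or stream uploaded via the files service first — plus the plain `audio_url` form accepted by the new `TranscriptParams` branch.
```js
import { createReadStream } from "fs";

// Public URL: used directly as audio_url
await client.transcripts.submit({ audio: "https://example.com/audio.mp3" });

// Local path: uploaded via the files service first
await client.transcripts.submit({ audio: "./audio/meeting.mp3" });

// Readable stream (or buffer): also uploaded first
await client.transcripts.submit({ audio: createReadStream("./audio/meeting.mp3") });

// Plain TranscriptParams with an audio_url also works
await client.transcripts.submit({ audio_url: "https://example.com/audio.mp3" });
```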
@@ -88,4 +83,4 @@ return data; | ||
* Create a transcript. | ||
* @param params The parameters to create a transcript. | ||
* @param options The options used for creating the new transcript. | ||
* @param params - The parameters to create a transcript. | ||
* @param options - The options used for creating the new transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -118,4 +113,4 @@ * @deprecated Use `transcribe` instead to transcribe an audio file with polling, or `submit` to transcribe an audio file without polling.
* Wait until the transcript is ready, i.e. the status is "completed" or "error".
* @param transcriptId The ID of the transcript. | ||
* @param options The options to wait until the transcript is ready. | ||
* @param transcriptId - The ID of the transcript. | ||
* @param options - The options to wait until the transcript is ready. | ||
* @returns A promise that resolves to the transcript. The transcript status is "completed" or "error". | ||
@@ -148,3 +143,3 @@ */ | ||
* Retrieve a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -158,15 +153,13 @@ */ | ||
* Retrieves a page of transcript listings. | ||
* @param parameters The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
* @param params - The parameters to filter the transcript list by, or the URL to retrieve the transcript list from. | ||
*/ | ||
async list( | ||
parameters?: ListTranscriptParams | string | ||
): Promise<TranscriptList> { | ||
async list(params?: ListTranscriptParams | string): Promise<TranscriptList> { | ||
let url = "/v2/transcript"; | ||
if (typeof parameters === "string") { | ||
url = parameters; | ||
} else if (parameters) { | ||
if (typeof params === "string") { | ||
url = params; | ||
} else if (params) { | ||
url = `${url}?${new URLSearchParams( | ||
Object.keys(parameters).map((key) => [ | ||
Object.keys(params).map((key) => [ | ||
key, | ||
parameters[key as keyof ListTranscriptParams]?.toString() || "", | ||
params[key as keyof ListTranscriptParams]?.toString() || "", | ||
]) | ||
@@ -188,3 +181,3 @@ )}`; | ||
* Delete a transcript | ||
* @param id The identifier of the transcript. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the transcript. | ||
@@ -199,5 +192,5 @@ */ | ||
* You can search for individual words, numbers, or phrases containing up to five words or numbers. | ||
* @param id The identifier of the transcript. | ||
* @param words Keywords to search for. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @param words - Keywords to search for. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -213,4 +206,4 @@ wordSearch(id: string, words: string[]): Promise<WordSearchResponse> { | ||
* Retrieve all sentences of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the sentences. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the sentences. | ||
*/ | ||
@@ -223,4 +216,4 @@ sentences(id: string): Promise<SentencesResponse> { | ||
* Retrieve all paragraphs of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the paragraphs. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the paragraphs. | ||
*/ | ||
@@ -235,6 +228,6 @@ paragraphs(id: string): Promise<ParagraphsResponse> { | ||
* Retrieve subtitles of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @param format The format of the subtitles. | ||
* @param chars_per_caption The maximum number of characters per caption. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @param format - The format of the subtitles. | ||
* @param chars_per_caption - The maximum number of characters per caption. | ||
* @returns A promise that resolves to the subtitles text. | ||
*/ | ||
@@ -258,4 +251,4 @@ async subtitles( | ||
* Retrieve redactions of a transcript. | ||
* @param id The identifier of the transcript. | ||
* @return A promise that resolves to the subtitles text. | ||
* @param id - The identifier of the transcript. | ||
* @returns A promise that resolves to the subtitles text. | ||
*/ | ||
@@ -262,0 +255,0 @@ redactions(id: string): Promise<RedactedAudioResponse> { |
// this file is generated by typescript/scripts/generate-types.ts | ||
/* tslint:disable */ | ||
/* eslint-disable */ | ||
import { LiteralUnion } from "./helpers"; | ||
@@ -18,5 +17,6 @@ | ||
/* eslint-enable */ | ||
/** | ||
* Format: binary | ||
* @description Binary audio data | ||
* Binary audio data | ||
*/ | ||
@@ -26,11 +26,14 @@ export type AudioData = ArrayBufferLike; | ||
/** | ||
* @description The encoding of the audio data | ||
* @default pcm_s16le | ||
* @enum {string} | ||
* The encoding of the audio data | ||
* @defaultValue "pcm_s16le"
*/ | ||
export type AudioEncoding = "pcm_s16le" | "pcm_mulaw"; | ||
/** @description Configure the threshold for how long to wait before ending an utterance. Default is 700ms. */ | ||
/** | ||
* Configure the threshold for how long to wait before ending an utterance. Default is 700ms. | ||
*/ | ||
export type ConfigureEndUtteranceSilenceThreshold = { | ||
/** @description The duration threshold in milliseconds */ | ||
/** | ||
* The duration threshold in milliseconds | ||
*/ | ||
end_utterance_silence_threshold: number; | ||
@@ -41,19 +44,25 @@ }; | ||
/** | ||
* @description Describes the type of message | ||
* @constant | ||
* Describes the type of message | ||
*/ | ||
message_type: "FinalTranscript"; | ||
/** @description Whether the text is punctuated and cased */ | ||
/** | ||
* Whether the text is punctuated and cased | ||
*/ | ||
punctuated: boolean; | ||
/** @description Whether the text is formatted, for example Dollar -> $ */ | ||
/** | ||
* Whether the text is formatted, for example Dollar -> $ | ||
*/ | ||
text_formatted: boolean; | ||
}; | ||
/** @description Manually end an utterance */ | ||
/** | ||
* Manually end an utterance | ||
*/ | ||
export type ForceEndUtterance = { | ||
/** @description A boolean value to communicate that you wish to force the end of the utterance */ | ||
/** | ||
* A boolean value to communicate that you wish to force the end of the utterance | ||
*/ | ||
force_end_utterance: boolean; | ||
}; | ||
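These generated types describe the raw JSON messages sent over the real-time websocket. For illustration, objects matching the two shapes above would look like this; how you actually send them depends on the transcriber wrapper, which is not shown here.
```js
// Matches ConfigureEndUtteranceSilenceThreshold
const configureMessage = { end_utterance_silence_threshold: 500 };

// Matches ForceEndUtterance
const forceEndMessage = { force_end_utterance: true };
```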
/** @enum {string} */ | ||
export type MessageType = | ||
@@ -67,4 +76,3 @@ | "SessionBegins" | ||
/** | ||
* @description Describes the type of message | ||
* @constant | ||
* Describes the type of message | ||
*/ | ||
@@ -75,3 +83,5 @@ message_type: "PartialTranscript"; | ||
export type RealtimeBaseMessage = { | ||
/** @description Describes the type of the message */ | ||
/** | ||
* Describes the type of the message | ||
*/ | ||
message_type: MessageType; | ||
@@ -81,17 +91,24 @@ }; | ||
export type RealtimeBaseTranscript = { | ||
/** @description End time of audio sample relative to session start, in milliseconds */ | ||
/** | ||
* End time of audio sample relative to session start, in milliseconds | ||
*/ | ||
audio_end: number; | ||
/** @description Start time of audio sample relative to session start, in milliseconds */ | ||
/** | ||
* Start time of audio sample relative to session start, in milliseconds | ||
*/ | ||
audio_start: number; | ||
/** | ||
* Format: double | ||
* @description The confidence score of the entire transcription, between 0 and 1 | ||
* The confidence score of the entire transcription, between 0 and 1 | ||
*/ | ||
confidence: number; | ||
/** @description The timestamp for the partial transcript */ | ||
/** | ||
* The timestamp for the partial transcript | ||
*/ | ||
created: Date; | ||
/** @description The partial transcript for your audio */ | ||
/** | ||
* The partial transcript for your audio | ||
*/ | ||
text: string; | ||
/** | ||
* @description An array of objects, with the information for each word in the transcription text. | ||
* An array of objects, with the information for each word in the transcription text. | ||
* Includes the start and end time of the word in milliseconds, the confidence score of the word, and the text, which is the word itself. | ||
@@ -115,16 +132,15 @@ */ | ||
/** @enum {string} */ | ||
export type RealtimeTranscriptType = "PartialTranscript" | "FinalTranscript"; | ||
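Because every realtime message carries a `message_type`, a handler can tell partial and final transcripts apart using the `RealtimeTranscriptType` values above. A small sketch:
```js
function handleTranscript(transcript) {
  if (transcript.message_type === "FinalTranscript") {
    // Final transcripts are punctuated and formatted per the flags above
    console.log("final:", transcript.text);
  } else if (transcript.message_type === "PartialTranscript") {
    console.log("partial:", transcript.text);
  }
}
```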
export type SessionBegins = RealtimeBaseMessage & { | ||
/** @description Timestamp when this session will expire */ | ||
/** | ||
* Timestamp when this session will expire | ||
*/ | ||
expires_at: Date; | ||
/** | ||
* @description Describes the type of the message | ||
* @constant | ||
* Describes the type of the message | ||
*/ | ||
message_type: "SessionBegins"; | ||
/** | ||
* Format: uuid | ||
* @description Unique identifier for the established session | ||
* Unique identifier for the established session | ||
*/ | ||
@@ -136,4 +152,3 @@ session_id: string; | ||
/** | ||
* @description Describes the type of the message | ||
* @constant | ||
* Describes the type of the message | ||
*/ | ||
@@ -143,4 +158,6 @@ message_type: "SessionTerminated"; | ||
export type TerminateSession = RealtimeBaseMessage & { | ||
/** @description Set to true to end your real-time session forever */ | ||
export type TerminateSession = { | ||
/** | ||
* Set to true to end your real-time session forever | ||
*/ | ||
terminate_session: boolean; | ||
@@ -151,12 +168,17 @@ }; | ||
/** | ||
* Format: double | ||
* @description Confidence score of the word | ||
* Confidence score of the word | ||
*/ | ||
confidence: number; | ||
/** @description End time of the word in milliseconds */ | ||
/** | ||
* End time of the word in milliseconds | ||
*/ | ||
end: number; | ||
/** @description Start time of the word in milliseconds */ | ||
/** | ||
* Start time of the word in milliseconds | ||
*/ | ||
start: number; | ||
/** @description The word itself */ | ||
/** | ||
* The word itself | ||
*/ | ||
text: string; | ||
}; |
@@ -6,3 +6,2 @@ type BaseServiceParams = { | ||
export * from "./abstractions"; | ||
export type { BaseServiceParams }; |
import { FileUploadParams } from "../files"; | ||
import { TranscriptParams } from "../openapi.generated"; | ||
/** | ||
* Options for polling. | ||
*/ | ||
export type PollingOptions = { | ||
/** | ||
* The amount of time to wait between polling requests. | ||
* @default 3000 or every 3 seconds | ||
* @defaultValue 3000 or every 3 seconds | ||
*/ | ||
@@ -12,3 +15,3 @@ pollingInterval?: number; | ||
* The maximum amount of time to wait for the transcript to be ready. | ||
* @default -1 which means wait forever | ||
* @defaultValue -1 which means wait forever | ||
*/ | ||
@@ -18,6 +21,9 @@ pollingTimeout?: number; | ||
/** | ||
* @deprecated Use `TranscriptService.transcribe` with `TranscribeOptions`. | ||
*/ | ||
export type CreateTranscriptOptions = { | ||
/** | ||
* Whether to poll the transcript until it is ready. | ||
* @default true | ||
* @defaultValue true | ||
*/ | ||
@@ -35,6 +41,10 @@ poll?: boolean; | ||
*/ | ||
export type TranscribeParams = { audio: AudioToTranscribe } & Omit< | ||
TranscriptParams, | ||
"audio_url" | ||
>; | ||
export type TranscribeParams = | ||
| ({ | ||
/** | ||
* The audio to transcribe. This can be a public URL, a local file path, a readable file stream, or a file buffer. | ||
*/ | ||
audio: AudioToTranscribe; | ||
} & Omit<TranscriptParams, "audio_url">) | ||
| TranscriptParams; | ||
@@ -41,0 +51,0 @@ /** |
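The reworked `TranscribeParams` union above means both call styles below type-check. This is a hedged sketch with a placeholder URL.
```js
// `audio` accepts a public URL, local path, readable stream, or buffer
const t1 = await client.transcripts.transcribe({ audio: "https://example.com/audio.mp3" });

// Plain TranscriptParams with `audio_url` is now accepted as well
const t2 = await client.transcripts.transcribe({ audio_url: "https://example.com/audio.mp3" });
```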