@huggingface/inference
Comparing version 2.1.3 to 2.2.0
@@ -45,2 +45,3 @@ "use strict";
   textToImage: () => textToImage,
+  textToSpeech: () => textToSpeech,
   tokenClassification: () => tokenClassification,
@@ -76,2 +77,3 @@ translation: () => translation,
   textToImage: () => textToImage,
+  textToSpeech: () => textToSpeech,
   tokenClassification: () => tokenClassification,
@@ -291,3 +293,7 @@ translation: () => translation,
       if (event.data.length > 0) {
-        yield JSON.parse(event.data);
+        const data = JSON.parse(event.data);
+        if (typeof data === "object" && data !== null && "error" in data) {
+          throw new Error(data.error);
+        }
+        yield data;
       }
@@ -332,2 +338,12 @@ }
+// src/tasks/audio/textToSpeech.ts
+async function textToSpeech(args, options) {
+  const res = await request(args, options);
+  const isValidOutput = res && res instanceof Blob;
+  if (!isValidOutput) {
+    throw new InferenceOutputError("Expected Blob");
+  }
+  return res;
+}
 // src/tasks/cv/imageClassification.ts
@@ -664,2 +680,3 @@ async function imageClassification(args, options) {
   textToImage,
+  textToSpeech,
   tokenClassification,
@@ -666,0 +683,0 @@ translation,
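
The bundle now exports a textToSpeech task that returns the generated audio as a Blob and throws InferenceOutputError otherwise. A minimal usage sketch, assuming the standalone function export; the model ID and access token are placeholders and not part of this diff:

import { textToSpeech } from "@huggingface/inference";

// Hypothetical example: accessToken and model are placeholders.
const audio = await textToSpeech({
  accessToken: "hf_...",
  model: "espnet/kan-bayashi_ljspeech_vits",
  inputs: "Hello world, this is a test.",
});
// Per the output check above, `audio` is a Blob containing the synthesized speech.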
 {
   "name": "@huggingface/inference",
-  "version": "2.1.3",
+  "version": "2.2.0",
   "license": "MIT",
@@ -5,0 +5,0 @@ "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -68,3 +68,7 @@ import type { Options, RequestArgs } from "../../types";
       if (event.data.length > 0) {
-        yield JSON.parse(event.data) as T;
+        const data = JSON.parse(event.data);
+        if (typeof data === "object" && data !== null && "error" in data) {
+          throw new Error(data.error);
+        }
+        yield data as T;
       }
@@ -71,0 +75,0 @@ }
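
With this change, the streaming helper inspects each parsed server-sent event and throws if the payload carries an error field, instead of yielding the error object as data. A sketch of the consumer-visible effect, assuming textGenerationStream as the streaming entry point; the model ID is illustrative and not part of this diff:

import { textGenerationStream } from "@huggingface/inference";

try {
  for await (const output of textGenerationStream({
    accessToken: "hf_...",            // placeholder token
    model: "google/flan-t5-xxl",      // illustrative model, not part of the diff
    inputs: 'repeat "one two three four"',
    parameters: { max_new_tokens: 20 },
  })) {
    console.log(output.token.text);
  }
} catch (err) {
  // Error events in the stream now surface here as thrown Errors
  // rather than being yielded as parsed JSON objects.
  console.error(err);
}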
@@ -8,2 +8,3 @@ // Custom tasks with arbitrary inputs and outputs
 export * from "./audio/automaticSpeechRecognition";
+export * from "./audio/textToSpeech";
@@ -10,0 +11,0 @@ // Computer Vision tasks
@@ -47,2 +47,6 @@ import { InferenceOutputError } from "../../lib/InferenceOutputError";
   top_p?: number;
+  /**
+   * (Default: None). Integer. The maximum number of tokens from the input.
+   */
+  truncate?: number;
 };
@@ -49,0 +53,0 @@ };
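
The text generation parameters gain a truncate option that caps the number of input tokens sent to the model. A hedged sketch of passing it through the existing textGeneration task; the model ID and token are placeholders, not part of this diff:

import { textGeneration } from "@huggingface/inference";

const { generated_text } = await textGeneration({
  accessToken: "hf_...",          // placeholder token
  model: "gpt2",                  // illustrative model, not part of the diff
  inputs: "The answer to the universe is",
  parameters: {
    truncate: 512,       // new option: consider at most 512 tokens from the input
    max_new_tokens: 50,
  },
});
console.log(generated_text);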