@huggingface/inference
Comparing version 2.6.4 to 2.6.5
@@ -0,1 +1,2 @@ | ||
export interface Options { | ||
@@ -7,3 +8,3 @@ /** | ||
/** | ||
* (Default: true). Boolean. There is a cache layer on the inference API to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. | ||
* (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. | ||
*/ | ||
@@ -43,3 +44,2 @@ use_cache?: boolean; | ||
| "automatic-speech-recognition" | ||
| "conversational" | ||
| "depth-estimation" | ||
@@ -49,26 +49,43 @@ | "document-question-answering" | ||
| "fill-mask" | ||
| "graph-ml" | ||
| "image-classification" | ||
| "image-feature-extraction" | ||
| "image-segmentation" | ||
| "image-text-to-text" | ||
| "image-to-3d" | ||
| "image-to-image" | ||
| "image-to-text" | ||
| "image-to-video" | ||
| "mask-generation" | ||
| "multiple-choice" | ||
| "object-detection" | ||
| "video-classification" | ||
| "question-answering" | ||
| "reinforcement-learning" | ||
| "robotics" | ||
| "sentence-similarity" | ||
| "summarization" | ||
| "table-question-answering" | ||
| "table-to-text" | ||
| "tabular-classification" | ||
| "tabular-regression" | ||
| "tabular-to-text" | ||
| "text-classification" | ||
| "text-generation" | ||
| "text-retrieval" | ||
| "text-to-3d" | ||
| "text-to-audio" | ||
| "text-to-image" | ||
| "text-to-speech" | ||
| "text-to-video" | ||
| "text2text-generation" | ||
| "time-series-forecasting" | ||
| "token-classification" | ||
| "translation" | ||
| "unconditional-image-generation" | ||
| "video-classification" | ||
| "visual-question-answering" | ||
| "voice-activity-detection" | ||
| "zero-shot-classification" | ||
| "zero-shot-image-classification"; | ||
| "zero-shot-image-classification" | ||
| "zero-shot-object-detection"; | ||
@@ -83,3 +100,3 @@ export interface BaseArgs { | ||
/** | ||
* The model to use. Can be a full URL for HF inference endpoints. | ||
* The model to use. Can be a full URL for a dedicated inference endpoint. | ||
* | ||
@@ -185,3 +202,3 @@ * If not specified, will call huggingface.co/api/tasks to get the default model for the task. | ||
/** | ||
* Primitive to make custom calls to the inference API | ||
* Primitive to make custom calls to Inference Endpoints | ||
*/ | ||
@@ -491,61 +508,2 @@ export function request<T>( | ||
): Promise<VisualQuestionAnsweringOutput>; | ||
export type ConversationalArgs = BaseArgs & { | ||
inputs: { | ||
/** | ||
* A list of strings corresponding to the earlier replies from the model. | ||
*/ | ||
generated_responses?: string[]; | ||
/** | ||
* A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses. | ||
*/ | ||
past_user_inputs?: string[]; | ||
/** | ||
* The last input from the user in the conversation. | ||
*/ | ||
text: string; | ||
}; | ||
parameters?: { | ||
/** | ||
* (Default: None). Integer to define the maximum length in tokens of the output summary. | ||
*/ | ||
max_length?: number; | ||
/** | ||
* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. | ||
*/ | ||
max_time?: number; | ||
/** | ||
* (Default: None). Integer to define the minimum length in tokens of the output summary. | ||
*/ | ||
min_length?: number; | ||
/** | ||
* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | ||
*/ | ||
repetition_penalty?: number; | ||
/** | ||
* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | ||
*/ | ||
temperature?: number; | ||
/** | ||
* (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | ||
*/ | ||
top_k?: number; | ||
/** | ||
* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | ||
*/ | ||
top_p?: number; | ||
}; | ||
}; | ||
export interface ConversationalOutput { | ||
conversation: { | ||
generated_responses: string[]; | ||
past_user_inputs: string[]; | ||
}; | ||
generated_text: string; | ||
warnings: string[]; | ||
} | ||
/** | ||
* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large. | ||
* | ||
*/ | ||
export function conversational(args: ConversationalArgs, options?: Options): Promise<ConversationalOutput>; | ||
export type FeatureExtractionArgs = BaseArgs & { | ||
@@ -753,64 +711,9 @@ /** | ||
): Promise<TextClassificationOutput>; | ||
export type TextGenerationArgs = BaseArgs & { | ||
/** | ||
* A string to be generated from | ||
*/ | ||
inputs: string; | ||
parameters?: { | ||
/** | ||
* (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | ||
*/ | ||
do_sample?: boolean; | ||
/** | ||
* (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | ||
*/ | ||
max_new_tokens?: number; | ||
/** | ||
* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. | ||
*/ | ||
max_time?: number; | ||
/** | ||
* (Default: 1). Integer. The number of proposition you want to be returned. | ||
*/ | ||
num_return_sequences?: number; | ||
/** | ||
* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | ||
*/ | ||
repetition_penalty?: number; | ||
/** | ||
* (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | ||
*/ | ||
return_full_text?: boolean; | ||
/** | ||
* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | ||
*/ | ||
temperature?: number; | ||
/** | ||
* (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | ||
*/ | ||
top_k?: number; | ||
/** | ||
* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | ||
*/ | ||
top_p?: number; | ||
/** | ||
* (Default: None). Integer. The maximum number of tokens from the input. | ||
*/ | ||
truncate?: number; | ||
/** | ||
* (Default: []) List of strings. The model will stop generating text when one of the strings in the list is generated. | ||
* **/ | ||
stop_sequences?: string[]; | ||
}; | ||
}; | ||
export interface TextGenerationOutput { | ||
/** | ||
* The continuated string | ||
*/ | ||
generated_text: string; | ||
} | ||
/** | ||
* Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). | ||
*/ | ||
export function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput>; | ||
export function textGeneration( | ||
args: BaseArgs & TextGenerationInput, | ||
options?: Options | ||
): Promise<TextGenerationOutput>; | ||
export type TextGenerationStreamFinishReason = | ||
@@ -893,3 +796,3 @@ /** number of generated tokens == `max_new_tokens` */ | ||
export function textGenerationStream( | ||
args: TextGenerationArgs, | ||
args: BaseArgs & TextGenerationInput, | ||
options?: Options | ||
@@ -953,5 +856,6 @@ ): AsyncGenerator<TextGenerationStreamOutput>; | ||
*/ | ||
inputs: string; | ||
inputs: string | string[]; | ||
}; | ||
export interface TranslationOutput { | ||
export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[]; | ||
export interface TranslationOutputValue { | ||
/** | ||
@@ -1070,3 +974,3 @@ * The string after translation | ||
/** | ||
* Primitive to make custom calls to the inference API | ||
* Primitive to make custom calls to Inference Endpoints | ||
*/ | ||
@@ -1152,7 +1056,2 @@ request<T>( | ||
/** | ||
* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large. | ||
* | ||
*/ | ||
conversational(args: Omit<ConversationalArgs, 'accessToken'>, options?: Options): Promise<ConversationalOutput>; | ||
/** | ||
* This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. | ||
@@ -1203,3 +1102,6 @@ */ | ||
*/ | ||
textGeneration(args: Omit<TextGenerationArgs, 'accessToken'>, options?: Options): Promise<TextGenerationOutput>; | ||
textGeneration( | ||
args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput, | ||
options?: Options | ||
): Promise<TextGenerationOutput>; | ||
/** | ||
@@ -1209,3 +1111,3 @@ * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time | ||
textGenerationStream( | ||
args: Omit<TextGenerationArgs, 'accessToken'>, | ||
args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput, | ||
options?: Options | ||
@@ -1279,3 +1181,3 @@ ): AsyncGenerator<TextGenerationStreamOutput>; | ||
/** | ||
* Primitive to make custom calls to the inference API | ||
* Primitive to make custom calls to Inference Endpoints | ||
*/ | ||
@@ -1361,7 +1263,2 @@ request<T>( | ||
/** | ||
* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large. | ||
* | ||
*/ | ||
conversational(args: Omit<ConversationalArgs, 'accessToken' | 'model'>, options?: Options): Promise<ConversationalOutput>; | ||
/** | ||
* This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. | ||
@@ -1412,3 +1309,6 @@ */ | ||
*/ | ||
textGeneration(args: Omit<TextGenerationArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextGenerationOutput>; | ||
textGeneration( | ||
args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput, | ||
options?: Options | ||
): Promise<TextGenerationOutput>; | ||
/** | ||
@@ -1418,3 +1318,3 @@ * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time | ||
textGenerationStream( | ||
args: Omit<TextGenerationArgs, 'accessToken' | 'model'>, | ||
args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput, | ||
options?: Options | ||
@@ -1421,0 +1321,0 @@ ): AsyncGenerator<TextGenerationStreamOutput>; |
/// <reference path="./index.d.ts" /> | ||
"use strict"; | ||
var __defProp = Object.defineProperty; | ||
var __getOwnPropDesc = Object.getOwnPropertyDescriptor; | ||
var __getOwnPropNames = Object.getOwnPropertyNames; | ||
var __hasOwnProp = Object.prototype.hasOwnProperty; | ||
var __export = (target, all) => { | ||
@@ -11,51 +7,3 @@ for (var name in all) | ||
}; | ||
var __copyProps = (to, from, except, desc) => { | ||
if (from && typeof from === "object" || typeof from === "function") { | ||
for (let key of __getOwnPropNames(from)) | ||
if (!__hasOwnProp.call(to, key) && key !== except) | ||
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); | ||
} | ||
return to; | ||
}; | ||
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); | ||
// src/index.ts | ||
var src_exports = {}; | ||
__export(src_exports, { | ||
HfInference: () => HfInference, | ||
HfInferenceEndpoint: () => HfInferenceEndpoint, | ||
InferenceOutputError: () => InferenceOutputError, | ||
audioClassification: () => audioClassification, | ||
audioToAudio: () => audioToAudio, | ||
automaticSpeechRecognition: () => automaticSpeechRecognition, | ||
conversational: () => conversational, | ||
documentQuestionAnswering: () => documentQuestionAnswering, | ||
featureExtraction: () => featureExtraction, | ||
fillMask: () => fillMask, | ||
imageClassification: () => imageClassification, | ||
imageSegmentation: () => imageSegmentation, | ||
imageToImage: () => imageToImage, | ||
imageToText: () => imageToText, | ||
objectDetection: () => objectDetection, | ||
questionAnswering: () => questionAnswering, | ||
request: () => request, | ||
sentenceSimilarity: () => sentenceSimilarity, | ||
streamingRequest: () => streamingRequest, | ||
summarization: () => summarization, | ||
tableQuestionAnswering: () => tableQuestionAnswering, | ||
tabularClassification: () => tabularClassification, | ||
tabularRegression: () => tabularRegression, | ||
textClassification: () => textClassification, | ||
textGeneration: () => textGeneration, | ||
textGenerationStream: () => textGenerationStream, | ||
textToImage: () => textToImage, | ||
textToSpeech: () => textToSpeech, | ||
tokenClassification: () => tokenClassification, | ||
translation: () => translation, | ||
visualQuestionAnswering: () => visualQuestionAnswering, | ||
zeroShotClassification: () => zeroShotClassification, | ||
zeroShotImageClassification: () => zeroShotImageClassification | ||
}); | ||
module.exports = __toCommonJS(src_exports); | ||
// src/tasks/index.ts | ||
@@ -67,3 +15,2 @@ var tasks_exports = {}; | ||
automaticSpeechRecognition: () => automaticSpeechRecognition, | ||
conversational: () => conversational, | ||
documentQuestionAnswering: () => documentQuestionAnswering, | ||
@@ -332,3 +279,3 @@ featureExtraction: () => featureExtraction, | ||
if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { | ||
return streamingRequest(args, { | ||
return yield* streamingRequest(args, { | ||
...options, | ||
@@ -591,14 +538,2 @@ wait_for_model: true | ||
// src/tasks/nlp/conversational.ts | ||
async function conversational(args, options) { | ||
const res = await request(args, { ...options, taskHint: "conversational" }); | ||
const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string")); | ||
if (!isValidOutput) { | ||
throw new InferenceOutputError( | ||
"Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}" | ||
); | ||
} | ||
return res; | ||
} | ||
// src/tasks/nlp/featureExtraction.ts | ||
@@ -773,3 +708,3 @@ async function featureExtraction(args, options) { | ||
} | ||
return res?.[0]; | ||
return res?.length === 1 ? res?.[0] : res; | ||
} | ||
@@ -911,4 +846,3 @@ | ||
}; | ||
// Annotate the CommonJS export names for ESM import in node: | ||
0 && (module.exports = { | ||
export { | ||
HfInference, | ||
@@ -920,3 +854,2 @@ HfInferenceEndpoint, | ||
automaticSpeechRecognition, | ||
conversational, | ||
documentQuestionAnswering, | ||
@@ -948,2 +881,2 @@ featureExtraction, | ||
zeroShotImageClassification | ||
}); | ||
}; |
{ | ||
"name": "@huggingface/inference", | ||
"version": "2.6.4", | ||
"packageManager": "pnpm@8.3.1", | ||
"version": "2.6.5", | ||
"packageManager": "pnpm@8.10.5", | ||
"license": "MIT", | ||
"author": "Tim Mikeladze <tim.mikeladze@gmail.com>", | ||
"description": "Typescript wrapper for the Hugging Face Inference API", | ||
"description": "Typescript wrapper for the Hugging Face Inference Endpoints & Inference API", | ||
"repository": { | ||
@@ -33,15 +33,13 @@ "type": "git", | ||
"types": "./dist/index.d.ts", | ||
"main": "./dist/index.js", | ||
"module": "./dist/index.mjs", | ||
"main": "./dist/index.cjs", | ||
"module": "./dist/index.js", | ||
"exports": { | ||
"types": "./dist/index.d.ts", | ||
"require": "./dist/index.js", | ||
"import": "./dist/index.mjs" | ||
"require": "./dist/index.cjs", | ||
"import": "./dist/index.js" | ||
}, | ||
"type": "module", | ||
"devDependencies": { | ||
"@types/node": "18.13.0", | ||
"ts-node": "^10.9.1", | ||
"typescript": "^5.0.4", | ||
"vite": "^4.1.4", | ||
"vitest": "^0.29.8" | ||
"@huggingface/tasks": "^0.6.0" | ||
}, | ||
@@ -51,3 +49,3 @@ "resolutions": {}, | ||
"build": "tsup src/index.ts --format cjs,esm --clean && pnpm run dts", | ||
"dts": "ts-node scripts/generate-dts.ts", | ||
"dts": "tsx scripts/generate-dts.ts", | ||
"lint": "eslint --quiet --fix --ext .cjs,.ts .", | ||
@@ -57,6 +55,6 @@ "lint:check": "eslint --ext .cjs,.ts .", | ||
"format:check": "prettier --check .", | ||
"test": "vitest run --config vitest.config.ts", | ||
"test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.ts", | ||
"type-check": "tsc" | ||
"test": "vitest run --config vitest.config.mts", | ||
"test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts", | ||
"check": "tsc" | ||
} | ||
} |
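The move to `"type": "module"`, with `./dist/index.js` served as ESM and `./dist/index.cjs` as CommonJS, keeps both module systems working through the `exports` map above. A minimal sketch of what consumers resolve to under each system (only the import specifier is fixed here; Node picks the `import` or `require` condition automatically):

```typescript
// ESM consumers (or TypeScript with "module": "node16"/"nodenext")
// resolve the "import" condition -> dist/index.js
import { HfInference } from "@huggingface/inference";

// CommonJS consumers resolve the "require" condition -> dist/index.cjs
// const { HfInference } = require("@huggingface/inference");

const hf = new HfInference(process.env.HF_TOKEN);
```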
README.md
@@ -1,11 +0,16 @@ | ||
# 🤗 Hugging Face Inference API | ||
# 🤗 Hugging Face Inference Endpoints | ||
A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). It also works with [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index). | ||
A Typescript powered wrapper for the Hugging Face Inference Endpoints API. Learn more about Inference Endpoints at [Hugging Face](https://huggingface.co/inference-endpoints). | ||
It works with both [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index). | ||
Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README). | ||
You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference) or see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs). | ||
You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference), see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs), or watch a [Scrimba tutorial that explains how Inference Endpoints works](https://scrimba.com/scrim/cod8248f5adfd6e129582c523). | ||
## Install | ||
## Getting Started | ||
### Install | ||
#### Node | ||
```console | ||
@@ -19,3 +24,3 @@ npm install @huggingface/inference | ||
### Deno | ||
#### Deno | ||
@@ -29,4 +34,11 @@ ```ts | ||
## Usage | ||
### Initialize | ||
```typescript | ||
import { HfInference } from '@huggingface/inference' | ||
const hf = new HfInference('your access token') | ||
``` | ||
❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**. | ||
@@ -36,11 +48,27 @@ | ||
### Basic examples | ||
```typescript | ||
import { HfInference } from '@huggingface/inference' | ||
#### Tree-shaking | ||
const hf = new HfInference('your access token') | ||
You can import the functions you need directly from the module instead of using the `HfInference` class. | ||
// Natural Language | ||
```ts | ||
import { textGeneration } from "@huggingface/inference"; | ||
await textGeneration({ | ||
accessToken: "hf_...", | ||
model: "model_or_endpoint", | ||
inputs: ..., | ||
parameters: ... | ||
}) | ||
``` | ||
This will enable tree-shaking by your bundler. | ||
## Natural Language Processing | ||
### Fill Mask | ||
Tries to fill in a hole with a missing word (token to be precise). | ||
```typescript | ||
await hf.fillMask({ | ||
@@ -50,3 +78,9 @@ model: 'bert-base-uncased', | ||
}) | ||
``` | ||
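The hunk above only shows part of the call, so here is a minimal, self-contained sketch of a fill-mask request with the `bert-base-uncased` model referenced above (the mask token is model-specific; the input sentence is just an example):

```typescript
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // your access token

// Returns candidate tokens for the masked position, each with a score
const candidates = await hf.fillMask({
  model: "bert-base-uncased",
  inputs: "[MASK] world!",
});
```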
### Summarization | ||
Summarizes longer text into shorter text. Be careful, some models have a maximum length of input. | ||
```typescript | ||
await hf.summarization({ | ||
@@ -60,3 +94,9 @@ model: 'facebook/bart-large-cnn', | ||
}) | ||
``` | ||
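A fuller sketch of a summarization call with the `facebook/bart-large-cnn` model shown above, assuming the `hf` client from the Getting Started section (the input text and length cap are illustrative):

```typescript
const summary = await hf.summarization({
  model: "facebook/bart-large-cnn",
  inputs: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building...",
  parameters: { max_length: 100 }, // optional cap on the summary length, in tokens
});
console.log(summary.summary_text);
```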
### Question Answering | ||
Answers questions based on the context you provide. | ||
```typescript | ||
await hf.questionAnswering({ | ||
@@ -69,3 +109,7 @@ model: 'deepset/roberta-base-squad2', | ||
}) | ||
``` | ||
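A complete sketch of the question-answering call with the `deepset/roberta-base-squad2` model shown above; the question/context pair is an example:

```typescript
const answer = await hf.questionAnswering({
  model: "deepset/roberta-base-squad2",
  inputs: {
    question: "What is the capital of France?",
    context: "The capital of France is Paris.",
  },
});
// answer: { answer: 'Paris', score: ..., start: ..., end: ... }
```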
### Table Question Answering | ||
```typescript | ||
await hf.tableQuestionAnswering({ | ||
@@ -83,3 +127,9 @@ model: 'google/tapas-base-finetuned-wtq', | ||
}) | ||
``` | ||
### Text Classification | ||
Often used for sentiment analysis, this method will assign labels to the given text along with a probability score of that label. | ||
```typescript | ||
await hf.textClassification({ | ||
@@ -89,3 +139,11 @@ model: 'distilbert-base-uncased-finetuned-sst-2-english', | ||
}) | ||
``` | ||
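A minimal sketch with the sentiment model shown above; the output is a list of labels with probability scores:

```typescript
const labels = await hf.textClassification({
  model: "distilbert-base-uncased-finetuned-sst-2-english",
  inputs: "I like you. I love you.",
});
// e.g. [{ label: 'POSITIVE', score: 0.99 }, { label: 'NEGATIVE', score: 0.01 }]
```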
### Text Generation | ||
Generates text from an input prompt. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/streaming-text-generation) | ||
```typescript | ||
await hf.textGeneration({ | ||
@@ -103,3 +161,9 @@ model: 'gpt2', | ||
} | ||
``` | ||
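A sketch of both the one-shot and the streaming form with the `gpt2` model shown above (the prompt and `max_new_tokens` value are illustrative):

```typescript
const { generated_text } = await hf.textGeneration({
  model: "gpt2",
  inputs: "The answer to the universe is",
  parameters: { max_new_tokens: 50 }, // optional
});

// Streaming variant: read tokens as they are produced
for await (const chunk of hf.textGenerationStream({
  model: "gpt2",
  inputs: "The answer to the universe is",
})) {
  process.stdout.write(chunk.token.text);
}
```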
### Token Classification | ||
Used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. | ||
```typescript | ||
await hf.tokenClassification({ | ||
@@ -109,3 +173,9 @@ model: 'dbmdz/bert-large-cased-finetuned-conll03-english', | ||
}) | ||
``` | ||
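A minimal sketch of a named-entity-recognition call with the CoNLL-03 model shown above:

```typescript
const entities = await hf.tokenClassification({
  model: "dbmdz/bert-large-cased-finetuned-conll03-english",
  inputs: "My name is Sarah Jessica Parker but you can call me Jessica",
});
// e.g. [{ entity_group: 'PER', word: 'Sarah Jessica Parker', score: ..., start: ..., end: ... }, ...]
```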
### Translation | ||
Converts text from one language to another. | ||
```typescript | ||
await hf.translation({ | ||
@@ -116,2 +186,17 @@ model: 't5-base', | ||
await hf.translation({ | ||
model: 'facebook/mbart-large-50-many-to-many-mmt', | ||
inputs: textToTranslate, | ||
parameters: { | ||
"src_lang": "en_XX", | ||
"tgt_lang": "fr_XX" | ||
} | ||
}) | ||
``` | ||
### Zero-Shot Classification | ||
Checks how well an input text fits into a set of labels you provide. | ||
```typescript | ||
await hf.zeroShotClassification({ | ||
@@ -124,3 +209,9 @@ model: 'facebook/bart-large-mnli', | ||
}) | ||
``` | ||
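A complete sketch with the `facebook/bart-large-mnli` model shown above; the candidate labels you pass are scored against the input text:

```typescript
const result = await hf.zeroShotClassification({
  model: "facebook/bart-large-mnli",
  inputs: ["Hi, I recently bought a device from your company but it is not working as advertised"],
  parameters: { candidate_labels: ["refund", "legal", "faq"] },
});
// e.g. [{ sequence: '...', labels: ['refund', 'faq', 'legal'], scores: [0.88, 0.10, 0.02] }]
```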
### Conversational | ||
This task corresponds to any chatbot-like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long-range dependency or not. | ||
```typescript | ||
await hf.conversational({ | ||
@@ -134,3 +225,9 @@ model: 'microsoft/DialoGPT-large', | ||
}) | ||
``` | ||
### Sentence Similarity | ||
Calculate the semantic similarity between one text and a list of other sentences. | ||
```typescript | ||
await hf.sentenceSimilarity({ | ||
@@ -147,10 +244,13 @@ model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1', | ||
}) | ||
``` | ||
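A minimal sketch with the paraphrase model shown above; one similarity score is returned per candidate sentence:

```typescript
const scores = await hf.sentenceSimilarity({
  model: "sentence-transformers/paraphrase-xlm-r-multilingual-v1",
  inputs: {
    source_sentence: "That is a happy person",
    sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
  },
});
// e.g. [0.6, 0.9, 0.1]
```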
await hf.featureExtraction({ | ||
model: "sentence-transformers/distilbert-base-nli-mean-tokens", | ||
inputs: "That is a happy person", | ||
}); | ||
## Audio | ||
// Audio | ||
### Automatic Speech Recognition | ||
Transcribes speech from an audio file. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/speech-recognition-vue) | ||
```typescript | ||
await hf.automaticSpeechRecognition({ | ||
@@ -160,3 +260,11 @@ model: 'facebook/wav2vec2-large-960h-lv60-self', | ||
}) | ||
``` | ||
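A self-contained sketch of transcription with the wav2vec2 model shown above; any audio file passed as a Buffer, Blob or ArrayBuffer should work:

```typescript
import { readFileSync } from "node:fs";

const { text } = await hf.automaticSpeechRecognition({
  model: "facebook/wav2vec2-large-960h-lv60-self",
  data: readFileSync("test/sample1.flac"), // example path, any audio file
});
console.log(text);
```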
### Audio Classification | ||
Assigns labels to the given audio along with a probability score of that label. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/audio-classification-vue) | ||
```typescript | ||
await hf.audioClassification({ | ||
@@ -166,3 +274,11 @@ model: 'superb/hubert-large-superb-er', | ||
}) | ||
``` | ||
### Text To Speech | ||
Generates natural-sounding speech from text input. | ||
[Interactive tutorial](https://scrimba.com/scrim/co8da4d23b49b648f77f4848a?pl=pkVnrP7uP) | ||
```typescript | ||
await hf.textToSpeech({ | ||
@@ -172,3 +288,9 @@ model: 'espnet/kan-bayashi_ljspeech_vits', | ||
}) | ||
``` | ||
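A sketch with the VITS model shown above; the call resolves to a `Blob` containing the generated audio, which you can persist in Node (the container format depends on the model):

```typescript
import { writeFileSync } from "node:fs";

const audio = await hf.textToSpeech({
  model: "espnet/kan-bayashi_ljspeech_vits",
  inputs: "Hello world!",
});

// Write the returned Blob to disk
writeFileSync("hello.wav", Buffer.from(await audio.arrayBuffer()));
```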
### Audio To Audio | ||
Outputs one or multiple generated audios from an input audio, commonly used for speech enhancement and source separation. | ||
```typescript | ||
await hf.audioToAudio({ | ||
@@ -178,5 +300,13 @@ model: 'speechbrain/sepformer-wham', | ||
}) | ||
``` | ||
// Computer Vision | ||
## Computer Vision | ||
### Image Classification | ||
Assigns labels to a given image along with a probability score of that label. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/image-classification-vue) | ||
```typescript | ||
await hf.imageClassification({ | ||
@@ -186,3 +316,11 @@ data: readFileSync('test/cheetah.png'), | ||
}) | ||
``` | ||
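A sketch of an image-classification call; the hunk elides the model name, so `google/vit-base-patch16-224` is used here as a stand-in example:

```typescript
import { readFileSync } from "node:fs";

const predictions = await hf.imageClassification({
  model: "google/vit-base-patch16-224", // example model
  data: readFileSync("test/cheetah.png"),
});
// e.g. [{ label: 'cheetah', score: 0.98 }, ...]
```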
### Object Detection | ||
Detects objects within an image and returns labels with corresponding bounding boxes and probability scores. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/object-detection-vue) | ||
```typescript | ||
await hf.objectDetection({ | ||
@@ -192,3 +330,9 @@ data: readFileSync('test/cats.png'), | ||
}) | ||
``` | ||
### Image Segmentation | ||
Detects segments within an image and returns labels with corresponding bounding boxes and probability scores. | ||
```typescript | ||
await hf.imageSegmentation({ | ||
@@ -198,3 +342,22 @@ data: readFileSync('test/cats.png'), | ||
}) | ||
``` | ||
### Image To Text | ||
Outputs text from a given image, commonly used for captioning or optical character recognition. | ||
```typescript | ||
await hf.imageToText({ | ||
data: readFileSync('test/cats.png'), | ||
model: 'nlpconnect/vit-gpt2-image-captioning' | ||
}) | ||
``` | ||
### Text To Image | ||
Creates an image from a text prompt. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/image-to-text) | ||
```typescript | ||
await hf.textToImage({ | ||
@@ -207,8 +370,11 @@ inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]', | ||
}) | ||
``` | ||
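A sketch of a text-to-image call; the model name is elided by the hunk, so `stabilityai/stable-diffusion-2` is used here as an example, and the call resolves to a `Blob` with the generated picture:

```typescript
const image = await hf.textToImage({
  model: "stabilityai/stable-diffusion-2", // example model
  inputs: "award winning high resolution photo of a giant tortoise",
  parameters: {
    negative_prompt: "blurry", // optional, supported by diffusion models
  },
});
// `image` is a Blob containing the generated image bytes
```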
await hf.imageToText({ | ||
data: readFileSync('test/cats.png'), | ||
model: 'nlpconnect/vit-gpt2-image-captioning' | ||
}) | ||
### Image To Image | ||
Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. | ||
[Interactive tutorial](https://scrimba.com/scrim/co4834bf9a91cc81cfab07969?pl=pkVnrP7uP) | ||
```typescript | ||
await hf.imageToImage({ | ||
@@ -221,3 +387,9 @@ inputs: new Blob([readFileSync("test/stormtrooper_depth.png")]), | ||
}); | ||
``` | ||
### Zero Shot Image Classification | ||
Checks how well an input image fits into a set of labels you provide. | ||
```typescript | ||
await hf.zeroShotImageClassification({ | ||
@@ -232,5 +404,24 @@ model: 'openai/clip-vit-large-patch14-336', | ||
}) | ||
``` | ||
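A complete sketch with the CLIP model shown above; you provide the image plus the candidate labels to score it against:

```typescript
import { readFileSync } from "node:fs";

const result = await hf.zeroShotImageClassification({
  model: "openai/clip-vit-large-patch14-336",
  inputs: { image: new Blob([readFileSync("test/cheetah.png")]) },
  parameters: { candidate_labels: ["cat", "dog", "cheetah"] },
});
// e.g. [{ label: 'cheetah', score: 0.97 }, ...]
```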
// Multimodal | ||
## Multimodal | ||
### Feature Extraction | ||
This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. | ||
```typescript | ||
await hf.featureExtraction({ | ||
model: "sentence-transformers/distilbert-base-nli-mean-tokens", | ||
inputs: "That is a happy person", | ||
}); | ||
``` | ||
### Visual Question Answering | ||
Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) | ||
```typescript | ||
await hf.visualQuestionAnswering({ | ||
@@ -243,3 +434,11 @@ model: 'dandelin/vilt-b32-finetuned-vqa', | ||
}) | ||
``` | ||
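A self-contained sketch with the ViLT model shown above; the question and image are examples:

```typescript
import { readFileSync } from "node:fs";

const answer = await hf.visualQuestionAnswering({
  model: "dandelin/vilt-b32-finetuned-vqa",
  inputs: {
    question: "How many cats are lying down?",
    image: new Blob([readFileSync("test/cats.png")]),
  },
});
// e.g. { answer: '2', score: 0.9 }
```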
### Document Question Answering | ||
Document question answering models take a (document, question) pair as input and return an answer in natural language. | ||
[Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) | ||
```typescript | ||
await hf.documentQuestionAnswering({ | ||
@@ -252,5 +451,11 @@ model: 'impira/layoutlm-document-qa', | ||
}) | ||
``` | ||
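A sketch with the LayoutLM model shown above; the question and document image are examples:

```typescript
import { readFileSync } from "node:fs";

const result = await hf.documentQuestionAnswering({
  model: "impira/layoutlm-document-qa",
  inputs: {
    question: "Invoice number?",
    image: new Blob([readFileSync("test/invoice.png")]), // example document scan
  },
});
// e.g. { answer: 'us-001', score: ... }
```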
// Tabular | ||
## Tabular | ||
### Tabular Regression | ||
Tabular regression is the task of predicting a numerical value given a set of attributes. | ||
```typescript | ||
await hf.tabularRegression({ | ||
@@ -269,3 +474,9 @@ model: "scikit-learn/Fish-Weight", | ||
}) | ||
``` | ||
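A sketch with the Fish-Weight model shown above; each key in `data` is a column and each column holds one value per row, so the result is one prediction per row (the numbers are illustrative):

```typescript
const weights = await hf.tabularRegression({
  model: "scikit-learn/Fish-Weight",
  inputs: {
    data: {
      Height: ["11.52", "12.48"],
      Length1: ["23.2", "24.0"],
      Length2: ["25.4", "26.3"],
      Length3: ["30.0", "31.2"],
      Species: ["Bream", "Bream"],
      Width: ["4.02", "4.31"],
    },
  },
});
// e.g. [243.1, 290.4] – one predicted weight per row
```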
### Tabular Classification | ||
Tabular classification is the task of classifying a target category (a group) based on set of attributes. | ||
```typescript | ||
await hf.tabularClassification({ | ||
@@ -289,4 +500,9 @@ model: "vvmnnnkv/wine-quality", | ||
}) | ||
``` | ||
// Custom call, for models with custom parameters / outputs | ||
## Custom Calls | ||
For models with custom parameters / outputs. | ||
```typescript | ||
await hf.request({ | ||
@@ -310,75 +526,17 @@ model: 'my-custom-model', | ||
} | ||
// Using your own inference endpoint: https://hf.co/docs/inference-endpoints/ | ||
const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); | ||
const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'}); | ||
``` | ||
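For endpoints whose inputs or outputs do not match one of the typed helpers, `request` (and its streaming sibling `streamingRequest`) pass the payload through unchanged. A minimal sketch, with a hypothetical output type and model name:

```typescript
interface MyCustomOutput {
  generated_text: string;
}

const out = await hf.request<MyCustomOutput[]>({
  model: "my-custom-model",
  inputs: "hello world",
  parameters: { custom_param: "some magic" },
});

// Streaming variant for endpoints that emit server-sent events
for await (const chunk of hf.streamingRequest({ model: "my-custom-model", inputs: "hello world" })) {
  console.log(chunk);
}
```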
## Supported Tasks | ||
## Custom Inference Endpoints | ||
### Natural Language Processing | ||
Learn more about using your own inference endpoints [here](https://hf.co/docs/inference-endpoints/) | ||
- [x] Fill mask | ||
- [x] Summarization | ||
- [x] Question answering | ||
- [x] Table question answering | ||
- [x] Text classification | ||
- [x] Text generation - [demo](https://huggingface.co/spaces/huggingfacejs/streaming-text-generation) | ||
- [x] Text2Text generation | ||
- [x] Token classification | ||
- [x] Named entity recognition | ||
- [x] Translation | ||
- [x] Zero-shot classification | ||
- [x] Conversational | ||
- [x] Feature extraction | ||
- [x] Sentence Similarity | ||
### Audio | ||
- [x] Automatic speech recognition | ||
- [x] Audio classification | ||
- [x] Text to speech | ||
- [x] Audio to audio | ||
### Computer Vision | ||
- [x] Image classification | ||
- [x] Object detection | ||
- [x] Image segmentation | ||
- [x] Text to image | ||
- [x] Image to text - [demo](https://huggingface.co/spaces/huggingfacejs/image-to-text) | ||
- [x] Image to Image | ||
- [x] Zero-shot image classification | ||
### Multimodal | ||
- [x] Document question answering - [demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) | ||
- [x] Visual question answering - [demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) | ||
### Tabular | ||
- [x] Tabular regression | ||
- [x] Tabular classification | ||
## Tree-shaking | ||
You can import the functions you need directly from the module, rather than using the `HfInference` class: | ||
```ts | ||
import {textGeneration} from "@huggingface/inference"; | ||
await textGeneration({ | ||
accessToken: "hf_...", | ||
model: "model_or_endpoint", | ||
inputs: ..., | ||
parameters: ... | ||
}) | ||
```typescript | ||
const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); | ||
const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'}); | ||
``` | ||
This will enable tree-shaking by your bundler. | ||
## Running tests | ||
```console | ||
HF_ACCESS_TOKEN="your access token" pnpm run test | ||
HF_TOKEN="your access token" pnpm run test | ||
``` | ||
@@ -385,0 +543,0 @@ |
@@ -5,3 +5,3 @@ import { isUrl } from "./isUrl"; | ||
* We want to make calls to the huggingface hub the least possible, eg if | ||
* someone is calling the inference API 1000 times per second, we don't want | ||
* someone is calling Inference Endpoints 1000 times per second, we don't want | ||
* to make 1000 calls to the hub to get the task name. | ||
@@ -8,0 +8,0 @@ */ |
@@ -5,3 +5,3 @@ import type { InferenceTask, Options, RequestArgs } from "../../types"; | ||
/** | ||
* Primitive to make custom calls to the inference API | ||
* Primitive to make custom calls to Inference Endpoints | ||
*/ | ||
@@ -8,0 +8,0 @@ export async function request<T>( |
@@ -22,3 +22,3 @@ import type { InferenceTask, Options, RequestArgs } from "../../types"; | ||
if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { | ||
return streamingRequest(args, { | ||
return yield* streamingRequest(args, { | ||
...options, | ||
@@ -25,0 +25,0 @@ wait_for_model: true, |
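The retry path in `streamingRequest` runs inside an async generator, so `return streamingRequest(...)` handed the caller a generator object instead of its values; delegating with `yield*` forwards every streamed chunk from the retried call. A minimal sketch of the pattern (the names and body are illustrative, not the library's internals):

```typescript
async function* fetchStreamWithRetry(url: string, retried = false): AsyncGenerator<string> {
  const response = await fetch(url);
  if (response.status === 503 && !retried) {
    // `return inner` would end this generator without emitting anything;
    // `yield*` re-emits every value produced by the retried call.
    return yield* fetchStreamWithRetry(url, true);
  }
  yield await response.text(); // placeholder for the real SSE parsing loop
}
```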
@@ -21,3 +21,2 @@ // Custom tasks with arbitrary inputs and outputs | ||
// Natural Language Processing tasks | ||
export * from "./nlp/conversational"; | ||
export * from "./nlp/featureExtraction"; | ||
@@ -24,0 +23,0 @@ export * from "./nlp/fillMask"; |
@@ -0,1 +1,2 @@ | ||
import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks/src/tasks/text-generation/inference"; | ||
import { InferenceOutputError } from "../../lib/InferenceOutputError"; | ||
@@ -5,66 +6,9 @@ import type { BaseArgs, Options } from "../../types"; | ||
export type TextGenerationArgs = BaseArgs & { | ||
/** | ||
* A string to be generated from | ||
*/ | ||
inputs: string; | ||
parameters?: { | ||
/** | ||
* (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise. | ||
*/ | ||
do_sample?: boolean; | ||
/** | ||
* (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated. | ||
*/ | ||
max_new_tokens?: number; | ||
/** | ||
* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results. | ||
*/ | ||
max_time?: number; | ||
/** | ||
* (Default: 1). Integer. The number of proposition you want to be returned. | ||
*/ | ||
num_return_sequences?: number; | ||
/** | ||
* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. | ||
*/ | ||
repetition_penalty?: number; | ||
/** | ||
* (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting. | ||
*/ | ||
return_full_text?: boolean; | ||
/** | ||
* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. | ||
*/ | ||
temperature?: number; | ||
/** | ||
* (Default: None). Integer to define the top tokens considered within the sample operation to create new text. | ||
*/ | ||
top_k?: number; | ||
/** | ||
* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. | ||
*/ | ||
top_p?: number; | ||
/** | ||
* (Default: None). Integer. The maximum number of tokens from the input. | ||
*/ | ||
truncate?: number; | ||
/** | ||
* (Default: []) List of strings. The model will stop generating text when one of the strings in the list is generated. | ||
* **/ | ||
stop_sequences?: string[]; | ||
}; | ||
}; | ||
export interface TextGenerationOutput { | ||
/** | ||
* The continuated string | ||
*/ | ||
generated_text: string; | ||
} | ||
/** | ||
* Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). | ||
*/ | ||
export async function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput> { | ||
export async function textGeneration( | ||
args: BaseArgs & TextGenerationInput, | ||
options?: Options | ||
): Promise<TextGenerationOutput> { | ||
const res = await request<TextGenerationOutput[]>(args, { | ||
@@ -71,0 +15,0 @@ ...options, |
@@ -1,5 +0,6 @@ | ||
import type { Options } from "../../types"; | ||
import type { BaseArgs, Options } from "../../types"; | ||
import { streamingRequest } from "../custom/streamingRequest"; | ||
import type { TextGenerationArgs } from "./textGeneration"; | ||
import type { TextGenerationInput } from "@huggingface/tasks/src/tasks/text-generation/inference"; | ||
export interface TextGenerationStreamToken { | ||
@@ -88,3 +89,3 @@ /** Token ID from the model tokenizer */ | ||
export async function* textGenerationStream( | ||
args: TextGenerationArgs, | ||
args: BaseArgs & TextGenerationInput, | ||
options?: Options | ||
@@ -91,0 +92,0 @@ ): AsyncGenerator<TextGenerationStreamOutput> { |
@@ -9,6 +9,6 @@ import { InferenceOutputError } from "../../lib/InferenceOutputError"; | ||
*/ | ||
inputs: string; | ||
inputs: string | string[]; | ||
}; | ||
export interface TranslationOutput { | ||
export interface TranslationOutputValue { | ||
/** | ||
@@ -20,2 +20,4 @@ * The string after translation | ||
export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[]; | ||
/** | ||
@@ -25,3 +27,3 @@ * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en. | ||
export async function translation(args: TranslationArgs, options?: Options): Promise<TranslationOutput> { | ||
const res = await request<TranslationOutput[]>(args, { | ||
const res = await request<TranslationOutputValue[]>(args, { | ||
...options, | ||
@@ -34,3 +36,3 @@ taskHint: "translation", | ||
} | ||
return res?.[0]; | ||
return res?.length === 1 ? res?.[0] : res; | ||
} |
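With this change `translation` accepts either a single string or an array, and only unwraps the response when exactly one result comes back, so batched calls now receive an array. A small usage sketch under that assumption:

```typescript
import { translation } from "@huggingface/inference";

// Single input -> a single TranslationOutputValue
const single = await translation({ accessToken: "hf_...", model: "t5-base", inputs: "Hello" });

// Multiple inputs -> one TranslationOutputValue per input
const batch = await translation({
  accessToken: "hf_...",
  model: "t5-base",
  inputs: ["Hello", "How are you?"],
});
```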
@@ -0,1 +1,3 @@ | ||
import type { PipelineType } from "@huggingface/tasks"; | ||
export interface Options { | ||
@@ -7,3 +9,3 @@ /** | ||
/** | ||
* (Default: true). Boolean. There is a cache layer on the inference API to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. | ||
* (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. | ||
*/ | ||
@@ -39,35 +41,3 @@ use_cache?: boolean; | ||
export type InferenceTask = | ||
| "audio-classification" | ||
| "audio-to-audio" | ||
| "automatic-speech-recognition" | ||
| "conversational" | ||
| "depth-estimation" | ||
| "document-question-answering" | ||
| "feature-extraction" | ||
| "fill-mask" | ||
| "image-classification" | ||
| "image-segmentation" | ||
| "image-to-image" | ||
| "image-to-text" | ||
| "object-detection" | ||
| "video-classification" | ||
| "question-answering" | ||
| "reinforcement-learning" | ||
| "sentence-similarity" | ||
| "summarization" | ||
| "table-question-answering" | ||
| "tabular-classification" | ||
| "tabular-regression" | ||
| "text-classification" | ||
| "text-generation" | ||
| "text-to-image" | ||
| "text-to-speech" | ||
| "text-to-video" | ||
| "token-classification" | ||
| "translation" | ||
| "unconditional-image-generation" | ||
| "visual-question-answering" | ||
| "zero-shot-classification" | ||
| "zero-shot-image-classification"; | ||
export type InferenceTask = Exclude<PipelineType, "other">; | ||
@@ -82,3 +52,3 @@ export interface BaseArgs { | ||
/** | ||
* The model to use. Can be a full URL for HF inference endpoints. | ||
* The model to use. Can be a full URL for a dedicated inference endpoint. | ||
* | ||
@@ -85,0 +55,0 @@ * If not specified, will call huggingface.co/api/tasks to get the default model for the task. |