@huggingface/tasks - npm package version comparison

Comparing version 0.15.8 to 0.15.9


dist/commonjs/inference-providers.d.ts

@@ -1,4 +0,4 @@

export declare const INFERENCE_PROVIDERS: readonly ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
export declare const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://huggingface.co/api/inference-proxy/{{PROVIDER}}";
declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "hyperbolic", "replicate", "sambanova", "together"];
export type SnippetInferenceProvider = (typeof INFERENCE_PROVIDERS)[number] | string;
export declare const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}";
/**

@@ -9,3 +9,4 @@ * URL to set as baseUrl in the OpenAI SDK.

*/
export declare function openAIbaseUrl(provider: InferenceProvider): string;
export declare function openAIbaseUrl(provider: SnippetInferenceProvider): string;
export {};
//# sourceMappingURL=inference-providers.d.ts.map
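
The substantive change in this file is the provider type: the closed InferenceProvider union is replaced by SnippetInferenceProvider, which unions the (now non-exported) provider list with string, so snippet helpers accept arbitrary provider names. A minimal TypeScript sketch of the effect, not part of the diff (the label helper is hypothetical):

const INFERENCE_PROVIDERS = ["fal-ai", "fireworks-ai", "hf-inference", "hyperbolic", "replicate", "sambanova", "together"] as const;
type SnippetInferenceProvider = (typeof INFERENCE_PROVIDERS)[number] | string;

// Hypothetical helper: listed and unlisted providers both type-check now.
function label(provider: SnippetInferenceProvider): string {
    return `snippets for ${provider}`;
}

label("hf-inference");       // listed provider
label("brand-new-provider"); // unlisted; a type error under 0.15.8's InferenceProvider, accepted in 0.15.9
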
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = exports.INFERENCE_PROVIDERS = void 0;
exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = void 0;
exports.openAIbaseUrl = openAIbaseUrl;
exports.INFERENCE_PROVIDERS = ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://huggingface.co/api/inference-proxy/{{PROVIDER}}`;
/// This list is for illustration purposes only.
/// in the `tasks` sub-package, we do not need actual strong typing of the inference providers.
const INFERENCE_PROVIDERS = [
"fal-ai",
"fireworks-ai",
"hf-inference",
"hyperbolic",
"replicate",
"sambanova",
"together",
];
exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://router.huggingface.co/{{PROVIDER}}`;
/**

@@ -13,5 +23,4 @@ * URL to set as baseUrl in the OpenAI SDK.

function openAIbaseUrl(provider) {
return provider === "hf-inference"
? "https://api-inference.huggingface.co/v1/"
: exports.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
const url = exports.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return provider === "hf-inference" ? `${url}/v1` : url;
}
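
Concretely, openAIbaseUrl now derives every URL from the router template, with only hf-inference getting a /v1 suffix; the same api-inference.huggingface.co to router.huggingface.co migration repeats in every curl and fetch snippet below. A before/after sketch, not part of the diff:

const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}";

function openAIbaseUrl(provider: string): string {
    const url = HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
    return provider === "hf-inference" ? `${url}/v1` : url;
}

openAIbaseUrl("hf-inference"); // "https://router.huggingface.co/hf-inference/v1"
openAIbaseUrl("together");     // "https://router.huggingface.co/together"
// 0.15.8 returned "https://api-inference.huggingface.co/v1/" for hf-inference
// and "https://huggingface.co/api/inference-proxy/together" otherwise.
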

@@ -5,3 +5,3 @@ import type { ModelLibraryKey } from "./model-libraries.js";

* Mapping from library name to its supported tasks.
* Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* HF-Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* This mapping is partially generated automatically by "python-api-export-tasks" action in

@@ -8,0 +8,0 @@ * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually

@@ -6,3 +6,3 @@ "use strict";

* Mapping from library name to its supported tasks.
* Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* HF-Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* This mapping is partially generated automatically by "python-api-export-tasks" action in

@@ -9,0 +9,0 @@ * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually

@@ -6,2 +6,3 @@ import type { ModelData } from "./model-data.js";

export declare const audioseal: (model: ModelData) => string[];
export declare const ben2: (model: ModelData) => string[];
export declare const bertopic: (model: ModelData) => string[];

@@ -8,0 +9,0 @@ export declare const bm25s: (model: ModelData) => string[];

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.terratorch = exports.speechbrain = exports.stanza = exports.span_marker = exports.spacy = exports.setfit = exports.sentenceTransformers = exports.sampleFactory = exports.sam2 = exports.fastai = exports.stable_audio_tools = exports.sklearn = exports.seed_story = exports.saelens = exports.timm = exports.tensorflowtts = exports.relik = exports.pyannote_audio = exports.pyannote_audio_pipeline = exports.paddlenlp = exports.open_clip = exports.mesh_anything = exports.mars5_tts = exports.mamba_ssm = exports.tf_keras = exports.llama_cpp_python = exports.keras_hub = exports.keras = exports.htrflow = exports.gliner = exports.flair = exports.fairseq = exports.espnet = exports.espnetASR = exports.espnetTTS = exports.edsnlp = exports.cartesia_mlx = exports.cartesia_pytorch = exports.diffusionkit = exports.diffusers = exports.derm_foundation = exports.depth_pro = exports.depth_anything_v2 = exports.cxr_foundation = exports.bm25s = exports.bertopic = exports.audioseal = exports.asteroid = exports.allennlp = exports.adapters = void 0;
exports.hezar = exports.threedtopia_xl = exports.whisperkit = exports.audiocraft = exports.anemoi = exports.pythae = exports.pxia = exports.nemo = exports.model2vec = exports.mlxim = exports.mlx = exports.swarmformer = exports.birefnet = exports.ultralytics = exports.chattts = exports.voicecraft = exports.vfimamba = exports.sana = exports.sentis = exports.mlAgents = exports.stableBaselines3 = exports.fasttext = exports.peft = exports.transformersJS = exports.transformers = void 0;
exports.speechbrain = exports.stanza = exports.span_marker = exports.spacy = exports.setfit = exports.sentenceTransformers = exports.sampleFactory = exports.sam2 = exports.fastai = exports.stable_audio_tools = exports.sklearn = exports.seed_story = exports.saelens = exports.timm = exports.tensorflowtts = exports.relik = exports.pyannote_audio = exports.pyannote_audio_pipeline = exports.paddlenlp = exports.open_clip = exports.mesh_anything = exports.mars5_tts = exports.mamba_ssm = exports.tf_keras = exports.llama_cpp_python = exports.keras_hub = exports.keras = exports.htrflow = exports.gliner = exports.flair = exports.fairseq = exports.espnet = exports.espnetASR = exports.espnetTTS = exports.edsnlp = exports.cartesia_mlx = exports.cartesia_pytorch = exports.diffusionkit = exports.diffusers = exports.derm_foundation = exports.depth_pro = exports.depth_anything_v2 = exports.cxr_foundation = exports.bm25s = exports.bertopic = exports.ben2 = exports.audioseal = exports.asteroid = exports.allennlp = exports.adapters = void 0;
exports.hezar = exports.threedtopia_xl = exports.whisperkit = exports.audiocraft = exports.anemoi = exports.pythae = exports.pxia = exports.nemo = exports.model2vec = exports.mlxim = exports.mlx = exports.swarmformer = exports.birefnet = exports.ultralytics = exports.chattts = exports.voicecraft = exports.vfimamba = exports.sana = exports.sentis = exports.mlAgents = exports.stableBaselines3 = exports.fasttext = exports.peft = exports.transformersJS = exports.transformers = exports.terratorch = void 0;
const library_to_tasks_js_1 = require("./library-to-tasks.js");

@@ -77,2 +77,16 @@ const inputs_js_1 = require("./snippets/inputs.js");

}
const ben2 = (model) => [
`import requests
from PIL import Image
from ben2 import AutoModel
url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg"
image = Image.open(requests.get(url, stream=True).raw)
model = AutoModel.from_pretrained("${model.id}")
model.to("cuda").eval()
foreground = model.inference(image)
`,
];
exports.ben2 = ben2;
const bertopic = (model) => [

@@ -79,0 +93,0 @@ `from bertopic import BERTopic

@@ -108,2 +108,9 @@ import type { ModelData } from "./model-data.js";

};
ben2: {
prettyLabel: string;
repoName: string;
repoUrl: string;
snippets: (model: ModelData) => string[];
filter: false;
};
bertopic: {

@@ -898,3 +905,3 @@ prettyLabel: string;

export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
//# sourceMappingURL=model-libraries.d.ts.map

@@ -94,2 +94,9 @@ "use strict";

},
ben2: {
prettyLabel: "BEN2",
repoName: "BEN2",
repoUrl: "https://github.com/PramaLLC/BEN2",
snippets: snippets.ben2,
filter: false,
},
bertopic: {

@@ -96,0 +103,0 @@ prettyLabel: "BERTopic",

@@ -412,2 +412,8 @@ export declare const MODALITIES: readonly ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];

};
"visual-document-retrieval": {
name: string;
modality: "multimodal";
color: "yellow";
hideInDatasets: true;
};
"any-to-any": {

@@ -431,3 +437,3 @@ name: string;

export declare const SUBTASK_TYPES: string[];
export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "audio-text-to-text" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "any-to-any">;
export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "audio-text-to-text" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "visual-document-retrieval" | "any-to-any">;
//# sourceMappingURL=pipelines.d.ts.map

@@ -633,2 +633,8 @@ "use strict";

},
"visual-document-retrieval": {
name: "Visual Document Retrieval",
modality: "multimodal",
color: "yellow",
hideInDatasets: true,
},
"any-to-any": {

@@ -635,0 +641,0 @@ name: "Any-to-Any",
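
The new visual-document-retrieval pipeline threads through several registrations in this release: the PIPELINE_DATA entry above, PIPELINE_TYPES_SET, the library-to-tasks mapping, and the tasks index (all visible later in the diff). A compact TypeScript sketch of the entry's shape, field values taken from the diff:

const visualDocumentRetrieval = {
    name: "Visual Document Retrieval",
    modality: "multimodal",
    color: "yellow",
    hideInDatasets: true, // registered as a pipeline task but hidden for datasets
} as const;

// Seen further down: LIBRARY_TASK_MAPPING gains "visual-document-retrieval": ["transformers"],
// and the tasks index registers it via getData("visual-document-retrieval", placeholder).
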

@@ -21,4 +21,4 @@ "use strict";

return Object.entries(config)
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
.join(`,${opts.indent}`);
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val},`)
.join(`${opts.indent}`);
}
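
This stringifyGenerationConfig tweak moves the separator comma onto each rendered entry and joins on the indent alone, so the result always ends with a comma; the snippet templates further down correspondingly drop their hard-coded "," after ${configStr}, plausibly to avoid a dangling comma when the config is empty. A sketch, not part of the diff; the quote handling is assumed from context:

function stringifyGenerationConfig(
    config: Record<string, unknown>,
    opts: { indent: string; attributeKeyQuotes?: boolean; attributeValueConnector: string }
): string {
    const quote = opts.attributeKeyQuotes ? '"' : ""; // assumed definition
    return Object.entries(config)
        .map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val},`)
        .join(`${opts.indent}`);
}

stringifyGenerationConfig({ max_tokens: 500 }, { indent: "\n    ", attributeKeyQuotes: true, attributeValueConnector: ": " });
// => '"max_tokens": 500,'  (trailing comma now built in)
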

@@ -1,7 +0,7 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -13,6 +13,6 @@ messages?: ChatCompletionInputMessage[];

}) => InferenceSnippet[];
export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=curl.d.ts.map

@@ -16,3 +16,3 @@ "use strict";

content: `\
curl https://api-inference.huggingface.co/models/${model.id} \\
curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -29,3 +29,3 @@ -d '{"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model, true)}}' \\

const baseUrl = provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions`
? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
: inference_providers_js_1.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";

@@ -58,3 +58,3 @@ // Conversational model detected, so we display a code snippet that features the Messages API

attributeValueConnector: ": ",
})},
})}
"stream": ${!!streaming}

@@ -77,3 +77,3 @@ }'`,

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -94,3 +94,3 @@ -d '{"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -97,0 +97,0 @@ --data-binary '@${(0, inputs_js_1.getModelInputSnippet)(model, true, true)}' \\

@@ -1,7 +0,7 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -14,8 +14,8 @@ messages?: ChatCompletionInputMessage[];

export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=js.d.ts.map

@@ -38,3 +38,3 @@ "use strict";

console.log(output)
console.log(output);
`,

@@ -49,3 +49,3 @@ },

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -126,3 +126,3 @@ headers: {

messages: ${messagesStr},
${configStr},
${configStr}
stream: true,

@@ -156,3 +156,4 @@ });

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -174,3 +175,4 @@ {

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -191,3 +193,3 @@ ];

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -237,3 +239,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -266,3 +268,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -344,3 +346,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -347,0 +349,0 @@ headers: {

@@ -1,6 +0,6 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -14,10 +14,10 @@ messages?: ChatCompletionInputMessage[];

export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetBasic: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=python.d.ts.map

@@ -8,2 +8,31 @@ "use strict";

const inputs_js_1 = require("./inputs.js");
const HFH_INFERENCE_CLIENT_METHODS = {
"audio-classification": "audio_classification",
"audio-to-audio": "audio_to_audio",
"automatic-speech-recognition": "automatic_speech_recognition",
"text-to-speech": "text_to_speech",
"image-classification": "image_classification",
"image-segmentation": "image_segmentation",
"image-to-image": "image_to_image",
"image-to-text": "image_to_text",
"object-detection": "object_detection",
"text-to-image": "text_to_image",
"text-to-video": "text_to_video",
"zero-shot-image-classification": "zero_shot_image_classification",
"document-question-answering": "document_question_answering",
"visual-question-answering": "visual_question_answering",
"feature-extraction": "feature_extraction",
"fill-mask": "fill_mask",
"question-answering": "question_answering",
"sentence-similarity": "sentence_similarity",
summarization: "summarization",
"table-question-answering": "table_question_answering",
"text-classification": "text_classification",
"text-generation": "text_generation",
"token-classification": "token_classification",
translation: "translation",
"zero-shot-classification": "zero_shot_classification",
"tabular-classification": "tabular_classification",
"tabular-regression": "tabular_regression",
};
const snippetImportInferenceClient = (accessToken, provider) => `\

@@ -42,3 +71,3 @@ from huggingface_hub import InferenceClient

messages=messages,
${configStr},
${configStr}
stream=True

@@ -65,3 +94,3 @@ )

messages=messages,
${configStr},
${configStr}
stream=True

@@ -156,4 +185,22 @@ )

exports.snippetZeroShotImageClassification = snippetZeroShotImageClassification;
const snippetBasic = (model) => {
const snippetBasic = (model, accessToken, provider) => {
return [
...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
? [
{
client: "huggingface_hub",
content: `\
${snippetImportInferenceClient(accessToken, provider)}
result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
model="${model.id}",
inputs=${(0, inputs_js_1.getModelInputSnippet)(model)},
provider="${provider}",
)
print(result)
`,
},
]
: []),
{

@@ -360,5 +407,2 @@ client: "requests",

: [];
const baseUrl = provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}`
: inference_providers_js_1.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return snippets.map((snippet) => {

@@ -371,3 +415,3 @@ return {

API_URL = "${baseUrl}"
API_URL = "${(0, inference_providers_js_1.openAIbaseUrl)(provider)}"
headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}

@@ -374,0 +418,0 @@
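
Taken together, the python.js changes mean snippetBasic now receives the access token and provider, and for any pipeline tag present in HFH_INFERENCE_CLIENT_METHODS it prepends a huggingface_hub snippet ahead of the classic requests one. A sketch of the emitted string for a text-classification model, not part of the diff; the model id and inputs are illustrative, and the InferenceClient construction line is truncated in the diff above, so it is assumed:

const HFH_INFERENCE_CLIENT_METHODS: Record<string, string> = {
    "text-classification": "text_classification",
    // ...the remaining entries listed above
};

const model = { id: "some-org/some-text-classifier", pipeline_tag: "text-classification" };
const provider = "hf-inference";

if (model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS) {
    const generated = `\
from huggingface_hub import InferenceClient
# ...client construction from snippetImportInferenceClient (assumed)
result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
    model="${model.id}",
    inputs="I like you. I love you",
    provider="${provider}",
)
print(result)`;
    console.log(generated); // emitted ahead of the classic "requests" snippet
}
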

@@ -113,2 +113,3 @@ "use strict";

"any-to-any": ["transformers"],
"visual-document-retrieval": ["transformers"],
};

@@ -140,2 +141,3 @@ /**

"document-question-answering": getData("document-question-answering", data_js_4.default),
"visual-document-retrieval": getData("visual-document-retrieval", data_js_16.default),
"feature-extraction": getData("feature-extraction", data_js_5.default),

@@ -142,0 +144,0 @@ "fill-mask": getData("fill-mask", data_js_6.default),

@@ -61,5 +61,3 @@ "use strict";

models: [
{ description: "A text-generation model trained to follow instructions.",
id: "google/gemma-2-2b-it",
},
{ description: "A text-generation model trained to follow instructions.", id: "google/gemma-2-2b-it" },
{

@@ -66,0 +64,0 @@ description: "Smaller variant of one of the most powerful models.",

@@ -1,4 +0,4 @@

export declare const INFERENCE_PROVIDERS: readonly ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
export declare const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://huggingface.co/api/inference-proxy/{{PROVIDER}}";
declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "hyperbolic", "replicate", "sambanova", "together"];
export type SnippetInferenceProvider = (typeof INFERENCE_PROVIDERS)[number] | string;
export declare const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}";
/**

@@ -9,3 +9,4 @@ * URL to set as baseUrl in the OpenAI SDK.

*/
export declare function openAIbaseUrl(provider: InferenceProvider): string;
export declare function openAIbaseUrl(provider: SnippetInferenceProvider): string;
export {};
//# sourceMappingURL=inference-providers.d.ts.map

@@ -1,3 +0,13 @@

export const INFERENCE_PROVIDERS = ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
export const HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://huggingface.co/api/inference-proxy/{{PROVIDER}}`;
/// This list is for illustration purposes only.
/// in the `tasks` sub-package, we do not need actual strong typing of the inference providers.
const INFERENCE_PROVIDERS = [
"fal-ai",
"fireworks-ai",
"hf-inference",
"hyperbolic",
"replicate",
"sambanova",
"together",
];
export const HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://router.huggingface.co/{{PROVIDER}}`;
/**

@@ -9,5 +19,4 @@ * URL to set as baseUrl in the OpenAI SDK.

export function openAIbaseUrl(provider) {
return provider === "hf-inference"
? "https://api-inference.huggingface.co/v1/"
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
const url = HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return provider === "hf-inference" ? `${url}/v1` : url;
}

@@ -5,3 +5,3 @@ import type { ModelLibraryKey } from "./model-libraries.js";

* Mapping from library name to its supported tasks.
* Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* HF-Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* This mapping is partially generated automatically by "python-api-export-tasks" action in

@@ -8,0 +8,0 @@ * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually

/**
* Mapping from library name to its supported tasks.
* Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* HF-Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* This mapping is partially generated automatically by "python-api-export-tasks" action in

@@ -5,0 +5,0 @@ * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually

@@ -6,2 +6,3 @@ import type { ModelData } from "./model-data.js";

export declare const audioseal: (model: ModelData) => string[];
export declare const ben2: (model: ModelData) => string[];
export declare const bertopic: (model: ModelData) => string[];

@@ -8,0 +9,0 @@ export declare const bm25s: (model: ModelData) => string[];

@@ -69,2 +69,15 @@ import { LIBRARY_TASK_MAPPING } from "./library-to-tasks.js";

}
export const ben2 = (model) => [
`import requests
from PIL import Image
from ben2 import AutoModel
url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg"
image = Image.open(requests.get(url, stream=True).raw)
model = AutoModel.from_pretrained("${model.id}")
model.to("cuda").eval()
foreground = model.inference(image)
`,
];
export const bertopic = (model) => [

@@ -71,0 +84,0 @@ `from bertopic import BERTopic

@@ -108,2 +108,9 @@ import type { ModelData } from "./model-data.js";

};
ben2: {
prettyLabel: string;
repoName: string;
repoUrl: string;
snippets: (model: ModelData) => string[];
filter: false;
};
bertopic: {

@@ -898,3 +905,3 @@ prettyLabel: string;

export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
//# sourceMappingURL=model-libraries.d.ts.map

@@ -68,2 +68,9 @@ import * as snippets from "./model-libraries-snippets.js";

},
ben2: {
prettyLabel: "BEN2",
repoName: "BEN2",
repoUrl: "https://github.com/PramaLLC/BEN2",
snippets: snippets.ben2,
filter: false,
},
bertopic: {

@@ -70,0 +77,0 @@ prettyLabel: "BERTopic",

@@ -412,2 +412,8 @@ export declare const MODALITIES: readonly ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];

};
"visual-document-retrieval": {
name: string;
modality: "multimodal";
color: "yellow";
hideInDatasets: true;
};
"any-to-any": {

@@ -431,3 +437,3 @@ name: string;

export declare const SUBTASK_TYPES: string[];
export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "audio-text-to-text" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "any-to-any">;
export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "audio-text-to-text" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "visual-document-retrieval" | "any-to-any">;
//# sourceMappingURL=pipelines.d.ts.map

@@ -630,2 +630,8 @@ export const MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];

},
"visual-document-retrieval": {
name: "Visual Document Retrieval",
modality: "multimodal",
color: "yellow",
hideInDatasets: true,
},
"any-to-any": {

@@ -632,0 +638,0 @@ name: "Any-to-Any",

@@ -17,4 +17,4 @@ export function stringifyMessages(messages, opts) {

return Object.entries(config)
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
.join(`,${opts.indent}`);
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val},`)
.join(`${opts.indent}`);
}

@@ -1,7 +0,7 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -13,6 +13,6 @@ messages?: ChatCompletionInputMessage[];

}) => InferenceSnippet[];
export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=curl.d.ts.map

@@ -12,3 +12,3 @@ import { HF_HUB_INFERENCE_PROXY_TEMPLATE } from "../inference-providers.js";

content: `\
curl https://api-inference.huggingface.co/models/${model.id} \\
curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -24,3 +24,3 @@ -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\

const baseUrl = provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions`
? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";

@@ -53,3 +53,3 @@ // Conversational model detected, so we display a code snippet that features the Messages API

attributeValueConnector: ": ",
})},
})}
"stream": ${!!streaming}

@@ -71,3 +71,3 @@ }'`,

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -87,3 +87,3 @@ -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -90,0 +90,0 @@ --data-binary '@${getModelInputSnippet(model, true, true)}' \\

@@ -1,7 +0,7 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -14,8 +14,8 @@ messages?: ChatCompletionInputMessage[];

export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=js.d.ts.map

@@ -34,3 +34,3 @@ import { openAIbaseUrl } from "../inference-providers.js";

console.log(output)
console.log(output);
`,

@@ -45,3 +45,3 @@ },

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -121,3 +121,3 @@ headers: {

messages: ${messagesStr},
${configStr},
${configStr}
stream: true,

@@ -151,3 +151,4 @@ });

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -169,3 +170,4 @@ {

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -185,3 +187,3 @@ ];

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -230,3 +232,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -258,3 +260,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -334,3 +336,3 @@ headers: {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -337,0 +339,0 @@ headers: {

@@ -1,6 +0,6 @@

import { type InferenceProvider } from "../inference-providers.js";
import { type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: {
streaming?: boolean;

@@ -14,10 +14,10 @@ messages?: ChatCompletionInputMessage[];

export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetBasic: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetFile: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
export declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetTextToAudio: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet[];
export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
//# sourceMappingURL=python.d.ts.map

@@ -1,4 +0,33 @@

import { HF_HUB_INFERENCE_PROXY_TEMPLATE, openAIbaseUrl } from "../inference-providers.js";
import { openAIbaseUrl, } from "../inference-providers.js";
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
import { getModelInputSnippet } from "./inputs.js";
const HFH_INFERENCE_CLIENT_METHODS = {
"audio-classification": "audio_classification",
"audio-to-audio": "audio_to_audio",
"automatic-speech-recognition": "automatic_speech_recognition",
"text-to-speech": "text_to_speech",
"image-classification": "image_classification",
"image-segmentation": "image_segmentation",
"image-to-image": "image_to_image",
"image-to-text": "image_to_text",
"object-detection": "object_detection",
"text-to-image": "text_to_image",
"text-to-video": "text_to_video",
"zero-shot-image-classification": "zero_shot_image_classification",
"document-question-answering": "document_question_answering",
"visual-question-answering": "visual_question_answering",
"feature-extraction": "feature_extraction",
"fill-mask": "fill_mask",
"question-answering": "question_answering",
"sentence-similarity": "sentence_similarity",
summarization: "summarization",
"table-question-answering": "table_question_answering",
"text-classification": "text_classification",
"text-generation": "text_generation",
"token-classification": "token_classification",
translation: "translation",
"zero-shot-classification": "zero_shot_classification",
"tabular-classification": "tabular_classification",
"tabular-regression": "tabular_regression",
};
const snippetImportInferenceClient = (accessToken, provider) => `\

@@ -37,3 +66,3 @@ from huggingface_hub import InferenceClient

messages=messages,
${configStr},
${configStr}
stream=True

@@ -60,3 +89,3 @@ )

messages=messages,
${configStr},
${configStr}
stream=True

@@ -148,4 +177,22 @@ )

};
export const snippetBasic = (model) => {
export const snippetBasic = (model, accessToken, provider) => {
return [
...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
? [
{
client: "huggingface_hub",
content: `\
${snippetImportInferenceClient(accessToken, provider)}
result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
model="${model.id}",
inputs=${getModelInputSnippet(model)},
provider="${provider}",
)
print(result)
`,
},
]
: []),
{

@@ -346,5 +393,2 @@ client: "requests",

: [];
const baseUrl = provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}`
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return snippets.map((snippet) => {

@@ -357,3 +401,3 @@ return {

API_URL = "${baseUrl}"
API_URL = "${openAIbaseUrl(provider)}"
headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}

@@ -360,0 +404,0 @@

@@ -107,2 +107,3 @@ import { PIPELINE_DATA } from "../pipelines.js";

"any-to-any": ["transformers"],
"visual-document-retrieval": ["transformers"],
};

@@ -134,2 +135,3 @@ /**

"document-question-answering": getData("document-question-answering", documentQuestionAnswering),
"visual-document-retrieval": getData("visual-document-retrieval", placeholder),
"feature-extraction": getData("feature-extraction", featureExtraction),

@@ -136,0 +138,0 @@ "fill-mask": getData("fill-mask", fillMask),

@@ -59,5 +59,3 @@ const taskData = {

models: [
{ description: "A text-generation model trained to follow instructions.",
id: "google/gemma-2-2b-it",
},
{ description: "A text-generation model trained to follow instructions.", id: "google/gemma-2-2b-it" },
{

@@ -64,0 +62,0 @@ description: "Smaller variant of one of the most powerful models.",

{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
"version": "0.15.8",
"version": "0.15.9",
"description": "List of ML tasks for huggingface.co/tasks",

@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -1,6 +0,16 @@

export const INFERENCE_PROVIDERS = ["hf-inference", "fal-ai", "replicate", "sambanova", "together"] as const;
/// This list is for illustration purposes only.
/// in the `tasks` sub-package, we do not need actual strong typing of the inference providers.
const INFERENCE_PROVIDERS = [
"fal-ai",
"fireworks-ai",
"hf-inference",
"hyperbolic",
"replicate",
"sambanova",
"together",
] as const;
export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
export type SnippetInferenceProvider = (typeof INFERENCE_PROVIDERS)[number] | string;
export const HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://huggingface.co/api/inference-proxy/{{PROVIDER}}`;
export const HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://router.huggingface.co/{{PROVIDER}}`;

@@ -12,6 +22,5 @@ /**

*/
export function openAIbaseUrl(provider: InferenceProvider): string {
return provider === "hf-inference"
? "https://api-inference.huggingface.co/v1/"
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
export function openAIbaseUrl(provider: SnippetInferenceProvider): string {
const url = HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return provider === "hf-inference" ? `${url}/v1` : url;
}

@@ -6,3 +6,3 @@ import type { ModelLibraryKey } from "./model-libraries.js";

* Mapping from library name to its supported tasks.
* Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* HF-Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
* This mapping is partially generated automatically by "python-api-export-tasks" action in

@@ -9,0 +9,0 @@ * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually

@@ -86,2 +86,16 @@ import type { ModelData } from "./model-data.js";

export const ben2 = (model: ModelData): string[] => [
`import requests
from PIL import Image
from ben2 import AutoModel
url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg"
image = Image.open(requests.get(url, stream=True).raw)
model = AutoModel.from_pretrained("${model.id}")
model.to("cuda").eval()
foreground = model.inference(image)
`,
];
export const bertopic = (model: ModelData): string[] => [

@@ -88,0 +102,0 @@ `from bertopic import BERTopic

@@ -112,2 +112,9 @@ import * as snippets from "./model-libraries-snippets.js";

},
ben2: {
prettyLabel: "BEN2",
repoName: "BEN2",
repoUrl: "https://github.com/PramaLLC/BEN2",
snippets: snippets.ben2,
filter: false,
},
bertopic: {

@@ -114,0 +121,0 @@ prettyLabel: "BERTopic",
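To illustrate how the new registry entry and snippet function connect, a sketch under the assumption that `ben2` is invoked like the other library snippet helpers (the model id is hypothetical):

```ts
import type { ModelData } from "./model-data.js";
import * as snippets from "./model-libraries-snippets.js";

// Hypothetical model id for illustration.
const model = { id: "PramaLLC/BEN2" } as ModelData;
const [pythonSnippet] = snippets.ben2(model);
// pythonSnippet is the Python block shown above, with model.id interpolated
// into AutoModel.from_pretrained(...).
```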

@@ -679,2 +679,8 @@ export const MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"] as const;

},
"visual-document-retrieval": {
name: "Visual Document Retrieval",
modality: "multimodal",
color: "yellow",
hideInDatasets: true,
},
"any-to-any": {

@@ -681,0 +687,0 @@ name: "Any-to-Any",
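After this addition, the task is addressable like any other pipeline entry; a sketch assuming the usual `PIPELINE_DATA` import path:

```ts
import { PIPELINE_DATA } from "./pipelines.js";

const task = PIPELINE_DATA["visual-document-retrieval"];
task.name;           // "Visual Document Retrieval"
task.modality;       // "multimodal"
task.hideInDatasets; // true — the task is kept out of dataset-side listings
```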

@@ -37,4 +37,4 @@ import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";

return Object.entries(config)
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
.join(`,${opts.indent}`);
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val},`)
.join(`${opts.indent}`);
}
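The comma moves from the join separator into each mapped entry, so the last attribute now carries a trailing comma too. A minimal reproduction with hypothetical config values:

```ts
const config = { max_tokens: 500, temperature: 0.5 };
const quote = '"';
const opts = { attributeValueConnector: ": ", indent: "\n    " };

const configStr = Object.entries(config)
	.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val},`)
	.join(`${opts.indent}`);
// Before: '"max_tokens": 500,\n    "temperature": 0.5'   (no trailing comma)
// After:  '"max_tokens": 500,\n    "temperature": 0.5,'  (trailing comma)
```

This is why the curl and Python snippet templates further down drop their explicit `,` after `${configStr}`.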

@@ -1,2 +0,2 @@

import { HF_HUB_INFERENCE_PROXY_TEMPLATE, type InferenceProvider } from "../inference-providers.js";
import { HF_HUB_INFERENCE_PROXY_TEMPLATE, type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";

@@ -11,3 +11,3 @@ import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -21,3 +21,3 @@ if (provider !== "hf-inference") {

content: `\
curl https://api-inference.huggingface.co/models/${model.id} \\
curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -34,3 +34,3 @@ -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: {

@@ -47,3 +47,3 @@ streaming?: boolean;

provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions`
? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
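A sketch of the endpoints this ternary now produces; `chatCompletionUrl` is a hypothetical wrapper name and the model id is illustrative:

```ts
const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}";

function chatCompletionUrl(provider: string, modelId: string): string {
	// Mirrors the ternary above: hf-inference keeps a per-model path,
	// every other provider shares the router's /v1/chat/completions route.
	return provider === "hf-inference"
		? `https://router.huggingface.co/hf-inference/models/${modelId}/v1/chat/completions`
		: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
}

chatCompletionUrl("hf-inference", "google/gemma-2-2b-it");
// "https://router.huggingface.co/hf-inference/models/google/gemma-2-2b-it/v1/chat/completions"
chatCompletionUrl("sambanova", "google/gemma-2-2b-it");
// "https://router.huggingface.co/sambanova/v1/chat/completions"
```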

@@ -70,11 +70,11 @@

"messages": ${stringifyMessages(messages, {
indent: "\t",
attributeKeyQuotes: true,
customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
})},
indent: "\t",
attributeKeyQuotes: true,
customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
})},
${stringifyGenerationConfig(config, {
indent: "\n ",
attributeKeyQuotes: true,
attributeValueConnector: ": ",
})},
indent: "\n ",
attributeKeyQuotes: true,
attributeValueConnector: ": ",
})}
"stream": ${!!streaming}

@@ -92,3 +92,3 @@ }'`,

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -101,3 +101,3 @@ if (provider !== "hf-inference") {

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -114,3 +114,3 @@ -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -123,3 +123,3 @@ if (provider !== "hf-inference") {

client: "curl",
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
-X POST \\

@@ -138,3 +138,3 @@ --data-binary '@${getModelInputSnippet(model, true, true)}' \\

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -173,3 +173,3 @@ ) => InferenceSnippet[]

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -176,0 +176,0 @@ ): InferenceSnippet[] {

@@ -1,3 +0,3 @@

import { openAIbaseUrl, type InferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType, WidgetType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";

@@ -8,3 +8,3 @@ import { stringifyGenerationConfig, stringifyMessages } from "./common.js";

const HFJS_METHODS: Record<string, string> = {
const HFJS_METHODS: Partial<Record<WidgetType, string>> = {
"text-classification": "textClassification",

@@ -26,3 +26,3 @@ "token-classification": "tokenClassification",

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -45,3 +45,3 @@ return [

console.log(output)
console.log(output);
`,

@@ -56,3 +56,3 @@ },

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -81,3 +81,3 @@ headers: {

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: {

@@ -147,3 +147,3 @@ streaming?: boolean;

messages: ${messagesStr},
${configStr},
${configStr}
stream: true,

@@ -176,3 +176,4 @@ });

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -194,3 +195,4 @@ {

console.log(chatCompletion.choices[0].message);`,
console.log(chatCompletion.choices[0].message);
`,
},

@@ -210,3 +212,3 @@ ];

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -237,3 +239,3 @@ headers: {

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -263,3 +265,3 @@ return [

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -289,3 +291,3 @@ headers: {

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -297,3 +299,3 @@ if (provider !== "hf-inference") {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -345,3 +347,3 @@ headers: {

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -374,3 +376,3 @@ return [

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -386,3 +388,3 @@ if (provider !== "hf-inference") {

const response = await fetch(
"https://api-inference.huggingface.co/models/${model.id}",
"https://router.huggingface.co/hf-inference/models/${model.id}",
{

@@ -414,3 +416,3 @@ headers: {

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -449,3 +451,3 @@ ) => InferenceSnippet[]

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -452,0 +454,0 @@ ): InferenceSnippet[] {

@@ -1,3 +0,7 @@

import { HF_HUB_INFERENCE_PROXY_TEMPLATE, openAIbaseUrl, type InferenceProvider } from "../inference-providers.js";
import type { PipelineType } from "../pipelines.js";
import {
HF_HUB_INFERENCE_PROXY_TEMPLATE,
openAIbaseUrl,
type SnippetInferenceProvider,
} from "../inference-providers.js";
import type { PipelineType, WidgetType } from "../pipelines.js";
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";

@@ -8,3 +12,33 @@ import { stringifyGenerationConfig, stringifyMessages } from "./common.js";

const snippetImportInferenceClient = (accessToken: string, provider: InferenceProvider): string =>
const HFH_INFERENCE_CLIENT_METHODS: Partial<Record<WidgetType, string>> = {
"audio-classification": "audio_classification",
"audio-to-audio": "audio_to_audio",
"automatic-speech-recognition": "automatic_speech_recognition",
"text-to-speech": "text_to_speech",
"image-classification": "image_classification",
"image-segmentation": "image_segmentation",
"image-to-image": "image_to_image",
"image-to-text": "image_to_text",
"object-detection": "object_detection",
"text-to-image": "text_to_image",
"text-to-video": "text_to_video",
"zero-shot-image-classification": "zero_shot_image_classification",
"document-question-answering": "document_question_answering",
"visual-question-answering": "visual_question_answering",
"feature-extraction": "feature_extraction",
"fill-mask": "fill_mask",
"question-answering": "question_answering",
"sentence-similarity": "sentence_similarity",
summarization: "summarization",
"table-question-answering": "table_question_answering",
"text-classification": "text_classification",
"text-generation": "text_generation",
"token-classification": "token_classification",
translation: "translation",
"zero-shot-classification": "zero_shot_classification",
"tabular-classification": "tabular_classification",
"tabular-regression": "tabular_regression",
};
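A sketch of how this mapping is consumed: the model's widget/pipeline tag selects the `huggingface_hub.InferenceClient` method name to render, and tags missing from the map fall through to the plain `requests` snippet (the example tag is illustrative):

```ts
HFH_INFERENCE_CLIENT_METHODS["text-to-image"]; // "text_to_image"

// snippetBasic guards on membership before emitting an InferenceClient snippet:
const tag = "text-to-image";
if (tag in HFH_INFERENCE_CLIENT_METHODS) {
	// render `client.text_to_image(...)` in the generated Python code
}
```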
const snippetImportInferenceClient = (accessToken: string, provider: SnippetInferenceProvider): string =>
`\

@@ -21,3 +55,3 @@ from huggingface_hub import InferenceClient

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: {

@@ -58,3 +92,3 @@ streaming?: boolean;

messages=messages,
${configStr},
${configStr}
stream=True

@@ -81,3 +115,3 @@ )

messages=messages,
${configStr},
${configStr}
stream=True

@@ -171,4 +205,26 @@ )

export const snippetBasic = (model: ModelDataMinimal): InferenceSnippet[] => {
export const snippetBasic = (
model: ModelDataMinimal,
accessToken: string,
provider: SnippetInferenceProvider
): InferenceSnippet[] => {
return [
...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
? [
{
client: "huggingface_hub",
content: `\
${snippetImportInferenceClient(accessToken, provider)}
result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
model="${model.id}",
inputs=${getModelInputSnippet(model)},
provider="${provider}",
)
print(result)
`,
},
]
: []),
{

@@ -207,3 +263,3 @@ client: "requests",

accessToken: string,
provider: InferenceProvider
provider: SnippetInferenceProvider
): InferenceSnippet[] => {

@@ -346,3 +402,3 @@ return [

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -385,3 +441,3 @@ ) => InferenceSnippet[]

accessToken: string,
provider: InferenceProvider,
provider: SnippetInferenceProvider,
opts?: Record<string, unknown>

@@ -398,7 +454,2 @@ ): InferenceSnippet[] {

const baseUrl =
provider === "hf-inference"
? `https://api-inference.huggingface.co/models/${model.id}`
: HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
return snippets.map((snippet) => {

@@ -412,3 +463,3 @@ return {

API_URL = "${baseUrl}"
API_URL = "${openAIbaseUrl(provider)}"
headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}

@@ -415,0 +466,0 @@

@@ -36,3 +36,3 @@ ## Use Cases

headers = {"Authorization": f"Bearer {API_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/superb/hubert-large-superb-er"
API_URL = "https://router.huggingface.co/hf-inference/models/superb/hubert-large-superb-er"

@@ -39,0 +39,0 @@ def query(filename):
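The documented Python helper only changes its endpoint; for reference, a TypeScript equivalent against the new router URL (the token handling is a placeholder assumption):

```ts
const API_URL = "https://router.huggingface.co/hf-inference/models/superb/hubert-large-superb-er";

async function query(data: Blob): Promise<unknown> {
	const response = await fetch(API_URL, {
		method: "POST",
		headers: { Authorization: `Bearer ${process.env.API_TOKEN}` }, // placeholder token
		body: data,
	});
	return response.json();
}
```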

@@ -22,3 +22,3 @@ ## Use Cases

headers = {"Authorization": f"Bearer {API_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/speechbrain/mtl-mimic-voicebank"
API_URL = "https://router.huggingface.co/hf-inference/models/speechbrain/mtl-mimic-voicebank"

@@ -25,0 +25,0 @@ def query(filename):

@@ -28,3 +28,3 @@ ## Use Cases

headers = {"Authorization": f"Bearer {API_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
API_URL = "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3"

@@ -31,0 +31,0 @@ def query(filename):

@@ -11,7 +11,14 @@ {

"title": "FeatureExtractionInputs",
"description": "The text or list of texts to embed.",
"oneOf": [
{ "type": "string" },
{ "type": "array", "items": { "type": "string" } }
],
"description": "The text or list of texts to embed."
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
},

@@ -18,0 +25,0 @@ "normalize": {
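The reformat is cosmetic; the accepted input shapes are unchanged. In TypeScript terms (the type name is assumed for illustration):

```ts
type FeatureExtractionInputs = string | string[];

const single: FeatureExtractionInputs = "Today is a sunny day";
const batch: FeatureExtractionInputs = ["Today is a sunny day", "And tomorrow?"];
```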

@@ -79,3 +79,3 @@ ## Different Types of Vision Language Models

```bash
curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct \
curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct \
-X POST \

@@ -82,0 +82,0 @@ -d '{"messages": [{"role": "user","content": [{"type": "image"}, {"type": "text", "text": "Can you describe the image?"}]}]}' \

@@ -177,2 +177,3 @@ import type { PipelineType } from "../pipelines.js";

"any-to-any": ["transformers"],
"visual-document-retrieval": ["transformers"],
};

@@ -206,2 +207,3 @@

"document-question-answering": getData("document-question-answering", documentQuestionAnswering),
"visual-document-retrieval": getData("visual-document-retrieval", placeholder),
"feature-extraction": getData("feature-extraction", featureExtraction),

@@ -208,0 +210,0 @@ "fill-mask": getData("fill-mask", fillMask),

@@ -25,3 +25,3 @@ ## Use Cases πŸ”

API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
API_URL = "https://router.huggingface.co/hf-inference/models/sentence-transformers/msmarco-distilbert-base-tas-b"
headers = {"Authorization": f"Bearer {api_token}"}

@@ -55,3 +55,3 @@

API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
API_URL = "https://router.huggingface.co/hf-inference/models/sentence-transformers/all-MiniLM-L6-v2"
headers = {"Authorization": f"Bearer {api_token}"}

@@ -58,0 +58,0 @@

@@ -64,5 +64,3 @@ import type { TaskDataCustom } from "../index.js";

models: [
{ description: "A text-generation model trained to follow instructions.",
id: "google/gemma-2-2b-it",
},
{ description: "A text-generation model trained to follow instructions.", id: "google/gemma-2-2b-it" },
{

@@ -69,0 +67,0 @@ description: "Smaller variant of one of the most powerful models.",

@@ -22,3 +22,3 @@ ## Use Cases

headers = {"Authorization": f"Bearer {API_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/microsoft/speecht5_tts"
API_URL = "https://router.huggingface.co/hf-inference/models/microsoft/speecht5_tts"

@@ -25,0 +25,0 @@ def query(payload):

Sorry, the diff of this file is not supported yet
