New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

@huggingface/tasks

Package Overview
Dependencies
Maintainers
4
Versions
136
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@huggingface/tasks - npm Package Compare versions

Comparing version 0.12.21 to 0.12.22

23

dist/src/local-apps.d.ts

@@ -51,5 +51,7 @@ import type { ModelData } from "./model-data";

* Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
* Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
*/
snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
});
declare function isTgiModel(model: ModelData): boolean;
declare function isLlamaCppGgufModel(model: ModelData): boolean;

@@ -75,2 +77,9 @@ /**

};
"node-llama-cpp": {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
displayOnModelPage: typeof isLlamaCppGgufModel;
snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
};
vllm: {

@@ -83,2 +92,9 @@ prettyLabel: string;

};
tgi: {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
displayOnModelPage: typeof isTgiModel;
snippet: (model: ModelData) => LocalAppSnippet[];
};
lmstudio: {

@@ -172,2 +188,9 @@ prettyLabel: string;

};
ollama: {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
displayOnModelPage: typeof isLlamaCppGgufModel;
snippet: (model: ModelData, filepath?: string) => string;
};
};

@@ -174,0 +197,0 @@ export type LocalAppKey = keyof typeof LOCAL_APPS;

11

dist/src/model-libraries.d.ts

@@ -663,2 +663,9 @@ import type { ModelData } from "./model-data";

};
"f5-tts": {
prettyLabel: string;
repoName: string;
repoUrl: string;
filter: false;
countDownloads: string;
};
tensorflowtts: {

@@ -756,4 +763,4 @@ prettyLabel: string;

export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
//# sourceMappingURL=model-libraries.d.ts.map

5

package.json
{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
"version": "0.12.21",
"version": "0.12.22",
"description": "List of ML tasks for huggingface.co/tasks",

@@ -39,2 +39,5 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

},
"dependencies": {
"@huggingface/gguf": "^0.1.12"
},
"scripts": {

@@ -41,0 +44,0 @@ "lint": "eslint --quiet --fix --ext .cjs,.ts .",

import type { ModelData } from "./model-data";
import type { PipelineType } from "./pipelines";
import { parseGGUFQuantLabel } from "@huggingface/gguf";

@@ -56,2 +57,3 @@ export interface LocalAppSnippet {

* Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
* Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
*/

@@ -81,2 +83,5 @@ snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];

}
/** True when the model repo carries the tag advertising Text Generation Inference support. */
function isTgiModel(model: ModelData): boolean {
	return model.tags.some((tag) => tag === "text-generation-inference");
}

@@ -128,2 +133,28 @@ function isLlamaCppGgufModel(model: ModelData) {

/**
 * Build the node-llama-cpp CLI snippets for a model page: an interactive chat
 * command plus a hardware-compatibility estimate command.
 * Falls back to the {{GGUF_FILE}} placeholder when no concrete file was chosen.
 */
const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
	// Both commands reference the same hf: model URI; build it once.
	const modelUri = `hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}`;
	const chatCommand = [
		`npx -y node-llama-cpp chat \\`,
		` --model "${modelUri}" \\`,
		` --prompt 'Hi there!'`,
	].join("\n");
	return [
		{ title: "Chat with the model", content: chatCommand },
		{
			title: "Estimate the model compatibility with your hardware",
			content: `npx -y node-llama-cpp inspect estimate "${modelUri}"`,
		},
	];
};
/**
 * Build the `ollama run` command for a model. Given a concrete GGUF filepath,
 * the quant label parsed from it becomes the tag suffix (empty when no label is
 * found); otherwise the {{OLLAMA_TAG}} placeholder is emitted for later substitution.
 */
const snippetOllama = (model: ModelData, filepath?: string): string => {
	if (!filepath) {
		return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
	}
	const quantLabel = parseGGUFQuantLabel(filepath);
	const tagSuffix = quantLabel ? `:${quantLabel}` : "";
	return `ollama run hf.co/${model.id}${tagSuffix}`;
};
const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {

@@ -190,2 +221,30 @@ const command = (binary: string) =>

};
/**
 * Build the Text Generation Inference (TGI) snippet: a docker deployment
 * command as setup, plus a curl chat-completions request as content.
 * The docker mapping `-p 8000:80` matches the curl target on localhost:8000.
 */
const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
	const dockerSetup = [
		"# Deploy with docker on Linux:",
		`docker run --gpus all \\`,
		` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
		` -e HF_TOKEN="<secret>" \\`,
		` -p 8000:80 \\`,
		` ghcr.io/huggingface/text-generation-inference:latest \\`,
		` --model-id ${model.id}`,
	].join("\n");
	const curlRequest = [
		"# Call the server using curl:",
		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
		` -H "Content-Type: application/json" \\`,
		` --data '{`,
		` "model": "${model.id}",`,
		` "messages": [`,
		` {"role": "user", "content": "What is the capital of France?"}`,
		` ]`,
		` }'`,
	].join("\n");
	return [
		{
			title: "Use Docker images",
			setup: dockerSetup,
			content: [curlRequest],
		},
	];
};

@@ -211,2 +270,9 @@ /**

},
"node-llama-cpp": {
prettyLabel: "node-llama-cpp",
docsUrl: "https://node-llama-cpp.withcat.ai",
mainTask: "text-generation",
displayOnModelPage: isLlamaCppGgufModel,
snippet: snippetNodeLlamaCppCli,
},
vllm: {

@@ -226,2 +292,9 @@ prettyLabel: "vLLM",

},
tgi: {
prettyLabel: "TGI",
docsUrl: "https://huggingface.co/docs/text-generation-inference/",
mainTask: "text-generation",
displayOnModelPage: isTgiModel,
snippet: snippetTgi,
},
lmstudio: {

@@ -336,4 +409,11 @@ prettyLabel: "LM Studio",

},
ollama: {
prettyLabel: "Ollama",
docsUrl: "https://ollama.com",
mainTask: "text-generation",
displayOnModelPage: isLlamaCppGgufModel,
snippet: snippetOllama,
},
} satisfies Record<string, LocalApp>;
export type LocalAppKey = keyof typeof LOCAL_APPS;

@@ -669,2 +669,9 @@ import * as snippets from "./model-libraries-snippets";

},
"f5-tts": {
prettyLabel: "F5-TTS",
repoName: "F5-TTS",
repoUrl: "https://github.com/SWivid/F5-TTS",
filter: false,
countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`,
},
tensorflowtts: {

@@ -671,0 +678,0 @@ prettyLabel: "TensorFlowTTS",

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc