New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

@huggingface/tasks

Package Overview
Dependencies
Maintainers
4
Versions
148
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@huggingface/tasks - npm Package Compare versions

Comparing version 0.16.1 to 0.16.2

6

dist/commonjs/default-widget-inputs.js

@@ -93,5 +93,5 @@ "use strict";

`Hi, what can you help me with?`,
`Hey, let's have a conversation!`,
`Hello there!`,
`Hey my name is Clara! How are you?`,
`What is 84 * 3 / 2?`,
`Tell me an interesting fact about the universe!`,
`Explain quantum computing in simple terms.`,
],

@@ -98,0 +98,0 @@ ],

@@ -35,2 +35,6 @@ /**

NVIDIA: {
H200: {
tflops: number;
memory: number[];
};
H100: {

@@ -40,2 +44,6 @@ tflops: number;

};
L40s: {
tflops: number;
memory: number[];
};
L40: {

@@ -45,2 +53,10 @@ tflops: number;

};
L20: {
tflops: number;
memory: number[];
};
L4: {
tflops: number;
memory: number[];
};
"RTX 6000 Ada": {

@@ -102,2 +118,26 @@ tflops: number;

};
"RTX 5090": {
tflops: number;
memory: number[];
};
"RTX 5090 D": {
tflops: number;
memory: number[];
};
"RTX 5080": {
tflops: number;
memory: number[];
};
"RTX 5080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 5070": {
tflops: number;
memory: number[];
};
"RTX 5070 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4090": {

@@ -111,2 +151,6 @@ tflops: number;

};
"RTX 4090 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4080 SUPER": {

@@ -120,2 +164,6 @@ tflops: number;

};
"RTX 4080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4070": {

@@ -125,2 +173,6 @@ tflops: number;

};
"RTX 4070 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4070 Ti": {

@@ -146,2 +198,6 @@ tflops: number;

};
"RTX 4060 Mobile": {
tflops: number;
memory: number[];
};
"RTX 3090": {

@@ -163,2 +219,6 @@ tflops: number;

};
"RTX 3080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 3070": {

@@ -172,3 +232,3 @@ tflops: number;

};
"RTX 3070 Ti Laptop": {
"RTX 3070 Ti Mobile": {
tflops: number;

@@ -229,2 +289,6 @@ memory: number[];

};
"GTX 1050 Ti": {
tflops: number;
memory: number[];
};
"RTX Titan": {

@@ -258,2 +322,6 @@ tflops: number;

};
P100: {
tflops: number;
memory: number[];
};
};

@@ -260,0 +328,0 @@ AMD: {

@@ -20,2 +20,6 @@ "use strict";

NVIDIA: {
H200: {
tflops: 241.3,
memory: [141],
},
H100: {

@@ -25,2 +29,6 @@ tflops: 267.6,

},
L40s: {
tflops: 91.61,
memory: [48],
},
L40: {

@@ -30,2 +38,10 @@ tflops: 90.52,

},
L20: {
tflops: 59.35,
memory: [48],
},
L4: {
tflops: 30.29,
memory: [24],
},
"RTX 6000 Ada": {

@@ -87,2 +103,26 @@ tflops: 91.1,

},
"RTX 5090": {
tflops: 104.8,
memory: [32],
},
"RTX 5090 D": {
tflops: 104.8,
memory: [32],
},
"RTX 5080": {
tflops: 56.28,
memory: [16],
},
"RTX 5080 Mobile": {
tflops: 24.58,
memory: [16],
},
"RTX 5070": {
tflops: 30.84,
memory: [12],
},
"RTX 5070 Mobile": {
tflops: 23.22,
memory: [8],
},
"RTX 4090": {

@@ -96,2 +136,6 @@ tflops: 82.58,

},
"RTX 4090 Mobile": {
tflops: 32.98,
memory: [16]
},
"RTX 4080 SUPER": {

@@ -105,2 +149,6 @@ tflops: 52.2,

},
"RTX 4080 Mobile": {
tflops: 24.72,
memory: [12]
},
"RTX 4070": {

@@ -110,2 +158,6 @@ tflops: 29.15,

},
"RTX 4070 Mobile": {
tflops: 15.62,
memory: [8]
},
"RTX 4070 Ti": {

@@ -131,2 +183,6 @@ tflops: 40.09,

},
"RTX 4060 Mobile": {
tflops: 11.61,
memory: [8]
},
"RTX 3090": {

@@ -148,2 +204,6 @@ tflops: 35.58,

},
"RTX 3080 Mobile": {
tflops: 18.98,
memory: [8]
},
"RTX 3070": {

@@ -157,3 +217,3 @@ tflops: 20.31,

},
"RTX 3070 Ti Laptop": {
"RTX 3070 Ti Mobile": {
tflops: 16.6,

@@ -214,2 +274,6 @@ memory: [8],

},
"GTX 1050 Ti": {
tflops: 2.1, // float32 (GPU does not support native float16)
memory: [4]
},
"RTX Titan": {

@@ -243,2 +307,6 @@ tflops: 32.62,

},
P100: {
tflops: 19.05,
memory: [16],
},
},

@@ -245,0 +313,0 @@ AMD: {

@@ -74,14 +74,15 @@ "use strict";

const snippetNodeLlamaCppCli = (model, filepath) => {
let tagName = "{{OLLAMA_TAG}}";
if (filepath) {
const quantLabel = (0, gguf_js_1.parseGGUFQuantLabel)(filepath);
tagName = quantLabel ? `:${quantLabel}` : tagName;
}
return [
{
title: "Chat with the model",
content: [
`npx -y node-llama-cpp chat \\`,
` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
` --prompt 'Hi there!'`,
].join("\n"),
content: `npx -y node-llama-cpp chat hf:${model.id}${tagName}`,
},
{
title: "Estimate the model compatibility with your hardware",
content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
content: `npx -y node-llama-cpp inspect estimate hf:${model.id}${tagName}`,
},

@@ -88,0 +89,0 @@ ];

@@ -104,17 +104,10 @@ "use strict";

const cxr_foundation = () => [
`!git clone https://github.com/Google-Health/cxr-foundation.git
import tensorflow as tf, sys, requests
sys.path.append('cxr-foundation/python/')
`# pip install git+https://github.com/Google-Health/cxr-foundation.git#subdirectory=python
# Install dependencies
major_version = tf.__version__.rsplit(".", 1)[0]
!pip install tensorflow-text=={major_version} pypng && pip install --no-deps pydicom hcls_imaging_ml_toolkit retrying
# Load image (Stillwaterising, CC0, via Wikimedia Commons)
# Load image as grayscale (Stillwaterising, CC0, via Wikimedia Commons)
import requests
from PIL import Image
from io import BytesIO
image_url = "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png"
response = requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True)
response.raw.decode_content = True # Ensure correct decoding
img = Image.open(BytesIO(response.content)).convert('L') # Convert to grayscale
img = Image.open(requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True).raw).convert('L')

@@ -121,0 +114,0 @@ # Run inference

@@ -172,2 +172,8 @@ import type { ModelData } from "./model-data.js";

};
comet: {
prettyLabel: string;
repoName: string;
repoUrl: string;
countDownloads: string;
};
cosmos: {

@@ -905,3 +911,3 @@ prettyLabel: string;

export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "comet" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
//# sourceMappingURL=model-libraries.d.ts.map

@@ -158,2 +158,8 @@ "use strict";

},
comet: {
prettyLabel: "COMET",
repoName: "COMET",
repoUrl: "https://github.com/Unbabel/COMET/",
countDownloads: `path:"hparams.yaml"`,
},
cosmos: {

@@ -160,0 +166,0 @@ prettyLabel: "Cosmos",

@@ -90,5 +90,5 @@ /// NOTE TO CONTRIBUTORS:

`Hi, what can you help me with?`,
`Hey, let's have a conversation!`,
`Hello there!`,
`Hey my name is Clara! How are you?`,
`What is 84 * 3 / 2?`,
`Tell me an interesting fact about the universe!`,
`Explain quantum computing in simple terms.`,
],

@@ -95,0 +95,0 @@ ],

@@ -35,2 +35,6 @@ /**

NVIDIA: {
H200: {
tflops: number;
memory: number[];
};
H100: {

@@ -40,2 +44,6 @@ tflops: number;

};
L40s: {
tflops: number;
memory: number[];
};
L40: {

@@ -45,2 +53,10 @@ tflops: number;

};
L20: {
tflops: number;
memory: number[];
};
L4: {
tflops: number;
memory: number[];
};
"RTX 6000 Ada": {

@@ -102,2 +118,26 @@ tflops: number;

};
"RTX 5090": {
tflops: number;
memory: number[];
};
"RTX 5090 D": {
tflops: number;
memory: number[];
};
"RTX 5080": {
tflops: number;
memory: number[];
};
"RTX 5080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 5070": {
tflops: number;
memory: number[];
};
"RTX 5070 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4090": {

@@ -111,2 +151,6 @@ tflops: number;

};
"RTX 4090 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4080 SUPER": {

@@ -120,2 +164,6 @@ tflops: number;

};
"RTX 4080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4070": {

@@ -125,2 +173,6 @@ tflops: number;

};
"RTX 4070 Mobile": {
tflops: number;
memory: number[];
};
"RTX 4070 Ti": {

@@ -146,2 +198,6 @@ tflops: number;

};
"RTX 4060 Mobile": {
tflops: number;
memory: number[];
};
"RTX 3090": {

@@ -163,2 +219,6 @@ tflops: number;

};
"RTX 3080 Mobile": {
tflops: number;
memory: number[];
};
"RTX 3070": {

@@ -172,3 +232,3 @@ tflops: number;

};
"RTX 3070 Ti Laptop": {
"RTX 3070 Ti Mobile": {
tflops: number;

@@ -229,2 +289,6 @@ memory: number[];

};
"GTX 1050 Ti": {
tflops: number;
memory: number[];
};
"RTX Titan": {

@@ -258,2 +322,6 @@ tflops: number;

};
P100: {
tflops: number;
memory: number[];
};
};

@@ -260,0 +328,0 @@ AMD: {

@@ -17,2 +17,6 @@ /**

NVIDIA: {
H200: {
tflops: 241.3,
memory: [141],
},
H100: {

@@ -22,2 +26,6 @@ tflops: 267.6,

},
L40s: {
tflops: 91.61,
memory: [48],
},
L40: {

@@ -27,2 +35,10 @@ tflops: 90.52,

},
L20: {
tflops: 59.35,
memory: [48],
},
L4: {
tflops: 30.29,
memory: [24],
},
"RTX 6000 Ada": {

@@ -84,2 +100,26 @@ tflops: 91.1,

},
"RTX 5090": {
tflops: 104.8,
memory: [32],
},
"RTX 5090 D": {
tflops: 104.8,
memory: [32],
},
"RTX 5080": {
tflops: 56.28,
memory: [16],
},
"RTX 5080 Mobile": {
tflops: 24.58,
memory: [16],
},
"RTX 5070": {
tflops: 30.84,
memory: [12],
},
"RTX 5070 Mobile": {
tflops: 23.22,
memory: [8],
},
"RTX 4090": {

@@ -93,2 +133,6 @@ tflops: 82.58,

},
"RTX 4090 Mobile": {
tflops: 32.98,
memory: [16]
},
"RTX 4080 SUPER": {

@@ -102,2 +146,6 @@ tflops: 52.2,

},
"RTX 4080 Mobile": {
tflops: 24.72,
memory: [12]
},
"RTX 4070": {

@@ -107,2 +155,6 @@ tflops: 29.15,

},
"RTX 4070 Mobile": {
tflops: 15.62,
memory: [8]
},
"RTX 4070 Ti": {

@@ -128,2 +180,6 @@ tflops: 40.09,

},
"RTX 4060 Mobile": {
tflops: 11.61,
memory: [8]
},
"RTX 3090": {

@@ -145,2 +201,6 @@ tflops: 35.58,

},
"RTX 3080 Mobile": {
tflops: 18.98,
memory: [8]
},
"RTX 3070": {

@@ -154,3 +214,3 @@ tflops: 20.31,

},
"RTX 3070 Ti Laptop": {
"RTX 3070 Ti Mobile": {
tflops: 16.6,

@@ -211,2 +271,6 @@ memory: [8],

},
"GTX 1050 Ti": {
tflops: 2.1, // float32 (GPU does not support native float16)
memory: [4]
},
"RTX Titan": {

@@ -240,2 +304,6 @@ tflops: 32.62,

},
P100: {
tflops: 19.05,
memory: [16],
},
},

@@ -242,0 +310,0 @@ AMD: {

@@ -71,14 +71,15 @@ import { parseGGUFQuantLabel } from "./gguf.js";

const snippetNodeLlamaCppCli = (model, filepath) => {
let tagName = "{{OLLAMA_TAG}}";
if (filepath) {
const quantLabel = parseGGUFQuantLabel(filepath);
tagName = quantLabel ? `:${quantLabel}` : tagName;
}
return [
{
title: "Chat with the model",
content: [
`npx -y node-llama-cpp chat \\`,
` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
` --prompt 'Hi there!'`,
].join("\n"),
content: `npx -y node-llama-cpp chat hf:${model.id}${tagName}`,
},
{
title: "Estimate the model compatibility with your hardware",
content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
content: `npx -y node-llama-cpp inspect estimate hf:${model.id}${tagName}`,
},

@@ -85,0 +86,0 @@ ];

@@ -93,17 +93,10 @@ import { LIBRARY_TASK_MAPPING } from "./library-to-tasks.js";

export const cxr_foundation = () => [
`!git clone https://github.com/Google-Health/cxr-foundation.git
import tensorflow as tf, sys, requests
sys.path.append('cxr-foundation/python/')
`# pip install git+https://github.com/Google-Health/cxr-foundation.git#subdirectory=python
# Install dependencies
major_version = tf.__version__.rsplit(".", 1)[0]
!pip install tensorflow-text=={major_version} pypng && pip install --no-deps pydicom hcls_imaging_ml_toolkit retrying
# Load image (Stillwaterising, CC0, via Wikimedia Commons)
# Load image as grayscale (Stillwaterising, CC0, via Wikimedia Commons)
import requests
from PIL import Image
from io import BytesIO
image_url = "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png"
response = requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True)
response.raw.decode_content = True # Ensure correct decoding
img = Image.open(BytesIO(response.content)).convert('L') # Convert to grayscale
img = Image.open(requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True).raw).convert('L')

@@ -110,0 +103,0 @@ # Run inference

@@ -172,2 +172,8 @@ import type { ModelData } from "./model-data.js";

};
comet: {
prettyLabel: string;
repoName: string;
repoUrl: string;
countDownloads: string;
};
cosmos: {

@@ -905,3 +911,3 @@ prettyLabel: string;

export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "ben2" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "comet" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "swarmformer" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
//# sourceMappingURL=model-libraries.d.ts.map

@@ -132,2 +132,8 @@ import * as snippets from "./model-libraries-snippets.js";

},
comet: {
prettyLabel: "COMET",
repoName: "COMET",
repoUrl: "https://github.com/Unbabel/COMET/",
countDownloads: `path:"hparams.yaml"`,
},
cosmos: {

@@ -134,0 +140,0 @@ prettyLabel: "Cosmos",

@@ -1,2 +0,2 @@

import { openAIbaseUrl, } from "../inference-providers.js";
import { openAIbaseUrl } from "../inference-providers.js";
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";

@@ -3,0 +3,0 @@ import { getModelInputSnippet } from "./inputs.js";

{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
"version": "0.16.1",
"version": "0.16.2",
"description": "List of ML tasks for huggingface.co/tasks",

@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -99,5 +99,5 @@ import type { WidgetExample } from "./widget-example.js";

`Hi, what can you help me with?`,
`Hey, let's have a conversation!`,
`Hello there!`,
`Hey my name is Clara! How are you?`,
`What is 84 * 3 / 2?`,
`Tell me an interesting fact about the universe!`,
`Explain quantum computing in simple terms.`,
],

@@ -104,0 +104,0 @@ ],

@@ -39,2 +39,6 @@ /**

NVIDIA: {
H200: {
tflops: 241.3,
memory: [141],
},
H100: {

@@ -44,2 +48,6 @@ tflops: 267.6,

},
L40s: {
tflops: 91.61,
memory: [48],
},
L40: {

@@ -49,2 +57,10 @@ tflops: 90.52,

},
L20: {
tflops: 59.35,
memory: [48],
},
L4: {
tflops: 30.29,
memory: [24],
},
"RTX 6000 Ada": {

@@ -106,2 +122,26 @@ tflops: 91.1,

},
"RTX 5090": {
tflops: 104.8,
memory: [32],
},
"RTX 5090 D": {
tflops: 104.8,
memory: [32],
},
"RTX 5080": {
tflops: 56.28,
memory: [16],
},
"RTX 5080 Mobile": {
tflops: 24.58,
memory: [16],
},
"RTX 5070": {
tflops: 30.84,
memory: [12],
},
"RTX 5070 Mobile": {
tflops: 23.22,
memory: [8],
},
"RTX 4090": {

@@ -115,2 +155,6 @@ tflops: 82.58,

},
"RTX 4090 Mobile": {
tflops: 32.98,
memory: [16]
},
"RTX 4080 SUPER": {

@@ -124,2 +168,6 @@ tflops: 52.2,

},
"RTX 4080 Mobile": {
tflops: 24.72,
memory: [12]
},
"RTX 4070": {

@@ -129,2 +177,6 @@ tflops: 29.15,

},
"RTX 4070 Mobile": {
tflops: 15.62,
memory: [8]
},
"RTX 4070 Ti": {

@@ -150,2 +202,6 @@ tflops: 40.09,

},
"RTX 4060 Mobile": {
tflops: 11.61,
memory: [8]
},
"RTX 3090": {

@@ -167,2 +223,6 @@ tflops: 35.58,

},
"RTX 3080 Mobile": {
tflops: 18.98,
memory: [8]
},
"RTX 3070": {

@@ -176,3 +236,3 @@ tflops: 20.31,

},
"RTX 3070 Ti Laptop": {
"RTX 3070 Ti Mobile": {
tflops: 16.6,

@@ -233,2 +293,6 @@ memory: [8],

},
"GTX 1050 Ti": {
tflops: 2.1, // float32 (GPU does not support native float16)
memory: [4]
},
"RTX Titan": {

@@ -262,2 +326,6 @@ tflops: 32.62,

},
P100: {
tflops: 19.05,
memory: [16],
},
},

@@ -264,0 +332,0 @@ AMD: {

@@ -141,14 +141,17 @@ import { parseGGUFQuantLabel } from "./gguf.js";

const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
let tagName = "{{OLLAMA_TAG}}";
if (filepath) {
const quantLabel = parseGGUFQuantLabel(filepath);
tagName = quantLabel ? `:${quantLabel}` : tagName;
}
return [
{
title: "Chat with the model",
content: [
`npx -y node-llama-cpp chat \\`,
` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
` --prompt 'Hi there!'`,
].join("\n"),
content: `npx -y node-llama-cpp chat hf:${model.id}${tagName}`,
},
{
title: "Estimate the model compatibility with your hardware",
content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
content: `npx -y node-llama-cpp inspect estimate hf:${model.id}${tagName}`,
},

@@ -155,0 +158,0 @@ ];

@@ -113,17 +113,10 @@ import type { ModelData } from "./model-data.js";

export const cxr_foundation = (): string[] => [
`!git clone https://github.com/Google-Health/cxr-foundation.git
import tensorflow as tf, sys, requests
sys.path.append('cxr-foundation/python/')
`# pip install git+https://github.com/Google-Health/cxr-foundation.git#subdirectory=python
# Install dependencies
major_version = tf.__version__.rsplit(".", 1)[0]
!pip install tensorflow-text=={major_version} pypng && pip install --no-deps pydicom hcls_imaging_ml_toolkit retrying
# Load image (Stillwaterising, CC0, via Wikimedia Commons)
# Load image as grayscale (Stillwaterising, CC0, via Wikimedia Commons)
import requests
from PIL import Image
from io import BytesIO
image_url = "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png"
response = requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True)
response.raw.decode_content = True # Ensure correct decoding
img = Image.open(BytesIO(response.content)).convert('L') # Convert to grayscale
img = Image.open(requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True).raw).convert('L')

@@ -130,0 +123,0 @@ # Run inference

@@ -176,2 +176,8 @@ import * as snippets from "./model-libraries-snippets.js";

},
comet: {
prettyLabel: "COMET",
repoName: "COMET",
repoUrl: "https://github.com/Unbabel/COMET/",
countDownloads: `path:"hparams.yaml"`,
},
cosmos: {

@@ -178,0 +184,0 @@ prettyLabel: "Cosmos",

@@ -30,5 +30,5 @@ import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";

? [
{
client: "huggingface.js",
content: `\
{
client: "huggingface.js",
content: `\
import { HfInference } from "@huggingface/inference";

@@ -46,4 +46,4 @@

`,
},
]
},
]
: []),

@@ -222,4 +222,4 @@ {

query({"inputs": ${getModelInputSnippet(
model
)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
model
)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
console.log(JSON.stringify(response));

@@ -255,5 +255,5 @@ });`,

? [
{
client: "fetch",
content: `async function query(data) {
{
client: "fetch",
content: `async function query(data) {
const response = await fetch(

@@ -276,4 +276,4 @@ "https://router.huggingface.co/hf-inference/models/${model.id}",

});`,
},
]
},
]
: []),

@@ -280,0 +280,0 @@ ];

@@ -1,5 +0,2 @@

import {
openAIbaseUrl,
type SnippetInferenceProvider,
} from "../inference-providers.js";
import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
import type { PipelineType, WidgetType } from "../pipelines.js";

@@ -209,5 +206,5 @@ import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";

? [
{
client: "huggingface_hub",
content: `\
{
client: "huggingface_hub",
content: `\
${snippetImportInferenceClient(accessToken, provider)}

@@ -223,4 +220,4 @@

`,
},
]
},
]
: []),

@@ -261,3 +258,3 @@ {

provider: SnippetInferenceProvider,
providerModelId?: string,
providerModelId?: string
): InferenceSnippet[] => {

@@ -278,5 +275,5 @@ return [

? [
{
client: "fal-client",
content: `\
{
client: "fal-client",
content: `\
import fal_client

@@ -292,10 +289,10 @@

`,
},
]
},
]
: []),
...(provider === "hf-inference"
? [
{
client: "requests",
content: `\
{
client: "requests",
content: `\
def query(payload):

@@ -313,4 +310,4 @@ response = requests.post(API_URL, headers=headers, json=payload)

image = Image.open(io.BytesIO(image_bytes))`,
},
]
},
]
: []),

@@ -317,0 +314,0 @@ ];

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc