New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in | Demo | Install
Socket

@huggingface/tasks

Package Overview
Dependencies
Maintainers
4
Versions
136
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@huggingface/tasks - npm Package Compare versions

Comparing version 0.10.11 to 0.10.12

dist/src/dataset-libraries.d.ts

2

dist/src/index.d.ts

@@ -19,2 +19,4 @@ export { LIBRARY_TASK_MAPPING } from "./library-to-tasks";

export type { LocalApp, LocalAppKey } from "./local-apps";
export { DATASET_LIBRARIES_UI_ELEMENTS } from "./dataset-libraries";
export type { DatasetLibraryUiElement, DatasetLibraryKey } from "./dataset-libraries";
//# sourceMappingURL=index.d.ts.map

@@ -87,2 +87,24 @@ import type { ModelData } from "./model-data";

};
jellybox: {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
displayOnModelPage: (model: ModelData) => boolean;
deeplink: (model: ModelData) => URL;
};
msty: {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
displayOnModelPage: typeof isGgufModel;
deeplink: (model: ModelData) => URL;
};
recursechat: {
prettyLabel: string;
docsUrl: string;
mainTask: "text-generation";
macOSOnly: true;
displayOnModelPage: typeof isGgufModel;
deeplink: (model: ModelData) => URL;
};
drawthings: {

@@ -89,0 +111,0 @@ prettyLabel: string;

@@ -22,2 +22,3 @@ import type { ModelData } from "./model-data";

export declare const sklearn: (model: ModelData) => string[];
export declare const stable_audio_tools: (model: ModelData) => string[];
export declare const fastai: (model: ModelData) => string[];

@@ -39,2 +40,3 @@ export declare const sampleFactory: (model: ModelData) => string[];

export declare const voicecraft: (model: ModelData) => string[];
export declare const chattts: () => string[];
export declare const mlx: (model: ModelData) => string[];

@@ -41,0 +43,0 @@ export declare const mlxim: (model: ModelData) => string[];

@@ -116,2 +116,14 @@ import type { ModelData } from "./model-data";

};
chat_tts: {
prettyLabel: string;
repoName: string;
repoUrl: string;
filter: false;
countDownloads: {
term: {
path: string;
};
};
snippets: () => string[];
};
diffusers: {

@@ -170,2 +182,7 @@ prettyLabel: string;

filter: true;
countDownloads: {
wildcard: {
path: string;
};
};
};

@@ -409,2 +426,14 @@ flair: {

};
"stable-audio-tools": {
prettyLabel: string;
repoName: string;
repoUrl: string;
filter: false;
countDownloads: {
term: {
path: string;
};
};
snippets: (model: ModelData) => string[];
};
"stable-baselines3": {

@@ -513,4 +542,4 @@ prettyLabel: string;

export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "bertopic" | "big_vision" | "diffusers" | "doctr" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gliner" | "grok" | "keras" | "keras-nlp" | "k2" | "mindspore" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "bertopic" | "big_vision" | "diffusers" | "doctr" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gliner" | "grok" | "keras" | "keras-nlp" | "k2" | "mindspore" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "bertopic" | "big_vision" | "chat_tts" | "diffusers" | "doctr" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gliner" | "grok" | "keras" | "keras-nlp" | "k2" | "mindspore" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "bertopic" | "big_vision" | "chat_tts" | "diffusers" | "doctr" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gliner" | "grok" | "keras" | "keras-nlp" | "k2" | "mindspore" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
//# sourceMappingURL=model-libraries.d.ts.map

2

package.json
{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
"version": "0.10.11",
"version": "0.10.12",
"description": "List of ML tasks for huggingface.co/tasks",

@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -52,1 +52,4 @@ export { LIBRARY_TASK_MAPPING } from "./library-to-tasks";

export type { LocalApp, LocalAppKey } from "./local-apps";
export { DATASET_LIBRARIES_UI_ELEMENTS } from "./dataset-libraries";
export type { DatasetLibraryUiElement, DatasetLibraryKey } from "./dataset-libraries";

@@ -121,2 +121,36 @@ import type { ModelData } from "./model-data";

},
jellybox: {
prettyLabel: "Jellybox",
docsUrl: "https://jellybox.com",
mainTask: "text-generation",
displayOnModelPage: (model) =>
isGgufModel(model) ||
(model.library_name === "diffusers" &&
model.tags.includes("safetensors") &&
(model.pipeline_tag === "text-to-image" || model.tags.includes("lora"))),
deeplink: (model) => {
if (isGgufModel(model)) {
return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`);
} else if (model.tags.includes("lora")) {
return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`);
} else {
return new URL(`jellybox://image/models/huggingface/Image/${model.id}`);
}
},
},
msty: {
prettyLabel: "Msty",
docsUrl: "https://msty.app",
mainTask: "text-generation",
displayOnModelPage: isGgufModel,
deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`),
},
recursechat: {
prettyLabel: "RecurseChat",
docsUrl: "https://recurse.chat",
mainTask: "text-generation",
macOSOnly: true,
displayOnModelPage: isGgufModel,
deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`),
},
drawthings: {

@@ -123,0 +157,0 @@ prettyLabel: "Draw Things",

@@ -329,2 +329,39 @@ import type { ModelData } from "./model-data";

export const stable_audio_tools = (model: ModelData): string[] => {
	// Python usage snippet for stable-audio-tools: downloads the checkpoint via
	// get_pretrained_model, generates conditioned audio, and writes output.wav.
	const snippet = `import torch
import torchaudio
from einops import rearrange
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
device = "cuda" if torch.cuda.is_available() else "cpu"
# Download model
model, model_config = get_pretrained_model("${model.id}")
sample_rate = model_config["sample_rate"]
sample_size = model_config["sample_size"]
model = model.to(device)
# Set up text and timing conditioning
conditioning = [{
"prompt": "128 BPM tech house drum loop",
}]
# Generate stereo audio
output = generate_diffusion_cond(
model,
conditioning=conditioning,
sample_size=sample_size,
device=device
)
# Rearrange audio batch to a single sequence
output = rearrange(output, "b d n -> d (b n)")
# Peak normalize, clip, convert to int16, and save to file
output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
torchaudio.save("output.wav", output, sample_rate)`;
	return [snippet];
};
export const fastai = (model: ModelData): string[] => [

@@ -557,2 +594,16 @@ `from huggingface_hub import from_pretrained_fastai

// Python usage snippet for ChatTTS text-to-speech.
// Fix: the snippet calls `torch.from_numpy(...)` when saving the generated
// waveform, but previously only imported `ChatTTS` and `torchaudio` — the
// emitted code raised `NameError: name 'torch' is not defined`. `import torch`
// is now included so the snippet runs as-is.
export const chattts = (): string[] => [
	`import ChatTTS
import torch
import torchaudio
chat = ChatTTS.Chat()
chat.load_models(compile=False) # Set to True for better performance
texts = ["PUT YOUR TEXT HERE",]
wavs = chat.infer(texts, )
torchaudio.save("output1.wav", torch.from_numpy(wavs[0]), 24000)`,
];
export const mlx = (model: ModelData): string[] => [

@@ -559,0 +610,0 @@ `pip install huggingface_hub hf_transfer

@@ -114,2 +114,10 @@ import * as snippets from "./model-libraries-snippets";

},
chat_tts: {
prettyLabel: "ChatTTS",
repoName: "ChatTTS",
repoUrl: "https://github.com/2noise/ChatTTS.git",
filter: false,
countDownloads: { term: { path: "asset/GPT.pt" } },
snippets: snippets.chattts,
},
diffusers: {

@@ -167,2 +175,5 @@ prettyLabel: "Diffusers",

filter: true,
countDownloads: {
wildcard: { path: "*.bin" },
},
},

@@ -366,2 +377,10 @@ flair: {

},
"stable-audio-tools": {
prettyLabel: "Stable Audio Tools",
repoName: "stable-audio-tools",
repoUrl: "https://github.com/Stability-AI/stable-audio-tools.git",
filter: false,
countDownloads: { term: { path: "model.safetensors" } },
snippets: snippets.stable_audio_tools,
},
"stable-baselines3": {

@@ -368,0 +387,0 @@ prettyLabel: "stable-baselines3",

@@ -39,3 +39,3 @@ ## Use Cases

You can also use[huggingface.js](https://github.com/huggingface/huggingface.js), the JavaScript client, to transcribe models with the Inference API.
You can also use [huggingface.js](https://github.com/huggingface/huggingface.js), the JavaScript client, to transcribe audio with the Serverless Inference API.

@@ -52,3 +52,3 @@ ```javascript

For transformers compatible models like Whisper, Wav2Vec2, HuBERT, etc. You can also run inference in Python using transformers as follows:
For transformers-compatible models like Whisper, Wav2Vec2, and HuBERT, you can also run inference with the library as follows:

@@ -55,0 +55,0 @@ ```python

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc