New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

@huggingface/tasks

Package Overview
Dependencies
Maintainers
4
Versions
136
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@huggingface/tasks - npm Package Compare versions

Comparing version 0.3.3 to 0.3.4

2

package.json
{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
"version": "0.3.4",
"description": "List of ML tasks for huggingface.co/tasks",

@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -83,3 +83,3 @@ import type { PipelineType } from "./pipelines";

/**
* Parameters that will be used by the widget when calling Inference API (serverless)
* https://huggingface.co/docs/api-inference/detailed_parameters

@@ -86,0 +86,0 @@ *

@@ -551,2 +551,40 @@ import type { ModelData } from "./model-data";

/**
 * Builds the Python code snippet for generating music with a MusicGen checkpoint.
 * Returns a single-element array, matching the snippet-list convention of this module.
 */
const musicgen = (model: ModelData): string[] => {
	const code = `from audiocraft.models import MusicGen
model = MusicGen.get_pretrained("${model.id}")
descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
wav = model.generate(descriptions) # generates 3 samples.`;
	return [code];
};
/**
 * Builds the Python code snippet for generating audio with a MAGNeT checkpoint.
 * Returns a single-element array, matching the snippet-list convention of this module.
 */
const magnet = (model: ModelData): string[] => {
	const code = `from audiocraft.models import MAGNeT
model = MAGNeT.get_pretrained("${model.id}")
descriptions = ['disco beat', 'energetic EDM', 'funky groove']
wav = model.generate(descriptions) # generates 3 samples.`;
	return [code];
};
/**
 * Builds the Python code snippet for generating sound effects with an AudioGen checkpoint.
 * Returns a single-element array, matching the snippet-list convention of this module.
 */
const audiogen = (model: ModelData): string[] => {
	const code = `from audiocraft.models import AudioGen
model = AudioGen.get_pretrained("${model.id}")
model.set_generation_params(duration=5) # generate 5 seconds.
descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor']
wav = model.generate(descriptions) # generates 3 samples.`;
	return [code];
};
/**
 * Dispatches to the right Audiocraft snippet builder based on the model's tags.
 * Checked in order: musicgen, audiogen, magnet; falls back to a placeholder
 * comment when none of the known tags is present.
 */
export const audiocraft = (model: ModelData): string[] => {
	const tags = model.tags;
	if (tags?.includes("musicgen")) {
		return musicgen(model);
	}
	if (tags?.includes("audiogen")) {
		return audiogen(model);
	}
	if (tags?.includes("magnet")) {
		return magnet(model);
	}
	return [`# Type of model unknown.`];
};
//#endregion

@@ -91,2 +91,9 @@ import * as snippets from "./model-libraries-snippets";

},
audiocraft: {
prettyLabel: "Audiocraft",
repoName: "audiocraft",
repoUrl: "https://github.com/facebookresearch/audiocraft",
snippets: snippets.audiocraft,
filter: false,
},
bertopic: {

@@ -93,0 +100,0 @@ prettyLabel: "BERTopic",

@@ -593,3 +593,3 @@ export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;

"image-text-to-text": {
name: "Image + Text to Text (VLLMs)",
name: "Image-Text-to-Text",
modality: "multimodal",

@@ -596,0 +596,0 @@ color: "red",

@@ -39,2 +39,60 @@ import type { PipelineType } from "../pipelines";

// Barrel re-exporting the generated per-task inference types.
// NOTE(review): some tasks use wildcard re-exports while others enumerate
// named exports — presumably the named lists avoid symbol collisions between
// tasks that share generated type names; confirm before converting any of
// these to `export type *`.
export type * from "./audio-classification/inference";
export type * from "./automatic-speech-recognition/inference";
export type * from "./document-question-answering/inference";
export type * from "./feature-extraction/inference";
export type * from "./fill-mask/inference";
export type {
ImageClassificationInput,
ImageClassificationOutput,
ImageClassificationOutputElement,
ImageClassificationParameters,
} from "./image-classification/inference";
export type * from "./image-to-image/inference";
export type { ImageToTextInput, ImageToTextOutput, ImageToTextParameters } from "./image-to-text/inference";
export type * from "./image-segmentation/inference";
export type * from "./object-detection/inference";
export type * from "./depth-estimation/inference";
export type * from "./question-answering/inference";
export type * from "./sentence-similarity/inference";
export type * from "./summarization/inference";
export type * from "./table-question-answering/inference";
export type { TextToImageInput, TextToImageOutput, TextToImageParameters } from "./text-to-image/inference";
export type { TextToAudioParameters, TextToSpeechInput, TextToSpeechOutput } from "./text-to-speech/inference";
export type * from "./token-classification/inference";
export type {
Text2TextGenerationParameters,
Text2TextGenerationTruncationStrategy,
TranslationInput,
TranslationOutput,
} from "./translation/inference";
export type {
ClassificationOutputTransform,
TextClassificationInput,
TextClassificationOutput,
TextClassificationOutputElement,
TextClassificationParameters,
} from "./text-classification/inference";
export type {
FinishReason,
PrefillToken,
TextGenerationInput,
TextGenerationOutput,
TextGenerationOutputDetails,
TextGenerationParameters,
TextGenerationSequenceDetails,
Token,
} from "./text-generation/inference";
export type * from "./video-classification/inference";
export type * from "./visual-question-answering/inference";
export type * from "./zero-shot-classification/inference";
export type * from "./zero-shot-image-classification/inference";
export type {
BoundingBox,
ZeroShotObjectDetectionInput,
ZeroShotObjectDetectionInputData,
ZeroShotObjectDetectionOutput,
ZeroShotObjectDetectionOutputElement,
} from "./zero-shot-object-detection/inference";
import type { ModelLibraryKey } from "../model-libraries";

@@ -41,0 +99,0 @@

@@ -48,13 +48,10 @@ /**

/**
 * Outputs of inference for the Summarization task.
 */
export interface SummarizationOutput {
	/**
	 * The summarized text.
	 */
	summary_text: string;
	/**
	 * Extra, backend-specific properties that may accompany the result.
	 */
	[property: string]: unknown;
}
{
"$id": "/inference/schemas/summarization/output.json",
"$schema": "http://json-schema.org/draft-06/schema#",
"description": "Outputs of inference for the Summarization task",
"title": "SummarizationOutput",
"type": "object",
"properties": {
"summary_text": {
"type": "string",
"description": "The summarized text."
}
},
"required": ["summary_text"]
}

@@ -48,13 +48,10 @@ /**

/**
 * Outputs of inference for the Translation task.
 */
export interface TranslationOutput {
	/**
	 * The translated text.
	 */
	translation_text: string;
	/**
	 * Extra, backend-specific properties that may accompany the result.
	 */
	[property: string]: unknown;
}
{
"$id": "/inference/schemas/translation/output.json",
"$schema": "http://json-schema.org/draft-06/schema#",
"description": "Outputs of inference for the Translation task",
"title": "TranslationOutput",
"type": "object",
"properties": {
"translation_text": {
"type": "string",
"description": "The translated text."
}
},
"required": ["translation_text"]
}

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc