@huggingface/tasks
Comparing version 0.3.1 to 0.3.2
@@ -107,11 +107,2 @@ declare const MODALITIES: readonly ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
     };
-    conversational: {
-        name: string;
-        subtasks: {
-            type: string;
-            name: string;
-        }[];
-        modality: "nlp";
-        color: "green";
-    };
     "feature-extraction": {
@@ -396,2 +387,7 @@ name: string;
     };
+    "image-feature-extraction": {
+        name: string;
+        modality: "cv";
+        color: "indigo";
+    };
     other: {
@@ -406,5 +402,6 @@ name: string;
 type PipelineType = keyof typeof PIPELINE_DATA;
-declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d")[];
+type WidgetType = PipelineType | "conversational";
+declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction")[];
 declare const SUBTASK_TYPES: string[];
-declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d">;
+declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction">;
@@ -489,2 +486,17 @@ /**
+declare const SPECIAL_TOKENS_ATTRIBUTES: readonly ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token"];
+/**
+ * Public interface for a tokenizer's special tokens mapping
+ */
+type SpecialTokensMap = {
+    [key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string;
+};
+/**
+ * Public interface for tokenizer config
+ */
+interface TokenizerConfig extends SpecialTokensMap {
+    use_default_system_prompt?: boolean;
+    chat_template?: string;
+}
 declare enum InferenceDisplayability {
@@ -544,2 +556,3 @@ /**
 };
+tokenizer?: TokenizerConfig;
 };
@@ -1058,3 +1071,3 @@ /**
-type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
+type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;
 declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;
@@ -1227,2 +1240,2 @@
-export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, index as snippets };
+export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SPECIAL_TOKENS_ATTRIBUTES, SUBTASK_TYPES, SpecialTokensMap, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TokenizerConfig, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetType, index as snippets };
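Taken together, the typings diff above shows the three headline changes in 0.3.2: `conversational` leaves `PipelineType` but survives as the widget-only `WidgetType`, `image-feature-extraction` joins the pipeline unions, and the tokenizer types become public exports. A minimal consumer-side sketch, assuming `@huggingface/tasks@0.3.2` is installed (the chat template string is a placeholder, not a real template):

```ts
import {
    PIPELINE_TYPES_SET,
    type TokenizerConfig,
    type WidgetType,
} from "@huggingface/tasks";

// "conversational" remains a valid widget type...
const widget: WidgetType = "conversational";
// ...but it is no longer a PipelineType, so this would now fail to compile:
// const pipeline: PipelineType = "conversational";

// The newly exported TokenizerConfig covers the special tokens
// plus the chat-template fields added in this release.
const tokenizerConfig: TokenizerConfig = {
    bos_token: "<s>",
    eos_token: "</s>",
    chat_template: "{{ messages }}", // placeholder value
};

console.log(widget, tokenizerConfig.eos_token);
console.log(PIPELINE_TYPES_SET.has("image-feature-extraction")); // true in 0.3.2
```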
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.3.1",
+  "version": "0.3.2",
   "description": "List of ML tasks for huggingface.co/tasks",
@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",
 import type { WidgetExample } from "./widget-example";
-import type { PipelineType } from "./pipelines";
+import type { WidgetType } from "./pipelines";
 type LanguageCode = string;
-type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
+type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;
@@ -8,0 +8,0 @@ /// NOTE TO CONTRIBUTORS:
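Because `PerLanguageMapping` is now keyed by `WidgetType` rather than `PipelineType`, per-language default-widget tables can keep their `conversational` entries even though the pipeline type is gone. A sketch of a mapping in that shape (the prompts here are invented examples):

```ts
import type { WidgetType } from "@huggingface/tasks";

// Example-only table in the shape of PerLanguageMapping
// (Map<WidgetType, string[] | WidgetExample[]>); prompts are made up.
const englishDefaults = new Map<WidgetType, string[]>([
    ["conversational", ["Hi there! What can you do?"]],
    ["text-generation", ["Once upon a time,"]],
]);

console.log(englishDefaults.get("conversational"));
```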
@@ -8,2 +8,3 @@ export { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "./library-to-tasks";
     PIPELINE_TYPES,
+    type WidgetType,
     type PipelineType,
@@ -20,2 +21,3 @@ type PipelineData,
 export type { ModelData, TransformersInfo } from "./model-data";
+export type { SpecialTokensMap, TokenizerConfig } from "./tokenizer-data";
 export type {
@@ -42,4 +44,5 @@ WidgetExample,
 export { InferenceDisplayability } from "./model-data";
+export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data";
 import * as snippets from "./snippets";
 export { snippets };
@@ -30,3 +30,3 @@ import type { ModelLibraryKey } from "./model-libraries";
     open_clip: ["zero-shot-classification", "zero-shot-image-classification"],
-    paddlenlp: ["conversational", "fill-mask", "summarization", "zero-shot-classification"],
+    paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"],
     peft: ["text-generation"],
@@ -33,0 +33,0 @@ "pyannote-audio": ["automatic-speech-recognition"],
 import type { PipelineType } from "./pipelines";
 import type { WidgetExample } from "./widget-example";
+import type { TokenizerConfig } from "./tokenizer-data";
@@ -56,2 +57,3 @@ export enum InferenceDisplayability {
 };
+tokenizer?: TokenizerConfig;
 };
@@ -58,0 +60,0 @@ /**
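The practical effect of this hunk is that a model's metadata can now carry a typed tokenizer entry. A sketch of a payload fragment; the exact nesting under `config` is an assumption read off the surrounding `};` context lines, and the repo id is hypothetical:

```ts
import type { ModelData } from "@huggingface/tasks";

// Hypothetical partial payload; only the fields relevant to this diff are shown,
// and the placement of `tokenizer` under `config` is assumed, not confirmed here.
const model = {
    id: "some-org/some-chat-model", // hypothetical repo id
    config: {
        tokenizer: {
            eos_token: "</s>",
            use_default_system_prompt: false,
            chat_template: "{{ messages }}", // placeholder value
        },
    },
} satisfies Partial<ModelData>;

console.log(model.config?.tokenizer?.eos_token);
```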
@@ -228,13 +228,2 @@ export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;
     },
-    conversational: {
-        name: "Conversational",
-        subtasks: [
-            {
-                type: "dialogue-generation",
-                name: "Dialogue Generation",
-            },
-        ],
-        modality: "nlp",
-        color: "green",
-    },
     "feature-extraction": {
@@ -253,2 +242,10 @@ name: "Feature Extraction",
         {
+            type: "dialogue-generation",
+            name: "Dialogue Generation",
+        },
+        {
+            type: "conversational",
+            name: "Conversational",
+        },
+        {
             type: "language-modeling",
@@ -598,3 +595,3 @@ name: "Language Modeling",
     "image-text-to-text": {
-        name: "Image + Text to Image (VLLMs)",
+        name: "Image + Text to Text (VLLMs)",
         modality: "multimodal",
@@ -657,2 +654,7 @@ color: "red",
     },
+    "image-feature-extraction": {
+        name: "Image Feature Extraction",
+        modality: "cv",
+        color: "indigo",
+    },
     other: {
@@ -669,2 +671,4 @@ name: "Other",
+export type WidgetType = PipelineType | "conversational";
 export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];
@@ -671,0 +675,0 @@
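`WidgetType` widens `PipelineType` by exactly one member, so narrowing an untrusted string takes one extra check. A hypothetical helper, not part of the package:

```ts
import { PIPELINE_TYPES_SET, type PipelineType, type WidgetType } from "@huggingface/tasks";

// Hypothetical type guard: accepts every pipeline type plus the
// widget-only "conversational" value.
function isWidgetType(value: string): value is WidgetType {
    return value === "conversational" || PIPELINE_TYPES_SET.has(value as PipelineType);
}

console.log(isWidgetType("conversational")); // true
console.log(isWidgetType("not-a-task"));     // false
```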
@@ -37,3 +37,2 @@ import type { ModelData } from "../model-data.js";
     summarization: snippetBasic,
-    conversational: snippetBasic,
     "feature-extraction": snippetBasic,
@@ -40,0 +39,0 @@ "text-generation": snippetBasic,
@@ -12,9 +12,2 @@ import type { ModelData } from "../model-data";
-const inputsConversational = () =>
-    `{
-        "past_user_inputs": ["Which movie is the best ?"],
-        "generated_responses": ["It is Die Hard for sure."],
-        "text": "Can you explain why ?"
-    }`;
 const inputsTableQuestionAnswering = () =>
@@ -100,3 +93,2 @@ `{
     "automatic-speech-recognition": inputsAutomaticSpeechRecognition,
-    conversational: inputsConversational,
     "document-question-answering": inputsVisualQuestionAnswering,
@@ -103,0 +95,0 @@ "feature-extraction": inputsFeatureExtraction,
@@ -124,3 +124,2 @@ import type { ModelData } from "../model-data.js";
     summarization: snippetBasic,
-    conversational: snippetBasic,
     "feature-extraction": snippetBasic,
@@ -127,0 +126,0 @@ "text-generation": snippetBasic,
@@ -119,3 +119,2 @@ import type { ModelData } from "../model-data.js";
     summarization: snippetBasic,
-    conversational: snippetBasic,
     "feature-extraction": snippetBasic,
@@ -122,0 +121,0 @@ "text-generation": snippetBasic,
@@ -1,2 +1,3 @@
-import { type PipelineType, PIPELINE_DATA } from "../pipelines";
+import type { PipelineType } from "../pipelines";
+import { PIPELINE_DATA } from "../pipelines";
@@ -6,3 +7,2 @@ import audioClassification from "./audio-classification/data";
 import automaticSpeechRecognition from "./automatic-speech-recognition/data";
-import conversational from "./conversational/data";
 import documentQuestionAnswering from "./document-question-answering/data";
@@ -49,3 +49,2 @@ import featureExtraction from "./feature-extraction/data";
     "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
-    conversational: ["transformers"],
     "depth-estimation": ["transformers", "transformers.js"],
@@ -57,2 +56,3 @@ "document-question-answering": ["transformers", "transformers.js"],
     "image-classification": ["keras", "timm", "transformers", "transformers.js"],
+    "image-feature-extraction": ["timm", "transformers"],
     "image-segmentation": ["transformers", "transformers.js"],
@@ -129,3 +129,2 @@ "image-text-to-text": ["transformers"],
     "automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition),
-    conversational: getData("conversational", conversational),
     "depth-estimation": getData("depth-estimation", depthEstimation),
@@ -176,2 +175,3 @@ "document-question-answering": getData("document-question-answering", documentQuestionAnswering),
     "image-to-3d": getData("image-to-3d", placeholder),
+    "image-feature-extraction": getData("image-feature-extraction", placeholder),
 } as const;
@@ -178,0 +178,0 @@
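Downstream code that looked up `TASKS_DATA.conversational` loses that entry in 0.3.2. One possible migration sketch; the fall-back to `text-generation` is an illustrative choice, not something this diff prescribes:

```ts
import { TASKS_DATA, type PipelineType, type WidgetType } from "@huggingface/tasks";

// "conversational" no longer has a TASKS_DATA entry, so map it to the
// closest remaining pipeline type before looking up task data.
function taskDataFor(widget: WidgetType) {
    const key: PipelineType = widget === "conversational" ? "text-generation" : widget;
    return TASKS_DATA[key];
}

console.log(taskDataFor("conversational")?.id); // resolves via "text-generation"
```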