@huggingface/tasks
Comparing version 0.12.10 to 0.12.11
@@ -5,2 +5,3 @@ import type { PipelineType } from "../pipelines.js";
 export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
+export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
@@ -7,0 +8,0 @@ export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string;
@@ -5,2 +5,3 @@ import type { PipelineType } from "../pipelines.js";
 export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
+export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
@@ -7,0 +8,0 @@ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => string;
@@ -1,4 +1,5 @@
 import type { PipelineType } from "../pipelines.js";
 import type { ModelDataMinimal } from "./types.js";
 export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string) => string;
+export declare const snippetConversationalWithImage: (model: ModelDataMinimal, accessToken: string) => string;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => string;
@@ -5,0 +6,0 @@ export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => string;
@@ -7,3 +7,3 @@ import type { ModelData } from "../model-data";
  */
-export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
+export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags">;
 //# sourceMappingURL=types.d.ts.map
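For orientation, here is a minimal sketch of a ModelDataMinimal value under the widened pick. The model id is a placeholder, and it assumes the fields not listed (mask_token, library_name, config) are optional in ModelData:

import type { ModelDataMinimal } from "./types.js";

// Hypothetical model entry. The "conversational" tag is what the snippet
// helpers below now check, instead of config.tokenizer_config.chat_template.
const model: ModelDataMinimal = {
	id: "meta-llama/Llama-3.1-8B-Instruct",
	pipeline_tag: "text-generation",
	tags: ["conversational"],
};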
@@ -30,3 +30,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	/**
@@ -33,0 +33,0 @@ * Whether to output corresponding timestamps with the generated text
@@ -29,3 +29,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	/**
@@ -32,0 +32,0 @@ * The amount of maximum tokens to generate.
@@ -29,3 +29,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	[property: string]: unknown;
@@ -32,0 +32,0 @@ }
@@ -29,3 +29,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	[property: string]: unknown;
@@ -32,0 +32,0 @@ }
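Taken together, these hunks rename the nested generation options in the task parameter types from generate to generation_parameters. A minimal sketch of a parameters object after the rename; the temperature field is an assumption drawn from the referenced GenerationParameters definition, not from this diff:

// Before the rename: { generate: { ... } }
// After the rename:  { generation_parameters: { ... } }
const parameters = {
	generation_parameters: {
		temperature: 0.5, // assumed member of GenerationParameters
	},
};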
@@ -1,5 +1,5 @@
 {
 	"name": "@huggingface/tasks",
 	"packageManager": "pnpm@8.10.5",
-	"version": "0.12.10",
+	"version": "0.12.11",
 	"description": "List of ML tasks for huggingface.co/tasks",
@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",
@@ -13,3 +13,3 @@ import type { PipelineType } from "../pipelines.js";
 export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
-	if (model.config?.tokenizer_config?.chat_template) {
+	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
@@ -31,2 +31,28 @@ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
+export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
+	if (model.tags.includes("conversational")) {
+		// Conversational model detected, so we display a code snippet that features the Messages API
+		return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
+-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
+-H 'Content-Type: application/json' \\
+-d '{
+	"model": "${model.id}",
+	"messages": [
+		{
+			"role": "user",
+			"content": [
+				{"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
+				{"type": "text", "text": "Describe this image in one sentence."}
+			]
+		}
+	],
+	"max_tokens": 500,
+	"stream": false
+}'
+`;
+	} else {
+		return snippetBasic(model, accessToken);
+	}
+};
+
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
@@ -56,2 +82,3 @@ `curl https://api-inference.huggingface.co/models/${model.id} \\
 	"text-generation": snippetTextGeneration,
+	"image-text-to-text": snippetImageTextToTextGeneration,
 	"text2text-generation": snippetBasic,
@@ -58,0 +85,0 @@ "fill-mask": snippetBasic,
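A usage sketch of the new curl generator, assuming it is imported from this module; the model object and token are placeholders:

import { snippetImageTextToTextGeneration } from "./curl.js";

// With "conversational" in tags, this returns the Messages API curl snippet;
// without it, it falls back to snippetBasic.
const curl = snippetImageTextToTextGeneration(
	{ id: "example/vision-chat-model", pipeline_tag: "image-text-to-text", tags: ["conversational"] },
	"hf_xxx"
);
console.log(curl);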
@@ -27,3 +27,3 @@ import type { PipelineType } from "../pipelines.js";
 export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
-	if (model.config?.tokenizer_config?.chat_template) {
+	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
@@ -45,2 +45,31 @@ return `import { HfInference } from "@huggingface/inference";
 };
+
+export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
+	if (model.tags.includes("conversational")) {
+		// Conversational model detected, so we display a code snippet that features the Messages API
+		return `import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
+const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
+
+for await (const chunk of inference.chatCompletionStream({
+	model: "${model.id}",
+	messages: [
+		{
+			"role": "user",
+			"content": [
+				{"type": "image_url", "image_url": {"url": imageUrl}},
+				{"type": "text", "text": "Describe this image in one sentence."},
+			],
+		}
+	],
+	max_tokens: 500,
+})) {
+	process.stdout.write(chunk.choices[0]?.delta?.content || "");
+}`;
+	} else {
+		return snippetBasic(model, accessToken);
+	}
+};
+
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
@@ -161,2 +190,3 @@ `async function query(data) {
 	"text-generation": snippetTextGeneration,
+	"image-text-to-text": snippetImageTextToTextGeneration,
 	"text2text-generation": snippetBasic,
@@ -163,0 +193,0 @@ "fill-mask": snippetBasic,
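The record at the end of this hunk registers one generator per pipeline tag. A sketch of the typical lookup; the names snippetFor and generators are hypothetical, since the exported map's name is not visible in this diff:

const snippetFor = (model: ModelDataMinimal, accessToken: string): string => {
	// Partial map containing only the entries visible in this diff.
	const generators: Partial<Record<PipelineType, (m: ModelDataMinimal, t: string) => string>> = {
		"text-generation": snippetTextGeneration,
		"image-text-to-text": snippetImageTextToTextGeneration,
		"text2text-generation": snippetBasic,
	};
	const generate = model.pipeline_tag ? generators[model.pipeline_tag] : undefined;
	return generate ? generate(model, accessToken) : "";
};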
@@ -8,8 +8,6 @@ import type { PipelineType } from "../pipelines.js";
-client = InferenceClient(
-	"${model.id}",
-	token="${accessToken || "{API_TOKEN}"}",
-)
+client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
 
 for message in client.chat_completion(
+	model="${model.id}",
 	messages=[{"role": "user", "content": "What is the capital of France?"}],
@@ -21,2 +19,25 @@ max_tokens=500,
+export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): string =>
+	`from huggingface_hub import InferenceClient
+
+client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
+
+image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+
+for message in client.chat_completion(
+	model="${model.id}",
+	messages=[
+		{
+			"role": "user",
+			"content": [
+				{"type": "image_url", "image_url": {"url": image_url}},
+				{"type": "text", "text": "Describe this image in one sentence."},
+			],
+		}
+	],
+	max_tokens=500,
+	stream=True,
+):
+	print(message.choices[0].delta.content, end="")`;
+
 export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
@@ -158,5 +179,8 @@ `def query(payload):
 export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
-	if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
+	if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		return snippetConversational(model, accessToken);
+	} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
+		// Example sending an image to the Message API
+		return snippetConversationalWithImage(model, accessToken);
 	} else {
@@ -163,0 +187,0 @@ const body =
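A usage sketch of the dispatch above; both model objects are placeholders:

// image-text-to-text + "conversational" routes to snippetConversationalWithImage.
const visionSnippet = getPythonInferenceSnippet(
	{ id: "example/vision-chat-model", pipeline_tag: "image-text-to-text", tags: ["conversational"] },
	"hf_xxx"
);

// text-generation + "conversational" still routes to snippetConversational.
const chatSnippet = getPythonInferenceSnippet(
	{ id: "example/chat-model", pipeline_tag: "text-generation", tags: ["conversational"] },
	"hf_xxx"
);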
@@ -8,2 +8,5 @@ import type { ModelData } from "../model-data";
  */
-export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
+export type ModelDataMinimal = Pick<
+	ModelData,
+	"id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags"
+>;
@@ -32,3 +32,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	/**
@@ -35,0 +35,0 @@ * Whether to output corresponding timestamps with the generated text
@@ -27,3 +27,3 @@ {
 		},
-		"generate": {
+		"generation_parameters": {
 			"description": "Parametrization of the text generation process",
@@ -30,0 +30,0 @@ "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
@@ -31,3 +31,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	/**
@@ -34,0 +34,0 @@ * The amount of maximum tokens to generate.
@@ -26,3 +26,3 @@ {
 		},
-		"generate": {
+		"generation_parameters": {
 			"description": "Parametrization of the text generation process",
@@ -29,0 +29,0 @@ "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
@@ -31,3 +31,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	[property: string]: unknown;
@@ -34,0 +34,0 @@ }
@@ -23,3 +23,3 @@ {
 	"properties": {
-		"generate": {
+		"generation_parameters": {
 			"description": "Parametrization of the text generation process",
@@ -26,0 +26,0 @@ "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
@@ -31,3 +31,3 @@ /**
 	 */
-	generate?: GenerationParameters;
+	generation_parameters?: GenerationParameters;
 	[property: string]: unknown;
@@ -34,0 +34,0 @@ }
@@ -23,3 +23,3 @@ {
 	"properties": {
-		"generate": {
+		"generation_parameters": {
 			"description": "Parametrization of the text generation process",
@@ -26,0 +26,0 @@ "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
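Reading the schema and type hunks together, a sketch of the parameter shape they now describe; GenerationParameters is left open here because its members live in common-definitions.json and are not spelled out in this diff:

// Assumed stand-in for common-definitions.json#/definitions/GenerationParameters.
type GenerationParameters = Record<string, unknown>;

interface TaskParametersAfterRename {
	// Parametrization of the text generation process (previously `generate`).
	generation_parameters?: GenerationParameters;
	[property: string]: unknown;
}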