@huggingface/tasks - npm package version comparison

Comparing version 0.12.26 to 0.12.27


dist/src/snippets/curl.d.ts

@@ -12,3 +12,2 @@ import type { PipelineType } from "../pipelines.js";

 }) => InferenceSnippet;
-export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;

@@ -15,0 +14,0 @@ export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;

@@ -12,3 +12,2 @@ import type { PipelineType } from "../pipelines.js";

 }) => InferenceSnippet | InferenceSnippet[];
-export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;

@@ -15,0 +14,0 @@ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;

@@ -11,3 +11,2 @@ import type { PipelineType } from "../pipelines.js";

 }) => InferenceSnippet[];
-export declare const snippetConversationalWithImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => InferenceSnippet;

@@ -14,0 +13,0 @@ export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet;


package.json
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.12.26",
+  "version": "0.12.27",
   "description": "List of ML tasks for huggingface.co/tasks",

@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -29,5 +29,20 @@ import type { PipelineType } from "../pipelines.js";

 	const streaming = opts?.streaming ?? true;
-	const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
-		{ role: "user", content: "What is the capital of France?" },
-	];
+	const exampleMessages: ChatCompletionInputMessage[] =
+		model.pipeline_tag === "text-generation"
+			? [{ role: "user", content: "What is the capital of France?" }]
+			: [
+				{
+					role: "user",
+					content: [
+						{
+							type: "image_url",
+							image_url: {
+								url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+							},
+						},
+						{ type: "text", text: "Describe this image in one sentence." },
+					],
+				},
+			];
+	const messages = opts?.messages ?? exampleMessages;

@@ -67,30 +82,2 @@ const config = {

-export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
-if (model.tags.includes("conversational")) {
-// Conversational model detected, so we display a code snippet that features the Messages API
-return {
-content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
--H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
--H 'Content-Type: application/json' \\
--d '{
-"model": "${model.id}",
-"messages": [
-{
-"role": "user",
-"content": [
-{"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
-{"type": "text", "text": "Describe this image in one sentence."}
-]
-}
-],
-"max_tokens": 500,
-"stream": false
-}'
-`,
-};
-} else {
-return snippetBasic(model, accessToken);
-}
-};
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({

@@ -127,3 +114,3 @@ content: `curl https://api-inference.huggingface.co/models/${model.id} \\

"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"image-text-to-text": snippetTextGeneration,
"text2text-generation": snippetBasic,

@@ -130,0 +117,0 @@ "fill-mask": snippetBasic,
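
Taken together, the hunks above (from the curl snippet module; the JS and Python modules below receive the same treatment) drop the dedicated snippetImageTextToTextGeneration generator and route the "image-text-to-text" pipeline through snippetTextGeneration, whose default prompt now depends on the model's pipeline_tag. A minimal standalone sketch of that selection logic, with ChatCompletionInputMessage approximated by a local type (an illustration, not the package's actual code):

// Simplified stand-in for ChatCompletionInputMessage; illustration only.
type ChatMessage = {
	role: "user";
	content:
		| string
		| Array<{ type: "text"; text: string } | { type: "image_url"; image_url: { url: string } }>;
};

// Default example prompt, mirroring the pipeline_tag check added in the hunks above.
function defaultExampleMessages(pipelineTag: string | undefined): ChatMessage[] {
	// Plain text prompt for text-generation models...
	if (pipelineTag === "text-generation") {
		return [{ role: "user", content: "What is the capital of France?" }];
	}
	// ...and an image + text prompt otherwise (i.e. image-text-to-text).
	return [
		{
			role: "user",
			content: [
				{
					type: "image_url",
					image_url: {
						url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
					},
				},
				{ type: "text", text: "Describe this image in one sentence." },
			],
		},
	];
}

// The generators only use this as a fallback; callers that pass opts.messages are unaffected:
// const messages = opts?.messages ?? defaultExampleMessages(model.pipeline_tag);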

@@ -43,5 +43,20 @@ import type { PipelineType } from "../pipelines.js";

 	const streaming = opts?.streaming ?? true;
-	const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
-		{ role: "user", content: "What is the capital of France?" },
-	];
+	const exampleMessages: ChatCompletionInputMessage[] =
+		model.pipeline_tag === "text-generation"
+			? [{ role: "user", content: "What is the capital of France?" }]
+			: [
+				{
+					role: "user",
+					content: [
+						{
+							type: "image_url",
+							image_url: {
+								url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+							},
+						},
+						{ type: "text", text: "Describe this image in one sentence." },
+					],
+				},
+			];
+	const messages = opts?.messages ?? exampleMessages;
 	const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });

@@ -152,32 +167,2 @@

-export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
-if (model.tags.includes("conversational")) {
-// Conversational model detected, so we display a code snippet that features the Messages API
-return {
-content: `import { HfInference } from "@huggingface/inference";
-const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
-const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-for await (const chunk of inference.chatCompletionStream({
-model: "${model.id}",
-messages: [
-{
-"role": "user",
-"content": [
-{"type": "image_url", "image_url": {"url": imageUrl}},
-{"type": "text", "text": "Describe this image in one sentence."},
-],
-}
-],
-max_tokens: 500,
-})) {
-process.stdout.write(chunk.choices[0]?.delta?.content || "");
-}`,
-};
-} else {
-return snippetBasic(model, accessToken);
-}
-};
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({

@@ -312,3 +297,3 @@ content: `async function query(data) {

"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"image-text-to-text": snippetTextGeneration,
"text2text-generation": snippetBasic,

@@ -315,0 +300,0 @@ "fill-mask": snippetBasic,

@@ -19,5 +19,20 @@ import type { PipelineType } from "../pipelines.js";

 	const streaming = opts?.streaming ?? true;
-	const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
-		{ role: "user", content: "What is the capital of France?" },
-	];
+	const exampleMessages: ChatCompletionInputMessage[] =
+		model.pipeline_tag === "text-generation"
+			? [{ role: "user", content: "What is the capital of France?" }]
+			: [
+				{
+					role: "user",
+					content: [
+						{
+							type: "image_url",
+							image_url: {
+								url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+							},
+						},
+						{ type: "text", text: "Describe this image in one sentence." },
+					],
+				},
+			];
+	const messages = opts?.messages ?? exampleMessages;
 	const messagesStr = stringifyMessages(messages, {

@@ -125,26 +140,2 @@ sep: ",\n\t",

-export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
-content: `from huggingface_hub import InferenceClient
-client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-for message in client.chat_completion(
-model="${model.id}",
-messages=[
-{
-"role": "user",
-"content": [
-{"type": "image_url", "image_url": {"url": image_url}},
-{"type": "text", "text": "Describe this image in one sentence."},
-],
-}
-],
-max_tokens=500,
-stream=True,
-):
-print(message.choices[0].delta.content, end="")`,
-});
 export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet => ({

@@ -287,3 +278,3 @@ content: `def query(payload):

"text2text-generation": snippetBasic,
"image-text-to-text": snippetConversationalWithImage,
"image-text-to-text": snippetConversational,
"fill-mask": snippetBasic,

@@ -312,8 +303,5 @@ "sentence-similarity": snippetBasic,

 ): InferenceSnippet | InferenceSnippet[] {
-	if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
+	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		return snippetConversational(model, accessToken, opts);
-	} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
-		// Example sending an image to the Message API
-		return snippetConversationalWithImage(model, accessToken);
 	} else {

@@ -320,0 +308,0 @@ let snippets =
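
The final hunk simplifies the dispatch: any model tagged "conversational" now goes through snippetConversational, whether its pipeline is text-generation or image-text-to-text, and the separate snippetConversationalWithImage branch disappears. A diff-free sketch of that branching with stubbed types and helpers (the wrapper name pickInferenceSnippet is illustrative; in the package this logic sits inside the module's snippet-dispatch function):

// Minimal stand-ins for the package's types; illustration only.
type InferenceSnippet = { content: string };
type ModelDataMinimal = { id: string; pipeline_tag?: string; tags: string[] };
type SnippetOpts = { streaming?: boolean; messages?: unknown[] };

// Stub standing in for the real snippetConversational generator.
function snippetConversational(model: ModelDataMinimal, accessToken: string, opts?: SnippetOpts): InferenceSnippet {
	return { content: `# Messages API snippet for ${model.id}` };
}

function pickInferenceSnippet(
	model: ModelDataMinimal,
	accessToken: string,
	opts?: SnippetOpts
): InferenceSnippet | InferenceSnippet[] {
	// Before: the Messages API branch required pipeline_tag === "text-generation", and
	// image-text-to-text models were handled by snippetConversationalWithImage.
	// After: the "conversational" tag alone selects the Messages API snippet.
	if (model.tags.includes("conversational")) {
		return snippetConversational(model, accessToken, opts);
	}
	// Non-conversational models keep their per-pipeline snippets (snippetBasic, etc.).
	return { content: "# per-pipeline snippet" };
}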

Diffs for five further files in this release are not shown (four are not yet supported by the diff viewer, one is too large to display).
