@huggingface/tasks - npm Package Compare versions

Comparing version 0.13.0 to 0.13.1


dist/src/snippets/curl.d.ts

@@ -15,4 +15,4 @@ import type { PipelineType } from "../pipelines.js";
 export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
-export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet;
+export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet;
 export declare function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean;
 //# sourceMappingURL=curl.d.ts.map

dist/src/snippets/js.d.ts

@@ -17,4 +17,4 @@ import type { PipelineType } from "../pipelines.js";
 export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet | InferenceSnippet[]>>;
-export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet | InferenceSnippet[];
+export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet | InferenceSnippet[];
 export declare function hasJsInferenceSnippet(model: ModelDataMinimal): boolean;
 //# sourceMappingURL=js.d.ts.map
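
The declaration diffs above show the headline change in 0.13.1: the snippet getters gain an optional third `opts` argument. A minimal sketch of how a caller might use it, assuming the same relative imports and model shape as the spec files further down (the `{ streaming: false }` key is the only option exercised by those specs):

import type { InferenceSnippet, ModelDataMinimal } from "./types";
import { getCurlInferenceSnippet } from "./curl";

// Same minimal model description used in the spec files below.
const model: ModelDataMinimal = {
  id: "meta-llama/Llama-3.1-8B-Instruct",
  pipeline_tag: "text-generation",
  tags: ["conversational"],
  inference: "",
};

// Omitting opts keeps the previous behaviour (the specs suggest a streaming
// chat-completion snippet); passing { streaming: false } selects the
// non-streaming variant.
const streamingSnippet: InferenceSnippet = getCurlInferenceSnippet(model, "api_token");
const nonStreamingSnippet: InferenceSnippet = getCurlInferenceSnippet(model, "api_token", { streaming: false });
console.log(nonStreamingSnippet.content);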

package.json

 {
 "name": "@huggingface/tasks",
 "packageManager": "pnpm@8.10.5",
-"version": "0.13.0",
+"version": "0.13.1",
 "description": "List of ML tasks for huggingface.co/tasks",
@@ -6,0 +6,0 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

@@ -483,3 +483,3 @@ import * as snippets from "./model-libraries-snippets";
 repoUrl: "https://github.com/etched-ai/open-oasis",
-countDownloads: `path:"oasis500m.pt"`,
+countDownloads: `path:"oasis500m.safetensors"`,
 },
@@ -486,0 +486,0 @@ open_clip: {

 import type { ModelDataMinimal } from "./types";
 import { describe, expect, it } from "vitest";
-import { snippetTextGeneration } from "./curl";
+import { getCurlInferenceSnippet } from "./curl";
@@ -13,3 +13,3 @@ describe("inference API snippets", () => {
 };
-const snippet = snippetTextGeneration(model, "api_token");
+const snippet = getCurlInferenceSnippet(model, "api_token");
@@ -33,2 +33,28 @@ expect(snippet.content)
+it("conversational llm non-streaming", async () => {
+const model: ModelDataMinimal = {
+id: "meta-llama/Llama-3.1-8B-Instruct",
+pipeline_tag: "text-generation",
+tags: ["conversational"],
+inference: "",
+};
+const snippet = getCurlInferenceSnippet(model, "api_token", { streaming: false });
+expect(snippet.content)
+.toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
+-H "Authorization: Bearer api_token" \\
+-H 'Content-Type: application/json' \\
+--data '{
+"model": "meta-llama/Llama-3.1-8B-Instruct",
+"messages": [
+{
+"role": "user",
+"content": "What is the capital of France?"
+}
+],
+"max_tokens": 500,
+"stream": false
+}'`);
+});
 it("conversational vlm", async () => {
@@ -41,3 +67,3 @@ const model: ModelDataMinimal = {
 };
-const snippet = snippetTextGeneration(model, "api_token");
+const snippet = getCurlInferenceSnippet(model, "api_token");
@@ -44,0 +70,0 @@ expect(snippet.content)

@@ -108,5 +108,9 @@ import type { PipelineType } from "../pipelines.js";
-export function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet {
+export function getCurlInferenceSnippet(
+model: ModelDataMinimal,
+accessToken: string,
+opts?: Record<string, unknown>
+): InferenceSnippet {
 return model.pipeline_tag && model.pipeline_tag in curlSnippets
-? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" }
+? curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
 : { content: "" };
@@ -113,0 +117,0 @@ }
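
As the hunk above shows, getCurlInferenceSnippet now forwards `opts` to the per-pipeline generator and still falls back to `{ content: "" }` when no generator is registered for the model's pipeline_tag. A caller that wants to distinguish "unsupported pipeline" from "empty snippet" can guard with hasCurlInferenceSnippet first; a small illustrative sketch (curlCommandFor is a hypothetical helper name, not part of the package):

import type { ModelDataMinimal } from "./types";
import { getCurlInferenceSnippet, hasCurlInferenceSnippet } from "./curl";

// Hypothetical helper: returns undefined when no curl generator exists for the
// model's pipeline_tag, otherwise the generated curl command text.
function curlCommandFor(model: ModelDataMinimal, accessToken: string, streaming = true): string | undefined {
  if (!hasCurlInferenceSnippet(model)) {
    return undefined;
  }
  return getCurlInferenceSnippet(model, accessToken, { streaming }).content;
}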

 import type { InferenceSnippet, ModelDataMinimal } from "./types";
 import { describe, expect, it } from "vitest";
-import { snippetTextGeneration } from "./js";
+import { getJsInferenceSnippet } from "./js";
@@ -13,3 +13,3 @@ describe("inference API snippets", () => {
 };
-const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
@@ -42,2 +42,29 @@ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+it("conversational llm non-streaming", async () => {
+const model: ModelDataMinimal = {
+id: "meta-llama/Llama-3.1-8B-Instruct",
+pipeline_tag: "text-generation",
+tags: ["conversational"],
+inference: "",
+};
+const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+const client = new HfInference("api_token")
+const chatCompletion = await client.chatCompletion({
+model: "meta-llama/Llama-3.1-8B-Instruct",
+messages: [
+{
+role: "user",
+content: "What is the capital of France?"
+}
+],
+max_tokens: 500
+});
+console.log(chatCompletion.choices[0].message);`);
+});
 it("conversational vlm", async () => {
@@ -50,3 +77,3 @@ const model: ModelDataMinimal = {
 };
-const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
+const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
@@ -89,2 +116,37 @@ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
 });
+it("conversational llm", async () => {
+const model: ModelDataMinimal = {
+id: "meta-llama/Llama-3.1-8B-Instruct",
+pipeline_tag: "text-generation",
+tags: ["conversational"],
+inference: "",
+};
+const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+const client = new HfInference("api_token")
+let out = "";
+const stream = client.chatCompletionStream({
+model: "meta-llama/Llama-3.1-8B-Instruct",
+messages: [
+{
+role: "user",
+content: "What is the capital of France?"
+}
+],
+max_tokens: 500
+});
+for await (const chunk of stream) {
+if (chunk.choices && chunk.choices.length > 0) {
+const newContent = chunk.choices[0].delta.content;
+out += newContent;
+console.log(newContent);
+}
+}`);
+});
 });

@@ -112,3 +112,3 @@ import type { PipelineType } from "../pipelines.js";
 client: "huggingface.js",
-content: `import { HfInference } from '@huggingface/inference'
+content: `import { HfInference } from "@huggingface/inference"
@@ -296,6 +296,7 @@ const client = new HfInference("${accessToken || `{API_TOKEN}`}")
 model: ModelDataMinimal,
-accessToken: string
+accessToken: string,
+opts?: Record<string, unknown>
 ): InferenceSnippet | InferenceSnippet[] {
 return model.pipeline_tag && model.pipeline_tag in jsSnippets
-? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" }
+? jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
 : { content: "" };
@@ -302,0 +303,0 @@ }

@@ -1,4 +0,4 @@
-import type { ModelDataMinimal } from "./types";
+import type { InferenceSnippet, ModelDataMinimal } from "./types";
 import { describe, expect, it } from "vitest";
-import { snippetConversational } from "./python";
+import { getPythonInferenceSnippet } from "./python";
@@ -13,3 +13,3 @@ describe("inference API snippets", () => {
 };
-const snippet = snippetConversational(model, "api_token");
+const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];
@@ -38,2 +38,31 @@ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
+it("conversational llm non-streaming", async () => {
+const model: ModelDataMinimal = {
+id: "meta-llama/Llama-3.1-8B-Instruct",
+pipeline_tag: "text-generation",
+tags: ["conversational"],
+inference: "",
+};
+const snippet = getPythonInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
+expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
+client = InferenceClient(api_key="api_token")
+messages = [
+{
+"role": "user",
+"content": "What is the capital of France?"
+}
+]
+completion = client.chat.completions.create(
+model="meta-llama/Llama-3.1-8B-Instruct",
+messages=messages,
+max_tokens=500
+)
+print(completion.choices[0].message)`);
+});
 it("conversational vlm", async () => {
@@ -46,3 +75,3 @@ const model: ModelDataMinimal = {
 };
-const snippet = snippetConversational(model, "api_token");
+const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];
@@ -49,0 +78,0 @@ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
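
Only the Python spec changes are shown here (the python.ts implementation diff is not displayed), but the updated tests imply that getPythonInferenceSnippet accepts the same opts bag and returns an array of snippets. A sketch mirroring the usage in the spec above:

import type { InferenceSnippet, ModelDataMinimal } from "./types";
import { getPythonInferenceSnippet } from "./python";

const model: ModelDataMinimal = {
  id: "meta-llama/Llama-3.1-8B-Instruct",
  pipeline_tag: "text-generation",
  tags: ["conversational"],
  inference: "",
};

// The spec casts the result to InferenceSnippet[], so iterate and print each
// generated Python snippet (non-streaming variant here).
const pythonSnippets = getPythonInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
for (const snippet of pythonSnippets) {
  console.log(snippet.content);
}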

Sorry, the diffs of the remaining changed files are either not supported yet or too big to display.
