@huggingface/tasks - npm Package Compare versions

Comparing version 0.6.0 to 0.6.1

src/tasks/chat-completion/inference.ts

Sorry, the diff of this file is not supported yet

package.json

 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.6.0",
+  "version": "0.6.1",
   "description": "List of ML tasks for huggingface.co/tasks",

@@ -36,3 +36,3 @@ "repository": "https://github.com/huggingface/huggingface.js.git",

   "@types/node": "^20.11.5",
-  "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.15/packages/quicktype-core/quicktype-core-18.0.15.tgz"
+  "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.17/packages/quicktype-core/quicktype-core-18.0.17.tgz"
 },

@@ -39,0 +39,0 @@ "scripts": {

src/tasks/model-libraries-snippets.ts

@@ -144,3 +144,3 @@ import type { ModelData } from "./model-data";

 export const gliner = (model: ModelData): string[] => [
-  `from model import GLiNER
+  `from gliner import GLiNER

@@ -542,2 +542,8 @@ model = GLiNER.from_pretrained("${model.id}")`,

+export const mlxim = (model: ModelData): string[] => [
+  `from mlxim.model import create_model
+model = create_model(${model.id})`,
+];
 export const nemo = (model: ModelData): string[] => {

@@ -544,0 +550,0 @@ let command: string[] | undefined = undefined;
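
Each helper in model-libraries-snippets.ts maps a ModelData object to the Python snippet(s) shown on a model's Hub page. As a minimal sketch of what the corrected gliner helper and the new mlxim helper emit (the reduced ModelData type and the example model ids below are illustrative, not part of the diff):

// Sketch: re-creates the two helpers from this diff, with ModelData
// reduced to the single field they read.
type ModelData = { id: string };

const gliner = (model: ModelData): string[] => [
	`from gliner import GLiNER
model = GLiNER.from_pretrained("${model.id}")`,
];

const mlxim = (model: ModelData): string[] => [
	`from mlxim.model import create_model
model = create_model(${model.id})`,
];

// Each call returns the Python rendered in the model page's code widget:
console.log(gliner({ id: "some-org/some-gliner-model" })[0]);
console.log(mlxim({ id: "some-org/some-mlx-image-model" })[0]);

The 0.6.0 bug was confined to the import line: "from model import GLiNER" referenced a module name that does not exist in the gliner Python package.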

src/tasks/model-libraries.ts

@@ -170,2 +170,11 @@ import * as snippets from "./model-libraries-snippets";

   },
+  grok: {
+    prettyLabel: "Grok",
+    repoName: "Grok",
+    repoUrl: "https://github.com/xai-org/grok-1",
+    filter: false,
+    countDownloads: {
+      terms: { path: ["ckpt/tensor00000_000", "ckpt-0/tensor00000_000"] },
+    },
+  },
   keras: {

@@ -206,2 +215,11 @@ prettyLabel: "Keras",

   },
+  "mlx-image": {
+    prettyLabel: "mlx-image",
+    repoName: "mlx-image",
+    repoUrl: "https://github.com/riccardomusmeci/mlx-image",
+    docsUrl: "https://huggingface.co/docs/hub/mlx-image",
+    snippets: snippets.mlxim,
+    filter: false,
+    countDownloads: { term: { path: "model.safetensors" } },
+  },
   nemo: {

@@ -208,0 +226,0 @@ prettyLabel: "NeMo",
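
Both new library entries use countDownloads to tell the Hub which file requests should be counted as downloads: grok matches either of two checkpoint shard paths (a terms clause), while mlx-image counts fetches of model.safetensors (a term clause). A rough sketch of the Elasticsearch-style filter these objects could plug into; only the term/terms leaf shapes come from the diff, the wrapper is an assumption:

// Sketch: the two countDownloads rules as Elasticsearch-style leaf
// queries. term matches one exact path; terms matches any listed path.
type PathFilter =
	| { term: { path: string } }
	| { terms: { path: string[] } };

const grokDownloads: PathFilter = {
	terms: { path: ["ckpt/tensor00000_000", "ckpt-0/tensor00000_000"] },
};
const mlxImageDownloads: PathFilter = { term: { path: "model.safetensors" } };

// Hypothetical wrapper showing where such a filter would sit in a query.
const toQuery = (filter: PathFilter) => ({ bool: { filter: [filter] } });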

src/tasks/index.ts

@@ -41,2 +41,13 @@ import type { PipelineType } from "../pipelines";

 export type * from "./automatic-speech-recognition/inference";
+export type {
+  ChatCompletionInput,
+  ChatCompletionInputMessage,
+  ChatCompletionOutput,
+  ChatCompletionOutputChoice,
+  ChatCompletionFinishReason,
+  ChatCompletionOutputChoiceMessage,
+  ChatCompletionStreamOutput,
+  ChatCompletionStreamOutputChoice,
+  ChatCompletionStreamOutputDelta,
+} from "./chat-completion/inference";
 export type * from "./document-question-answering/inference";

@@ -77,4 +88,4 @@ export type * from "./feature-extraction/inference";

 export type {
-  FinishReason,
-  PrefillToken,
+  TextGenerationFinishReason,
+  TextGenerationPrefillToken,
   TextGenerationInput,

@@ -84,4 +95,4 @@ TextGenerationOutput,

   TextGenerationParameters,
-  TextGenerationSequenceDetails,
-  Token,
+  TextGenerationOutputSequenceDetails,
+  TextGenerationOutputToken,
 } from "./text-generation/inference";

@@ -88,0 +99,0 @@ export type * from "./video-classification/inference";
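
Taken together, these index.ts hunks are a breaking rename: the generic names FinishReason, PrefillToken, Token, and TextGenerationSequenceDetails gain TextGeneration/TextGenerationOutput prefixes, avoiding collisions now that other tasks (such as chat-completion above, with its own ChatCompletionFinishReason) export types from the same root. A migration sketch for downstream code:

// 0.6.0 imports that no longer resolve:
// import type { FinishReason, PrefillToken, Token } from "@huggingface/tasks";

// 0.6.1 equivalents. The shapes are largely unchanged, though as the
// inference.ts diff below shows, logprob on the token type is now
// optional and generated_text on sequence details is now a string.
import type {
	TextGenerationFinishReason,
	TextGenerationPrefillToken,
	TextGenerationOutputToken,
	TextGenerationOutputSequenceDetails,
} from "@huggingface/tasks";

const reason: TextGenerationFinishReason = "eos_token";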

src/tasks/text-generation/inference.ts

@@ -19,2 +19,6 @@ /**

   parameters?: TextGenerationParameters;
+  /**
+   * Whether to stream output tokens
+   */
+  stream?: boolean;
   [property: string]: unknown;
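
This optional stream flag is the request-side half of the streaming support added in this release. A minimal sketch of a streaming request body; inputs being the prompt field follows the generated TextGenerationInput type rather than this diff:

import type { TextGenerationInput } from "@huggingface/tasks";

// With stream: true a server is expected to reply with a sequence of
// TextGenerationStreamOutput chunks (added later in this file) rather
// than a single TextGenerationOutput.
const streamingRequest: TextGenerationInput = {
	inputs: "Once upon a time",
	stream: true,
};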

@@ -118,7 +122,7 @@ }

   */
-  best_of_sequences?: TextGenerationSequenceDetails[];
+  best_of_sequences?: TextGenerationOutputSequenceDetails[];
  /**
   * The reason why the generation was stopped.
   */
-  finish_reason: FinishReason;
+  finish_reason: TextGenerationFinishReason;
  /**

@@ -128,3 +132,3 @@ * The number of generated tokens

   generated_tokens: number;
-  prefill: PrefillToken[];
+  prefill: TextGenerationPrefillToken[];
  /**

@@ -137,15 +141,16 @@ * The random seed used for generation

   */
-  tokens: Token[];
+  tokens: TextGenerationOutputToken[];
+  /**
+   * Most likely tokens
+   */
+  top_tokens?: Array<TextGenerationOutputToken[]>;
   [property: string]: unknown;
 }
-export interface TextGenerationSequenceDetails {
-  /**
-   * The reason why the generation was stopped.
-   */
-  finish_reason: FinishReason;
+export interface TextGenerationOutputSequenceDetails {
+  finish_reason: TextGenerationFinishReason;
   /**
    * The generated text
    */
-  generated_text: number;
+  generated_text: string;
   /**

@@ -155,3 +160,3 @@ * The number of generated tokens

   generated_tokens: number;
-  prefill: PrefillToken[];
+  prefill: TextGenerationPrefillToken[];
  /**

@@ -164,3 +169,7 @@ * The random seed used for generation

   */
-  tokens: Token[];
+  tokens: TextGenerationOutputToken[];
+  /**
+   * Most likely tokens
+   */
+  top_tokens?: Array<TextGenerationOutputToken[]>;
   [property: string]: unknown;

@@ -170,11 +179,13 @@ }

 /**
- * The generated sequence reached the maximum allowed length
+ * The reason why the generation was stopped.
  *
- * The model generated an end-of-sentence (EOS) token
+ * length: The generated sequence reached the maximum allowed length
  *
- * One of the sequence in stop_sequences was generated
+ * eos_token: The model generated an end-of-sentence (EOS) token
+ *
+ * stop_sequence: One of the sequence in stop_sequences was generated
  */
-export type FinishReason = "length" | "eos_token" | "stop_sequence";
+export type TextGenerationFinishReason = "length" | "eos_token" | "stop_sequence";
-export interface PrefillToken {
+export interface TextGenerationPrefillToken {
   id: number;
@@ -189,5 +200,8 @@ logprob: number;

-export interface Token {
+/**
+ * Generated token.
+ */
+export interface TextGenerationOutputToken {
   id: number;
-  logprob: number;
+  logprob?: number;
   /**

@@ -203,1 +217,43 @@ * Whether or not that token is a special one

 }
+/**
+ * Text Generation Stream Output
+ */
+export interface TextGenerationStreamOutput {
+  /**
+   * Generation details. Only available when the generation is finished.
+   */
+  details?: TextGenerationStreamDetails;
+  /**
+   * The complete generated text. Only available when the generation is finished.
+   */
+  generated_text?: string;
+  /**
+   * The token index within the stream. Optional to support older clients that omit it.
+   */
+  index?: number;
+  /**
+   * Generated token.
+   */
+  token: TextGenerationOutputToken;
+  [property: string]: unknown;
+}
+/**
+ * Generation details. Only available when the generation is finished.
+ */
+export interface TextGenerationStreamDetails {
+  /**
+   * The reason why the generation was stopped.
+   */
+  finish_reason: TextGenerationFinishReason;
+  /**
+   * The number of generated tokens
+   */
+  generated_tokens: number;
+  /**
+   * The random seed used for generation
+   */
+  seed: number;
+  [property: string]: unknown;
+}
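
A sketch of consuming these chunks: per the doc comments above, generated_text and details are only set on the final chunk, so their presence doubles as an end-of-stream signal. The async-iterable source is an assumption; only the chunk shape comes from the diff:

import type { TextGenerationStreamOutput } from "@huggingface/tasks";

// Accumulates streamed token text; the final chunk carries the
// authoritative generated_text, so prefer it when present.
async function collectStream(
	chunks: AsyncIterable<TextGenerationStreamOutput>
): Promise<string> {
	let text = "";
	for await (const chunk of chunks) {
		text += chunk.token.text;
		if (chunk.generated_text !== undefined) {
			return chunk.generated_text;
		}
	}
	return text;
}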

src/tasks/text-generation/spec/input.json

@@ -15,2 +15,6 @@ {

     "$ref": "#/$defs/TextGenerationParameters"
   },
+  "stream": {
+    "description": "Whether to stream output tokens",
+    "type": "boolean"
+  }

@@ -17,0 +21,0 @@ },

src/tasks/text-generation/spec/output.json

@@ -13,39 +13,41 @@ {

   "details": {
-    "description": "When enabled, details about the generation",
-    "title": "TextGenerationOutputDetails",
-    "allOf": [
-      { "$ref": "#/$defs/SequenceDetails" },
-      {
-        "type": "object",
-        "properties": {
-          "best_of_sequences": {
-            "type": "array",
-            "description": "Details about additional sequences when best_of is provided",
-            "items": {
-              "allOf": [
-                { "$ref": "#/$defs/SequenceDetails" },
-                {
-                  "type": "object",
-                  "properties": {
-                    "generated_text": {
-                      "type": "integer",
-                      "description": "The generated text"
-                    }
-                  },
-                  "required": ["generated_text"]
-                }
-              ]
-            }
-          }
-        }
-      }
-    ]
+    "$ref": "#/$defs/Details",
+    "description": "When enabled, details about the generation"
   }
 },
 "required": ["generated_text"],
 "$defs": {
+  "FinishReason": {
+    "type": "string",
+    "title": "TextGenerationFinishReason",
+    "description": "The reason why the generation was stopped.",
+    "oneOf": [
+      { "const": "length", "description": "length: The generated sequence reached the maximum allowed length" },
+      { "const": "eos_token", "description": "eos_token: The model generated an end-of-sentence (EOS) token" },
+      {
+        "const": "stop_sequence",
+        "description": "stop_sequence: One of the sequence in stop_sequences was generated"
+      }
+    ]
+  },
+  "PrefillToken": {
+    "title": "TextGenerationPrefillToken",
+    "type": "object",
+    "properties": {
+      "id": {
+        "type": "integer"
+      },
+      "logprob": {
+        "type": "number"
+      },
+      "text": {
+        "type": "string",
+        "description": "The text associated with that token"
+      }
+    },
+    "required": ["id", "logprob", "text"]
+  },
   "Token": {
     "type": "object",
-    "title": "Token",
+    "title": "TextGenerationOutputToken",
     "properties": {

@@ -67,17 +69,64 @@ "id": {

     },
-    "required": ["id", "logprob", "special", "text"]
+    "required": ["id", "special", "text"]
   },
"Details": {
"type": "object",
"title": "TextGenerationOutputDetails",
"properties": {
"finish_reason": {
"$ref": "#/$defs/FinishReason",
"description": "The reason why the generation was stopped."
},
"generated_tokens": {
"type": "integer",
"description": "The number of generated tokens"
},
"prefill": {
"type": "array",
"items": {
"$ref": "#/$defs/PrefillToken"
}
},
"seed": {
"type": "integer",
"description": "The random seed used for generation"
},
"tokens": {
"type": "array",
"description": "The generated tokens and associated details",
"items": {
"$ref": "#/$defs/Token"
}
},
"top_tokens": {
"type": "array",
"description": "Most likely tokens",
"items": {
"type": "array",
"items": {
"$ref": "#/$defs/Token"
}
}
},
"best_of_sequences": {
"type": "array",
"description": "Details about additional sequences when best_of is provided",
"items": {
"$ref": "#/$defs/SequenceDetails"
}
}
},
"required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
},
"SequenceDetails": {
"type": "object",
"title": "TextGenerationSequenceDetails",
"title": "TextGenerationOutputSequenceDetails",
"properties": {
"finish_reason": {
"generated_text": {
"type": "string",
"description": "The reason why the generation was stopped.",
"oneOf": [
{ "const": "length", "description": "The generated sequence reached the maximum allowed length" },
{ "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
{ "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
]
"description": "The generated text"
},
"finish_reason": {
"$ref": "#/$defs/FinishReason"
},
"generated_tokens": {

@@ -90,17 +139,3 @@ "type": "integer",

"items": {
"title": "PrefillToken",
"type": "object",
"properties": {
"id": {
"type": "integer"
},
"logprob": {
"type": "number"
},
"text": {
"type": "string",
"description": "The text associated with that token"
}
},
"required": ["id", "logprob", "text"]
"$ref": "#/$defs/PrefillToken"
}

@@ -118,7 +153,17 @@ },

         }
       },
+      "top_tokens": {
+        "type": "array",
+        "description": "Most likely tokens",
+        "items": {
+          "type": "array",
+          "items": {
+            "$ref": "#/$defs/Token"
+          }
+        }
+      }
     },
-    "required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+    "required": ["generated_text", "finish_reason", "generated_tokens", "prefill", "tokens"]
   }
 }
}
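
The schema change mirrors the TypeScript one: the inline allOf composition is replaced by shared $defs entries (FinishReason, PrefillToken, Details, SequenceDetails) wired together with $ref, so PrefillToken is now defined once instead of being repeated inside every details block, and the stray "type": "integer" on generated_text is corrected to "string". A sketch of validating a response against the refactored schema with Ajv; the ajv dependency and the file path are assumptions, while the payload follows the required lists above:

import { readFileSync } from "node:fs";
import Ajv from "ajv";

// Path assumes the huggingface.js repo layout; adjust to taste.
const outputSchema = JSON.parse(
	readFileSync("packages/tasks/src/tasks/text-generation/spec/output.json", "utf8")
);

const ajv = new Ajv();
const validate = ajv.compile(outputSchema);

// Satisfies the top-level required ["generated_text"] plus, inside
// details, ["finish_reason", "generated_tokens", "prefill", "tokens"].
const ok = validate({
	generated_text: "Hello world",
	details: {
		finish_reason: "eos_token",
		generated_tokens: 2,
		prefill: [],
		tokens: [
			{ id: 42, special: false, text: "Hello" },
			{ id: 7, special: false, text: " world" },
		],
	},
});
if (!ok) console.error(validate.errors);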

