@huggingface/inference
Comparing version 3.3.0 to 3.3.1
@@ -51,2 +51,5 @@ var __defProp = Object.defineProperty;
+// src/providers/nebius.ts
+var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
 // src/providers/replicate.ts
@@ -71,3 +74,3 @@ var REPLICATE_API_BASE_URL = "https://api.replicate.com";
 var name = "@huggingface/inference";
-var version = "3.3.0";
+var version = "3.3.1";
@@ -85,2 +88,3 @@ // src/providers/consts.ts
   "hf-inference": {},
+  nebius: {},
   replicate: {},
@@ -142,3 +146,3 @@ sambanova: {},
   const provider = maybeProvider ?? "hf-inference";
-  const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion: chatCompletion2 } = options ?? {};
+  const { forceTask, includeCredentials, taskHint, chatCompletion: chatCompletion2 } = options ?? {};
   if (endpointUrl && provider !== "hf-inference") {
@@ -181,13 +185,2 @@ throw new Error(`Cannot use endpointUrl with a third-party provider.`);
   }
-  if (provider === "hf-inference") {
-    if (wait_for_model) {
-      headers["X-Wait-For-Model"] = "true";
-    }
-    if (use_cache === false) {
-      headers["X-Use-Cache"] = "false";
-    }
-    if (dont_load_model) {
-      headers["X-Load-Model"] = "0";
-    }
-  }
   if (provider === "replicate") {
@@ -211,3 +204,3 @@ headers["Prefer"] = "wait";
     ...otherArgs,
-    ...chatCompletion2 || provider === "together" ? { model } : void 0
+    ...chatCompletion2 || provider === "together" || provider === "nebius" ? { model } : void 0
   }),
@@ -229,2 +222,15 @@ ...credentials ? { credentials } : void 0,
     }
+    case "nebius": {
+      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : NEBIUS_API_BASE_URL;
+      if (params.taskHint === "text-to-image") {
+        return `${baseUrl}/v1/images/generations`;
+      }
+      if (params.taskHint === "text-generation") {
+        if (params.chatCompletion) {
+          return `${baseUrl}/v1/chat/completions`;
+        }
+        return `${baseUrl}/v1/completions`;
+      }
+      return baseUrl;
+    }
     case "replicate": {
@@ -296,7 +302,4 @@ const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : REPLICATE_API_BASE_URL;
   const response = await (options?.fetch ?? fetch)(url, info);
-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return request(args, {
-      ...options,
-      wait_for_model: true
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return request(args, options);
   }
@@ -430,7 +433,4 @@ if (!response.ok) {
   const response = await (options?.fetch ?? fetch)(url, info);
-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return yield* streamingRequest(args, {
-      ...options,
-      wait_for_model: true
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return yield* streamingRequest(args, options);
   }
@@ -726,9 +726,23 @@ if (!response.ok) {
 // src/tasks/cv/textToImage.ts
+function getResponseFormatArg(provider) {
+  switch (provider) {
+    case "fal-ai":
+      return { sync_mode: true };
+    case "nebius":
+      return { response_format: "b64_json" };
+    case "replicate":
+      return void 0;
+    case "together":
+      return { response_format: "base64" };
+    default:
+      return void 0;
+  }
+}
 async function textToImage(args, options) {
-  const payload = args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate" ? {
+  const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
     ...omit(args, ["inputs", "parameters"]),
     ...args.parameters,
-    ...args.provider !== "replicate" ? { response_format: "base64" } : void 0,
+    ...getResponseFormatArg(args.provider),
     prompt: args.inputs
-  } : args;
+  };
   const res = await request(payload, {
@@ -1113,4 +1127,4 @@ ...options,
   });
-  const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai does not output a system_fingerprint
-  (res.system_fingerprint === void 0 || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
+  const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+  (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
   if (!isValidOutput) {
@@ -1249,2 +1263,3 @@ throw new InferenceOutputError("Expected ChatCompletionOutput");
   "fireworks-ai",
+  "nebius",
   "hf-inference",
@@ -1251,0 +1266,0 @@ "replicate",
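Taken together, the bundle changes register Nebius as a provider: a new base URL, a `case "nebius"` branch in the URL builder, the `model` field added to Nebius request bodies, and a relaxed `system_fingerprint` check. The standalone sketch below (not part of the diff) mirrors the URL-routing branch; `buildNebiusUrl` and the proxy template value are illustrative assumptions, not exports of the package.

// Standalone sketch of the new Nebius URL routing (illustrative; not a package export).
const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
// Assumption: same "{{PROVIDER}}" proxy template the bundle uses for other providers.
const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}";

function buildNebiusUrl(
	params: { taskHint?: string; chatCompletion?: boolean },
	shouldProxy: boolean
): string {
	const baseUrl = shouldProxy
		? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", "nebius")
		: NEBIUS_API_BASE_URL;
	if (params.taskHint === "text-to-image") {
		return `${baseUrl}/v1/images/generations`;
	}
	if (params.taskHint === "text-generation") {
		return params.chatCompletion ? `${baseUrl}/v1/chat/completions` : `${baseUrl}/v1/completions`;
	}
	return baseUrl;
}

console.log(buildNebiusUrl({ taskHint: "text-generation", chatCompletion: true }, false));
// https://api.studio.nebius.ai/v1/chat/completions
console.log(buildNebiusUrl({ taskHint: "text-to-image" }, false));
// https://api.studio.nebius.ai/v1/images/generations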
@@ -8,22 +8,6 @@ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
     /**
-     * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+     * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
      */
     retry_on_error?: boolean;
-    /**
-     * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-     */
-    use_cache?: boolean;
-    /**
-     * (Default: false). Boolean. Do not load the model if it's not already available.
-     */
-    dont_load_model?: boolean;
-    /**
-     * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-     */
-    use_gpu?: boolean;
-    /**
-     * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-     */
-    wait_for_model?: boolean;
     /**
      * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
@@ -42,3 +26,3 @@ */
 export type InferenceTask = Exclude<PipelineType, "other">;
-export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "replicate", "sambanova", "together"];
+export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "nebius", "hf-inference", "replicate", "sambanova", "together"];
 export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
@@ -45,0 +29,0 @@ export interface BaseArgs {
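In the published typings, `Options` loses the hf-inference-only flags (`use_cache`, `dont_load_model`, `use_gpu`, `wait_for_model`); only generic options such as `retry_on_error` and the `fetch` override remain, and `INFERENCE_PROVIDERS` gains `"nebius"`. A rough migration sketch, assuming the 3.x `HfInference` client (token and model ID are placeholders):

import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_xxx"); // placeholder token

// 3.3.0: hf-inference-specific options were accepted, e.g.
//   { wait_for_model: true, use_cache: false }
// 3.3.1: those keys are gone from `Options`; only generic ones type-check.
const result = await hf.textClassification(
	{ model: "distilbert-base-uncased-finetuned-sst-2-english", inputs: "Great library!" },
	{ retry_on_error: false }
);
console.log(result);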
 {
 	"name": "@huggingface/inference",
-	"version": "3.3.0",
+	"version": "3.3.1",
 	"packageManager": "pnpm@8.10.5",
@@ -5,0 +5,0 @@ "license": "MIT",
@@ -52,2 +52,3 @@ # 🤗 Hugging Face Inference
 - [Fireworks AI](https://fireworks.ai)
+- [Nebius](https://studio.nebius.ai)
 - [Replicate](https://replicate.com)
@@ -75,2 +76,3 @@ - [Sambanova](https://sambanova.ai)
 - [Fireworks AI supported models](https://huggingface.co/api/partners/fireworks-ai/models)
+- [Nebius supported models](https://huggingface.co/api/partners/nebius/models)
 - [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
@@ -81,3 +83,3 @@ - [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
-❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
+❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
 This is not an issue for LLMs as everyone converged on the OpenAI API anyways, but can be more tricky for other tasks like "text-to-image" or "automatic-speech-recognition" where there exists no standard API. Let us know if any help is needed or if we can make things easier for you!
@@ -469,3 +471,3 @@
 	image: await (await fetch('https://placekitten.com/300/300')).blob()
-	},
+	},
 	parameters: {
@@ -472,0 +474,0 @@ candidate_labels: ['cat', 'dog']
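With the README now listing Nebius, a call can be routed there by passing `provider: "nebius"`, the same way as the other third-party providers. A minimal usage sketch (the token and model ID are placeholders, not values from the diff):

import { HfInference } from "@huggingface/inference";

const client = new HfInference("hf_xxx"); // placeholder token

const chat = await client.chatCompletion({
	provider: "nebius",
	model: "meta-llama/Meta-Llama-3.1-8B-Instruct", // placeholder model ID
	messages: [{ role: "user", content: "What is the capital of France?" }],
	max_tokens: 128,
});
console.log(chat.choices[0].message.content);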
 import { HF_HUB_URL, HF_ROUTER_URL } from "../config";
 import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
+import { NEBIUS_API_BASE_URL } from "../providers/nebius";
 import { REPLICATE_API_BASE_URL } from "../providers/replicate";
@@ -41,4 +42,3 @@ import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
-	const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
-		options ?? {};
+	const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};
@@ -105,14 +105,2 @@ if (endpointUrl && provider !== "hf-inference") {
-	if (provider === "hf-inference") {
-		if (wait_for_model) {
-			headers["X-Wait-For-Model"] = "true";
-		}
-		if (use_cache === false) {
-			headers["X-Use-Cache"] = "false";
-		}
-		if (dont_load_model) {
-			headers["X-Load-Model"] = "0";
-		}
-	}
 	if (provider === "replicate") {
@@ -148,3 +136,3 @@ headers["Prefer"] = "wait";
 			...otherArgs,
-			...(chatCompletion || provider === "together" ? { model } : undefined),
+			...(chatCompletion || provider === "together" || provider === "nebius" ? { model } : undefined),
 		}),
@@ -178,2 +166,18 @@ ...(credentials ? { credentials } : undefined),
 		}
+		case "nebius": {
+			const baseUrl = shouldProxy
+				? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
+				: NEBIUS_API_BASE_URL;
+			if (params.taskHint === "text-to-image") {
+				return `${baseUrl}/v1/images/generations`;
+			}
+			if (params.taskHint === "text-generation") {
+				if (params.chatCompletion) {
+					return `${baseUrl}/v1/chat/completions`;
+				}
+				return `${baseUrl}/v1/completions`;
+			}
+			return baseUrl;
+		}
 		case "replicate": {
@@ -180,0 +184,0 @@ const baseUrl = shouldProxy
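In `makeRequestOptions`, the option destructuring drops the removed flags, the `X-Wait-For-Model` / `X-Use-Cache` / `X-Load-Model` headers disappear, and Nebius joins Together (and all chat-completion calls) in receiving the resolved `model` inside the JSON body. A small illustration of that body rule (`buildBody` is a made-up helper, not part of the package):

// Made-up helper illustrating when the resolved model ID is put in the request body.
function buildBody(
	provider: string,
	model: string,
	chatCompletion: boolean,
	otherArgs: Record<string, unknown>
): Record<string, unknown> {
	return {
		...otherArgs,
		...(chatCompletion || provider === "together" || provider === "nebius" ? { model } : undefined),
	};
}

console.log(buildBody("nebius", "black-forest-labs/flux-dev", false, { prompt: "a watercolor fox" }));
// { prompt: "a watercolor fox", model: "black-forest-labs/flux-dev" }
console.log(buildBody("hf-inference", "gpt2", false, { inputs: "Hello" }));
// { inputs: "Hello" }  (for hf-inference the model is carried in the URL path instead)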
@@ -22,2 +22,3 @@ import type { InferenceProvider } from "../types";
 	"hf-inference": {},
+	nebius: {},
 	replicate: {},
@@ -24,0 +25,0 @@ sambanova: {},
@@ -21,7 +21,4 @@ import type { InferenceTask, Options, RequestArgs } from "../../types";
-	if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-		return request(args, {
-			...options,
-			wait_for_model: true,
-		});
+	if (options?.retry_on_error !== false && response.status === 503) {
+		return request(args, options);
 	}
@@ -28,0 +25,0 @@
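The retry path is simplified: on a 503 the request is retried with the same options instead of being re-issued with `wait_for_model: true`, which no longer exists. A standalone sketch of the resulting behaviour, where `doFetch` stands in for the package's internal fetch and URL building:

// Sketch only; `doFetch` is a stand-in, not a package export.
async function requestWithRetry<T>(
	doFetch: () => Promise<Response>,
	options?: { retry_on_error?: boolean }
): Promise<T> {
	const response = await doFetch();
	if (options?.retry_on_error !== false && response.status === 503) {
		// 3.3.0 set wait_for_model: true on the retry; 3.3.1 retries as-is,
		// so a persistently unavailable endpoint keeps retrying unless
		// retry_on_error is explicitly set to false.
		return requestWithRetry<T>(doFetch, options);
	}
	if (!response.ok) {
		throw new Error(`Request failed: ${response.status}`);
	}
	return (await response.json()) as T;
}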
@@ -23,7 +23,4 @@ import type { InferenceTask, Options, RequestArgs } from "../../types";
-	if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-		return yield* streamingRequest(args, {
-			...options,
-			wait_for_model: true,
-		});
+	if (options?.retry_on_error !== false && response.status === 503) {
+		return yield* streamingRequest(args, options);
 	}
@@ -30,0 +27,0 @@ if (!response.ok) {
 import type { TextToImageInput, TextToImageOutput } from "@huggingface/tasks";
 import { InferenceOutputError } from "../../lib/InferenceOutputError";
-import type { BaseArgs, Options } from "../../types";
+import type { BaseArgs, InferenceProvider, Options } from "../../types";
 import { omit } from "../../utils/omit";
@@ -18,2 +18,17 @@ import { request } from "../custom/request";
+function getResponseFormatArg(provider: InferenceProvider) {
+	switch (provider) {
+		case "fal-ai":
+			return { sync_mode: true };
+		case "nebius":
+			return { response_format: "b64_json" };
+		case "replicate":
+			return undefined;
+		case "together":
+			return { response_format: "base64" };
+		default:
+			return undefined;
+	}
+}
 /**
@@ -25,10 +40,10 @@ * This task reads some text input and outputs an image.
 	const payload =
-		args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate"
-			? {
+		!args.provider || args.provider === "hf-inference" || args.provider === "sambanova"
+			? args
+			: {
					...omit(args, ["inputs", "parameters"]),
					...args.parameters,
-					...(args.provider !== "replicate" ? { response_format: "base64" } : undefined),
+					...getResponseFormatArg(args.provider),
					prompt: args.inputs,
-			  }
-			: args;
+			  };
 	const res = await request<TextToImageOutput | Base64ImageGeneration | OutputUrlImageGeneration>(payload, {
@@ -38,2 +53,3 @@ ...options,
 	});
+	if (res && typeof res === "object") {
@@ -40,0 +56,0 @@ if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
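`textToImage` now decides the payload shape by exclusion (anything that is not hf-inference or sambanova gets the provider-specific shape) and delegates the response-format flag to `getResponseFormatArg`, with Nebius using the OpenAI-style `b64_json`. The snippet below shows the payload that results for a Nebius call; the helper is copied here only for illustration and the model ID is a placeholder:

type Provider = "fal-ai" | "fireworks-ai" | "nebius" | "hf-inference" | "replicate" | "sambanova" | "together";

// Copied for illustration; mirrors the helper added in the diff above.
function getResponseFormatArg(provider: Provider): Record<string, unknown> | undefined {
	switch (provider) {
		case "fal-ai":
			return { sync_mode: true };
		case "nebius":
			return { response_format: "b64_json" };
		case "together":
			return { response_format: "base64" };
		default:
			return undefined; // replicate, hf-inference, sambanova: no extra flag
	}
}

const payload = {
	model: "black-forest-labs/flux-dev", // placeholder model ID
	...getResponseFormatArg("nebius"),
	prompt: "a watercolor fox in the snow",
};
console.log(payload);
// { model: "black-forest-labs/flux-dev", response_format: "b64_json", prompt: "a watercolor fox in the snow" }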
@@ -18,2 +18,3 @@ import { InferenceOutputError } from "../../lib/InferenceOutputError";
 	});
 	const isValidOutput =
@@ -25,4 +26,6 @@ typeof res === "object" &&
 		typeof res?.model === "string" &&
-		/// Together.ai does not output a system_fingerprint
-		(res.system_fingerprint === undefined || typeof res.system_fingerprint === "string") &&
+		/// Together.ai and Nebius do not output a system_fingerprint
+		(res.system_fingerprint === undefined ||
+			res.system_fingerprint === null ||
+			typeof res.system_fingerprint === "string") &&
 		typeof res?.usage === "object";
@@ -29,0 +32,0 @@
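The `chatCompletion` output check is relaxed so that `system_fingerprint` may be a string, `undefined`, or (new in 3.3.1) `null`, since Together.ai and Nebius do not always return it. A standalone equivalent of that part of the predicate:

function hasValidSystemFingerprint(res: { system_fingerprint?: string | null }): boolean {
	return (
		res.system_fingerprint === undefined ||
		res.system_fingerprint === null ||
		typeof res.system_fingerprint === "string"
	);
}

console.log(hasValidSystemFingerprint({})); // true, field omitted
console.log(hasValidSystemFingerprint({ system_fingerprint: null })); // true, accepted as of 3.3.1
console.log(hasValidSystemFingerprint({ system_fingerprint: "fp_123" })); // true, placeholder string value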
@@ -10,23 +10,7 @@ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
 	/**
-	 * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+	 * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
 	 */
 	retry_on_error?: boolean;
-	/**
-	 * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-	 */
-	use_cache?: boolean;
-	/**
-	 * (Default: false). Boolean. Do not load the model if it's not already available.
-	 */
-	dont_load_model?: boolean;
-	/**
-	 * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-	 */
-	use_gpu?: boolean;
-	/**
-	 * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-	 */
-	wait_for_model?: boolean;
 	/**
 	 * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
@@ -51,2 +35,3 @@ */
 	"fireworks-ai",
+	"nebius",
 	"hf-inference",
@@ -53,0 +38,0 @@ "replicate",