You're Invited:Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26.RSVP
Socket
Book a DemoSign in
Socket

@langchain/openai

Package Overview
Dependencies
Maintainers
10
Versions
152
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@langchain/openai - npm Package Compare versions

Comparing version
0.5.18
to
0.6.0
+1
-1
dist/azure/chat_models.cjs

@@ -350,3 +350,3 @@ "use strict";

* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const logprobsLlm = new ChatOpenAI({ model: "gpt-4o-mini", logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);

@@ -353,0 +353,0 @@ * console.log(aiMsgForLogprobs.response_metadata.logprobs);

import { type ClientOptions } from "openai";
import { LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import { BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
import { BaseMessage } from "@langchain/core/messages";
import { Runnable } from "@langchain/core/runnables";
import { InteropZodType } from "@langchain/core/utils/types";
import { ChatOpenAI, ChatOpenAIStructuredOutputMethodOptions } from "../chat_models.js";
import { ChatOpenAI } from "../chat_models.js";
import { AzureOpenAIInput, OpenAIChatInput, OpenAICoreRequestOptions } from "../types.js";

@@ -351,3 +351,3 @@ export type { AzureOpenAIInput };

* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const logprobsLlm = new ChatOpenAI({ model: "gpt-4o-mini", logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);

@@ -448,8 +448,8 @@ * console.log(aiMsgForLogprobs.response_metadata.logprobs);

toJSON(): any;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
raw: BaseMessage;

@@ -456,0 +456,0 @@ parsed: RunOutput;

import { AzureOpenAI as AzureOpenAIClient } from "openai";
import { getEnv, getEnvironmentVariable } from "@langchain/core/utils/env";
import { ChatOpenAI, } from "../chat_models.js";
import { ChatOpenAI } from "../chat_models.js";
import { getEndpoint } from "../utils/azure.js";

@@ -347,3 +347,3 @@ import { normalizeHeaders } from "../utils/headers.js";

* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const logprobsLlm = new ChatOpenAI({ model: "gpt-4o-mini", logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);

@@ -350,0 +350,0 @@ * console.log(aiMsgForLogprobs.response_metadata.logprobs);

import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessageChunk, type BaseMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk } from "@langchain/core/messages";
import { ChatGenerationChunk, type ChatResult } from "@langchain/core/outputs";
import { ChatGenerationChunk, type ChatGeneration, type ChatResult } from "@langchain/core/outputs";
import { BaseChatModel, type BindToolsInput, type LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import { type BaseFunctionCallOptions, type BaseLanguageModelInput, type StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
import { Runnable } from "@langchain/core/runnables";
import type { ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema } from "openai/resources/shared";
import { InteropZodType } from "@langchain/core/utils/types";
import { type OpenAICallOptions, type OpenAIChatInput, type OpenAICoreRequestOptions, type ChatOpenAIResponseFormat } from "./types.js";
import { type OpenAICallOptions, type OpenAIChatInput, type OpenAICoreRequestOptions, type ChatOpenAIResponseFormat, ChatOpenAIReasoningSummary } from "./types.js";
import { OpenAIToolChoice } from "./utils/openai.js";
export type { OpenAICallOptions, OpenAIChatInput };
interface TokenUsage {
completionTokens?: number;
promptTokens?: number;
totalTokens?: number;
}
type ResponsesTool = NonNullable<OpenAIClient.Responses.ResponseCreateParams["tools"]>[number];
type ResponsesToolChoice = NonNullable<OpenAIClient.Responses.ResponseCreateParams["tool_choice"]>;
type ChatOpenAIToolType = BindToolsInput | OpenAIClient.Chat.ChatCompletionTool | ResponsesTool;
interface OpenAILLMOutput {
tokenUsage: TokenUsage;
tokenUsage: {
completionTokens?: number;
promptTokens?: number;
totalTokens?: number;
};
}
type OpenAIRoleEnum = "system" | "developer" | "assistant" | "user" | "function" | "tool";
type OpenAICompletionParam = OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
export declare function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum;
export declare function _convertMessagesToOpenAIParams(messages: BaseMessage[], model?: string): OpenAICompletionParam[];
type ExcludeController<T> = T extends {
controller: unknown;
} ? never : T;
type ExcludeNonController<T> = T extends {
controller: unknown;
} ? T : never;
type ResponsesCreate = OpenAIClient.Responses["create"];
type ResponsesCreateParams = Parameters<OpenAIClient.Responses["create"]>[0];
type ResponsesTool = Exclude<ResponsesCreateParams["tools"], undefined>[number];
type ResponsesToolChoice = Exclude<ResponsesCreateParams["tool_choice"], undefined>;
type ResponsesCreateInvoke = ExcludeController<Awaited<ReturnType<ResponsesCreate>>>;
type ResponsesCreateStream = ExcludeNonController<Awaited<ReturnType<ResponsesCreate>>>;
type ResponseInvocationParams = Omit<ResponsesCreateParams, "input">;
type ChatCompletionInvocationParams = Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages">;
type ChatOpenAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool | ResponsesTool;
export interface ChatOpenAIStructuredOutputMethodOptions<IncludeRaw extends boolean> extends StructuredOutputMethodOptions<IncludeRaw> {
export type { OpenAICallOptions, OpenAIChatInput };
export declare function messageToOpenAIRole(message: BaseMessage): OpenAIClient.ChatCompletionRole;
export declare function _convertMessagesToOpenAIParams(messages: BaseMessage[], model?: string): OpenAIClient.Chat.Completions.ChatCompletionMessageParam[];
interface BaseChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
/**
* strict: If `true` and `method` = "function_calling", model output is
* guaranteed to exactly match the schema. If `true`, the input schema
* will also be validated according to
* https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
* If `false`, input schema will not be validated and model output will not
* be validated.
* If `undefined`, `strict` argument will not be passed to the model.
*
* @version 0.2.6
* @note Planned breaking change in version `0.3.0`:
* `strict` will default to `true` when `method` is
* "function_calling" as of version `0.3.0`.
* A list of tools that the model may use to generate responses.
* Each tool can be a function, a built-in tool, or a custom tool definition.
* If not provided, the model will not use any tools.
*/
strict?: boolean;
}
export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
tools?: ChatOpenAIToolType[];
/**
* Specifies which tool the model should use to respond.
* Can be an {@link OpenAIToolChoice} or a {@link ResponsesToolChoice}.
* If not set, the model will decide which tool to use automatically.
*/
tool_choice?: OpenAIToolChoice | ResponsesToolChoice;
/**
* Adds a prompt index to prompts passed to the model to track
* what prompt is being used for a given generation.
*/
promptIndex?: number;
/**
* An object specifying the format that the model must output.
*/
response_format?: ChatOpenAIResponseFormat;
/**
* When provided, the completions API will make a best effort to sample
* deterministically, such that repeated requests with the same `seed`
* and parameters should return the same result.
*/
seed?: number;
/**
* Additional options to pass to streamed completions.
* If provided takes precedence over "streamUsage" set at initialization time.
* If provided, this takes precedence over "streamUsage" set at
* initialization time.
*/
stream_options?: {
/**
* Whether or not to include token usage in the stream.
* If set to `true`, this will include an additional
* chunk at the end of the stream with the token usage.
*/
include_usage: boolean;
};
stream_options?: OpenAIClient.Chat.ChatCompletionStreamOptions;
/**
* Whether or not to restrict the ability to
* call multiple tools in one response.
* The model may choose to call multiple functions in a single turn. You can
* set parallel_tool_calls to false which ensures only one tool is called at most.
* [Learn more](https://platform.openai.com/docs/guides/function-calling#parallel-function-calling)
*/

@@ -89,4 +75,2 @@ parallel_tool_calls?: boolean;

* If `undefined`, `strict` argument will not be passed to the model.
*
* @version 0.2.6
*/

@@ -119,9 +103,2 @@ strict?: boolean;

/**
* Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.
* Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
*
* @deprecated Use {@link reasoning} object instead.
*/
reasoning_effort?: OpenAIClient.Chat.ChatCompletionReasoningEffort;
/**
* Options for reasoning models.

@@ -136,37 +113,202 @@ *

/**
* Service tier to use for this request. Can be "auto", "default", or "flex"
* Specifies the service tier for prioritization and latency optimization.
*/
service_tier?: OpenAIClient.Chat.ChatCompletionCreateParams["service_tier"];
}
export interface BaseChatOpenAIFields extends Partial<OpenAIChatInput>, BaseChatModelParams {
/**
* Optional configuration options for the OpenAI client.
*/
configuration?: ClientOptions;
}
/** @internal */
declare abstract class BaseChatOpenAI<CallOptions extends BaseChatOpenAICallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> implements Partial<OpenAIChatInput> {
temperature?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
n?: number;
logitBias?: Record<string, number>;
model: string;
modelKwargs?: OpenAIChatInput["modelKwargs"];
stop?: string[];
stopSequences?: string[];
user?: string;
timeout?: number;
streaming: boolean;
streamUsage: boolean;
maxTokens?: number;
logprobs?: boolean;
topLogprobs?: number;
apiKey?: string;
organization?: string;
__includeRawResponse?: boolean;
protected client: OpenAIClient;
protected clientConfig: ClientOptions;
/**
* Whether the model supports the `strict` argument when passing in tools.
* If `undefined` the `strict` argument will not be passed to OpenAI.
*/
supportsStrictToolCalling?: boolean;
audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
reasoning?: OpenAIClient.Reasoning;
/**
* Must be set to `true` in tenancies with Zero Data Retention. Setting to `true` will disable
* output storage in the Responses API, but this DOES NOT enable Zero Data Retention in your
* OpenAI organization or project. This must be configured directly with OpenAI.
*
* See:
* https://help.openai.com/en/articles/10503543-data-residency-for-the-openai-api
* https://platform.openai.com/docs/api-reference/responses/create#responses-create-store
*
* @default false
*/
zdrEnabled?: boolean | undefined;
/**
* Service tier to use for this request. Can be "auto", "default", or "flex" or "priority".
* Specifies the service tier for prioritization and latency optimization.
*/
service_tier?: OpenAIClient.Chat.ChatCompletionCreateParams["service_tier"];
_llmType(): string;
static lc_name(): string;
get callKeys(): string[];
lc_serializable: boolean;
get lc_secrets(): {
[key: string]: string;
} | undefined;
get lc_aliases(): Record<string, string>;
get lc_serializable_keys(): string[];
getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
/** @ignore */
_identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
model_name: string;
} & ClientOptions;
/**
* Get the identifying parameters for the model
*/
identifyingParams(): Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages"> & {
model_name: string;
} & ClientOptions;
constructor(fields?: BaseChatOpenAIFields);
/**
* Returns backwards compatible reasoning parameters from constructor params and call options
* @internal
*/
protected _getReasoningParams(options?: this["ParsedCallOptions"]): OpenAIClient.Reasoning | undefined;
/**
* Returns an openai compatible response format from a set of options
* @internal
*/
protected _getResponseFormat(resFormat?: CallOptions["response_format"]): ResponseFormatText | ResponseFormatJSONObject | ResponseFormatJSONSchema | undefined;
protected _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
protected _convertChatOpenAIToolToCompletionsTool(tool: ChatOpenAIToolType, fields?: {
strict?: boolean;
}): OpenAIClient.ChatCompletionTool;
bindTools(tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
/** @ignore */
_combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
getNumTokensFromMessages(messages: BaseMessage[]): Promise<{
totalCount: number;
countPerMessage: number[];
}>;
/** @internal */
protected _getNumTokensFromGenerations(generations: ChatGeneration[]): Promise<number>;
/** @internal */
protected _getEstimatedTokenCountFromPrompt(messages: BaseMessage[], functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[], function_call?: "none" | "auto" | OpenAIClient.Chat.ChatCompletionFunctionCallOption): Promise<number>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
}
type ExcludeController<T> = T extends {
controller: unknown;
} ? never : T;
type ResponsesCreate = OpenAIClient.Responses["create"];
type ResponsesParse = OpenAIClient.Responses["parse"];
type ResponsesCreateInvoke = ExcludeController<Awaited<ReturnType<ResponsesCreate>>>;
type ResponsesParseInvoke = ExcludeController<Awaited<ReturnType<ResponsesParse>>>;
interface ChatOpenAIResponsesCallOptions extends BaseChatOpenAICallOptions {
/**
* Configuration options for a text response from the model. Can be plain text or
* structured JSON data.
*
* If set, the Responses API will be used to fulfill the request.
*/
text?: ResponsesCreateParams["text"];
text?: OpenAIClient.Responses.ResponseCreateParams["text"];
/**
* The truncation strategy to use for the model response.
*
* If set, the Responses API will be used to fulfill the request.
*/
truncation?: ResponsesCreateParams["truncation"];
truncation?: OpenAIClient.Responses.ResponseCreateParams["truncation"];
/**
* Specify additional output data to include in the model response.
*
* If set, the Responses API will be used to fulfill the request.
*/
include?: ResponsesCreateParams["include"];
include?: OpenAIClient.Responses.ResponseCreateParams["include"];
/**
* The unique ID of the previous response to the model. Use this to create multi-turn
* conversations. Will be set automatically if {@link ChatOpenAI.zdrEnabled} is `false`, provided
* that AIMessages included in the request have a `response_metadata.id` property. Ignored unless
* {@link ChatOpenAI.useResponsesApi} is `true`.
*
* If set, the Responses API will be used to fulfill the request.
* conversations.
*/
previous_response_id?: ResponsesCreateParams["previous_response_id"];
previous_response_id?: OpenAIClient.Responses.ResponseCreateParams["previous_response_id"];
}
type ChatResponsesInvocationParams = Omit<OpenAIClient.Responses.ResponseCreateParams, "input">;
/**
* OpenAI Responses API implementation.
*
* Will be exported in a later version of @langchain/openai.
*
* @internal
*/
export declare class ChatOpenAIResponses<CallOptions extends ChatOpenAIResponsesCallOptions = ChatOpenAIResponsesCallOptions> extends BaseChatOpenAI<CallOptions> {
invocationParams(options?: this["ParsedCallOptions"]): ChatResponsesInvocationParams;
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<ChatResult>;
_streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"]): AsyncGenerator<ChatGenerationChunk>;
/**
* Service tier to use for this request. Can be "auto", "default", or "flex"
* Specifies the service tier for prioritization and latency optimization.
* Calls the Responses API with retry logic in case of failures.
* @param request The request to send to the OpenAI API.
* @param options Optional configuration for the API call.
* @returns The response from the OpenAI API.
*/
service_tier?: ResponsesCreateParams["service_tier"];
completionWithRetry(request: OpenAIClient.Responses.ResponseCreateParamsStreaming, requestOptions?: OpenAIClient.RequestOptions): Promise<AsyncIterable<OpenAIClient.Responses.ResponseStreamEvent>>;
completionWithRetry(request: OpenAIClient.Responses.ResponseCreateParamsNonStreaming, requestOptions?: OpenAIClient.RequestOptions): Promise<OpenAIClient.Responses.Response>;
/** @internal */
protected _convertResponsesMessageToBaseMessage(response: ResponsesCreateInvoke | ResponsesParseInvoke): BaseMessage;
/** @internal */
protected _convertResponsesDeltaToBaseMessageChunk(chunk: OpenAIClient.Responses.ResponseStreamEvent): ChatGenerationChunk | null;
/** @internal */
protected _convertMessagesToResponsesParams(messages: BaseMessage[]): (OpenAIClient.Responses.ResponseReasoningItem | OpenAIClient.Responses.EasyInputMessage | OpenAIClient.Responses.ResponseInputItem.Message | OpenAIClient.Responses.ResponseOutputMessage | OpenAIClient.Responses.ResponseFileSearchToolCall | OpenAIClient.Responses.ResponseComputerToolCall | OpenAIClient.Responses.ResponseInputItem.ComputerCallOutput | OpenAIClient.Responses.ResponseFunctionWebSearch | OpenAIClient.Responses.ResponseFunctionToolCall | OpenAIClient.Responses.ResponseInputItem.FunctionCallOutput | OpenAIClient.Responses.ResponseInputItem.ImageGenerationCall | OpenAIClient.Responses.ResponseCodeInterpreterToolCall | OpenAIClient.Responses.ResponseInputItem.LocalShellCall | OpenAIClient.Responses.ResponseInputItem.LocalShellCallOutput | OpenAIClient.Responses.ResponseInputItem.McpListTools | OpenAIClient.Responses.ResponseInputItem.McpApprovalRequest | OpenAIClient.Responses.ResponseInputItem.McpApprovalResponse | OpenAIClient.Responses.ResponseInputItem.McpCall | OpenAIClient.Responses.ResponseInputItem.ItemReference)[];
/** @internal */
protected _convertReasoningSummary(reasoning: ChatOpenAIReasoningSummary): OpenAIClient.Responses.ResponseReasoningItem;
/** @internal */
protected _reduceChatOpenAITools(tools: ChatOpenAIToolType[], fields: {
stream?: boolean;
strict?: boolean;
}): ResponsesTool[];
}
export interface ChatOpenAIFields extends Partial<OpenAIChatInput>, BaseChatModelParams {
configuration?: ClientOptions;
interface ChatOpenAICompletionsCallOptions extends BaseChatOpenAICallOptions {
}
type ChatCompletionsInvocationParams = Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages">;
/**
* OpenAI Completions API implementation.
* @internal
*/
export declare class ChatOpenAICompletions<CallOptions extends ChatOpenAICompletionsCallOptions = ChatOpenAICompletionsCallOptions> extends BaseChatOpenAI<CallOptions> {
/** @internal */
invocationParams(options?: this["ParsedCallOptions"], extra?: {
streaming?: boolean;
}): ChatCompletionsInvocationParams;
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
_streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, requestOptions?: OpenAIClient.RequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, requestOptions?: OpenAIClient.RequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
/** @internal */
protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.Chat.Completions.ChatCompletionMessage, rawResponse: OpenAIClient.Chat.Completions.ChatCompletion): BaseMessage;
/** @internal */
protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk, defaultRole?: OpenAIClient.Chat.ChatCompletionRole): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
}
export type ChatOpenAICallOptions = ChatOpenAICompletionsCallOptions & ChatOpenAIResponsesCallOptions;
export interface ChatOpenAIFields extends BaseChatOpenAIFields {
/**

@@ -223,3 +365,3 @@ * Whether to use the responses API for all requests. If `false` the responses API will be used

* const llm = new ChatOpenAI({
* model: "gpt-4o",
* model: "gpt-4o-mini",
* temperature: 0,

@@ -525,3 +667,3 @@ * maxTokens: undefined,

* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const logprobsLlm = new ChatOpenAI({ model: "gpt-4o-mini", logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);

@@ -718,52 +860,4 @@ * console.log(aiMsgForLogprobs.response_metadata.logprobs);

*/
export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> implements Partial<OpenAIChatInput> {
static lc_name(): string;
get callKeys(): string[];
lc_serializable: boolean;
get lc_secrets(): {
[key: string]: string;
} | undefined;
get lc_aliases(): Record<string, string>;
get lc_serializable_keys(): string[];
temperature?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
n?: number;
logitBias?: Record<string, number>;
/** @deprecated Use "model" instead */
modelName: string;
model: string;
modelKwargs?: OpenAIChatInput["modelKwargs"];
stop?: string[];
stopSequences?: string[];
user?: string;
timeout?: number;
streaming: boolean;
streamUsage: boolean;
maxTokens?: number;
logprobs?: boolean;
topLogprobs?: number;
openAIApiKey?: string;
apiKey?: string;
organization?: string;
__includeRawResponse?: boolean;
protected client: OpenAIClient;
protected clientConfig: ClientOptions;
export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatOpenAI<CallOptions> implements Partial<OpenAIChatInput> {
/**
* Whether the model supports the `strict` argument when passing in tools.
* If `undefined` the `strict` argument will not be passed to OpenAI.
*/
supportsStrictToolCalling?: boolean;
audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
/**
* @deprecated Use {@link reasoning} object instead.
*/
reasoningEffort?: OpenAIClient.Chat.ChatCompletionReasoningEffort;
/**
* Options for reasoning models.
*/
reasoning?: OpenAIClient.Reasoning;
/**
* Whether to use the responses API for all requests. If `false` the responses API will be used

@@ -773,103 +867,11 @@ * only when required in order to fulfill the request.

useResponsesApi: boolean;
/**
* Must be set to `true` in tenancies with Zero Data Retention. Setting to `true` will disable
* output storage in the Responses API, but this DOES NOT enable Zero Data Retention in your
* OpenAI organization or project. This must be configured directly with OpenAI.
*
* See:
* https://help.openai.com/en/articles/10503543-data-residency-for-the-openai-api
* https://platform.openai.com/docs/api-reference/responses/create#responses-create-store
*
* @default false
*/
zdrEnabled?: boolean | undefined;
/**
* Service tier to use for this request. Can be "auto", "default", or "flex" or "priority".
* Specifies the service tier for prioritization and latency optimization.
*/
service_tier?: ResponsesCreateParams["service_tier"];
private responses;
private completions;
get lc_serializable_keys(): string[];
constructor(fields?: ChatOpenAIFields);
getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
bindTools(tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
private createResponseFormat;
private getReasoningParams;
/**
* Get the parameters used to invoke the model
*/
invocationParams<Type extends "responses" | "completion" = "completion">(options?: this["ParsedCallOptions"], extra?: {
streaming?: boolean;
}): Type extends "responses" ? ResponseInvocationParams : ChatCompletionInvocationParams;
protected _convertOpenAIChatCompletionMessageToBaseMessage(message: OpenAIClient.Chat.Completions.ChatCompletionMessage, rawResponse: OpenAIClient.Chat.Completions.ChatCompletion): BaseMessage;
protected _convertOpenAIDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk, defaultRole?: OpenAIRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
protected _useResponsesApi(options: this["ParsedCallOptions"] | undefined): boolean;
/** @ignore */
_identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
model_name: string;
} & ClientOptions;
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
_streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
/**
* Get the identifying parameters for the model
*
*/
identifyingParams(): Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages"> & {
model_name: string;
} & ClientOptions;
/** @ignore */
_responseApiGenerate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
/**
* Determines whether the responses API should be used for the given request.
*
* @internal
*
* @param options The parsed call options for the request.
* @returns `true` if the responses API should be used, either because it is explicitly enabled,
* or because the request requires it.
*/
protected _useResponseApi(options: this["ParsedCallOptions"] | undefined): boolean;
/** @ignore */
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
/**
* Estimate the number of tokens a prompt will use.
* Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
*/
private getEstimatedTokenCountFromPrompt;
/**
* Estimate the number of tokens an array of generations have used.
*/
private getNumTokensFromGenerations;
getNumTokensFromMessages(messages: BaseMessage[]): Promise<{
totalCount: number;
countPerMessage: number[];
}>;
/**
* Calls the OpenAI API with retry logic in case of failures.
* @param request The request to send to the OpenAI API.
* @param options Optional configuration for the API call.
* @returns The response from the OpenAI API.
*/
completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
protected responseApiWithRetry(request: ResponsesCreateParams & {
stream: true;
}, options?: OpenAICoreRequestOptions): Promise<ResponsesCreateStream>;
protected responseApiWithRetry(request: ResponsesCreateParams, options?: OpenAICoreRequestOptions): Promise<ResponsesCreateInvoke>;
/**
* Call the beta chat completions parse endpoint. This should only be called if
* response_format is set to "json_object".
* @param {OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming} request
* @param {OpenAICoreRequestOptions | undefined} options
*/
betaParsedCompletionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<ReturnType<OpenAIClient["chat"]["completions"]["parse"]>>;
protected _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
_llmType(): string;
/** @ignore */
_combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
withConfig(config: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
}

@@ -139,9 +139,2 @@ import type { OpenAI as OpenAIClient } from "openai";

/**
* Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.
* Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
*
* @deprecated Use the {@link reasoning} object instead.
*/
reasoningEffort?: OpenAIClient.Chat.ChatCompletionReasoningEffort;
/**
* Options for reasoning models.

@@ -148,0 +141,0 @@ *

@@ -7,2 +7,3 @@ "use strict";

exports.formatToOpenAIToolChoice = formatToOpenAIToolChoice;
exports.interopZodResponseFormat = interopZodResponseFormat;
const openai_1 = require("openai");

@@ -14,2 +15,4 @@ const function_calling_1 = require("@langchain/core/utils/function_calling");

const json_schema_1 = require("@langchain/core/utils/json_schema");
const core_1 = require("zod/v4/core");
const zod_1 = require("openai/helpers/zod");
const errors_js_1 = require("./errors.cjs");

@@ -81,1 +84,47 @@ // eslint-disable-next-line @typescript-eslint/no-explicit-any

}
// inlined from openai/lib/parser.ts
function makeParseableResponseFormat(response_format, parser) {
const obj = { ...response_format };
Object.defineProperties(obj, {
$brand: {
value: "auto-parseable-response-format",
enumerable: false,
},
$parseRaw: {
value: parser,
enumerable: false,
},
});
return obj;
}
function interopZodResponseFormat(zodSchema, name, props) {
    // Zod v3 schemas are handled by the official OpenAI helper directly.
    if ((0, types_1.isZodSchemaV3)(zodSchema)) {
        return (0, zod_1.zodResponseFormat)(zodSchema, name, props);
    }
    if (!(0, types_1.isZodSchemaV4)(zodSchema)) {
        throw new Error("Unsupported schema response format");
    }
    // Zod v4: build the JSON schema ourselves, mirroring the settings the
    // native `zodResponseFormat` helper would have applied for v3 schemas.
    const schema = (0, core_1.toJSONSchema)(zodSchema, {
        cycles: "ref", // equivalent to nameStrategy: 'duplicate-ref'
        reused: "ref", // equivalent to $refStrategy: 'extract-to-root'
        override(ctx) {
            ctx.jsonSchema.title = name; // equivalent to `name` property
            // TODO: implement `nullableStrategy` patch-fix (zod doesn't support openApi3 json schema target)
            // TODO: implement `openaiStrictMode` patch-fix (where optional properties without `nullable` are not supported)
        },
        /// property equivalents from native `zodResponseFormat` fn
        // openaiStrictMode: true,
        // name,
        // nameStrategy: 'duplicate-ref',
        // $refStrategy: 'extract-to-root',
        // nullableStrategy: 'property',
    });
    // Parser used by the client to validate raw model output against the schema.
    const parseRaw = (content) => (0, core_1.parse)(zodSchema, JSON.parse(content));
    return makeParseableResponseFormat({
        type: "json_schema",
        json_schema: {
            ...props,
            name,
            strict: true,
            schema,
        },
    }, parseRaw);
}

@@ -5,2 +5,4 @@ import { OpenAI as OpenAIClient } from "openai";

import { ToolDefinition } from "@langchain/core/language_models/base";
import { InteropZodType } from "@langchain/core/utils/types";
import { ResponseFormatJSONSchema } from "openai/resources";
export declare function wrapOpenAIClientError(e: any): any;

@@ -11,1 +13,5 @@ export { convertToOpenAIFunction as formatToOpenAIFunction, convertToOpenAITool as formatToOpenAITool, };

export declare function formatToOpenAIToolChoice(toolChoice?: OpenAIToolChoice): OpenAIClient.ChatCompletionToolChoiceOption | undefined;
/**
 * Convert an interop Zod schema (v3 or v4) into an OpenAI `json_schema`
 * response-format object usable for structured outputs.
 *
 * v3 schemas are delegated to the `zodResponseFormat` helper from
 * `openai/helpers/zod`; v4 schemas are serialized via zod's own
 * `toJSONSchema` with `strict: true`.
 *
 * @param zodSchema - The Zod schema (v3 or v4) describing the expected output.
 * @param name - Name assigned to the generated JSON schema (also used as its title).
 * @param props - Additional `json_schema` fields (e.g. `description`); `schema`,
 *   `strict` and `name` are filled in by this function.
 * @throws Error if the schema is neither a recognized Zod v3 nor v4 schema.
 */
export declare function interopZodResponseFormat(zodSchema: InteropZodType, name: string, props: Omit<ResponseFormatJSONSchema.JSONSchema, "schema" | "strict" | "name">): {
    json_schema: ResponseFormatJSONSchema.JSONSchema;
    type: "json_schema";
};
import { APIConnectionTimeoutError, APIUserAbortError, } from "openai";
import { convertToOpenAIFunction, convertToOpenAITool, } from "@langchain/core/utils/function_calling";
import { isInteropZodSchema } from "@langchain/core/utils/types";
import { isInteropZodSchema, isZodSchemaV3, isZodSchemaV4, } from "@langchain/core/utils/types";
import { toJsonSchema } from "@langchain/core/utils/json_schema";
import { toJSONSchema as toJSONSchemaV4, parse as parseV4 } from "zod/v4/core";
import { zodResponseFormat } from "openai/helpers/zod";
import { addLangChainErrorFields } from "./errors.js";

@@ -72,1 +74,47 @@ // eslint-disable-next-line @typescript-eslint/no-explicit-any

}
// inlined from openai/lib/parser.ts
function makeParseableResponseFormat(response_format, parser) {
const obj = { ...response_format };
Object.defineProperties(obj, {
$brand: {
value: "auto-parseable-response-format",
enumerable: false,
},
$parseRaw: {
value: parser,
enumerable: false,
},
});
return obj;
}
export function interopZodResponseFormat(zodSchema, name, props) {
    // Zod v3 schemas go straight through the official OpenAI helper.
    if (isZodSchemaV3(zodSchema)) {
        return zodResponseFormat(zodSchema, name, props);
    }
    if (!isZodSchemaV4(zodSchema)) {
        throw new Error("Unsupported schema response format");
    }
    // Zod v4: serialize the schema ourselves, matching the options the
    // native `zodResponseFormat` helper applies for v3 schemas.
    const schema = toJSONSchemaV4(zodSchema, {
        cycles: "ref", // equivalent to nameStrategy: 'duplicate-ref'
        reused: "ref", // equivalent to $refStrategy: 'extract-to-root'
        override(ctx) {
            ctx.jsonSchema.title = name; // equivalent to `name` property
            // TODO: implement `nullableStrategy` patch-fix (zod doesn't support openApi3 json schema target)
            // TODO: implement `openaiStrictMode` patch-fix (where optional properties without `nullable` are not supported)
        },
        /// property equivalents from native `zodResponseFormat` fn
        // openaiStrictMode: true,
        // name,
        // nameStrategy: 'duplicate-ref',
        // $refStrategy: 'extract-to-root',
        // nullableStrategy: 'property',
    });
    // Parser the client uses to validate raw model output against the schema.
    const parseRaw = (content) => parseV4(zodSchema, JSON.parse(content));
    return makeParseableResponseFormat({
        type: "json_schema",
        json_schema: {
            ...props,
            name,
            strict: true,
            schema,
        },
    }, parseRaw);
}

@@ -29,3 +29,3 @@ "use strict";

* const openaiResponse = await openAIClient.chat.completions.create({
* model: "gpt-4o",
* model: "gpt-4o-mini",
* messages,

@@ -32,0 +32,0 @@ * });

@@ -27,3 +27,3 @@ import type { BasePromptValue } from "@langchain/core/prompt_values";

* const openaiResponse = await openAIClient.chat.completions.create({
* model: "gpt-4o",
* model: "gpt-4o-mini",
* messages,

@@ -30,0 +30,0 @@ * });

@@ -26,3 +26,3 @@ import { _convertMessagesToOpenAIParams } from "../chat_models.js";

* const openaiResponse = await openAIClient.chat.completions.create({
* model: "gpt-4o",
* model: "gpt-4o-mini",
* messages,

@@ -29,0 +29,0 @@ * });

{
"name": "@langchain/openai",
"version": "0.5.18",
"version": "0.6.0",
"description": "OpenAI integrations for LangChain.js",

@@ -5,0 +5,0 @@ "type": "module",

@@ -56,3 +56,3 @@ # @langchain/openai

apiKey: process.env.OPENAI_API_KEY,
modelName: "gpt-4-1106-preview",
model: "gpt-4-1106-preview",
});

@@ -69,3 +69,3 @@ const response = await model.invoke(new HumanMessage("Hello world!"));

apiKey: process.env.OPENAI_API_KEY,
modelName: "gpt-4-1106-preview",
model: "gpt-4-1106-preview",
});

@@ -72,0 +72,0 @@ const response = await model.stream(new HumanMessage("Hello world!"));

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display