Socket
Socket
Sign in · Demo · Install

@langchain/openai

Package Overview
Dependencies
Maintainers
11
Versions
69
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@langchain/openai - npm Package Compare versions

Comparing version 0.3.5 to 0.3.6-rc.0

4

dist/azure/embeddings.d.ts

@@ -13,3 +13,5 @@ import { type ClientOptions, OpenAI as OpenAIClient } from "openai";

}, configuration?: ClientOptions & LegacyOpenAIInput);
protected embeddingWithRetry(request: OpenAIClient.EmbeddingCreateParams): Promise<OpenAIClient.Embeddings.CreateEmbeddingResponse>;
protected embeddingWithRetry(request: OpenAIClient.EmbeddingCreateParams): Promise<OpenAIClient.Embeddings.CreateEmbeddingResponse & {
_request_id?: string | null | undefined;
}>;
}

@@ -1155,2 +1155,18 @@ import { OpenAI as OpenAIClient } from "openai";

if (usage) {
const inputTokenDetails = {
...(usage.prompt_tokens_details?.audio_tokens !== null && {
audio: usage.prompt_tokens_details?.audio_tokens,
}),
...(usage.prompt_tokens_details?.cached_tokens !== null && {
cache_read: usage.prompt_tokens_details?.cached_tokens,
}),
};
const outputTokenDetails = {
...(usage.completion_tokens_details?.audio_tokens !== null && {
audio: usage.completion_tokens_details?.audio_tokens,
}),
...(usage.completion_tokens_details?.reasoning_tokens !== null && {
reasoning: usage.completion_tokens_details?.reasoning_tokens,
}),
};
const generationChunk = new ChatGenerationChunk({

@@ -1166,2 +1182,8 @@ message: new AIMessageChunk({

total_tokens: usage.total_tokens,
...(Object.keys(inputTokenDetails).length > 0 && {
input_token_details: inputTokenDetails,
}),
...(Object.keys(outputTokenDetails).length > 0 && {
output_token_details: outputTokenDetails,
}),
},

@@ -1186,3 +1208,3 @@ }),

async _generate(messages, options, runManager) {
const tokenUsage = {};
const usageMetadata = {};
const params = this.invocationParams(options);

@@ -1214,6 +1236,15 @@ const messagesMapped = _convertMessagesToOpenAIParams(messages);

const completionTokenUsage = await this.getNumTokensFromGenerations(generations);
tokenUsage.promptTokens = promptTokenUsage;
tokenUsage.completionTokens = completionTokenUsage;
tokenUsage.totalTokens = promptTokenUsage + completionTokenUsage;
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
usageMetadata.input_tokens = promptTokenUsage;
usageMetadata.output_tokens = completionTokenUsage;
usageMetadata.total_tokens = promptTokenUsage + completionTokenUsage;
return {
generations,
llmOutput: {
estimatedTokenUsage: {
promptTokens: usageMetadata.input_tokens,
completionTokens: usageMetadata.output_tokens,
totalTokens: usageMetadata.total_tokens,
},
},
};
}

@@ -1243,13 +1274,37 @@ else {

}
const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data?.usage ?? {};
const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, prompt_tokens_details: promptTokensDetails, completion_tokens_details: completionTokensDetails, } = data?.usage ?? {};
if (completionTokens) {
tokenUsage.completionTokens =
(tokenUsage.completionTokens ?? 0) + completionTokens;
usageMetadata.output_tokens =
(usageMetadata.output_tokens ?? 0) + completionTokens;
}
if (promptTokens) {
tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
usageMetadata.input_tokens =
(usageMetadata.input_tokens ?? 0) + promptTokens;
}
if (totalTokens) {
tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
usageMetadata.total_tokens =
(usageMetadata.total_tokens ?? 0) + totalTokens;
}
if (promptTokensDetails?.audio_tokens !== null ||
promptTokensDetails?.cached_tokens !== null) {
usageMetadata.input_token_details = {
...(promptTokensDetails?.audio_tokens !== null && {
audio: promptTokensDetails?.audio_tokens,
}),
...(promptTokensDetails?.cached_tokens !== null && {
cache_read: promptTokensDetails?.cached_tokens,
}),
};
}
if (completionTokensDetails?.audio_tokens !== null ||
completionTokensDetails?.reasoning_tokens !== null) {
usageMetadata.output_token_details = {
...(completionTokensDetails?.audio_tokens !== null && {
audio: completionTokensDetails?.audio_tokens,
}),
...(completionTokensDetails?.reasoning_tokens !== null && {
reasoning: completionTokensDetails?.reasoning_tokens,
}),
};
}
const generations = [];

@@ -1267,7 +1322,3 @@ for (const part of data?.choices ?? []) {

if (isAIMessage(generation.message)) {
generation.message.usage_metadata = {
input_tokens: tokenUsage.promptTokens ?? 0,
output_tokens: tokenUsage.completionTokens ?? 0,
total_tokens: tokenUsage.totalTokens ?? 0,
};
generation.message.usage_metadata = usageMetadata;
}

@@ -1278,3 +1329,9 @@ generations.push(generation);

generations,
llmOutput: { tokenUsage },
llmOutput: {
tokenUsage: {
promptTokens: usageMetadata.input_tokens,
completionTokens: usageMetadata.output_tokens,
totalTokens: usageMetadata.total_tokens,
},
},
};

@@ -1281,0 +1338,0 @@ }

@@ -105,3 +105,8 @@ import { type ClientOptions, OpenAI as OpenAIClient } from "openai";

*/
protected embeddingWithRetry(request: OpenAIClient.EmbeddingCreateParams): Promise<OpenAIClient.Embeddings.CreateEmbeddingResponse>;
protected embeddingWithRetry(request: OpenAIClient.EmbeddingCreateParams): Promise<OpenAIClient.Embeddings.CreateEmbeddingResponse & {
_request_id?: string | null | undefined; /**
* The number of dimensions the resulting output embeddings should have.
* Only supported in `text-embedding-3` and later models.
*/
}>;
}
{
"name": "@langchain/openai",
"version": "0.3.5",
"version": "0.3.6-rc.0",
"description": "OpenAI integrations for LangChain.js",

@@ -39,3 +39,3 @@ "type": "module",

"js-tiktoken": "^1.0.12",
"openai": "^4.57.3",
"openai": "^4.67.2",
"zod": "^3.22.4",

@@ -42,0 +42,0 @@ "zod-to-json-schema": "^3.22.3"

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc