Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

@langchain/google-genai

Package Overview
Dependencies
Maintainers
12
Versions
98
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@langchain/google-genai - npm Package Compare versions

Comparing version
2.1.18
to
2.1.19
+9
-0
CHANGELOG.md
# @langchain/google-genai
## 2.1.19
### Patch Changes
- [#10078](https://github.com/langchain-ai/langchainjs/pull/10078) [`7be50a7`](https://github.com/langchain-ai/langchainjs/commit/7be50a7014d7622e0ab8d303dfc9c633ebc96333) Thanks [@christian-bromann](https://github.com/christian-bromann)! - chore(\*): update model profiles
- Updated dependencies [[`27186c5`](https://github.com/langchain-ai/langchainjs/commit/27186c54884cfe7c2522fa50b42c3ca0ccaefdba), [`05396f7`](https://github.com/langchain-ai/langchainjs/commit/05396f7ce0a91c49a3bae4bbcd3dbdd6cbd18089), [`5a6f26b`](https://github.com/langchain-ai/langchainjs/commit/5a6f26bbaed80195dc538c538b96219a8b03f38f)]:
- @langchain/core@1.1.25
## 2.1.18

@@ -4,0 +13,0 @@

+20
-24

@@ -1,2 +0,1 @@

const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
const require_zod_to_genai_parameters = require('./utils/zod_to_genai_parameters.cjs');

@@ -7,8 +6,8 @@ const require_common = require('./utils/common.cjs');

const require_profiles = require('./profiles.cjs');
const __google_generative_ai = require_rolldown_runtime.__toESM(require("@google/generative-ai"));
const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
const __langchain_core_language_models_chat_models = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/chat_models"));
const __langchain_core_runnables = require_rolldown_runtime.__toESM(require("@langchain/core/runnables"));
const __langchain_core_utils_types = require_rolldown_runtime.__toESM(require("@langchain/core/utils/types"));
const __langchain_core_output_parsers = require_rolldown_runtime.__toESM(require("@langchain/core/output_parsers"));
let _google_generative_ai = require("@google/generative-ai");
let _langchain_core_utils_env = require("@langchain/core/utils/env");
let _langchain_core_language_models_chat_models = require("@langchain/core/language_models/chat_models");
let _langchain_core_runnables = require("@langchain/core/runnables");
let _langchain_core_utils_types = require("@langchain/core/utils/types");
let _langchain_core_output_parsers = require("@langchain/core/output_parsers");

@@ -391,3 +390,3 @@ //#region src/chat_models.ts

*/
var ChatGoogleGenerativeAI = class extends __langchain_core_language_models_chat_models.BaseChatModel {
var ChatGoogleGenerativeAI = class extends _langchain_core_language_models_chat_models.BaseChatModel {
static lc_name() {

@@ -438,8 +437,7 @@ return "ChatGoogleGenerativeAI";

this.stopSequences = fields.stopSequences ?? this.stopSequences;
this.apiKey = fields.apiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
this.apiKey = fields.apiKey ?? (0, _langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
if (!this.apiKey) throw new Error("Please set an API key for Google GenerativeAI in the environment variable GOOGLE_API_KEY or in the `apiKey` field of the ChatGoogleGenerativeAI constructor");
this.safetySettings = fields.safetySettings ?? this.safetySettings;
if (this.safetySettings && this.safetySettings.length > 0) {
const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
if (safetySettingsSet.size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
if (new Set(this.safetySettings.map((s) => s.category)).size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
}

@@ -449,3 +447,3 @@ this.streaming = fields.streaming ?? this.streaming;

this.thinkingConfig = fields.thinkingConfig ?? this.thinkingConfig;
this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
this.client = new _google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
model: this.model,

@@ -471,3 +469,3 @@ safetySettings: this.safetySettings,

if (!this.apiKey) return;
this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions);
this.client = new _google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions);
}

@@ -542,5 +540,4 @@ get useSystemInstruction() {

}
const generations = finalChunks.filter((c) => c !== void 0);
return {
generations,
generations: finalChunks.filter((c) => c !== void 0),
llmOutput: { estimatedTokenUsage: tokenUsage }

@@ -567,10 +564,9 @@ };

}
const parameters = this.invocationParams(options);
const request = {
...parameters,
...this.invocationParams(options),
contents: actualPrompt
};
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
const { stream: stream$1 } = await this.client.generateContentStream(request, { signal: options?.signal });
return stream$1;
const { stream } = await this.client.generateContentStream(request, { signal: options?.signal });
return stream;
});

@@ -647,3 +643,3 @@ let usageMetadata;

let tools;
if ((0, __langchain_core_utils_types.isInteropZodSchema)(schema)) {
if ((0, _langchain_core_utils_types.isInteropZodSchema)(schema)) {
const jsonSchema = require_zod_to_genai_parameters.schemaToGenerativeAIParameters(schema);

@@ -681,9 +677,9 @@ tools = [{ functionDeclarations: [{

llm = this.withConfig({ responseSchema: jsonSchema });
outputParser = new __langchain_core_output_parsers.JsonOutputParser();
outputParser = new _langchain_core_output_parsers.JsonOutputParser();
}
if (!includeRaw) return llm.pipe(outputParser).withConfig({ runName: "ChatGoogleGenerativeAIStructuredOutput" });
const parserAssign = __langchain_core_runnables.RunnablePassthrough.assign({ parsed: (input, config$1) => outputParser.invoke(input.raw, config$1) });
const parserNone = __langchain_core_runnables.RunnablePassthrough.assign({ parsed: () => null });
const parserAssign = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: (input, config) => outputParser.invoke(input.raw, config) });
const parserNone = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: () => null });
const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone] });
return __langchain_core_runnables.RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "StructuredOutputRunnable" });
return _langchain_core_runnables.RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "StructuredOutputRunnable" });
}

@@ -690,0 +686,0 @@ };

@@ -5,3 +5,3 @@ import { removeAdditionalProperties, schemaToGenerativeAIParameters } from "./utils/zod_to_genai_parameters.js";

import { convertToolsToGenAI } from "./utils/tools.js";
import profiles_default from "./profiles.js";
import PROFILES from "./profiles.js";
import { GoogleGenerativeAI } from "@google/generative-ai";

@@ -440,4 +440,3 @@ import { getEnvironmentVariable } from "@langchain/core/utils/env";

if (this.safetySettings && this.safetySettings.length > 0) {
const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
if (safetySettingsSet.size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
if (new Set(this.safetySettings.map((s) => s.category)).size !== this.safetySettings.length) throw new Error("The categories in `safetySettings` array must be unique");
}

@@ -538,5 +537,4 @@ this.streaming = fields.streaming ?? this.streaming;

}
const generations = finalChunks.filter((c) => c !== void 0);
return {
generations,
generations: finalChunks.filter((c) => c !== void 0),
llmOutput: { estimatedTokenUsage: tokenUsage }

@@ -563,10 +561,9 @@ };

}
const parameters = this.invocationParams(options);
const request = {
...parameters,
...this.invocationParams(options),
contents: actualPrompt
};
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
const { stream: stream$1 } = await this.client.generateContentStream(request, { signal: options?.signal });
return stream$1;
const { stream } = await this.client.generateContentStream(request, { signal: options?.signal });
return stream;
});

@@ -630,3 +627,3 @@ let usageMetadata;

get profile() {
return profiles_default[this.model] ?? {};
return PROFILES[this.model] ?? {};
}

@@ -680,3 +677,3 @@ withStructuredOutput(outputSchema, config) {

if (!includeRaw) return llm.pipe(outputParser).withConfig({ runName: "ChatGoogleGenerativeAIStructuredOutput" });
const parserAssign = RunnablePassthrough.assign({ parsed: (input, config$1) => outputParser.invoke(input.raw, config$1) });
const parserAssign = RunnablePassthrough.assign({ parsed: (input, config) => outputParser.invoke(input.raw, config) });
const parserNone = RunnablePassthrough.assign({ parsed: () => null });

@@ -683,0 +680,0 @@ const parsedWithFallback = parserAssign.withFallbacks({ fallbacks: [parserNone] });

@@ -1,6 +0,5 @@

const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
const __google_generative_ai = require_rolldown_runtime.__toESM(require("@google/generative-ai"));
const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
const __langchain_core_embeddings = require_rolldown_runtime.__toESM(require("@langchain/core/embeddings"));
const __langchain_core_utils_chunk_array = require_rolldown_runtime.__toESM(require("@langchain/core/utils/chunk_array"));
let _google_generative_ai = require("@google/generative-ai");
let _langchain_core_utils_env = require("@langchain/core/utils/env");
let _langchain_core_embeddings = require("@langchain/core/embeddings");
let _langchain_core_utils_chunk_array = require("@langchain/core/utils/chunk_array");

@@ -29,3 +28,3 @@ //#region src/embeddings.ts

*/
var GoogleGenerativeAIEmbeddings = class extends __langchain_core_embeddings.Embeddings {
var GoogleGenerativeAIEmbeddings = class extends _langchain_core_embeddings.Embeddings {
apiKey;

@@ -46,12 +45,11 @@ modelName = "embedding-001";

if (this.title && this.taskType !== "RETRIEVAL_DOCUMENT") throw new Error("title can only be sepcified with TaskType.RETRIEVAL_DOCUMENT");
this.apiKey = fields?.apiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
this.apiKey = fields?.apiKey ?? (0, _langchain_core_utils_env.getEnvironmentVariable)("GOOGLE_API_KEY");
if (!this.apiKey) throw new Error("Please set an API key for Google GenerativeAI in the environmentb variable GOOGLE_API_KEY or in the `apiKey` field of the GoogleGenerativeAIEmbeddings constructor");
this.client = new __google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({ model: this.model }, { baseUrl: fields?.baseUrl });
this.client = new _google_generative_ai.GoogleGenerativeAI(this.apiKey).getGenerativeModel({ model: this.model }, { baseUrl: fields?.baseUrl });
}
_convertToContent(text) {
const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text;
return {
content: {
role: "user",
parts: [{ text: cleanedText }]
parts: [{ text: this.stripNewLines ? text.replace(/\n/g, " ") : text }]
},

@@ -64,14 +62,11 @@ taskType: this.taskType,

const req = this._convertToContent(text);
const res = await this.client.embedContent(req);
return res.embedding.values ?? [];
return (await this.client.embedContent(req)).embedding.values ?? [];
}
async _embedDocumentsContent(documents) {
const batchEmbedChunks = (0, __langchain_core_utils_chunk_array.chunkArray)(documents, this.maxBatchSize);
const batchEmbedChunks = (0, _langchain_core_utils_chunk_array.chunkArray)(documents, this.maxBatchSize);
const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({ requests: chunk.map((doc) => this._convertToContent(doc)) }));
const responses = await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)));
const embeddings = responses.flatMap((res, idx) => {
return (await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)))).flatMap((res, idx) => {
if (res.status === "fulfilled") return res.value.embeddings.map((e) => e.values || []);
else return Array(batchEmbedChunks[idx].length).fill([]);
});
return embeddings;
}

@@ -78,0 +73,0 @@ /**

@@ -5,3 +5,2 @@ import { TaskType } from "@google/generative-ai";

//#region src/embeddings.d.ts
/**

@@ -8,0 +7,0 @@ * Interface that extends EmbeddingsParams and defines additional

@@ -5,3 +5,2 @@ import { TaskType } from "@google/generative-ai";

//#region src/embeddings.d.ts
/**

@@ -8,0 +7,0 @@ * Interface that extends EmbeddingsParams and defines additional

@@ -49,7 +49,6 @@ import { GoogleGenerativeAI } from "@google/generative-ai";

_convertToContent(text) {
const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text;
return {
content: {
role: "user",
parts: [{ text: cleanedText }]
parts: [{ text: this.stripNewLines ? text.replace(/\n/g, " ") : text }]
},

@@ -62,4 +61,3 @@ taskType: this.taskType,

const req = this._convertToContent(text);
const res = await this.client.embedContent(req);
return res.embedding.values ?? [];
return (await this.client.embedContent(req)).embedding.values ?? [];
}

@@ -69,8 +67,6 @@ async _embedDocumentsContent(documents) {

const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({ requests: chunk.map((doc) => this._convertToContent(doc)) }));
const responses = await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)));
const embeddings = responses.flatMap((res, idx) => {
return (await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)))).flatMap((res, idx) => {
if (res.status === "fulfilled") return res.value.embeddings.map((e) => e.values || []);
else return Array(batchEmbedChunks[idx].length).fill([]);
});
return embeddings;
}

@@ -77,0 +73,0 @@ /**

@@ -0,1 +1,2 @@

Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
const require_chat_models = require('./chat_models.cjs');

@@ -2,0 +3,0 @@ const require_embeddings = require('./embeddings.cjs');

@@ -1,7 +0,6 @@

const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
const __langchain_core_utils_types = require_rolldown_runtime.__toESM(require("@langchain/core/utils/types"));
const __langchain_core_output_parsers = require_rolldown_runtime.__toESM(require("@langchain/core/output_parsers"));
let _langchain_core_utils_types = require("@langchain/core/utils/types");
let _langchain_core_output_parsers = require("@langchain/core/output_parsers");
//#region src/output_parsers.ts
var GoogleGenerativeAIToolsOutputParser = class extends __langchain_core_output_parsers.BaseLLMOutputParser {
var GoogleGenerativeAIToolsOutputParser = class extends _langchain_core_output_parsers.BaseLLMOutputParser {
static lc_name() {

@@ -29,5 +28,5 @@ return "GoogleGenerativeAIToolsOutputParser";

if (this.zodSchema === void 0) return result;
const zodParsedResult = await (0, __langchain_core_utils_types.interopSafeParseAsync)(this.zodSchema, result);
const zodParsedResult = await (0, _langchain_core_utils_types.interopSafeParseAsync)(this.zodSchema, result);
if (zodParsedResult.success) return zodParsedResult.data;
else throw new __langchain_core_output_parsers.OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error.issues)}`, JSON.stringify(result, null, 2));
else throw new _langchain_core_output_parsers.OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error.issues)}`, JSON.stringify(result, null, 2));
}

@@ -42,4 +41,3 @@ async parseResult(generations) {

const [tool] = tools;
const validatedResult = await this._validateResult(tool.args);
return validatedResult;
return await this._validateResult(tool.args);
}

@@ -46,0 +44,0 @@ };

@@ -40,4 +40,3 @@ import { interopSafeParseAsync } from "@langchain/core/utils/types";

const [tool] = tools;
const validatedResult = await this._validateResult(tool.args);
return validatedResult;
return await this._validateResult(tool.args);
}

@@ -44,0 +43,0 @@ };

@@ -18,17 +18,17 @@

},
"gemini-2.5-flash-image": {
maxInputTokens: 32768,
"gemini-2.5-flash-lite-preview-09-2025": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 32768,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: true,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
toolCalling: true,
structuredOutput: true
},
"gemini-2.5-flash-preview-05-20": {
"gemini-2.5-pro-preview-06-05": {
maxInputTokens: 1048576,

@@ -47,3 +47,3 @@ imageInputs: true,

},
"gemini-flash-lite-latest": {
"gemini-2.5-flash-preview-04-17": {
maxInputTokens: 1048576,

@@ -60,5 +60,19 @@ imageInputs: true,

toolCalling: true,
structuredOutput: false
},
"gemini-2.5-flash-preview-09-2025": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
structuredOutput: true
},
"gemini-2.5-flash": {
"gemini-2.5-pro-preview-05-06": {
maxInputTokens: 1048576,

@@ -77,3 +91,3 @@ imageInputs: true,

},
"gemini-flash-latest": {
"gemini-2.5-flash-preview-05-20": {
maxInputTokens: 1048576,

@@ -92,3 +106,3 @@ imageInputs: true,

},
"gemini-2.5-pro-preview-05-06": {
"gemini-2.5-flash": {
maxInputTokens: 1048576,

@@ -107,17 +121,17 @@ imageInputs: true,

},
"gemini-2.5-flash-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
"gemini-live-2.5-flash": {
maxInputTokens: 128e3,
imageInputs: true,
audioInputs: true,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
videoInputs: true,
maxOutputTokens: 8e3,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
toolCalling: true,
structuredOutput: false
},
"gemini-2.0-flash-lite": {
"gemini-3-flash-preview": {
maxInputTokens: 1048576,

@@ -128,4 +142,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -151,3 +165,3 @@ audioOutputs: false,

},
"gemini-2.0-flash": {
"gemini-2.5-flash-lite": {
maxInputTokens: 1048576,

@@ -158,4 +172,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -167,3 +181,17 @@ audioOutputs: false,

},
"gemini-2.5-flash-lite": {
"gemini-2.5-flash-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
},
"gemini-flash-latest": {
maxInputTokens: 1048576,

@@ -182,3 +210,3 @@ imageInputs: true,

},
"gemini-2.5-pro-preview-06-05": {
"gemini-2.5-flash-lite-preview-06-17": {
maxInputTokens: 1048576,

@@ -195,30 +223,30 @@ imageInputs: true,

toolCalling: true,
structuredOutput: true
structuredOutput: false
},
"gemini-live-2.5-flash": {
maxInputTokens: 128e3,
"gemini-2.5-flash-image": {
maxInputTokens: 32768,
imageInputs: true,
audioInputs: true,
audioInputs: false,
pdfInputs: false,
videoInputs: true,
maxOutputTokens: 8e3,
videoInputs: false,
maxOutputTokens: 32768,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: true,
imageOutputs: true,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
toolCalling: false,
structuredOutput: false
},
"gemini-2.5-flash-lite-preview-06-17": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
"gemini-2.5-pro-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: true,
toolCalling: false,
structuredOutput: false

@@ -240,10 +268,10 @@ },

},
"gemini-2.5-flash-preview-09-2025": {
maxInputTokens: 1048576,
"gemini-1.5-flash-8b": {
maxInputTokens: 1e6,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
pdfInputs: false,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,

@@ -253,6 +281,6 @@ audioOutputs: false,

toolCalling: true,
structuredOutput: true
structuredOutput: false
},
"gemini-2.5-flash-preview-04-17": {
maxInputTokens: 1048576,
"gemini-3-pro-preview": {
maxInputTokens: 1e6,
imageInputs: true,

@@ -262,3 +290,3 @@ audioInputs: true,

videoInputs: true,
maxOutputTokens: 65536,
maxOutputTokens: 64e3,
reasoningOutput: true,

@@ -269,19 +297,5 @@ imageOutputs: false,

toolCalling: true,
structuredOutput: false
structuredOutput: true
},
"gemini-2.5-pro-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
},
"gemini-2.5-pro": {
"gemini-2.0-flash-lite": {
maxInputTokens: 1048576,

@@ -292,4 +306,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,

@@ -315,10 +329,10 @@ audioOutputs: false,

},
"gemini-1.5-flash-8b": {
maxInputTokens: 1e6,
"gemini-flash-lite-latest": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: false,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -328,5 +342,5 @@ audioOutputs: false,

toolCalling: true,
structuredOutput: false
structuredOutput: true
},
"gemini-2.5-flash-lite-preview-09-2025": {
"gemini-2.5-pro": {
maxInputTokens: 1048576,

@@ -345,2 +359,16 @@ imageInputs: true,

},
"gemini-2.0-flash": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
structuredOutput: true
},
"gemini-1.5-pro": {

@@ -361,6 +389,5 @@ maxInputTokens: 1e6,

};
var profiles_default = PROFILES;
//#endregion
exports.default = profiles_default;
exports.default = PROFILES;
//# sourceMappingURL=profiles.cjs.map

@@ -17,17 +17,17 @@ //#region src/profiles.ts

},
"gemini-2.5-flash-image": {
maxInputTokens: 32768,
"gemini-2.5-flash-lite-preview-09-2025": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 32768,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: true,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
toolCalling: true,
structuredOutput: true
},
"gemini-2.5-flash-preview-05-20": {
"gemini-2.5-pro-preview-06-05": {
maxInputTokens: 1048576,

@@ -46,3 +46,3 @@ imageInputs: true,

},
"gemini-flash-lite-latest": {
"gemini-2.5-flash-preview-04-17": {
maxInputTokens: 1048576,

@@ -59,5 +59,19 @@ imageInputs: true,

toolCalling: true,
structuredOutput: false
},
"gemini-2.5-flash-preview-09-2025": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
structuredOutput: true
},
"gemini-2.5-flash": {
"gemini-2.5-pro-preview-05-06": {
maxInputTokens: 1048576,

@@ -76,3 +90,3 @@ imageInputs: true,

},
"gemini-flash-latest": {
"gemini-2.5-flash-preview-05-20": {
maxInputTokens: 1048576,

@@ -91,3 +105,3 @@ imageInputs: true,

},
"gemini-2.5-pro-preview-05-06": {
"gemini-2.5-flash": {
maxInputTokens: 1048576,

@@ -106,17 +120,17 @@ imageInputs: true,

},
"gemini-2.5-flash-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
"gemini-live-2.5-flash": {
maxInputTokens: 128e3,
imageInputs: true,
audioInputs: true,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
videoInputs: true,
maxOutputTokens: 8e3,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
toolCalling: true,
structuredOutput: false
},
"gemini-2.0-flash-lite": {
"gemini-3-flash-preview": {
maxInputTokens: 1048576,

@@ -127,4 +141,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -150,3 +164,3 @@ audioOutputs: false,

},
"gemini-2.0-flash": {
"gemini-2.5-flash-lite": {
maxInputTokens: 1048576,

@@ -157,4 +171,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -166,3 +180,17 @@ audioOutputs: false,

},
"gemini-2.5-flash-lite": {
"gemini-2.5-flash-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
},
"gemini-flash-latest": {
maxInputTokens: 1048576,

@@ -181,3 +209,3 @@ imageInputs: true,

},
"gemini-2.5-pro-preview-06-05": {
"gemini-2.5-flash-lite-preview-06-17": {
maxInputTokens: 1048576,

@@ -194,30 +222,30 @@ imageInputs: true,

toolCalling: true,
structuredOutput: true
structuredOutput: false
},
"gemini-live-2.5-flash": {
maxInputTokens: 128e3,
"gemini-2.5-flash-image": {
maxInputTokens: 32768,
imageInputs: true,
audioInputs: true,
audioInputs: false,
pdfInputs: false,
videoInputs: true,
maxOutputTokens: 8e3,
videoInputs: false,
maxOutputTokens: 32768,
reasoningOutput: true,
imageOutputs: false,
audioOutputs: true,
imageOutputs: true,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
toolCalling: false,
structuredOutput: false
},
"gemini-2.5-flash-lite-preview-06-17": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
"gemini-2.5-pro-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: true,
toolCalling: false,
structuredOutput: false

@@ -239,10 +267,10 @@ },

},
"gemini-2.5-flash-preview-09-2025": {
maxInputTokens: 1048576,
"gemini-1.5-flash-8b": {
maxInputTokens: 1e6,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
pdfInputs: false,
videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,

@@ -252,6 +280,6 @@ audioOutputs: false,

toolCalling: true,
structuredOutput: true
structuredOutput: false
},
"gemini-2.5-flash-preview-04-17": {
maxInputTokens: 1048576,
"gemini-3-pro-preview": {
maxInputTokens: 1e6,
imageInputs: true,

@@ -261,3 +289,3 @@ audioInputs: true,

videoInputs: true,
maxOutputTokens: 65536,
maxOutputTokens: 64e3,
reasoningOutput: true,

@@ -268,19 +296,5 @@ imageOutputs: false,

toolCalling: true,
structuredOutput: false
structuredOutput: true
},
"gemini-2.5-pro-preview-tts": {
maxInputTokens: 8e3,
imageInputs: false,
audioInputs: false,
pdfInputs: false,
videoInputs: false,
maxOutputTokens: 16e3,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: true,
videoOutputs: false,
toolCalling: false,
structuredOutput: false
},
"gemini-2.5-pro": {
"gemini-2.0-flash-lite": {
maxInputTokens: 1048576,

@@ -291,4 +305,4 @@ imageInputs: true,

videoInputs: true,
maxOutputTokens: 65536,
reasoningOutput: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,

@@ -314,10 +328,10 @@ audioOutputs: false,

},
"gemini-1.5-flash-8b": {
maxInputTokens: 1e6,
"gemini-flash-lite-latest": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: false,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
maxOutputTokens: 65536,
reasoningOutput: true,
imageOutputs: false,

@@ -327,5 +341,5 @@ audioOutputs: false,

toolCalling: true,
structuredOutput: false
structuredOutput: true
},
"gemini-2.5-flash-lite-preview-09-2025": {
"gemini-2.5-pro": {
maxInputTokens: 1048576,

@@ -344,2 +358,16 @@ imageInputs: true,

},
"gemini-2.0-flash": {
maxInputTokens: 1048576,
imageInputs: true,
audioInputs: true,
pdfInputs: true,
videoInputs: true,
maxOutputTokens: 8192,
reasoningOutput: false,
imageOutputs: false,
audioOutputs: false,
videoOutputs: false,
toolCalling: true,
structuredOutput: true
},
"gemini-1.5-pro": {

@@ -360,6 +388,5 @@ maxInputTokens: 1e6,

};
var profiles_default = PROFILES;
//#endregion
export { profiles_default as default };
export { PROFILES as default };
//# sourceMappingURL=profiles.js.map

@@ -7,7 +7,4 @@ import { CodeExecutionTool, FunctionDeclarationsTool, GoogleSearchRetrievalTool, Part } from "@google/generative-ai";

type GoogleGenerativeAIThinkingConfig = {
/** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */
includeThoughts?: boolean;
/** The number of thoughts tokens that the model should generate. */
thinkingBudget?: number;
/** Optional. The level of thoughts tokens that the model should generate. */
/** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */includeThoughts?: boolean; /** The number of thoughts tokens that the model should generate. */
thinkingBudget?: number; /** Optional. The level of thoughts tokens that the model should generate. */
thinkingLevel?: GoogleGenerativeAIThinkingLevel;

@@ -14,0 +11,0 @@ };

@@ -7,7 +7,4 @@ import { CodeExecutionTool, FunctionDeclarationsTool, GoogleSearchRetrievalTool, Part } from "@google/generative-ai";

type GoogleGenerativeAIThinkingConfig = {
/** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */
includeThoughts?: boolean;
/** The number of thoughts tokens that the model should generate. */
thinkingBudget?: number;
/** Optional. The level of thoughts tokens that the model should generate. */
/** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */includeThoughts?: boolean; /** The number of thoughts tokens that the model should generate. */
thinkingBudget?: number; /** Optional. The level of thoughts tokens that the model should generate. */
thinkingLevel?: GoogleGenerativeAIThinkingLevel;

@@ -14,0 +11,0 @@ };

@@ -1,9 +0,8 @@

const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const require_zod_to_genai_parameters = require('./zod_to_genai_parameters.cjs');
const require_validate_schema = require('./validate_schema.cjs');
const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
const __langchain_core_utils_function_calling = require_rolldown_runtime.__toESM(require("@langchain/core/utils/function_calling"));
const __langchain_core_language_models_base = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/base"));
const uuid = require_rolldown_runtime.__toESM(require("uuid"));
let _langchain_core_messages = require("@langchain/core/messages");
let _langchain_core_outputs = require("@langchain/core/outputs");
let _langchain_core_utils_function_calling = require("@langchain/core/utils/function_calling");
let _langchain_core_language_models_base = require("@langchain/core/language_models/base");
let uuid = require("uuid");

@@ -15,3 +14,3 @@ //#region src/utils/common.ts

function getMessageAuthor(message) {
if (__langchain_core_messages.ChatMessage.isInstance(message)) return message.role;
if (_langchain_core_messages.ChatMessage.isInstance(message)) return message.role;
return message.type;

@@ -50,3 +49,3 @@ }

return previousMessages.map((msg) => {
if ((0, __langchain_core_messages.isAIMessage)(msg)) return msg.tool_calls ?? [];
if ((0, _langchain_core_messages.isAIMessage)(msg)) return msg.tool_calls ?? [];
return [];

@@ -58,3 +57,3 @@ }).flat().find((toolCall) => {

function _getStandardContentBlockConverter(isMultimodalModel) {
const standardContentBlockConverter = {
return {
providerName: "Google Gemini",

@@ -67,3 +66,3 @@ fromStandardTextBlock(block) {

if (block.source_type === "url") {
const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
const data = (0, _langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
if (data) return { inlineData: {

@@ -87,3 +86,3 @@ mimeType: data.mime_type,

if (block.source_type === "url") {
const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
const data = (0, _langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
if (data) return { inlineData: {

@@ -108,3 +107,3 @@ mimeType: data.mime_type,

if (block.source_type === "url") {
const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
const data = (0, _langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
if (data) return { inlineData: {

@@ -126,6 +125,5 @@ mimeType: data.mime_type,

};
return standardContentBlockConverter;
}
function _convertLangChainContentToPart(content, isMultimodalModel) {
if ((0, __langchain_core_messages.isDataContentBlock)(content)) return (0, __langchain_core_messages.convertToProviderContentBlock)(content, _getStandardContentBlockConverter(isMultimodalModel));
if ((0, _langchain_core_messages.isDataContentBlock)(content)) return (0, _langchain_core_messages.convertToProviderContentBlock)(content, _getStandardContentBlockConverter(isMultimodalModel));
if (content.type === "text") return { text: content.text };

@@ -161,3 +159,3 @@ else if (content.type === "executableCode") return { executableCode: content.executableCode };

} };
else if ("functionCall" in content) return void 0;
else if ("functionCall" in content) return;
else if ("type" in content) throw new Error(`Unknown content type ${content.type}`);

@@ -167,3 +165,3 @@ else throw new Error(`Unknown content ${JSON.stringify(content)}`);

function convertMessageContentToParts(message, isMultimodalModel, previousMessages, model) {
if ((0, __langchain_core_messages.isToolMessage)(message)) {
if ((0, _langchain_core_messages.isToolMessage)(message)) {
const messageName = message.name ?? inferToolNameFromPreviousMessages(message, previousMessages);

@@ -186,3 +184,3 @@ if (messageName === void 0) throw new Error(`Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`);

const functionThoughtSignatures = message.additional_kwargs?.[_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY];
if ((0, __langchain_core_messages.isAIMessage)(message) && message.tool_calls?.length) functionCalls = message.tool_calls.map((tc) => {
if ((0, _langchain_core_messages.isAIMessage)(message) && message.tool_calls?.length) functionCalls = message.tool_calls.map((tc) => {
const thoughtSignature = iife(() => {

@@ -208,3 +206,3 @@ if (tc.id) {

return messages.reduce((acc, message, index) => {
if (!(0, __langchain_core_messages.isBaseMessage)(message)) throw new Error("Unsupported message input");
if (!(0, _langchain_core_messages.isBaseMessage)(message)) throw new Error("Unsupported message input");
const author = getMessageAuthor(message);

@@ -217,5 +215,5 @@ if (author === "system" && index !== 0) throw new Error("System message should be the first one");

if (acc.mergeWithPreviousContent) {
const prevContent$1 = acc.content[acc.content.length - 1];
if (!prevContent$1) throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
prevContent$1.parts.push(...parts);
const prevContent = acc.content[acc.content.length - 1];
if (!prevContent) throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
prevContent.parts.push(...parts);
return {

@@ -247,3 +245,3 @@ mergeWithPreviousContent: false,

const [candidate] = response.candidates;
const { content: candidateContent,...generationInfo } = candidate;
const { content: candidateContent, ...generationInfo } = candidate;
const functionCalls = candidateContent.parts?.reduce((acc, p) => {

@@ -302,26 +300,22 @@ if ("functionCall" in p && p.functionCall) acc.push({

if (typeof content === "string") text = content;
else if (Array.isArray(content) && content.length > 0) {
const block = content.find((b) => "text" in b);
text = block?.text ?? text;
}
const generation = {
text,
message: new __langchain_core_messages.AIMessage({
content: content ?? "",
tool_calls: functionCalls?.map((fc) => ({
type: "tool_call",
id: fc.id,
name: fc.functionCall.name,
args: fc.functionCall.args
})),
additional_kwargs: {
...generationInfo,
[_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY]: functionThoughtSignatures
},
usage_metadata: extra?.usageMetadata
}),
generationInfo
};
else if (Array.isArray(content) && content.length > 0) text = content.find((b) => "text" in b)?.text ?? text;
return {
generations: [generation],
generations: [{
text,
message: new _langchain_core_messages.AIMessage({
content: content ?? "",
tool_calls: functionCalls?.map((fc) => ({
type: "tool_call",
id: fc.id,
name: fc.functionCall.name,
args: fc.functionCall.args
})),
additional_kwargs: {
...generationInfo,
[_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY]: functionThoughtSignatures
},
usage_metadata: extra?.usageMetadata
}),
generationInfo
}],
llmOutput: { tokenUsage: {

@@ -337,3 +331,3 @@ promptTokens: extra?.usageMetadata?.input_tokens,

const [candidate] = response.candidates;
const { content: candidateContent,...generationInfo } = candidate;
const { content: candidateContent, ...generationInfo } = candidate;
const functionCalls = candidateContent.parts?.reduce((acc, p) => {

@@ -388,6 +382,3 @@ if ("functionCall" in p && p.functionCall) acc.push({

if (content && typeof content === "string") text = content;
else if (Array.isArray(content)) {
const block = content.find((b) => "text" in b);
text = block?.text ?? "";
}
else if (Array.isArray(content)) text = content.find((b) => "text" in b)?.text ?? "";
const toolCallChunks = [];

@@ -404,5 +395,5 @@ if (functionCalls) toolCallChunks.push(...functionCalls.map((fc) => ({

}, {});
return new __langchain_core_outputs.ChatGenerationChunk({
return new _langchain_core_outputs.ChatGenerationChunk({
text,
message: new __langchain_core_messages.AIMessageChunk({
message: new _langchain_core_messages.AIMessageChunk({
content: content || "",

@@ -421,3 +412,3 @@ name: !candidateContent ? void 0 : candidateContent.role,

return [{ functionDeclarations: tools.map((tool) => {
if ((0, __langchain_core_utils_function_calling.isLangChainTool)(tool)) {
if ((0, _langchain_core_utils_function_calling.isLangChainTool)(tool)) {
const jsonSchema = require_zod_to_genai_parameters.schemaToGenerativeAIParameters(tool.schema);

@@ -435,3 +426,3 @@ if (jsonSchema.type === "object" && "properties" in jsonSchema && Object.keys(jsonSchema.properties).length === 0) return {

}
if ((0, __langchain_core_language_models_base.isOpenAITool)(tool)) {
if ((0, _langchain_core_language_models_base.isOpenAITool)(tool)) {
const params = require_zod_to_genai_parameters.jsonSchemaToGeminiParameters(tool.function.parameters);

@@ -438,0 +429,0 @@ require_validate_schema.assertNoEmptyStringEnums(params, tool.function.name);

@@ -55,3 +55,3 @@ import { jsonSchemaToGeminiParameters, schemaToGenerativeAIParameters } from "./zod_to_genai_parameters.js";

function _getStandardContentBlockConverter(isMultimodalModel) {
const standardContentBlockConverter = {
return {
providerName: "Google Gemini",

@@ -120,3 +120,2 @@ fromStandardTextBlock(block) {

};
return standardContentBlockConverter;
}

@@ -155,3 +154,3 @@ function _convertLangChainContentToPart(content, isMultimodalModel) {

} };
else if ("functionCall" in content) return void 0;
else if ("functionCall" in content) return;
else if ("type" in content) throw new Error(`Unknown content type ${content.type}`);

@@ -208,5 +207,5 @@ else throw new Error(`Unknown content ${JSON.stringify(content)}`);

if (acc.mergeWithPreviousContent) {
const prevContent$1 = acc.content[acc.content.length - 1];
if (!prevContent$1) throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
prevContent$1.parts.push(...parts);
const prevContent = acc.content[acc.content.length - 1];
if (!prevContent) throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
prevContent.parts.push(...parts);
return {

@@ -238,3 +237,3 @@ mergeWithPreviousContent: false,

const [candidate] = response.candidates;
const { content: candidateContent,...generationInfo } = candidate;
const { content: candidateContent, ...generationInfo } = candidate;
const functionCalls = candidateContent.parts?.reduce((acc, p) => {

@@ -293,26 +292,22 @@ if ("functionCall" in p && p.functionCall) acc.push({

if (typeof content === "string") text = content;
else if (Array.isArray(content) && content.length > 0) {
const block = content.find((b) => "text" in b);
text = block?.text ?? text;
}
const generation = {
text,
message: new AIMessage({
content: content ?? "",
tool_calls: functionCalls?.map((fc) => ({
type: "tool_call",
id: fc.id,
name: fc.functionCall.name,
args: fc.functionCall.args
})),
additional_kwargs: {
...generationInfo,
[_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY]: functionThoughtSignatures
},
usage_metadata: extra?.usageMetadata
}),
generationInfo
};
else if (Array.isArray(content) && content.length > 0) text = content.find((b) => "text" in b)?.text ?? text;
return {
generations: [generation],
generations: [{
text,
message: new AIMessage({
content: content ?? "",
tool_calls: functionCalls?.map((fc) => ({
type: "tool_call",
id: fc.id,
name: fc.functionCall.name,
args: fc.functionCall.args
})),
additional_kwargs: {
...generationInfo,
[_FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY]: functionThoughtSignatures
},
usage_metadata: extra?.usageMetadata
}),
generationInfo
}],
llmOutput: { tokenUsage: {

@@ -328,3 +323,3 @@ promptTokens: extra?.usageMetadata?.input_tokens,

const [candidate] = response.candidates;
const { content: candidateContent,...generationInfo } = candidate;
const { content: candidateContent, ...generationInfo } = candidate;
const functionCalls = candidateContent.parts?.reduce((acc, p) => {

@@ -379,6 +374,3 @@ if ("functionCall" in p && p.functionCall) acc.push({

if (content && typeof content === "string") text = content;
else if (Array.isArray(content)) {
const block = content.find((b) => "text" in b);
text = block?.text ?? "";
}
else if (Array.isArray(content)) text = content.find((b) => "text" in b)?.text ?? "";
const toolCallChunks = [];

@@ -385,0 +377,0 @@ if (functionCalls) toolCallChunks.push(...functionCalls.map((fc) => ({

@@ -1,7 +0,6 @@

const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const require_zod_to_genai_parameters = require('./zod_to_genai_parameters.cjs');
const require_common = require('./common.cjs');
const __google_generative_ai = require_rolldown_runtime.__toESM(require("@google/generative-ai"));
const __langchain_core_utils_function_calling = require_rolldown_runtime.__toESM(require("@langchain/core/utils/function_calling"));
const __langchain_core_language_models_base = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/base"));
let _google_generative_ai = require("@google/generative-ai");
let _langchain_core_utils_function_calling = require("@langchain/core/utils/function_calling");
let _langchain_core_language_models_base = require("@langchain/core/language_models/base");

@@ -11,6 +10,5 @@ //#region src/utils/tools.ts

const genAITools = processTools(tools);
const toolConfig = createToolConfig(genAITools, extra);
return {
tools: genAITools,
toolConfig
toolConfig: createToolConfig(genAITools, extra)
};

@@ -22,6 +20,6 @@ }

tools.forEach((tool) => {
if ((0, __langchain_core_utils_function_calling.isLangChainTool)(tool)) {
if ((0, _langchain_core_utils_function_calling.isLangChainTool)(tool)) {
const [convertedTool] = require_common.convertToGenerativeAITools([tool]);
if (convertedTool.functionDeclarations) functionDeclarationTools.push(...convertedTool.functionDeclarations);
} else if ((0, __langchain_core_language_models_base.isOpenAITool)(tool)) {
} else if ((0, _langchain_core_language_models_base.isOpenAITool)(tool)) {
const { functionDeclarations } = convertOpenAIToolToGenAI(tool);

@@ -32,4 +30,3 @@ if (functionDeclarations) functionDeclarationTools.push(...functionDeclarations);

});
const genAIFunctionDeclaration = genAITools.find((t) => "functionDeclarations" in t);
if (genAIFunctionDeclaration) return genAITools.map((tool) => {
if (genAITools.find((t) => "functionDeclarations" in t)) return genAITools.map((tool) => {
if (functionDeclarationTools?.length > 0 && "functionDeclarations" in tool) {

@@ -55,5 +52,5 @@ const newTool = { functionDeclarations: [...tool.functionDeclarations || [], ...functionDeclarationTools] };

const modeMap = {
any: __google_generative_ai.FunctionCallingMode.ANY,
auto: __google_generative_ai.FunctionCallingMode.AUTO,
none: __google_generative_ai.FunctionCallingMode.NONE
any: _google_generative_ai.FunctionCallingMode.ANY,
auto: _google_generative_ai.FunctionCallingMode.AUTO,
none: _google_generative_ai.FunctionCallingMode.NONE
};

@@ -69,6 +66,5 @@ if (toolChoice && [

if (typeof toolChoice === "string" || allowedFunctionNames) return { functionCallingConfig: {
mode: __google_generative_ai.FunctionCallingMode.ANY,
mode: _google_generative_ai.FunctionCallingMode.ANY,
allowedFunctionNames: [...allowedFunctionNames ?? [], ...toolChoice && typeof toolChoice === "string" ? [toolChoice] : []]
} };
return void 0;
}

@@ -75,0 +71,0 @@

@@ -10,6 +10,5 @@ import { removeAdditionalProperties } from "./zod_to_genai_parameters.js";

const genAITools = processTools(tools);
const toolConfig = createToolConfig(genAITools, extra);
return {
tools: genAITools,
toolConfig
toolConfig: createToolConfig(genAITools, extra)
};

@@ -30,4 +29,3 @@ }

});
const genAIFunctionDeclaration = genAITools.find((t) => "functionDeclarations" in t);
if (genAIFunctionDeclaration) return genAITools.map((tool) => {
if (genAITools.find((t) => "functionDeclarations" in t)) return genAITools.map((tool) => {
if (functionDeclarationTools?.length > 0 && "functionDeclarations" in tool) {

@@ -69,3 +67,2 @@ const newTool = { functionDeclarations: [...tool.functionDeclarations || [], ...functionDeclarationTools] };

} };
return void 0;
}

@@ -72,0 +69,0 @@

@@ -1,4 +0,3 @@

const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const __langchain_core_utils_types = require_rolldown_runtime.__toESM(require("@langchain/core/utils/types"));
const __langchain_core_utils_json_schema = require_rolldown_runtime.__toESM(require("@langchain/core/utils/json_schema"));
let _langchain_core_utils_types = require("@langchain/core/utils/types");
let _langchain_core_utils_json_schema = require("@langchain/core/utils/json_schema");

@@ -21,9 +20,7 @@ //#region src/utils/zod_to_genai_parameters.ts

function schemaToGenerativeAIParameters(schema) {
const jsonSchema = removeAdditionalProperties((0, __langchain_core_utils_types.isInteropZodSchema)(schema) ? (0, __langchain_core_utils_json_schema.toJsonSchema)(schema) : schema);
const { $schema,...rest } = jsonSchema;
const { $schema, ...rest } = removeAdditionalProperties((0, _langchain_core_utils_types.isInteropZodSchema)(schema) ? (0, _langchain_core_utils_json_schema.toJsonSchema)(schema) : schema);
return rest;
}
function jsonSchemaToGeminiParameters(schema) {
const jsonSchema = removeAdditionalProperties(schema);
const { $schema,...rest } = jsonSchema;
const { $schema, ...rest } = removeAdditionalProperties(schema);
return rest;

@@ -30,0 +27,0 @@ }

@@ -20,9 +20,7 @@ import { isInteropZodSchema } from "@langchain/core/utils/types";

function schemaToGenerativeAIParameters(schema) {
const jsonSchema = removeAdditionalProperties(isInteropZodSchema(schema) ? toJsonSchema(schema) : schema);
const { $schema,...rest } = jsonSchema;
const { $schema, ...rest } = removeAdditionalProperties(isInteropZodSchema(schema) ? toJsonSchema(schema) : schema);
return rest;
}
function jsonSchemaToGeminiParameters(schema) {
const jsonSchema = removeAdditionalProperties(schema);
const { $schema,...rest } = jsonSchema;
const { $schema, ...rest } = removeAdditionalProperties(schema);
return rest;

@@ -29,0 +27,0 @@ }

{
"name": "@langchain/google-genai",
"version": "2.1.18",
"version": "2.1.19",
"description": "Google Generative AI integration for LangChain.js",

@@ -21,3 +21,3 @@ "author": "LangChain",

"peerDependencies": {
"@langchain/core": "^1.1.23"
"@langchain/core": "^1.1.25"
},

@@ -39,6 +39,6 @@ "devDependencies": {

"zod": "^3.25.76",
"@langchain/core": "^1.1.23",
"@langchain/tsconfig": "0.0.1",
"@langchain/core": "^1.1.25",
"@langchain/eslint": "0.1.1",
"@langchain/standard-tests": "0.0.23",
"@langchain/eslint": "0.1.1"
"@langchain/tsconfig": "0.0.1"
},

@@ -45,0 +45,0 @@ "publishConfig": {

//#region rolldown:runtime
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
/**
 * Copy every own property of `from` onto `to` as a lazy getter.
 * Keys already present on `to`, and the single key named `except`, are
 * skipped. Each getter re-reads `from[key]` on access, so later mutations
 * of the source module are observed through the wrapper (live bindings).
 * `desc` is an internal scratch parameter reused for the source descriptor.
 */
var __copyProps = (to, from, except, desc) => {
	const copyable = (from && typeof from === "object") || typeof from === "function";
	if (copyable) {
		for (const key of __getOwnPropNames(from)) {
			if (__hasOwnProp.call(to, key) || key === except) continue;
			desc = __getOwnPropDesc(from, key);
			__defProp(to, key, {
				get: () => from[key],
				// Mirror the source property's enumerability; treat a missing
				// descriptor as enumerable.
				enumerable: !desc || desc.enumerable
			});
		}
	}
	return to;
};
/**
 * Wrap a CommonJS `require()` result so it can be consumed like an ES module
 * namespace object. When the module is not flagged `__esModule` (or
 * `isNodeMode` is set, or `mod` is falsy), the whole exports value is exposed
 * as a non-lazy `default` property; all own properties of `mod` are then
 * re-exported onto the wrapper as getters via `__copyProps`.
 */
var __toESM = (mod, isNodeMode, target) => {
	target = mod != null ? __create(__getProtoOf(mod)) : {};
	if (isNodeMode || !mod || !mod.__esModule) {
		__defProp(target, "default", { value: mod, enumerable: true });
	}
	return __copyProps(target, mod);
};
//#endregion
exports.__toESM = __toESM;

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet