@langchain/google-genai - npm Package Compare versions

Comparing version 0.0.17 to 0.0.18


dist/chat_models.d.ts

@@ -1,2 +0,2 @@
- import { FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, GenerateContentRequest, SafetySetting } from "@google/generative-ai";
+ import { FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, GenerateContentRequest, SafetySetting, Part as GenerativeAIPart } from "@google/generative-ai";
  import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";

@@ -16,2 +16,8 @@ import { AIMessageChunk, BaseMessage } from "@langchain/core/messages";
  tools?: StructuredToolInterface[] | GoogleGenerativeAIFunctionDeclarationsTool[];
+ /**
+  * Whether or not to include usage data, like token counts
+  * in the streamed response chunks.
+  * @default true
+  */
+ streamUsage?: boolean;
  }

@@ -21,3 +27,3 @@ /**
  */
- export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
+ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, "streamUsage"> {
  /**

@@ -152,2 +158,3 @@ * Model Name to use
  streaming: boolean;
+ streamUsage: boolean;
  private client;

@@ -163,2 +170,3 @@ get _isMultimodalModel(): boolean;
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+ completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this["ParsedCallOptions"]): Promise<import("@google/generative-ai").GenerateContentResult>;
  withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;

@@ -165,0 +173,0 @@ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
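
For orientation, here is a minimal sketch of how the new streamUsage option might be used from the caller's side. It assumes GOOGLE_API_KEY is set in the environment and @langchain/core >= 0.2.5 (per the peer dependency bump below); the prompt is arbitrary.

  import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

  // streamUsage defaults to true; it can be turned off on the constructor
  // or per call via the new GoogleGenerativeAIChatCallOptions field.
  const model = new ChatGoogleGenerativeAI({ streamUsage: true });

  const stream = await model.stream("Write a haiku about the sea.");
  for await (const chunk of stream) {
    // With streamUsage enabled, chunks may carry usage_metadata
    // (input_tokens, output_tokens, total_tokens).
    console.log(chunk.content, chunk.usage_metadata);
  }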


dist/chat_models.js

@@ -118,2 +118,8 @@ import { GoogleGenerativeAI as GenerativeAI, } from "@google/generative-ai";
  });
+ Object.defineProperty(this, "streamUsage", {
+     enumerable: true,
+     configurable: true,
+     writable: true,
+     value: true
+ });
  Object.defineProperty(this, "client", {

@@ -180,2 +186,3 @@ enumerable: true,
  });
+ this.streamUsage = fields?.streamUsage ?? this.streamUsage;
  }

@@ -238,21 +245,18 @@ getLsParams(options) {
  }
- const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
-     let output;
-     try {
-         output = await this.client.generateContent({
-             ...parameters,
-             contents: prompt,
-         });
-         // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     }
-     catch (e) {
-         // TODO: Improve error handling
-         if (e.message?.includes("400 Bad Request")) {
-             e.status = 400;
-         }
-         throw e;
-     }
-     return output;
+ const res = await this.completionWithRetry({
+     ...parameters,
+     contents: prompt,
+ });
- const generationResult = mapGenerateContentResultToChatResult(res.response);
+ let usageMetadata;
+ if ("usageMetadata" in res.response) {
+     const genAIUsageMetadata = res.response.usageMetadata;
+     usageMetadata = {
+         input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+         output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+         total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
+     };
+ }
+ const generationResult = mapGenerateContentResultToChatResult(res.response, {
+     usageMetadata,
+ });
  await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");

@@ -264,11 +268,41 @@ return generationResult;
  const parameters = this.invocationParams(options);
+ const request = {
+     ...parameters,
+     contents: prompt,
+ };
  const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
-     const { stream } = await this.client.generateContentStream({
-         ...parameters,
-         contents: prompt,
-     });
+     const { stream } = await this.client.generateContentStream(request);
      return stream;
  });
+ let usageMetadata;
+ let index = 0;
  for await (const response of stream) {
-     const chunk = convertResponseContentToChatGenerationChunk(response);
+     if ("usageMetadata" in response &&
+         this.streamUsage !== false &&
+         options.streamUsage !== false) {
+         const genAIUsageMetadata = response.usageMetadata;
+         if (!usageMetadata) {
+             usageMetadata = {
+                 input_tokens: genAIUsageMetadata.promptTokenCount,
+                 output_tokens: genAIUsageMetadata.candidatesTokenCount,
+                 total_tokens: genAIUsageMetadata.totalTokenCount,
+             };
+         }
+         else {
+             // Under the hood, LangChain combines the prompt tokens. Google returns the updated
+             // total each time, so we need to find the difference between the tokens.
+             const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+                 usageMetadata.output_tokens;
+             usageMetadata = {
+                 input_tokens: 0,
+                 output_tokens: outputTokenDiff,
+                 total_tokens: outputTokenDiff,
+             };
+         }
+     }
+     const chunk = convertResponseContentToChatGenerationChunk(response, {
+         usageMetadata,
+         index,
+     });
+     index += 1;
      if (!chunk) {
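
To make the token bookkeeping above concrete (the numbers are hypothetical): if Google reports promptTokenCount 12 on every streamed response and cumulative candidatesTokenCount 5, 12, 20 (totalTokenCount 17, 24, 32) across three chunks, this code emits usage_metadata of { input: 12, output: 5, total: 17 } on the first chunk and the deltas { 0, 7, 7 } and { 0, 8, 8 } on the next two. When LangChain concatenates the chunks, these sum back to { input: 12, output: 20, total: 32 }, matching Google's final totals.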

@@ -281,2 +315,17 @@ continue;
  }
+ async completionWithRetry(request, options) {
+     return this.caller.callWithOptions({ signal: options?.signal }, async () => {
+         try {
+             return this.client.generateContent(request);
+             // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         }
+         catch (e) {
+             // TODO: Improve error handling
+             if (e.message?.includes("400 Bad Request")) {
+                 e.status = 400;
+             }
+             throw e;
+         }
+     });
+ }
  withStructuredOutput(outputSchema, config) {

@@ -283,0 +332,0 @@ // eslint-disable-next-line @typescript-eslint/no-explicit-any

@@ -1,3 +0,3 @@
- import { EnhancedGenerateContentResponse, Content, Part, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool } from "@google/generative-ai";
- import { BaseMessage, MessageContent } from "@langchain/core/messages";
+ import { EnhancedGenerateContentResponse, Content, Part, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, POSSIBLE_ROLES } from "@google/generative-ai";
+ import { BaseMessage, UsageMetadata } from "@langchain/core/messages";
  import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";

@@ -12,7 +12,12 @@ import { StructuredToolInterface } from "@langchain/core/tools";
  */
- export declare function convertAuthorToRole(author: string): "model" | "user";
- export declare function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[];
+ export declare function convertAuthorToRole(author: string): (typeof POSSIBLE_ROLES)[number];
+ export declare function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean, role: (typeof POSSIBLE_ROLES)[number]): Part[];
  export declare function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean): Content[];
- export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult;
- export declare function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null;
+ export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse, extra?: {
+     usageMetadata: UsageMetadata | undefined;
+ }): ChatResult;
+ export declare function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse, extra: {
+     usageMetadata?: UsageMetadata | undefined;
+     index: number;
+ }): ChatGenerationChunk | null;
  export declare function convertToGenerativeAITools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]): GoogleGenerativeAIFunctionDeclarationsTool[];

@@ -10,2 +10,5 @@ import { AIMessage, AIMessageChunk, ChatMessage, isBaseMessage, } from "@langchain/core/messages";
  }
+ if (type === "tool") {
+     return type;
+ }
  return message.name ?? type;

@@ -31,2 +34,5 @@ }
  return "user";
+ case "tool":
+ case "function":
+     return "function";
  default:

@@ -47,7 +53,32 @@ throw new Error(`Unknown / unsupported author: ${author}`);
  }
- export function convertMessageContentToParts(content, isMultimodalModel) {
-     if (typeof content === "string") {
-         return [{ text: content }];
+ export function convertMessageContentToParts(message, isMultimodalModel, role) {
+     if (typeof message.content === "string") {
+         return [{ text: message.content }];
      }
-     return content.map((c) => {
+     let functionCallParts = [];
+     if (role === "function") {
+         if (message.name && typeof message.content === "string") {
+             functionCallParts.push({
+                 functionResponse: {
+                     name: message.name,
+                     response: message.content,
+                 },
+             });
+         }
+         else {
+             throw new Error("ChatGoogleGenerativeAI requires tool messages to contain the tool name, and a string content.");
+         }
+     }
+     if ("tool_calls" in message) {
+         const castMessage = message;
+         if (castMessage.tool_calls && castMessage.tool_calls.length > 0) {
+             functionCallParts = castMessage.tool_calls.map((tc) => ({
+                 functionCall: {
+                     name: tc.name,
+                     args: tc.args,
+                 },
+             }));
+         }
+     }
+     const messageContentParts = message.content.map((c) => {
          if (c.type === "text") {

@@ -90,4 +121,13 @@ return {
          }
+         else if (c.type === "tool_use") {
+             return {
+                 functionCall: {
+                     name: c.name,
+                     args: c.input,
+                 },
+             };
+         }
          throw new Error(`Unknown content type ${c.type}`);
      });
+     return [...messageContentParts, ...functionCallParts];
  }
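
For illustration, the requirement encoded above (tool results must carry the tool name and a string content) looks roughly like this from the caller's side; the tool name, id and payload are hypothetical:

  import { ToolMessage } from "@langchain/core/messages";

  // This message is converted into a Google functionResponse part ({ name, response }).
  const toolResult = new ToolMessage({
    name: "get_weather",                    // hypothetical tool name
    content: JSON.stringify({ tempC: 21 }), // content must be a string
    tool_call_id: "call_1",                 // hypothetical id from the AI message's tool_calls
  });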

@@ -110,3 +150,3 @@ export function convertBaseMessagesToContent(messages, isMultimodalModel) {
  }
- const parts = convertMessageContentToParts(message.content, isMultimodalModel);
+ const parts = convertMessageContentToParts(message, isMultimodalModel, role);
  if (acc.mergeWithPreviousContent) {

@@ -123,4 +163,9 @@ const prevContent = acc.content[acc.content.length - 1];
  }
+ let actualRole = role;
+ if (actualRole === "function") {
+     // GenerativeAI API will throw an error if the role is not "user" or "model."
+     actualRole = "user";
+ }
  const content = {
-     role,
+     role: actualRole,
      parts,

@@ -134,3 +179,3 @@ };
  }
- export function mapGenerateContentResultToChatResult(response) {
+ export function mapGenerateContentResultToChatResult(response, extra) {
  // if rejected or error, return empty generations with reason in filters

@@ -159,2 +204,3 @@ if (!response.candidates ||
  },
+ usage_metadata: extra?.usageMetadata,
  }),

@@ -167,9 +213,18 @@ generationInfo,
  }
- export function convertResponseContentToChatGenerationChunk(response) {
+ export function convertResponseContentToChatGenerationChunk(response, extra) {
      if (!response.candidates || response.candidates.length === 0) {
          return null;
      }
+     const functionCalls = response.functionCalls();
      const [candidate] = response.candidates;
      const { content, ...generationInfo } = candidate;
      const text = content?.parts[0]?.text ?? "";
+     const toolCallChunks = [];
+     if (functionCalls) {
+         toolCallChunks.push(...functionCalls.map((fc) => ({
+             ...fc,
+             args: JSON.stringify(fc.args),
+             index: extra.index,
+         })));
+     }
      return new ChatGenerationChunk({

@@ -180,5 +235,7 @@ text,
      name: !content ? undefined : content.role,
+     tool_call_chunks: toolCallChunks,
      // Each chunk can have unique "generationInfo", and merging strategy is unclear,
      // so leave blank for now.
      additional_kwargs: {},
+     usage_metadata: extra.usageMetadata,
  }),

@@ -185,0 +242,0 @@ generationInfo,
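
A rough sketch of how the streamed chunks produced here might be accumulated on the caller's side; it assumes AIMessageChunk.concat merges text content, tool_call_chunks and the per-chunk usage_metadata deltas (GOOGLE_API_KEY in the environment, prompt arbitrary):

  import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
  import { AIMessageChunk } from "@langchain/core/messages";

  const model = new ChatGoogleGenerativeAI({});
  let aggregate: AIMessageChunk | undefined;
  for await (const chunk of await model.stream("Tell me a joke about tokens.")) {
    aggregate = aggregate ? aggregate.concat(chunk) : chunk;
  }
  // After concatenation the usage_metadata deltas have been summed into final
  // token counts; when tools are bound, tool_call_chunks are stitched together
  // the same way.
  console.log(aggregate?.usage_metadata);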

package.json

  {
    "name": "@langchain/google-genai",
-   "version": "0.0.17",
+   "version": "0.0.18",
    "description": "Sample integration for LangChain.js",

@@ -39,3 +39,3 @@ "type": "module",
    "@google/generative-ai": "^0.7.0",
-   "@langchain/core": ">0.1.5 <0.3.0",
+   "@langchain/core": ">=0.2.5 <0.3.0",
    "zod-to-json-schema": "^3.22.4"

@@ -46,3 +46,3 @@ },
    "@langchain/scripts": "~0.0.14",
-   "@langchain/standard-tests": "workspace:*",
+   "@langchain/standard-tests": "0.0.0",
    "@swc/core": "^1.3.90",

@@ -49,0 +49,0 @@ "@swc/jest": "^0.2.29",
