Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign inDemoInstall
Socket

@langchain/google-common

Package Overview
Dependencies
Maintainers
5
Versions
33
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@langchain/google-common - npm Package Compare versions

Comparing version 0.0.1 to 0.0.2

dist/utils/index.cjs

17

dist/chat_models.d.ts
import { type BaseMessage } from "@langchain/core/messages";
import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent } from "./types.js";
import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
import type { z } from "zod";
import { Runnable } from "@langchain/core/runnables";
import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
import { AbstractGoogleLLMConnection } from "./connection.js";

@@ -21,6 +23,8 @@ import { GoogleAbstractedClient } from "./auth.js";

*/
export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<BaseLanguageModelCallOptions> implements ChatGoogleBaseInput<AuthOptions> {
export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions> implements ChatGoogleBaseInput<AuthOptions> {
static lc_name(): string;
lc_serializable: boolean;
/** @deprecated Prefer `modelName` */
model: string;
modelName: string;
temperature: number;

@@ -44,6 +48,11 @@ maxOutputTokens: number;

_generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
_streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
_streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
/** @ignore */
_combineLLMOutput(): never[];
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
}
export {};

@@ -5,2 +5,4 @@ import { getEnvironmentVariable } from "@langchain/core/utils/env";

import { AIMessageChunk } from "@langchain/core/messages";
import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";

@@ -11,2 +13,3 @@ import { AbstractGoogleLLMConnection } from "./connection.js";

import { ensureParams } from "./utils/failed_handler.js";
import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
class ChatConnection extends AbstractGoogleLLMConnection {

@@ -35,2 +38,3 @@ formatContents(input, _parameters) {

});
/** @deprecated Prefer `modelName` */
Object.defineProperty(this, "model", {

@@ -42,2 +46,8 @@ enumerable: true,

});
Object.defineProperty(this, "modelName", {
enumerable: true,
configurable: true,
writable: true,
value: "gemini-pro"
});
Object.defineProperty(this, "temperature", {

@@ -130,3 +140,3 @@ enumerable: true,

async _generate(messages, options, _runManager) {
const parameters = copyAIModelParams(this);
const parameters = copyAIModelParams(this, options);
const response = await this.connection.request(messages, parameters, options);

@@ -136,6 +146,6 @@ const ret = safeResponseToChatResult(response, this.safetyHandler);

}
async *_streamResponseChunks(_messages, _options, _runManager) {
async *_streamResponseChunks(_messages, options, _runManager) {
// Make the call as a streaming request
const parameters = copyAIModelParams(this);
const response = await this.streamedConnection.request(_messages, parameters, _options);
const parameters = copyAIModelParams(this, options);
const response = await this.streamedConnection.request(_messages, parameters, options);
// Get the streaming parser of the response

@@ -164,2 +174,91 @@ const stream = response.data;

}
// Build a runnable that forces the model to emit structured output by
// advertising a single function ("extract" by default, or config.name) and
// parsing the arguments of the resulting tool call. Only the
// "functionCalling" method is supported; "jsonMode" is explicitly rejected.
withStructuredOutput(outputSchema, config) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const schema = outputSchema;
const name = config?.name;
const method = config?.method;
const includeRaw = config?.includeRaw;
if (method === "jsonMode") {
throw new Error(`Google only supports "functionCalling" as a method.`);
}
let functionName = name ?? "extract";
let outputParser;
let tools;
if (isZodSchema(schema)) {
// Zod schema: convert it to Gemini's parameter-schema dialect, and give
// the parser the original Zod schema so parsed output is re-validated.
const jsonSchema = zodToGeminiParameters(schema);
tools = [
{
functionDeclarations: [
{
name: functionName,
description: jsonSchema.description ?? "A function available to call.",
parameters: jsonSchema,
},
],
},
];
outputParser = new JsonOutputKeyToolsParser({
returnSingle: true,
keyName: functionName,
zodSchema: schema,
});
}
else {
// Plain object: either it already looks like a full function declaration
// (has a string `name` and an object `parameters`), in which case it is
// used as-is and its name wins; otherwise it is wrapped as the
// parameters of a synthesized declaration.
let geminiFunctionDefinition;
if (typeof schema.name === "string" &&
typeof schema.parameters === "object" &&
schema.parameters != null) {
geminiFunctionDefinition = schema;
functionName = schema.name;
}
else {
geminiFunctionDefinition = {
name: functionName,
description: schema.description ?? "",
parameters: schema,
};
}
tools = [
{
functionDeclarations: [geminiFunctionDefinition],
},
];
// No zodSchema here: output is parsed but not validated.
outputParser = new JsonOutputKeyToolsParser({
returnSingle: true,
keyName: functionName,
});
}
// Bind the generated tool so every invocation advertises it to the model.
const llm = this.bind({
tools,
});
if (!includeRaw) {
return llm.pipe(outputParser).withConfig({
runName: "ChatGoogleStructuredOutput",
});
}
// includeRaw: return { raw, parsed } where `parsed` falls back to null
// (instead of throwing) when the model response cannot be parsed.
// NOTE: `config` here shadows the outer method parameter — it is the
// per-invocation runnable config, not the structured-output config.
const parserAssign = RunnablePassthrough.assign({
// eslint-disable-next-line @typescript-eslint/no-explicit-any
parsed: (input, config) => outputParser.invoke(input.raw, config),
});
const parserNone = RunnablePassthrough.assign({
parsed: () => null,
});
const parsedWithFallback = parserAssign.withFallbacks({
fallbacks: [parserNone],
});
return RunnableSequence.from([
{
raw: llm,
},
parsedWithFallback,
]).withConfig({
runName: "StructuredOutputRunnable",
});
}
}
function isZodSchema(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
input) {
// Check for a characteristic method of Zod schemas
return typeof input?.parse === "function";
}
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
import type { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting } from "./types.js";
import { StructuredToolInterface } from "@langchain/core/tools";
import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting, GeminiTool, GeminiFunctionDeclaration, GoogleAIModelRequestParams } from "./types.js";
import { GoogleAbstractedClient, GoogleAbstractedClientOpsMethod } from "./auth.js";

@@ -28,3 +29,5 @@ export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {

export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguageModelCallOptions, MessageType, AuthOptions> extends GoogleHostConnection<CallOptions, GoogleLLMResponse, AuthOptions> implements GoogleAIBaseLLMInput<AuthOptions> {
/** @deprecated Prefer `modelName` */
model: string;
modelName: string;
client: GoogleAbstractedClient;

@@ -38,4 +41,4 @@ constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming?: boolean);

buildUrl(): Promise<string>;
abstract formatData(input: MessageType, parameters: GoogleAIModelParams): unknown;
request(input: MessageType, parameters: GoogleAIModelParams, options: CallOptions): Promise<GoogleLLMResponse>;
abstract formatData(input: MessageType, parameters: GoogleAIModelRequestParams): unknown;
request(input: MessageType, parameters: GoogleAIModelRequestParams, options: CallOptions): Promise<GoogleLLMResponse>;
}

@@ -45,6 +48,10 @@ export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions> {

buildUrlMethod(): Promise<string>;
abstract formatContents(input: MessageType, parameters: GoogleAIModelParams): GeminiContent[];
formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelParams): GeminiGenerationConfig;
formatSafetySettings(_input: MessageType, parameters: GoogleAIModelParams): GeminiSafetySetting[];
formatData(input: MessageType, parameters: GoogleAIModelParams): GeminiRequest;
abstract formatContents(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiContent[];
formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[];
structuredToolToFunctionDeclaration(tool: StructuredToolInterface): GeminiFunctionDeclaration;
structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];
formatTools(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiTool[];
formatData(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiRequest;
}
import { getRuntimeEnvironment } from "@langchain/core/utils/env";
import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
export class GoogleConnection {

@@ -121,2 +122,3 @@ constructor(caller, client, streaming) {

super(fields, caller, client, streaming);
/** @deprecated Prefer `modelName` */
Object.defineProperty(this, "model", {

@@ -128,2 +130,8 @@ enumerable: true,

});
Object.defineProperty(this, "modelName", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
});
Object.defineProperty(this, "client", {

@@ -136,6 +144,6 @@ enumerable: true,

this.client = client;
this.model = fields?.model ?? this.model;
this.modelName = fields?.modelName ?? fields?.model ?? this.modelName;
}
get modelFamily() {
if (this.model.startsWith("gemini")) {
if (this.modelName.startsWith("gemini")) {
return "gemini";

@@ -157,3 +165,3 @@ }

const method = await this.buildUrlMethod();
const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.modelName}:${method}`;
return url;

@@ -164,3 +172,3 @@ }

const method = await this.buildUrlMethod();
const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`;
const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.modelName}:${method}`;
return url;

@@ -207,2 +215,34 @@ }

}
// Borrowed from the OpenAI invocation params test
// Type guard: treats `tools` as StructuredToolInterface[] when every element
// carries an `lc_namespace` array (the LangChain serializable marker).
// Returns false when `tools` is undefined.
isStructuredToolArray(tools) {
return (tools !== undefined &&
tools.every((tool) => Array.isArray(tool.lc_namespace)));
}
// Convert one LangChain structured tool into a Gemini function declaration,
// translating the tool's Zod schema into Gemini's parameter-schema dialect.
structuredToolToFunctionDeclaration(tool) {
const jsonSchema = zodToGeminiParameters(tool.schema);
return {
name: tool.name,
description: tool.description,
parameters: jsonSchema,
};
}
// Wrap a list of structured tools into a single GeminiTool whose
// functionDeclarations hold one declaration per tool.
// NOTE(review): `this.structuredToolToFunctionDeclaration` is passed to map
// unbound, so `this` is lost inside it — harmless today because that method
// does not use `this`, but fragile if it ever does.
structuredToolsToGeminiTools(tools) {
return [
{
functionDeclarations: tools.map(this.structuredToolToFunctionDeclaration),
},
];
}
// Normalize the request's tools for the Gemini payload: [] when none are
// set, converted via structuredToolsToGeminiTools when they are LangChain
// structured tools, or passed through unchanged when they are already
// GeminiTool objects.
formatTools(_input, parameters) {
const tools = parameters?.tools;
if (!tools || tools.length === 0) {
return [];
}
if (this.isStructuredToolArray(tools)) {
return this.structuredToolsToGeminiTools(tools);
}
else {
return tools;
}
}
formatData(input, parameters) {

@@ -220,2 +260,3 @@ /*

const generationConfig = this.formatGenerationConfig(input, parameters);
const tools = this.formatTools(input, parameters);
const safetySettings = this.formatSafetySettings(input, parameters);

@@ -226,2 +267,5 @@ const ret = {

};
if (tools && tools.length) {
ret.tools = tools;
}
if (safetySettings && safetySettings.length) {

@@ -228,0 +272,0 @@ ret.safetySettings = safetySettings;

@@ -8,1 +8,2 @@ export * from "./chat_models.js";

export * from "./utils/common.js";
export * from "./utils/zod_to_gemini_parameters.js";

@@ -8,1 +8,2 @@ export * from "./chat_models.js";

export * from "./utils/common.js";
export * from "./utils/zod_to_gemini_parameters.js";

@@ -21,3 +21,3 @@ import { Callbacks } from "@langchain/core/callbacks/manager";

lc_serializable: boolean;
model: string;
modelName: string;
temperature: number;

@@ -24,0 +24,0 @@ maxOutputTokens: number;

@@ -53,3 +53,3 @@ import { CallbackManager } from "@langchain/core/callbacks/manager";

});
Object.defineProperty(this, "model", {
Object.defineProperty(this, "modelName", {
enumerable: true,

@@ -157,3 +157,3 @@ configurable: true,

async _call(prompt, options) {
const parameters = copyAIModelParams(this);
const parameters = copyAIModelParams(this, options);
const result = await this.connection.request(prompt, parameters, options);

@@ -160,0 +160,0 @@ const ret = safeResponseToString(result, this.safetyHandler);

import type { BaseLLMParams } from "@langchain/core/language_models/llms";
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { StructuredToolInterface } from "@langchain/core/tools";
import type { JsonStream } from "./utils/stream.js";

@@ -39,4 +41,6 @@ /**

export interface GoogleAIModelParams {
/** @deprecated Prefer `modelName` */
model?: string;
/** Model to use */
model?: string;
modelName?: string;
/** Sampling temperature to use */

@@ -71,4 +75,12 @@ temperature?: number;

}
/**
* The params which can be passed to the API at request time.
*/
export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
    /**
     * Tools the model may call: either LangChain structured tools
     * (converted to Gemini declarations at request time) or pre-built
     * GeminiTool objects passed through as-is.
     */
    tools?: StructuredToolInterface[] | GeminiTool[];
}
/** Constructor input shared by Google AI LLM connections/models. */
export interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams {
}
/** Per-call options: standard call options plus request-time model params. */
export interface GoogleAIBaseLanguageModelCallOptions extends BaseLanguageModelCallOptions, GoogleAIModelRequestParams, GoogleAISafetyParams {
}
/**

@@ -118,3 +130,3 @@ * Input to LLM class.

}
export type GeminiRole = "user" | "model";
export type GeminiRole = "user" | "model" | "function";
export interface GeminiContent {

@@ -125,3 +137,20 @@ parts: GeminiPart[];

/** A tool the Gemini model may call, expressed as function declarations. */
export interface GeminiTool {
    functionDeclarations?: GeminiFunctionDeclaration[];
}
/** A single callable function advertised to the Gemini model. */
export interface GeminiFunctionDeclaration {
    name: string;
    description: string;
    /** Schema describing the function's arguments (omit for no-arg functions). */
    parameters?: GeminiFunctionSchema;
}
/**
 * Gemini's restricted parameter-schema dialect — appears to mirror an
 * OpenAPI/JSON-Schema subset; confirm exact semantics against the Gemini
 * API reference.
 */
export interface GeminiFunctionSchema {
    type: GeminiFunctionSchemaType;
    format?: string;
    description?: string;
    nullable?: boolean;
    enum?: string[];
    /** Per-property schemas when type is "object". */
    properties?: Record<string, GeminiFunctionSchema>;
    required?: string[];
    /** Element schema when type is "array". */
    items?: GeminiFunctionSchema;
}
/** The value types a Gemini schema node may declare. */
export type GeminiFunctionSchemaType = "string" | "number" | "integer" | "boolean" | "array" | "object";
export interface GeminiGenerationConfig {

@@ -128,0 +157,0 @@ stopSequences?: string[];

@@ -1,6 +0,6 @@

import type { GoogleAIModelParams, GoogleLLMModelFamily } from "../types.js";
export declare function copyAIModelParams(params: GoogleAIModelParams | undefined): GoogleAIModelParams;
export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, target: GoogleAIModelParams): GoogleAIModelParams;
import type { GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleLLMModelFamily } from "../types.js";
export declare function copyAIModelParams(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined): GoogleAIModelRequestParams;
export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined, target: GoogleAIModelParams): GoogleAIModelRequestParams;
export declare function modelToFamily(modelName: string | undefined): GoogleLLMModelFamily;
export declare function validateModelParams(params: GoogleAIModelParams | undefined): void;
export declare function copyAndValidateModelParamsInto(params: GoogleAIModelParams | undefined, target: GoogleAIModelParams): GoogleAIModelParams;
import { isModelGemini, validateGeminiParams } from "./gemini.js";
export function copyAIModelParams(params) {
return copyAIModelParamsInto(params, {});
export function copyAIModelParams(params, options) {
return copyAIModelParamsInto(params, options, {});
}
export function copyAIModelParamsInto(params, target) {
export function copyAIModelParamsInto(params, options, target) {
const ret = target || {};
ret.model = params?.model ?? target.model;
ret.temperature = params?.temperature ?? target.temperature;
ret.maxOutputTokens = params?.maxOutputTokens ?? target.maxOutputTokens;
ret.topP = params?.topP ?? target.topP;
ret.topK = params?.topK ?? target.topK;
ret.stopSequences = params?.stopSequences ?? target.stopSequences;
ret.safetySettings = params?.safetySettings ?? target.safetySettings;
ret.modelName = options?.modelName ?? params?.modelName ?? target.modelName;
ret.temperature =
options?.temperature ?? params?.temperature ?? target.temperature;
ret.maxOutputTokens =
options?.maxOutputTokens ??
params?.maxOutputTokens ??
target.maxOutputTokens;
ret.topP = options?.topP ?? params?.topP ?? target.topP;
ret.topK = options?.topK ?? params?.topK ?? target.topK;
ret.stopSequences =
options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
ret.safetySettings =
options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
ret.tools = options?.tools;
return ret;

@@ -29,3 +36,3 @@ }

const testParams = params ?? {};
switch (modelToFamily(testParams.model)) {
switch (modelToFamily(testParams.modelName)) {
case "gemini":

@@ -38,5 +45,5 @@ return validateGeminiParams(testParams);

export function copyAndValidateModelParamsInto(params, target) {
copyAIModelParamsInto(params, target);
copyAIModelParamsInto(params, undefined, target);
validateModelParams(target);
return target;
}

@@ -1,2 +0,2 @@

import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
import { BaseMessage, BaseMessageChunk, BaseMessageFields, MessageContent } from "@langchain/core/messages";
import { ChatGeneration, ChatGenerationChunk, ChatResult, Generation } from "@langchain/core/outputs";

@@ -7,2 +7,22 @@ import type { GoogleLLMResponse, GoogleAIModelParams, GeminiPart, GeminiContent, GenerateContentResponseData, GoogleAISafetyHandler } from "../types.js";

export declare function partsToMessageContent(parts: GeminiPart[]): MessageContent;
/** A function call in serialized form: arguments as a JSON string. */
interface FunctionCall {
    name: string;
    arguments: string;
}
/** An OpenAI-style tool call wrapper around a serialized FunctionCall. */
interface ToolCall {
    id: string;
    type: "function";
    function: FunctionCall;
}
/** As FunctionCall, but with arguments still as a parsed object. */
interface FunctionCallRaw {
    name: string;
    arguments: object;
}
/** As ToolCall, but carrying unserialized (object) arguments. */
interface ToolCallRaw {
    id: string;
    type: "function";
    function: FunctionCallRaw;
}
export declare function partsToToolsRaw(parts: GeminiPart[]): ToolCallRaw[];
export declare function toolsRawToTools(raws: ToolCallRaw[]): ToolCall[];
export declare function responseToGenerateContentResponseData(response: GoogleLLMResponse): GenerateContentResponseData;

@@ -21,3 +41,4 @@ export declare function responseToParts(response: GoogleLLMResponse): GeminiPart[];

export declare function responseToChatGenerations(response: GoogleLLMResponse): ChatGeneration[];
export declare function responseToMessageContent(response: GoogleLLMResponse): MessageContent;
export declare function responseToBaseMessageFields(response: GoogleLLMResponse): BaseMessageFields;
export declare function partsToBaseMessageFields(parts: GeminiPart[]): BaseMessageFields;
export declare function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage;

@@ -51,1 +72,2 @@ export declare function safeResponseToBaseMessage(response: GoogleLLMResponse, safetyHandler: GoogleAISafetyHandler): BaseMessage;

}
export {};

@@ -5,5 +5,10 @@ import { AIMessage, AIMessageChunk, } from "@langchain/core/messages";

function messageContentText(content) {
return {
text: content.text,
};
if (content?.text && content?.text.length > 0) {
return {
text: content.text,
};
}
else {
return null;
}
}

@@ -46,4 +51,4 @@ function messageContentImageUrl(content) {

// eslint-disable-next-line array-callback-return
const parts = messageContent.map((content) => {
// eslint-disable-next-line default-case
const parts = messageContent
.map((content) => {
switch (content.type) {

@@ -54,11 +59,49 @@ case "text":

return messageContentImageUrl(content);
default:
throw new Error(`Unsupported type received while converting message to message parts`);
}
});
})
.reduce((acc, val) => {
if (val) {
return [...acc, val];
}
else {
return acc;
}
}, []);
return parts;
}
// Convert OpenAI-style tool_calls (whose arguments are a JSON string) into
// Gemini functionCall parts. Returns [] when there are no tool calls.
function messageToolCallsToParts(toolCalls) {
    if (!toolCalls?.length) {
        return [];
    }
    return toolCalls.map((call) => {
        const argStr = call?.function?.arguments;
        // Arguments arrive JSON-encoded; decode them into a plain object,
        // defaulting to {} when absent or empty.
        const args = argStr ? JSON.parse(argStr) : {};
        return {
            functionCall: {
                name: call.function.name,
                args,
            },
        };
    });
}
// Extract Gemini parts encoded in a message's additional_kwargs.
// Only tool_calls are recognized; anything else yields an empty list.
function messageKwargsToParts(kwargs) {
    const toolCalls = kwargs?.tool_calls;
    return toolCalls ? messageToolCallsToParts(toolCalls) : [];
}
function roleMessageToContent(role, message) {
const contentParts = messageContentToParts(message.content);
const toolParts = messageKwargsToParts(message.additional_kwargs);
const parts = [...contentParts, ...toolParts];
return [
{
role,
parts: messageContentToParts(message.content),
parts,
},

@@ -73,2 +116,28 @@ ];

}
// Convert a LangChain tool message into a single Gemini "function"-role
// content entry. Non-string message content is flattened by concatenating
// its text parts; the resulting string is expected to be JSON and is parsed
// into the functionResponse payload. The message's tool_call_id is used as
// the functionResponse name.
function toolMessageToContent(message) {
    let contentStr;
    if (typeof message.content === "string") {
        contentStr = message.content;
    }
    else {
        contentStr = message.content.reduce(
            (acc, part) => (part.type === "text" ? acc + part.text : acc),
            "");
    }
    const response = JSON.parse(contentStr);
    return [
        {
            role: "function",
            parts: [
                {
                    functionResponse: {
                        name: message.tool_call_id,
                        response,
                    },
                },
            ],
        },
    ];
}
export function baseMessageToContent(message) {

@@ -83,2 +152,4 @@ const type = message._getType();

return roleMessageToContent("model", message);
case "tool":
return toolMessageToContent(message);
default:

@@ -133,2 +204,45 @@ console.log(`Unsupported message type: ${type}`);

}
// Serialize a raw tool call (object arguments) into the OpenAI-style tool
// call shape, where arguments become a JSON string.
function toolRawToTool(raw) {
    const { id, type } = raw;
    return {
        id,
        type,
        function: {
            name: raw.function.name,
            arguments: JSON.stringify(raw.function.arguments),
        },
    };
}
// Build a raw tool call from a Gemini functionCall part. Gemini does not
// supply a call id, so the function name stands in for it ("" when absent);
// missing args default to {}.
function functionCallPartToToolRaw(part) {
    const call = part.functionCall;
    return {
        id: call?.name ?? "",
        type: "function",
        function: {
            name: call.name,
            arguments: call.args ?? {},
        },
    };
}
// Collect the functionCall parts of a Gemini response as raw tool calls,
// skipping null/undefined entries and parts without a functionCall key.
export function partsToToolsRaw(parts) {
    const ret = [];
    for (const part of parts) {
        if (part != null && "functionCall" in part) {
            ret.push(functionCallPartToToolRaw(part));
        }
    }
    return ret;
}
// Serialize every raw tool call (object args) into its JSON-string form.
export function toolsRawToTools(raws) {
    const tools = [];
    for (const raw of raws) {
        tools.push(toolRawToTool(raw));
    }
    return tools;
}
export function responseToGenerateContentResponseData(response) {

@@ -224,4 +338,4 @@ if ("nextChunk" in response.data) {

export function partToMessage(part) {
const content = partsToMessageContent([part]);
return new AIMessageChunk({ content });
const fields = partsToBaseMessageFields([part]);
return new AIMessageChunk(fields);
}

@@ -241,10 +355,22 @@ export function partToChatGeneration(part) {

}
export function responseToMessageContent(response) {
export function responseToBaseMessageFields(response) {
const parts = responseToParts(response);
return partsToMessageContent(parts);
return partsToBaseMessageFields(parts);
}
// Assemble BaseMessage constructor fields from Gemini response parts:
// message content always, plus additional_kwargs.tool_calls when any
// functionCall parts are present.
export function partsToBaseMessageFields(parts) {
    const content = partsToMessageContent(parts);
    const rawTools = partsToToolsRaw(parts);
    if (rawTools.length === 0) {
        return { content };
    }
    return {
        content,
        additional_kwargs: {
            tool_calls: toolsRawToTools(rawTools),
        },
    };
}
export function responseToBaseMessage(response) {
return new AIMessage({
content: responseToMessageContent(response),
});
const fields = responseToBaseMessageFields(response);
return new AIMessage(fields);
}

@@ -251,0 +377,0 @@ export function safeResponseToBaseMessage(response, safetyHandler) {

{
"name": "@langchain/google-common",
"version": "0.0.1",
"version": "0.0.2",
"description": "Core types and classes for Google services.",

@@ -42,3 +42,4 @@ "type": "module",

"dependencies": {
"@langchain/core": "~0.1.1"
"@langchain/core": "~0.1.1",
"zod-to-json-schema": "^3.22.4"
},

@@ -67,3 +68,4 @@ "devDependencies": {

"ts-jest": "^29.1.0",
"typescript": "<5.2.0"
"typescript": "<5.2.0",
"zod": "^3.22.4"
},

@@ -83,2 +85,20 @@ "publishConfig": {

},
"./utils": {
"types": {
"import": "./utils.d.ts",
"require": "./utils.d.cts",
"default": "./utils.d.ts"
},
"import": "./utils.js",
"require": "./utils.cjs"
},
"./types": {
"types": {
"import": "./types.d.ts",
"require": "./types.d.cts",
"default": "./types.d.ts"
},
"import": "./types.js",
"require": "./types.cjs"
},
"./package.json": "./package.json"

@@ -91,4 +111,12 @@ },

"index.d.ts",
"index.d.cts"
"index.d.cts",
"utils.cjs",
"utils.js",
"utils.d.ts",
"utils.d.cts",
"types.cjs",
"types.js",
"types.d.ts",
"types.d.cts"
]
}

@@ -30,3 +30,4 @@ # LangChain google-common

* Gemini model through LLM and Chat classes (both through Google AI Studio and
Google Cloud Vertex AI)
Google Cloud Vertex AI). Including:
* Function/Tool support

@@ -38,3 +39,2 @@

* Functions for Gemini
* PaLM Vertex AI support and backwards compatibility

@@ -48,3 +48,8 @@ * PaLM MakerSuite support and backwards compatibility

* Vertex AI Model Garden
* Online prediction endpoints
* Gemma
* Google managed models
* Claude
* AI Studio Tuned Models
* MakerSuite / Google Drive Hub
* Google Cloud Vector Store

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc