@langchain/google-genai
Comparing version 0.0.26 to 0.1.0
@@ -120,29 +120,335 @@ import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart } from "@google/generative-ai"; | ||
/** | ||
 * Google Generative AI chat model integration. | ||
* | ||
* Setup: | ||
* Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`. | ||
* | ||
* ```bash | ||
* npm install @langchain/google-genai | ||
* export GOOGLE_API_KEY="your-api-key" | ||
* ``` | ||
* | ||
* ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor) | ||
* | ||
* ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html) | ||
* | ||
 * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc. | ||
 * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below: | ||
* | ||
* ```typescript | ||
 * // When calling `.bind`, call options should be passed via the first argument | ||
 * const llmWithArgsBound = llm.bind({ | ||
 *   stop: ["\n"], | ||
 *   tools: [...], | ||
 * }); | ||
 * | ||
 * // When calling `.bindTools`, call options should be passed via the second argument | ||
 * const llmWithTools = llm.bindTools( | ||
 *   [...], | ||
 *   { | ||
 *     stop: ["\n"], | ||
 *   } | ||
 * ); | ||
* ``` | ||
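 * | ||
 * Call options can also be passed directly as the second argument to a single call such as `.invoke`. A minimal sketch, reusing the `llm` instance from the Instantiate example below (the `perCallResult` name is illustrative): | ||
 * | ||
 * ```typescript | ||
 * // Per-call options apply only to this one invocation | ||
 * const perCallResult = await llm.invoke( | ||
 *   `Translate "I love programming" into French.`, | ||
 *   { stop: ["\n"] } | ||
 * ); | ||
 * ``` | ||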
* | ||
* ## Examples | ||
* | ||
* <details open> | ||
* <summary><strong>Instantiate</strong></summary> | ||
* | ||
* ```typescript | ||
* import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; | ||
* | ||
* const llm = new ChatGoogleGenerativeAI({ | ||
* model: "gemini-1.5-flash", | ||
* temperature: 0, | ||
* maxRetries: 2, | ||
* // apiKey: "...", | ||
* // other params... | ||
* }); | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Invoking</strong></summary> | ||
* | ||
* ```typescript | ||
* const input = `Translate "I love programming" into French.`; | ||
* | ||
* // Models also accept a list of chat messages or a formatted prompt | ||
* const result = await llm.invoke(input); | ||
* console.log(result); | ||
* ``` | ||
* | ||
* ```txt | ||
* AIMessage { | ||
* "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. \n", | ||
* "response_metadata": { | ||
* "finishReason": "STOP", | ||
* "index": 0, | ||
* "safetyRatings": [ | ||
* { | ||
* type: "text", | ||
* text: "You are a funny assistant that answers in pirate language.", | ||
* "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", | ||
* "probability": "NEGLIGIBLE" | ||
* }, | ||
* { | ||
* type: "text", | ||
* text: "What is your favorite food?", | ||
* "category": "HARM_CATEGORY_HATE_SPEECH", | ||
* "probability": "NEGLIGIBLE" | ||
* }, | ||
* { | ||
* "category": "HARM_CATEGORY_HARASSMENT", | ||
* "probability": "NEGLIGIBLE" | ||
* }, | ||
* { | ||
* "category": "HARM_CATEGORY_DANGEROUS_CONTENT", | ||
* "probability": "NEGLIGIBLE" | ||
* } | ||
* ] | ||
* }, | ||
* "usage_metadata": { | ||
* "input_tokens": 10, | ||
* "output_tokens": 149, | ||
* "total_tokens": 159 | ||
* } | ||
* } | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Streaming Chunks</strong></summary> | ||
* | ||
* ```typescript | ||
* for await (const chunk of await llm.stream(input)) { | ||
* console.log(chunk); | ||
* } | ||
* ``` | ||
* | ||
* ```txt | ||
* AIMessageChunk { | ||
* "content": "There", | ||
* "response_metadata": { | ||
* "index": 0 | ||
 * }, | ||
* "usage_metadata": { | ||
* "input_tokens": 10, | ||
* "output_tokens": 1, | ||
* "total_tokens": 11 | ||
* } | ||
* } | ||
* AIMessageChunk { | ||
* "content": " are a few ways to translate \"I love programming\" into French, depending on", | ||
* } | ||
* AIMessageChunk { | ||
* "content": " the level of formality and nuance you want to convey:\n\n**Formal:**\n\n", | ||
* } | ||
* AIMessageChunk { | ||
* "content": "* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This", | ||
* } | ||
* AIMessageChunk { | ||
* "content": " is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More", | ||
* } | ||
* AIMessageChunk { | ||
* "content": " specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and", | ||
* } | ||
* AIMessageChunk { | ||
* "content": " your intended audience. \n", | ||
* } | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Aggregate Streamed Chunks</strong></summary> | ||
* | ||
* ```typescript | ||
* import { AIMessageChunk } from '@langchain/core/messages'; | ||
* import { concat } from '@langchain/core/utils/stream'; | ||
* | ||
* const stream = await llm.stream(input); | ||
* let full: AIMessageChunk | undefined; | ||
* for await (const chunk of stream) { | ||
* full = !full ? chunk : concat(full, chunk); | ||
* } | ||
* console.log(full); | ||
* ``` | ||
* | ||
* ```txt | ||
* AIMessageChunk { | ||
* "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. \n", | ||
* "usage_metadata": { | ||
* "input_tokens": 10, | ||
* "output_tokens": 277, | ||
* "total_tokens": 287 | ||
* } | ||
* } | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Bind tools</strong></summary> | ||
* | ||
* ```typescript | ||
* import { z } from 'zod'; | ||
* | ||
* const GetWeather = { | ||
* name: "GetWeather", | ||
* description: "Get the current weather in a given location", | ||
* schema: z.object({ | ||
* location: z.string().describe("The city and state, e.g. San Francisco, CA") | ||
* }), | ||
* } | ||
* | ||
* const GetPopulation = { | ||
* name: "GetPopulation", | ||
* description: "Get the current population in a given location", | ||
* schema: z.object({ | ||
* location: z.string().describe("The city and state, e.g. San Francisco, CA") | ||
* }), | ||
* } | ||
* | ||
* const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); | ||
* const aiMsg = await llmWithTools.invoke( | ||
* "Which city is hotter today and which is bigger: LA or NY?" | ||
* ); | ||
* console.log(aiMsg.tool_calls); | ||
* ``` | ||
* | ||
* ```txt | ||
* [ | ||
* { | ||
* name: 'GetWeather', | ||
* args: { location: 'Los Angeles, CA' }, | ||
* type: 'tool_call' | ||
* }, | ||
* { | ||
* name: 'GetWeather', | ||
* args: { location: 'New York, NY' }, | ||
* type: 'tool_call' | ||
* }, | ||
* { | ||
* name: 'GetPopulation', | ||
* args: { location: 'Los Angeles, CA' }, | ||
* type: 'tool_call' | ||
* }, | ||
* { | ||
* name: 'GetPopulation', | ||
* args: { location: 'New York, NY' }, | ||
* type: 'tool_call' | ||
* } | ||
* ] | ||
* ``` | ||
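 * | ||
 * The tool calls above are structured requests only; nothing is executed automatically. A minimal dispatch sketch, assuming hypothetical local implementations of the two tools: | ||
 * | ||
 * ```typescript | ||
 * import { ToolMessage } from '@langchain/core/messages'; | ||
 * | ||
 * // Hypothetical local implementations of the bound tools | ||
 * const getWeather = (location: string) => ({ location, tempF: 75 }); | ||
 * const getPopulation = (location: string) => ({ location, population: 1000000 }); | ||
 * | ||
 * const toolMessages = (aiMsg.tool_calls ?? []).map((call) => { | ||
 *   // Route each call to the matching implementation | ||
 *   const output = call.name === "GetWeather" | ||
 *     ? getWeather(call.args.location) | ||
 *     : getPopulation(call.args.location); | ||
 *   return new ToolMessage({ | ||
 *     content: JSON.stringify(output), | ||
 *     tool_call_id: call.id ?? "", | ||
 *   }); | ||
 * }); | ||
 * | ||
 * // The results can then follow the AI message in a second call: | ||
 * // await llmWithTools.invoke([...priorMessages, aiMsg, ...toolMessages]); | ||
 * ``` | ||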
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Structured Output</strong></summary> | ||
* | ||
* ```typescript | ||
* const Joke = z.object({ | ||
* setup: z.string().describe("The setup of the joke"), | ||
* punchline: z.string().describe("The punchline to the joke"), | ||
* rating: z.number().optional().describe("How funny the joke is, from 1 to 10") | ||
* }).describe('Joke to tell user.'); | ||
* | ||
* const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" }); | ||
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); | ||
* console.log(jokeResult); | ||
* ``` | ||
* | ||
* ```txt | ||
* { | ||
* setup: "Why don\\'t cats play poker?", | ||
* punchline: "Why don\\'t cats play poker? Because they always have an ace up their sleeve!" | ||
* } | ||
* ``` | ||
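 * | ||
 * When the raw `AIMessage` is needed alongside the parsed object, the standard `includeRaw` option can be set; a sketch reusing the `Joke` schema above (the `structuredLlmWithRaw` name is illustrative): | ||
 * | ||
 * ```typescript | ||
 * const structuredLlmWithRaw = llm.withStructuredOutput(Joke, { | ||
 *   name: "Joke", | ||
 *   includeRaw: true, | ||
 * }); | ||
 * // Resolves to an object of the shape `{ raw: AIMessage, parsed: Joke }` | ||
 * const rawAndParsed = await structuredLlmWithRaw.invoke("Tell me a joke about cats"); | ||
 * ``` | ||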
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Multimodal</strong></summary> | ||
* | ||
* ```typescript | ||
* import { HumanMessage } from '@langchain/core/messages'; | ||
* | ||
* const imageUrl = "https://example.com/image.jpg"; | ||
* const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); | ||
* const base64Image = Buffer.from(imageData).toString('base64'); | ||
* | ||
* const message = new HumanMessage({ | ||
* content: [ | ||
* { type: "text", text: "describe the weather in this image" }, | ||
* { | ||
* type: "image_url", | ||
* image_url: { url: `data:image/jpeg;base64,${base64Image}` }, | ||
* }, | ||
* ] | ||
* }); | ||
* | ||
* const imageDescriptionAiMsg = await llm.invoke([message]); | ||
* console.log(imageDescriptionAiMsg.content); | ||
* ``` | ||
* | ||
* ```txt | ||
* The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Usage Metadata</strong></summary> | ||
* | ||
* ```typescript | ||
* const aiMsgForMetadata = await llm.invoke(input); | ||
* console.log(aiMsgForMetadata.usage_metadata); | ||
* ``` | ||
* | ||
* ```txt | ||
* { input_tokens: 10, output_tokens: 149, total_tokens: 159 } | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
* | ||
* <details> | ||
* <summary><strong>Response Metadata</strong></summary> | ||
* | ||
* ```typescript | ||
* const aiMsgForResponseMetadata = await llm.invoke(input); | ||
* console.log(aiMsgForResponseMetadata.response_metadata); | ||
* ``` | ||
* | ||
* ```txt | ||
* { | ||
* finishReason: 'STOP', | ||
* index: 0, | ||
* safetyRatings: [ | ||
* { | ||
* category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', | ||
* probability: 'NEGLIGIBLE' | ||
* }, | ||
* { | ||
* category: 'HARM_CATEGORY_HATE_SPEECH', | ||
* probability: 'NEGLIGIBLE' | ||
* }, | ||
* { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' }, | ||
* { | ||
* category: 'HARM_CATEGORY_DANGEROUS_CONTENT', | ||
* probability: 'NEGLIGIBLE' | ||
* } | ||
* ] | ||
* } | ||
* ``` | ||
* </details> | ||
* | ||
* <br /> | ||
*/ | ||
@@ -149,0 +455,0 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk> implements GoogleGenerativeAIChatInput { |
@@ -10,29 +10,335 @@ import { GoogleGenerativeAI as GenerativeAI, } from "@google/generative-ai"; | ||
@@ -39,0 +345,0 @@ export class ChatGoogleGenerativeAI extends BaseChatModel { |
{ | ||
"name": "@langchain/google-genai", | ||
"version": "0.0.26", | ||
"version": "0.1.0", | ||
"description": "Google Generative AI integration for LangChain.js", | ||
@@ -18,3 +18,3 @@ "type": "module", | ||
"build": "yarn turbo:command build:internal --filter=@langchain/google-genai", | ||
"build:internal": "yarn lc_build_v2 --create-entrypoints --pre --tree-shaking", | ||
"build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", | ||
"lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", | ||
@@ -40,8 +40,11 @@ "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", | ||
"@google/generative-ai": "^0.7.0", | ||
"@langchain/core": ">=0.2.21 <0.3.0", | ||
"zod-to-json-schema": "^3.22.4" | ||
}, | ||
"peerDependencies": { | ||
"@langchain/core": ">=0.2.21 <0.4.0" | ||
}, | ||
"devDependencies": { | ||
"@jest/globals": "^29.5.0", | ||
"@langchain/scripts": "~0.0.20", | ||
"@langchain/core": "workspace:*", | ||
"@langchain/scripts": ">=0.1.0 <0.2.0", | ||
"@langchain/standard-tests": "0.0.0", | ||
@@ -48,0 +51,0 @@ "@swc/core": "^1.3.90", |
@@ -8,3 +8,3 @@ # @langchain/google-genai | ||
```bash npm2yarn | ||
npm install @langchain/google-genai | ||
npm install @langchain/google-genai @langchain/core | ||
``` | ||
@@ -21,14 +21,14 @@ | ||
"dependencies": { | ||
"@langchain/google-genai": "^0.0.0", | ||
"langchain": "0.0.207" | ||
"@langchain/core": "^0.3.0", | ||
"@langchain/google-genai": "^0.0.0" | ||
}, | ||
"resolutions": { | ||
"@langchain/core": "0.1.5" | ||
"@langchain/core": "^0.3.0" | ||
}, | ||
"overrides": { | ||
"@langchain/core": "0.1.5" | ||
"@langchain/core": "^0.3.0" | ||
}, | ||
"pnpm": { | ||
"overrides": { | ||
"@langchain/core": "0.1.5" | ||
"@langchain/core": "^0.3.0" | ||
} | ||
@@ -35,0 +35,0 @@ } |
+ Added @langchain/core@0.3.18 (transitive)
+ Added langsmith@0.2.5 (transitive)
- Removed @langchain/core@>=0.2.21 <0.3.0
- Removed @langchain/core@0.2.36 (transitive)
- Removed langsmith@0.1.68 (transitive)