Vertex AI SDK for Node.js quickstart
The Vertex AI SDK for Node.js lets you use the Vertex AI Gemini API to build
AI-powered features and applications.
For detailed samples using the Vertex AI Node.js SDK, see the samples repository on GitHub.
For the latest list of available Gemini models on Vertex AI, see the Model information page in the Vertex AI documentation.
Before you begin
- Select or create a Google Cloud project.
- Enable billing for your project.
- Enable the Vertex AI API.
- Set up authentication with a service account so you can access the API from your local workstation (one common approach is shown after this list).
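For local development, one common way to provide credentials is Application Default Credentials via the gcloud CLI. Note that the following command uses your own user credentials rather than a service account; see the Google Cloud authentication documentation if you specifically need the service-account flow:
gcloud auth application-default login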
Install the SDK
Install the Vertex AI SDK for Node.js by running the following command:
npm install @google-cloud/vertexai
Initialize the VertexAI class
To use the Vertex AI SDK for Node.js, create an instance of VertexAI by passing it your Google Cloud project ID and location. Then create a reference to a generative model.
const {
  FunctionDeclarationSchemaType,
  HarmBlockThreshold,
  HarmCategory,
  VertexAI
} = require('@google-cloud/vertexai');
const project = 'your-cloud-project';
const location = 'us-central1';
const textModel = 'gemini-1.0-pro';
const visionModel = 'gemini-1.0-pro-vision';
const vertexAI = new VertexAI({project: project, location: location});
// Instantiate the models. safetySettings and generationConfig are optional.
const generativeModel = vertexAI.getGenerativeModel({
  model: textModel,
  safetySettings: [{category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE}],
  generationConfig: {maxOutputTokens: 256},
});
const generativeVisionModel = vertexAI.getGenerativeModel({
  model: visionModel,
});
const generativeModelPreview = vertexAI.preview.getGenerativeModel({
  model: textModel,
});
Send text prompt requests
You can send text prompt requests by using generateContentStream for streamed responses, or generateContent for nonstreamed responses.
Get streamed text responses
The response is returned in chunks as it's being generated to reduce the
perception of latency to a human reader.
async function streamGenerateContent() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const streamingResult = await generativeModel.generateContentStream(request);
  for await (const item of streamingResult.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  const aggregatedResponse = await streamingResult.response;
  console.log('aggregated response: ', JSON.stringify(aggregatedResponse));
}
streamGenerateContent();
Get nonstreamed text responses
The response is returned all at once.
async function generateContent() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const result = await generativeModel.generateContent(request);
  const response = result.response;
  console.log('Response: ', JSON.stringify(response));
}
generateContent();
Send multiturn chat requests
Chat requests use previous messages as context when responding to new prompts.
To send multiturn chat requests, use sendMessageStream for streamed responses, or sendMessage for nonstreamed responses.
Get streamed chat responses
The response is returned in chunks as it's being generated to reduce the
perception of latency to a human reader.
async function streamChat() {
  const chat = generativeModel.startChat();
  const chatInput = "How can I learn more about Node.js?";
  const result = await chat.sendMessageStream(chatInput);
  for await (const item of result.stream) {
    console.log("Stream chunk: ", item.candidates[0].content.parts[0].text);
  }
  const aggregatedResponse = await result.response;
  console.log('Aggregated response: ', JSON.stringify(aggregatedResponse));
}
streamChat();
Get nonstreamed chat responses
The response is returned all at once.
async function sendChat() {
  const chat = generativeModel.startChat();
  const chatInput = "How can I learn more about Node.js?";
  const result = await chat.sendMessage(chatInput);
  const response = result.response;
  console.log('response: ', JSON.stringify(response));
}
sendChat();
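Because a chat session carries its history forward, follow-up messages sent on the same chat object can refer back to earlier turns. The following is a minimal sketch of a two-turn exchange; the prompts are only illustrative.
async function multiTurnChat() {
  const chat = generativeModel.startChat();
  // First turn: establishes some context in the chat history.
  const firstResult = await chat.sendMessage('Name three popular Node.js web frameworks.');
  console.log('First response: ', JSON.stringify(firstResult.response));
  // Second turn: "them" is resolved from the history of this chat session.
  const secondResult = await chat.sendMessage('Which of them is best for a beginner?');
  console.log('Second response: ', JSON.stringify(secondResult.response));
}
multiTurnChat();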
Include images or videos in your prompt request
Prompt requests can include either an image or video in addition to text.
For more information, see Send multimodal prompt requests in the Vertex AI documentation.
Include an image
You can include images in the prompt either by specifying the Cloud Storage URI
where the image is located or by including a base64 encoding of the image.
Specify a Cloud Storage URI of the image
You can specify the Cloud Storage URI of the image in fileUri.
async function multiPartContent() {
  const filePart = {fileData: {fileUri: "gs://generativeai-downloads/images/scones.jpg", mimeType: "image/jpeg"}};
  const textPart = {text: 'What is this picture about?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const streamingResult = await generativeVisionModel.generateContentStream(request);
  for await (const item of streamingResult.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  const aggregatedResponse = await streamingResult.response;
  console.log(aggregatedResponse.candidates[0].content);
}
multiPartContent();
Specify a base64 image encoding string
You can specify the base64 image encoding string in data.
async function multiPartContentImageString() {
  // A tiny 1x1 PNG, base64-encoded, used here as a small example image.
  const base64Image = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==';
  const filePart = {inlineData: {data: base64Image, mimeType: 'image/png'}};
  const textPart = {text: 'What is this picture about?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const streamingResult = await generativeVisionModel.generateContentStream(request);
  const contentResponse = await streamingResult.response;
  console.log(contentResponse.candidates[0].content.parts[0].text);
}
multiPartContentImageString();
Include a video
You can include videos in the prompt by specifying the Cloud Storage URI
where the video is located in fileUri.
async function multiPartContentVideo() {
  const filePart = {fileData: {fileUri: 'gs://cloud-samples-data/video/animals.mp4', mimeType: 'video/mp4'}};
  const textPart = {text: 'What is in the video?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const streamingResult = await generativeVisionModel.generateContentStream(request);
  for await (const item of streamingResult.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  const aggregatedResponse = await streamingResult.response;
  console.log(aggregatedResponse.candidates[0].content);
}
multiPartContentVideo();
Function calling
The Vertex AI SDK for Node.js supports function calling in the sendMessage, sendMessageStream, generateContent, and generateContentStream methods. We recommend using it through the chat methods (sendMessage or sendMessageStream) but have included examples of both approaches below.
Declare a function
The following examples show you how to declare a function.
const functionDeclarations = [
  {
    functionDeclarations: [
      {
        name: "get_current_weather",
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];
const functionResponseParts = [
  {
    functionResponse: {
      name: "get_current_weather",
      response:
          {name: "get_current_weather", content: {weather: "super nice"}},
    },
  },
];
Function calling using sendMessageStream
After the function is declared, you can pass it to the model in the tools parameter of the prompt request.
async function functionCallingChat() {
  // Create a chat session and pass your function declarations.
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });
  const chatInput1 = 'What is the weather in Boston?';
  // This prompt should trigger a functionCall response from the model.
  const streamingResult1 = await chat.sendMessageStream(chatInput1);
  for await (const item of streamingResult1.stream) {
    console.log(item.candidates[0]);
  }
  const response1 = await streamingResult1.response;
  console.log("first aggregated response: ", JSON.stringify(response1));
  // Send a follow-up message containing the function response.
  const streamingResult2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of streamingResult2.stream) {
    console.log(item.candidates[0]);
  }
  const response2 = await streamingResult2.response;
  console.log("second aggregated response: ", JSON.stringify(response2));
}
functionCallingChat();
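In practice, rather than hard-coding functionResponseParts, you would usually read the functionCall part out of the model's first reply, run your own code with the returned arguments, and send the result back in a functionResponse part. The sketch below assumes a hypothetical getCurrentWeather helper that you implement yourself; the response shape it reads is the same one used in the examples above.
// Hypothetical local implementation of the declared function.
function getCurrentWeather(location, unit = 'celsius') {
  return {weather: 'super nice', location: location, unit: unit};
}
async function functionCallingRoundTrip() {
  const chat = generativeModel.startChat({tools: functionDeclarations});
  const result = await chat.sendMessage('What is the weather in Boston?');
  // Look for a functionCall part in the model's response.
  const part = result.response.candidates[0].content.parts[0];
  if (part.functionCall) {
    const {name, args} = part.functionCall;
    // Run your own function, then send its output back to the model.
    const functionResult = getCurrentWeather(args.location, args.unit);
    const followUp = await chat.sendMessage([
      {functionResponse: {name: name, response: {name: name, content: functionResult}}},
    ]);
    console.log('final response: ', JSON.stringify(followUp.response));
  }
}
functionCallingRoundTrip();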
Function calling using generateContentStream
async function functionCallingGenerateContentStream() {
  const request = {
    contents: [
      {role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
      {role: 'model', parts: [{functionCall: {name: 'get_current_weather', args: {'location': 'Boston'}}}]},
      {role: 'user', parts: functionResponseParts}
    ],
    tools: functionDeclarations,
  };
  const streamingResult =
      await generativeModel.generateContentStream(request);
  for await (const item of streamingResult.stream) {
    console.log(item.candidates[0]);
  }
}
functionCallingGenerateContentStream();
Counting tokens
async function countTokens() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const response = await generativeModel.countTokens(request);
  console.log('count tokens response: ', JSON.stringify(response));
}
countTokens();
Grounding (Preview)
Grounding is a Preview-only feature.
Grounding lets you connect model output to verifiable sources of information to reduce hallucination. You can specify Google Search or Vertex AI Search as the data source for grounding.
Grounding using Google Search (Preview)
async function generateContentWithGoogleSearchGrounding() {
  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: textModel,
    safetySettings: [{category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE}],
    generationConfig: {maxOutputTokens: 256},
  });
  const googleSearchRetrievalTool = {
    googleSearchRetrieval: {
      disableAttribution: false,
    },
  };
  const result = await generativeModelPreview.generateContent({
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [googleSearchRetrievalTool],
  });
  const response = result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log("GroundingMetadata is: ", JSON.stringify(groundingMetadata));
}
generateContentWithGoogleSearchGrounding();
Grounding using Vertex AI Search (Preview)
async function generateContentWithVertexAISearchGrounding() {
  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: textModel,
    safetySettings: [{category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE}],
    generationConfig: {maxOutputTokens: 256},
  });
  const vertexAIRetrievalTool = {
    retrieval: {
      vertexAiSearch: {
        datastore: 'projects/.../locations/.../collections/.../dataStores/...',
      },
      disableAttribution: false,
    },
  };
  const result = await generativeModelPreview.generateContent({
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [vertexAIRetrievalTool],
  });
  const response = result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log("Grounding metadata is: ", JSON.stringify(groundingMetadata));
}
generateContentWithVertexAISearchGrounding();
License
The contents of this repository are licensed under the
Apache License, version 2.0.