
@google-cloud/vertexai
The Vertex AI Node.js SDK enables developers to use Google's state-of-the-art generative AI models (like Gemini) to build AI-powered features and applications.
See here for detailed samples using the Vertex AI Node.js SDK.
Install this SDK via npm:
npm install @google-cloud/vertexai
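The SDK authenticates with Application Default Credentials. If you are developing locally, one common way to set these up (assuming you have the gcloud CLI installed) is:

gcloud auth application-default login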
To use the SDK, create an instance of VertexAI by passing it your Google Cloud project ID and location. Then create a reference to a generative model.
const {VertexAI, HarmCategory, HarmBlockThreshold, FunctionDeclarationSchemaType} = require('@google-cloud/vertexai');

const project = 'your-cloud-project';
const location = 'us-central1';

const vertex_ai = new VertexAI({project: project, location: location});

// Instantiate models
const generativeModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro',
  // The following parameters are optional
  // They can also be passed to individual content generation requests
  safety_settings: [{category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE}],
  generation_config: {max_output_tokens: 256},
});

const generativeVisionModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro-vision',
});
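Beyond max_output_tokens, the same generation_config object accepts sampling parameters such as temperature, top_p, and top_k. A minimal sketch, assuming the snake_case field names follow the pattern above (check the SDK's GenerationConfig type for the exact spelling in your version):

const tunedModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro',
  generation_config: {
    max_output_tokens: 256, // cap on the length of the generated reply
    temperature: 0.4,       // lower values make output more deterministic
    top_p: 0.95,            // nucleus sampling cutoff
    top_k: 40,              // sample from the top K candidate tokens
  },
});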
async function streamGenerateContent() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const streamingResp = await generativeModel.generateContentStream(request);
  for await (const item of streamingResp.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  console.log('aggregated response: ', JSON.stringify(await streamingResp.response));
}

streamGenerateContent();
async function streamChat() {
  const chat = generativeModel.startChat();
  const chatInput1 = "How can I learn more about Node.js?";
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0].content.parts[0].text);
  }
  console.log('aggregated response: ', JSON.stringify(await result1.response));
}

streamChat();
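If you do not need incremental output, the chat session also exposes a non-streaming sendMessage method. A minimal sketch, mirroring the response handling used above:

async function simpleChat() {
  const chat = generativeModel.startChat();
  // sendMessage resolves once the full reply is available
  const result = await chat.sendMessage('How can I learn more about Node.js?');
  console.log('chat response: ', JSON.stringify(await result.response));
}

simpleChat();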
async function multiPartContent() {
  const filePart = {file_data: {file_uri: "gs://generativeai-downloads/images/scones.jpg", mime_type: "image/jpeg"}};
  const textPart = {text: 'What is this a picture of?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const streamingResp = await generativeVisionModel.generateContentStream(request);
  for await (const item of streamingResp.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  const aggregatedResponse = await streamingResp.response;
  console.log(aggregatedResponse.candidates[0].content);
}

multiPartContent();
async function multiPartContentImageString() {
  // Replace this with your own base64 image string
  const base64Image = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==';
  const filePart = {inline_data: {data: base64Image, mime_type: 'image/jpeg'}};
  const textPart = {text: 'What is this a picture of?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const resp = await generativeVisionModel.generateContentStream(request);
  const contentResponse = await resp.response;
  console.log(contentResponse.candidates[0].content.parts[0].text);
}

multiPartContentImageString();
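If the image lives on disk rather than as a hard-coded string, you can build the base64 payload with Node's built-in fs module. A minimal sketch (the file path is a placeholder):

const fs = require('fs');

// Read the image from disk and encode it as base64 for the inline_data part
const base64Image = fs.readFileSync('./scones.jpg').toString('base64');
const filePart = {inline_data: {data: base64Image, mime_type: 'image/jpeg'}};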
async function multiPartContentVideo() {
  const filePart = {file_data: {file_uri: 'gs://cloud-samples-data/video/animals.mp4', mime_type: 'video/mp4'}};
  const textPart = {text: 'What is in the video?'};
  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };
  const streamingResp = await generativeVisionModel.generateContentStream(request);
  for await (const item of streamingResp.stream) {
    console.log('stream chunk: ', JSON.stringify(item));
  }
  const aggregatedResponse = await streamingResp.response;
  console.log(aggregatedResponse.candidates[0].content);
}

multiPartContentVideo();
async function generateContent() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const resp = await generativeModel.generateContent(request);
  console.log('aggregated response: ', JSON.stringify(await resp.response));
}

generateContent();
async function countTokens() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const resp = await generativeModel.countTokens(request);
  console.log('count tokens response: ', resp);
}

countTokens();
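One common use of countTokens is a pre-flight check before sending a long prompt. A minimal sketch, assuming the response exposes a totalTokens field as in the Vertex AI CountTokens API, with a placeholder limit (check your model's documented context window):

async function promptFits(request, tokenLimit = 8192) {
  const resp = await generativeModel.countTokens(request);
  // Compare the model-reported token count against the assumed limit
  return resp.totalTokens <= tokenLimit;
}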
The Node SDK supports function calling via sendMessage, sendMessageStream, generateContent, and generateContentStream. We recommend using it through the chat methods (sendMessage or sendMessageStream), but examples of both approaches are included below.
This is an example of a function declaration and function response, which are passed to the model in the snippets that follow.
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: "get_current_weather",
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];
const functionResponseParts = [
  {
    functionResponse: {
      name: "get_current_weather",
      response:
        {name: "get_current_weather", content: {weather: "super nice"}},
    },
  },
];
async function functionCallingChat() {
  // Create a chat session and pass your function declarations
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });
  const chatInput1 = 'What is the weather in Boston?';
  // This should include a functionCall response from the model
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0]);
  }
  const response1 = await result1.response;
  // Send a follow up message with a FunctionResponse
  const result2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of result2.stream) {
    console.log(item.candidates[0]);
  }
  // This should include a text response from the model using the response content
  // provided above
  const response2 = await result2.response;
}

functionCallingChat();
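In a real application, you would inspect the model's reply for a functionCall part, run the matching local function, and send its output back as a functionResponse. A minimal sketch of that dispatch step, assuming a hypothetical local getCurrentWeather implementation and the response shape used in the examples above:

async function dispatchWeatherCall(prompt) {
  const chat = generativeModel.startChat({tools: functionDeclarations});
  const result = await chat.sendMessage(prompt);
  const part = (await result.response).candidates[0].content.parts[0];
  if (part.functionCall && part.functionCall.name === 'get_current_weather') {
    // getCurrentWeather is a hypothetical local implementation of the declared function
    const weather = await getCurrentWeather(part.functionCall.args);
    const followUp = await chat.sendMessage([
      {functionResponse: {name: 'get_current_weather', response: {content: weather}}},
    ]);
    console.log(JSON.stringify(await followUp.response));
  }
}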
async function functionCallingGenerateContent() {
  const request = {
    contents: [
      {role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
      {role: 'model', parts: [{functionCall: {name: 'get_current_weather', args: {'location': 'Boston'}}}]},
      {role: 'function', parts: functionResponseParts}
    ],
    tools: functionDeclarations,
  };
  const streamingResp = await generativeModel.generateContentStream(request);
  for await (const item of streamingResp.stream) {
    console.log(item.candidates[0]);
  }
}

functionCallingGenerateContent();
The contents of this repository are licensed under the Apache License, version 2.0.