Huge News! Announcing our $40M Series B, led by Abstract Ventures. Learn More
Socket
Sign inDemoInstall
Socket

@google-cloud/vertexai

Package Overview
Dependencies
Maintainers
2
Versions
23
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@google-cloud/vertexai - npm Package Compare versions

Comparing version 1.7.0 to 1.8.0

build/src/functions/util.d.ts

2

.release-please-manifest.json
{
".": "1.7.0"
".": "1.8.0"
}

@@ -40,2 +40,3 @@ "use strict";

systemInstruction: request.systemInstruction,
cachedContent: request.cachedContent,
generationConfig: (_a = request.generationConfig) !== null && _a !== void 0 ? _a : generationConfig,

@@ -81,2 +82,3 @@ safetySettings: (_b = request.safetySettings) !== null && _b !== void 0 ? _b : safetySettings,

systemInstruction: request.systemInstruction,
cachedContent: request.cachedContent,
generationConfig: (_a = request.generationConfig) !== null && _a !== void 0 ? _a : generationConfig,

@@ -83,0 +85,0 @@ safetySettings: (_b = request.safetySettings) !== null && _b !== void 0 ? _b : safetySettings,

@@ -63,3 +63,5 @@ "use strict";

/**
 * Selects the API version for a generate-content request: `v1beta1` is
 * required when the request uses a Vertex RAG store or cached content,
 * otherwise the stable `v1` surface is used.
 *
 * Fix: the diff-rendered block contained two consecutive `return`
 * statements — the first (old) one made the generalized (new) one
 * unreachable, so cached-content requests would never select `v1beta1`.
 */
function getApiVersion(request) {
    return hasVertexRagStore(request) || hasCachedContent(request)
        ? 'v1beta1'
        : 'v1';
}

@@ -80,2 +82,5 @@ exports.getApiVersion = getApiVersion;

exports.hasVertexRagStore = hasVertexRagStore;
/** True when the request carries a non-empty `cachedContent` reference. */
function hasCachedContent(request) {
    return Boolean(request.cachedContent);
}
function hasVertexAISearch(request) {

@@ -82,0 +87,0 @@ var _a;

@@ -20,3 +20,3 @@ "use strict";

exports.ChatSessionPreview = exports.ChatSession = void 0;
const util_1 = require("./util");
const util_1 = require("../functions/util");
const generate_content_1 = require("../functions/generate_content");

@@ -23,0 +23,0 @@ const errors_1 = require("../types/errors");

@@ -17,3 +17,3 @@ /**

*/
import { CountTokensRequest, CountTokensResponse, GenerateContentRequest, GenerateContentResult, GetGenerativeModelParams, StartChatParams, StreamGenerateContentResult } from '../types/content';
import { CachedContent, Content, CountTokensRequest, CountTokensResponse, GenerateContentRequest, GenerateContentResult, GetGenerativeModelParams, StartChatParams, StreamGenerateContentResult } from '../types/content';
import { ChatSession, ChatSessionPreview } from './chat_session';

@@ -160,2 +160,3 @@ /**

private readonly apiEndpoint?;
private readonly cachedContent?;
/**

@@ -259,2 +260,5 @@ * @constructor

startChat(request?: StartChatParams): ChatSessionPreview;
getModelName(): string;
getCachedContent(): CachedContent | undefined;
getSystemInstruction(): Content | undefined;
}

@@ -20,3 +20,3 @@ "use strict";

exports.GenerativeModelPreview = exports.GenerativeModel = void 0;
const util_1 = require("./util");
const util_1 = require("../functions/util");
const count_tokens_1 = require("../functions/count_tokens");

@@ -214,2 +214,3 @@ const generate_content_1 = require("../functions/generate_content");

this.toolConfig = getGenerativeModelParams.toolConfig;
this.cachedContent = getGenerativeModelParams.cachedContent;
this.requestOptions = (_a = getGenerativeModelParams.requestOptions) !== null && _a !== void 0 ? _a : {};

@@ -252,4 +253,8 @@ if (getGenerativeModelParams.systemInstruction) {

async generateContent(request) {
var _a;
request = formulateRequestToGenerateContentRequest(request);
const formulatedRequest = formulateSystemInstructionIntoGenerateContentRequest(request, this.systemInstruction);
const formulatedRequest = {
...formulateSystemInstructionIntoGenerateContentRequest(request, this.systemInstruction),
cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name,
};
return (0, generate_content_1.generateContent)(this.location, this.resourcePath, this.fetchToken(), formulatedRequest, this.apiEndpoint, this.generationConfig, this.safetySettings, this.tools, this.toolConfig, this.requestOptions);

@@ -282,4 +287,8 @@ }

async generateContentStream(request) {
var _a;
request = formulateRequestToGenerateContentRequest(request);
const formulatedRequest = formulateSystemInstructionIntoGenerateContentRequest(request, this.systemInstruction);
const formulatedRequest = {
...formulateSystemInstructionIntoGenerateContentRequest(request, this.systemInstruction),
cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name,
};
return (0, generate_content_1.generateContentStream)(this.location, this.resourcePath, this.fetchToken(), formulatedRequest, this.apiEndpoint, this.generationConfig, this.safetySettings, this.tools, this.toolConfig, this.requestOptions);

@@ -333,3 +342,3 @@ }

startChat(request) {
var _a, _b, _c, _d;
var _a, _b, _c, _d, _e, _f, _g;
const startChatRequest = {

@@ -343,2 +352,3 @@ project: this.project,

systemInstruction: this.systemInstruction,
cachedContent: (_a = this.cachedContent) === null || _a === void 0 ? void 0 : _a.name,
};

@@ -348,11 +358,22 @@ if (request) {

startChatRequest.generationConfig =
(_a = request.generationConfig) !== null && _a !== void 0 ? _a : this.generationConfig;
(_b = request.generationConfig) !== null && _b !== void 0 ? _b : this.generationConfig;
startChatRequest.safetySettings =
(_b = request.safetySettings) !== null && _b !== void 0 ? _b : this.safetySettings;
startChatRequest.tools = (_c = request.tools) !== null && _c !== void 0 ? _c : this.tools;
(_c = request.safetySettings) !== null && _c !== void 0 ? _c : this.safetySettings;
startChatRequest.tools = (_d = request.tools) !== null && _d !== void 0 ? _d : this.tools;
startChatRequest.systemInstruction =
(_d = request.systemInstruction) !== null && _d !== void 0 ? _d : this.systemInstruction;
(_e = request.systemInstruction) !== null && _e !== void 0 ? _e : this.systemInstruction;
startChatRequest.cachedContent =
(_f = request.cachedContent) !== null && _f !== void 0 ? _f : (_g = this.cachedContent) === null || _g === void 0 ? void 0 : _g.name;
}
return new chat_session_1.ChatSessionPreview(startChatRequest, this.requestOptions);
}
/** Returns this instance's model name (`this.model`). */
getModelName() {
    return this.model;
}
/** Returns the cached content this instance was constructed with, if any. */
getCachedContent() {
    return this.cachedContent;
}
/** Returns the system instruction set on this instance, if any. */
getSystemInstruction() {
    return this.systemInstruction;
}
}

@@ -359,0 +380,0 @@ exports.GenerativeModelPreview = GenerativeModelPreview;

@@ -61,2 +61,7 @@ /**

systemInstruction?: string | Content;
/**
* Optional. The name of the cached content used as context to serve the prediction.
* This is the name of a `CachedContent` and not the cache object itself.
*/
cachedContent?: string;
}

@@ -132,2 +137,8 @@ /**

model: string;
/**
* Optional. The cached content used as context to serve the prediction.
* Note: only used in explicit caching, where users can have control over caching
* (e.g. what content to cache) and enjoy guaranteed cost savings.
*/
cachedContent?: CachedContent;
}

@@ -957,2 +968,7 @@ /**

systemInstruction?: string | Content;
/**
* Optional. The name of the cached content used as context to serve the prediction.
* This is the name of a `CachedContent` and not the cache object itself.
*/
cachedContent?: string;
}

@@ -996,1 +1012,70 @@ /**

}
/**
 * A resource used in LLM queries for users to explicitly specify
 * what to cache and how to cache.
 */
export interface CachedContent {
  /**
   * Immutable. Identifier. The server-generated resource name of the cached content.
   * Format: projects/{project}/locations/{location}/cachedContents/{cached_content}
   */
  name?: string;
  /** Optional. Immutable. The user-generated meaningful display name of the cached content. */
  displayName?: string;
  /**
   * Immutable. The name of the publisher model to use for cached content.
   * Format: projects/{project}/locations/{location}/publishers/{publisher}/models/{model}
   */
  model?: string;
  /** Developer set system instruction. Currently, text only. */
  systemInstruction?: Content | string;
  /** Optional. Input only. Immutable. The content to cache. */
  contents?: Content[];
  /** Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response. */
  tools?: Tool[];
  /** Optional. Input only. Immutable. Tool config. This config is shared for all tools. */
  toolConfig?: ToolConfig;
  /**
   * Output only. Creation time of the cache entry.
   * Format: google-datetime. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  createTime?: string;
  /**
   * Output only. When the cache entry was last updated in UTC time.
   * Format: google-datetime. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  updateTime?: string;
  /** Output only. Metadata on the usage of the cached content. */
  usageMetadata?: CachedContentUsageMetadata;
  /**
   * Timestamp of when this resource is considered expired.
   * This is *always* provided on output, regardless of what was sent on input.
   */
  expireTime?: string;
  /**
   * Input only. The TTL seconds for this resource. The expiration time
   * is computed: now + TTL.
   * Format: google-duration. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  ttl?: string;
}
/** Metadata on the usage of the cached content (token, media, and duration counts). */
export interface CachedContentUsageMetadata {
  /** Total number of tokens that the cached content consumes. */
  totalTokenCount?: number;
  /** Number of text characters. */
  textCount?: number;
  /** Number of images. */
  imageCount?: number;
  /** Duration of video in seconds. */
  videoDurationSeconds?: number;
  /** Duration of audio in seconds. */
  audioDurationSeconds?: number;
}
/** Response with a list of CachedContents. */
export interface ListCachedContentsResponse {
/** List of cached contents. */
cachedContents?: CachedContent[];
/** A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. */
nextPageToken?: string;
}

@@ -23,3 +23,3 @@ /**

export declare const SYSTEM_ROLE = "system";
export declare const USER_AGENT = "model-builder/1.7.0 grpc-node/1.7.0";
export declare const USER_AGENT = "model-builder/1.8.0 grpc-node/1.8.0";
export declare const CREDENTIAL_ERROR_MESSAGE = "\nUnable to authenticate your request \nDepending on your run time environment, you can get authentication by \n- if in local instance or cloud shell: `!gcloud auth login` \n- if in Colab: \n -`from google.colab import auth` \n -`auth.authenticate_user()` \n- if in service account or other: please follow guidance in https://cloud.google.com/docs/authentication";

@@ -27,3 +27,3 @@ "use strict";

const USER_AGENT_PRODUCT = 'model-builder';
const CLIENT_LIBRARY_VERSION = '1.7.0'; // x-release-please-version
const CLIENT_LIBRARY_VERSION = '1.8.0'; // x-release-please-version
const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`;

@@ -30,0 +30,0 @@ exports.USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`;

@@ -19,3 +19,4 @@ /**

import { GenerativeModelPreview, GenerativeModel } from './models';
import { ModelParams, RequestOptions, VertexInit } from './types/content';
import { CachedContent, ModelParams, RequestOptions, VertexInit } from './types/content';
import * as Resources from './resources';
/**

@@ -100,2 +101,4 @@ * The `VertexAI` class is the base class for authenticating to Vertex AI.

private readonly apiEndpoint?;
private readonly apiClient;
readonly cachedContents: Resources.CachedContents;
/**

@@ -123,3 +126,4 @@ * @constructor

getGenerativeModel(modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModelPreview;
getGenerativeModelFromCachedContent(cachedContent: CachedContent, modelParams?: Partial<ModelParams>, requestOptions?: RequestOptions): GenerativeModelPreview;
}
export {};

@@ -24,2 +24,4 @@ "use strict";

const errors_1 = require("./types/errors");
const Resources = require("./resources");
const cached_contents_1 = require("./resources/cached_contents");
/**

@@ -142,2 +144,4 @@ * The `VertexAI` class is the base class for authenticating to Vertex AI.

this.apiEndpoint = apiEndpoint;
this.apiClient = new Resources.ApiClient(this.project, this.location, 'v1beta1', this.googleAuth);
this.cachedContents = new Resources.CachedContents(this.apiClient);
}

@@ -165,3 +169,63 @@ /**

}
/**
 * Returns a `GenerativeModelPreview` bound to an existing cached content
 * resource. The cached content supplies the model, system instruction,
 * tools, and tool config; `modelParams` may add safety and generation
 * settings. Throws `ClientError` when `cachedContent` lacks a `name` or
 * `model` field, when the model name has an unsupported format, or when
 * `modelParams` conflicts with the cached values.
 */
getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions) {
    if (!cachedContent.name) {
        throw new errors_1.ClientError('Cached content must contain a `name` field.');
    }
    if (!cachedContent.model) {
        throw new errors_1.ClientError('Cached content must contain a `model` field.');
    }
    validateCachedContentModel(cachedContent.model);
    /**
     * Not checking tools and toolConfig for now as it would require a deep
     * equality comparison and isn't likely to be a common case.
     */
    const disallowedDuplicates = ['model', 'systemInstruction'];
    for (const key of disallowedDuplicates) {
        if ((modelParams === null || modelParams === void 0 ? void 0 : modelParams[key]) &&
            cachedContent[key] &&
            (modelParams === null || modelParams === void 0 ? void 0 : modelParams[key]) !== cachedContent[key]) {
            // Model names may differ only in resource-path prefix; compare the
            // short model ids before treating the difference as a conflict.
            if (key === 'model') {
                const modelParamsComp = parseModelName(modelParams[key]);
                const cachedContentComp = parseModelName(cachedContent[key]);
                if (modelParamsComp === cachedContentComp) {
                    continue;
                }
            }
            throw new errors_1.ClientError(`Different value for "${key}" specified in modelParams` +
                ` (${modelParams[key]}) and cachedContent (${cachedContent[key]})`);
        }
    }
    // NOTE(review): mutates the caller-supplied `cachedContent` in place by
    // expanding `name` to a full resource path — confirm callers expect this.
    cachedContent.name = (0, cached_contents_1.inferFullResourceName)(this.project, this.location, cachedContent.name);
    const modelParamsFromCache = {
        model: cachedContent.model,
        project: this.project,
        location: this.location,
        googleAuth: this.googleAuth,
        apiEndpoint: this.apiEndpoint,
        safetySettings: modelParams === null || modelParams === void 0 ? void 0 : modelParams.safetySettings,
        generationConfig: modelParams === null || modelParams === void 0 ? void 0 : modelParams.generationConfig,
        tools: cachedContent.tools,
        toolConfig: cachedContent.toolConfig,
        requestOptions: requestOptions,
        systemInstruction: cachedContent.systemInstruction,
        cachedContent,
    };
    return new models_1.GenerativeModelPreview(modelParamsFromCache);
}
}
/**
 * Rejects cached-content model names that are not a bare model id, a
 * "models/..." path, or a full "projects/.../publishers/google/models/..."
 * resource path. Throws `ClientError` for any other format.
 */
function validateCachedContentModel(modelName) {
    const isBareName = !modelName.includes('/');
    const isModelsPath = modelName.startsWith('models/');
    const isFullPublisherPath = modelName.startsWith('projects/') &&
        modelName.includes('/publishers/google/models/');
    if (isBareName || isModelsPath || isFullPublisherPath) {
        return;
    }
    throw new errors_1.ClientError(`Cached content model name must start with "models/" or match "projects/.*/publishers/google/models/.*" or is a model name listed at https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions. Received: ${modelName}`);
}
/**
 * Extracts the short model id from a model resource name: the final path
 * segment of a slash-separated path, or the name itself when it has no '/'.
 */
function parseModelName(modelName) {
    const segments = modelName.split('/');
    return segments[segments.length - 1];
}
function validateGoogleAuthOptions(project, googleAuthOptions) {

@@ -168,0 +232,0 @@ let opts;

# Changelog
## [1.8.0](https://github.com/googleapis/nodejs-vertexai/compare/v1.7.0...v1.8.0) (2024-09-18)
### Features
* Add CachedContent resource to Vertex AI client library. ([8c8963e](https://github.com/googleapis/nodejs-vertexai/commit/8c8963e5c62bf491d5a47f7c5a0db64fafaea0cd))
* Implement cached_content with generateContent methods ([c604b8c](https://github.com/googleapis/nodejs-vertexai/commit/c604b8caf4138537b38bdf9f57e8086d55216981))
## [1.7.0](https://github.com/googleapis/nodejs-vertexai/compare/v1.6.0...v1.7.0) (2024-08-30)

@@ -4,0 +12,0 @@

{
"name": "@google-cloud/vertexai",
"description": "Vertex Generative AI client for Node.js",
"version": "1.7.0",
"version": "1.8.0",
"license": "Apache-2.0",

@@ -6,0 +6,0 @@ "author": "Google LLC",

@@ -75,2 +75,3 @@ /**

systemInstruction: request.systemInstruction,
cachedContent: request.cachedContent,
generationConfig: request.generationConfig ?? generationConfig,

@@ -130,2 +131,3 @@ safetySettings: request.safetySettings ?? safetySettings,

systemInstruction: request.systemInstruction,
cachedContent: request.cachedContent,
generationConfig: request.generationConfig ?? generationConfig,

@@ -132,0 +134,0 @@ safetySettings: request.safetySettings ?? safetySettings,

@@ -83,3 +83,5 @@ /**

): 'v1' | 'v1beta1' {
return hasVertexRagStore(request) ? 'v1beta1' : 'v1';
return hasVertexRagStore(request) || hasCachedContent(request)
? 'v1beta1'
: 'v1';
}

@@ -98,2 +100,6 @@

/** True when the request references cached content by name. */
function hasCachedContent(request: GenerateContentRequest): boolean {
  return Boolean(request.cachedContent);
}
export function hasVertexAISearch(request: GenerateContentRequest): boolean {

@@ -100,0 +106,0 @@ for (const tool of request?.tools ?? []) {

@@ -21,3 +21,3 @@ /**

import {formulateSystemInstructionIntoContent} from './util';
import {formulateSystemInstructionIntoContent} from '../functions/util';
import {

@@ -24,0 +24,0 @@ generateContent,

@@ -21,3 +21,3 @@ /**

import {formulateSystemInstructionIntoContent} from './util';
import {formulateSystemInstructionIntoContent} from '../functions/util';
import {countTokens} from '../functions/count_tokens';

@@ -29,2 +29,3 @@ import {

import {
CachedContent,
Content,

@@ -300,2 +301,3 @@ CountTokensRequest,

private readonly apiEndpoint?: string;
private readonly cachedContent?: CachedContent;

@@ -316,2 +318,3 @@ /**

this.toolConfig = getGenerativeModelParams.toolConfig;
this.cachedContent = getGenerativeModelParams.cachedContent;
this.requestOptions = getGenerativeModelParams.requestOptions ?? {};

@@ -365,7 +368,9 @@ if (getGenerativeModelParams.systemInstruction) {

request = formulateRequestToGenerateContentRequest(request);
const formulatedRequest =
formulateSystemInstructionIntoGenerateContentRequest(
const formulatedRequest = {
...formulateSystemInstructionIntoGenerateContentRequest(
request,
this.systemInstruction
);
),
cachedContent: this.cachedContent?.name,
};
return generateContent(

@@ -413,7 +418,9 @@ this.location,

request = formulateRequestToGenerateContentRequest(request);
const formulatedRequest =
formulateSystemInstructionIntoGenerateContentRequest(
const formulatedRequest = {
...formulateSystemInstructionIntoGenerateContentRequest(
request,
this.systemInstruction
);
),
cachedContent: this.cachedContent?.name,
};
return generateContentStream(

@@ -495,2 +502,3 @@ this.location,

systemInstruction: this.systemInstruction,
cachedContent: this.cachedContent?.name,
};

@@ -507,5 +515,19 @@

request.systemInstruction ?? this.systemInstruction;
startChatRequest.cachedContent =
request.cachedContent ?? this.cachedContent?.name;
}
return new ChatSessionPreview(startChatRequest, this.requestOptions);
}
/** Returns this instance's model name (`this.model`). */
getModelName(): string {
  return this.model;
}
/** Returns the `CachedContent` this instance was constructed with, if any. */
getCachedContent(): CachedContent | undefined {
  return this.cachedContent;
}
/** Returns the system instruction set on this instance, if any. */
getSystemInstruction(): Content | undefined {
  return this.systemInstruction;
}
}

@@ -512,0 +534,0 @@

@@ -65,2 +65,8 @@ /**

systemInstruction?: string | Content;
/**
* Optional. The name of the cached content used as context to serve the prediction.
* This is the name of a `CachedContent` and not the cache object itself.
*/
cachedContent?: string;
}

@@ -140,2 +146,9 @@

model: string;
/**
* Optional. The cached content used as context to serve the prediction.
* Note: only used in explicit caching, where users can have control over caching
* (e.g. what content to cache) and enjoy guaranteed cost savings.
*/
cachedContent?: CachedContent;
}

@@ -1026,2 +1039,7 @@

systemInstruction?: string | Content;
/**
* Optional. The name of the cached content used as context to serve the prediction.
* This is the name of a `CachedContent` and not the cache object itself.
*/
cachedContent?: string;
}

@@ -1067,1 +1085,88 @@

}
/**
 * A resource used in LLM queries for users to explicitly specify
 * what to cache and how to cache.
 */
export interface CachedContent {
  /**
   * Immutable. Identifier. The server-generated resource name of the cached content.
   * Format: projects/{project}/locations/{location}/cachedContents/{cached_content}
   */
  name?: string;
  /** Optional. Immutable. The user-generated meaningful display name of the cached content. */
  displayName?: string;
  /**
   * Immutable. The name of the publisher model to use for cached content.
   * Format: projects/{project}/locations/{location}/publishers/{publisher}/models/{model}
   */
  model?: string;
  /** Developer set system instruction. Currently, text only. */
  systemInstruction?: Content | string;
  /** Optional. Input only. Immutable. The content to cache. */
  contents?: Content[];
  /** Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response. */
  tools?: Tool[];
  /** Optional. Input only. Immutable. Tool config. This config is shared for all tools. */
  toolConfig?: ToolConfig;
  /**
   * Output only. Creation time of the cache entry.
   * Format: google-datetime. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  createTime?: string;
  /**
   * Output only. When the cache entry was last updated in UTC time.
   * Format: google-datetime. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  updateTime?: string;
  /** Output only. Metadata on the usage of the cached content. */
  usageMetadata?: CachedContentUsageMetadata;
  /**
   * Timestamp of when this resource is considered expired.
   * This is *always* provided on output, regardless of what was sent on input.
   */
  expireTime?: string;
  /**
   * Input only. The TTL seconds for this resource. The expiration time
   * is computed: now + TTL.
   * Format: google-duration. See {@link https://cloud.google.com/docs/discovery/type-format}
   */
  ttl?: string;
}
/** Metadata on the usage of the cached content (token, media, and duration counts). */
export interface CachedContentUsageMetadata {
  /** Total number of tokens that the cached content consumes. */
  totalTokenCount?: number;
  /** Number of text characters. */
  textCount?: number;
  /** Number of images. */
  imageCount?: number;
  /** Duration of video in seconds. */
  videoDurationSeconds?: number;
  /** Duration of audio in seconds. */
  audioDurationSeconds?: number;
}
/** Response with a (possibly paginated) list of CachedContents. */
export interface ListCachedContentsResponse {
  /** List of cached contents. */
  cachedContents?: CachedContent[];
  /** A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. */
  nextPageToken?: string;
}

@@ -24,3 +24,3 @@ /**

const USER_AGENT_PRODUCT = 'model-builder';
const CLIENT_LIBRARY_VERSION = '1.7.0'; // x-release-please-version
const CLIENT_LIBRARY_VERSION = '1.8.0'; // x-release-please-version
const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`;

@@ -27,0 +27,0 @@ export const USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`;

@@ -23,2 +23,3 @@ /**

import {
CachedContent,
GetGenerativeModelParams,

@@ -29,3 +30,9 @@ ModelParams,

} from './types/content';
import {GoogleAuthError, IllegalArgumentError} from './types/errors';
import {
GoogleAuthError,
IllegalArgumentError,
ClientError,
} from './types/errors';
import * as Resources from './resources';
import {inferFullResourceName} from './resources/cached_contents';

@@ -154,2 +161,5 @@ /**

private readonly apiClient: Resources.ApiClient;
readonly cachedContents: Resources.CachedContents;
/**

@@ -180,2 +190,10 @@ * @constructor

this.apiEndpoint = apiEndpoint;
this.apiClient = new Resources.ApiClient(
this.project,
this.location,
'v1beta1',
this.googleAuth
);
this.cachedContents = new Resources.CachedContents(this.apiClient);
}

@@ -207,4 +225,86 @@

}
/**
 * Returns a `GenerativeModelPreview` bound to an existing cached content
 * resource. The cached content supplies the model, system instruction,
 * tools, and tool config; `modelParams` may add safety and generation
 * settings.
 * @throws {ClientError} when `cachedContent` lacks a `name` or `model`
 * field, when the model name has an unsupported format, or when
 * `modelParams` conflicts with the cached values.
 */
getGenerativeModelFromCachedContent(
  cachedContent: CachedContent,
  modelParams?: Partial<ModelParams>,
  requestOptions?: RequestOptions
) {
  if (!cachedContent.name) {
    throw new ClientError('Cached content must contain a `name` field.');
  }
  if (!cachedContent.model) {
    throw new ClientError('Cached content must contain a `model` field.');
  }
  validateCachedContentModel(cachedContent.model);
  /**
   * Not checking tools and toolConfig for now as it would require a deep
   * equality comparison and isn't likely to be a common case.
   */
  const disallowedDuplicates: Array<keyof ModelParams & keyof CachedContent> =
    ['model', 'systemInstruction'];
  for (const key of disallowedDuplicates) {
    if (
      modelParams?.[key] &&
      cachedContent[key] &&
      modelParams?.[key] !== cachedContent[key]
    ) {
      // Model names may differ only in resource-path prefix; compare the
      // short model ids before treating the difference as a conflict.
      if (key === 'model') {
        const modelParamsComp = parseModelName(modelParams[key]!);
        const cachedContentComp = parseModelName(cachedContent[key]!);
        if (modelParamsComp === cachedContentComp) {
          continue;
        }
      }
      throw new ClientError(
        `Different value for "${key}" specified in modelParams` +
          ` (${modelParams[key]}) and cachedContent (${cachedContent[key]})`
      );
    }
  }
  // NOTE(review): mutates the caller-supplied `cachedContent` in place by
  // expanding `name` to a full resource path — confirm callers expect this.
  cachedContent.name = inferFullResourceName(
    this.project,
    this.location,
    cachedContent.name
  );
  const modelParamsFromCache: GetGenerativeModelParams = {
    model: cachedContent.model,
    project: this.project,
    location: this.location,
    googleAuth: this.googleAuth,
    apiEndpoint: this.apiEndpoint,
    safetySettings: modelParams?.safetySettings,
    generationConfig: modelParams?.generationConfig,
    tools: cachedContent.tools,
    toolConfig: cachedContent.toolConfig,
    requestOptions: requestOptions,
    systemInstruction: cachedContent.systemInstruction,
    cachedContent,
  };
  return new GenerativeModelPreview(modelParamsFromCache);
}
}
/**
 * Validates that a cached-content model name is a bare model id, a
 * "models/..." path, or a full "projects/.../publishers/google/models/..."
 * resource path.
 * @throws {ClientError} for any other format.
 */
function validateCachedContentModel(modelName: string) {
  const acceptable =
    !modelName.includes('/') ||
    modelName.startsWith('models/') ||
    (modelName.startsWith('projects/') &&
      modelName.includes('/publishers/google/models/'));
  if (!acceptable) {
    throw new ClientError(
      `Cached content model name must start with "models/" or match "projects/.*/publishers/google/models/.*" or is a model name listed at https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions. Received: ${modelName}`
    );
  }
}
/**
 * Extracts the short model id from a model resource name: the final path
 * segment of a slash-separated path, or the name itself when it has no '/'.
 */
function parseModelName(modelName: string): string {
  const segments = modelName.split('/');
  return segments[segments.length - 1]!;
}
function validateGoogleAuthOptions(

@@ -211,0 +311,0 @@ project?: string,

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc