@google-cloud/vertexai
Comparing version 0.3.0 to 0.3.1
{
-  ".": "0.3.0"
+  ".": "0.3.1"
}
@@ -55,7 +55,2 @@ /**
-  /**
-   * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
-   * @return {Promise<any>} Promise of token
-   */
-  get token(): Promise<any>;
   /**
    * @param {ModelParams} modelParams - {@link ModelParams} Parameters to specify the generative model.
@@ -85,6 +80,31 @@ * @return {GenerativeModel} Instance of the GenerativeModel class. {@link GenerativeModel}
 export declare interface StartChatSessionRequest extends StartChatParams {
-  _vertex_instance: VertexAI_Preview;
+  project: string;
+  location: string;
   _model_instance: GenerativeModel;
 }
+/**
+ * @property {string} model - model name
+ * @property {string} project - The Google Cloud project to use for the request
+ * @property {string} location - The Google Cloud project location to use for the request
+ * @property {GoogleAuth} googleAuth - GoogleAuth class instance that handles authentication.
+ *     Details about GoogleAuth are documented in https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts
+ * @property {string} [apiEndpoint] - The base Vertex AI endpoint to use for the request. If
+ *     not provided, the default regionalized endpoint
+ *     (i.e. us-central1-aiplatform.googleapis.com) will be used.
+ * @property {GenerationConfig} [generation_config] - {@link GenerationConfig}
+ * @property {SafetySetting[]} [safety_settings] - {@link SafetySetting}
+ * @property {Tool[]} [tools] - {@link Tool}
+ */
+export declare interface GetGenerativeModelParams extends ModelParams {
+  model: string;
+  project: string;
+  location: string;
+  googleAuth: GoogleAuth;
+  apiEndpoint?: string;
+  generation_config?: GenerationConfig;
+  safety_settings?: SafetySetting[];
+  tools?: Tool[];
+}
 /**
  * Chat session to make multi-turn send message request.
@@ -98,3 +118,2 @@ * `sendMessage` method makes async call to get response of a chat message.
   private historyInternal;
-  private _vertex_instance;
   private _model_instance;
@@ -135,15 +154,18 @@ private _send_stream_promise;
   tools?: Tool[];
-  private _vertex_instance;
-  private _use_non_stream;
+  private project;
+  private location;
+  private googleAuth;
   private publisherModelEndpoint;
+  private apiEndpoint?;
   /**
    * @constructor
-   * @param {VertexAI_Preview} vertex_instance - {@link VertexAI_Preview}
-   * @param {string} model - model name
-   * @param {GenerationConfig} generation_config - Optional. {@link GenerationConfig}
-   * @param {SafetySetting[]} safety_settings - Optional. {@link SafetySetting}
+   * @param {GetGenerativeModelParams} getGenerativeModelParams - {@link GetGenerativeModelParams}
    */
-  constructor(vertex_instance: VertexAI_Preview, model: string, generation_config?: GenerationConfig, safety_settings?: SafetySetting[], tools?: Tool[]);
+  constructor(getGenerativeModelParams: GetGenerativeModelParams);
+  /**
+   * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
+   * @return {Promise<any>} Promise of token
+   */
+  get token(): Promise<any>;
   /**
    * Make an async call to generate content.
@@ -150,0 +172,0 @@ * @param request A GenerateContentRequest object with the request contents.
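For callers, the public entry point is unchanged: models are still obtained through `VertexAI`. What changes is what `getGenerativeModel` hands to the constructor. A minimal usage sketch under that assumption (the project and location values are placeholders):

```typescript
import {VertexAI} from '@google-cloud/vertexai';

// Entry point is the same in 0.3.1.
const vertexai = new VertexAI({project: 'my-project', location: 'us-central1'});

// Internally, getGenerativeModel now assembles a GetGenerativeModelParams
// object (model, project, location, googleAuth, apiEndpoint, ...) and passes
// it to the GenerativeModel constructor, instead of handing the model a
// back-reference to the VertexAI_Preview instance.
const model = vertexai.preview.getGenerativeModel({model: 'gemini-pro'});
```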
@@ -86,19 +86,2 @@ "use strict";
-    /**
-     * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
-     * @return {Promise<any>} Promise of token
-     */
-    get token() {
-        const credential_error_message = '\nUnable to authenticate your request\
-        \nDepending on your run time environment, you can get authentication by\
-        \n- if in local instance or cloud shell: `!gcloud auth login`\
-        \n- if in Colab:\
-        \n    -`from google.colab import auth`\
-        \n    -`auth.authenticate_user()`\
-        \n- if in service account or other: please follow guidance in https://cloud.google.com/docs/authentication';
-        const tokenPromise = this.googleAuth.getAccessToken().catch(e => {
-            throw new errors_1.GoogleAuthError(credential_error_message, e);
-        });
-        return tokenPromise;
-    }
     /**
      * @param {ModelParams} modelParams - {@link ModelParams} Parameters to specify the generative model.
@@ -108,6 +91,15 @@ * @return {GenerativeModel} Instance of the GenerativeModel class. {@link GenerativeModel}
     getGenerativeModel(modelParams) {
+        const getGenerativeModelParams = {
+            model: modelParams.model,
+            project: this.project,
+            location: this.location,
+            googleAuth: this.googleAuth,
+            apiEndpoint: this.apiEndpoint,
+            safety_settings: modelParams.safety_settings,
+            tools: modelParams.tools,
+        };
         if (modelParams.generation_config) {
-            modelParams.generation_config = validateGenerationConfig(modelParams.generation_config);
+            getGenerativeModelParams.generation_config = validateGenerationConfig(modelParams.generation_config);
         }
-        return new GenerativeModel(this, modelParams.model, modelParams.generation_config, modelParams.safety_settings, modelParams.tools);
+        return new GenerativeModel(getGenerativeModelParams);
     }
@@ -156,7 +148,6 @@ validateGoogleAuthOptions(project, googleAuthOptions) {
         this._send_stream_promise = Promise.resolve();
-        this.project = request._vertex_instance.project;
-        this.location = request._vertex_instance.location;
+        this.project = request.project;
+        this.location = request.location;
         this._model_instance = request._model_instance;
         this.historyInternal = (_a = request.history) !== null && _a !== void 0 ? _a : [];
-        this._vertex_instance = request._vertex_instance;
         this.generation_config = request.generation_config;
@@ -184,3 +175,3 @@ this.safety_settings = request.safety_settings;
         });
-        const generateContentResponse = generateContentResult.response;
+        const generateContentResponse = await generateContentResult.response;
         // Only push the latest message to history if the response returned a result
@@ -251,16 +242,14 @@ if (generateContentResponse.candidates.length !== 0) {
      * @constructor
-     * @param {VertexAI_Preview} vertex_instance - {@link VertexAI_Preview}
-     * @param {string} model - model name
-     * @param {GenerationConfig} generation_config - Optional. {@link GenerationConfig}
-     * @param {SafetySetting[]} safety_settings - Optional. {@link SafetySetting}
+     * @param {GetGenerativeModelParams} getGenerativeModelParams - {@link GetGenerativeModelParams}
      */
-    constructor(vertex_instance, model, generation_config, safety_settings, tools) {
-        this._use_non_stream = false;
-        this._vertex_instance = vertex_instance;
-        this.model = model;
-        this.generation_config = generation_config;
-        this.safety_settings = safety_settings;
-        this.tools = tools;
-        if (model.startsWith('models/')) {
+    constructor(getGenerativeModelParams) {
+        this.project = getGenerativeModelParams.project;
+        this.location = getGenerativeModelParams.location;
+        this.apiEndpoint = getGenerativeModelParams.apiEndpoint;
+        this.googleAuth = getGenerativeModelParams.googleAuth;
+        this.model = getGenerativeModelParams.model;
+        this.generation_config = getGenerativeModelParams.generation_config;
+        this.safety_settings = getGenerativeModelParams.safety_settings;
+        this.tools = getGenerativeModelParams.tools;
+        if (this.model.startsWith('models/')) {
             this.publisherModelEndpoint = `publishers/google/${this.model}`;
@@ -273,2 +262,19 @@ }
+    /**
+     * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
+     * @return {Promise<any>} Promise of token
+     */
+    get token() {
+        const credential_error_message = '\nUnable to authenticate your request\
+        \nDepending on your run time environment, you can get authentication by\
+        \n- if in local instance or cloud shell: `!gcloud auth login`\
+        \n- if in Colab:\
+        \n    -`from google.colab import auth`\
+        \n    -`auth.authenticate_user()`\
+        \n- if in service account or other: please follow guidance in https://cloud.google.com/docs/authentication';
+        const tokenPromise = this.googleAuth.getAccessToken().catch(e => {
+            throw new errors_1.GoogleAuthError(credential_error_message, e);
+        });
+        return tokenPromise;
+    }
     /**
      * Make an async call to generate content.
@@ -285,11 +291,2 @@ * @param request A GenerateContentRequest object with the request contents.
     }
-        if (!this._use_non_stream) {
-            const streamGenerateContentResult = await this.generateContentStream(request).catch(e => {
-                throw e;
-            });
-            const result = {
-                response: await streamGenerateContentResult.response,
-            };
-            return Promise.resolve(result);
-        }
         const generateContentRequest = {
@@ -302,9 +299,9 @@ contents: request.contents,
         const response = await (0, util_1.postRequest)({
-            region: this._vertex_instance.location,
-            project: this._vertex_instance.project,
+            region: this.location,
+            project: this.project,
             resourcePath: this.publisherModelEndpoint,
             resourceMethod: util_1.constants.GENERATE_CONTENT_METHOD,
-            token: await this._vertex_instance.token,
+            token: await this.token,
             data: generateContentRequest,
-            apiEndpoint: this._vertex_instance.apiEndpoint,
+            apiEndpoint: this.apiEndpoint,
         }).catch(e => {
@@ -336,9 +333,9 @@ throw new errors_1.GoogleGenerativeAIError('exception posting request', e);
         const response = await (0, util_1.postRequest)({
-            region: this._vertex_instance.location,
-            project: this._vertex_instance.project,
+            region: this.location,
+            project: this.project,
             resourcePath: this.publisherModelEndpoint,
             resourceMethod: util_1.constants.STREAMING_GENERATE_CONTENT_METHOD,
-            token: await this._vertex_instance.token,
+            token: await this.token,
             data: generateContentRequest,
-            apiEndpoint: this._vertex_instance.apiEndpoint,
+            apiEndpoint: this.apiEndpoint,
         }).catch(e => {
@@ -358,9 +355,9 @@ throw new errors_1.GoogleGenerativeAIError('exception posting request', e);
         const response = await (0, util_1.postRequest)({
-            region: this._vertex_instance.location,
-            project: this._vertex_instance.project,
+            region: this.location,
+            project: this.project,
             resourcePath: this.publisherModelEndpoint,
             resourceMethod: 'countTokens',
-            token: await this._vertex_instance.token,
+            token: await this.token,
             data: request,
-            apiEndpoint: this._vertex_instance.apiEndpoint,
+            apiEndpoint: this.apiEndpoint,
         }).catch(e => {
@@ -382,3 +379,4 @@ throw new errors_1.GoogleGenerativeAIError('exception posting request', e);
         const startChatRequest = {
-            _vertex_instance: this._vertex_instance,
+            project: this.project,
+            location: this.location,
             _model_instance: this,
@@ -385,0 +383,0 @@ };
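The net effect of dropping `_use_non_stream`: `generateContent` now issues a single POST to the unary `generateContent` method instead of delegating to `generateContentStream` and awaiting its aggregated response. A sketch of the two call paths from a caller's perspective (the request literal is illustrative; the test fixtures below, such as TEST_USER_CHAT_MESSAGE, use the same shape):

```typescript
const request = {
  contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
};

// Unary path (0.3.1): one round trip, response available directly.
const result = await model.generateContent(request);
console.log(result.response.candidates[0]);

// Streaming path: unchanged; still POSTs to streamGenerateContent and
// exposes a chunk iterator plus an aggregated response promise.
const streamingResult = await model.generateContentStream(request);
for await (const item of streamingResult.stream) {
  console.log(item);
}
const aggregated = await streamingResult.response;
```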
@@ -22,2 +22,2 @@ /**
 export declare const FUNCTION_ROLE = "function";
-export declare const USER_AGENT = "model-builder/0.3.0 grpc-node/0.3.0";
+export declare const USER_AGENT = "model-builder/0.3.1 grpc-node/0.3.1";
@@ -26,5 +26,5 @@ "use strict";
 const USER_AGENT_PRODUCT = 'model-builder';
-const CLIENT_LIBRARY_VERSION = '0.3.0'; // x-release-please-version
+const CLIENT_LIBRARY_VERSION = '0.3.1'; // x-release-please-version
 const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`;
 exports.USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`;
 //# sourceMappingURL=constants.js.map
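With these constants, the computed header value is `model-builder/0.3.1 grpc-node/0.3.1`, matching the updated USER_AGENT declaration in the constants .d.ts hunk above.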
@@ -223,4 +223,4 @@ "use strict";
     });
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexai.preview.getGenerativeModel({ model: 'gemini-pro' });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     expectedStreamResult = {
@@ -293,3 +293,3 @@ response: Promise.resolve(TEST_MODEL_RESPONSE),
     };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(req);
@@ -302,3 +302,3 @@ expect(resp).toEqual(expectedResult);
     };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(TEST_CHAT_MESSSAGE_TEXT);
@@ -314,7 +314,3 @@ expect(resp).toEqual(expectedResult);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(req);
@@ -338,7 +334,3 @@ expect(resp).toEqual(expectedResult);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(req);
@@ -353,6 +345,6 @@ expect(resp).toEqual(expectedResult);
     });
-    spyOnProperty(vertexaiWithBasePath.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexaiWithBasePath.preview.getGenerativeModel({
         model: 'gemini-pro',
     });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req = {
@@ -364,7 +356,3 @@ contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     await model.generateContent(req);
@@ -378,6 +366,6 @@ expect(fetchSpy.calls.allArgs()[0][0].toString()).toContain(TEST_ENDPOINT_BASE_PATH);
     });
-    spyOnProperty(vertexaiWithoutBasePath.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexaiWithoutBasePath.preview.getGenerativeModel({
         model: 'gemini-pro',
     });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req = {
@@ -389,7 +377,3 @@ contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     await model.generateContent(req);
@@ -407,7 +391,3 @@ expect(fetchSpy.calls.allArgs()[0][0].toString()).toContain(`${LOCATION}-aiplatform.googleapis.com`);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     await model.generateContent(reqWithEmptyConfigs);
@@ -428,7 +408,3 @@ const requestArgs = fetchSpy.calls.allArgs()[0][1];
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     await model.generateContent(reqWithEmptyConfigs);
@@ -448,7 +424,3 @@ const requestArgs = fetchSpy.calls.allArgs()[0][1];
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(req);
@@ -468,7 +440,3 @@ expect((_a = resp.response.candidates[0].citationMetadata) === null || _a === void 0 ? void 0 : _a.citationSources.length).toEqual(TEST_MODEL_RESPONSE.candidates[0].citationMetadata.citationSources
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await model.generateContent(req);
@@ -624,4 +592,4 @@ expect(resp).toEqual(expectedResult);
     });
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const model = vertexai.preview.getGenerativeModel({ model: 'gemini-pro' });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req = {
@@ -649,3 +617,2 @@ contents: TEST_USER_CHAT_MESSAGE,
     vertexai = new index_1.VertexAI({ project: PROJECT, location: LOCATION });
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexai.preview.getGenerativeModel({ model: 'gemini-pro' });
@@ -667,2 +634,3 @@ chatSession = model.startChat({
     spyOn(global, 'fetch').and.returnValue(fetchResult);
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     });
@@ -675,7 +643,3 @@ describe('sendMessage', () => {
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await chatSession.sendMessage(req);
@@ -690,7 +654,3 @@ expect(resp).toEqual(expectedResult);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await chatSessionWithNoArgs.sendMessage(req);
@@ -705,7 +665,3 @@ expect(resp).toEqual(expectedResult);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_EMPTY_MODEL_RESPONSE),
-        stream: testGeneratorWithEmptyResponse(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     await expectAsync(chatSessionWithEmptyResponse.sendMessage(req)).toBeRejected();
@@ -719,7 +675,3 @@ expect(chatSessionWithEmptyResponse.history.length).toEqual(0);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult);
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
     const resp = await chatSessionWithNoArgs.sendMessage(req);
@@ -735,8 +687,7 @@ expect(resp).toEqual(expectedResult);
     };
-    const expectedStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL),
-        stream: testGenerator(),
+    const expectedResult = {
+        response: TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL,
     };
-    const streamSpy = spyOn(StreamFunctions, 'processStream');
-    streamSpy.and.returnValue(expectedStreamResult);
+    const streamSpy = spyOn(StreamFunctions, 'processNonStream');
+    streamSpy.and.returnValue(expectedResult);
     const response1 = await chatSessionWithFunctionCall.sendMessage(functionCallChatMessage);
@@ -749,7 +700,6 @@ expect(response1).toEqual(expectedFunctionCallResponse);
     };
-    const expectedFollowUpStreamResult = {
-        response: Promise.resolve(TEST_MODEL_RESPONSE),
-        stream: testGenerator(),
+    const expectedFollowUpResult = {
+        response: TEST_MODEL_RESPONSE,
     };
-    streamSpy.and.returnValue(expectedFollowUpStreamResult);
+    streamSpy.and.returnValue(expectedFollowUpResult);
     const response2 = await chatSessionWithFunctionCall.sendMessage(TEST_FUNCTION_RESPONSE_PART);
@@ -884,3 +834,3 @@ expect(response2).toEqual(expectedFollowUpResponse);
     beforeEach(() => {
-        spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+        spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
         spyOn(global, 'fetch').and.throwError('error');
@@ -917,4 +867,4 @@ });
     beforeEach(() => {
-        spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
         spyOn(global, 'fetch').and.resolveTo();
+        spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     });
@@ -969,3 +919,3 @@ it('generateContent should throw GoogleGenerativeAI error', async () => {
     beforeEach(() => {
-        spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+        spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
         spyOn(global, 'fetch').and.resolveTo(response);
@@ -1021,3 +971,3 @@ });
     beforeEach(() => {
-        spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+        spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
         spyOn(global, 'fetch').and.resolveTo(response);
@@ -1024,0 +974,0 @@ });
# Changelog
+## [0.3.1](https://github.com/googleapis/nodejs-vertexai/compare/v0.3.0...v0.3.1) (2024-02-06)
+### Bug Fixes
+* decouple dependency between VertexAI_Preview and GenerativeModel classes ([6762c99](https://github.com/googleapis/nodejs-vertexai/commit/6762c995bfa1bfdb740ed01a2eb4385126b0e36a))
+* Switch NodeJS generateContent to call Unary API endpoint ([e4edb59](https://github.com/googleapis/nodejs-vertexai/commit/e4edb599863c23a896e263ba2639c80481a65543))
 ## [0.3.0](https://github.com/googleapis/nodejs-vertexai/compare/v0.2.1...v0.3.0) (2024-01-30)
@@ -4,0 +12,0 @@
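Concretely, the decoupling fix replaces the `_vertex_instance` back-reference with plain data on the request and parameter objects. A before/after sketch of `StartChatSessionRequest`, with field lists taken from the diffs above (the Old/New suffixes are only for illustration):

```typescript
// 0.3.0: the chat session reached back into the SDK entry point.
interface StartChatSessionRequestOld extends StartChatParams {
  _vertex_instance: VertexAI_Preview;
  _model_instance: GenerativeModel;
}

// 0.3.1: the session carries the data it needs directly.
interface StartChatSessionRequestNew extends StartChatParams {
  project: string;
  location: string;
  _model_instance: GenerativeModel;
}
```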
{
   "name": "@google-cloud/vertexai",
   "description": "Vertex Generative AI client for Node.js",
-  "version": "0.3.0",
+  "version": "0.3.1",
   "license": "Apache-2.0",
@@ -6,0 +6,0 @@ "author": "Google LLC",
src/index.ts
@@ -105,21 +105,2 @@ /**
-  /**
-   * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
-   * @return {Promise<any>} Promise of token
-   */
-  get token(): Promise<any> {
-    const credential_error_message =
-      '\nUnable to authenticate your request\
-      \nDepending on your run time environment, you can get authentication by\
-      \n- if in local instance or cloud shell: `!gcloud auth login`\
-      \n- if in Colab:\
-      \n    -`from google.colab import auth`\
-      \n    -`auth.authenticate_user()`\
-      \n- if in service account or other: please follow guidance in https://cloud.google.com/docs/authentication';
-    const tokenPromise = this.googleAuth.getAccessToken().catch(e => {
-      throw new GoogleAuthError(credential_error_message, e);
-    });
-    return tokenPromise;
-  }
   /**
    * @param {ModelParams} modelParams - {@link ModelParams} Parameters to specify the generative model.
@@ -129,4 +110,13 @@ * @return {GenerativeModel} Instance of the GenerativeModel class. {@link GenerativeModel}
   getGenerativeModel(modelParams: ModelParams): GenerativeModel {
+    const getGenerativeModelParams: GetGenerativeModelParams = {
+      model: modelParams.model,
+      project: this.project,
+      location: this.location,
+      googleAuth: this.googleAuth,
+      apiEndpoint: this.apiEndpoint,
+      safety_settings: modelParams.safety_settings,
+      tools: modelParams.tools,
+    };
     if (modelParams.generation_config) {
-      modelParams.generation_config = validateGenerationConfig(
+      getGenerativeModelParams.generation_config = validateGenerationConfig(
         modelParams.generation_config
@@ -136,9 +126,3 @@ );
-    return new GenerativeModel(
-      this,
-      modelParams.model,
-      modelParams.generation_config,
-      modelParams.safety_settings,
-      modelParams.tools
-    );
+    return new GenerativeModel(getGenerativeModelParams);
   }
@@ -206,3 +190,4 @@
 export declare interface StartChatSessionRequest extends StartChatParams {
-  _vertex_instance: VertexAI_Preview;
+  project: string;
+  location: string;
   _model_instance: GenerativeModel;
@@ -212,2 +197,27 @@ }
+/**
+ * @property {string} model - model name
+ * @property {string} project - The Google Cloud project to use for the request
+ * @property {string} location - The Google Cloud project location to use for the request
+ * @property {GoogleAuth} googleAuth - GoogleAuth class instance that handles authentication.
+ *     Details about GoogleAuth are documented in https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts
+ * @property {string} [apiEndpoint] - The base Vertex AI endpoint to use for the request. If
+ *     not provided, the default regionalized endpoint
+ *     (i.e. us-central1-aiplatform.googleapis.com) will be used.
+ * @property {GenerationConfig} [generation_config] - {@link GenerationConfig}
+ * @property {SafetySetting[]} [safety_settings] - {@link SafetySetting}
+ * @property {Tool[]} [tools] - {@link Tool}
+ */
+export declare interface GetGenerativeModelParams extends ModelParams {
+  model: string;
+  project: string;
+  location: string;
+  googleAuth: GoogleAuth;
+  apiEndpoint?: string;
+  generation_config?: GenerationConfig;
+  safety_settings?: SafetySetting[];
+  tools?: Tool[];
+}
 /**
  * Chat session to make multi-turn send message request.
@@ -222,3 +232,2 @@ * `sendMessage` method makes async call to get response of a chat message.
   private historyInternal: Content[];
-  private _vertex_instance: VertexAI_Preview;
   private _model_instance: GenerativeModel;
@@ -239,7 +248,6 @@ private _send_stream_promise: Promise<void> = Promise.resolve();
   constructor(request: StartChatSessionRequest) {
-    this.project = request._vertex_instance.project;
-    this.location = request._vertex_instance.location;
+    this.project = request.project;
+    this.location = request.location;
     this._model_instance = request._model_instance;
     this.historyInternal = request.history ?? [];
-    this._vertex_instance = request._vertex_instance;
     this.generation_config = request.generation_config;
@@ -273,3 +281,3 @@ this.safety_settings = request.safety_settings;
     });
-    const generateContentResponse = generateContentResult.response;
+    const generateContentResponse = await generateContentResult.response;
     // Only push the latest message to history if the response returned a result
@@ -358,27 +366,22 @@ if (generateContentResponse.candidates.length !== 0) {
   tools?: Tool[];
-  private _vertex_instance: VertexAI_Preview;
-  private _use_non_stream = false;
+  private project: string;
+  private location: string;
+  private googleAuth: GoogleAuth;
   private publisherModelEndpoint: string;
+  private apiEndpoint?: string;
   /**
    * @constructor
-   * @param {VertexAI_Preview} vertex_instance - {@link VertexAI_Preview}
-   * @param {string} model - model name
-   * @param {GenerationConfig} generation_config - Optional. {@link GenerationConfig}
-   * @param {SafetySetting[]} safety_settings - Optional. {@link SafetySetting}
+   * @param {GetGenerativeModelParams} getGenerativeModelParams - {@link GetGenerativeModelParams}
    */
-  constructor(
-    vertex_instance: VertexAI_Preview,
-    model: string,
-    generation_config?: GenerationConfig,
-    safety_settings?: SafetySetting[],
-    tools?: Tool[]
-  ) {
-    this._vertex_instance = vertex_instance;
-    this.model = model;
-    this.generation_config = generation_config;
-    this.safety_settings = safety_settings;
-    this.tools = tools;
-    if (model.startsWith('models/')) {
+  constructor(getGenerativeModelParams: GetGenerativeModelParams) {
+    this.project = getGenerativeModelParams.project;
+    this.location = getGenerativeModelParams.location;
+    this.apiEndpoint = getGenerativeModelParams.apiEndpoint;
+    this.googleAuth = getGenerativeModelParams.googleAuth;
+    this.model = getGenerativeModelParams.model;
+    this.generation_config = getGenerativeModelParams.generation_config;
+    this.safety_settings = getGenerativeModelParams.safety_settings;
+    this.tools = getGenerativeModelParams.tools;
+    if (this.model.startsWith('models/')) {
       this.publisherModelEndpoint = `publishers/google/${this.model}`;
@@ -391,2 +394,21 @@ } else {
+  /**
+   * Get access token from GoogleAuth. Throws GoogleAuthError when fails.
+   * @return {Promise<any>} Promise of token
+   */
+  get token(): Promise<any> {
+    const credential_error_message =
+      '\nUnable to authenticate your request\
+      \nDepending on your run time environment, you can get authentication by\
+      \n- if in local instance or cloud shell: `!gcloud auth login`\
+      \n- if in Colab:\
+      \n    -`from google.colab import auth`\
+      \n    -`auth.authenticate_user()`\
+      \n- if in service account or other: please follow guidance in https://cloud.google.com/docs/authentication';
+    const tokenPromise = this.googleAuth.getAccessToken().catch(e => {
+      throw new GoogleAuthError(credential_error_message, e);
+    });
+    return tokenPromise;
+  }
   /**
    * Make an async call to generate content.
@@ -413,13 +435,2 @@ * @param request A GenerateContentRequest object with the request contents.
-    if (!this._use_non_stream) {
-      const streamGenerateContentResult: StreamGenerateContentResult =
-        await this.generateContentStream(request).catch(e => {
-          throw e;
-        });
-      const result: GenerateContentResult = {
-        response: await streamGenerateContentResult.response,
-      };
-      return Promise.resolve(result);
-    }
     const generateContentRequest: GenerateContentRequest = {
@@ -433,9 +444,9 @@ contents: request.contents,
     const response: Response | undefined = await postRequest({
-      region: this._vertex_instance.location,
-      project: this._vertex_instance.project,
+      region: this.location,
+      project: this.project,
       resourcePath: this.publisherModelEndpoint,
       resourceMethod: constants.GENERATE_CONTENT_METHOD,
-      token: await this._vertex_instance.token,
+      token: await this.token,
       data: generateContentRequest,
-      apiEndpoint: this._vertex_instance.apiEndpoint,
+      apiEndpoint: this.apiEndpoint,
     }).catch(e => {
@@ -477,9 +488,9 @@ throw new GoogleGenerativeAIError('exception posting request', e);
     const response = await postRequest({
-      region: this._vertex_instance.location,
-      project: this._vertex_instance.project,
+      region: this.location,
+      project: this.project,
       resourcePath: this.publisherModelEndpoint,
       resourceMethod: constants.STREAMING_GENERATE_CONTENT_METHOD,
-      token: await this._vertex_instance.token,
+      token: await this.token,
       data: generateContentRequest,
-      apiEndpoint: this._vertex_instance.apiEndpoint,
+      apiEndpoint: this.apiEndpoint,
     }).catch(e => {
@@ -500,9 +511,9 @@ throw new GoogleGenerativeAIError('exception posting request', e);
     const response = await postRequest({
-      region: this._vertex_instance.location,
-      project: this._vertex_instance.project,
+      region: this.location,
+      project: this.project,
       resourcePath: this.publisherModelEndpoint,
       resourceMethod: 'countTokens',
-      token: await this._vertex_instance.token,
+      token: await this.token,
       data: request,
-      apiEndpoint: this._vertex_instance.apiEndpoint,
+      apiEndpoint: this.apiEndpoint,
     }).catch(e => {
@@ -524,3 +535,4 @@ throw new GoogleGenerativeAIError('exception posting request', e);
     const startChatRequest: StartChatSessionRequest = {
-      _vertex_instance: this._vertex_instance,
+      project: this.project,
+      location: this.location,
       _model_instance: this,
@@ -527,0 +539,0 @@ };
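With the `token` getter relocated to `GenerativeModel`, each request resolves credentials through the model's own `GoogleAuth` instance rather than reaching back into `VertexAI_Preview`. A short sketch of resolving a token explicitly (per the JSDoc above, the getter rejects with `GoogleAuthError`, carrying the setup guidance in the error message, when no credentials are available):

```typescript
try {
  // Resolves via this.googleAuth.getAccessToken() under the hood.
  const accessToken = await model.token;
} catch (e) {
  // e is a GoogleAuthError wrapping the underlying authentication failure.
  console.error(e);
}
```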
@@ -23,4 +23,4 @@ /**
 const USER_AGENT_PRODUCT = 'model-builder';
-const CLIENT_LIBRARY_VERSION = '0.3.0'; // x-release-please-version
+const CLIENT_LIBRARY_VERSION = '0.3.1'; // x-release-please-version
 const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`;
 export const USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`;
@@ -258,4 +258,4 @@ /**
     });
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexai.preview.getGenerativeModel({model: 'gemini-pro'});
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     expectedStreamResult = {
@@ -348,4 +348,4 @@ response: Promise.resolve(TEST_MODEL_RESPONSE),
     };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -359,4 +359,4 @@ const resp = await model.generateContent(req);
     };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -374,8 +374,4 @@ const resp = await model.generateContent(TEST_CHAT_MESSSAGE_TEXT);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -404,8 +400,4 @@ const resp = await model.generateContent(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -422,9 +414,6 @@ const resp = await model.generateContent(req);
     });
-    spyOnProperty(vertexaiWithBasePath.preview, 'token', 'get').and.resolveTo(
-      TEST_TOKEN
-    );
     model = vertexaiWithBasePath.preview.getGenerativeModel({
       model: 'gemini-pro',
     });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req: GenerateContentRequest = {
@@ -436,8 +425,4 @@ contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -455,11 +440,6 @@ await model.generateContent(req);
     });
-    spyOnProperty(
-      vertexaiWithoutBasePath.preview,
-      'token',
-      'get'
-    ).and.resolveTo(TEST_TOKEN);
     model = vertexaiWithoutBasePath.preview.getGenerativeModel({
       model: 'gemini-pro',
     });
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req: GenerateContentRequest = {
@@ -471,8 +451,4 @@ contents: TEST_USER_CHAT_MESSAGE,
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
    );
@@ -494,8 +470,4 @@ await model.generateContent(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -518,8 +490,4 @@ await model.generateContent(reqWithEmptyConfigs);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -540,8 +508,4 @@ await model.generateContent(reqWithEmptyConfigs);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -567,8 +531,4 @@ const resp = await model.generateContent(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -739,4 +699,4 @@ const resp = await model.generateContent(req);
     });
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const model = vertexai.preview.getGenerativeModel({model: 'gemini-pro'});
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     const req: CountTokensRequest = {
@@ -769,3 +729,2 @@ contents: TEST_USER_CHAT_MESSAGE,
     vertexai = new VertexAI({project: PROJECT, location: LOCATION});
-    spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
     model = vertexai.preview.getGenerativeModel({model: 'gemini-pro'});
@@ -789,2 +748,3 @@ chatSession = model.startChat({
     spyOn(global, 'fetch').and.returnValue(fetchResult);
+    spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
   });
@@ -798,8 +758,4 @@
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -816,8 +772,4 @@ const resp = await chatSession.sendMessage(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -834,8 +786,4 @@ const resp = await chatSessionWithNoArgs.sendMessage(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_EMPTY_MODEL_RESPONSE),
-      stream: testGeneratorWithEmptyResponse(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -852,8 +800,4 @@ await expectAsync(
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
-    };
-    spyOn(StreamFunctions, 'processStream').and.returnValue(
-      expectedStreamResult
+    spyOn(StreamFunctions, 'processNonStream').and.returnValue(
+      expectedResult
     );
@@ -870,10 +814,9 @@ const resp = await chatSessionWithNoArgs.sendMessage(req);
     };
-    const expectedStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL),
-      stream: testGenerator(),
+    const expectedResult: GenerateContentResult = {
+      response: TEST_MODEL_RESPONSE_WITH_FUNCTION_CALL,
     };
-    const streamSpy = spyOn(StreamFunctions, 'processStream');
+    const streamSpy = spyOn(StreamFunctions, 'processNonStream');
-    streamSpy.and.returnValue(expectedStreamResult);
+    streamSpy.and.returnValue(expectedResult);
     const response1 = await chatSessionWithFunctionCall.sendMessage(
@@ -889,7 +832,6 @@ functionCallChatMessage
     };
-    const expectedFollowUpStreamResult: StreamGenerateContentResult = {
-      response: Promise.resolve(TEST_MODEL_RESPONSE),
-      stream: testGenerator(),
+    const expectedFollowUpResult: GenerateContentResult = {
+      response: TEST_MODEL_RESPONSE,
     };
-    streamSpy.and.returnValue(expectedFollowUpStreamResult);
+    streamSpy.and.returnValue(expectedFollowUpResult);
     const response2 = await chatSessionWithFunctionCall.sendMessage(
@@ -1045,3 +987,3 @@ TEST_FUNCTION_RESPONSE_PART
     beforeEach(() => {
-      spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+      spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
       spyOn(global, 'fetch').and.throwError('error');
@@ -1084,4 +1026,4 @@ });
     beforeEach(() => {
-      spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
       spyOn(global, 'fetch').and.resolveTo();
+      spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
     });
@@ -1142,3 +1084,3 @@
     beforeEach(() => {
-      spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+      spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
       spyOn(global, 'fetch').and.resolveTo(response);
@@ -1200,3 +1142,3 @@ });
     beforeEach(() => {
-      spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
+      spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);
       spyOn(global, 'fetch').and.resolveTo(response);
@@ -1203,0 +1145,0 @@ });
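The test churn above follows mechanically from the refactor: the `token` stub moves from the shared `vertexai.preview` object to the model instance, and `generateContent` expectations now go through `processNonStream` instead of `processStream`. A condensed Jasmine sketch of the updated pattern, using the same fixtures as the suite above:

```typescript
const vertexai = new VertexAI({project: PROJECT, location: LOCATION});
const model = vertexai.preview.getGenerativeModel({model: 'gemini-pro'});

// 0.3.0 stubbed the getter on the shared preview instance:
//   spyOnProperty(vertexai.preview, 'token', 'get').and.resolveTo(TEST_TOKEN);
// 0.3.1 stubs it where it now lives:
spyOnProperty(model, 'token', 'get').and.resolveTo(TEST_TOKEN);

// generateContent is unary now, so tests stub processNonStream rather than
// building a StreamGenerateContentResult for processStream.
spyOn(StreamFunctions, 'processNonStream').and.returnValue(expectedResult);
```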