@google-cloud/vertexai
Comparing version 0.2.0 to 0.2.1
{ | ||
".": "0.2.0" | ||
".": "0.2.1" | ||
} |
@@ -67,2 +67,5 @@ /** | ||
* Params to initiate a multiturn chat with the model via startChat | ||
* @property {Content[]} - [history] history of the chat session. {@link Content} | ||
* @property {SafetySetting[]} - [safety_settings] Array of {@link SafetySetting} | ||
* @property {GenerationConfig} - [generation_config] {@link GenerationConfig} | ||
*/ | ||
@@ -73,8 +76,7 @@ export declare interface StartChatParams { | ||
generation_config?: GenerationConfig; | ||
stream?: boolean; | ||
} | ||
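The `stream` flag removed above reflects that streaming is now chosen per call (`sendMessage` vs. `sendMessageStream`) rather than at chat creation. A minimal sketch of passing the remaining StartChatParams; the project and location values are placeholders, not part of this diff:

import {VertexAI} from '@google-cloud/vertexai';

const vertexAI = new VertexAI({project: 'my-project', location: 'us-central1'});
const model = vertexAI.preview.getGenerativeModel({model: 'gemini-pro'});

// Every StartChatParams field is optional.
const chat = model.startChat({
  history: [{role: 'user', parts: [{text: 'Hello'}]}],
  generation_config: {max_output_tokens: 256},
});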
/** | ||
* All params passed to initiate multiturn chat via startChat | ||
* @see VertexAI_Preview for details on _vertex_instance parameter | ||
* @see GenerativeModel for details on _model_instance parameter | ||
* @property {VertexAI_Preview} - _vertex_instance {@link VertexAI_Preview} | ||
* @property {GenerativeModel} - _model_instance {@link GenerativeModel} | ||
*/ | ||
@@ -89,3 +91,2 @@ export declare interface StartChatSessionRequest extends StartChatParams { | ||
* `sendMessageStream` method makes async call to stream response of a chat message. | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
@@ -102,2 +103,6 @@ export declare class ChatSession { | ||
get history(): Content[]; | ||
/** | ||
* @constructor | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
constructor(request: StartChatSessionRequest); | ||
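In practice a ChatSession comes from `startChat` rather than from this constructor directly. A sketch of both send paths, reusing the `chat` object from the sketch above:

const result = await chat.sendMessage('How are you doing today?');
console.log(result.response.candidates[0].content.parts[0].text);

// Streaming variant: chunks arrive incrementally; history updates once complete.
const streamResult = await chat.sendMessageStream('Now tell me a story');
for await (const chunk of streamResult.stream) {
  console.log(chunk.candidates[0].content.parts[0].text);
}
console.log(chat.history.length); // grows as turns accumulate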
@@ -144,3 +149,3 @@ /** | ||
*/ | ||
generateContent(request: GenerateContentRequest): Promise<GenerateContentResult>; | ||
generateContent(request: GenerateContentRequest | string): Promise<GenerateContentResult>; | ||
/** | ||
@@ -151,3 +156,3 @@ * Make an async stream request to generate content. The response will be returned in stream. | ||
*/ | ||
generateContentStream(request: GenerateContentRequest): Promise<StreamGenerateContentResult>; | ||
generateContentStream(request: GenerateContentRequest | string): Promise<StreamGenerateContentResult>; | ||
/** | ||
@@ -154,0 +159,0 @@ * Make an async request to count tokens. |
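The widened `GenerateContentRequest | string` signatures above are the public half of the 0.2.1 fix noted in the changelog below: both methods now accept either a full request object or a bare string. A sketch of the two equivalent call styles, reusing `model` from the first sketch:

// 0.2.0 style: explicit request object.
const full = await model.generateContent({
  contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
});

// New in 0.2.1: a plain string, wrapped into the same shape internally.
const short = await model.generateContent('Why is the sky blue?');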
@@ -133,3 +133,2 @@ "use strict"; | ||
* `sendMessageStream` method makes async call to stream response of a chat message. | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
@@ -140,2 +139,6 @@ class ChatSession { | ||
} | ||
/** | ||
* @constructor | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
constructor(request) { | ||
@@ -258,2 +261,3 @@ var _a; | ||
var _a, _b; | ||
request = formatContentRequest(request, this.generation_config, this.safety_settings); | ||
validateGcsInput(request.contents); | ||
@@ -299,2 +303,3 @@ if (request.generation_config) { | ||
var _a, _b; | ||
request = formatContentRequest(request, this.generation_config, this.safety_settings); | ||
validateGcsInput(request.contents); | ||
@@ -404,2 +409,3 @@ if (request.generation_config) { | ||
if ('file_data' in part) { | ||
// @ts-ignore | ||
const uri = part['file_data']['file_uri']; | ||
@@ -421,2 +427,14 @@ if (!uri.startsWith('gs://')) { | ||
} | ||
function formatContentRequest(request, generation_config, safety_settings) { | ||
if (typeof request === 'string') { | ||
return { | ||
contents: [{ role: util_1.constants.USER_ROLE, parts: [{ text: request }] }], | ||
generation_config: generation_config, | ||
safety_settings: safety_settings, | ||
}; | ||
} | ||
else { | ||
return request; | ||
} | ||
} | ||
//# sourceMappingURL=index.js.map |
@@ -34,3 +34,4 @@ /** | ||
/** | ||
* Params used by the generateContent endpoint | ||
* Params used to call the generateContent method. | ||
* @property {Content[]} - contents. Array of {@link Content} | ||
*/ | ||
@@ -41,3 +42,4 @@ export declare interface GenerateContentRequest extends BaseModelParams { | ||
/** | ||
* Params used to call countTokens | ||
* Params used to call the countTokens method. | ||
* @property {Content[]} - contents. Array of {@link Content} | ||
*/ | ||
@@ -48,3 +50,6 @@ export declare interface CountTokensRequest { | ||
/** | ||
* Response returned from countTokens | ||
* Response returned from countTokens method. | ||
* @property {number} - totalTokens. The total number of tokens counted across all instances from the request. | ||
* @property {number} - [totalBillableCharacters]. The total number of billable characters counted across all instances from the request. | ||
* | ||
*/ | ||
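A sketch of the documented request/response pair for countTokens, again reusing `model`; only totalTokens is guaranteed to be present:

const counted = await model.countTokens({
  contents: [{role: 'user', parts: [{text: 'How many tokens is this?'}]}],
});
console.log(counted.totalTokens);             // always populated
console.log(counted.totalBillableCharacters); // optional; may be undefined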
@@ -57,3 +62,3 @@ export declare interface CountTokensResponse { | ||
* Configuration for initializing a model, for example via getGenerativeModel | ||
* @param {string} model - model name. | ||
* @property {string} model - model name. | ||
* @example "gemini-pro" | ||
@@ -65,3 +70,5 @@ */ | ||
/** | ||
* Base params for initializing a model or calling GenerateContent | ||
* Base params for initializing a model or calling GenerateContent. | ||
* @property {SafetySetting[]} - [safety_settings] Array of {@link SafetySetting} | ||
* @property {GenerationConfig} - [generation_config] {@link GenerationConfig} | ||
*/ | ||
@@ -73,3 +80,5 @@ export declare interface BaseModelParams { | ||
/** | ||
* Safety feedback for an entire request | ||
* Safety feedback for an entire request. | ||
* @property {HarmCategory} - category. {@link HarmCategory} | ||
* @property {HarmBlockThreshold} - threshold. {@link HarmBlockThreshold} | ||
*/ | ||
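An illustrative safety_settings array built from the enums documented further down (the import assumes both enums are re-exported from the package root):

import {HarmCategory, HarmBlockThreshold} from '@google-cloud/vertexai';

const safety_settings = [
  {
    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
  },
];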
@@ -82,2 +91,8 @@ export declare interface SafetySetting { | ||
* Configuration options for model generation and outputs | ||
* @property {number} - [candidate_count] Number of candidates to generate. | ||
* @property {string[]} - [stop_sequences] Stop sequences. | ||
* @property {number} - [max_output_tokens] The maximum number of output tokens to generate per message. | ||
* @property {number} - [temperature] Controls the randomness of predictions. | ||
* @property {number} - [top_p] If specified, nucleus sampling will be used. | ||
* @property {number} - [top_k] If specified, top-k sampling will be used. | ||
*/ | ||
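Putting the documented fields together; the values below are illustrative, not recommendations:

const generation_config = {
  candidate_count: 1,
  stop_sequences: ['\n\n'],
  max_output_tokens: 1024,
  temperature: 0.4, // lower values make output more deterministic
  top_p: 0.95,      // nucleus sampling: smallest token set with this probability mass
  top_k: 40,        // sample only from the 40 most likely tokens
};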
@@ -93,3 +108,16 @@ export declare interface GenerationConfig { | ||
/** | ||
* Harm categories that would cause prompts or candidates to be blocked. | ||
* @enum {string} | ||
* Harm categories that will block the content. | ||
* Values: | ||
* HARM_CATEGORY_UNSPECIFIED: | ||
* The harm category is unspecified. | ||
* HARM_CATEGORY_HATE_SPEECH: | ||
* The harm category is hate speech. | ||
* HARM_CATEGORY_DANGEROUS_CONTENT: | ||
* The harm category is dangerous content. | ||
* HARM_CATEGORY_HARASSMENT: | ||
* The harm category is harassment. | ||
* HARM_CATEGORY_SEXUALLY_EXPLICIT: | ||
* The harm category is sexually explicit | ||
* content. | ||
*/ | ||
@@ -104,3 +132,16 @@ export declare enum HarmCategory { | ||
/** | ||
* Threshold above which a prompt or candidate will be blocked. | ||
* @enum {string} | ||
* Probability-based threshold levels for blocking. | ||
* Values: | ||
* HARM_BLOCK_THRESHOLD_UNSPECIFIED: | ||
* Unspecified harm block threshold. | ||
* BLOCK_LOW_AND_ABOVE: | ||
* Block low threshold and above (i.e. block | ||
* more). | ||
* BLOCK_MEDIUM_AND_ABOVE: | ||
* Block medium threshold and above. | ||
* BLOCK_ONLY_HIGH: | ||
* Block only high threshold (i.e. block less). | ||
* BLOCK_NONE: | ||
* Block none. | ||
*/ | ||
@@ -115,3 +156,15 @@ export declare enum HarmBlockThreshold { | ||
/** | ||
* Probability that a prompt or candidate matches a harm category. | ||
* @enum {string} | ||
* Harm probability levels in the content. | ||
* Values: | ||
* HARM_PROBABILITY_UNSPECIFIED: | ||
* Harm probability unspecified. | ||
* NEGLIGIBLE: | ||
* Negligible level of harm. | ||
* LOW: | ||
* Low level of harm. | ||
* MEDIUM: | ||
* Medium level of harm. | ||
* HIGH: | ||
* High level of harm. | ||
*/ | ||
@@ -126,3 +179,5 @@ export declare enum HarmProbability { | ||
/** | ||
* Safety rating for a piece of content | ||
* Safety rating corresponding to the generated content. | ||
* @property {HarmCategory} - category. {@link HarmCategory} | ||
* @property {HarmProbability} - probability. {@link HarmProbability} | ||
*/ | ||
@@ -134,3 +189,6 @@ export declare interface SafetyRating { | ||
/** | ||
* A single turn in a conversation with the model | ||
* The base structured datatype containing multi-part content of a message. | ||
* @property {Part[]} - parts. Array of {@link Part} | ||
* @property {string} - [role]. The producer of the content. Must be either 'user' or 'model'. | ||
Useful to set for multi-turn conversations, otherwise can be left blank or unset. | ||
*/ | ||
@@ -143,15 +201,37 @@ export declare interface Content { | ||
* A part of a turn in a conversation with the model with a fixed MIME type. | ||
* | ||
* Exactly one of text or inline_data must be provided. | ||
* It has one of the following mutually exclusive fields: | ||
* 1. text | ||
* 2. inline_data | ||
* 3. file_data | ||
*/ | ||
export interface BasePart { | ||
} | ||
/** | ||
* A text part of a conversation with the model. | ||
* @property {string} - text. Only this property is expected for TextPart. | ||
* @property {never} - [inline_data]. inline_data is not expected for TextPart. | ||
* @property {never} - [file_data]. file_data is not expected for TextPart. | ||
* | ||
*/ | ||
export interface TextPart extends BasePart { | ||
text: string; | ||
inline_data?: never; | ||
file_data?: never; | ||
} | ||
/** | ||
* An inline data part of a conversation with the model. | ||
* @property {never} - [text]. text is not expected for InlineDataPart. | ||
* @property {GenerativeContentBlob} - inline_data. Only this property is expected for InlineDataPart. {@link GenerativeContentBlob} | ||
* @property {never} - [file_data]. file_data is not expected for InlineDataPart. | ||
*/ | ||
export interface InlineDataPart extends BasePart { | ||
text?: never; | ||
inline_data: GenerativeContentBlob; | ||
file_data?: never; | ||
} | ||
/** | ||
* URI based data. | ||
* @property {string} - mime_type. The IANA standard MIME type of the source data. | ||
* @property {string} - file_uri. URI of the file. | ||
*/ | ||
export interface FileData { | ||
@@ -161,6 +241,21 @@ mime_type: string; | ||
} | ||
/** | ||
* A file data part of a conversation with the model. | ||
* @property {never} - [text]. text is not expected for FileDataPart. | ||
* @property {never} - [inline_data]. inline_data is not expected for FileDataPart. | ||
* @property {FileData} - file_data. Only this property is expected for FileDataPart. {@link FileData} | ||
*/ | ||
export interface FileDataPart extends BasePart { | ||
text?: never; | ||
inline_data?: never; | ||
file_data: FileData; | ||
} | ||
/** | ||
* A datatype containing media that is part of a multi-part {@link Content} message. | ||
* A `Part` is a union type of {@link TextPart}, {@link InlineDataPart} and {@link FileDataPart} | ||
* A `Part` has one of the following mutually exclusive fields: | ||
* 1. text | ||
* 2. inline_data | ||
* 3. file_data | ||
*/ | ||
export declare type Part = TextPart | InlineDataPart | FileDataPart; | ||
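A sketch of the three variants; the never-typed fields make mixing them a compile-time error, and validateGcsInput (in index.js above) requires file_uri to start with gs://. The import and bucket paths are assumptions for illustration:

import {Part} from '@google-cloud/vertexai';

const textPart: Part = {text: 'Describe this image'};

const inlinePart: Part = {
  inline_data: {mime_type: 'image/png', data: '<base64-encoded bytes>'},
};

const filePart: Part = {
  file_data: {file_uri: 'gs://my-bucket/cat.png', mime_type: 'image/png'},
};

// Rejected by the compiler: text and file_data are mutually exclusive.
// const bad: Part = {text: 'x', file_data: filePart.file_data};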
@@ -170,2 +265,4 @@ /** | ||
* raw bytes. | ||
* @property {string} - mime_type. The MIME type of the source data. The only accepted values: "image/png" or "image/jpeg". | ||
* @property {string} - data. data must be base64 string | ||
*/ | ||
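A sketch of producing a conforming blob from a local file; the filename is a placeholder:

import {readFileSync} from 'fs';

const data = readFileSync('./scone.jpeg').toString('base64'); // base64 string, per the data contract
const inline_data = {mime_type: 'image/jpeg', data};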
@@ -177,3 +274,6 @@ export declare interface GenerativeContentBlob { | ||
/** | ||
* Metadata on token count for the request | ||
* Usage metadata about response(s). | ||
* @property {number} - [prompt_token_count]. Number of tokens in the request. | ||
* @property {number} - [candidates_token_count]. Number of tokens in the response(s). | ||
* @property {number} - [totalTokenCount]. Total number of tokens. | ||
*/ | ||
@@ -186,4 +286,6 @@ export declare interface UsageMetadata { | ||
/** | ||
* A set of the feedback metadata the prompt specified in | ||
* GenerateContentRequest.content. | ||
* Content filter results for a prompt sent in the request. | ||
* @property {BlockedReason} - block_reason. {@link BlockedReason} | ||
* @property {SafetyRating[]} - safety_ratings. Array of {@link SafetyRating} | ||
* @property {string} - block_reason_message. A readable block reason message. | ||
*/ | ||
@@ -195,2 +297,13 @@ export declare interface PromptFeedback { | ||
} | ||
/** | ||
* @enum {string} | ||
* The reason why the response is blocked. | ||
* Values: | ||
* BLOCKED_REASON_UNSPECIFIED | ||
* Unspecified blocked reason. | ||
* SAFETY | ||
* Candidates blocked due to safety. | ||
* OTHER | ||
* Candidates blocked due to other reason. | ||
*/ | ||
export declare enum BlockedReason { | ||
@@ -201,2 +314,27 @@ BLOCKED_REASON_UNSPECIFIED = "BLOCK_REASON_UNSPECIFIED", | ||
} | ||
/** | ||
* @enum {string} | ||
* The reason why the model stopped generating tokens. | ||
* If empty, the model has not stopped generating the tokens. | ||
* Values: | ||
* FINISH_REASON_UNSPECIFIED | ||
* The finish reason is unspecified. | ||
* STOP: | ||
* Natural stop point of the model or provided | ||
* stop sequence. | ||
* MAX_TOKENS: | ||
* The maximum number of tokens as specified in | ||
* the request was reached. | ||
* SAFETY: | ||
* The token generation was stopped as the | ||
* response was flagged for safety reasons. NOTE: | ||
* When streaming the Candidate.content will be | ||
* empty if content filters blocked the output. | ||
* RECITATION: | ||
* The token generation was stopped as the | ||
* response was flagged for unauthorized citations. | ||
* OTHER: | ||
* All other reasons that stopped the token | ||
* generation | ||
*/ | ||
export declare enum FinishReason { | ||
@@ -212,3 +350,3 @@ FINISH_REASON_UNSPECIFIED = "FINISH_REASON_UNSPECIFIED", | ||
* Wrapper for responses from a generateContent request | ||
* @see GenerateContentResponse | ||
* @property {GenerateContentResponse} - response. All GenerateContentResponses received so far {@link GenerateContentResponse} | ||
*/ | ||
@@ -219,4 +357,5 @@ export declare interface GenerateContentResult { | ||
/** | ||
* Wrapper for responses from a streamGenerateContent request | ||
* @see GenerateContentResponse | ||
* Wrapper for responses from a generateContent method when the `stream` parameter is `true` | ||
* @property {Promise<GenerateContentResponse>} - response. Promise of {@link GenerateContentResponse} | ||
* @property {AsyncGenerator<GenerateContentResponse>} - stream. Async iterable that provides one {@link GenerateContentResponse} at a time | ||
*/ | ||
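A sketch of consuming both halves of this wrapper: iterate `stream` for incremental chunks, then await `response` for the aggregate:

const streaming = await model.generateContentStream('Tell me a long story');
for await (const chunk of streaming.stream) {
  // Each chunk is a GenerateContentResponse carrying partial content.
  console.log(chunk.candidates[0]?.content.parts[0]?.text);
}
const aggregated = await streaming.response; // the combined GenerateContentResponse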
@@ -229,2 +368,5 @@ export declare interface StreamGenerateContentResult { | ||
* Response from the model supporting multiple candidates | ||
* @property {GenerateContentCandidate} - candidates. {@link GenerateContentCandidate} | ||
* @property {PromptFeedback} - [promptFeedback]. This is only populated if there are no candidates due to a safety block {@link PromptFeedback} | ||
* @property {UsageMetadata} - [usageMetadata]. {@link UsageMetadata} | ||
*/ | ||
@@ -237,3 +379,9 @@ export declare interface GenerateContentResponse { | ||
/** | ||
* Content candidate from the model | ||
* A response candidate generated from the model. | ||
* @property {Content} - content. {@link Content} | ||
* @property {number} - [index]. The index of the candidate in the {@link GenerateContentResponse} | ||
* @property {FinishReason} - [finishReason]. {@link FinishReason} | ||
* @property {string} - [finishMessage]. | ||
* @property {SafetyRating[]} - [safetyRatings]. Array of {@link SafetyRating} | ||
* @property {CitationMetadata} - [citationMetadata]. {@link CitationMetadata} | ||
*/ | ||
@@ -249,3 +397,4 @@ export declare interface GenerateContentCandidate { | ||
/** | ||
* Citation information for model-generated candidate. | ||
* A collection of source attributions for a piece of content. | ||
* @property {CitationSource[]} - citationSources. Array of {@link CitationSource} | ||
*/ | ||
@@ -256,3 +405,7 @@ export declare interface CitationMetadata { | ||
/** | ||
* Citations to sources for a specific response | ||
* Source attributions for content. | ||
* @property {number} - [startIndex] Start index into the content. | ||
* @property {number} - [endIndex] End index into the content. | ||
* @property {string} - [url] Url reference of the attribution. | ||
* @property {string} - [license] License of the attribution. | ||
*/ | ||
@@ -259,0 +412,0 @@ export declare interface CitationSource { |
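Tying the candidate-level fields together, a sketch of inspecting one response (reusing `model` from the first sketch):

const {response} = await model.generateContent('Quote a famous speech');
for (const candidate of response.candidates) {
  console.log(candidate.finishReason);  // e.g. 'STOP' or 'SAFETY'
  console.log(candidate.safetyRatings); // HarmCategory/HarmProbability pairs
  console.log(candidate.citationMetadata?.citationSources); // may be undefined
}
if (response.promptFeedback) {
  // Populated only when the prompt itself was blocked.
  console.log(response.promptFeedback.block_reason_message);
}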
@@ -21,3 +21,16 @@ "use strict"; | ||
/** | ||
* Harm categories that would cause prompts or candidates to be blocked. | ||
* @enum {string} | ||
* Harm categories that will block the content. | ||
* Values: | ||
* HARM_CATEGORY_UNSPECIFIED: | ||
* The harm category is unspecified. | ||
* HARM_CATEGORY_HATE_SPEECH: | ||
* The harm category is hate speech. | ||
* HARM_CATEGORY_DANGEROUS_CONTENT: | ||
* The harm category is dangerous content. | ||
* HARM_CATEGORY_HARASSMENT: | ||
* The harm category is harassment. | ||
* HARM_CATEGORY_SEXUALLY_EXPLICIT: | ||
* The harm category is sexually explicit | ||
* content. | ||
*/ | ||
@@ -33,57 +46,99 @@ var HarmCategory; | ||
/** | ||
* Threshold above which a prompt or candidate will be blocked. | ||
* @enum {string} | ||
* Probability-based threshold levels for blocking. | ||
* Values: | ||
* HARM_BLOCK_THRESHOLD_UNSPECIFIED: | ||
* Unspecified harm block threshold. | ||
* BLOCK_LOW_AND_ABOVE: | ||
* Block low threshold and above (i.e. block | ||
* more). | ||
* BLOCK_MEDIUM_AND_ABOVE: | ||
* Block medium threshold and above. | ||
* BLOCK_ONLY_HIGH: | ||
* Block only high threshold (i.e. block less). | ||
* BLOCK_NONE: | ||
* Block none. | ||
*/ | ||
var HarmBlockThreshold; | ||
(function (HarmBlockThreshold) { | ||
// Unspecified harm block threshold. | ||
HarmBlockThreshold["HARM_BLOCK_THRESHOLD_UNSPECIFIED"] = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"; | ||
// Block low threshold and above (i.e. block more). | ||
HarmBlockThreshold["BLOCK_LOW_AND_ABOVE"] = "BLOCK_LOW_AND_ABOVE"; | ||
// Block medium threshold and above. | ||
HarmBlockThreshold["BLOCK_MEDIUM_AND_ABOVE"] = "BLOCK_MEDIUM_AND_ABOVE"; | ||
// Block only high threshold (i.e. block less). | ||
HarmBlockThreshold["BLOCK_ONLY_HIGH"] = "BLOCK_ONLY_HIGH"; | ||
// Block none. | ||
HarmBlockThreshold["BLOCK_NONE"] = "BLOCK_NONE"; | ||
})(HarmBlockThreshold || (exports.HarmBlockThreshold = HarmBlockThreshold = {})); | ||
/** | ||
* Probability that a prompt or candidate matches a harm category. | ||
* @enum {string} | ||
* Harm probability levels in the content. | ||
* Values: | ||
* HARM_PROBABILITY_UNSPECIFIED: | ||
* Harm probability unspecified. | ||
* NEGLIGIBLE: | ||
* Negligible level of harm. | ||
* LOW: | ||
* Low level of harm. | ||
* MEDIUM: | ||
* Medium level of harm. | ||
* HIGH: | ||
* High level of harm. | ||
*/ | ||
var HarmProbability; | ||
(function (HarmProbability) { | ||
// Probability is unspecified. | ||
HarmProbability["HARM_PROBABILITY_UNSPECIFIED"] = "HARM_PROBABILITY_UNSPECIFIED"; | ||
// Content has a negligible chance of being unsafe. | ||
HarmProbability["NEGLIGIBLE"] = "NEGLIGIBLE"; | ||
// Content has a low chance of being unsafe. | ||
HarmProbability["LOW"] = "LOW"; | ||
// Content has a medium chance of being unsafe. | ||
HarmProbability["MEDIUM"] = "MEDIUM"; | ||
// Content has a high chance of being unsafe. | ||
HarmProbability["HIGH"] = "HIGH"; | ||
})(HarmProbability || (exports.HarmProbability = HarmProbability = {})); | ||
/** | ||
* @enum {string} | ||
* The reason why the response is blocked. | ||
* Values: | ||
* BLOCKED_REASON_UNSPECIFIED | ||
* Unspecified blocked reason. | ||
* SAFETY | ||
* Candidates blocked due to safety. | ||
* OTHER | ||
* Candidates blocked due to other reason. | ||
*/ | ||
var BlockedReason; | ||
(function (BlockedReason) { | ||
// A blocked reason was not specified. | ||
BlockedReason["BLOCKED_REASON_UNSPECIFIED"] = "BLOCK_REASON_UNSPECIFIED"; | ||
// Content was blocked by safety settings. | ||
BlockedReason["SAFETY"] = "SAFETY"; | ||
// Content was blocked, but the reason is uncategorized. | ||
BlockedReason["OTHER"] = "OTHER"; | ||
})(BlockedReason || (exports.BlockedReason = BlockedReason = {})); | ||
/** | ||
* @enum {string} | ||
* The reason why the model stopped generating tokens. | ||
* If empty, the model has not stopped generating the tokens. | ||
* Values: | ||
* FINISH_REASON_UNSPECIFIED | ||
* The finish reason is unspecified. | ||
* STOP: | ||
* Natural stop point of the model or provided | ||
* stop sequence. | ||
* MAX_TOKENS: | ||
* The maximum number of tokens as specified in | ||
* the request was reached. | ||
* SAFETY: | ||
* The token generation was stopped as the | ||
* response was flagged for safety reasons. NOTE: | ||
* When streaming the Candidate.content will be | ||
* empty if content filters blocked the output. | ||
* RECITATION: | ||
* The token generation was stopped as the | ||
* response was flagged for unauthorized citations. | ||
* OTHER: | ||
* All other reasons that stopped the token | ||
* generation | ||
*/ | ||
var FinishReason; | ||
(function (FinishReason) { | ||
// Default value. This value is unused. | ||
FinishReason["FINISH_REASON_UNSPECIFIED"] = "FINISH_REASON_UNSPECIFIED"; | ||
// Natural stop point of the model or provided stop sequence. | ||
FinishReason["STOP"] = "STOP"; | ||
// The maximum number of tokens as specified in the request was reached. | ||
FinishReason["MAX_TOKENS"] = "MAX_TOKENS"; | ||
// The candidate content was flagged for safety reasons. | ||
FinishReason["SAFETY"] = "SAFETY"; | ||
// The candidate content was flagged for recitation reasons. | ||
FinishReason["RECITATION"] = "RECITATION"; | ||
// Unknown reason. | ||
FinishReason["OTHER"] = "OTHER"; | ||
})(FinishReason || (exports.FinishReason = FinishReason = {})); | ||
//# sourceMappingURL=content.js.map |
@@ -21,2 +21,2 @@ /** | ||
export declare const MODEL_ROLE = "model"; | ||
export declare const USER_AGENT = "model-builder/0.2.0 grpc-node/0.2.0"; | ||
export declare const USER_AGENT = "model-builder/0.2.1 grpc-node/0.2.1"; |
@@ -25,5 +25,5 @@ "use strict"; | ||
const USER_AGENT_PRODUCT = 'model-builder'; | ||
const CLIENT_LIBRARY_VERSION = '0.2.0'; | ||
const CLIENT_LIBRARY_VERSION = '0.2.1'; // x-release-please-version | ||
const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`; | ||
exports.USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`; | ||
//# sourceMappingURL=constants.js.map |
@@ -176,9 +176,7 @@ "use strict"; | ||
const chat = textModelNoOutputLimit.startChat({}); | ||
const chatInput1 = 'Tell me a story in 1000 words'; | ||
const chatInput1 = 'Tell me a story in 3000 words'; | ||
const result1 = await chat.sendMessageStream(chatInput1); | ||
let firstChunkTimestamp = 0; | ||
let aggregatedResultTimestamp = 0; | ||
// To verify streaming is working correctly, we check that there is >= 2 | ||
// second difference between the first chunk and the aggregated result | ||
const streamThreshold = 2000; | ||
const firstChunkFinalResultTimeDiff = 200; // ms | ||
for await (const item of result1.stream) { | ||
@@ -191,3 +189,3 @@ if (firstChunkTimestamp === 0) { | ||
aggregatedResultTimestamp = Date.now(); | ||
expect(aggregatedResultTimestamp - firstChunkTimestamp).toBeGreaterThan(streamThreshold); | ||
expect(aggregatedResultTimestamp - firstChunkTimestamp).toBeGreaterThan(firstChunkFinalResultTimeDiff); | ||
}); | ||
@@ -203,3 +201,3 @@ }); | ||
beforeEach(() => { | ||
jasmine.DEFAULT_TIMEOUT_INTERVAL = 20000; | ||
jasmine.DEFAULT_TIMEOUT_INTERVAL = 25000; | ||
}); | ||
@@ -214,3 +212,3 @@ it('should return a stream and aggregated response when passed text', async () => { | ||
}); | ||
it('should should return a stream and aggregated response when passed multipart base64 content when using models/gemini-pro-vision', async () => { | ||
it('should return a stream and aggregated response when passed multipart base64 content when using models/gemini-pro-vision', async () => { | ||
const streamingResp = await generativeVisionModelWithPrefix.generateContentStream(MULTI_PART_BASE64_REQUEST); | ||
@@ -221,5 +219,5 @@ for await (const item of streamingResp.stream) { | ||
const aggregatedResp = await streamingResp.response; | ||
assert(aggregatedResp.candidates[0], `sys test failure on generateContentStream using models/gemini-pro-visionfor aggregated response: ${aggregatedResp}`); | ||
assert(aggregatedResp.candidates[0], `sys test failure on generateContentStream using models/gemini-pro-vision for aggregated response: ${aggregatedResp}`); | ||
}); | ||
}); | ||
//# sourceMappingURL=end_to_end_sample_test.js.map |
@@ -27,4 +27,5 @@ "use strict"; | ||
const LOCATION = 'test_location'; | ||
const TEST_CHAT_MESSSAGE_TEXT = 'How are you doing today?'; | ||
const TEST_USER_CHAT_MESSAGE = [ | ||
{ role: util_1.constants.USER_ROLE, parts: [{ text: 'How are you doing today?' }] }, | ||
{ role: util_1.constants.USER_ROLE, parts: [{ text: TEST_CHAT_MESSSAGE_TEXT }] }, | ||
]; | ||
@@ -36,3 +37,3 @@ const TEST_TOKEN = 'testtoken'; | ||
parts: [ | ||
{ text: 'How are you doing today?' }, | ||
{ text: TEST_CHAT_MESSSAGE_TEXT }, | ||
{ | ||
@@ -51,3 +52,3 @@ file_data: { | ||
parts: [ | ||
{ text: 'How are you doing today?' }, | ||
{ text: TEST_CHAT_MESSSAGE_TEXT }, | ||
{ file_data: { file_uri: 'test_image.jpeg', mime_type: 'image/jpeg' } }, | ||
@@ -201,2 +202,10 @@ ], | ||
}); | ||
it('returns a GenerateContentResponse when passed a string', async () => { | ||
const expectedResult = { | ||
response: TEST_MODEL_RESPONSE, | ||
}; | ||
spyOn(StreamFunctions, 'processStream').and.returnValue(expectedStreamResult); | ||
const resp = await model.generateContent(TEST_CHAT_MESSSAGE_TEXT); | ||
expect(resp).toEqual(expectedResult); | ||
}); | ||
}); | ||
@@ -378,2 +387,11 @@ describe('generateContent', () => { | ||
}); | ||
it('returns a GenerateContentResponse when passed a string', async () => { | ||
const expectedResult = { | ||
response: Promise.resolve(TEST_MODEL_RESPONSE), | ||
stream: testGenerator(), | ||
}; | ||
spyOn(StreamFunctions, 'processStream').and.returnValue(expectedResult); | ||
const resp = await model.generateContentStream(TEST_CHAT_MESSSAGE_TEXT); | ||
expect(resp).toEqual(expectedResult); | ||
}); | ||
}); | ||
@@ -380,0 +398,0 @@ describe('generateContentStream', () => { |
# Changelog | ||
## [0.2.1](https://github.com/googleapis/nodejs-vertexai/compare/v0.2.0...v0.2.1) (2024-01-05) | ||
### Bug Fixes | ||
* enable passing only a string to generateContent and generateContentStream ([c50811e](https://github.com/googleapis/nodejs-vertexai/commit/c50811e5443848edb8f9ce5d88ae4c6c8b59b65b)) | ||
## [0.2.0](https://github.com/googleapis/nodejs-vertexai/compare/v0.1.3...v0.2.0) (2024-01-03) | ||
@@ -4,0 +12,0 @@ |
{ | ||
"name": "@google-cloud/vertexai", | ||
"description": "Vertex Generative AI client for Node.js", | ||
"version": "0.2.0", | ||
"version": "0.2.1", | ||
"license": "Apache-2.0", | ||
@@ -6,0 +6,0 @@ "author": "Google LLC", |
@@ -6,4 +6,8 @@ { | ||
"packages": { | ||
".": {} | ||
".": { | ||
"extra-files": [ | ||
"src/util/constants.ts" | ||
] | ||
} | ||
} | ||
} |
@@ -162,2 +162,5 @@ /** | ||
* Params to initiate a multiturn chat with the model via startChat | ||
* @property {Content[]} - [history] history of the chat session. {@link Content} | ||
* @property {SafetySetting[]} - [safety_settings] Array of {@link SafetySetting} | ||
* @property {GenerationConfig} - [generation_config] {@link GenerationConfig} | ||
*/ | ||
@@ -168,3 +171,2 @@ export declare interface StartChatParams { | ||
generation_config?: GenerationConfig; | ||
stream?: boolean; | ||
} | ||
@@ -178,4 +180,4 @@ | ||
* All params passed to initiate multiturn chat via startChat | ||
* @see VertexAI_Preview for details on _vertex_instance parameter | ||
* @see GenerativeModel for details on _model_instance parameter | ||
* @property {VertexAI_Preview} - _vertex_instance {@link VertexAI_Preview} | ||
* @property {GenerativeModel} - _model_instance {@link GenerativeModel} | ||
*/ | ||
@@ -191,3 +193,2 @@ export declare interface StartChatSessionRequest extends StartChatParams { | ||
* `sendMessageStream` method makes async call to stream response of a chat message. | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
@@ -209,2 +210,6 @@ export class ChatSession { | ||
/** | ||
* @constructor | ||
* @param {StartChatSessionRequest} request - {@link StartChatSessionRequest} | ||
*/ | ||
constructor(request: StartChatSessionRequest) { | ||
@@ -355,4 +360,10 @@ this.project = request._vertex_instance.project; | ||
async generateContent( | ||
request: GenerateContentRequest | ||
request: GenerateContentRequest | string | ||
): Promise<GenerateContentResult> { | ||
request = formatContentRequest( | ||
request, | ||
this.generation_config, | ||
this.safety_settings | ||
); | ||
validateGcsInput(request.contents); | ||
@@ -405,4 +416,9 @@ | ||
async generateContentStream( | ||
request: GenerateContentRequest | ||
request: GenerateContentRequest | string | ||
): Promise<StreamGenerateContentResult> { | ||
request = formatContentRequest( | ||
request, | ||
this.generation_config, | ||
this.safety_settings | ||
); | ||
validateGcsInput(request.contents); | ||
@@ -520,2 +536,3 @@ | ||
if ('file_data' in part) { | ||
// @ts-ignore | ||
const uri = part['file_data']['file_uri']; | ||
@@ -542,1 +559,17 @@ if (!uri.startsWith('gs://')) { | ||
} | ||
function formatContentRequest( | ||
request: GenerateContentRequest | string, | ||
generation_config?: GenerationConfig, | ||
safety_settings?: SafetySetting[] | ||
): GenerateContentRequest { | ||
if (typeof request === 'string') { | ||
return { | ||
contents: [{role: constants.USER_ROLE, parts: [{text: request}]}], | ||
generation_config: generation_config, | ||
safety_settings: safety_settings, | ||
}; | ||
} else { | ||
return request; | ||
} | ||
} |
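This helper is what makes the string overloads above work: a bare string becomes a single user-role Content with one text Part, and the model-level generation_config and safety_settings are folded in. An equivalence sketch:

// Equivalent requests: the string form is expanded by formatContentRequest,
// which also attaches any model-level generation_config / safety_settings.
await model.generateContentStream('Tell me a joke');
await model.generateContentStream({
  contents: [{role: 'user', parts: [{text: 'Tell me a joke'}]}],
});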
@@ -38,3 +38,4 @@ /** | ||
/** | ||
* Params used by the generateContent endpoint | ||
* Params used to call the generateContent method. | ||
* @property {Content[]} - contents. Array of {@link Content} | ||
*/ | ||
@@ -46,3 +47,4 @@ export declare interface GenerateContentRequest extends BaseModelParams { | ||
/** | ||
* Params used to call countTokens | ||
* Params used to call the countTokens method. | ||
* @property {Content[]} - contents. Array of {@link Content} | ||
*/ | ||
@@ -54,3 +56,6 @@ export declare interface CountTokensRequest { | ||
/** | ||
* Response returned from countTokens | ||
* Response returned from countTokens method. | ||
* @property {number} - totalTokens. The total number of tokens counted across all instances from the request. | ||
* @property {number} - [totalBillableCharacters]. The total number of billable characters counted across all instances from the request. | ||
* | ||
*/ | ||
@@ -64,3 +69,3 @@ export declare interface CountTokensResponse { | ||
* Configuration for initializing a model, for example via getGenerativeModel | ||
* @param {string} model - model name. | ||
* @property {string} model - model name. | ||
* @example "gemini-pro" | ||
@@ -73,3 +78,5 @@ */ | ||
/** | ||
* Base params for initializing a model or calling GenerateContent | ||
* Base params for initializing a model or calling GenerateContent. | ||
* @property {SafetySetting[]} - [safety_settings] Array of {@link SafetySetting} | ||
* @property {GenerationConfig} - [generation_config] {@link GenerationConfig} | ||
*/ | ||
@@ -82,3 +89,5 @@ export declare interface BaseModelParams { | ||
/** | ||
* Safety feedback for an entire request | ||
* Safety feedback for an entire request. | ||
* @property {HarmCategory} - category. {@link HarmCategory} | ||
* @property {HarmBlockThreshold} - threshold. {@link HarmBlockThreshold} | ||
*/ | ||
@@ -92,2 +101,8 @@ export declare interface SafetySetting { | ||
* Configuration options for model generation and outputs | ||
* @property {number} - [candidate_count] Number of candidates to generate. | ||
* @property {string[]} - [stop_sequences] Stop sequences. | ||
* @property {number} - [max_output_tokens] The maximum number of output tokens to generate per message. | ||
* @property {number} - [temperature] Controls the randomness of predictions. | ||
* @property {number} - [top_p] If specified, nucleus sampling will be used. | ||
* @property {number} - [top_k] If specified, top-k sampling will be used. | ||
*/ | ||
@@ -103,3 +118,16 @@ export declare interface GenerationConfig { | ||
/** | ||
* Harm categories that would cause prompts or candidates to be blocked. | ||
* @enum {string} | ||
* Harm categories that will block the content. | ||
* Values: | ||
* HARM_CATEGORY_UNSPECIFIED: | ||
* The harm category is unspecified. | ||
* HARM_CATEGORY_HATE_SPEECH: | ||
* The harm category is hate speech. | ||
* HARM_CATEGORY_DANGEROUS_CONTENT: | ||
* The harm category is dangerous content. | ||
* HARM_CATEGORY_HARASSMENT: | ||
* The harm category is harassment. | ||
* HARM_CATEGORY_SEXUALLY_EXPLICIT: | ||
* The harm category is sexually explicit | ||
* content. | ||
*/ | ||
@@ -115,14 +143,22 @@ export enum HarmCategory { | ||
/** | ||
* Threshold above which a prompt or candidate will be blocked. | ||
* @enum {string} | ||
* Probability-based threshold levels for blocking. | ||
* Values: | ||
* HARM_BLOCK_THRESHOLD_UNSPECIFIED: | ||
* Unspecified harm block threshold. | ||
* BLOCK_LOW_AND_ABOVE: | ||
* Block low threshold and above (i.e. block | ||
* more). | ||
* BLOCK_MEDIUM_AND_ABOVE: | ||
* Block medium threshold and above. | ||
* BLOCK_ONLY_HIGH: | ||
* Block only high threshold (i.e. block less). | ||
* BLOCK_NONE: | ||
* Block none. | ||
*/ | ||
export enum HarmBlockThreshold { | ||
// Unspecified harm block threshold. | ||
HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED', | ||
// Block low threshold and above (i.e. block more). | ||
BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE', | ||
// Block medium threshold and above. | ||
BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE', | ||
// Block only high threshold (i.e. block less). | ||
BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH', | ||
// Block none. | ||
BLOCK_NONE = 'BLOCK_NONE', | ||
@@ -132,14 +168,21 @@ } | ||
/** | ||
* Probability that a prompt or candidate matches a harm category. | ||
* @enum {string} | ||
* Harm probability levels in the content. | ||
* Values: | ||
* HARM_PROBABILITY_UNSPECIFIED: | ||
* Harm probability unspecified. | ||
* NEGLIGIBLE: | ||
* Negligible level of harm. | ||
* LOW: | ||
* Low level of harm. | ||
* MEDIUM: | ||
* Medium level of harm. | ||
* HIGH: | ||
* High level of harm. | ||
*/ | ||
export enum HarmProbability { | ||
// Probability is unspecified. | ||
HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED', | ||
// Content has a negligible chance of being unsafe. | ||
NEGLIGIBLE = 'NEGLIGIBLE', | ||
// Content has a low chance of being unsafe. | ||
LOW = 'LOW', | ||
// Content has a medium chance of being unsafe. | ||
MEDIUM = 'MEDIUM', | ||
// Content has a high chance of being unsafe. | ||
HIGH = 'HIGH', | ||
@@ -149,3 +192,5 @@ } | ||
/** | ||
* Safety rating for a piece of content | ||
* Safety rating corresponding to the generated content. | ||
* @property {HarmCategory} - category. {@link HarmCategory} | ||
* @property {HarmProbability} - probability. {@link HarmProbability} | ||
*/ | ||
@@ -158,3 +203,6 @@ export declare interface SafetyRating { | ||
/** | ||
* A single turn in a conversation with the model | ||
* The base structured datatype containing multi-part content of a message. | ||
* @property {Part[]} - parts. Array of {@link Part} | ||
* @property {string} - [role]. The producer of the content. Must be either 'user' or 'model'. | ||
Useful to set for multi-turn conversations, otherwise can be left blank or unset. | ||
*/ | ||
@@ -168,4 +216,6 @@ export declare interface Content { | ||
* A part of a turn in a conversation with the model with a fixed MIME type. | ||
* | ||
* Exactly one of text or inline_data must be provided. | ||
* It has one of the following mutually exclusive fields: | ||
* 1. text | ||
* 2. inline_data | ||
* 3. file_data | ||
*/ | ||
@@ -176,12 +226,32 @@ // TODO: Adjust so one must be true. | ||
/** | ||
* A text part of a conversation with the model. | ||
* @property {string} - text. Only this property is expected for TextPart. | ||
* @property {never} - [inline_data]. inline_data is not expected for TextPart. | ||
* @property {never} - [file_data]. file_data is not expected for TextPart. | ||
* | ||
*/ | ||
export interface TextPart extends BasePart { | ||
text: string; | ||
inline_data?: never; | ||
file_data?: never; | ||
} | ||
/** | ||
* An inline data part of a conversation with the model. | ||
* @property {never} - [text]. text is not expected for InlineDataPart. | ||
* @property {GenerativeContentBlob} - inline_data. Only this property is expected for InlineDataPart. {@link GenerativeContentBlob} | ||
* @property {never} - [file_data]. file_data is not expected for InlineDataPart. | ||
*/ | ||
export interface InlineDataPart extends BasePart { | ||
text?: never; | ||
inline_data: GenerativeContentBlob; | ||
file_data?: never; | ||
} | ||
/** | ||
* URI based data. | ||
* @property {string} - mime_type. The IANA standard MIME type of the source data. | ||
* @property {string} - file_uri. URI of the file. | ||
*/ | ||
export interface FileData { | ||
@@ -192,7 +262,22 @@ mime_type: string; | ||
/** | ||
* A file data part of a conversation with the model. | ||
* @property {never} - [text]. text is not expected for FileDataPart. | ||
* @property {never} - [inline_data]. inline_data is not expected for FileDataPart. | ||
* @property {FileData} - file_data. Only this property is expected for FileDataPart. {@link FileData} | ||
*/ | ||
export interface FileDataPart extends BasePart { | ||
text?: never; | ||
inline_data?: never; | ||
file_data: FileData; | ||
} | ||
/** | ||
* A datatype containing media that is part of a multi-part {@link Content} message. | ||
* A `Part` is a union type of {@link TextPart}, {@link InlineDataPart} and {@link FileDataPart} | ||
* A `Part` has one of the following mutually exclusive fields: | ||
* 1. text | ||
* 2. inline_data | ||
* 3. file_data | ||
*/ | ||
export declare type Part = TextPart | InlineDataPart | FileDataPart; | ||
@@ -203,8 +288,7 @@ | ||
* raw bytes. | ||
* @property {string} - mime_type. The MIME type of the source data. The only accepted values: "image/png" or "image/jpeg". | ||
* @property {string} - data. data must be base64 string | ||
*/ | ||
export declare interface GenerativeContentBlob { | ||
// The MIME type of the source data. The only accepted values: "image/png", | ||
// "image/jpeg". | ||
mime_type: string; | ||
// image must be base64 string | ||
data: string; | ||
@@ -214,3 +298,6 @@ } | ||
/** | ||
* Metadata on token count for the request | ||
* Usage metadata about response(s). | ||
* @property {number} - [prompt_token_count]. Number of tokens in the request. | ||
* @property {number} - [candidates_token_count]. Number of tokens in the response(s). | ||
* @property {number} - [totalTokenCount]. Total number of tokens. | ||
*/ | ||
@@ -224,4 +311,6 @@ export declare interface UsageMetadata { | ||
/** | ||
* A set of the feedback metadata the prompt specified in | ||
* GenerateContentRequest.content. | ||
* Content filter results for a prompt sent in the request. | ||
* @property {BlockedReason} - block_reason. {@link BlockedReason} | ||
* @property {SafetyRating[]} - safety_ratings. Array of {@link SafetyRating} | ||
* @property {string} - block_reason_message. A readable block reason message. | ||
*/ | ||
@@ -234,23 +323,50 @@ export declare interface PromptFeedback { | ||
/** | ||
* @enum {string} | ||
* The reason why the response is blocked. | ||
* Values: | ||
* BLOCKED_REASON_UNSPECIFIED | ||
* Unspecified blocked reason. | ||
* SAFETY | ||
* Candidates blocked due to safety. | ||
* OTHER | ||
* Candidates blocked due to other reason. | ||
*/ | ||
export enum BlockedReason { | ||
// A blocked reason was not specified. | ||
BLOCKED_REASON_UNSPECIFIED = 'BLOCK_REASON_UNSPECIFIED', | ||
// Content was blocked by safety settings. | ||
SAFETY = 'SAFETY', | ||
// Content was blocked, but the reason is uncategorized. | ||
OTHER = 'OTHER', | ||
} | ||
/** | ||
* @enum {string} | ||
* The reason why the model stopped generating tokens. | ||
* If empty, the model has not stopped generating the tokens. | ||
* Values: | ||
* FINISH_REASON_UNSPECIFIED | ||
* The finish reason is unspecified. | ||
* STOP: | ||
* Natural stop point of the model or provided | ||
* stop sequence. | ||
* MAX_TOKENS: | ||
* The maximum number of tokens as specified in | ||
* the request was reached. | ||
* SAFETY: | ||
* The token generation was stopped as the | ||
* response was flagged for safety reasons. NOTE: | ||
* When streaming the Candidate.content will be | ||
* empty if content filters blocked the output. | ||
* RECITATION: | ||
* The token generation was stopped as the | ||
* response was flagged for unauthorized citations. | ||
* OTHER: | ||
* All other reasons that stopped the token | ||
* generation | ||
*/ | ||
export enum FinishReason { | ||
// Default value. This value is unused. | ||
FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED', | ||
// Natural stop point of the model or provided stop sequence. | ||
STOP = 'STOP', | ||
// The maximum number of tokens as specified in the request was reached. | ||
MAX_TOKENS = 'MAX_TOKENS', | ||
// The candidate content was flagged for safety reasons. | ||
SAFETY = 'SAFETY', | ||
// The candidate content was flagged for recitation reasons. | ||
RECITATION = 'RECITATION', | ||
// Unknown reason. | ||
OTHER = 'OTHER', | ||
@@ -261,6 +377,5 @@ } | ||
* Wrapper for responses from a generateContent request | ||
* @see GenerateContentResponse | ||
* @property {GenerateContentResponse} - response. All GenerateContentResponses received so far {@link GenerateContentResponse} | ||
*/ | ||
export declare interface GenerateContentResult { | ||
// All GenerateContentResponses received so far | ||
response: GenerateContentResponse; | ||
@@ -270,7 +385,7 @@ } | ||
/** | ||
* Wrapper for responses from a streamGenerateContent request | ||
* @see GenerateContentResponse | ||
* Wrapper for responses from a generateContent method when the `stream` parameter is `true` | ||
* @property {Promise<GenerateContentResponse>} - response. Promise of {@link GenerateContentResponse} | ||
* @property {AsyncGenerator<GenerateContentResponse>} - stream. Async iterable that provides one {@link GenerateContentResponse} at a time | ||
*/ | ||
export declare interface StreamGenerateContentResult { | ||
// Async iterable that provides one GenerateContentResponse at a time | ||
response: Promise<GenerateContentResponse>; | ||
@@ -282,6 +397,8 @@ stream: AsyncGenerator<GenerateContentResponse>; | ||
* Response from the model supporting multiple candidates | ||
* @property {GenerateContentCandidate} - candidates. {@link GenerateContentCandidate} | ||
* @property {PromptFeedback} - [promptFeedback]. This is only populated if there are no candidates due to a safety block {@link PromptFeedback} | ||
* @property {UsageMetadata} - [usageMetadata]. {@link UsageMetadata} | ||
*/ | ||
export declare interface GenerateContentResponse { | ||
candidates: GenerateContentCandidate[]; | ||
// This is only populated if there are no candidates due to a safety block | ||
promptFeedback?: PromptFeedback; | ||
@@ -292,3 +409,9 @@ usageMetadata?: UsageMetadata; | ||
/** | ||
* Content candidate from the model | ||
* A response candidate generated from the model. | ||
* @property {Content} - content. {@link Content} | ||
* @property {number} - [index]. The index of the candidate in the {@link GenerateContentResponse} | ||
* @property {FinishReason} - [finishReason]. {@link FinishReason} | ||
* @property {string} - [finishMessage]. | ||
* @property {SafetyRating[]} - [safetyRatings]. Array of {@link SafetyRating} | ||
* @property {CitationMetadata} - [citationMetadata]. {@link CitationMetadata} | ||
*/ | ||
@@ -305,3 +428,4 @@ export declare interface GenerateContentCandidate { | ||
/** | ||
* Citation information for model-generated candidate. | ||
* A collection of source attributions for a piece of content. | ||
* @property {CitationSource[]} - citationSources. Array of {@link CitationSource} | ||
*/ | ||
@@ -313,3 +437,7 @@ export declare interface CitationMetadata { | ||
/** | ||
* Citations to sources for a specific response | ||
* Source attributions for content. | ||
* @property {number} - [startIndex] Start index into the content. | ||
* @property {number} - [endIndex] End index into the content. | ||
* @property {string} - [url] Url reference of the attribution. | ||
* @property {string} - [license] License of the attribution. | ||
*/ | ||
@@ -316,0 +444,0 @@ export declare interface CitationSource { |
@@ -22,4 +22,4 @@ /** | ||
const USER_AGENT_PRODUCT = 'model-builder'; | ||
const CLIENT_LIBRARY_VERSION = '0.2.0'; | ||
const CLIENT_LIBRARY_VERSION = '0.2.1'; // x-release-please-version | ||
const CLIENT_LIBRARY_LANGUAGE = `grpc-node/${CLIENT_LIBRARY_VERSION}`; | ||
export const USER_AGENT = `${USER_AGENT_PRODUCT}/${CLIENT_LIBRARY_VERSION} ${CLIENT_LIBRARY_LANGUAGE}`; |
@@ -238,3 +238,3 @@ /** | ||
const chat = textModelNoOutputLimit.startChat({}); | ||
const chatInput1 = 'Tell me a story in 1000 words'; | ||
const chatInput1 = 'Tell me a story in 3000 words'; | ||
const result1 = await chat.sendMessageStream(chatInput1); | ||
@@ -244,5 +244,3 @@ let firstChunkTimestamp = 0; | ||
// To verify streaming is working correctly, we check that there is >= 2 | ||
// second difference between the first chunk and the aggregated result | ||
const streamThreshold = 2000; | ||
const firstChunkFinalResultTimeDiff = 200; // ms | ||
@@ -257,3 +255,3 @@ for await (const item of result1.stream) { | ||
expect(aggregatedResultTimestamp - firstChunkTimestamp).toBeGreaterThan( | ||
streamThreshold | ||
firstChunkFinalResultTimeDiff | ||
); | ||
@@ -275,3 +273,3 @@ }); | ||
beforeEach(() => { | ||
jasmine.DEFAULT_TIMEOUT_INTERVAL = 20000; | ||
jasmine.DEFAULT_TIMEOUT_INTERVAL = 25000; | ||
}); | ||
@@ -297,3 +295,3 @@ | ||
it('should should return a stream and aggregated response when passed multipart base64 content when using models/gemini-pro-vision', async () => { | ||
it('should return a stream and aggregated response when passed multipart base64 content when using models/gemini-pro-vision', async () => { | ||
const streamingResp = | ||
@@ -314,5 +312,5 @@ await generativeVisionModelWithPrefix.generateContentStream( | ||
aggregatedResp.candidates[0], | ||
`sys test failure on generateContentStream using models/gemini-pro-visionfor aggregated response: ${aggregatedResp}` | ||
`sys test failure on generateContentStream using models/gemini-pro-vision for aggregated response: ${aggregatedResp}` | ||
); | ||
}); | ||
}); |
@@ -44,4 +44,5 @@ /** | ||
const LOCATION = 'test_location'; | ||
const TEST_CHAT_MESSSAGE_TEXT = 'How are you doing today?'; | ||
const TEST_USER_CHAT_MESSAGE = [ | ||
{role: constants.USER_ROLE, parts: [{text: 'How are you doing today?'}]}, | ||
{role: constants.USER_ROLE, parts: [{text: TEST_CHAT_MESSSAGE_TEXT}]}, | ||
]; | ||
@@ -54,3 +55,3 @@ const TEST_TOKEN = 'testtoken'; | ||
parts: [ | ||
{text: 'How are you doing today?'}, | ||
{text: TEST_CHAT_MESSSAGE_TEXT}, | ||
{ | ||
@@ -70,3 +71,3 @@ file_data: { | ||
parts: [ | ||
{text: 'How are you doing today?'}, | ||
{text: TEST_CHAT_MESSSAGE_TEXT}, | ||
{file_data: {file_uri: 'test_image.jpeg', mime_type: 'image/jpeg'}}, | ||
@@ -240,2 +241,12 @@ ], | ||
}); | ||
it('returns a GenerateContentResponse when passed a string', async () => { | ||
const expectedResult: GenerateContentResult = { | ||
response: TEST_MODEL_RESPONSE, | ||
}; | ||
spyOn(StreamFunctions, 'processStream').and.returnValue( | ||
expectedStreamResult | ||
); | ||
const resp = await model.generateContent(TEST_CHAT_MESSSAGE_TEXT); | ||
expect(resp).toEqual(expectedResult); | ||
}); | ||
}); | ||
@@ -457,2 +468,11 @@ | ||
}); | ||
it('returns a GenerateContentResponse when passed a string', async () => { | ||
const expectedResult: StreamGenerateContentResult = { | ||
response: Promise.resolve(TEST_MODEL_RESPONSE), | ||
stream: testGenerator(), | ||
}; | ||
spyOn(StreamFunctions, 'processStream').and.returnValue(expectedResult); | ||
const resp = await model.generateContentStream(TEST_CHAT_MESSSAGE_TEXT); | ||
expect(resp).toEqual(expectedResult); | ||
}); | ||
}); | ||
@@ -459,0 +479,0 @@ |
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package