@hypermode/models-as
Comparing version 0.1.6 to 0.2.0
import { Model } from "../..";
// Reference: https://platform.openai.com/docs/api-reference/chat
/**
 * Provides input and output types that conform to the OpenAI Chat API.
 *
 * Reference: https://platform.openai.com/docs/api-reference/chat
 */
export class ChatModel extends Model<ChatInput, ChatOutput> {
/**
 * Creates an input object for the OpenAI Chat API.
 *
 * @param messages An array of messages to send to the chat model.
 * @returns An input object that can be passed to the `invoke` method.
 */
createInput(messages: Message[]): ChatInput {
@@ -12,9 +21,31 @@ const model = this.info.fullName;
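For illustration, a minimal usage sketch of this class; the `models.getModel` host import, the import paths, and the model name "text-generator" are assumptions, not part of this diff:

// Sketch: invoking a chat model through this interface (assumed host API).
import { models } from "@hypermode/functions-as"; // assumed host import
import { ChatModel, SystemMessage, UserMessage } from "@hypermode/models-as/models/openai/chat"; // assumed path

export function generateText(prompt: string): string {
  const model = models.getModel<ChatModel>("text-generator"); // model name is hypothetical
  const input = model.createInput([
    new SystemMessage("You are a helpful assistant."),
    new UserMessage(prompt),
  ]);
  input.temperature = 0.7;
  const output = model.invoke(input);
  return output.choices[0].message.content.trim();
}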
/**
 * The input object for the OpenAI Chat API.
 */
@json
class ChatInput {
/**
 * The name of the model to use for the chat.
 * Must be the exact string expected by the model provider.
 * For example, "gpt-3.5-turbo".
 *
 * @remarks
 * This field is automatically set by the `createInput` method when creating this object.
 * It does not need to be set manually.
 */
model!: string;
/**
 * An array of messages to send to the chat model.
 */
messages!: Message[];
/**
 * Number between `-2.0` and `2.0`.
 *
 * Positive values penalize new tokens based on their existing frequency in the text so far,
 * decreasing the model's likelihood to repeat the same line verbatim.
 *
 * @default 0.0
 */
@alias("frequency_penalty")
@@ -24,3 +55,10 @@ @omitif("this.frequencyPenalty == 0.0")
/**
 * Modifies the likelihood of specified tokens appearing in the completion.
 *
 * Accepts an object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
 * Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
 * but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban
 * or exclusive selection of the relevant token.
 */
@alias("logit_bias")
@@ -30,7 +68,16 @@ @omitnull()
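As a sketch of the `logitBias` field described above; the field's concrete declaration is collapsed in this diff, so a `Map<string, f64>` keyed by token ID is assumed here:

// Hypothetical: ban token 50256 and encourage token 1234 (token IDs are tokenizer-specific).
const bias = new Map<string, f64>();
bias.set("50256", -100.0); // effectively bans the token
bias.set("1234", 1.0);     // mildly increases its likelihood
input.logitBias = bias;    // assumes the field is a nullable Map<string, f64>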
/**
 * Whether to return log probabilities of the output tokens or not.
 *
 * If true, returns the log probabilities of each output token returned in the content of the message.
 *
 * @default false
 */
@omitif("this.logprobs == false")
logprobs: bool = false;
/**
 * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position,
 * each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
 */
@alias("top_logprobs")
@@ -40,3 +87,7 @@ @omitif("this.logprobs == false")
/**
 * The maximum number of tokens to generate in the chat completion.
 *
 * @default 4096
 */
@alias("max_tokens")
@@ -46,7 +97,17 @@ @omitif("this.maxTokens == 4096")
/**
 * The number of completions to generate for each prompt.
 */
@omitif("this.n == 1")
n: i32 = 1;
/**
 * Number between `-2.0` and `2.0`.
 *
 * Positive values penalize new tokens based on whether they appear in the text so far,
 * increasing the model's likelihood to talk about new topics.
 *
 * @default 0.0
 */
@alias("presence_penalty")
@@ -56,3 +117,9 @@ @omitif("this.presencePenalty == 0.0")
/**
 * Specifies the format for the response.
 *
 * If set to `ResponseFormat.Json`, the response will be a JSON object.
 *
 * @default ResponseFormat.Text
 */
@alias("response_format")
@@ -62,7 +129,21 @@ @omitif("this.responseFormat.type == 'text'")
/**
 * If specified, the model will make a best effort to sample deterministically, such that
 * repeated requests with the same seed and parameters should return the same result.
 * Determinism is not guaranteed, and you should use the `systemFingerprint` response
 * parameter to monitor changes in the backend.
 */
@omitif("this.seed == -1")
seed: i32 = -1; // TODO: make this an `i32 | null` when supported
/**
 * Specifies the latency tier to use for processing the request.
 */
@alias("service_tier")
@omitnull()
serviceTier: ServiceTier | null = null;
/**
 * Up to 4 sequences where the API will stop generating further tokens.
 */
@omitnull()
@@ -77,6 +158,23 @@ stop: string[] | null = null;
/**
 * A number between `0.0` and `2.0` that controls the sampling temperature.
 *
 * Higher values like `0.8` will make the output more random, while lower values like `0.2` will make
 * it more focused and deterministic.
 *
 * We generally recommend altering this or `topP` but not both.
 *
 * @default 1.0
 */
@omitif("this.temperature == 1.0")
temperature: f64 = 1.0;
/**
 * An alternative to sampling with temperature, called nucleus sampling, where the model
 * considers the results of the tokens with `topP` probability mass.
 *
 * For example, `0.1` means only the tokens comprising the top 10% probability mass are considered.
 *
 * We generally recommend altering this or `temperature` but not both.
 */
@alias("top_p")
@@ -86,12 +184,28 @@ @omitif("this.topP == 1.0")
/**
 * A list of tools the model may call. Currently, only functions are supported as a tool.
 * Use this to provide a list of functions the model may generate JSON inputs for.
 * A maximum of 128 functions is supported.
 */
@omitnull()
tools: Tool[] | null = null;
/**
 * Controls which (if any) tool is called by the model.
 * - `"none"` means the model will not call any tool and instead generates a message.
 * - `"auto"` means the model can pick between generating a message or calling one or more tools.
 * - `"required"` means the model must call one or more tools.
 * - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
 *
 * `none` is the default when no tools are present. `auto` is the default if tools are present.
 */
@alias("tool_choice")
@omitnull()
toolChoice: string | null = null; // TODO: verify this works
toolChoice: string | null = null; // TODO: Make this work with a custom tool object
/**
 * Whether to enable parallel function calling during tool use.
 *
 * @default true
 */
@alias("parallel_tool_calls")
@@ -101,3 +215,7 @@ @omitif("this.parallelToolCalls == true || !this.tools || this.tools!.length == 0")
/**
 * The user ID to associate with the request.
 * If not specified, the request will be anonymous.
 * See https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
 */
@omitnull()
@@ -107,24 +225,102 @@ user: string | null = null;
/**
 * The OpenAI service tier used to process the request.
 */
// eslint-disable-next-line @typescript-eslint/no-namespace
export namespace ServiceTier {
/**
 * The OpenAI system will utilize scale tier credits until they are exhausted.
 */
export const Auto = "auto";
/**
 * The request will be processed using the default OpenAI service tier with a lower
 * uptime SLA and no latency guarantee.
 */
export const Default = "default";
}
export type ServiceTier = string;
/**
 * The output object for the OpenAI Chat API.
 */
@json
class ChatOutput {
/**
 * A unique identifier for the chat completion.
 */
id!: string;
/**
 * The name of the output object type returned by the API.
 * Always `"chat.completion"`.
 */
object!: string;
/**
 * A list of chat completion choices. Can be more than one if `n` is greater than 1 in the input options.
 */
choices!: Choice[];
created!: i32; // unix timestamp seconds
/**
 * The Unix timestamp (in seconds) of when the chat completion was created.
 */
created!: i32;
/**
 * The model used for the chat completion.
 * In most cases, this will match the requested `model` field in the input.
 */
model!: string;
/**
 * The service tier used for processing the request.
 *
 * This field is only included if the `serviceTier` parameter is specified in the request.
 */
@alias("service_tier")
serviceTier: string | null = null;
/**
 * This fingerprint represents the OpenAI backend configuration that the model runs with.
 *
 * Can be used in conjunction with the seed request parameter to understand when backend changes
 * have been made that might impact determinism.
 */
@alias("system_fingerprint")
systemFingerprint!: string;
/**
 * The usage statistics for the request.
 */
usage!: Usage;
}
/**
 * An object specifying the format that the model must output.
 */
@json
export class ResponseFormat {
type!: string;
/**
 * The type of response format. Must be one of `"text"` or `"json_object"`.
 */
readonly type!: string;
/**
 * Instructs the model to output the response as a JSON object.
 *
 * @remarks
 * You must also instruct the model to produce JSON yourself via a system or user message.
 *
 * Additionally, if you need an array you must ask for an object that wraps the array,
 * because the model will not reliably produce arrays directly (i.e., there is no `json_array` option).
 */
static Json: ResponseFormat = { type: "json_object" };
/**
 * Instructs the model to output the response as a plain text string.
 *
 * @remarks
 * This is the default response format.
 */
static Text: ResponseFormat = { type: "text" };
@@ -141,18 +337,43 @@ }
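A short sketch of requesting JSON output per the remarks above: the model must also be told to produce JSON in a message, and arrays need a wrapping object (`model` and `input` as in the earlier usage sketch):

// Ask for a JSON object; instruct the model accordingly in the system message.
const input = model.createInput([
  new SystemMessage('Reply with a JSON object of the form { "items": [...] }.'),
  new UserMessage("List three primary colors."),
]);
input.responseFormat = ResponseFormat.Json;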
/**
 * A tool object that the model may call.
 */
@json
export class Tool {
/**
 * The type of the tool. Currently, only `"function"` is supported.
 *
 * @default "function"
 */
type: string = "function";
/**
 * The definition of the function.
 */
function: FunctionDefinition = new FunctionDefinition();
}
/**
 * The definition of a function that can be called by the model.
 */
@json
export class FunctionDefinition {
/**
 * The name of the function to be called.
 *
 * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
 */
name!: string;
/**
 * An optional description of what the function does, used by the model to choose when and how to call the function.
 */
@omitnull()
description: string | null = null;
/**
 * The parameters the function accepts, described as a JSON Schema object.
 *
 * See https://platform.openai.com/docs/guides/function-calling
 */
@omitnull()
@@ -162,29 +383,65 @@ parameters: string | null = null; // TODO: verify this works
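A sketch of defining a tool, assuming `parameters` takes a JSON Schema document serialized as a string (as the field's TODO suggests); the weather tool and its schema are illustrative only:

// Hypothetical weather tool definition.
const weatherTool = new Tool();
weatherTool.function.name = "get_weather";
weatherTool.function.description = "Gets the current weather for a location.";
weatherTool.function.parameters = `{
  "type": "object",
  "properties": {
    "location": { "type": "string", "description": "City name" }
  },
  "required": ["location"]
}`;
input.tools = [weatherTool];
input.toolChoice = "auto"; // let the model decide whether to call it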
/**
 * A tool call object that the model may generate.
 */
@json
export class ToolCall {
/**
 * The ID of the tool call.
 */
id!: string;
/**
 * The type of the tool. Currently, only `"function"` is supported.
 *
 * @default "function"
 */
type: string = "function";
/**
 * The function that the model called.
 */
function!: FunctionCall;
}
/**
 * A function call object that the model may generate.
 */
@json
export class FunctionCall {
/**
 * The name of the function to call.
 */
name!: string;
/**
 * The arguments to call the function with, as generated by the model in JSON format.
 *
 * @remarks
 * Note that the model does not always generate valid JSON, and may hallucinate parameters not
 * defined by your function schema. Validate the arguments in your code before calling your function.
 */
arguments!: string;
}
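Handling a generated tool call might look like the following sketch; per the remarks above, `arguments` is model-generated JSON and should be validated before use. `JSON.parse<T>` from `json-as` is assumed, and `WeatherArgs` is a hypothetical `@json` class matching the tool's schema:

// Sketch: dispatch tool calls returned by the model.
import { JSON } from "json-as";

const message = output.choices[0].message;
for (let i = 0; i < message.toolCalls.length; i++) {
  const call = message.toolCalls[i];
  if (call.function.name == "get_weather") {
    // The model may emit invalid JSON or hallucinated fields; validate before use.
    const args = JSON.parse<WeatherArgs>(call.function.arguments);
    // ...execute the tool, then reply with a ToolMessage carrying call.id...
  }
}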
/**
 * The usage statistics for the request.
 */
@json
class Usage {
/**
 * The number of completion tokens used in the response.
 */
@alias("completion_tokens")
completionTokens!: i32;
/**
 * The number of prompt tokens used in the request.
 */
@alias("prompt_tokens")
promptTokens!: i32;
/**
 * The total number of tokens used in the request and response.
 */
@alias("total_tokens")
@@ -194,30 +451,75 @@ totalTokens!: i32;
/**
 * A completion choice object returned in the response.
 */
@json
class Choice {
/**
 * The reason the model stopped generating tokens.
 *
 * Possible values are:
 * - `"stop"` if the model hit a natural stop point or a provided stop sequence
 * - `"length"` if the maximum number of tokens specified in the request was reached
 * - `"content_filter"` if content was omitted due to a flag from the content filters
 * - `"tool_calls"` if the model called a tool
 */
@alias("finish_reason")
finishReason!: string;
/**
 * The index of the choice in the list of choices.
 */
index!: i32;
/**
 * A chat completion message generated by the model.
 */
message!: CompletionMessage;
/**
 * Log probability information for the choice.
 */
logprobs!: Logprobs | null;
}
/**
 * Log probability information for a choice.
 */
@json
class Logprobs {
/**
 * A list of message content tokens with log probability information.
 */
content: LogprobsContent[] | null = null;
}
/**
 * Log probability information for a message content token.
 */
@json
class LogprobsContent {
/**
 * The token.
 */
token!: string;
/**
 * The log probability of this token, if it is within the top 20 most likely tokens.
 * Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
 */
logprob!: f64;
/**
 * A list of integers representing the UTF-8 bytes representation of the token.
 *
 * Useful in instances where characters are represented by multiple tokens and their byte
 * representations must be combined to generate the correct text representation.
 * Can be null if there is no bytes representation for the token.
 */
bytes!: u8[] | null; // TODO: verify this works
/**
 * List of the most likely tokens and their log probability, at this token position.
 * In rare cases, there may be fewer than the number of requested `topLogprobs` returned.
 */
@alias("top_logprobs")
@@ -227,13 +529,38 @@ topLogprobs!: TopLogprobsContent[]; // TODO: verify this works
/**
 * Log probability information for the most likely tokens at a given position.
 */
@json
class TopLogprobsContent {
/**
 * The token.
 */
token!: string;
/**
 * The log probability of this token, if it is within the top 20 most likely tokens.
 * Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
 */
logprob!: f64;
/**
 * A list of integers representing the UTF-8 bytes representation of the token.
 * Useful in instances where characters are represented by multiple tokens and their byte
 * representations must be combined to generate the correct text representation.
 * Can be null if there is no bytes representation for the token.
 */
bytes!: u8[] | null; // TODO: verify this works
}
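Requesting and reading log probabilities, as a sketch tying the fields above together; the input field name `topLogprobs` is assumed from its `top_logprobs` alias, since its declaration is collapsed in this diff:

input.logprobs = true;
input.topLogprobs = 3; // up to 20; ignored unless logprobs is true

const output = model.invoke(input);
const lp = output.choices[0].logprobs;
if (lp != null && lp.content != null) {
  const first = lp.content![0];
  // first.token, first.logprob, first.bytes, and first.topLogprobs are now available
}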
/**
 * A message object that can be sent to the chat model.
 */
@json
abstract class Message {
/**
 * Creates a new message object.
 *
 * @param role The role of the author of this message.
 * @param content The contents of the message.
 */
constructor(role: string, content: string) {
@@ -247,2 +574,6 @@ this._role = role;
protected _role: string;
/**
 * The role of the author of this message.
 */
get role(): string {
@@ -252,8 +583,18 @@ return this._role;
/**
 * The contents of the message.
 */
content: string;
}
/**
 * A system message.
 */
@json
export class SystemMessage extends Message {
/**
 * Creates a new system message object.
 *
 * @param content The contents of the message.
 */
constructor(content: string) {
@@ -263,3 +604,6 @@ super("system", content);
/**
 * An optional name for the participant.
 * Provides the model information to differentiate between participants of the same role.
 */
@omitnull()
@@ -269,5 +613,12 @@ name: string | null = null;
/**
 * A user message.
 */
@json
export class UserMessage extends Message {
/**
 * Creates a new user message object.
 *
 * @param content The contents of the message.
 */
constructor(content: string) {
@@ -277,3 +628,6 @@ super("user", content);
/**
 * An optional name for the participant.
 * Provides the model information to differentiate between participants of the same role.
 */
@omitnull()
@@ -283,5 +637,12 @@ name: string | null = null;
/**
 * An assistant message.
 */
@json
export class AssistantMessage extends Message {
/**
 * Creates a new assistant message object.
 *
 * @param content The contents of the message.
 */
constructor(content: string) {
@@ -291,7 +652,12 @@ super("assistant", content);
/**
 * An optional name for the participant.
 * Provides the model information to differentiate between participants of the same role.
 */
@omitnull()
name: string | null = null;
/**
 * The tool calls generated by the model, such as function calls.
 */
@alias("tool_calls")
@@ -302,10 +668,20 @@ @omitif("this.toolCalls.length == 0")
/**
 * A tool message.
 */
@json
export class ToolMessage extends Message {
constructor(content: string) {
/**
 * Creates a new tool message object.
 *
 * @param content The contents of the message.
 * @param toolCallId The ID of the tool call that this message is responding to.
 */
constructor(content: string, toolCallId: string) {
super("tool", content);
this.toolCallId = toolCallId;
}
/**
 * Tool call that this message is responding to.
 */
@alias("tool_call_id")
@@ -315,5 +691,13 @@ toolCallId!: string;
/**
 * A chat completion message generated by the model.
 */
@json
class CompletionMessage extends Message {
/**
 * Creates a new completion message object.
 *
 * @param role The role of the author of this message.
 * @param content The contents of the message.
 */
constructor(role: string, content: string) {
@@ -323,5 +707,7 @@ super(role, content);
/**
 * The tool calls generated by the model, such as function calls.
 */
@alias("tool_calls")
toolCalls: ToolCall[] = [];
}
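A sketch of the tool-call round trip enabled by the updated `ToolMessage` constructor, which now requires the `toolCallId` it responds to; `priorAssistantMessage` and `call` stand in for a hypothetical earlier assistant turn and its ToolCall:

// After executing the tool named in `call`, send the result back to the model:
const followUp = model.createInput([
  new UserMessage("What is the weather in Paris?"),
  priorAssistantMessage,                     // hypothetical AssistantMessage containing the tool call
  new ToolMessage(`{"tempC": 21}`, call.id), // ties the result to the originating call
]);
const finalOutput = model.invoke(followUp);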
import { Model } from "../..";
// Reference: https://platform.openai.com/docs/api-reference/embeddings
/**
 * Provides input and output types that conform to the OpenAI Embeddings API.
 *
 * Reference: https://platform.openai.com/docs/api-reference/embeddings
 */
export class EmbeddingsModel extends Model<EmbeddingsInput, EmbeddingsOutput> {
createInput(text: string): EmbeddingsInput {
/**
 * Creates an input object for the OpenAI Embeddings API.
 *
 * @param content The input content to vectorize. Can be any of:
 * - A string representing the text to vectorize.
 * - An array of strings representing multiple texts to vectorize.
 * - An array of integers representing pre-tokenized text to vectorize.
 * - An array of arrays of integers representing multiple pre-tokenized texts to vectorize.
 *
 * @returns An input object that can be passed to the `invoke` method.
 *
 * @remarks
 * The input content must not exceed the maximum token limit of the model.
 */
createInput<T>(content: T): EmbeddingsInput {
const model = this.info.fullName;
return <EmbeddingsInput>{ model, input: text };
switch (idof<T>()) {
case idof<string>():
case idof<string[]>():
case idof<i64[]>():
case idof<i32[]>():
case idof<i16[]>():
case idof<i8[]>():
case idof<u64[]>():
case idof<u32[]>():
case idof<u16[]>():
case idof<u8[]>():
case idof<i64[][]>():
case idof<i32[][]>():
case idof<i16[][]>():
case idof<i8[][]>():
case idof<u64[][]>():
case idof<u32[][]>():
case idof<u16[][]>():
case idof<u8[][]>():
return <TypedEmbeddingsInput<T>>{ model, input: content };
}
throw new Error("Unsupported input content type.");
}
}
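For illustration, a minimal sketch of the new generic `createInput<T>`; the host import, import path, and model name "embeddings" are assumptions:

import { models } from "@hypermode/functions-as"; // assumed host import
import { EmbeddingsModel } from "@hypermode/models-as/models/openai/embeddings"; // assumed path

export function embed(texts: string[]): f32[][] {
  const model = models.getModel<EmbeddingsModel>("embeddings"); // model name is hypothetical
  const input = model.createInput<string[]>(texts); // also accepts string, i32[], u32[][], etc.
  const output = model.invoke(input);
  const vectors: f32[][] = [];
  for (let i = 0; i < output.data.length; i++) {
    vectors.push(output.data[i].embedding);
  }
  return vectors;
}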
/**
 * The input object for the OpenAI Embeddings API.
 */
@json
class EmbeddingsInput {
input!: string; // todo: support other types of input (arrays, etc.)
/**
 * The name of the model to use for the embeddings.
 * Must be the exact string expected by the model provider.
 * For example, "text-embedding-3-small".
 *
 * @remarks
 * This field is automatically set by the `createInput` method when creating this object.
 * It does not need to be set manually.
 */
model!: string;
/**
 * The encoding format for the output embeddings.
 *
 * @default EncodingFormat.Float
 *
 * @remarks
 * Currently only `EncodingFormat.Float` is supported.
 */
@alias("encoding_format")
@omitif("this.encodingFormat == 'float'")
encodingFormat: string = EncodingFormat.Float;
@omitif("this.encodingFormat.type == 'float'")
encodingFormat: EncodingFormat = EncodingFormat.Float;
/**
 * The maximum number of dimensions for the output embeddings.
 * If not specified, the model's default number of dimensions will be used.
 */
@omitif("this.dimensions == -1")
dimensions: i32 = -1; // TODO: make this an `i32 | null` when supported
/**
 * The user ID to associate with the request.
 * If not specified, the request will be anonymous.
 * See https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
 */
@omitnull()
@@ -31,38 +96,97 @@ user: string | null = null;
/**
 * The input object for the OpenAI Embeddings API.
 */
@json
class TypedEmbeddingsInput<T> extends EmbeddingsInput {
/**
 * The input content to vectorize.
 */
input!: T;
}
/**
 * The output object for the OpenAI Embeddings API.
 */
@json
class EmbeddingsOutput {
/**
 * The name of the output object type returned by the API.
 * Always `"list"`.
 */
object!: string;
/**
 * The name of the model used to generate the embeddings.
 * In most cases, this will match the requested `model` field in the input.
 */
model!: string;
/**
 * The usage statistics for the request.
 */
usage!: Usage;
/**
 * The output vector embeddings data.
 */
data!: Embedding[];
}
/**
 * The encoding format for the output embeddings.
 */
// eslint-disable-next-line @typescript-eslint/no-namespace
export namespace EncodingFormat {
/**
 * The output embeddings are encoded as an array of floating-point numbers.
 */
export const Float = "float";
@json
export class EncodingFormat {
type: string = "float";
static Float: EncodingFormat = { type: "float" };
static Base64: EncodingFormat = { type: "base64" };
/**
 * The output embeddings are encoded as a base64-encoded string,
 * containing a binary representation of an array of floating-point numbers.
 *
 * @remarks
 * This format is currently not supported through this interface.
 */
export const Base64 = "base64";
}
export type EncodingFormat = string;
/**
 * The output vector embeddings data.
 */
@json
class Embedding {
/**
 * The name of the output object type returned by the API.
 * Always `"embedding"`.
 */
object!: string;
/**
 * The index of the input text that corresponds to this embedding.
 * Used when requesting embeddings for multiple texts.
 */
index!: i32;
embedding!: f64[];
embedding!: f32[]; // TODO: support `f32[] | string` based on input encoding format
}
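Since `embedding` is now `f32[]`, a typical consumer-side computation is cosine similarity between two vectors; a self-contained sketch:

// Cosine similarity between two embedding vectors of equal length.
function cosineSimilarity(a: f32[], b: f32[]): f64 {
  let dot: f64 = 0, normA: f64 = 0, normB: f64 = 0;
  for (let i = 0; i < a.length; i++) {
    dot += <f64>a[i] * <f64>b[i];
    normA += <f64>a[i] * <f64>a[i];
    normB += <f64>b[i] * <f64>b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}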
/**
 * The usage statistics for the request.
 */
@json
class Usage {
/**
 * The number of prompt tokens used in the request.
 */
@alias("prompt_tokens")
promptTokens!: i32;
/**
 * The total number of tokens used in the request.
 */
@alias("total_tokens")
totalTokens!: i32;
}
{
"name": "@hypermode/models-as",
"version": "0.1.6",
"version": "0.2.0",
"description": "Hypermode Model Interface Library for AssemblyScript",
@@ -17,13 +17,13 @@ "author": "Hypermode, Inc.",
"dependencies": {
"json-as": "^0.9.6"
"json-as": "^0.9.8"
},
"devDependencies": {
"@types/node": "^20.14.5",
"@typescript-eslint/eslint-plugin": "^7.13.1",
"@typescript-eslint/parser": "^7.13.1",
"assemblyscript": "^0.27.27",
"@types/node": "^20.14.8",
"@typescript-eslint/eslint-plugin": "^7.14.1",
"@typescript-eslint/parser": "^7.14.1",
"assemblyscript": "^0.27.28",
"assemblyscript-prettier": "^3.0.1",
"eslint": "^8.57.0",
"prettier": "^3.3.2",
"typescript": "^5.4.5",
"typescript": "^5.5.2",
"visitor-as": "^0.11.4"
@@ -30,0 +30,0 @@ },
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
Updated: json-as@^0.9.8