@hypermode/models-as
Comparing version 0.2.3 to 0.2.4
@@ -95,3 +95,3 @@ import { Model } from "../..";
 @json
-class AnthropicMessagesInput {
+export class AnthropicMessagesInput {
 /**
@@ -262,3 +262,3 @@ * The model that will complete your prompt.
 @json
-class ToolChoice {
+export class ToolChoice {
 constructor(type: string, name: string | null = null) {
@@ -301,3 +301,3 @@ this._type = type;
 @json
-class AnthropicMessagesOutput {
+export class AnthropicMessagesOutput {
 /**
@@ -354,3 +354,3 @@ * Unique object identifier.
 @json
-class ContentBlock {
+export class ContentBlock {
 type!: string;
@@ -377,3 +377,3 @@
 @json
-class Usage {
+export class Usage {
 /**
@@ -380,0 +380,0 @@ * The number of input tokens which were used.
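Every Anthropic messages type above is now exported rather than module-private, so user code can import and reference these classes directly. A minimal sketch of what that enables, assuming the types are exposed from the package's Anthropic messages module and that the output carries a `content: ContentBlock[]` field (the import path and that field name are assumptions, not shown in this diff):

    import {
      AnthropicMessagesOutput,
      ContentBlock,
    } from "@hypermode/models-as/models/anthropic/messages"; // hypothetical path

    // Collect the `type` of each content block in a response;
    // `ContentBlock.type` is the field shown in the hunk above.
    function blockTypes(output: AnthropicMessagesOutput): string[] {
      const types = new Array<string>();
      for (let i = 0; i < output.content.length; i++) {
        const block: ContentBlock = output.content[i]; // `content` is assumed
        types.push(block.type);
      }
      return types;
    }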
@@ -29,3 +29,3 @@ import { Model } from "../..";
 @json
-class ClassificationInput {
+export class ClassificationInput {
 /**
@@ -41,3 +41,3 @@ * A list of one or more text strings of text to classify.
 @json
-class ClassificationOutput {
+export class ClassificationOutput {
 /**
@@ -74,3 +74,3 @@ * A list of prediction results that correspond to each input text string.
 @json
-class ClassifierLabel {
+export class ClassifierLabel {
 /**
@@ -77,0 +77,0 @@ * The classification label.
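The classification types get the same treatment. With `ClassifierLabel` exported, a helper can be typed against it; the sketch below uses a hypothetical import path and assumes the label exposes `label` and `confidence` members, which this diff does not confirm:

    import { ClassifierLabel } from "@hypermode/models-as/models/classification"; // hypothetical path

    // Pick the highest-confidence label from a prediction (assumed member names).
    function topLabel(labels: ClassifierLabel[]): string {
      if (labels.length == 0) return "";
      let best = labels[0];
      for (let i = 1; i < labels.length; i++) {
        if (labels[i].confidence > best.confidence) best = labels[i];
      }
      return best.label;
    }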
@@ -26,3 +26,3 @@ import { Model } from "../..";
 @json
-class EmbeddingsInput {
+export class EmbeddingsInput {
 /**
@@ -38,3 +38,3 @@ * A list of one or more text strings to create vector embeddings for.
 @json
-class EmbeddingsOutput {
+export class EmbeddingsOutput {
 /**
@@ -41,0 +41,0 @@ * A list of vector embeddings that correspond to each input text string.
@@ -61,3 +61,3 @@ import { Model } from "../..";
 @json
-class Part {
+export class Part {
 text!: string;
@@ -145,3 +145,3 @@ }
 @json
-class GeminiGenerateInput {
+export class GeminiGenerateInput {
 /**
@@ -262,3 +262,3 @@ * The content of the current conversation with the model.
 @json
-class SafetySetting {
+export class SafetySetting {
 category!: HarmCategory;
@@ -318,3 +318,3 @@ threshold!: HarmBlockThreshold;
 @json
-class GeminiGenerateOutput {
+export class GeminiGenerateOutput {
 /**
@@ -341,3 +341,3 @@ * Candidate responses from the model.
 @json
-class Candidate {
+export class Candidate {
 /**
@@ -433,3 +433,3 @@ * Index of the candidate in the list of candidates.
 @json
-class SafetyRating {
+export class SafetyRating {
 category!: HarmCategory;
@@ -475,3 +475,3 @@ probability!: HarmProbability;
 @json
-class CitationMetadata {
+export class CitationMetadata {
 citationSources!: CitationSource[];
@@ -484,3 +484,3 @@ }
 @json
-class CitationSource {
+export class CitationSource {
 /**
@@ -516,3 +516,3 @@ * Start of segment of the response that is attributed to this source.
 @json
-class PromptFeedback {
+export class PromptFeedback {
 blockReason!: BlockReason;
@@ -548,3 +548,3 @@ safetyRatings!: SafetyRating[];
 @json
-class UsageMetadata {
+export class UsageMetadata {
 /**
@@ -551,0 +551,0 @@ * Number of tokens in the prompt.
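Exporting the Gemini request/response classes makes it possible to build values such as `SafetySetting` outside the module. The `category` and `threshold` fields are the ones shown in the hunks above; the import path is an assumption, and the sketch relies on AssemblyScript's object-literal instantiation for classes without constructors:

    import {
      SafetySetting,
      HarmCategory,
      HarmBlockThreshold,
    } from "@hypermode/models-as/models/gemini/generate"; // hypothetical path

    // Build a safety setting from caller-supplied values.
    function makeSafetySetting(
      category: HarmCategory,
      threshold: HarmBlockThreshold,
    ): SafetySetting {
      const setting: SafetySetting = {
        category: category,
        threshold: threshold,
      };
      return setting;
    }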
@@ -21,3 +21,3 @@ import { Model } from "../..";
 @json
-class TextGenerationInput {
+export class TextGenerationInput {
 /**
@@ -62,3 +62,3 @@ * The prompt text to pass to the model.
 @json
-class TextGenerationOutput {
+export class TextGenerationOutput {
 /**
@@ -65,0 +65,0 @@ * The generated text.
@@ -26,3 +26,3 @@ import { Model } from "../..";
 @json
-class OpenAIChatInput {
+export class OpenAIChatInput {
 /**
@@ -242,3 +242,3 @@ * The name of the model to use for the chat.
 @json
-class OpenAIChatOutput {
+export class OpenAIChatOutput {
 /**
@@ -322,3 +322,3 @@ * A unique identifier for the chat completion.
 */
-static Json: ResponseFormat = { type: "json_object" };
+static Json: ResponseFormat = { type: "json_object", jsonSchema: null };
@@ -345,3 +345,3 @@ /**
 */
-static Text: ResponseFormat = { type: "text" };
+static Text: ResponseFormat = { type: "text", jsonSchema: null };
 }
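Besides the export changes, the `Json` and `Text` presets now initialize `jsonSchema` explicitly to `null`, presumably because `ResponseFormat` carries a `jsonSchema` member and AssemblyScript object literals that instantiate a class must supply every field that lacks a default. Usage is unchanged; a minimal sketch, assuming the chat input exposes a `responseFormat` field and the import path below (neither is confirmed by this diff):

    import {
      OpenAIChatInput,
      ResponseFormat,
    } from "@hypermode/models-as/models/openai/chat"; // hypothetical path

    // Ask the model for a JSON object response using the updated preset,
    // which now expands to { type: "json_object", jsonSchema: null }.
    function requestJsonOutput(input: OpenAIChatInput): void {
      input.responseFormat = ResponseFormat.Json; // assumed field name
    }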
@@ -466,3 +466,3 @@
 @json
-class Usage {
+export class Usage {
 /**
@@ -491,3 +491,3 @@ * The number of completion tokens used in the response.
 @json
-class Choice {
+export class Choice {
 /**
@@ -525,3 +525,3 @@ * The reason the model stopped generating tokens.
 @json
-class Logprobs {
+export class Logprobs {
 /**
@@ -537,3 +537,3 @@ * A list of message content tokens with log probability information.
 @json
-class LogprobsContent {
+export class LogprobsContent {
 /**
@@ -571,3 +571,3 @@ * The token.
 @json
-class TopLogprobsContent {
+export class TopLogprobsContent {
 /**
@@ -725,3 +725,3 @@ * The token.
 @json
-class CompletionMessage extends Message {
+export class CompletionMessage extends Message {
 /**
@@ -728,0 +728,0 @@ * Creates a new completion message object.
@@ -59,3 +59,3 @@ import { Model } from "../..";
 @json
-class OpenAIEmbeddingsInput {
+export class OpenAIEmbeddingsInput {
 /**
@@ -104,3 +104,3 @@ * The name of the model to use for the embeddings.
 @json
-class TypedEmbeddingsInput<T> extends OpenAIEmbeddingsInput {
+export class TypedEmbeddingsInput<T> extends OpenAIEmbeddingsInput {
 /**
@@ -116,3 +116,3 @@ * The input content to vectorize.
 @json
-class OpenAIEmbeddingsOutput {
+export class OpenAIEmbeddingsOutput {
 /**
@@ -166,3 +166,3 @@ * The name of the output object type returned by the API.
 @json
-class Embedding {
+export class Embedding {
 /**
@@ -179,2 +179,6 @@ * The name of the output object type returned by the API.
 index!: i32;
+/**
+ * The vector embedding of the input text.
+ */
 embedding!: f32[]; // TODO: support `f32[] | string` based on input encoding format
@@ -187,3 +191,3 @@ }
 @json
-class Usage {
+export class Usage {
 /**
@@ -190,0 +194,0 @@ * The number of prompt tokens used in the request.
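The embeddings output types are exported as well, and the `Embedding.embedding` vector field now carries a doc comment. The sketch below reads a vector back out of a response; the import path and the `data` member on the output are assumptions, while `index` and `embedding` are the fields shown above:

    import {
      OpenAIEmbeddingsOutput,
      Embedding,
    } from "@hypermode/models-as/models/openai/embeddings"; // hypothetical path

    // Return the vector for the input at a given position, or an empty array.
    function vectorAt(output: OpenAIEmbeddingsOutput, position: i32): f32[] {
      const items: Embedding[] = output.data; // assumed member name
      for (let i = 0; i < items.length; i++) {
        if (items[i].index == position) return items[i].embedding;
      }
      return new Array<f32>(0);
    }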
 {
   "name": "@hypermode/models-as",
-  "version": "0.2.3",
+  "version": "0.2.4",
   "description": "Hypermode Model Interface Library for AssemblyScript",
@@ -19,13 +19,13 @@ "author": "Hypermode, Inc.",
   "devDependencies": {
-    "@types/node": "^20.14.14",
-    "@eslint/js": "^9.8.0",
+    "@types/node": "^20.16.5",
+    "@eslint/js": "^9.10.0",
     "@types/eslint__js": "^8.42.3",
-    "@typescript-eslint/parser": "^8.0.1",
-    "as-test": "^0.3.3",
+    "@typescript-eslint/parser": "^8.6.0",
+    "as-test": "^0.3.4",
     "assemblyscript": "^0.27.29",
     "assemblyscript-prettier": "^3.0.1",
-    "eslint": "^9.8.0",
+    "eslint": "^9.10.0",
     "prettier": "^3.3.3",
-    "typescript": "^5.5.4",
-    "typescript-eslint": "^8.0.1",
+    "typescript": "^5.6.2",
+    "typescript-eslint": "^8.6.0",
     "visitor-as": "^0.11.4"
@@ -32,0 +32,0 @@ },
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package