Socket
Socket
Sign inDemoInstall

@axflow/models

Package Overview
Dependencies
3
Maintainers
1
Versions
38
Alerts
File Explorer

Advanced tools

Install Socket

Detect and block malicious and high-risk dependencies

Install

Comparing version 0.0.1-y.0 to 0.0.1

dist/react/index.d.mts

68

dist/anthropic/completion.d.ts

@@ -47,5 +47,64 @@ declare namespace AnthropicCompletionTypes {

}
/**
* Run a completion against the Anthropic API.
*
* @see https://docs.anthropic.com/claude/reference/complete_post
*
* @param request The request body sent to Anthropic. See Anthropic's documentation for /v1/complete for supported parameters.
* @param options
* @param options.apiKey Anthropic API key.
* @param options.apiUrl The url of the Anthropic (or compatible) API. Defaults to https://api.anthropic.com/v1/complete.
* @param options.version The Anthropic API version. Defaults to 2023-06-01. Note that older versions are not currently supported.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns Anthropic completion. See Anthropic's documentation for /v1/complete.
*/
declare function run(request: AnthropicCompletionTypes.Request, options: AnthropicCompletionTypes.RequestOptions): Promise<AnthropicCompletionTypes.Response>;
/**
* Run a streaming completion against the Anthropic API. The resulting stream is the raw unmodified bytes from the API.
*
* @see https://docs.anthropic.com/claude/reference/complete_post
* @see https://docs.anthropic.com/claude/reference/streaming
*
* @param request The request body sent to Anthropic. See Anthropic's documentation for /v1/complete for supported parameters.
* @param options
* @param options.apiKey Anthropic API key.
* @param options.apiUrl The url of the Anthropic (or compatible) API. Defaults to https://api.anthropic.com/v1/complete.
* @param options.version The Anthropic API version. Defaults to 2023-06-01. Note that older versions are not currently supported.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of bytes directly from the API.
*/
declare function streamBytes(request: AnthropicCompletionTypes.Request, options: AnthropicCompletionTypes.RequestOptions): Promise<ReadableStream<Uint8Array>>;
/**
* Run a streaming completion against the Anthropic API. The resulting stream is the parsed stream data as JavaScript objects.
*
* @see https://docs.anthropic.com/claude/reference/complete_post
* @see https://docs.anthropic.com/claude/reference/streaming
*
* @param request The request body sent to Anthropic. See Anthropic's documentation for /v1/complete for supported parameters.
* @param options
* @param options.apiKey Anthropic API key.
* @param options.apiUrl The url of the Anthropic (or compatible) API. Defaults to https://api.anthropic.com/v1/complete.
* @param options.version The Anthropic API version. Defaults to 2023-06-01. Note that older versions are not currently supported.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of objects representing each chunk from the API.
*/
declare function stream(request: AnthropicCompletionTypes.Request, options: AnthropicCompletionTypes.RequestOptions): Promise<ReadableStream<AnthropicCompletionTypes.Chunk>>;
/**
* Run a streaming completion against the Anthropic API. The resulting stream emits only the string tokens.
*
* @see https://docs.anthropic.com/claude/reference/complete_post
* @see https://docs.anthropic.com/claude/reference/streaming
*
* @param request The request body sent to Anthropic. See Anthropic's documentation for /v1/complete for supported parameters.
* @param options
* @param options.apiKey Anthropic API key.
* @param options.apiUrl The url of the Anthropic (or compatible) API. Defaults to https://api.anthropic.com/v1/complete.
* @param options.version The Anthropic API version. Defaults to 2023-06-01. Note that older versions are not currently supported.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of tokens from the API.
*/
declare function streamTokens(request: AnthropicCompletionTypes.Request, options: AnthropicCompletionTypes.RequestOptions): Promise<ReadableStream<string>>;
/**
* An object that encapsulates methods for calling the Anthropic Completion API.
*/
declare class AnthropicCompletion {

@@ -55,10 +114,5 @@ static run: typeof run;

static streamBytes: typeof streamBytes;
static streamTokens: typeof streamTokens;
}
declare class AnthropicCompletionDecoderStream extends TransformStream<Uint8Array, AnthropicCompletionTypes.Chunk> {
private static EVENT_LINES_RE;
private static parse;
private static transformer;
constructor();
}
export { AnthropicCompletion, AnthropicCompletionDecoderStream, AnthropicCompletionTypes };
export { AnthropicCompletion, AnthropicCompletionTypes };

40

dist/anthropic/completion.js

@@ -23,7 +23,6 @@ "use strict";

__export(completion_exports, {
AnthropicCompletion: () => AnthropicCompletion,
AnthropicCompletionDecoderStream: () => AnthropicCompletionDecoderStream
AnthropicCompletion: () => AnthropicCompletion
});
module.exports = __toCommonJS(completion_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");
var ANTHROPIC_API_URL = "https://api.anthropic.com/v1/complete";

@@ -43,3 +42,3 @@ function headers(apiKey, version) {

const url = options.apiUrl || ANTHROPIC_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey, options.version),

@@ -53,3 +52,3 @@ body: JSON.stringify({ ...request, stream: false }),

const url = options.apiUrl || ANTHROPIC_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey, options.version),

@@ -60,10 +59,20 @@ body: JSON.stringify({ ...request, stream: true }),

if (!response.body) {
throw new import_utils.HttpError("Expected response body to be a ReadableStream", response);
throw new import_shared.HttpError("Expected response body to be a ReadableStream", response);
}
return response.body;
}
function noop(chunk) {
return chunk;
}
async function stream(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new AnthropicCompletionDecoderStream());
return byteStream.pipeThrough(new AnthropicCompletionDecoderStream(noop));
}
function chunkToToken(chunk) {
return chunk.event === "completion" ? chunk.data.completion : "";
}
async function streamTokens(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new AnthropicCompletionDecoderStream(chunkToToken));
}
var AnthropicCompletion = class {

@@ -73,2 +82,3 @@ static run = run;

static streamBytes = streamBytes;
static streamTokens = streamTokens;
};

@@ -94,3 +104,3 @@ var AnthropicCompletionDecoderStream = class _AnthropicCompletionDecoderStream extends TransformStream {

}
static transformer() {
static transformer(map) {
let buffer = [];

@@ -108,4 +118,7 @@ const decoder = new TextDecoder();

const event = _AnthropicCompletionDecoderStream.parse(buffer.join(""));
if (event) {
controller.enqueue(event);
if (event && event.event === "error") {
const error = event.data.error;
controller.error(`${error.type}: ${error.message}`);
} else if (event) {
controller.enqueue(map(event));
}

@@ -116,4 +129,4 @@ buffer = [];

}
constructor() {
super({ transform: _AnthropicCompletionDecoderStream.transformer() });
constructor(map) {
super({ transform: _AnthropicCompletionDecoderStream.transformer(map) });
}

@@ -123,4 +136,3 @@ };

0 && (module.exports = {
AnthropicCompletion,
AnthropicCompletionDecoderStream
AnthropicCompletion
});

@@ -28,3 +28,18 @@ type SharedRequestOptions = {

}
/**
* Calculate text embeddings using the Cohere API.
*
* @see https://docs.cohere.com/reference/embed
*
* @param request The request body sent to Cohere. See Cohere's documentation for /v1/embed for supported parameters.
* @param options
* @param options.apiKey Cohere API key.
* @param options.apiUrl The url of the Cohere (or compatible) API. Defaults to https://api.cohere.ai/v1/embed.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns An object consisting of the text embeddings and other metadata. See Cohere's documentation for /v1/embed.
*/
declare function run(request: CohereEmbeddingTypes.Request, options: CohereEmbeddingTypes.RequestOptions): Promise<CohereEmbeddingTypes.Response>;
/**
* An object that encapsulates methods for calling the Cohere Embed API.
*/
declare class CohereEmbedding {

@@ -31,0 +46,0 @@ static run: typeof run;

@@ -26,3 +26,3 @@ "use strict";

module.exports = __toCommonJS(embedding_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");

@@ -45,3 +45,3 @@ // src/cohere/shared.ts

const url = options.apiUrl || COHERE_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -48,0 +48,0 @@ body: JSON.stringify(request),

@@ -60,5 +60,57 @@ type SharedRequestOptions = {

}
/**
* Run a generation against the Cohere API.
*
* @see https://docs.cohere.com/reference/generate
*
* @param request The request body sent to Cohere. See Cohere's documentation for /v1/generate for supported parameters.
* @param options
* @param options.apiKey Cohere API key.
* @param options.apiUrl The url of the Cohere (or compatible) API. Defaults to https://api.cohere.ai/v1/generate.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns Cohere completion. See Cohere's documentation for /v1/generate.
*/
declare function run(request: CohereGenerationTypes.Request, options: CohereGenerationTypes.RequestOptions): Promise<CohereGenerationTypes.Response>;
/**
* Run a streaming generation against the Cohere API. The resulting stream is the raw unmodified bytes from the API.
*
* @see https://docs.cohere.com/reference/generate
*
* @param request The request body sent to Cohere. See Cohere's documentation for /v1/generate for supported parameters.
* @param options
* @param options.apiKey Cohere API key.
* @param options.apiUrl The url of the Cohere (or compatible) API. Defaults to https://api.cohere.ai/v1/generate.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of bytes directly from the API.
*/
declare function streamBytes(request: CohereGenerationTypes.Request, options: CohereGenerationTypes.RequestOptions): Promise<ReadableStream<Uint8Array>>;
/**
* Run a streaming generation against the Cohere API. The resulting stream is the parsed stream data as JavaScript objects.
*
* @see https://docs.cohere.com/reference/generate
*
* @param request The request body sent to Cohere. See Cohere's documentation for /v1/generate for supported parameters.
* @param options
* @param options.apiKey Cohere API key.
* @param options.apiUrl The url of the Cohere (or compatible) API. Defaults to https://api.cohere.ai/v1/generate.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of objects representing each chunk from the API.
*/
declare function stream(request: CohereGenerationTypes.Request, options: CohereGenerationTypes.RequestOptions): Promise<ReadableStream<CohereGenerationTypes.Chunk>>;
/**
* Run a streaming generation against the Cohere API. The resulting stream emits only the string tokens.
*
* @see https://docs.cohere.com/reference/generate
*
* @param request The request body sent to Cohere. See Cohere's documentation for /v1/generate for supported parameters.
* @param options
* @param options.apiKey Cohere API key.
* @param options.apiUrl The url of the Cohere (or compatible) API. Defaults to https://api.cohere.ai/v1/generate.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of tokens from the API.
*/
declare function streamTokens(request: CohereGenerationTypes.Request, options: CohereGenerationTypes.RequestOptions): Promise<ReadableStream<string>>;
/**
* An object that encapsulates methods for calling the Cohere Generate API.
*/
declare class CohereGeneration {

@@ -68,9 +120,5 @@ static run: typeof run;

static streamBytes: typeof streamBytes;
static streamTokens: typeof streamTokens;
}
declare class CohereGenerationDecoderStream extends TransformStream<Uint8Array, CohereGenerationTypes.Chunk> {
private static parse;
private static transformer;
constructor();
}
export { CohereGeneration, CohereGenerationDecoderStream, CohereGenerationTypes };
export { CohereGeneration, CohereGenerationTypes };

@@ -23,7 +23,6 @@ "use strict";

__export(generation_exports, {
CohereGeneration: () => CohereGeneration,
CohereGenerationDecoderStream: () => CohereGenerationDecoderStream
CohereGeneration: () => CohereGeneration
});
module.exports = __toCommonJS(generation_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");

@@ -46,3 +45,3 @@ // src/cohere/shared.ts

const url = options.apiUrl || COHERE_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -56,3 +55,3 @@ body: JSON.stringify({ ...request, stream: false }),

const url = options.apiUrl || COHERE_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -63,10 +62,20 @@ body: JSON.stringify({ ...request, stream: true }),

if (!response.body) {
throw new import_utils.HttpError("Expected response body to be a ReadableStream", response);
throw new import_shared.HttpError("Expected response body to be a ReadableStream", response);
}
return response.body;
}
function noop(chunk) {
return chunk;
}
async function stream(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new CohereGenerationDecoderStream());
return byteStream.pipeThrough(new CohereGenerationDecoderStream(noop));
}
function chunkToToken(chunk) {
return chunk.text || "";
}
async function streamTokens(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new CohereGenerationDecoderStream(chunkToToken));
}
var CohereGeneration = class {

@@ -76,2 +85,3 @@ static run = run;

static streamBytes = streamBytes;
static streamTokens = streamTokens;
};

@@ -92,3 +102,3 @@ var CohereGenerationDecoderStream = class _CohereGenerationDecoderStream extends TransformStream {

}
static transformer() {
static transformer(map) {
let buffer = [];

@@ -106,3 +116,3 @@ const decoder = new TextDecoder();

if (event) {
controller.enqueue(event);
controller.enqueue(map(event));
}

@@ -113,4 +123,4 @@ buffer = [];

}
constructor() {
super({ transform: _CohereGenerationDecoderStream.transformer() });
constructor(map) {
super({ transform: _CohereGenerationDecoderStream.transformer(map) });
}

@@ -120,4 +130,3 @@ };

0 && (module.exports = {
CohereGeneration,
CohereGenerationDecoderStream
CohereGeneration
});

@@ -75,5 +75,57 @@ type SharedRequestOptions = {

}
/**
* Run a chat completion against the OpenAI API.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns OpenAI chat completion. See OpenAI's documentation for /v1/chat/completions.
*/
declare function run(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<OpenAIChatTypes.Response>;
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream is the raw unmodified bytes from the API.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of bytes directly from the API.
*/
declare function streamBytes(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<Uint8Array>>;
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream is the parsed stream data as JavaScript objects.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of objects representing each chunk from the API.
*/
declare function stream(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<OpenAIChatTypes.Chunk>>;
/**
* Run a streaming chat completion against the OpenAI API. The resulting stream emits only the string tokens.
*
* @see https://platform.openai.com/docs/api-reference/chat
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/chat/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/chat/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of tokens from the API.
*/
declare function streamTokens(request: OpenAIChatTypes.Request, options: OpenAIChatTypes.RequestOptions): Promise<ReadableStream<string>>;
/**
* An object that encapsulates methods for calling the OpenAI Chat Completion API.
*/
declare class OpenAIChat {

@@ -83,7 +135,5 @@ static run: typeof run;

static streamBytes: typeof streamBytes;
static streamTokens: typeof streamTokens;
}
declare class OpenAIChatDecoderStream extends TransformStream<Uint8Array, OpenAIChatTypes.Chunk> {
constructor();
}
export { OpenAIChat, OpenAIChatDecoderStream, OpenAIChatTypes };
export { OpenAIChat, OpenAIChatTypes };

@@ -23,7 +23,6 @@ "use strict";

__export(chat_exports, {
OpenAIChat: () => OpenAIChat,
OpenAIChatDecoderStream: () => OpenAIChatDecoderStream
OpenAIChat: () => OpenAIChat
});
module.exports = __toCommonJS(chat_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");

@@ -41,3 +40,3 @@ // src/openai/shared.ts

}
function streamTransformer() {
function streamTransformer(map) {
let buffer = [];

@@ -55,3 +54,3 @@ const decoder = new TextDecoder();

if (parsedChunk) {
controller.enqueue(parsedChunk);
controller.enqueue(map(parsedChunk));
}

@@ -85,3 +84,3 @@ buffer = [];

const url = options.apiUrl || OPENAI_CHAT_COMPLETIONS_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -95,3 +94,3 @@ body: JSON.stringify({ ...request, stream: false }),

const url = options.apiUrl || OPENAI_CHAT_COMPLETIONS_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -102,10 +101,20 @@ body: JSON.stringify({ ...request, stream: true }),

if (!response.body) {
throw new import_utils.HttpError("Expected response body to be a ReadableStream", response);
throw new import_shared.HttpError("Expected response body to be a ReadableStream", response);
}
return response.body;
}
function noop(chunk) {
return chunk;
}
async function stream(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new OpenAIChatDecoderStream());
return byteStream.pipeThrough(new OpenAIChatDecoderStream(noop));
}
function chunkToToken(chunk) {
return chunk.choices[0].delta.content || "";
}
async function streamTokens(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new OpenAIChatDecoderStream(chunkToToken));
}
var OpenAIChat = class {

@@ -115,6 +124,7 @@ static run = run;

static streamBytes = streamBytes;
static streamTokens = streamTokens;
};
var OpenAIChatDecoderStream = class extends TransformStream {
constructor() {
super({ transform: streamTransformer() });
constructor(map) {
super({ transform: streamTransformer(map) });
}

@@ -124,4 +134,3 @@ };

0 && (module.exports = {
OpenAIChat,
OpenAIChatDecoderStream
OpenAIChat
});

@@ -57,5 +57,57 @@ type SharedRequestOptions = {

}
/**
* Run a completion against the OpenAI API.
*
* @see https://platform.openai.com/docs/api-reference/completions
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns OpenAI completion. See OpenAI's documentation for /v1/completions.
*/
declare function run(request: OpenAICompletionTypes.Request, options: OpenAICompletionTypes.RequestOptions): Promise<OpenAICompletionTypes.Response>;
/**
* Run a streaming completion against the OpenAI API. The resulting stream is the raw unmodified bytes from the API.
*
* @see https://platform.openai.com/docs/api-reference/completions
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of bytes directly from the API.
*/
declare function streamBytes(request: OpenAICompletionTypes.Request, options: OpenAICompletionTypes.RequestOptions): Promise<ReadableStream<Uint8Array>>;
/**
* Run a streaming completion against the OpenAI API. The resulting stream is the parsed stream data as JavaScript objects.
*
* @see https://platform.openai.com/docs/api-reference/completions
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of objects representing each chunk from the API.
*/
declare function stream(request: OpenAICompletionTypes.Request, options: OpenAICompletionTypes.RequestOptions): Promise<ReadableStream<OpenAICompletionTypes.Chunk>>;
/**
* Run a streaming completion against the OpenAI API. The resulting stream emits only the string tokens.
*
* @see https://platform.openai.com/docs/api-reference/completions
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/completions for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/completions.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns A stream of tokens from the API.
*/
declare function streamTokens(request: OpenAICompletionTypes.Request, options: OpenAICompletionTypes.RequestOptions): Promise<ReadableStream<string>>;
/**
* An object that encapsulates methods for calling the OpenAI Completion API.
*/
declare class OpenAICompletion {

@@ -65,7 +117,5 @@ static run: typeof run;

static streamBytes: typeof streamBytes;
static streamTokens: typeof streamTokens;
}
declare class OpenAICompletionDecoderStream extends TransformStream<Uint8Array, OpenAICompletionTypes.Chunk> {
constructor();
}
export { OpenAICompletion, OpenAICompletionDecoderStream, OpenAICompletionTypes };
export { OpenAICompletion, OpenAICompletionTypes };

@@ -23,7 +23,6 @@ "use strict";

__export(completion_exports, {
OpenAICompletion: () => OpenAICompletion,
OpenAICompletionDecoderStream: () => OpenAICompletionDecoderStream
OpenAICompletion: () => OpenAICompletion
});
module.exports = __toCommonJS(completion_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");

@@ -41,3 +40,3 @@ // src/openai/shared.ts

}
function streamTransformer() {
function streamTransformer(map) {
let buffer = [];

@@ -55,3 +54,3 @@ const decoder = new TextDecoder();

if (parsedChunk) {
controller.enqueue(parsedChunk);
controller.enqueue(map(parsedChunk));
}

@@ -85,3 +84,3 @@ buffer = [];

const url = options.apiUrl || OPENAI_COMPLETIONS_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -95,3 +94,3 @@ body: JSON.stringify({ ...request, stream: false }),

const url = options.apiUrl || OPENAI_COMPLETIONS_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -102,10 +101,20 @@ body: JSON.stringify({ ...request, stream: true }),

if (!response.body) {
throw new import_utils.HttpError("Expected response body to be a ReadableStream", response);
throw new import_shared.HttpError("Expected response body to be a ReadableStream", response);
}
return response.body;
}
function noop(chunk) {
return chunk;
}
async function stream(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new OpenAICompletionDecoderStream());
return byteStream.pipeThrough(new OpenAICompletionDecoderStream(noop));
}
function chunkToToken(chunk) {
return chunk.choices[0].text || "";
}
async function streamTokens(request, options) {
const byteStream = await streamBytes(request, options);
return byteStream.pipeThrough(new OpenAICompletionDecoderStream(chunkToToken));
}
var OpenAICompletion = class {

@@ -115,6 +124,7 @@ static run = run;

static streamBytes = streamBytes;
static streamTokens = streamTokens;
};
var OpenAICompletionDecoderStream = class extends TransformStream {
constructor() {
super({ transform: streamTransformer() });
constructor(map) {
super({ transform: streamTransformer(map) });
}

@@ -124,4 +134,3 @@ };

0 && (module.exports = {
OpenAICompletion,
OpenAICompletionDecoderStream
OpenAICompletion
});

@@ -29,2 +29,14 @@ type SharedRequestOptions = {

}
/**
* Calculate text embeddings using the OpenAI API.
*
* @see https://platform.openai.com/docs/api-reference/embeddings
*
* @param request The request body sent to OpenAI. See OpenAI's documentation for /v1/embeddings for supported parameters.
* @param options
* @param options.apiKey OpenAI API key.
* @param options.apiUrl The url of the OpenAI (or compatible) API. Defaults to https://api.openai.com/v1/embeddings.
* @param options.fetch A custom implementation of fetch. Defaults to globalThis.fetch.
* @returns An object consisting of the text embeddings and other metadata. See OpenAI's documentation for /v1/embeddings.
*/
declare function run(request: OpenAIEmbeddingTypes.Request, options: OpenAIEmbeddingTypes.RequestOptions): Promise<OpenAIEmbeddingTypes.Response>;

@@ -31,0 +43,0 @@ declare class OpenAIEmbedding {

@@ -26,3 +26,3 @@ "use strict";

module.exports = __toCommonJS(embedding_exports);
var import_utils = require("@axflow/models/utils");
var import_shared = require("@axflow/models/shared");

@@ -45,3 +45,3 @@ // src/openai/shared.ts

const url = options.apiUrl || OPENAI_COMPLETIONS_API_URL;
const response = await (0, import_utils.POST)(url, {
const response = await (0, import_shared.POST)(url, {
headers: headers(options.apiKey),

@@ -48,0 +48,0 @@ body: JSON.stringify(request),

{
"name": "@axflow/models",
"version": "0.0.1-y.0",
"description": "Zero-dependency module to run, stream, and render results across the most popular LLMs and embedding models",
"version": "0.0.1",
"description": "Zero-dependency, modular SDK for building robust natural language applications",
"author": "Axilla (https://axilla.io)",

@@ -12,7 +12,10 @@ "homepage": "https://github.com/axilla-io/ax/tree/main/packages/models#readme",

"llm",
"stream",
"streaming",
"streams",
"openai",
"anthropic",
"cohere",
"vertexai",
"palm",
"llama2",
"embeddings",

@@ -22,2 +25,3 @@ "generative ai",

"models",
"useChat",
"nextjs",

@@ -49,2 +53,4 @@ "react"

"@types/jest": "^29.5.3",
"@types/react": "^18.2.21",
"@types/react-dom": "^18.2.7",
"jest": "^29.6.2",

@@ -56,2 +62,10 @@ "prettier": "^3.0.2",

},
"peerDependencies": {
"react": "^18.2.0"
},
"peerDependenciesMeta": {
"react": {
"optional": true
}
},
"typesVersions": {

@@ -77,4 +91,7 @@ "*": {

],
"utils": [
"./dist/utils/index.d.ts"
"react": [
"./dist/react/index.d.ts"
],
"shared": [
"./dist/shared/index.d.ts"
]

@@ -121,10 +138,16 @@ }

},
"./utils": {
"types": "./dist/utils/index.d.ts",
"import": "./dist/utils/index.mjs",
"module": "./dist/utils/index.mjs",
"require": "./dist/utils/index.js"
"./react": {
"types": "./dist/react/index.d.ts",
"import": "./dist/react/index.mjs",
"module": "./dist/react/index.mjs",
"require": "./dist/react/index.js"
},
"./shared": {
"types": "./dist/shared/index.d.ts",
"import": "./dist/shared/index.mjs",
"module": "./dist/shared/index.mjs",
"require": "./dist/shared/index.js"
}
},
"gitHead": "9de9a6dd04f2925515996e0020844c6cb5c8ec11"
"gitHead": "00b92806e94b8baf3206897a6f9ec5eb3e653d32"
}
# @axflow/models
Zero-dependency module to run, stream, and render results across the most popular LLMs and embedding models.
Zero-dependency, modular SDK for building robust natural language applications.

@@ -11,3 +11,4 @@ ```

* Zero-dependency, lightweight package to consume all the most popular LLMs, embedding models, and more
* Zero-dependency, modular package to consume all the most popular LLMs, embedding models, and more
* Comes with a set of React hooks for easily creating robust completion and chat components
* Built exclusively on modern web standards such as `fetch` and the stream APIs

@@ -28,2 +29,6 @@ * First-class streaming support with both low-level byte streams or higher-level JavaScript objects

## Guides
See the guides at [docs.axilla.io](https://docs.axilla.io/guides.html).
## Basic Usage

@@ -34,3 +39,3 @@

import {CohereGenerate} from '@axflow/models/cohere/generate';
import {StreamToIterable} from '@axflow/models/utils';
import {StreamToIterable} from '@axflow/models/shared';

@@ -68,5 +73,77 @@ const gpt4Stream = OpenAIChat.stream(

For models that support streaming, there is a convenience method for streaming only the string tokens.
```ts
import {OpenAIChat} from '@axflow/models/openai/chat';
const tokenStream = OpenAIChat.streamTokens(
{
model: 'gpt-4',
messages: [{ role: 'user', content: 'What is the Eiffel tower?' }],
},
{
apiKey: '<openai api key>',
},
);
// Example stdout output:
//
// The Eiffel Tower is a renowned wrought-iron landmark located in Paris, France, known globally as a symbol of romance and elegance.
//
for await (const token of tokenStream) {
process.stdout.write(token);
}
process.stdout.write("\n");
```
## `useChat` hook for dead simple UI integration
We've made building chat and completion UIs trivial. It doesn't get any easier than this 🚀
```ts
///////////////////
// On the server //
///////////////////
import { OpenAIChat } from '@axflow/models/openai/chat';
import { StreamingJsonResponse, type MessageType } from '@axflow/models/shared';
export const runtime = 'edge';
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = await OpenAIChat.streamTokens(
{
model: 'gpt-4',
messages: messages.map((msg: MessageType) => ({ role: msg.role, content: msg.content })),
},
{
apiKey: process.env.OPENAI_API_KEY!,
},
);
return new StreamingJsonResponse(stream);
}
///////////////////
// On the client //
///////////////////
import { useChat } from '@axflow/models/react';
function ChatComponent() {
const {input, messages, onChange, onSubmit} = useChat();
return (
<>
<Messages messages={messages} />
<Form input={input} onChange={onChange} onSubmit={onSubmit} />
</>
);
}
```
## Next.js edge proxy example
Sometimes you just want to create a proxy to the underlying LLM API. In this example, the server intercepts the request on the edge, adds the proper API key, and forwards the byte stream back to the client.

@@ -100,3 +177,3 @@ *Note this pattern works exactly the same with our other models that support streaming, like Cohere and Anthropic.*

import { OpenAIChat } from '@axflow/models/openai/chat';
import { StreamToIterable } from '@axflow/models/utils';
import { StreamToIterable } from '@axflow/models/shared';

@@ -123,3 +200,3 @@ const stream = await OpenAIChat.stream(

```ts
import {OpenAIChat, OpenAIChatDecoderStream} from '@axflow/models/openai/chat';
import {OpenAIChat} from '@axflow/models/openai/chat';
import type {OpenAIChatTypes} from '@axflow/models/openai/chat';

@@ -130,2 +207,3 @@

OpenAIChat.streamBytes(/* args */)
OpenAIChat.streamTokens(/* args */)
```

@@ -136,3 +214,3 @@

```ts
import {OpenAICompletion, OpenAICompletionDecoderStream} from '@axflow/models/openai/completion';
import {OpenAICompletion} from '@axflow/models/openai/completion';
import type {OpenAICompletionTypes} from '@axflow/models/openai/completion';

@@ -143,2 +221,3 @@

OpenAICompletion.streamBytes(/* args */)
OpenAICompletion.streamTokens(/* args */)
```

@@ -158,3 +237,3 @@

```ts
import {CohereGeneration, CohereGenerationDecoderStream} from '@axflow/models/cohere/generation';
import {CohereGeneration} from '@axflow/models/cohere/generation';
import type {CohereGenerationTypes} from '@axflow/models/cohere/generation';

@@ -165,2 +244,3 @@

CohereGeneration.streamBytes(/* args */)
CohereGeneration.streamTokens(/* args */)
```

@@ -180,3 +260,3 @@

```ts
import {AnthropicCompletion, AnthropicCompletionDecoderStream} from '@axflow/models/anthropic/completion';
import {AnthropicCompletion} from '@axflow/models/anthropic/completion';
import type {AnthropicCompletionTypes} from '@axflow/models/anthropic/completion';

@@ -187,8 +267,19 @@

AnthropicCompletion.streamBytes(/* args */)
AnthropicCompletion.streamTokens(/* args */)
```
### @axflow/models/utils
### @axflow/models/react
```ts
import {useChat} from '@axflow/models/react';
import type {UseChatOptionsType, UseChatResultType} from '@axflow/models/shared';
```
`useChat` is a react hook that makes building chat components a breeze.
### @axflow/models/shared
```ts
import {StreamToIterable, NdJsonStream, StreamingJsonResponse, HttpError, isHttpError} from '@axflow/models/shared';
import type {NdJsonValueType, JSONValueType, MessageType} from '@axflow/models/shared';
```

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap

Packages

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc