
llm-api npm package: comparing versions

Comparing version 1.2.0 to 1.3.2
dist/playground.js

@@ -30,3 +30,3 @@ "use strict";
 apiKey: process.env.ANTHROPIC_KEY ?? 'YOUR_client_KEY',
-}, { stream: true, temperature: 0, model: 'claude-2' });
+}, { stream: true, temperature: 0, model: 'claude-3-sonnet-20240229' });
 }

@@ -33,0 +33,0 @@ else if (process.env.AWS_BEDROCK_ACCESS_KEY &&
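The only playground change is the demo model id, bumped from the older claude-2 to claude-3-sonnet-20240229. For orientation, a minimal consumer-side sketch of the same call, using the AnthropicChatApi export and option names that appear later in this diff (illustrative, not the package's documented example):

// Hedged sketch: playground-style call after the model bump.
// AnthropicChatApi, textCompletion, and the config shape are taken from this diff.
import { AnthropicChatApi } from 'llm-api';

const client = new AnthropicChatApi(
  { apiKey: process.env.ANTHROPIC_KEY ?? 'YOUR_client_KEY' },
  { stream: true, temperature: 0, model: 'claude-3-sonnet-20240229' },
);

const res = await client.textCompletion('Hello Claude');
console.log(res.content);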

dist/src/config.d.ts

 export declare const CompletionDefaultRetries = 3;
 export declare const CompletionDefaultTimeout = 300000;
 export declare const MinimumResponseTokens = 200;
-export declare const MaximumResponseTokens = 8000;
+export declare const MaximumResponseTokens = 4096;
 export declare const DefaultOpenAIModel = "gpt-3.5-turbo";

@@ -6,0 +6,0 @@ export declare const DefaultAnthropicModel = "claude-instant-1";

dist/src/config.js

@@ -7,5 +7,5 @@ "use strict";
 exports.MinimumResponseTokens = 200;
-exports.MaximumResponseTokens = 8_000;
+exports.MaximumResponseTokens = 4_096;
 exports.DefaultOpenAIModel = 'gpt-3.5-turbo';
 exports.DefaultAnthropicModel = 'claude-instant-1';
 exports.DefaultAzureVersion = '2023-09-01-preview';
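The default response-token ceiling drops from 8,000 to 4,096 here, which lines up with the 4,096 max_tokens output limit of the Claude 3 Messages API that this release adopts. Requests can still ask for less per call; a sketch using the maximumResponseTokens option seen elsewhere in this diff (values illustrative):

// Hedged sketch: per-request override of the new 4_096 default.
import { AnthropicChatApi } from 'llm-api';

const client = new AnthropicChatApi({ apiKey: process.env.ANTHROPIC_KEY });
const res = await client.chatCompletion(
  [{ role: 'user', content: 'Summarize this change.' }],
  { maximumResponseTokens: 1024 }, // option name from this diff; 1024 is arbitrary
);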
dist/src/index.d.ts

 export * from './models/errors';
 export * from './models/openai';
-export * from './models/openai-legacy';
 export * from './models/anthropic';

@@ -5,0 +4,0 @@ export * from './models/anthropic-bedrock';

dist/src/index.d.ts.map

@@ -1,1 +0,1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,iBAAiB,CAAC;AAChC,cAAc,iBAAiB,CAAC;AAChC,cAAc,wBAAwB,CAAC;AACvC,cAAc,oBAAoB,CAAC;AACnC,cAAc,4BAA4B,CAAC;AAC3C,cAAc,oBAAoB,CAAC;AACnC,cAAc,SAAS,CAAC"}
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,iBAAiB,CAAC;AAChC,cAAc,iBAAiB,CAAC;AAChC,cAAc,oBAAoB,CAAC;AACnC,cAAc,4BAA4B,CAAC;AAC3C,cAAc,oBAAoB,CAAC;AACnC,cAAc,SAAS,CAAC"}

dist/src/index.js

@@ -19,3 +19,2 @@ "use strict";
 __exportStar(require("./models/openai"), exports);
-__exportStar(require("./models/openai-legacy"), exports);
 __exportStar(require("./models/anthropic"), exports);

@@ -22,0 +21,0 @@ __exportStar(require("./models/anthropic-bedrock"), exports);

dist/src/models/anthropic.d.ts.map

@@ -1,1 +0,1 @@
-{"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../../src/models/anthropic.ts"],"names":[],"mappings":"AAAA,OAAO,SAAsC,MAAM,mBAAmB,CAAC;AAUvE,OAAO,EACL,eAAe,EACf,kBAAkB,EAClB,WAAW,EACX,mBAAmB,EACnB,YAAY,EACb,MAAM,UAAU,CAAC;AAIlB,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAC5C,OAAO,EAAE,2BAA2B,EAAE,MAAM,aAAa,CAAC;AAW1D,qBAAa,gBAAiB,YAAW,aAAa;IACpD,MAAM,EAAE,SAAS,CAAC;IAClB,WAAW,EAAE,WAAW,CAAC;gBAEb,MAAM,CAAC,EAAE,eAAe,EAAE,WAAW,CAAC,EAAE,WAAW;IAK/D,mBAAmB,qCAA+B;IAI5C,cAAc,CAClB,eAAe,EAAE,kBAAkB,EAAE,EACrC,cAAc,CAAC,EAAE,mBAAmB,GAAG,SAAS,GAC/C,OAAO,CAAC,YAAY,CAAC;IAsJxB,cAAc,CACZ,MAAM,EAAE,MAAM,EACd,cAAc,+BAAqC,GAClD,OAAO,CAAC,YAAY,CAAC;CAIzB"}
+{"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../../src/models/anthropic.ts"],"names":[],"mappings":"AAAA,OAAO,SAAS,MAAM,mBAAmB,CAAC;AAW1C,OAAO,EACL,eAAe,EACf,kBAAkB,EAClB,WAAW,EACX,mBAAmB,EACnB,YAAY,EACb,MAAM,UAAU,CAAC;AAIlB,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAC5C,OAAO,EAAE,2BAA2B,EAAE,MAAM,aAAa,CAAC;AAS1D,qBAAa,gBAAiB,YAAW,aAAa;IACpD,MAAM,EAAE,SAAS,CAAC;IAClB,WAAW,EAAE,WAAW,CAAC;gBAEb,MAAM,CAAC,EAAE,eAAe,EAAE,WAAW,CAAC,EAAE,WAAW;IAK/D,mBAAmB,qCAA+B;IAI5C,cAAc,CAClB,eAAe,EAAE,kBAAkB,EAAE,EACrC,cAAc,CAAC,EAAE,mBAAmB,GAAG,SAAS,GAC/C,OAAO,CAAC,YAAY,CAAC;IAiJxB,cAAc,CACZ,MAAM,EAAE,MAAM,EACd,cAAc,+BAAqC,GAClD,OAAO,CAAC,YAAY,CAAC;CAIzB"}
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
__setModuleDefault(result, mod);
return result;
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicChatApi = void 0;
const sdk_1 = __importStar(require("@anthropic-ai/sdk"));
const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
const lodash_1 = require("lodash");

@@ -33,3 +13,2 @@ const config_1 = require("../config");

 const tokenizer_1 = require("./tokenizer");
-const ForbiddenTokens = [sdk_1.HUMAN_PROMPT.trim(), sdk_1.AI_PROMPT.trim()];
 const RequestDefaults = {

@@ -51,40 +30,16 @@ retries: config_1.CompletionDefaultRetries,

 const finalRequestOptions = (0, lodash_1.defaults)(requestOptions, RequestDefaults);
-const messages = (finalRequestOptions.systemMessage
-? [
-{
-role: 'system',
-content: typeof finalRequestOptions.systemMessage === 'string'
-? finalRequestOptions.systemMessage
-: finalRequestOptions.systemMessage(),
-},
-...initialMessages,
-]
-: initialMessages).map((message) => ({
-...message,
-content: message.content &&
-ForbiddenTokens.reduce((prev, token) => prev.replaceAll(token, ''), message.content),
-}));
-const prompt = messages
-.map((message) => {
-switch (message.role) {
-case 'user':
-return `${sdk_1.HUMAN_PROMPT} ${message.content}`;
-case 'assistant':
-return `${sdk_1.AI_PROMPT} ${message.content}`;
-case 'system':
-return message.content;
-default:
-throw new Error(`Anthropic models do not support message with the role ${message.role}`);
-}
-})
-.join('') +
-sdk_1.AI_PROMPT +
-(finalRequestOptions.responsePrefix
-? ` ${finalRequestOptions.responsePrefix}`
-: '');
-utils_1.debug.log(`🔼 completion requested:\n${prompt}\nconfig: ${JSON.stringify(this.modelConfig)}, options: ${JSON.stringify(finalRequestOptions)}`);
+const messages = (0, lodash_1.compact)([
+...initialMessages,
+finalRequestOptions.responsePrefix
+? {
+role: 'assistant',
+content: finalRequestOptions.responsePrefix,
+}
+: null,
+]);
+utils_1.debug.log(`🔼 completion requested: ${JSON.stringify(messages)}, config: ${JSON.stringify(this.modelConfig)}, options: ${JSON.stringify(finalRequestOptions)}`);
 const maxPromptTokens = this.modelConfig.contextSize
 ? this.modelConfig.contextSize - finalRequestOptions.minimumResponseTokens
 : 100_000;
-const messageTokens = this.getTokensFromPrompt([prompt]);
+const messageTokens = this.getTokensFromPrompt(messages.map((m) => m.content ?? ''));
 if (messageTokens > maxPromptTokens) {

@@ -101,4 +56,14 @@ throw new errors_1.TokenError('Prompt too big, not enough tokens to meet minimum response', messageTokens - maxPromptTokens);

 model: this.modelConfig.model ?? config_1.DefaultAnthropicModel,
-max_tokens_to_sample: finalRequestOptions.maximumResponseTokens,
-prompt,
+max_tokens: finalRequestOptions.maximumResponseTokens,
+system: finalRequestOptions.systemMessage
+? typeof finalRequestOptions.systemMessage === 'string'
+? finalRequestOptions.systemMessage
+: finalRequestOptions.systemMessage()
+: undefined,
+messages: messages
+.filter((m) => (m.role === 'user' || m.role === 'assistant') && m.content)
+.map((m) => ({
+role: m.role,
+content: m.content ?? '',
+})),
 };
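This hunk is the heart of the migration from Anthropic's Text Completions API to the Messages API: the hand-built prompt string and max_tokens_to_sample give way to a structured body with system, messages, and max_tokens. Side by side, the request bodies look roughly like this (a sketch with assumed values; the Human/Assistant markers are the SDK constants the old code concatenated):

// Old body shape (Text Completions, 1.2.0):
const oldBody = {
  model: 'claude-2',
  max_tokens_to_sample: 4096,
  prompt: '\n\nHuman: Hello\n\nAssistant:',
};

// New body shape (Messages API, 1.3.2):
const newBody = {
  model: 'claude-3-sonnet-20240229',
  max_tokens: 4096,
  system: 'You are a helpful assistant.', // previously inlined into the prompt
  messages: [{ role: 'user', content: 'Hello' }],
};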

@@ -110,3 +75,3 @@ const completionOptions = {

 if (this.modelConfig.stream) {
-const stream = await this.client.completions.create({
+const stream = await this.client.messages.create({
 ...completionBody,

@@ -119,6 +84,18 @@ stream: true,

 for await (const part of stream) {
-const text = part.completion;
-utils_1.debug.write(text);
-completion += text;
-finalRequestOptions?.events?.emit('data', text);
+if (part.type === 'content_block_start' &&
+part.content_block.type === 'text' &&
+part.index === 0) {
+const text = part.content_block.text;
+utils_1.debug.write(text);
+completion += text;
+finalRequestOptions?.events?.emit('data', text);
+}
+else if (part.type === 'content_block_delta' &&
+part.delta.type === 'text_delta' &&
+part.index === 0) {
+const text = part.delta.text;
+utils_1.debug.write(text);
+completion += text;
+finalRequestOptions?.events?.emit('data', text);
+}
 }
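The streaming loop changes with the event model: instead of reading part.completion off every chunk, the code now handles content_block_start (which can carry the first text) and content_block_delta events for the first content block. Callers are insulated from this; text still arrives through the events emitter used above. A consumption sketch (assuming the events request option from this diff):

import { EventEmitter } from 'events';
import { AnthropicChatApi } from 'llm-api';

// Hedged sketch: receiving streamed text chunks via the `events` option.
const events = new EventEmitter();
events.on('data', (chunk: string) => process.stdout.write(chunk));

const client = new AnthropicChatApi(
  { apiKey: process.env.ANTHROPIC_KEY },
  { stream: true, model: 'claude-3-sonnet-20240229' },
);
await client.chatCompletion([{ role: 'user', content: 'Tell me a short story.' }], { events });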

@@ -128,4 +105,4 @@ utils_1.debug.write('\n[STREAM] response end\n');

 else {
-const response = await this.client.completions.create(completionBody, completionOptions);
-completion = response.completion;
+const response = await this.client.messages.create({ ...completionBody, stream: false }, completionOptions);
+completion = response.content[0].text;
 utils_1.debug.log('🔽 completion received', completion);

@@ -132,0 +109,0 @@ }

dist/src/models/openai.js

@@ -38,3 +38,3 @@ "use strict";

 ? `${config.azureEndpoint}${config.azureEndpoint?.at(-1) === '/' ? '' : '/'}openai/deployments/${config.azureDeployment}`
-: undefined,
+: config.baseURL,
 defaultHeaders: this._isAzure

@@ -106,3 +106,3 @@ ? { 'api-key': String(config.apiKey) }

 role: 'assistant',
-content: m.content ?? null,
+content: m.content ?? '',
 tool_calls: m.toolCall ? [m.toolCall] : undefined,

@@ -113,6 +113,6 @@ }

 role: 'tool',
-content: m.content ?? null,
+content: m.content ?? '',
 tool_call_id: m.toolCallId ?? '',
 }
-: { role: m.role, content: m.content ?? null }),
+: { role: m.role, content: m.content ?? '' }),
 };

@@ -119,0 +119,0 @@ const completionOptions = {
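Both openai.js hunks swap null for an empty string as the fallback content on assistant and tool messages, while keeping the mapping from the package's toolCall/toolCallId fields onto OpenAI's tool_calls wire format. A sketch of the internal message shapes this code consumes, as implied by the hunks above (field names from this diff; values illustrative):

// Hedged sketch: history entries that map onto OpenAI tool calls.
const history = [
  {
    role: 'assistant',
    content: '', // was null before this change
    toolCall: { type: 'function', id: '1', function: { name: 'getWeather', arguments: '{"city":"SF"}' } },
  },
  { role: 'tool', content: '{"tempC":18}', toolCallId: '1' },
];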

dist/src/models/tokenizer.d.ts.map

@@ -1,1 +0,1 @@
-{"version":3,"file":"tokenizer.d.ts","sourceRoot":"","sources":["../../../src/models/tokenizer.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAIzC,wBAAgB,2BAA2B,CACzC,gBAAgB,EAAE,MAAM,EAAE,EAC1B,SAAS,CAAC,EAAE,aAAa,EAAE,UAoB5B"}
+{"version":3,"file":"tokenizer.d.ts","sourceRoot":"","sources":["../../../src/models/tokenizer.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAKzC,wBAAgB,2BAA2B,CACzC,gBAAgB,EAAE,MAAM,EAAE,EAC1B,SAAS,CAAC,EAAE,aAAa,EAAE,UAoB5B"}
package.json

 {
 "name": "llm-api",
 "description": "Fully typed chat APIs for OpenAI and Azure's chat models - with token checking and retries",
-"version": "1.2.0",
+"version": "1.3.2",
 "packageManager": "yarn@3.4.1",

@@ -42,14 +42,15 @@ "main": "dist/src/index.js",
 "test:update": "jest -u --passWithNoTests",
-"playground": "node -r tsconfig-paths/register -r ts-node/register playground"
+"playground": "tsx playground"
 },
 "dependencies": {
-"@anthropic-ai/sdk": "^0.8.1",
+"@anthropic-ai/sdk": "^0.16.1",
 "@aws-sdk/client-bedrock-runtime": "^3.427.0",
 "debug": "^4.3.4",
-"js-tiktoken": "^1.0.7",
+"js-tiktoken": "^1.0.10",
 "jsonic": "^1.0.1",
-"jsonrepair": "^3.4.0",
+"jsonrepair": "^3.6.0",
 "lodash": "^4.17.21",
-"openai": "^4.20.0",
-"type-fest": "^4.6.0"
+"openai": "^4.28.4",
+"tsx": "^4.7.1",
+"type-fest": "^4.11.0"
 },

@@ -62,13 +63,12 @@ "devDependencies": {
 "eslint": "^8.53.0",
-"eslint-config-prettier": "^8.5.0",
-"eslint-config-universe": "^11.1.1",
+"eslint-config-prettier": "^9.1.0",
+"eslint-config-universe": "^12.0.0",
 "eslint-import-resolver-typescript": "^3.3.0",
 "eslint-plugin-import": "^2.26.0",
-"eslint-plugin-prettier": "^4.2.1",
-"husky": "^8.0.3",
+"eslint-plugin-prettier": "^5.1.3",
+"husky": "^9.0.11",
 "jest": "^29.7.0",
 "lint-staged": "^15.0.2",
-"prettier": "^2.8.0",
+"prettier": "^3.2.5",
 "ts-jest": "^29.1.1",
-"ts-node": "^10.9.1",
 "typescript": "^5.2.2"

@@ -75,0 +75,0 @@ },

dist/src/models/openai-legacy.d.ts (removed between 1.2.0 and 1.3.2; file shown in full)

import 'openai/shims/web';
import { OpenAI } from 'openai';
import type { ModelRequestOptions, ModelConfig, OpenAIConfig, ChatRequestMessage, ChatResponse } from '../types';
import type { CompletionApi } from './interface';
import { getTikTokenTokensFromPrompt } from './tokenizer';
export declare class OpenAILegacyChatApi implements CompletionApi {
client: OpenAI;
_isAzure: boolean;
_headers?: Record<string, string>;
modelConfig: ModelConfig;
constructor(config: OpenAIConfig, modelConfig?: ModelConfig);
getTokensFromPrompt: typeof getTikTokenTokensFromPrompt;
chatCompletion(initialMessages: ChatRequestMessage[], requestOptions?: Partial<ModelRequestOptions>): Promise<ChatResponse>;
textCompletion(prompt: string, requestOptions?: Partial<ModelRequestOptions>): Promise<ChatResponse>;
}
//# sourceMappingURL=openai-legacy.d.ts.map
{"version":3,"file":"openai-legacy.d.ts","sourceRoot":"","sources":["../../../src/models/openai-legacy.ts"],"names":[],"mappings":"AACA,OAAO,kBAAkB,CAAC;AAE1B,OAAO,EAAE,MAAM,EAAE,MAAM,QAAQ,CAAC;AAWhC,OAAO,KAAK,EACV,mBAAmB,EACnB,WAAW,EACX,YAAY,EACZ,kBAAkB,EAClB,YAAY,EACb,MAAM,UAAU,CAAC;AAIlB,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AACjD,OAAO,EAAE,2BAA2B,EAAE,MAAM,aAAa,CAAC;AAwB1D,qBAAa,mBAAoB,YAAW,aAAa;IACvD,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,OAAO,CAAC;IAClB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAClC,WAAW,EAAE,WAAW,CAAC;gBAEb,MAAM,EAAE,YAAY,EAAE,WAAW,CAAC,EAAE,WAAW;IAsB3D,mBAAmB,qCAA+B;IAG5C,cAAc,CAClB,eAAe,EAAE,kBAAkB,EAAE,EACrC,cAAc,+BAAqC,GAClD,OAAO,CAAC,YAAY,CAAC;IA0NlB,cAAc,CAClB,MAAM,EAAE,MAAM,EACd,cAAc,+BAAqC,GAClD,OAAO,CAAC,YAAY,CAAC;CAIzB"}
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAILegacyChatApi = void 0;
require("openai/shims/web");
const lodash_1 = require("lodash");
const openai_1 = require("openai");
const config_1 = require("../config");
const utils_1 = require("../utils");
const errors_1 = require("./errors");
const tokenizer_1 = require("./tokenizer");
const RequestDefaults = {
retries: config_1.CompletionDefaultRetries,
timeout: config_1.CompletionDefaultTimeout,
minimumResponseTokens: config_1.MinimumResponseTokens,
};
const convertConfig = (config) => ({
model: config.model,
temperature: config.temperature,
top_p: config.topP,
n: 1,
presence_penalty: config.presencePenalty,
frequency_penalty: config.frequencyPenalty,
logit_bias: config.logitBias,
user: config.user,
stream: config.stream,
});
class OpenAILegacyChatApi {
client;
_isAzure;
_headers;
modelConfig;
constructor(config, modelConfig) {
this._isAzure = Boolean(config.azureEndpoint && config.azureDeployment);
this.client = new openai_1.OpenAI({
...config,
baseURL: this._isAzure
? `${config.azureEndpoint}${config.azureEndpoint?.at(-1) === '/' ? '' : '/'}openai/deployments/${config.azureDeployment}`
: undefined,
defaultHeaders: this._isAzure
? { 'api-key': String(config.apiKey) }
: undefined,
defaultQuery: this._isAzure
? {
'api-version': config.azureApiVersion ?? config_1.DefaultAzureVersion,
}
: undefined,
});
this.modelConfig = modelConfig ?? {};
}
getTokensFromPrompt = tokenizer_1.getTikTokenTokensFromPrompt;
async chatCompletion(initialMessages, requestOptions = {}) {
const finalRequestOptions = (0, lodash_1.defaults)(requestOptions, RequestDefaults);
if (finalRequestOptions.responsePrefix) {
console.warn('OpenAI models currently does not support responsePrefix');
}
const messages = finalRequestOptions.systemMessage
? [
{
role: 'system',
content: typeof finalRequestOptions.systemMessage === 'string'
? finalRequestOptions.systemMessage
: finalRequestOptions.systemMessage(),
},
...initialMessages,
]
: initialMessages;
utils_1.debug.log(`🔼 completion requested: ${JSON.stringify(messages)}, config: ${JSON.stringify(this.modelConfig)}, options: ${JSON.stringify(finalRequestOptions)}`);
const maxPromptTokens = this.modelConfig.contextSize
? this.modelConfig.contextSize - finalRequestOptions.minimumResponseTokens
: 100_000;
const messageTokens = this.getTokensFromPrompt(messages.map((m) => m.content ?? ''), finalRequestOptions.functions);
if (messageTokens > maxPromptTokens) {
throw new errors_1.TokenError('Prompt too big, not enough tokens to meet minimum response', messageTokens - maxPromptTokens);
}
const maxTokens = this.modelConfig.contextSize && finalRequestOptions.maximumResponseTokens
? Math.min(this.modelConfig.contextSize - maxPromptTokens, finalRequestOptions.maximumResponseTokens)
: undefined;
if (finalRequestOptions.maximumResponseTokens &&
!this.modelConfig.contextSize) {
console.warn('maximumResponseTokens option ignored, please set contextSize in ModelConfig so the parameter can be calculated safely');
}
let completion = '';
let functionCall;
let usage;
const completionBody = {
model: config_1.DefaultOpenAIModel,
...convertConfig(this.modelConfig),
max_tokens: maxTokens,
stop: finalRequestOptions.stop,
functions: finalRequestOptions.functions,
function_call: finalRequestOptions.callFunction
? { name: finalRequestOptions.callFunction }
: finalRequestOptions.functions
? 'auto'
: undefined,
messages: messages.map((m) => m.role === 'assistant'
? {
role: 'assistant',
content: m.content ?? null,
function_call: m.toolCall?.function,
}
: m.role === 'tool'
? {
role: 'user',
content: m.content ?? null,
}
: {
role: m.role,
content: m.content ?? null,
}),
};
const completionOptions = {
timeout: finalRequestOptions.timeout,
maxRetries: finalRequestOptions.retries,
};
if (this.modelConfig.stream) {
const stream = await this.client.chat.completions.create({ ...completionBody, stream: true }, completionOptions);
if (finalRequestOptions?.responsePrefix) {
finalRequestOptions?.events?.emit('data', finalRequestOptions.responsePrefix);
}
const functionCallStreamParts = [];
for await (const part of stream) {
const text = part.choices[0]?.delta?.content;
const call = part.choices[0]?.delta?.function_call;
if (text) {
utils_1.debug.write(text);
completion += text;
finalRequestOptions?.events?.emit('data', text);
}
else if (call) {
utils_1.debug.write(call.name
? `${call.name}: ${call.arguments}\n`
: call.arguments ?? '');
functionCallStreamParts.push(call);
}
}
if (functionCallStreamParts.length > 0) {
functionCall = functionCallStreamParts.reduce((prev, part) => ({
name: (prev.name ?? '') + (part.name ?? ''),
arguments: (prev.arguments ?? '') + (part.arguments ?? ''),
}), {});
}
utils_1.debug.write('\n[STREAM] response end\n');
}
else {
const response = await this.client.chat.completions.create({ ...completionBody, stream: false }, completionOptions);
completion = response.choices[0].message.content ?? '';
functionCall = response.choices[0].message.function_call;
usage = response.usage;
utils_1.debug.log('🔽 completion received', completion);
}
if (completion) {
const receivedMessage = {
role: 'assistant',
content: completion,
};
return {
message: receivedMessage,
content: completion,
respond: (message, opt) => this.chatCompletion([
...messages,
receivedMessage,
typeof message === 'string'
? { role: 'user', content: message }
: message,
], opt ?? requestOptions),
usage: usage
? {
totalTokens: usage.total_tokens,
promptTokens: usage.prompt_tokens,
completionTokens: usage.completion_tokens,
}
: undefined,
};
}
else if (functionCall) {
const receivedMessage = {
role: 'assistant',
content: '',
toolCall: { type: 'function', id: '1', function: functionCall },
};
return {
message: receivedMessage,
name: functionCall.name,
arguments: (0, utils_1.parseUnsafeJson)(functionCall.arguments),
respond: (message, opt) => this.chatCompletion([
...messages,
receivedMessage,
typeof message === 'string'
? { role: 'user', content: message }
: message,
], opt ?? requestOptions),
usage: usage
? {
totalTokens: usage.total_tokens,
promptTokens: usage.prompt_tokens,
completionTokens: usage.completion_tokens,
}
: undefined,
};
}
else {
throw new Error('Completion response malformed');
}
}
async textCompletion(prompt, requestOptions = {}) {
const messages = [{ role: 'user', content: prompt }];
return this.chatCompletion(messages, requestOptions);
}
}
exports.OpenAILegacyChatApi = OpenAILegacyChatApi;
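These openai-legacy files are shown whole because the entire module was dropped between 1.2.0 and 1.3.2 (its export also disappears from dist/src/index.js above). They also document the respond() helper attached to every ChatResponse: it re-runs chatCompletion with the accumulated history plus a follow-up message. Usage is the same on the surviving model classes; a sketch (OpenAIChatApi is assumed to be the non-legacy export; the model id is illustrative):

import { OpenAIChatApi } from 'llm-api';

// Hedged sketch of respond() chaining, per the ChatResponse shape shown above.
const client = new OpenAIChatApi({ apiKey: process.env.OPENAI_KEY }, { model: 'gpt-3.5-turbo' });

const first = await client.chatCompletion([{ role: 'user', content: 'Name a prime number.' }]);
console.log(first.content);

const second = await first.respond('Name a different one.');
console.log(second.content);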