openai - npm package version comparison

Comparing versions 4.54.0 and 4.55.0

_vendor/partial-json-parser/parser.d.ts


error.d.ts

@@ -57,2 +57,8 @@ import { Headers } from "./core.js";

}
export declare class LengthFinishReasonError extends OpenAIError {
constructor();
}
export declare class ContentFilterFinishReasonError extends OpenAIError {
constructor();
}
//# sourceMappingURL=error.d.ts.map


error.js
"use strict";
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
Object.defineProperty(exports, "__esModule", { value: true });
exports.InternalServerError = exports.RateLimitError = exports.UnprocessableEntityError = exports.ConflictError = exports.NotFoundError = exports.PermissionDeniedError = exports.AuthenticationError = exports.BadRequestError = exports.APIConnectionTimeoutError = exports.APIConnectionError = exports.APIUserAbortError = exports.APIError = exports.OpenAIError = void 0;
exports.ContentFilterFinishReasonError = exports.LengthFinishReasonError = exports.InternalServerError = exports.RateLimitError = exports.UnprocessableEntityError = exports.ConflictError = exports.NotFoundError = exports.PermissionDeniedError = exports.AuthenticationError = exports.BadRequestError = exports.APIConnectionTimeoutError = exports.APIConnectionError = exports.APIUserAbortError = exports.APIError = exports.OpenAIError = void 0;
const core_1 = require("./core.js");

@@ -148,2 +148,14 @@ class OpenAIError extends Error {

exports.InternalServerError = InternalServerError;
class LengthFinishReasonError extends OpenAIError {
constructor() {
super(`Could not parse response content as the length limit was reached`);
}
}
exports.LengthFinishReasonError = LengthFinishReasonError;
class ContentFilterFinishReasonError extends OpenAIError {
constructor() {
super(`Could not parse response content as the request was rejected by the content filter`);
}
}
exports.ContentFilterFinishReasonError = ContentFilterFinishReasonError;
//# sourceMappingURL=error.js.map
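
The new `LengthFinishReasonError` and `ContentFilterFinishReasonError` are thrown by the auto-parsing helpers when a choice finishes with `finish_reason: 'length'` or `'content_filter'` (see the `finish_reason` handling in `ChatCompletionStream.js` further down). A minimal sketch of catching them, assuming the classes are re-exported from the package root like the existing error types; the model name is illustrative:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  try {
    // Any helper that auto-parses output (e.g. beta.chat.completions.parse)
    // can throw these when the response is truncated or filtered.
    const completion = await client.beta.chat.completions.parse({
      model: 'gpt-4o-2024-08-06', // illustrative model name
      messages: [{ role: 'user', content: 'Extract the key points.' }],
    });
    console.log(completion.choices[0].message.content);
  } catch (err) {
    if (err instanceof OpenAI.LengthFinishReasonError) {
      // Output hit the token limit before parseable content was complete.
    } else if (err instanceof OpenAI.ContentFilterFinishReasonError) {
      // The content filter rejected the request.
    } else {
      throw err;
    }
  }
}

main();
```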

@@ -156,2 +156,3 @@ import * as Errors from "./error.js";

export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = API.ChatCompletionContentPartText;

@@ -220,2 +221,5 @@ export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption;

export import FunctionParameters = API.FunctionParameters;
export import ResponseFormatJSONObject = API.ResponseFormatJSONObject;
export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema;
export import ResponseFormatText = API.ResponseFormatText;
}

@@ -222,0 +226,0 @@ /** API Client for interfacing with the Azure OpenAI API. */


lib/AbstractChatCompletionRunner.d.ts
import * as Core from 'openai/core';
import { type CompletionUsage } from 'openai/resources/completions';
import { type Completions, type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams } from 'openai/resources/chat/completions';
import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams } from 'openai/resources/chat/completions';
import { type BaseFunctionsArgs } from "./RunnableFunction.js";

@@ -8,2 +8,4 @@ import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from "./ChatCompletionRunner.js";

import { BaseEvents, EventStream } from "./EventStream.js";
import { ParsedChatCompletion } from "../resources/beta/chat/completions.js";
import OpenAI from "../index.js";
export interface RunnerOptions extends Core.RequestOptions {

@@ -13,8 +15,8 @@ /** How many requests to make before canceling. Default 10. */

}
export declare class AbstractChatCompletionRunner<EventTypes extends AbstractChatCompletionRunnerEvents> extends EventStream<EventTypes> {
export declare class AbstractChatCompletionRunner<EventTypes extends AbstractChatCompletionRunnerEvents, ParsedT> extends EventStream<EventTypes> {
#private;
protected _chatCompletions: ChatCompletion[];
protected _chatCompletions: ParsedChatCompletion<ParsedT>[];
messages: ChatCompletionMessageParam[];
protected _addChatCompletion(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>, chatCompletion: ChatCompletion): ChatCompletion;
protected _addMessage(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>, message: ChatCompletionMessageParam, emit?: boolean): void;
protected _addChatCompletion(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>, chatCompletion: ParsedChatCompletion<ParsedT>): ParsedChatCompletion<ParsedT>;
protected _addMessage(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>, message: ChatCompletionMessageParam, emit?: boolean): void;
/**

@@ -24,3 +26,3 @@ * @returns a promise that resolves with the final ChatCompletion, or rejects

*/
finalChatCompletion(): Promise<ChatCompletion>;
finalChatCompletion(): Promise<ParsedChatCompletion<ParsedT>>;
/**

@@ -44,7 +46,7 @@ * @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects

allChatCompletions(): ChatCompletion[];
protected _emitFinal(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>): void;
protected _createChatCompletion(completions: Completions, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
protected _runChatCompletion(completions: Completions, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
protected _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(completions: Completions, params: ChatCompletionFunctionRunnerParams<FunctionsArgs> | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
protected _runTools<FunctionsArgs extends BaseFunctionsArgs>(completions: Completions, params: ChatCompletionToolRunnerParams<FunctionsArgs> | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
protected _emitFinal(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>): void;
protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
protected _runChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
protected _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionFunctionRunnerParams<FunctionsArgs> | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
protected _runTools<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionToolRunnerParams<FunctionsArgs> | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
}

@@ -51,0 +53,0 @@ export interface AbstractChatCompletionRunnerEvents extends BaseEvents {

@@ -14,2 +14,3 @@ "use strict";

const EventStream_1 = require("./EventStream.js");
const parser_1 = require("openai/lib/parser");
const DEFAULT_MAX_CHAT_COMPLETIONS = 10;

@@ -119,3 +120,3 @@ class AbstractChatCompletionRunner extends EventStream_1.EventStream {

}
async _createChatCompletion(completions, params, options) {
async _createChatCompletion(client, params, options) {
const signal = options?.signal;

@@ -128,13 +129,13 @@ if (signal) {

__classPrivateFieldGet(this, _AbstractChatCompletionRunner_instances, "m", _AbstractChatCompletionRunner_validateParams).call(this, params);
const chatCompletion = await completions.create({ ...params, stream: false }, { ...options, signal: this.controller.signal });
const chatCompletion = await client.chat.completions.create({ ...params, stream: false }, { ...options, signal: this.controller.signal });
this._connected();
return this._addChatCompletion(chatCompletion);
return this._addChatCompletion((0, parser_1.parseChatCompletion)(chatCompletion, params));
}
async _runChatCompletion(completions, params, options) {
async _runChatCompletion(client, params, options) {
for (const message of params.messages) {
this._addMessage(message, false);
}
return await this._createChatCompletion(completions, params, options);
return await this._createChatCompletion(client, params, options);
}
async _runFunctions(completions, params, options) {
async _runFunctions(client, params, options) {
const role = 'function';

@@ -157,3 +158,3 @@ const { function_call = 'auto', stream, ...restParams } = params;

for (let i = 0; i < maxChatCompletions; ++i) {
const chatCompletion = await this._createChatCompletion(completions, {
const chatCompletion = await this._createChatCompletion(client, {
...restParams,

@@ -204,3 +205,3 @@ function_call,

}
async _runTools(completions, params, options) {
async _runTools(client, params, options) {
const role = 'tool';

@@ -210,4 +211,24 @@ const { tool_choice = 'auto', stream, ...restParams } = params;

const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
// TODO(someday): clean this logic up
const inputTools = params.tools.map((tool) => {
if ((0, parser_1.isAutoParsableTool)(tool)) {
if (!tool.$callback) {
throw new error_1.OpenAIError('Tool given to `.runTools()` that does not have an associated function');
}
return {
type: 'function',
function: {
function: tool.$callback,
name: tool.function.name,
description: tool.function.description || '',
parameters: tool.function.parameters,
parse: tool.$parseRaw,
strict: true,
},
};
}
return tool;
});
const functionsByName = {};
for (const f of params.tools) {
for (const f of inputTools) {
if (f.type === 'function') {

@@ -218,3 +239,3 @@ functionsByName[f.function.name || f.function.function.name] = f.function;

const tools = 'tools' in params ?
params.tools.map((t) => t.type === 'function' ?
inputTools.map((t) => t.type === 'function' ?
{

@@ -226,2 +247,3 @@ type: 'function',

description: t.function.description,
strict: t.function.strict,
},

@@ -235,3 +257,3 @@ }

for (let i = 0; i < maxChatCompletions; ++i) {
const chatCompletion = await this._createChatCompletion(completions, {
const chatCompletion = await this._createChatCompletion(client, {
...restParams,

@@ -246,3 +268,3 @@ tool_choice,

}
if (!message.tool_calls) {
if (!message.tool_calls?.length) {
return;

@@ -257,4 +279,4 @@ }

if (!fn) {
const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${tools
.map((f) => JSON.stringify(f.function.name))
const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${Object.keys(functionsByName)
.map((name) => JSON.stringify(name))
.join(', ')}. Please try again`;

@@ -299,3 +321,7 @@ this._addMessage({ role, tool_call_id, content });

const { function_call, ...rest } = message;
const ret = { ...rest, content: message.content ?? null };
const ret = {
...rest,
content: message.content ?? null,
refusal: message.refusal ?? null,
};
if (function_call) {

@@ -327,2 +353,3 @@ ret.function_call = function_call;

message.content != null &&
typeof message.content === 'string' &&
this.messages.some((x) => x.role === 'assistant' &&

@@ -329,0 +356,0 @@ x.tool_calls?.some((y) => y.type === 'function' && y.id === message.tool_call_id))) {
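
The reworked `_runTools` above recognizes auto-parseable tools (objects carrying `$callback`/`$parseRaw`) and converts them into runnable functions with `strict: true`. A sketch of the user-facing flow, assuming the companion `zodFunction` helper in `openai/helpers/zod` (helper path and model name are assumptions):

```ts
import OpenAI from 'openai';
import { zodFunction } from 'openai/helpers/zod'; // assumed helper entry point
import { z } from 'zod';

const client = new OpenAI();

const GetWeatherArgs = z.object({ city: z.string() });

async function main() {
  const runner = client.beta.chat.completions.runTools({
    model: 'gpt-4o-2024-08-06', // illustrative model name
    messages: [{ role: 'user', content: "What's the weather in Oslo?" }],
    tools: [
      zodFunction({
        name: 'get_weather',
        parameters: GetWeatherArgs,
        // Becomes the tool's $callback; arguments arrive parsed via $parseRaw.
        function: (args) => ({ city: args.city, forecast: 'sunny, 21C' }),
      }),
    ],
  });

  const finalCompletion = await runner.finalChatCompletion();
  console.log(finalCompletion.choices[0].message.content);
}

main();
```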

@@ -43,5 +43,5 @@ import { Message, Text, ImageFile, TextDelta, Messages } from 'openai/resources/beta/threads/messages';

toReadableStream(): ReadableStream;
static createToolAssistantStream(threadId: string, runId: string, runs: Runs, body: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined): AssistantStream;
static createToolAssistantStream(threadId: string, runId: string, runs: Runs, params: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined): AssistantStream;
protected _createToolAssistantStream(run: Runs, threadId: string, runId: string, params: RunSubmitToolOutputsParamsStream, options?: Core.RequestOptions): Promise<Run>;
static createThreadAssistantStream(body: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions): AssistantStream;
static createThreadAssistantStream(params: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions): AssistantStream;
static createAssistantStream(threadId: string, runs: Runs, params: RunCreateParamsBaseStream, options?: RequestOptions): AssistantStream;

@@ -59,3 +59,3 @@ currentEvent(): AssistantStreamEvent | undefined;

protected _addRun(run: Run): Run;
protected _threadAssistantStream(body: ThreadCreateAndRunParamsBase, thread: Threads, options?: Core.RequestOptions): Promise<Run>;
protected _threadAssistantStream(params: ThreadCreateAndRunParamsBase, thread: Threads, options?: Core.RequestOptions): Promise<Run>;
protected _runAssistantStream(threadId: string, runs: Runs, params: RunCreateParamsBase, options?: Core.RequestOptions): Promise<Run>;

@@ -62,0 +62,0 @@ protected _runToolAssistantStream(threadId: string, runId: string, runs: Runs, params: RunSubmitToolOutputsParamsStream, options?: Core.RequestOptions): Promise<Run>;

@@ -142,5 +142,5 @@ "use strict";

}
static createToolAssistantStream(threadId, runId, runs, body, options) {
static createToolAssistantStream(threadId, runId, runs, params, options) {
const runner = new AssistantStream();
runner._run(() => runner._runToolAssistantStream(threadId, runId, runs, body, {
runner._run(() => runner._runToolAssistantStream(threadId, runId, runs, params, {
...options,

@@ -172,5 +172,5 @@ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },

}
static createThreadAssistantStream(body, thread, options) {
static createThreadAssistantStream(params, thread, options) {
const runner = new AssistantStream();
runner._run(() => runner._threadAssistantStream(body, thread, {
runner._run(() => runner._threadAssistantStream(params, thread, {
...options,

@@ -293,4 +293,4 @@ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },

}
async _threadAssistantStream(body, thread, options) {
return await this._createThreadAssistantStream(thread, body, options);
async _threadAssistantStream(params, thread, options) {
return await this._createThreadAssistantStream(thread, params, options);
}

@@ -297,0 +297,0 @@ async _runAssistantStream(threadId, runs, params, options) {

@@ -1,4 +0,6 @@

import { type Completions, type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';
import { type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';
import { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from "./RunnableFunction.js";
import { AbstractChatCompletionRunner, AbstractChatCompletionRunnerEvents, RunnerOptions } from "./AbstractChatCompletionRunner.js";
import OpenAI from 'openai/index';
import { AutoParseableTool } from 'openai/lib/parser';
export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {

@@ -11,10 +13,10 @@ content: (content: string) => void;

export type ChatCompletionToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<ChatCompletionCreateParamsNonStreaming, 'tools'> & {
tools: RunnableTools<FunctionsArgs>;
tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];
};
export declare class ChatCompletionRunner extends AbstractChatCompletionRunner<ChatCompletionRunnerEvents> {
export declare class ChatCompletionRunner<ParsedT = null> extends AbstractChatCompletionRunner<ChatCompletionRunnerEvents, ParsedT> {
/** @deprecated - please use `runTools` instead. */
static runFunctions(completions: Completions, params: ChatCompletionFunctionRunnerParams<any[]>, options?: RunnerOptions): ChatCompletionRunner;
static runTools(completions: Completions, params: ChatCompletionToolRunnerParams<any[]>, options?: RunnerOptions): ChatCompletionRunner;
_addMessage(this: ChatCompletionRunner, message: ChatCompletionMessageParam): void;
static runFunctions(client: OpenAI, params: ChatCompletionFunctionRunnerParams<any[]>, options?: RunnerOptions): ChatCompletionRunner<null>;
static runTools<ParsedT>(client: OpenAI, params: ChatCompletionToolRunnerParams<any[]>, options?: RunnerOptions): ChatCompletionRunner<ParsedT>;
_addMessage(this: ChatCompletionRunner<ParsedT>, message: ChatCompletionMessageParam): void;
}
//# sourceMappingURL=ChatCompletionRunner.d.ts.map

@@ -8,3 +8,3 @@ "use strict";

/** @deprecated - please use `runTools` instead. */
static runFunctions(completions, params, options) {
static runFunctions(client, params, options) {
const runner = new ChatCompletionRunner();

@@ -15,6 +15,6 @@ const opts = {

};
runner._run(() => runner._runFunctions(completions, params, opts));
runner._run(() => runner._runFunctions(client, params, opts));
return runner;
}
static runTools(completions, params, options) {
static runTools(client, params, options) {
const runner = new ChatCompletionRunner();

@@ -25,3 +25,3 @@ const opts = {

};
runner._run(() => runner._runTools(completions, params, opts));
runner._run(() => runner._runTools(client, params, opts));
return runner;

@@ -28,0 +28,0 @@ }

import * as Core from 'openai/core';
import { Completions, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionCreateParams, type ChatCompletionCreateParamsBase } from 'openai/resources/chat/completions';
import { ChatCompletionTokenLogprob, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionCreateParams, type ChatCompletionCreateParamsBase } from 'openai/resources/chat/completions';
import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents } from "./AbstractChatCompletionRunner.js";
import { type ReadableStream } from 'openai/_shims/index';
export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
import OpenAI from 'openai/index';
import { ParsedChatCompletion } from 'openai/resources/beta/chat/completions';
export interface ContentDeltaEvent {
delta: string;
snapshot: string;
parsed: unknown | null;
}
export interface ContentDoneEvent<ParsedT = null> {
content: string;
parsed: ParsedT | null;
}
export interface RefusalDeltaEvent {
delta: string;
snapshot: string;
}
export interface RefusalDoneEvent {
refusal: string;
}
export interface FunctionToolCallArgumentsDeltaEvent {
name: string;
index: number;
arguments: string;
parsed_arguments: unknown;
arguments_delta: string;
}
export interface FunctionToolCallArgumentsDoneEvent {
name: string;
index: number;
arguments: string;
parsed_arguments: unknown;
}
export interface LogProbsContentDeltaEvent {
content: Array<ChatCompletionTokenLogprob>;
snapshot: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsContentDoneEvent {
content: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsRefusalDeltaEvent {
refusal: Array<ChatCompletionTokenLogprob>;
snapshot: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsRefusalDoneEvent {
refusal: Array<ChatCompletionTokenLogprob>;
}
export interface ChatCompletionStreamEvents<ParsedT = null> extends AbstractChatCompletionRunnerEvents {
content: (contentDelta: string, contentSnapshot: string) => void;
chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
'content.delta': (props: ContentDeltaEvent) => void;
'content.done': (props: ContentDoneEvent<ParsedT>) => void;
'refusal.delta': (props: RefusalDeltaEvent) => void;
'refusal.done': (props: RefusalDoneEvent) => void;
'tool_calls.function.arguments.delta': (props: FunctionToolCallArgumentsDeltaEvent) => void;
'tool_calls.function.arguments.done': (props: FunctionToolCallArgumentsDoneEvent) => void;
'logprobs.content.delta': (props: LogProbsContentDeltaEvent) => void;
'logprobs.content.done': (props: LogProbsContentDoneEvent) => void;
'logprobs.refusal.delta': (props: LogProbsRefusalDeltaEvent) => void;
'logprobs.refusal.done': (props: LogProbsRefusalDoneEvent) => void;
}

@@ -12,4 +67,5 @@ export type ChatCompletionStreamParams = Omit<ChatCompletionCreateParamsBase, 'stream'> & {

};
export declare class ChatCompletionStream extends AbstractChatCompletionRunner<ChatCompletionStreamEvents> implements AsyncIterable<ChatCompletionChunk> {
export declare class ChatCompletionStream<ParsedT = null> extends AbstractChatCompletionRunner<ChatCompletionStreamEvents<ParsedT>, ParsedT> implements AsyncIterable<ChatCompletionChunk> {
#private;
constructor(params: ChatCompletionCreateParams | null);
get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined;

@@ -23,7 +79,7 @@ /**

*/
static fromReadableStream(stream: ReadableStream): ChatCompletionStream;
static createChatCompletion(completions: Completions, params: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream;
protected _createChatCompletion(completions: Completions, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
static fromReadableStream(stream: ReadableStream): ChatCompletionStream<null>;
static createChatCompletion<ParsedT>(client: OpenAI, params: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream<ParsedT>;
protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
protected _fromReadableStream(readableStream: ReadableStream, options?: Core.RequestOptions): Promise<ChatCompletion>;
[Symbol.asyncIterator](this: ChatCompletionStream): AsyncIterator<ChatCompletionChunk>;
[Symbol.asyncIterator](this: ChatCompletionStream<ParsedT>): AsyncIterator<ChatCompletionChunk>;
toReadableStream(): ReadableStream;

@@ -93,2 +149,4 @@ }

content?: string | null;
refusal?: string | null;
parsed?: unknown | null;
/**

@@ -110,8 +168,8 @@ * The name and arguments of a function that should be called, as generated by the

*/
id?: string;
function?: ToolCall.Function;
id: string;
function: ToolCall.Function;
/**
* The type of the tool.
*/
type?: 'function';
type: 'function';
}

@@ -126,7 +184,8 @@ namespace ToolCall {

*/
arguments?: string;
arguments: string;
parsed_arguments?: unknown;
/**
* The name of the function to call.
*/
name?: string;
name: string;
}

@@ -133,0 +192,0 @@ }
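
The expanded `ChatCompletionStreamEvents` interface above adds granular `content.*`, `refusal.*`, `tool_calls.function.arguments.*`, and `logprobs.*` events. A sketch of subscribing to a few of them via `client.beta.chat.completions.stream()`, assuming the `zodResponseFormat` helper in `openai/helpers/zod` so that the `parsed` snapshots are populated (helper path, schema, and model name are illustrative):

```ts
import OpenAI from 'openai';
import { zodResponseFormat } from 'openai/helpers/zod'; // assumed helper entry point
import { z } from 'zod';

const client = new OpenAI();

const MathResponse = z.object({
  steps: z.array(z.object({ explanation: z.string(), output: z.string() })),
  final_answer: z.string(),
});

async function main() {
  const stream = client.beta.chat.completions
    .stream({
      model: 'gpt-4o-2024-08-06', // illustrative model name
      messages: [{ role: 'user', content: 'Solve 8x + 31 = 2' }],
      response_format: zodResponseFormat(MathResponse, 'math_response'),
    })
    .on('content.delta', ({ delta, parsed }) => {
      // `parsed` is a partial object built by the vendored partial-JSON parser.
      console.log('delta:', delta, 'partial:', parsed);
    })
    .on('content.done', ({ parsed }) => {
      console.log('final parsed object:', parsed);
    })
    .on('refusal.done', ({ refusal }) => {
      console.warn('model refused:', refusal);
    });

  await stream.finalChatCompletion();
}

main();
```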

"use strict";
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {

@@ -13,3 +8,8 @@ if (kind === "m") throw new TypeError("Private method is not writable");

};
var _ChatCompletionStream_instances, _ChatCompletionStream_currentChatCompletionSnapshot, _ChatCompletionStream_beginRequest, _ChatCompletionStream_addChunk, _ChatCompletionStream_endRequest, _ChatCompletionStream_accumulateChatCompletion;
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
var _ChatCompletionStream_instances, _ChatCompletionStream_params, _ChatCompletionStream_choiceEventStates, _ChatCompletionStream_currentChatCompletionSnapshot, _ChatCompletionStream_beginRequest, _ChatCompletionStream_getChoiceEventState, _ChatCompletionStream_addChunk, _ChatCompletionStream_emitToolCallDoneEvent, _ChatCompletionStream_emitContentDoneEvents, _ChatCompletionStream_endRequest, _ChatCompletionStream_getAutoParseableResponseFormat, _ChatCompletionStream_accumulateChatCompletion;
Object.defineProperty(exports, "__esModule", { value: true });

@@ -20,7 +20,13 @@ exports.ChatCompletionStream = void 0;

const streaming_1 = require("openai/streaming");
const parser_1 = require("openai/lib/parser");
const parser_2 = require("../_vendor/partial-json-parser/parser.js");
class ChatCompletionStream extends AbstractChatCompletionRunner_1.AbstractChatCompletionRunner {
constructor() {
super(...arguments);
constructor(params) {
super();
_ChatCompletionStream_instances.add(this);
_ChatCompletionStream_params.set(this, void 0);
_ChatCompletionStream_choiceEventStates.set(this, void 0);
_ChatCompletionStream_currentChatCompletionSnapshot.set(this, void 0);
__classPrivateFieldSet(this, _ChatCompletionStream_params, params, "f");
__classPrivateFieldSet(this, _ChatCompletionStream_choiceEventStates, [], "f");
}

@@ -38,12 +44,13 @@ get currentChatCompletionSnapshot() {

static fromReadableStream(stream) {
const runner = new ChatCompletionStream();
const runner = new ChatCompletionStream(null);
runner._run(() => runner._fromReadableStream(stream));
return runner;
}
static createChatCompletion(completions, params, options) {
const runner = new ChatCompletionStream();
runner._run(() => runner._runChatCompletion(completions, { ...params, stream: true }, { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } }));
static createChatCompletion(client, params, options) {
const runner = new ChatCompletionStream(params);
runner._run(() => runner._runChatCompletion(client, { ...params, stream: true }, { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } }));
return runner;
}
async _createChatCompletion(completions, params, options) {
async _createChatCompletion(client, params, options) {
super._createChatCompletion;
const signal = options?.signal;

@@ -56,3 +63,3 @@ if (signal) {

__classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_beginRequest).call(this);
const stream = await completions.create({ ...params, stream: true }, { ...options, signal: this.controller.signal });
const stream = await client.chat.completions.create({ ...params, stream: true }, { ...options, signal: this.controller.signal });
this._connected();

@@ -91,6 +98,21 @@ for await (const chunk of stream) {

}
[(_ChatCompletionStream_currentChatCompletionSnapshot = new WeakMap(), _ChatCompletionStream_instances = new WeakSet(), _ChatCompletionStream_beginRequest = function _ChatCompletionStream_beginRequest() {
[(_ChatCompletionStream_params = new WeakMap(), _ChatCompletionStream_choiceEventStates = new WeakMap(), _ChatCompletionStream_currentChatCompletionSnapshot = new WeakMap(), _ChatCompletionStream_instances = new WeakSet(), _ChatCompletionStream_beginRequest = function _ChatCompletionStream_beginRequest() {
if (this.ended)
return;
__classPrivateFieldSet(this, _ChatCompletionStream_currentChatCompletionSnapshot, undefined, "f");
}, _ChatCompletionStream_getChoiceEventState = function _ChatCompletionStream_getChoiceEventState(choice) {
let state = __classPrivateFieldGet(this, _ChatCompletionStream_choiceEventStates, "f")[choice.index];
if (state) {
return state;
}
state = {
content_done: false,
refusal_done: false,
logprobs_content_done: false,
logprobs_refusal_done: false,
done_tool_calls: new Set(),
current_tool_call_index: null,
};
__classPrivateFieldGet(this, _ChatCompletionStream_choiceEventStates, "f")[choice.index] = state;
return state;
}, _ChatCompletionStream_addChunk = function _ChatCompletionStream_addChunk(chunk) {

@@ -101,7 +123,119 @@ if (this.ended)

this._emit('chunk', chunk, completion);
const delta = chunk.choices[0]?.delta?.content;
const snapshot = completion.choices[0]?.message;
if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) {
this._emit('content', delta, snapshot.content);
for (const choice of chunk.choices) {
const choiceSnapshot = completion.choices[choice.index];
if (choice.delta.content != null &&
choiceSnapshot.message?.role === 'assistant' &&
choiceSnapshot.message?.content) {
this._emit('content', choice.delta.content, choiceSnapshot.message.content);
this._emit('content.delta', {
delta: choice.delta.content,
snapshot: choiceSnapshot.message.content,
parsed: choiceSnapshot.message.parsed,
});
}
if (choice.delta.refusal != null &&
choiceSnapshot.message?.role === 'assistant' &&
choiceSnapshot.message?.refusal) {
this._emit('refusal.delta', {
delta: choice.delta.refusal,
snapshot: choiceSnapshot.message.refusal,
});
}
if (choice.logprobs?.content != null && choiceSnapshot.message?.role === 'assistant') {
this._emit('logprobs.content.delta', {
content: choice.logprobs?.content,
snapshot: choiceSnapshot.logprobs?.content ?? [],
});
}
if (choice.logprobs?.refusal != null && choiceSnapshot.message?.role === 'assistant') {
this._emit('logprobs.refusal.delta', {
refusal: choice.logprobs?.refusal,
snapshot: choiceSnapshot.logprobs?.refusal ?? [],
});
}
const state = __classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_getChoiceEventState).call(this, choiceSnapshot);
if (choiceSnapshot.finish_reason) {
__classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_emitContentDoneEvents).call(this, choiceSnapshot);
if (state.current_tool_call_index != null) {
__classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_emitToolCallDoneEvent).call(this, choiceSnapshot, state.current_tool_call_index);
}
}
for (const toolCall of choice.delta.tool_calls ?? []) {
if (state.current_tool_call_index !== toolCall.index) {
__classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_emitContentDoneEvents).call(this, choiceSnapshot);
// new tool call started, the previous one is done
if (state.current_tool_call_index != null) {
__classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_emitToolCallDoneEvent).call(this, choiceSnapshot, state.current_tool_call_index);
}
}
state.current_tool_call_index = toolCall.index;
}
for (const toolCallDelta of choice.delta.tool_calls ?? []) {
const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallDelta.index];
if (!toolCallSnapshot?.type) {
continue;
}
if (toolCallSnapshot?.type === 'function') {
this._emit('tool_calls.function.arguments.delta', {
name: toolCallSnapshot.function?.name,
index: toolCallDelta.index,
arguments: toolCallSnapshot.function.arguments,
parsed_arguments: toolCallSnapshot.function.parsed_arguments,
arguments_delta: toolCallDelta.function?.arguments ?? '',
});
}
else {
assertNever(toolCallSnapshot?.type);
}
}
}
}, _ChatCompletionStream_emitToolCallDoneEvent = function _ChatCompletionStream_emitToolCallDoneEvent(choiceSnapshot, toolCallIndex) {
const state = __classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_getChoiceEventState).call(this, choiceSnapshot);
if (state.done_tool_calls.has(toolCallIndex)) {
// we've already fired the done event
return;
}
const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallIndex];
if (!toolCallSnapshot) {
throw new Error('no tool call snapshot');
}
if (!toolCallSnapshot.type) {
throw new Error('tool call snapshot missing `type`');
}
if (toolCallSnapshot.type === 'function') {
const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name);
this._emit('tool_calls.function.arguments.done', {
name: toolCallSnapshot.function.name,
index: toolCallIndex,
arguments: toolCallSnapshot.function.arguments,
parsed_arguments: (0, parser_1.isAutoParsableTool)(inputTool) ? inputTool.$parseRaw(toolCallSnapshot.function.arguments)
: inputTool?.function.strict ? JSON.parse(toolCallSnapshot.function.arguments)
: null,
});
}
else {
assertNever(toolCallSnapshot.type);
}
}, _ChatCompletionStream_emitContentDoneEvents = function _ChatCompletionStream_emitContentDoneEvents(choiceSnapshot) {
const state = __classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_getChoiceEventState).call(this, choiceSnapshot);
if (choiceSnapshot.message.content && !state.content_done) {
state.content_done = true;
const responseFormat = __classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_getAutoParseableResponseFormat).call(this);
this._emit('content.done', {
content: choiceSnapshot.message.content,
parsed: responseFormat ? responseFormat.$parseRaw(choiceSnapshot.message.content) : null,
});
}
if (choiceSnapshot.message.refusal && !state.refusal_done) {
state.refusal_done = true;
this._emit('refusal.done', { refusal: choiceSnapshot.message.refusal });
}
if (choiceSnapshot.logprobs?.content && !state.logprobs_content_done) {
state.logprobs_content_done = true;
this._emit('logprobs.content.done', { content: choiceSnapshot.logprobs.content });
}
if (choiceSnapshot.logprobs?.refusal && !state.logprobs_refusal_done) {
state.logprobs_refusal_done = true;
this._emit('logprobs.refusal.done', { refusal: choiceSnapshot.logprobs.refusal });
}
}, _ChatCompletionStream_endRequest = function _ChatCompletionStream_endRequest() {

@@ -116,5 +250,12 @@ if (this.ended) {

__classPrivateFieldSet(this, _ChatCompletionStream_currentChatCompletionSnapshot, undefined, "f");
return finalizeChatCompletion(snapshot);
__classPrivateFieldSet(this, _ChatCompletionStream_choiceEventStates, [], "f");
return finalizeChatCompletion(snapshot, __classPrivateFieldGet(this, _ChatCompletionStream_params, "f"));
}, _ChatCompletionStream_getAutoParseableResponseFormat = function _ChatCompletionStream_getAutoParseableResponseFormat() {
const responseFormat = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.response_format;
if ((0, parser_1.isAutoParsableResponseFormat)(responseFormat)) {
return responseFormat;
}
return null;
}, _ChatCompletionStream_accumulateChatCompletion = function _ChatCompletionStream_accumulateChatCompletion(chunk) {
var _a, _b, _c;
var _a, _b, _c, _d;
let snapshot = __classPrivateFieldGet(this, _ChatCompletionStream_currentChatCompletionSnapshot, "f");

@@ -141,3 +282,4 @@ const { choices, ...rest } = chunk;

else {
const { content, ...rest } = logprobs;
const { content, refusal, ...rest } = logprobs;
assertIsEmpty(rest);
Object.assign(choice.logprobs, rest);

@@ -148,13 +290,28 @@ if (content) {

}
if (refusal) {
(_b = choice.logprobs).refusal ?? (_b.refusal = []);
choice.logprobs.refusal.push(...refusal);
}
}
}
if (finish_reason)
if (finish_reason) {
choice.finish_reason = finish_reason;
if (__classPrivateFieldGet(this, _ChatCompletionStream_params, "f") && (0, parser_1.hasAutoParseableInput)(__classPrivateFieldGet(this, _ChatCompletionStream_params, "f"))) {
if (finish_reason === 'length') {
throw new error_1.LengthFinishReasonError();
}
if (finish_reason === 'content_filter') {
throw new error_1.ContentFilterFinishReasonError();
}
}
}
Object.assign(choice, other);
if (!delta)
continue; // Shouldn't happen; just in case.
const { content, function_call, role, tool_calls, ...rest } = delta;
const { content, refusal, function_call, role, tool_calls, ...rest } = delta;
assertIsEmpty(rest);
Object.assign(choice.message, rest);
if (content)
choice.message.content = (choice.message.content || '') + content;
if (refusal) {
choice.message.refusal = (choice.message.refusal || '') + refusal;
}
if (role)

@@ -170,3 +327,3 @@ choice.message.role = role;

if (function_call.arguments) {
(_b = choice.message.function_call).arguments ?? (_b.arguments = '');
(_c = choice.message.function_call).arguments ?? (_c.arguments = '');
choice.message.function_call.arguments += function_call.arguments;

@@ -176,2 +333,8 @@ }

}
if (content) {
choice.message.content = (choice.message.content || '') + content;
if (!choice.message.refusal && __classPrivateFieldGet(this, _ChatCompletionStream_instances, "m", _ChatCompletionStream_getAutoParseableResponseFormat).call(this)) {
choice.message.parsed = (0, parser_2.partialParse)(choice.message.content);
}
}
if (tool_calls) {

@@ -181,3 +344,3 @@ if (!choice.message.tool_calls)

for (const { index, id, type, function: fn, ...rest } of tool_calls) {
const tool_call = ((_c = choice.message.tool_calls)[index] ?? (_c[index] = {}));
const tool_call = ((_d = choice.message.tool_calls)[index] ?? (_d[index] = {}));
Object.assign(tool_call, rest);

@@ -189,7 +352,11 @@ if (id)

if (fn)
tool_call.function ?? (tool_call.function = { arguments: '' });
tool_call.function ?? (tool_call.function = { name: fn.name ?? '', arguments: '' });
if (fn?.name)
tool_call.function.name = fn.name;
if (fn?.arguments)
if (fn?.arguments) {
tool_call.function.arguments += fn.arguments;
if ((0, parser_1.shouldParseToolCall)(__classPrivateFieldGet(this, _ChatCompletionStream_params, "f"), tool_call)) {
tool_call.function.parsed_arguments = (0, parser_2.partialParse)(tool_call.function.arguments);
}
}
}

@@ -256,23 +423,32 @@ }

exports.ChatCompletionStream = ChatCompletionStream;
function finalizeChatCompletion(snapshot) {
function finalizeChatCompletion(snapshot, params) {
const { id, choices, created, model, system_fingerprint, ...rest } = snapshot;
return {
const completion = {
...rest,
id,
choices: choices.map(({ message, finish_reason, index, logprobs, ...choiceRest }) => {
if (!finish_reason)
if (!finish_reason) {
throw new error_1.OpenAIError(`missing finish_reason for choice ${index}`);
}
const { content = null, function_call, tool_calls, ...messageRest } = message;
const role = message.role; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.
if (!role)
if (!role) {
throw new error_1.OpenAIError(`missing role for choice ${index}`);
}
if (function_call) {
const { arguments: args, name } = function_call;
if (args == null)
if (args == null) {
throw new error_1.OpenAIError(`missing function_call.arguments for choice ${index}`);
if (!name)
}
if (!name) {
throw new error_1.OpenAIError(`missing function_call.name for choice ${index}`);
}
return {
...choiceRest,
message: { content, function_call: { arguments: args, name }, role },
message: {
content,
function_call: { arguments: args, name },
role,
refusal: message.refusal ?? null,
},
finish_reason,

@@ -293,13 +469,18 @@ index,

content,
refusal: message.refusal ?? null,
tool_calls: tool_calls.map((tool_call, i) => {
const { function: fn, type, id, ...toolRest } = tool_call;
const { arguments: args, name, ...fnRest } = fn || {};
if (id == null)
if (id == null) {
throw new error_1.OpenAIError(`missing choices[${index}].tool_calls[${i}].id\n${str(snapshot)}`);
if (type == null)
}
if (type == null) {
throw new error_1.OpenAIError(`missing choices[${index}].tool_calls[${i}].type\n${str(snapshot)}`);
if (name == null)
}
if (name == null) {
throw new error_1.OpenAIError(`missing choices[${index}].tool_calls[${i}].function.name\n${str(snapshot)}`);
if (args == null)
}
if (args == null) {
throw new error_1.OpenAIError(`missing choices[${index}].tool_calls[${i}].function.arguments\n${str(snapshot)}`);
}
return { ...toolRest, id, type, function: { ...fnRest, name, arguments: args } };

@@ -312,3 +493,3 @@ }),

...choiceRest,
message: { ...messageRest, content, role },
message: { ...messageRest, content, role, refusal: message.refusal ?? null },
finish_reason,

@@ -324,2 +505,3 @@ index,

};
return (0, parser_1.maybeParseChatCompletion)(completion, params);
}

@@ -329,2 +511,11 @@ function str(x) {

}
/**
* Ensures the given argument is an empty object, useful for
* asserting that all known properties on an object have been
* destructured.
*/
function assertIsEmpty(obj) {
return;
}
function assertNever(_x) { }
//# sourceMappingURL=ChatCompletionStream.js.map

@@ -1,2 +0,2 @@

import { Completions, type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions';
import { type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions';
import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from "./AbstractChatCompletionRunner.js";

@@ -6,2 +6,4 @@ import { type ReadableStream } from 'openai/_shims/index';

import { ChatCompletionSnapshot, ChatCompletionStream } from "./ChatCompletionStream.js";
import OpenAI from 'openai/index';
import { AutoParseableTool } from 'openai/lib/parser';
export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {

@@ -15,10 +17,10 @@ content: (contentDelta: string, contentSnapshot: string) => void;

export type ChatCompletionStreamingToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<ChatCompletionCreateParamsStreaming, 'tools'> & {
tools: RunnableTools<FunctionsArgs>;
tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];
};
export declare class ChatCompletionStreamingRunner extends ChatCompletionStream implements AsyncIterable<ChatCompletionChunk> {
static fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner;
export declare class ChatCompletionStreamingRunner<ParsedT = null> extends ChatCompletionStream<ParsedT> implements AsyncIterable<ChatCompletionChunk> {
static fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner<null>;
/** @deprecated - please use `runTools` instead. */
static runFunctions<T extends (string | object)[]>(completions: Completions, params: ChatCompletionStreamingFunctionRunnerParams<T>, options?: RunnerOptions): ChatCompletionStreamingRunner;
static runTools<T extends (string | object)[]>(completions: Completions, params: ChatCompletionStreamingToolRunnerParams<T>, options?: RunnerOptions): ChatCompletionStreamingRunner;
static runFunctions<T extends (string | object)[]>(client: OpenAI, params: ChatCompletionStreamingFunctionRunnerParams<T>, options?: RunnerOptions): ChatCompletionStreamingRunner<null>;
static runTools<T extends (string | object)[], ParsedT = null>(client: OpenAI, params: ChatCompletionStreamingToolRunnerParams<T>, options?: RunnerOptions): ChatCompletionStreamingRunner<ParsedT>;
}
//# sourceMappingURL=ChatCompletionStreamingRunner.d.ts.map

@@ -7,3 +7,3 @@ "use strict";

static fromReadableStream(stream) {
const runner = new ChatCompletionStreamingRunner();
const runner = new ChatCompletionStreamingRunner(null);
runner._run(() => runner._fromReadableStream(stream));

@@ -13,4 +13,4 @@ return runner;

/** @deprecated - please use `runTools` instead. */
static runFunctions(completions, params, options) {
const runner = new ChatCompletionStreamingRunner();
static runFunctions(client, params, options) {
const runner = new ChatCompletionStreamingRunner(null);
const opts = {

@@ -20,7 +20,9 @@ ...options,

};
runner._run(() => runner._runFunctions(completions, params, opts));
runner._run(() => runner._runFunctions(client, params, opts));
return runner;
}
static runTools(completions, params, options) {
const runner = new ChatCompletionStreamingRunner();
static runTools(client, params, options) {
const runner = new ChatCompletionStreamingRunner(
// @ts-expect-error TODO these types are incompatible
params);
const opts = {

@@ -30,3 +32,3 @@ ...options,

};
runner._run(() => runner._runTools(completions, params, opts));
runner._run(() => runner._runTools(client, params, opts));
return runner;

@@ -33,0 +35,0 @@ }

@@ -11,3 +11,3 @@ import { type ChatCompletionRunner } from "./ChatCompletionRunner.js";

*/
function: (args: Args, runner: ChatCompletionRunner | ChatCompletionStreamingRunner) => PromiseOrValue<unknown>;
function: (args: Args, runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>) => PromiseOrValue<unknown>;
/**

@@ -30,2 +30,3 @@ * @param input the raw args from the OpenAI function call.

name?: string | undefined;
strict?: boolean | undefined;
};

@@ -37,3 +38,3 @@ export type RunnableFunctionWithoutParse = {

*/
function: (args: string, runner: ChatCompletionRunner | ChatCompletionStreamingRunner) => PromiseOrValue<unknown>;
function: (args: string, runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>) => PromiseOrValue<unknown>;
/**

@@ -51,2 +52,3 @@ * The parameters the function accepts, describes as a JSON Schema object.

name?: string | undefined;
strict?: boolean | undefined;
};

@@ -53,0 +55,0 @@ export type RunnableFunction<Args extends object | string> = Args extends string ? RunnableFunctionWithoutParse : Args extends object ? RunnableFunctionWithParse<Args> : never;

{
"name": "openai",
"version": "4.54.0",
"version": "4.55.0",
"description": "The official TypeScript library for the OpenAI API",

@@ -103,3 +103,11 @@ "author": "OpenAI <support@openai.com>",

},
"bin": "./bin/cli"
"bin": "./bin/cli",
"peerDependencies": {
"zod": "^3.23.8"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
}
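
`zod` becomes an optional peer dependency: the new parsing helpers derive schemas and output types from it, while plain client usage keeps working without it installed. A sketch of the split:

```ts
import OpenAI from 'openai'; // core client: no zod required
import { z } from 'zod';     // only needed for the zod-based helpers
                             // (install with: npm install zod)
```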

@@ -22,3 +22,3 @@ # OpenAI Node API Library

```ts
import OpenAI from 'https://deno.land/x/openai@v4.54.0/mod.ts';
import OpenAI from 'https://deno.land/x/openai@v4.55.0/mod.ts';
```

@@ -25,0 +25,0 @@

@@ -92,2 +92,7 @@ import { APIResource } from "../../resource.js";

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -504,4 +509,4 @@ * message the model generates is valid JSON.

* The maximum number of results the file search tool should output. The default is
* 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
* and 50 inclusive.
* 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
* 1 and 50 inclusive.
*

@@ -864,2 +869,7 @@ * Note that the file search tool may output fewer than `max_num_results` results.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1034,2 +1044,7 @@ * message the model generates is valid JSON.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1036,0 +1051,0 @@ * message the model generates is valid JSON.

@@ -38,3 +38,2 @@ import { APIResource } from "../../resource.js";

export import Threads = ThreadsAPI.Threads;
export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;

@@ -41,0 +40,0 @@ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;

@@ -14,9 +14,29 @@ import * as Core from "../../../core.js";

import { ChatCompletionStream, type ChatCompletionStreamParams } from "../../../lib/ChatCompletionStream.js";
import { ChatCompletion, ChatCompletionCreateParamsNonStreaming, ChatCompletionMessage, ChatCompletionMessageToolCall } from "../../chat/completions.js";
import { ExtractParsedContentFromParams } from "../../../lib/parser.js";
export { ChatCompletionStream, type ChatCompletionStreamParams } from "../../../lib/ChatCompletionStream.js";
export interface ParsedFunction extends ChatCompletionMessageToolCall.Function {
parsed_arguments?: unknown;
}
export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall {
function: ParsedFunction;
}
export interface ParsedChatCompletionMessage<ParsedT> extends ChatCompletionMessage {
parsed: ParsedT | null;
tool_calls: Array<ParsedFunctionToolCall>;
}
export interface ParsedChoice<ParsedT> extends ChatCompletion.Choice {
message: ParsedChatCompletionMessage<ParsedT>;
}
export interface ParsedChatCompletion<ParsedT> extends ChatCompletion {
choices: Array<ParsedChoice<ParsedT>>;
}
export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming;
export declare class Completions extends APIResource {
parse<Params extends ChatCompletionParseParams, ParsedT = ExtractParsedContentFromParams<Params>>(body: Params, options?: Core.RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
/**
* @deprecated - use `runTools` instead.
*/
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionFunctionRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionRunner;
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionStreamingRunner;
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionFunctionRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionRunner<null>;
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionStreamingRunner<null>;
/**

@@ -31,9 +51,9 @@ * A convenience helper for using tool calls with the /chat/completions endpoint

*/
runTools<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionToolRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionRunner;
runTools<FunctionsArgs extends BaseFunctionsArgs>(body: ChatCompletionStreamingToolRunnerParams<FunctionsArgs>, options?: Core.RequestOptions): ChatCompletionStreamingRunner;
runTools<Params extends ChatCompletionToolRunnerParams<any>, ParsedT = ExtractParsedContentFromParams<Params>>(body: Params, options?: Core.RequestOptions): ChatCompletionRunner<ParsedT>;
runTools<Params extends ChatCompletionStreamingToolRunnerParams<any>, ParsedT = ExtractParsedContentFromParams<Params>>(body: Params, options?: Core.RequestOptions): ChatCompletionStreamingRunner<ParsedT>;
/**
* Creates a chat completion stream
*/
stream(body: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream;
stream<Params extends ChatCompletionStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(body: Params, options?: Core.RequestOptions): ChatCompletionStream<ParsedT>;
}
//# sourceMappingURL=completions.d.ts.map
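
The new `parse()` method above returns a `ParsedChatCompletion` whose `message.parsed` field is typed through `ExtractParsedContentFromParams`. A minimal sketch, assuming the `zodResponseFormat` helper (helper path, schema, and model name are illustrative):

```ts
import OpenAI from 'openai';
import { zodResponseFormat } from 'openai/helpers/zod'; // assumed helper entry point
import { z } from 'zod';

const client = new OpenAI();

const CalendarEvent = z.object({
  name: z.string(),
  date: z.string(),
  participants: z.array(z.string()),
});

async function main() {
  const completion = await client.beta.chat.completions.parse({
    model: 'gpt-4o-2024-08-06', // illustrative model name
    messages: [
      { role: 'system', content: 'Extract the event information.' },
      { role: 'user', content: 'Alice and Bob are going to a science fair on Friday.' },
    ],
    response_format: zodResponseFormat(CalendarEvent, 'event'),
  });

  // Typed as z.infer<typeof CalendarEvent> | null via ExtractParsedContentFromParams.
  const event = completion.choices[0].message.parsed;
  console.log(event?.participants);
}

main();
```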

@@ -16,16 +16,28 @@ "use strict";

const ChatCompletionStream_1 = require("../../../lib/ChatCompletionStream.js");
const parser_1 = require("../../../lib/parser.js");
var ChatCompletionStream_2 = require("../../../lib/ChatCompletionStream.js");
Object.defineProperty(exports, "ChatCompletionStream", { enumerable: true, get: function () { return ChatCompletionStream_2.ChatCompletionStream; } });
class Completions extends resource_1.APIResource {
async parse(body, options) {
(0, parser_1.validateInputTools)(body.tools);
const completion = await this._client.chat.completions.create(body, {
...options,
headers: {
...options?.headers,
'X-Stainless-Helper-Method': 'beta.chat.completions.parse',
},
});
return (0, parser_1.parseChatCompletion)(completion, body);
}
runFunctions(body, options) {
if (body.stream) {
return ChatCompletionStreamingRunner_1.ChatCompletionStreamingRunner.runFunctions(this._client.chat.completions, body, options);
return ChatCompletionStreamingRunner_1.ChatCompletionStreamingRunner.runFunctions(this._client, body, options);
}
return ChatCompletionRunner_1.ChatCompletionRunner.runFunctions(this._client.chat.completions, body, options);
return ChatCompletionRunner_1.ChatCompletionRunner.runFunctions(this._client, body, options);
}
runTools(body, options) {
if (body.stream) {
return ChatCompletionStreamingRunner_1.ChatCompletionStreamingRunner.runTools(this._client.chat.completions, body, options);
return ChatCompletionStreamingRunner_1.ChatCompletionStreamingRunner.runTools(this._client, body, options);
}
return ChatCompletionRunner_1.ChatCompletionRunner.runTools(this._client.chat.completions, body, options);
return ChatCompletionRunner_1.ChatCompletionRunner.runTools(this._client, body, options);
}

@@ -36,3 +48,3 @@ /**

stream(body, options) {
return ChatCompletionStream_1.ChatCompletionStream.createChatCompletion(this._client.chat.completions, body, options);
return ChatCompletionStream_1.ChatCompletionStream.createChatCompletion(this._client, body, options);
}

@@ -39,0 +51,0 @@ }

export { Assistant, AssistantDeleted, AssistantStreamEvent, AssistantTool, CodeInterpreterTool, FileSearchTool, FunctionTool, MessageStreamEvent, RunStepStreamEvent, RunStreamEvent, ThreadStreamEvent, AssistantCreateParams, AssistantUpdateParams, AssistantListParams, AssistantsPage, Assistants, } from "./assistants.js";
export { AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, AssistantToolChoiceOption, Thread, ThreadDeleted, ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from "./threads/index.js";
export { AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, AssistantToolChoiceOption, Thread, ThreadDeleted, ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from "./threads/index.js";
export { Beta } from "./beta.js";

@@ -4,0 +4,0 @@ export { Chat } from "./chat/index.js";

@@ -1,4 +0,4 @@

export { Annotation, AnnotationDelta, FileCitationAnnotation, FileCitationDeltaAnnotation, FilePathAnnotation, FilePathDeltaAnnotation, ImageFile, ImageFileContentBlock, ImageFileDelta, ImageFileDeltaBlock, ImageURL, ImageURLContentBlock, ImageURLDelta, ImageURLDeltaBlock, Message, MessageContent, MessageContentDelta, MessageContentPartParam, MessageDeleted, MessageDelta, MessageDeltaEvent, Text, TextContentBlock, TextContentBlockParam, TextDelta, TextDeltaBlock, MessageCreateParams, MessageUpdateParams, MessageListParams, MessagesPage, Messages, } from "./messages.js";
export { AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, AssistantToolChoiceOption, Thread, ThreadDeleted, ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from "./threads.js";
export { Annotation, AnnotationDelta, FileCitationAnnotation, FileCitationDeltaAnnotation, FilePathAnnotation, FilePathDeltaAnnotation, ImageFile, ImageFileContentBlock, ImageFileDelta, ImageFileDeltaBlock, ImageURL, ImageURLContentBlock, ImageURLDelta, ImageURLDeltaBlock, Message, MessageContent, MessageContentDelta, MessageContentPartParam, MessageDeleted, MessageDelta, MessageDeltaEvent, RefusalContentBlock, RefusalDeltaBlock, Text, TextContentBlock, TextContentBlockParam, TextDelta, TextDeltaBlock, MessageCreateParams, MessageUpdateParams, MessageListParams, MessagesPage, Messages, } from "./messages.js";
export { AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, AssistantToolChoiceOption, Thread, ThreadDeleted, ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from "./threads.js";
export { RequiredActionFunctionToolCall, Run, RunStatus, RunCreateParams, RunCreateParamsNonStreaming, RunCreateParamsStreaming, RunUpdateParams, RunListParams, RunCreateAndPollParams, RunCreateAndStreamParams, RunStreamParams, RunSubmitToolOutputsParams, RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming, RunSubmitToolOutputsAndPollParams, RunSubmitToolOutputsStreamParams, RunsPage, Runs, } from "./runs/index.js";
//# sourceMappingURL=index.d.ts.map

@@ -361,3 +361,3 @@ import { APIResource } from "../../../resource.js";

*/
export type MessageContent = ImageFileContentBlock | ImageURLContentBlock | TextContentBlock;
export type MessageContent = ImageFileContentBlock | ImageURLContentBlock | TextContentBlock | RefusalContentBlock;
/**

@@ -367,3 +367,3 @@ * References an image [File](https://platform.openai.com/docs/api-reference/files)

*/
export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock | ImageURLDeltaBlock;
export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock | RefusalDeltaBlock | ImageURLDeltaBlock;
/**

@@ -410,2 +410,26 @@ * References an image [File](https://platform.openai.com/docs/api-reference/files)

}
/**
* The refusal content generated by the assistant.
*/
export interface RefusalContentBlock {
refusal: string;
/**
* Always `refusal`.
*/
type: 'refusal';
}
/**
* The refusal content that is part of a message.
*/
export interface RefusalDeltaBlock {
/**
* The index of the refusal part in the message.
*/
index: number;
/**
* Always `refusal`.
*/
type: 'refusal';
refusal?: string;
}
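// Illustrative sketch (not part of the package diff): one way a consumer could
// handle the widened MessageContent union now that `refusal` blocks may appear
// alongside text and image blocks. The `renderContent` helper is hypothetical.
function renderContent(content: MessageContent): string {
    switch (content.type) {
        case 'text':
            return content.text.value;
        case 'refusal':
            return `[assistant refused: ${content.refusal}]`;
        case 'image_file':
        case 'image_url':
            return '[image]';
    }
}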
export interface Text {

@@ -557,2 +581,4 @@ annotations: Array<Annotation>;

export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
export import RefusalContentBlock = MessagesAPI.RefusalContentBlock;
export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock;
export import Text = MessagesAPI.Text;

@@ -559,0 +585,0 @@ export import TextContentBlock = MessagesAPI.TextContentBlock;

@@ -221,2 +221,7 @@ import { APIResource } from "../../../../resource.js";

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -443,2 +448,7 @@ * message the model generates is valid JSON.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -445,0 +455,0 @@ * message the model generates is valid JSON.

@@ -6,2 +6,3 @@ import { APIResource } from "../../../resource.js";

import * as ThreadsAPI from "./threads.js";
import * as Shared from "../../shared.js";
import * as AssistantsAPI from "../assistants.js";

@@ -52,11 +53,12 @@ import * as ChatAPI from "../../chat/chat.js";

/**
* An object describing the expected output of the model. If `json_object` only
* `function` type `tools` are allowed to be passed to the Run. If `text` the model
* can return text or any value needed.
* An object describing the expected output of the model. If `json_object` or
* `json_schema`, only `function` type `tools` are allowed to be passed to the Run.
* If `text` the model can return text or any value needed.
*/
export interface AssistantResponseFormat {
/**
* Must be one of `text` or `json_object`.
* Must be one of `text`, `json_object` or `json_schema`.
*/
type?: 'text' | 'json_object';
type?: 'text' | 'json_object' | 'json_schema';
}

@@ -69,2 +71,7 @@ /**

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -81,3 +88,3 @@ * message the model generates is valid JSON.

*/
export type AssistantResponseFormatOption = 'none' | 'auto' | AssistantResponseFormat;
export type AssistantResponseFormatOption = 'auto' | Shared.ResponseFormatText | Shared.ResponseFormatJSONObject | Shared.ResponseFormatJSONSchema;
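// Illustrative sketch: with AssistantResponseFormat removed, a run's response
// format is now either 'auto' or one of the shared response-format objects.
// The variable names and schema below are hypothetical.
const plain: AssistantResponseFormatOption = 'auto';
const structured: AssistantResponseFormatOption = {
    type: 'json_schema',
    json_schema: { name: 'answer', schema: { type: 'object' }, strict: true },
};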
/**

@@ -441,2 +448,7 @@ * Specifies a tool the model should use. Use to force the model to call a specific

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1282,3 +1294,2 @@ * message the model generates is valid JSON.

export declare namespace Threads {
export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;

@@ -1337,2 +1348,4 @@ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;

export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
export import RefusalContentBlock = MessagesAPI.RefusalContentBlock;
export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock;
export import Text = MessagesAPI.Text;

@@ -1339,0 +1352,0 @@ export import TextContentBlock = MessagesAPI.TextContentBlock;

@@ -113,3 +113,3 @@ import { APIResource } from "../../../resource.js";

*/
code: 'internal_error' | 'file_not_found' | 'parsing_error' | 'unhandled_mime_type';
code: 'server_error' | 'unsupported_file' | 'invalid_file';
/**

@@ -116,0 +116,0 @@ * A human-readable description of the error.

@@ -7,3 +7,3 @@ import { APIResource } from "../../resource.js";

}
export type ChatModel = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613';
export type ChatModel = 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613';
export declare namespace Chat {

@@ -17,2 +17,3 @@ export import ChatModel = ChatAPI.ChatModel;

export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText;

@@ -19,0 +20,0 @@ export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption;

@@ -93,2 +93,6 @@ import { APIResource } from "../../resource.js";

content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
/**
* A list of message refusal tokens with log probability information.
*/
refusal: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
}

@@ -106,3 +110,3 @@ }

*/
content?: string | null;
content?: string | Array<ChatCompletionContentPartText | ChatCompletionContentPartRefusal> | null;
/**

@@ -119,2 +123,6 @@ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of

/**
* The refusal message by the assistant.
*/
refusal?: string | null;
/**
* The tool calls generated by the model, such as function calls.

@@ -229,2 +237,6 @@ */

/**
* The refusal message generated by the model.
*/
refusal?: string | null;
/**
* The role of the author of this message.

@@ -289,2 +301,6 @@ */

content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
/**
* A list of message refusal tokens with log probability information.
*/
refusal: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
}

@@ -314,2 +330,12 @@ }

}
export interface ChatCompletionContentPartRefusal {
/**
* The refusal message generated by the model.
*/
refusal: string;
/**
* The type of the content part.
*/
type: 'refusal';
}
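// Illustrative sketch: replaying a prior assistant turn whose `content` mixes
// text and refusal parts, as the widened assistant message param now allows.
// Assumes ChatCompletionAssistantMessageParam is imported from this module.
const assistantTurn: ChatCompletionAssistantMessageParam = {
    role: 'assistant',
    content: [
        { type: 'text', text: 'Here is the part I can answer.' },
        { type: 'refusal', refusal: 'I cannot help with the rest.' },
    ],
};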
export interface ChatCompletionContentPartText {

@@ -361,2 +387,6 @@ /**

/**
* The refusal message generated by the model.
*/
refusal: string | null;
/**
* The role of the author of this message.

@@ -466,3 +496,3 @@ */

*/
content: string;
content: string | Array<ChatCompletionContentPartText>;
/**

@@ -547,3 +577,3 @@ * The role of the messages author, in this case `system`.

*/
content: string;
content: string | Array<ChatCompletionContentPartText>;
/**

@@ -666,2 +696,4 @@ * The role of the messages author, in this case `tool`.

* An object specifying the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
* [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and

@@ -681,3 +713,3 @@ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

*/
response_format?: ChatCompletionCreateParams.ResponseFormat;
response_format?: Shared.ResponseFormatText | Shared.ResponseFormatJSONObject | Shared.ResponseFormatJSONSchema;
/**

@@ -795,24 +827,2 @@ * This feature is in Beta. If specified, our system will make a best effort to

}
/**
* An object specifying the format that the model must output. Compatible with
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
* limit, resulting in a long-running and seemingly "stuck" request. Also note that
* the message content may be partially cut off if `finish_reason="length"`, which
* indicates the generation exceeded `max_tokens` or the conversation exceeded the
* max context length.
*/
interface ResponseFormat {
/**
* Must be one of `text` or `json_object`.
*/
type?: 'text' | 'json_object';
}
type ChatCompletionCreateParamsNonStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming;

@@ -861,2 +871,3 @@ type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming;

export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText;

@@ -863,0 +874,0 @@ export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption;

@@ -1,3 +0,3 @@

export { ChatCompletion, ChatCompletionAssistantMessageParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam, CreateChatCompletionRequestMessage, ChatCompletionCreateParams, CompletionCreateParams, ChatCompletionCreateParamsNonStreaming, CompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, CompletionCreateParamsStreaming, Completions, } from "./completions.js";
export { ChatCompletion, ChatCompletionAssistantMessageParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam, CreateChatCompletionRequestMessage, ChatCompletionCreateParams, CompletionCreateParams, ChatCompletionCreateParamsNonStreaming, CompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, CompletionCreateParamsStreaming, Completions, } from "./completions.js";
export { ChatModel, Chat } from "./chat.js";
//# sourceMappingURL=index.d.ts.map

@@ -221,5 +221,5 @@ import { APIResource } from "../../../resource.js";

* The name of the model to fine-tune. You can select one of the
* [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
* [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
*/
model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo';
model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini';
/**

@@ -262,3 +262,3 @@ * The ID of an uploaded file that contains training data.

* For example, a `suffix` of "custom-model-name" would produce a model name like
* `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
* `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
*/

@@ -265,0 +265,0 @@ suffix?: string | null;

@@ -28,2 +28,10 @@ export interface ErrorObject {

parameters?: FunctionParameters;
/**
* Whether to enable strict schema adherence when generating the function call. If
* set to true, the model will follow the exact schema defined in the `parameters`
* field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
* more about Structured Outputs in the
* [function calling guide](docs/guides/function-calling).
*/
strict?: boolean | null;
}
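// Illustrative sketch: a function definition opting into Structured Outputs via
// the new `strict` flag (assuming the surrounding FunctionDefinition interface
// from shared.d.ts). The tool name and schema are made up.
const getWeather: FunctionDefinition = {
    name: 'get_weather',
    description: 'Look up the current weather for a city',
    parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
        additionalProperties: false,
    },
    strict: true,
};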

@@ -40,2 +48,47 @@ /**

export type FunctionParameters = Record<string, unknown>;
export interface ResponseFormatJSONObject {
/**
* The type of response format being defined: `json_object`
*/
type: 'json_object';
}
export interface ResponseFormatJSONSchema {
json_schema: ResponseFormatJSONSchema.JSONSchema;
/**
* The type of response format being defined: `json_schema`
*/
type: 'json_schema';
}
export declare namespace ResponseFormatJSONSchema {
interface JSONSchema {
/**
* The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
* and dashes, with a maximum length of 64.
*/
name: string;
/**
* A description of what the response format is for, used by the model to determine
* how to respond in the format.
*/
description?: string;
/**
* The schema for the response format, described as a JSON Schema object.
*/
schema?: Record<string, unknown>;
/**
* Whether to enable strict schema adherence when generating the output. If set to
* true, the model will always follow the exact schema defined in the `schema`
* field. Only a subset of JSON Schema is supported when `strict` is `true`. To
* learn more, read the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*/
strict?: boolean | null;
}
}
export interface ResponseFormatText {
/**
* The type of response format being defined: `text`
*/
type: 'text';
}
//# sourceMappingURL=shared.d.ts.map
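// Illustrative sketch: a value matching the new ResponseFormatJSONSchema shape,
// suitable for passing as `response_format`. The schema contents are a made-up
// example, not part of the package.
const personFormat: ResponseFormatJSONSchema = {
    type: 'json_schema',
    json_schema: {
        name: 'person',
        description: 'A single person record',
        schema: {
            type: 'object',
            properties: { name: { type: 'string' }, age: { type: 'number' } },
            required: ['name', 'age'],
            additionalProperties: false,
        },
        strict: true,
    },
};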

@@ -159,1 +159,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

export class InternalServerError extends APIError {}
export class LengthFinishReasonError extends OpenAIError {
constructor() {
super(`Could not parse response content as the length limit was reached`);
}
}
export class ContentFilterFinishReasonError extends OpenAIError {
constructor() {
super(`Could not parse response content as the request was rejected by the content filter`);
}
}
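// Illustrative sketch: how a caller might distinguish the two new errors when
// requesting parsed output. Assumes `client` is an OpenAI instance and `params`
// is a chat-completion request with an auto-parseable response_format.
try {
    const completion = await client.beta.chat.completions.parse(params);
    console.log(completion.choices[0]?.message.parsed);
} catch (err) {
    if (err instanceof LengthFinishReasonError) {
        // generation hit the token limit; retry with a higher max_tokens
    } else if (err instanceof ContentFilterFinishReasonError) {
        // the request was rejected by the content filter; surface that to the user
    } else {
        throw err;
    }
}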

@@ -251,2 +251,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = API.ChatCompletionContentPartText;

@@ -326,2 +327,5 @@ export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption;

export import FunctionParameters = API.FunctionParameters;
export import ResponseFormatJSONObject = API.ResponseFormatJSONObject;
export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema;
export import ResponseFormatText = API.ResponseFormatText;
}

@@ -328,0 +332,0 @@

import * as Core from "../core";
import { type CompletionUsage } from "../resources/completions";
import {
type Completions,
type ChatCompletion,

@@ -16,2 +15,3 @@ type ChatCompletionMessage,

type BaseFunctionsArgs,
RunnableToolFunction,
} from './RunnableFunction';

@@ -25,2 +25,5 @@ import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from './ChatCompletionRunner';

import { BaseEvents, EventStream } from './EventStream';
import { ParsedChatCompletion } from '../resources/beta/chat/completions';
import OpenAI from '../index';
import { isAutoParsableTool, parseChatCompletion } from "./parser";

@@ -35,10 +38,11 @@ const DEFAULT_MAX_CHAT_COMPLETIONS = 10;

EventTypes extends AbstractChatCompletionRunnerEvents,
ParsedT,
> extends EventStream<EventTypes> {
protected _chatCompletions: ChatCompletion[] = [];
protected _chatCompletions: ParsedChatCompletion<ParsedT>[] = [];
messages: ChatCompletionMessageParam[] = [];
protected _addChatCompletion(
this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>,
chatCompletion: ChatCompletion,
): ChatCompletion {
this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,
chatCompletion: ParsedChatCompletion<ParsedT>,
): ParsedChatCompletion<ParsedT> {
this._chatCompletions.push(chatCompletion);

@@ -52,3 +56,3 @@ this._emit('chatCompletion', chatCompletion);

protected _addMessage(
this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>,
this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,
message: ChatCompletionMessageParam,

@@ -82,3 +86,3 @@ emit = true,

*/
async finalChatCompletion(): Promise<ChatCompletion> {
async finalChatCompletion(): Promise<ParsedChatCompletion<ParsedT>> {
await this.done();

@@ -109,3 +113,7 @@ const completion = this._chatCompletions[this._chatCompletions.length - 1];

const { function_call, ...rest } = message;
const ret: ChatCompletionMessage = { ...rest, content: message.content ?? null };
const ret: ChatCompletionMessage = {
...rest,
content: (message as ChatCompletionMessage).content ?? null,
refusal: (message as ChatCompletionMessage).refusal ?? null,
};
if (function_call) {

@@ -161,2 +169,3 @@ ret.function_call = function_call;

message.content != null &&
typeof message.content === 'string' &&
this.messages.some(

@@ -205,3 +214,5 @@ (x) =>

protected override _emitFinal(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents>) {
protected override _emitFinal(
this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,
) {
const completion = this._chatCompletions[this._chatCompletions.length - 1];

@@ -234,6 +245,6 @@ if (completion) this._emit('finalChatCompletion', completion);

protected async _createChatCompletion(
completions: Completions,
client: OpenAI,
params: ChatCompletionCreateParams,
options?: Core.RequestOptions,
): Promise<ChatCompletion> {
): Promise<ParsedChatCompletion<ParsedT>> {
const signal = options?.signal;

@@ -246,3 +257,3 @@ if (signal) {

const chatCompletion = await completions.create(
const chatCompletion = await client.chat.completions.create(
{ ...params, stream: false },

@@ -252,7 +263,7 @@ { ...options, signal: this.controller.signal },

this._connected();
return this._addChatCompletion(chatCompletion);
return this._addChatCompletion(parseChatCompletion(chatCompletion, params));
}
protected async _runChatCompletion(
completions: Completions,
client: OpenAI,
params: ChatCompletionCreateParams,

@@ -264,7 +275,7 @@ options?: Core.RequestOptions,

}
return await this._createChatCompletion(completions, params, options);
return await this._createChatCompletion(client, params, options);
}
protected async _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
completions: Completions,
client: OpenAI,
params:

@@ -299,3 +310,3 @@ | ChatCompletionFunctionRunnerParams<FunctionsArgs>

const chatCompletion: ChatCompletion = await this._createChatCompletion(
completions,
client,
{

@@ -355,3 +366,3 @@ ...restParams,

protected async _runTools<FunctionsArgs extends BaseFunctionsArgs>(
completions: Completions,
client: OpenAI,
params:

@@ -367,4 +378,27 @@ | ChatCompletionToolRunnerParams<FunctionsArgs>

// TODO(someday): clean this logic up
const inputTools = params.tools.map((tool): RunnableToolFunction<any> => {
if (isAutoParsableTool(tool)) {
if (!tool.$callback) {
throw new OpenAIError('Tool given to `.runTools()` that does not have an associated function');
}
return {
type: 'function',
function: {
function: tool.$callback,
name: tool.function.name,
description: tool.function.description || '',
parameters: tool.function.parameters as any,
parse: tool.$parseRaw,
strict: true,
},
};
}
return tool as any as RunnableToolFunction<any>;
});
const functionsByName: Record<string, RunnableFunction<any>> = {};
for (const f of params.tools) {
for (const f of inputTools) {
if (f.type === 'function') {

@@ -377,3 +411,3 @@ functionsByName[f.function.name || f.function.function.name] = f.function;

'tools' in params ?
params.tools.map((t) =>
inputTools.map((t) =>
t.type === 'function' ?

@@ -386,2 +420,3 @@ {

description: t.function.description,
strict: t.function.strict,
},

@@ -399,3 +434,3 @@ }

const chatCompletion: ChatCompletion = await this._createChatCompletion(
completions,
client,
{

@@ -413,3 +448,3 @@ ...restParams,

}
if (!message.tool_calls) {
if (!message.tool_calls?.length) {
return;

@@ -425,4 +460,6 @@ }

if (!fn) {
const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${tools
.map((f) => JSON.stringify(f.function.name))
const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${Object.keys(
functionsByName,
)
.map((name) => JSON.stringify(name))
.join(', ')}. Please try again`;

@@ -429,0 +466,0 @@

@@ -194,3 +194,3 @@ import {

runs: Runs,
body: RunSubmitToolOutputsParamsStream,
params: RunSubmitToolOutputsParamsStream,
options: RequestOptions | undefined,

@@ -200,3 +200,3 @@ ) {

runner._run(() =>
runner._runToolAssistantStream(threadId, runId, runs, body, {
runner._runToolAssistantStream(threadId, runId, runs, params, {
...options,

@@ -241,3 +241,3 @@ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },

static createThreadAssistantStream(
body: ThreadCreateAndRunParamsBaseStream,
params: ThreadCreateAndRunParamsBaseStream,
thread: Threads,

@@ -248,3 +248,3 @@ options?: RequestOptions,

runner._run(() =>
runner._threadAssistantStream(body, thread, {
runner._threadAssistantStream(params, thread, {
...options,

@@ -730,7 +730,7 @@ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },

protected async _threadAssistantStream(
body: ThreadCreateAndRunParamsBase,
params: ThreadCreateAndRunParamsBase,
thread: Threads,
options?: Core.RequestOptions,
): Promise<Run> {
return await this._createThreadAssistantStream(thread, body, options);
return await this._createThreadAssistantStream(thread, params, options);
}

@@ -737,0 +737,0 @@

import {
type Completions,
type ChatCompletionMessageParam,

@@ -13,2 +12,4 @@ type ChatCompletionCreateParamsNonStreaming,

import { isAssistantMessage } from './chatCompletionUtils';
import OpenAI from "../index";
import { AutoParseableTool } from "./parser";

@@ -30,12 +31,15 @@ export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {

> & {
tools: RunnableTools<FunctionsArgs>;
tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];
};
export class ChatCompletionRunner extends AbstractChatCompletionRunner<ChatCompletionRunnerEvents> {
export class ChatCompletionRunner<ParsedT = null> extends AbstractChatCompletionRunner<
ChatCompletionRunnerEvents,
ParsedT
> {
/** @deprecated - please use `runTools` instead. */
static runFunctions(
completions: Completions,
client: OpenAI,
params: ChatCompletionFunctionRunnerParams<any[]>,
options?: RunnerOptions,
): ChatCompletionRunner {
): ChatCompletionRunner<null> {
const runner = new ChatCompletionRunner();

@@ -46,12 +50,12 @@ const opts = {

};
runner._run(() => runner._runFunctions(completions, params, opts));
runner._run(() => runner._runFunctions(client, params, opts));
return runner;
}
static runTools(
completions: Completions,
static runTools<ParsedT>(
client: OpenAI,
params: ChatCompletionToolRunnerParams<any[]>,
options?: RunnerOptions,
): ChatCompletionRunner {
const runner = new ChatCompletionRunner();
): ChatCompletionRunner<ParsedT> {
const runner = new ChatCompletionRunner<ParsedT>();
const opts = {

@@ -61,7 +65,7 @@ ...options,

};
runner._run(() => runner._runTools(completions, params, opts));
runner._run(() => runner._runTools(client, params, opts));
return runner;
}
override _addMessage(this: ChatCompletionRunner, message: ChatCompletionMessageParam) {
override _addMessage(this: ChatCompletionRunner<ParsedT>, message: ChatCompletionMessageParam) {
super._addMessage(message);

@@ -68,0 +72,0 @@ if (isAssistantMessage(message) && message.content) {

import * as Core from "../core";
import { OpenAIError, APIUserAbortError } from "../error";
import {
Completions,
OpenAIError,
APIUserAbortError,
LengthFinishReasonError,
ContentFilterFinishReasonError,
} from "../error";
import {
ChatCompletionTokenLogprob,
type ChatCompletion,
type ChatCompletionChunk,
type ChatCompletionCreateParams,
type ChatCompletionCreateParamsStreaming,
type ChatCompletionCreateParamsBase,

@@ -16,6 +22,92 @@ } from "../resources/chat/completions";

import { Stream } from "../streaming";
import OpenAI from "../index";
import { ParsedChatCompletion } from "../resources/beta/chat/completions";
import {
AutoParseableResponseFormat,
hasAutoParseableInput,
isAutoParsableResponseFormat,
isAutoParsableTool,
maybeParseChatCompletion,
shouldParseToolCall,
} from "./parser";
import { partialParse } from '../_vendor/partial-json-parser/parser';
export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
export interface ContentDeltaEvent {
delta: string;
snapshot: string;
parsed: unknown | null;
}
export interface ContentDoneEvent<ParsedT = null> {
content: string;
parsed: ParsedT | null;
}
export interface RefusalDeltaEvent {
delta: string;
snapshot: string;
}
export interface RefusalDoneEvent {
refusal: string;
}
export interface FunctionToolCallArgumentsDeltaEvent {
name: string;
index: number;
arguments: string;
parsed_arguments: unknown;
arguments_delta: string;
}
export interface FunctionToolCallArgumentsDoneEvent {
name: string;
index: number;
arguments: string;
parsed_arguments: unknown;
}
export interface LogProbsContentDeltaEvent {
content: Array<ChatCompletionTokenLogprob>;
snapshot: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsContentDoneEvent {
content: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsRefusalDeltaEvent {
refusal: Array<ChatCompletionTokenLogprob>;
snapshot: Array<ChatCompletionTokenLogprob>;
}
export interface LogProbsRefusalDoneEvent {
refusal: Array<ChatCompletionTokenLogprob>;
}
export interface ChatCompletionStreamEvents<ParsedT = null> extends AbstractChatCompletionRunnerEvents {
content: (contentDelta: string, contentSnapshot: string) => void;
chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
'content.delta': (props: ContentDeltaEvent) => void;
'content.done': (props: ContentDoneEvent<ParsedT>) => void;
'refusal.delta': (props: RefusalDeltaEvent) => void;
'refusal.done': (props: RefusalDoneEvent) => void;
'tool_calls.function.arguments.delta': (props: FunctionToolCallArgumentsDeltaEvent) => void;
'tool_calls.function.arguments.done': (props: FunctionToolCallArgumentsDoneEvent) => void;
'logprobs.content.delta': (props: LogProbsContentDeltaEvent) => void;
'logprobs.content.done': (props: LogProbsContentDoneEvent) => void;
'logprobs.refusal.delta': (props: LogProbsRefusalDeltaEvent) => void;
'logprobs.refusal.done': (props: LogProbsRefusalDoneEvent) => void;
}
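// Illustrative sketch: wiring up the new granular stream events declared above.
// Assumes `client` is an OpenAI instance; the model and prompt are placeholders.
const stream = client.beta.chat.completions.stream({
    model: 'gpt-4o-2024-08-06',
    messages: [{ role: 'user', content: 'Say hello' }],
});
stream.on('content.delta', ({ delta }) => process.stdout.write(delta));
stream.on('refusal.done', ({ refusal }) => console.error(`refused: ${refusal}`));
stream.on('tool_calls.function.arguments.done', ({ name, parsed_arguments }) => {
    console.log(name, parsed_arguments);
});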

@@ -27,8 +119,25 @@

export class ChatCompletionStream
extends AbstractChatCompletionRunner<ChatCompletionStreamEvents>
interface ChoiceEventState {
content_done: boolean;
refusal_done: boolean;
logprobs_content_done: boolean;
logprobs_refusal_done: boolean;
current_tool_call_index: number | null;
done_tool_calls: Set<number>;
}
export class ChatCompletionStream<ParsedT = null>
extends AbstractChatCompletionRunner<ChatCompletionStreamEvents<ParsedT>, ParsedT>
implements AsyncIterable<ChatCompletionChunk>
{
#params: ChatCompletionCreateParams | null;
#choiceEventStates: ChoiceEventState[];
#currentChatCompletionSnapshot: ChatCompletionSnapshot | undefined;
constructor(params: ChatCompletionCreateParams | null) {
super();
this.#params = params;
this.#choiceEventStates = [];
}
get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined {

@@ -45,4 +154,4 @@ return this.#currentChatCompletionSnapshot;

*/
static fromReadableStream(stream: ReadableStream): ChatCompletionStream {
const runner = new ChatCompletionStream();
static fromReadableStream(stream: ReadableStream): ChatCompletionStream<null> {
const runner = new ChatCompletionStream(null);
runner._run(() => runner._fromReadableStream(stream));

@@ -52,11 +161,11 @@ return runner;

static createChatCompletion(
completions: Completions,
static createChatCompletion<ParsedT>(
client: OpenAI,
params: ChatCompletionStreamParams,
options?: Core.RequestOptions,
): ChatCompletionStream {
const runner = new ChatCompletionStream();
): ChatCompletionStream<ParsedT> {
const runner = new ChatCompletionStream<ParsedT>(params as ChatCompletionCreateParamsStreaming);
runner._run(() =>
runner._runChatCompletion(
completions,
client,
{ ...params, stream: true },

@@ -73,13 +182,180 @@ { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },

}
#addChunk(this: ChatCompletionStream, chunk: ChatCompletionChunk) {
#getChoiceEventState(choice: ChatCompletionSnapshot.Choice): ChoiceEventState {
let state = this.#choiceEventStates[choice.index];
if (state) {
return state;
}
state = {
content_done: false,
refusal_done: false,
logprobs_content_done: false,
logprobs_refusal_done: false,
done_tool_calls: new Set(),
current_tool_call_index: null,
};
this.#choiceEventStates[choice.index] = state;
return state;
}
#addChunk(this: ChatCompletionStream<ParsedT>, chunk: ChatCompletionChunk) {
if (this.ended) return;
const completion = this.#accumulateChatCompletion(chunk);
this._emit('chunk', chunk, completion);
const delta = chunk.choices[0]?.delta?.content;
const snapshot = completion.choices[0]?.message;
if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) {
this._emit('content', delta, snapshot.content);
for (const choice of chunk.choices) {
const choiceSnapshot = completion.choices[choice.index]!;
if (
choice.delta.content != null &&
choiceSnapshot.message?.role === 'assistant' &&
choiceSnapshot.message?.content
) {
this._emit('content', choice.delta.content, choiceSnapshot.message.content);
this._emit('content.delta', {
delta: choice.delta.content,
snapshot: choiceSnapshot.message.content,
parsed: choiceSnapshot.message.parsed,
});
}
if (
choice.delta.refusal != null &&
choiceSnapshot.message?.role === 'assistant' &&
choiceSnapshot.message?.refusal
) {
this._emit('refusal.delta', {
delta: choice.delta.refusal,
snapshot: choiceSnapshot.message.refusal,
});
}
if (choice.logprobs?.content != null && choiceSnapshot.message?.role === 'assistant') {
this._emit('logprobs.content.delta', {
content: choice.logprobs?.content,
snapshot: choiceSnapshot.logprobs?.content ?? [],
});
}
if (choice.logprobs?.refusal != null && choiceSnapshot.message?.role === 'assistant') {
this._emit('logprobs.refusal.delta', {
refusal: choice.logprobs?.refusal,
snapshot: choiceSnapshot.logprobs?.refusal ?? [],
});
}
const state = this.#getChoiceEventState(choiceSnapshot);
if (choiceSnapshot.finish_reason) {
this.#emitContentDoneEvents(choiceSnapshot);
if (state.current_tool_call_index != null) {
this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index);
}
}
for (const toolCall of choice.delta.tool_calls ?? []) {
if (state.current_tool_call_index !== toolCall.index) {
this.#emitContentDoneEvents(choiceSnapshot);
// new tool call started, the previous one is done
if (state.current_tool_call_index != null) {
this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index);
}
}
state.current_tool_call_index = toolCall.index;
}
for (const toolCallDelta of choice.delta.tool_calls ?? []) {
const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallDelta.index];
if (!toolCallSnapshot?.type) {
continue;
}
if (toolCallSnapshot?.type === 'function') {
this._emit('tool_calls.function.arguments.delta', {
name: toolCallSnapshot.function?.name,
index: toolCallDelta.index,
arguments: toolCallSnapshot.function.arguments,
parsed_arguments: toolCallSnapshot.function.parsed_arguments,
arguments_delta: toolCallDelta.function?.arguments ?? '',
});
} else {
assertNever(toolCallSnapshot?.type);
}
}
}
}
#endRequest(): ChatCompletion {
#emitToolCallDoneEvent(choiceSnapshot: ChatCompletionSnapshot.Choice, toolCallIndex: number) {
const state = this.#getChoiceEventState(choiceSnapshot);
if (state.done_tool_calls.has(toolCallIndex)) {
// we've already fired the done event
return;
}
const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallIndex];
if (!toolCallSnapshot) {
throw new Error('no tool call snapshot');
}
if (!toolCallSnapshot.type) {
throw new Error('tool call snapshot missing `type`');
}
if (toolCallSnapshot.type === 'function') {
const inputTool = this.#params?.tools?.find(
(tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name,
);
this._emit('tool_calls.function.arguments.done', {
name: toolCallSnapshot.function.name,
index: toolCallIndex,
arguments: toolCallSnapshot.function.arguments,
parsed_arguments:
isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCallSnapshot.function.arguments)
: inputTool?.function.strict ? JSON.parse(toolCallSnapshot.function.arguments)
: null,
});
} else {
assertNever(toolCallSnapshot.type);
}
}
#emitContentDoneEvents(choiceSnapshot: ChatCompletionSnapshot.Choice) {
const state = this.#getChoiceEventState(choiceSnapshot);
if (choiceSnapshot.message.content && !state.content_done) {
state.content_done = true;
const responseFormat = this.#getAutoParseableResponseFormat();
this._emit('content.done', {
content: choiceSnapshot.message.content,
parsed: responseFormat ? responseFormat.$parseRaw(choiceSnapshot.message.content) : (null as any),
});
}
if (choiceSnapshot.message.refusal && !state.refusal_done) {
state.refusal_done = true;
this._emit('refusal.done', { refusal: choiceSnapshot.message.refusal });
}
if (choiceSnapshot.logprobs?.content && !state.logprobs_content_done) {
state.logprobs_content_done = true;
this._emit('logprobs.content.done', { content: choiceSnapshot.logprobs.content });
}
if (choiceSnapshot.logprobs?.refusal && !state.logprobs_refusal_done) {
state.logprobs_refusal_done = true;
this._emit('logprobs.refusal.done', { refusal: choiceSnapshot.logprobs.refusal });
}
}
#endRequest(): ParsedChatCompletion<ParsedT> {
if (this.ended) {

@@ -93,10 +369,12 @@ throw new OpenAIError(`stream has ended, this shouldn't happen`);

this.#currentChatCompletionSnapshot = undefined;
return finalizeChatCompletion(snapshot);
this.#choiceEventStates = [];
return finalizeChatCompletion(snapshot, this.#params);
}
protected override async _createChatCompletion(
completions: Completions,
client: OpenAI,
params: ChatCompletionCreateParams,
options?: Core.RequestOptions,
): Promise<ChatCompletion> {
): Promise<ParsedChatCompletion<ParsedT>> {
super._createChatCompletion;
const signal = options?.signal;

@@ -108,3 +386,4 @@ if (signal) {

this.#beginRequest();
const stream = await completions.create(
const stream = await client.chat.completions.create(
{ ...params, stream: true },

@@ -151,2 +430,11 @@ { ...options, signal: this.controller.signal },

#getAutoParseableResponseFormat(): AutoParseableResponseFormat<ParsedT> | null {
const responseFormat = this.#params?.response_format;
if (isAutoParsableResponseFormat<ParsedT>(responseFormat)) {
return responseFormat;
}
return null;
}
#accumulateChatCompletion(chunk: ChatCompletionChunk): ChatCompletionSnapshot {

@@ -174,4 +462,6 @@ let snapshot = this.#currentChatCompletionSnapshot;

} else {
const { content, ...rest } = logprobs;
const { content, refusal, ...rest } = logprobs;
assertIsEmpty(rest);
Object.assign(choice.logprobs, rest);
if (content) {

@@ -181,13 +471,36 @@ choice.logprobs.content ??= [];

}
if (refusal) {
choice.logprobs.refusal ??= [];
choice.logprobs.refusal.push(...refusal);
}
}
}
if (finish_reason) choice.finish_reason = finish_reason;
if (finish_reason) {
choice.finish_reason = finish_reason;
if (this.#params && hasAutoParseableInput(this.#params)) {
if (finish_reason === 'length') {
throw new LengthFinishReasonError();
}
if (finish_reason === 'content_filter') {
throw new ContentFilterFinishReasonError();
}
}
}
Object.assign(choice, other);
if (!delta) continue; // Shouldn't happen; just in case.
const { content, function_call, role, tool_calls, ...rest } = delta;
const { content, refusal, function_call, role, tool_calls, ...rest } = delta;
assertIsEmpty(rest);
Object.assign(choice.message, rest);
if (content) choice.message.content = (choice.message.content || '') + content;
if (refusal) {
choice.message.refusal = (choice.message.refusal || '') + refusal;
}
if (role) choice.message.role = role;

@@ -205,12 +518,28 @@ if (function_call) {

}
if (content) {
choice.message.content = (choice.message.content || '') + content;
if (!choice.message.refusal && this.#getAutoParseableResponseFormat()) {
choice.message.parsed = partialParse(choice.message.content);
}
}
if (tool_calls) {
if (!choice.message.tool_calls) choice.message.tool_calls = [];
for (const { index, id, type, function: fn, ...rest } of tool_calls) {
const tool_call = (choice.message.tool_calls[index] ??= {});
const tool_call = (choice.message.tool_calls[index] ??=
{} as ChatCompletionSnapshot.Choice.Message.ToolCall);
Object.assign(tool_call, rest);
if (id) tool_call.id = id;
if (type) tool_call.type = type;
if (fn) tool_call.function ??= { arguments: '' };
if (fn) tool_call.function ??= { name: fn.name ?? '', arguments: '' };
if (fn?.name) tool_call.function!.name = fn.name;
if (fn?.arguments) tool_call.function!.arguments += fn.arguments;
if (fn?.arguments) {
tool_call.function!.arguments += fn.arguments;
if (shouldParseToolCall(this.#params, tool_call)) {
tool_call.function!.parsed_arguments = partialParse(tool_call.function!.arguments);
}
}
}

@@ -222,3 +551,3 @@ }

[Symbol.asyncIterator](this: ChatCompletionStream): AsyncIterator<ChatCompletionChunk> {
[Symbol.asyncIterator](this: ChatCompletionStream<ParsedT>): AsyncIterator<ChatCompletionChunk> {
const pushQueue: ChatCompletionChunk[] = [];

@@ -290,5 +619,8 @@ const readQueue: {

function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletion {
function finalizeChatCompletion<ParsedT>(
snapshot: ChatCompletionSnapshot,
params: ChatCompletionCreateParams | null,
): ParsedChatCompletion<ParsedT> {
const { id, choices, created, model, system_fingerprint, ...rest } = snapshot;
return {
const completion: ChatCompletion = {
...rest,

@@ -298,13 +630,30 @@ id,

({ message, finish_reason, index, logprobs, ...choiceRest }): ChatCompletion.Choice => {
if (!finish_reason) throw new OpenAIError(`missing finish_reason for choice ${index}`);
if (!finish_reason) {
throw new OpenAIError(`missing finish_reason for choice ${index}`);
}
const { content = null, function_call, tool_calls, ...messageRest } = message;
const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.
if (!role) throw new OpenAIError(`missing role for choice ${index}`);
if (!role) {
throw new OpenAIError(`missing role for choice ${index}`);
}
if (function_call) {
const { arguments: args, name } = function_call;
if (args == null) throw new OpenAIError(`missing function_call.arguments for choice ${index}`);
if (!name) throw new OpenAIError(`missing function_call.name for choice ${index}`);
if (args == null) {
throw new OpenAIError(`missing function_call.arguments for choice ${index}`);
}
if (!name) {
throw new OpenAIError(`missing function_call.name for choice ${index}`);
}
return {
...choiceRest,
message: { content, function_call: { arguments: args, name }, role },
message: {
content,
function_call: { arguments: args, name },
role,
refusal: message.refusal ?? null,
},
finish_reason,

@@ -315,2 +664,3 @@ index,

}
if (tool_calls) {

@@ -326,17 +676,22 @@ return {

content,
refusal: message.refusal ?? null,
tool_calls: tool_calls.map((tool_call, i) => {
const { function: fn, type, id, ...toolRest } = tool_call;
const { arguments: args, name, ...fnRest } = fn || {};
if (id == null)
if (id == null) {
throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].id\n${str(snapshot)}`);
if (type == null)
}
if (type == null) {
throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].type\n${str(snapshot)}`);
if (name == null)
}
if (name == null) {
throw new OpenAIError(
`missing choices[${index}].tool_calls[${i}].function.name\n${str(snapshot)}`,
);
if (args == null)
}
if (args == null) {
throw new OpenAIError(
`missing choices[${index}].tool_calls[${i}].function.arguments\n${str(snapshot)}`,
);
}

@@ -350,3 +705,3 @@ return { ...toolRest, id, type, function: { ...fnRest, name, arguments: args } };

...choiceRest,
message: { ...messageRest, content, role },
message: { ...messageRest, content, role, refusal: message.refusal ?? null },
finish_reason,

@@ -363,2 +718,4 @@ index,

};
return maybeParseChatCompletion(completion, params);
}

@@ -446,2 +803,6 @@

refusal?: string | null;
parsed?: unknown | null;
/**

@@ -466,5 +827,5 @@ * The name and arguments of a function that should be called, as generated by the

*/
id?: string;
id: string;
function?: ToolCall.Function;
function: ToolCall.Function;

@@ -474,3 +835,3 @@ /**

*/
type?: 'function';
type: 'function';
}

@@ -486,8 +847,10 @@

*/
arguments?: string;
arguments: string;
parsed_arguments?: unknown;
/**
* The name of the function to call.
*/
name?: string;
name: string;
}

@@ -517,1 +880,14 @@ }

}
type AssertIsEmpty<T extends {}> = keyof T extends never ? T : never;
/**
* Ensures the given argument is an empty object, useful for
* asserting that all known properties on an object have been
* destructured.
*/
function assertIsEmpty<T extends {}>(obj: AssertIsEmpty<T>): asserts obj is AssertIsEmpty<T> {
return;
}
function assertNever(_x: never) {}
import {
Completions,
type ChatCompletionChunk,

@@ -10,2 +9,4 @@ type ChatCompletionCreateParamsStreaming,

import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream';
import OpenAI from "../index";
import { AutoParseableTool } from "./parser";

@@ -28,11 +29,11 @@ export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {

> & {
tools: RunnableTools<FunctionsArgs>;
tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];
};
export class ChatCompletionStreamingRunner
extends ChatCompletionStream
export class ChatCompletionStreamingRunner<ParsedT = null>
extends ChatCompletionStream<ParsedT>
implements AsyncIterable<ChatCompletionChunk>
{
static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner {
const runner = new ChatCompletionStreamingRunner();
static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner<null> {
const runner = new ChatCompletionStreamingRunner(null);
runner._run(() => runner._fromReadableStream(stream));

@@ -44,7 +45,7 @@ return runner;

static runFunctions<T extends (string | object)[]>(
completions: Completions,
client: OpenAI,
params: ChatCompletionStreamingFunctionRunnerParams<T>,
options?: RunnerOptions,
): ChatCompletionStreamingRunner {
const runner = new ChatCompletionStreamingRunner();
): ChatCompletionStreamingRunner<null> {
const runner = new ChatCompletionStreamingRunner(null);
const opts = {

@@ -54,12 +55,15 @@ ...options,

};
runner._run(() => runner._runFunctions(completions, params, opts));
runner._run(() => runner._runFunctions(client, params, opts));
return runner;
}
static runTools<T extends (string | object)[]>(
completions: Completions,
static runTools<T extends (string | object)[], ParsedT = null>(
client: OpenAI,
params: ChatCompletionStreamingToolRunnerParams<T>,
options?: RunnerOptions,
): ChatCompletionStreamingRunner {
const runner = new ChatCompletionStreamingRunner();
): ChatCompletionStreamingRunner<ParsedT> {
const runner = new ChatCompletionStreamingRunner<ParsedT>(
// @ts-expect-error TODO these types are incompatible
params,
);
const opts = {

@@ -69,5 +73,5 @@ ...options,

};
runner._run(() => runner._runTools(completions, params, opts));
runner._run(() => runner._runTools(client, params, opts));
return runner;
}
}

@@ -15,3 +15,3 @@ import { type ChatCompletionRunner } from './ChatCompletionRunner';

args: Args,
runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>,
) => PromiseOrValue<unknown>;

@@ -35,2 +35,3 @@ /**

name?: string | undefined;
strict?: boolean | undefined;
};

@@ -45,3 +46,3 @@

args: string,
runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>,
) => PromiseOrValue<unknown>;

@@ -60,2 +61,3 @@ /**

name?: string | undefined;
strict?: boolean | undefined;
};

@@ -62,0 +64,0 @@

@@ -152,2 +152,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -652,4 +657,4 @@ * message the model generates is valid JSON.

* The maximum number of results the file search tool should output. The default is
* 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
* and 50 inclusive.
* 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
* 1 and 50 inclusive.
*

@@ -1091,2 +1096,7 @@ * Note that the file search tool may output fewer than `max_num_results` results.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1284,2 +1294,7 @@ * message the model generates is valid JSON.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1286,0 +1301,0 @@ * message the model generates is valid JSON.

@@ -42,3 +42,2 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

export import Threads = ThreadsAPI.Threads;
export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;

@@ -45,0 +44,0 @@ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;

@@ -29,5 +29,52 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

import { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream';
import {
ChatCompletion,
ChatCompletionCreateParamsNonStreaming,
ChatCompletionMessage,
ChatCompletionMessageToolCall,
} from '../../chat/completions';
import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser';
export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream';
export interface ParsedFunction extends ChatCompletionMessageToolCall.Function {
parsed_arguments?: unknown;
}
export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall {
function: ParsedFunction;
}
export interface ParsedChatCompletionMessage<ParsedT> extends ChatCompletionMessage {
parsed: ParsedT | null;
tool_calls: Array<ParsedFunctionToolCall>;
}
export interface ParsedChoice<ParsedT> extends ChatCompletion.Choice {
message: ParsedChatCompletionMessage<ParsedT>;
}
export interface ParsedChatCompletion<ParsedT> extends ChatCompletion {
choices: Array<ParsedChoice<ParsedT>>;
}
export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming;
export class Completions extends APIResource {
async parse<Params extends ChatCompletionParseParams, ParsedT = ExtractParsedContentFromParams<Params>>(
body: Params,
options?: Core.RequestOptions,
): Promise<ParsedChatCompletion<ParsedT>> {
validateInputTools(body.tools);
const completion = await this._client.chat.completions.create(body, {
...options,
headers: {
...options?.headers,
'X-Stainless-Helper-Method': 'beta.chat.completions.parse',
},
});
return parseChatCompletion(completion, body);
}
/**

@@ -39,7 +86,7 @@ * @deprecated - use `runTools` instead.

options?: Core.RequestOptions,
): ChatCompletionRunner;
): ChatCompletionRunner<null>;
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
body: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
options?: Core.RequestOptions,
): ChatCompletionStreamingRunner;
): ChatCompletionStreamingRunner<null>;
runFunctions<FunctionsArgs extends BaseFunctionsArgs>(

@@ -50,6 +97,6 @@ body:

options?: Core.RequestOptions,
): ChatCompletionRunner | ChatCompletionStreamingRunner {
): ChatCompletionRunner<null> | ChatCompletionStreamingRunner<null> {
if (body.stream) {
return ChatCompletionStreamingRunner.runFunctions(
this._client.chat.completions,
this._client,
body as ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,

@@ -60,3 +107,3 @@ options,

return ChatCompletionRunner.runFunctions(
this._client.chat.completions,
this._client,
body as ChatCompletionFunctionRunnerParams<FunctionsArgs>,

@@ -76,28 +123,28 @@ options,

*/
runTools<FunctionsArgs extends BaseFunctionsArgs>(
body: ChatCompletionToolRunnerParams<FunctionsArgs>,
runTools<
Params extends ChatCompletionToolRunnerParams<any>,
ParsedT = ExtractParsedContentFromParams<Params>,
>(body: Params, options?: Core.RequestOptions): ChatCompletionRunner<ParsedT>;
runTools<
Params extends ChatCompletionStreamingToolRunnerParams<any>,
ParsedT = ExtractParsedContentFromParams<Params>,
>(body: Params, options?: Core.RequestOptions): ChatCompletionStreamingRunner<ParsedT>;
runTools<
Params extends ChatCompletionToolRunnerParams<any> | ChatCompletionStreamingToolRunnerParams<any>,
ParsedT = ExtractParsedContentFromParams<Params>,
>(
body: Params,
options?: Core.RequestOptions,
): ChatCompletionRunner;
runTools<FunctionsArgs extends BaseFunctionsArgs>(
body: ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
options?: Core.RequestOptions,
): ChatCompletionStreamingRunner;
runTools<FunctionsArgs extends BaseFunctionsArgs>(
body:
| ChatCompletionToolRunnerParams<FunctionsArgs>
| ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
options?: Core.RequestOptions,
): ChatCompletionRunner | ChatCompletionStreamingRunner {
): ChatCompletionRunner<ParsedT> | ChatCompletionStreamingRunner<ParsedT> {
if (body.stream) {
return ChatCompletionStreamingRunner.runTools(
this._client.chat.completions,
body as ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
this._client,
body as ChatCompletionStreamingToolRunnerParams<any>,
options,
);
}
return ChatCompletionRunner.runTools(
this._client.chat.completions,
body as ChatCompletionToolRunnerParams<FunctionsArgs>,
options,
);
return ChatCompletionRunner.runTools(this._client, body as ChatCompletionToolRunnerParams<any>, options);
}

@@ -108,5 +155,8 @@

*/
stream(body: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream {
return ChatCompletionStream.createChatCompletion(this._client.chat.completions, body, options);
stream<Params extends ChatCompletionStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(
body: Params,
options?: Core.RequestOptions,
): ChatCompletionStream<ParsedT> {
return ChatCompletionStream.createChatCompletion(this._client, body, options);
}
}
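// Illustrative sketch: calling the new `parse` helper. Assumes `zodResponseFormat`
// from "openai/helpers/zod" and the `zod` package; the Person schema and prompt
// are made up for this example.
import { z } from 'zod';
import { zodResponseFormat } from 'openai/helpers/zod';

const Person = z.object({ name: z.string(), age: z.number() });
const completion = await client.beta.chat.completions.parse({
    model: 'gpt-4o-2024-08-06',
    messages: [{ role: 'user', content: 'Alice is 30 years old.' }],
    response_format: zodResponseFormat(Person, 'person'),
});
// `parsed` is typed from the schema: { name: string; age: number } | null
console.log(completion.choices[0]?.message.parsed);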

@@ -22,3 +22,2 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

export {
AssistantResponseFormat,
AssistantResponseFormatOption,

@@ -25,0 +24,0 @@ AssistantToolChoice,

@@ -25,2 +25,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

MessageDeltaEvent,
RefusalContentBlock,
RefusalDeltaBlock,
Text,

@@ -38,3 +40,2 @@ TextContentBlock,

export {
AssistantResponseFormat,
AssistantResponseFormatOption,

@@ -41,0 +42,0 @@ AssistantToolChoice,

@@ -484,3 +484,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

*/
export type MessageContent = ImageFileContentBlock | ImageURLContentBlock | TextContentBlock;
export type MessageContent =
| ImageFileContentBlock
| ImageURLContentBlock
| TextContentBlock
| RefusalContentBlock;

@@ -491,3 +495,7 @@ /**

*/
export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock | ImageURLDeltaBlock;
export type MessageContentDelta =
| ImageFileDeltaBlock
| TextDeltaBlock
| RefusalDeltaBlock
| ImageURLDeltaBlock;

@@ -544,2 +552,31 @@ /**

/**
* The refusal content generated by the assistant.
*/
export interface RefusalContentBlock {
refusal: string;
/**
* Always `refusal`.
*/
type: 'refusal';
}
/**
* The refusal content that is part of a message.
*/
export interface RefusalDeltaBlock {
/**
* The index of the refusal part in the message.
*/
index: number;
/**
* Always `refusal`.
*/
type: 'refusal';
refusal?: string;
}
export interface Text {

@@ -713,2 +750,4 @@ annotations: Array<Annotation>;

export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
export import RefusalContentBlock = MessagesAPI.RefusalContentBlock;
export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock;
export import Text = MessagesAPI.Text;

@@ -715,0 +754,0 @@ export import TextContentBlock = MessagesAPI.TextContentBlock;

@@ -425,2 +425,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -688,2 +693,7 @@ * message the model generates is valid JSON.

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -690,0 +700,0 @@ * message the model generates is valid JSON.

@@ -9,2 +9,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

import * as ThreadsAPI from './threads';
import * as Shared from '../../shared';
import * as AssistantsAPI from '../assistants';

@@ -122,11 +123,12 @@ import * as ChatAPI from '../../chat/chat';

/**
* An object describing the expected output of the model. If `json_object` only
* `function` type `tools` are allowed to be passed to the Run. If `text` the model
* can return text or any value needed.
* An object describing the expected output of the model. If `json_object` or
* `json_schema`, only `function` type `tools` are allowed to be passed to the Run.
* If `text` the model can return text or any value needed.
*/
export interface AssistantResponseFormat {
/**
* Must be one of `text` or `json_object`.
* Must be one of `text`, `json_object` or `json_schema`.
*/
type?: 'text' | 'json_object';
type?: 'text' | 'json_object' | 'json_schema';
}

@@ -140,2 +142,7 @@

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -152,3 +159,7 @@ * message the model generates is valid JSON.

*/
export type AssistantResponseFormatOption = 'none' | 'auto' | AssistantResponseFormat;
export type AssistantResponseFormatOption =
| 'auto'
| Shared.ResponseFormatText
| Shared.ResponseFormatJSONObject
| Shared.ResponseFormatJSONSchema;
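
A hedged usage sketch of the new option union on an assistant run; the thread ID, assistant ID, and schema below are placeholders.

import OpenAI from 'openai';

const client = new OpenAI();

async function runWithSchema() {
  return client.beta.threads.runs.create('thread_abc123', { // placeholder IDs
    assistant_id: 'asst_abc123',
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'weather_report', // illustrative name
        schema: {
          type: 'object',
          properties: { temperature: { type: 'number' } },
          required: ['temperature'],
          additionalProperties: false,
        },
        strict: true,
      },
    },
  });
}
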

@@ -568,2 +579,7 @@ /**

*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which guarantees the model will match your supplied JSON schema. Learn
* more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the

@@ -1572,3 +1588,2 @@ * message the model generates is valid JSON.

export namespace Threads {
export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;

@@ -1627,2 +1642,4 @@ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;

export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
export import RefusalContentBlock = MessagesAPI.RefusalContentBlock;
export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock;
export import Text = MessagesAPI.Text;

@@ -1629,0 +1646,0 @@ export import TextContentBlock = MessagesAPI.TextContentBlock;

@@ -235,3 +235,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

*/
code: 'internal_error' | 'file_not_found' | 'parsing_error' | 'unhandled_mime_type';
code: 'server_error' | 'unsupported_file' | 'invalid_file';

@@ -238,0 +238,0 @@ /**

@@ -13,2 +13,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

| 'gpt-4o'
| 'gpt-4o-2024-08-06'
| 'gpt-4o-2024-05-13'

@@ -45,2 +46,3 @@ | 'gpt-4o-mini'

export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText;

@@ -47,0 +49,0 @@ export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption;

@@ -126,2 +126,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
/**
* A list of message refusal tokens with log probability information.
*/
refusal: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
}
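
A minimal sketch of reading the new `refusal` logprob list next to `content`; request parameters are illustrative.

import OpenAI from 'openai';

const client = new OpenAI();

async function inspectRefusalLogprobs() {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o-2024-08-06', // illustrative
    messages: [{ role: 'user', content: 'hi' }],
    logprobs: true,
  });

  // `refusal` is null when the model did not refuse.
  for (const token of completion.choices[0]?.logprobs?.refusal ?? []) {
    console.log(token.token, token.logprob);
  }
}
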

@@ -141,3 +146,3 @@ }

*/
content?: string | null;
content?: string | Array<ChatCompletionContentPartText | ChatCompletionContentPartRefusal> | null;

@@ -157,2 +162,7 @@ /**

/**
* The refusal message by the assistant.
*/
refusal?: string | null;
/**
* The tool calls generated by the model, such as function calls.

@@ -284,2 +294,7 @@ */

/**
* The refusal message generated by the model.
*/
refusal?: string | null;
/**
* The role of the author of this message.

@@ -354,2 +369,7 @@ */

content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
/**
* A list of message refusal tokens with log probability information.
*/
refusal: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
}

@@ -385,2 +405,14 @@ }

export interface ChatCompletionContentPartRefusal {
/**
* The refusal message generated by the model.
*/
refusal: string;
/**
* The type of the content part.
*/
type: 'refusal';
}
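
A hedged example of the new content part in practice: replaying a prior assistant turn whose content mixes text with a refusal, as the widened assistant-message `content` union above allows.

import type { ChatCompletionAssistantMessageParam } from 'openai/resources/chat/completions';

const assistantTurn: ChatCompletionAssistantMessageParam = {
  role: 'assistant',
  content: [
    { type: 'text', text: 'Here is the part I can help with.' },
    { type: 'refusal', refusal: 'I cannot assist with the rest of that request.' },
  ],
};
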
export interface ChatCompletionContentPartText {

@@ -439,2 +471,7 @@ /**

/**
* The refusal message generated by the model.
*/
refusal: string | null;
/**
* The role of the author of this message.

@@ -565,3 +602,3 @@ */

*/
content: string;
content: string | Array<ChatCompletionContentPartText>;

@@ -659,3 +696,3 @@ /**

*/
content: string;
content: string | Array<ChatCompletionContentPartText>;
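
Per the two hunks above, these `content` widenings appear to let system and tool messages carry text-part arrays as well as plain strings; a hedged sketch of the system-message form:

import type { ChatCompletionSystemMessageParam } from 'openai/resources/chat/completions';

const system: ChatCompletionSystemMessageParam = {
  role: 'system',
  content: [{ type: 'text', text: 'You are a terse assistant.' }],
};
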

@@ -799,2 +836,4 @@ /**

* An object specifying the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
* [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and

@@ -814,3 +853,6 @@ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

*/
response_format?: ChatCompletionCreateParams.ResponseFormat;
response_format?:
| Shared.ResponseFormatText
| Shared.ResponseFormatJSONObject
| Shared.ResponseFormatJSONSchema;
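
A minimal sketch of the widened `response_format` union in a create call; the schema and its name are illustrative only.

import OpenAI from 'openai';

const client = new OpenAI();

async function structuredCompletion() {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o-2024-08-06',
    messages: [{ role: 'user', content: 'List two primes as JSON.' }],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'primes', // illustrative
        schema: {
          type: 'object',
          properties: { primes: { type: 'array', items: { type: 'integer' } } },
          required: ['primes'],
          additionalProperties: false,
        },
        strict: true,
      },
    },
  });
  console.log(completion.choices[0]?.message.content);
}
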

@@ -943,25 +985,2 @@ /**

/**
* An object specifying the format that the model must output. Compatible with
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
* message the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
* limit, resulting in a long-running and seemingly "stuck" request. Also note that
* the message content may be partially cut off if `finish_reason="length"`, which
* indicates the generation exceeded `max_tokens` or the conversation exceeded the
* max context length.
*/
export interface ResponseFormat {
/**
* Must be one of `text` or `json_object`.
*/
type?: 'text' | 'json_object';
}
export type ChatCompletionCreateParamsNonStreaming =

@@ -1017,2 +1036,3 @@ ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming;

export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage;
export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal;
export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText;

@@ -1019,0 +1039,0 @@ export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption;

@@ -9,2 +9,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

ChatCompletionContentPartImage,
ChatCompletionContentPartRefusal,
ChatCompletionContentPartText,

@@ -11,0 +12,0 @@ ChatCompletionFunctionCallOption,

@@ -302,5 +302,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

* The name of the model to fine-tune. You can select one of the
* [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
* [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
*/
model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo';
model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini';
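
A hedged sketch of a fine-tuning job against the newly listed model; the training file ID is a placeholder.

import OpenAI from 'openai';

const client = new OpenAI();

async function createJob() {
  return client.fineTuning.jobs.create({
    model: 'gpt-4o-mini',
    training_file: 'file-abc123', // placeholder ID
    suffix: 'custom-model-name',
  });
}
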

@@ -348,3 +348,3 @@ /**

* For example, a `suffix` of "custom-model-name" would produce a model name like
* `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
* `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
*/

@@ -351,0 +351,0 @@ suffix?: string | null;

@@ -36,2 +36,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

parameters?: FunctionParameters;
/**
* Whether to enable strict schema adherence when generating the function call. If
* set to true, the model will follow the exact schema defined in the `parameters`
* field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
* more about Structured Outputs in the
* [function calling guide](docs/guides/function-calling).
*/
strict?: boolean | null;
}
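
A minimal sketch of the new `strict` flag on a function tool definition; the function name and parameters are illustrative.

import type { ChatCompletionTool } from 'openai/resources/chat/completions';

const tool: ChatCompletionTool = {
  type: 'function',
  function: {
    name: 'get_weather', // illustrative
    description: 'Look up current weather for a city.',
    strict: true, // enforce exact adherence to `parameters`
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
      additionalProperties: false,
    },
  },
};
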

@@ -49,1 +58,54 @@

export type FunctionParameters = Record<string, unknown>;
export interface ResponseFormatJSONObject {
/**
* The type of response format being defined: `json_object`
*/
type: 'json_object';
}
export interface ResponseFormatJSONSchema {
json_schema: ResponseFormatJSONSchema.JSONSchema;
/**
* The type of response format being defined: `json_schema`
*/
type: 'json_schema';
}
export namespace ResponseFormatJSONSchema {
export interface JSONSchema {
/**
* The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
* and dashes, with a maximum length of 64.
*/
name: string;
/**
* A description of what the response format is for, used by the model to determine
* how to respond in the format.
*/
description?: string;
/**
* The schema for the response format, described as a JSON Schema object.
*/
schema?: Record<string, unknown>;
/**
* Whether to enable strict schema adherence when generating the output. If set to
* true, the model will always follow the exact schema defined in the `schema`
* field. Only a subset of JSON Schema is supported when `strict` is `true`. To
* learn more, read the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*/
strict?: boolean | null;
}
}
export interface ResponseFormatText {
/**
* The type of response format being defined: `text`
*/
type: 'text';
}
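
A hedged illustration of a value satisfying the shared type (note the documented name constraints), assuming `ResponseFormatJSONSchema` is re-exported on the `OpenAI` namespace as in the other files above.

import type OpenAI from 'openai';

const format: OpenAI.ResponseFormatJSONSchema = {
  type: 'json_schema',
  json_schema: {
    name: 'step_by_step_answer', // a-z, A-Z, 0-9, _ or -, max 64 chars
    description: 'An answer broken into explicit steps.',
    schema: {
      type: 'object',
      properties: {
        steps: { type: 'array', items: { type: 'string' } },
        answer: { type: 'string' },
      },
      required: ['steps', 'answer'],
      additionalProperties: false,
    },
    strict: true,
  },
};
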

@@ -1,1 +0,1 @@

export const VERSION = '4.54.0'; // x-release-please-version
export const VERSION = '4.55.0'; // x-release-please-version

@@ -1,2 +0,2 @@

export declare const VERSION = "4.54.0";
export declare const VERSION = "4.55.0";
//# sourceMappingURL=version.d.ts.map
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.VERSION = void 0;
exports.VERSION = '4.54.0'; // x-release-please-version
exports.VERSION = '4.55.0'; // x-release-please-version
//# sourceMappingURL=version.js.map

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet
