Comparing version 4.0.0-beta.7 to 4.0.0-beta.8
@@ -213,3 +213,3 @@ import { APIError } from './error.js'; | ||
* | ||
* Will return an empty string if the environment variable doesn't exist or cannot be accessed. | ||
* Will return undefined if the environment variable doesn't exist or cannot be accessed. | ||
*/ | ||
@@ -225,2 +225,3 @@ export declare const readEnv: (env: string) => string | undefined; | ||
export declare function hasOwn(obj: Object, key: string): boolean; | ||
export declare const isRunningInBrowser: () => boolean; | ||
export interface HeadersProtocol { | ||
@@ -227,0 +228,0 @@ get: (header: string) => string | null | undefined; |
core.js
@@ -34,2 +34,3 @@ 'use strict'; | ||
exports.isHeadersProtocol = | ||
exports.isRunningInBrowser = | ||
exports.hasOwn = | ||
@@ -554,3 +555,14 @@ exports.isEmptyObj = | ||
} | ||
// TODO add support for Cloudflare workers, browsers, etc. | ||
const browserInfo = getBrowserInfo(); | ||
if (browserInfo) { | ||
return { | ||
'X-Stainless-Lang': 'js', | ||
'X-Stainless-Package-Version': version_1.VERSION, | ||
'X-Stainless-OS': 'Unknown', | ||
'X-Stainless-Arch': 'unknown', | ||
'X-Stainless-Runtime': `browser:${browserInfo.browser}`, | ||
'X-Stainless-Runtime-Version': browserInfo.version, | ||
}; | ||
} | ||
// TODO add support for Cloudflare workers, etc. | ||
return { | ||
@@ -565,2 +577,28 @@ 'X-Stainless-Lang': 'js', | ||
}; | ||
// Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts | ||
function getBrowserInfo() { | ||
if (typeof navigator === 'undefined' || !navigator) { | ||
return null; | ||
} | ||
// NOTE: The order matters here! | ||
const browserPatterns = [ | ||
{ key: 'edge', pattern: /Edge(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'ie', pattern: /MSIE(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'ie', pattern: /Trident(?:.*rv\:(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'chrome', pattern: /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'firefox', pattern: /Firefox(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'safari', pattern: /(?:Version\W+(\d+)\.(\d+)(?:\.(\d+))?)?(?:\W+Mobile\S*)?\W+Safari/ }, | ||
]; | ||
// Find the FIRST matching browser | ||
for (const { key, pattern } of browserPatterns) { | ||
const match = pattern.exec(navigator.userAgent); | ||
if (match) { | ||
const major = match[1] || 0; | ||
const minor = match[2] || 0; | ||
const patch = match[3] || 0; | ||
return { browser: key, version: `${major}.${minor}.${patch}` }; | ||
} | ||
} | ||
return null; | ||
} | ||
const normalizeArch = (arch) => { | ||
@@ -620,3 +658,3 @@ // Node docs: | ||
const validatePositiveInteger = (name, n) => { | ||
if (!Number.isInteger(n)) { | ||
if (typeof n !== 'number' || !Number.isInteger(n)) { | ||
throw new Error(`${name} must be an integer`); | ||
@@ -642,10 +680,17 @@ } | ||
* | ||
* Will return an empty string if the environment variable doesn't exist or cannot be accessed. | ||
* Will return undefined if the environment variable doesn't exist or cannot be accessed. | ||
*/ | ||
const readEnv = (env) => { | ||
var _a; | ||
if (typeof process === 'undefined') { | ||
return undefined; | ||
var _a, _b, _c, _d; | ||
if (typeof process !== 'undefined') { | ||
return (_b = (_a = process.env) === null || _a === void 0 ? void 0 : _a[env]) !== null && _b !== void 0 ? _b : undefined; | ||
} | ||
return (_a = process.env[env]) !== null && _a !== void 0 ? _a : undefined; | ||
if (typeof Deno !== 'undefined') { | ||
return (_d = (_c = Deno.env) === null || _c === void 0 ? void 0 : _c.get) === null || _d === void 0 ? void 0 : _d.call(_c, env); | ||
} | ||
return undefined; | ||
}; | ||
@@ -714,2 +759,13 @@ exports.readEnv = readEnv; | ||
}; | ||
const isRunningInBrowser = () => { | ||
return ( | ||
// @ts-ignore | ||
typeof window !== 'undefined' && | ||
// @ts-ignore | ||
typeof window.document !== 'undefined' && | ||
// @ts-ignore | ||
typeof navigator !== 'undefined' | ||
); | ||
}; | ||
exports.isRunningInBrowser = isRunningInBrowser; | ||
const isHeadersProtocol = (headers) => { | ||
@@ -716,0 +772,0 @@ return typeof (headers === null || headers === void 0 ? void 0 : headers.get) === 'function'; |
@@ -59,2 +59,7 @@ import * as Core from './core.js'; | ||
defaultQuery?: Core.DefaultQuery; | ||
/** | ||
* By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. | ||
* Only set this option to `true` if you understand the risks and have appropriate mitigations in place. | ||
*/ | ||
dangerouslyAllowBrowser?: boolean; | ||
organization?: string | null; | ||
@@ -118,2 +123,4 @@ } | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
export import Chat = API.Chat; | ||
@@ -154,4 +161,6 @@ export import Edits = API.Edits; | ||
export import FineTuneListEventsParams = API.FineTuneListEventsParams; | ||
export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} | ||
export default OpenAI; | ||
//# sourceMappingURL=index.d.ts.map |
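For reference, a brief sketch of why these aliases are re-exported at the top level: they let callers name the narrowed param types directly. This assumes the usual `openai` import; the model and message are placeholders.

```ts
import OpenAI from 'openai';

// The non-streaming alias pins `stream` to `false | null` (or omitted),
// so this object can only be passed to the non-streaming overload.
const params: OpenAI.Chat.CompletionCreateParamsNonStreaming = {
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hello!' }],
};
```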
@@ -86,2 +86,7 @@ 'use strict'; | ||
}; | ||
if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { | ||
throw new Error( | ||
"It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers. \nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", | ||
); | ||
} | ||
super({ | ||
@@ -88,0 +93,0 @@ baseURL: options.baseURL, |
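A minimal sketch of how the new guard behaves from the caller's side; the key value is a placeholder and, as the error text says, opting in means accepting that the key is visible to anyone who can read the page.

```ts
import OpenAI from 'openai';

// In a browser-like environment (window, document, and navigator all defined),
// constructing the client without the flag now throws the error above.
const client = new OpenAI({
  apiKey: 'sk-...', // placeholder; never ship a real secret key to browsers
  dangerouslyAllowBrowser: true,
});
```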
{ | ||
"name": "openai", | ||
"version": "4.0.0-beta.7", | ||
"version": "4.0.0-beta.8", | ||
"description": "Client library for the OpenAI API", | ||
@@ -5,0 +5,0 @@ "author": "OpenAI <support@openai.com>", |
@@ -233,8 +233,12 @@ # OpenAI Node API Library | ||
## Status | ||
## Semantic Versioning | ||
This package is in beta. Its internals and interfaces are not stable | ||
and subject to change without a major semver bump; | ||
please reach out if you rely on any undocumented behavior. | ||
This package generally attempts to follow [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: | ||
1. Changes that only affect static types, without breaking runtime behavior. | ||
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. | ||
3. Changes that we do not expect to impact the vast majority of users in practice. | ||
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. | ||
We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-node/issues) with questions, bugs, or suggestions. | ||
@@ -241,0 +245,0 @@ |
@@ -13,3 +13,5 @@ import { APIResource } from 'openai/resource'; | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
//# sourceMappingURL=chat.d.ts.map |
@@ -10,9 +10,13 @@ import * as Core from 'openai/core'; | ||
create( | ||
body: CompletionCreateParams.CreateChatCompletionRequestNonStreaming, | ||
body: CompletionCreateParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<ChatCompletion>>; | ||
create( | ||
body: CompletionCreateParams.CreateChatCompletionRequestStreaming, | ||
body: CompletionCreateParamsStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Stream<ChatCompletionChunk>>>; | ||
create( | ||
body: CompletionCreateParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<ChatCompletion | Stream<ChatCompletionChunk>>>; | ||
} | ||
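A hedged sketch of how the reshaped overloads resolve; the client instance and model name are assumed for illustration.

```ts
// Omitting `stream` (or passing false) selects the CompletionCreateParamsNonStreaming
// overload, so the promise resolves to a single ChatCompletion.
const completion = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Say hello' }],
});

// Passing `stream: true` selects the CompletionCreateParamsStreaming overload,
// so the result is typed as a Stream<ChatCompletionChunk> instead.
const stream = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Say hello' }],
  stream: true,
});
```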
@@ -168,297 +172,168 @@ export interface ChatCompletion { | ||
} | ||
export type CompletionCreateParams = | ||
| CompletionCreateParams.CreateChatCompletionRequestNonStreaming | ||
| CompletionCreateParams.CreateChatCompletionRequestStreaming; | ||
export interface CompletionCreateParams { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between an end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
*/ | ||
function_call?: 'none' | 'auto' | CompletionCreateParams.FunctionCallOption; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.Function>; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: boolean | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
export declare namespace CompletionCreateParams { | ||
interface CreateChatCompletionRequestNonStreaming { | ||
interface FunctionCallOption { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
* The name of the function to call. | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between an end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
*/ | ||
function_call?: | ||
| 'none' | ||
| 'auto' | ||
| CompletionCreateParams.CreateChatCompletionRequestNonStreaming.FunctionCallOption; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Function>; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
name: string; | ||
} | ||
namespace CreateChatCompletionRequestNonStreaming { | ||
interface FunctionCallOption { | ||
/** | ||
* The name of the function to call. | ||
*/ | ||
name: string; | ||
} | ||
interface Function { | ||
/** | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
name: string; | ||
/** | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
parameters: Record<string, unknown>; | ||
/** | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
description?: string; | ||
} | ||
} | ||
interface CreateChatCompletionRequestStreaming { | ||
interface Function { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
name: string; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
frequency_penalty?: number | null; | ||
parameters: Record<string, unknown>; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between an end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
function_call?: | ||
| 'none' | ||
| 'auto' | ||
| CompletionCreateParams.CreateChatCompletionRequestStreaming.FunctionCallOption; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.CreateChatCompletionRequestStreaming.Function>; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
description?: string; | ||
} | ||
namespace CreateChatCompletionRequestStreaming { | ||
interface FunctionCallOption { | ||
/** | ||
* The name of the function to call. | ||
*/ | ||
name: string; | ||
} | ||
interface Function { | ||
/** | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
name: string; | ||
/** | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
parameters: Record<string, unknown>; | ||
/** | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
description?: string; | ||
} | ||
} | ||
type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParams { | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
} | ||
export interface CompletionCreateParamsStreaming extends CompletionCreateParams { | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
} | ||
export declare namespace Completions { | ||
@@ -469,3 +344,5 @@ export import ChatCompletion = API.ChatCompletion; | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
//# sourceMappingURL=completions.d.ts.map |
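The `functions` / `function_call` fields documented above compose as in this rough sketch; the weather function and its schema are invented for illustration and the client instance is assumed.

```ts
const res = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo-0613',
  messages: [{ role: 'user', content: "What's the weather in Paris?" }],
  functions: [
    {
      name: 'get_weather', // hypothetical function, not part of this diff
      description: 'Look up the current weather for a city',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  ],
  // Forces a call to the named function; 'auto' would be the default here, since functions are present.
  function_call: { name: 'get_weather' },
});
```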
@@ -7,4 +7,6 @@ export { Chat } from './chat.js'; | ||
CompletionCreateParams, | ||
CompletionCreateParamsNonStreaming, | ||
CompletionCreateParamsStreaming, | ||
Completions, | ||
} from './completions.js'; | ||
//# sourceMappingURL=index.d.ts.map |
@@ -10,9 +10,13 @@ import * as Core from 'openai/core'; | ||
create( | ||
body: CompletionCreateParams.CreateCompletionRequestNonStreaming, | ||
body: CompletionCreateParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Completion>>; | ||
create( | ||
body: CompletionCreateParams.CreateCompletionRequestStreaming, | ||
body: CompletionCreateParamsStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Stream<Completion>>>; | ||
create( | ||
body: CompletionCreateParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Completion | Stream<Completion>>>; | ||
} | ||
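The same split applies to text completions; a rough consumption sketch, assuming an existing client (iterating with `for await` is how a `Stream<Completion>` is typically drained).

```ts
const stream = await openai.completions.create({
  model: 'text-davinci-003',
  prompt: 'Write a haiku about diffs',
  stream: true, // selects the CompletionCreateParamsStreaming overload
});

for await (const part of stream) {
  // Each chunk is a Completion carrying partial text in choices[0].text.
  process.stdout.write(part.choices[0]?.text ?? '');
}
```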
@@ -48,285 +52,167 @@ export interface Completion { | ||
} | ||
export type CompletionCreateParams = | ||
| CompletionCreateParams.CreateCompletionRequestNonStreaming | ||
| CompletionCreateParams.CreateCompletionRequestStreaming; | ||
export interface CompletionCreateParams { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: boolean | null; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
export declare namespace CompletionCreateParams { | ||
interface CreateCompletionRequestNonStreaming { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
interface CreateCompletionRequestStreaming { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParams { | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
} | ||
export interface CompletionCreateParamsStreaming extends CompletionCreateParams { | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
} | ||
export declare namespace Completions { | ||
@@ -336,3 +222,5 @@ export import Completion = API.Completion; | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
//# sourceMappingURL=completions.d.ts.map |
@@ -36,3 +36,3 @@ import * as Core from 'openai/core'; | ||
fineTuneId: string, | ||
query?: FineTuneListEventsParams.ListEventsRequestNonStreaming, | ||
query?: FineTuneListEventsParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
@@ -42,5 +42,10 @@ ): Promise<Core.APIResponse<FineTuneEventsListResponse>>; | ||
fineTuneId: string, | ||
query: FineTuneListEventsParams.ListEventsRequestStreaming, | ||
query: FineTuneListEventsParamsStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Stream<FineTuneEvent>>>; | ||
listEvents( | ||
fineTuneId: string, | ||
query?: FineTuneListEventsParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<FineTuneEventsListResponse | Stream<FineTuneEvent>>>; | ||
} | ||
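And the analogous split for fine-tune events, sketched with a placeholder job ID and an assumed client.

```ts
// Without `stream`, the FineTuneListEventsParamsNonStreaming overload applies and the
// promise resolves to the events generated so far.
const events = await openai.fineTunes.listEvents('ft-abc123');

// With `stream: true`, the result is a Stream<FineTuneEvent> that ends when the job
// succeeds, is cancelled, or fails.
const eventStream = await openai.fineTunes.listEvents('ft-abc123', { stream: true });
```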
@@ -203,31 +208,42 @@ /** | ||
} | ||
export type FineTuneListEventsParams = | ||
| FineTuneListEventsParams.ListEventsRequestNonStreaming | ||
| FineTuneListEventsParams.ListEventsRequestStreaming; | ||
export interface FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: boolean; | ||
} | ||
export declare namespace FineTuneListEventsParams { | ||
interface ListEventsRequestNonStreaming { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: false; | ||
} | ||
interface ListEventsRequestStreaming { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream: true; | ||
} | ||
type FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
type FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} | ||
export interface FineTuneListEventsParamsNonStreaming extends FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: false; | ||
} | ||
export interface FineTuneListEventsParamsStreaming extends FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream: true; | ||
} | ||
export declare namespace FineTunes { | ||
@@ -240,4 +256,6 @@ export import FineTune = API.FineTune; | ||
export import FineTuneListEventsParams = API.FineTuneListEventsParams; | ||
export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} | ||
export {}; | ||
//# sourceMappingURL=fine-tunes.d.ts.map |
export { Audio } from './audio/audio.js'; | ||
export { Chat } from './chat/chat.js'; | ||
export { Completion, CompletionChoice, CompletionCreateParams, Completions } from './completions.js'; | ||
export { | ||
Completion, | ||
CompletionChoice, | ||
CompletionCreateParams, | ||
CompletionCreateParamsNonStreaming, | ||
CompletionCreateParamsStreaming, | ||
Completions, | ||
} from './completions.js'; | ||
export { Edit, EditCreateParams, Edits } from './edits.js'; | ||
@@ -13,2 +20,4 @@ export { Embedding, EmbeddingCreateParams, Embeddings } from './embeddings.js'; | ||
FineTuneListEventsParams, | ||
FineTuneListEventsParamsNonStreaming, | ||
FineTuneListEventsParamsStreaming, | ||
FineTunesPage, | ||
@@ -15,0 +24,0 @@ FineTunes, |
@@ -638,2 +638,3 @@ import { VERSION } from './version'; | ||
| 'Unknown'; | ||
type Browser = 'ie' | 'edge' | 'chrome' | 'firefox' | 'safari'; | ||
type PlatformProperties = { | ||
@@ -644,3 +645,3 @@ 'X-Stainless-Lang': 'js'; | ||
'X-Stainless-Arch': Arch; | ||
'X-Stainless-Runtime': 'node' | 'deno' | 'edge' | 'unknown'; | ||
'X-Stainless-Runtime': 'node' | 'deno' | 'edge' | `browser:${Browser}` | 'unknown'; | ||
'X-Stainless-Runtime-Version': string; | ||
@@ -680,3 +681,16 @@ }; | ||
} | ||
// TODO add support for Cloudflare workers, browsers, etc. | ||
const browserInfo = getBrowserInfo(); | ||
if (browserInfo) { | ||
return { | ||
'X-Stainless-Lang': 'js', | ||
'X-Stainless-Package-Version': VERSION, | ||
'X-Stainless-OS': 'Unknown', | ||
'X-Stainless-Arch': 'unknown', | ||
'X-Stainless-Runtime': `browser:${browserInfo.browser}`, | ||
'X-Stainless-Runtime-Version': browserInfo.version, | ||
}; | ||
} | ||
// TODO add support for Cloudflare workers, etc. | ||
return { | ||
@@ -692,2 +706,40 @@ 'X-Stainless-Lang': 'js', | ||
type BrowserInfo = { | ||
browser: Browser; | ||
version: string; | ||
}; | ||
declare const navigator: { userAgent: string } | undefined; | ||
// Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts | ||
function getBrowserInfo(): BrowserInfo | null { | ||
if (typeof navigator === 'undefined' || !navigator) { | ||
return null; | ||
} | ||
// NOTE: The order matters here! | ||
const browserPatterns = [ | ||
{ key: 'edge' as const, pattern: /Edge(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'ie' as const, pattern: /MSIE(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'ie' as const, pattern: /Trident(?:.*rv\:(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'chrome' as const, pattern: /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'firefox' as const, pattern: /Firefox(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ }, | ||
{ key: 'safari' as const, pattern: /(?:Version\W+(\d+)\.(\d+)(?:\.(\d+))?)?(?:\W+Mobile\S*)?\W+Safari/ }, | ||
]; | ||
// Find the FIRST matching browser | ||
for (const { key, pattern } of browserPatterns) { | ||
const match = pattern.exec(navigator.userAgent); | ||
if (match) { | ||
const major = match[1] || 0; | ||
const minor = match[2] || 0; | ||
const patch = match[3] || 0; | ||
return { browser: key, version: `${major}.${minor}.${patch}` }; | ||
} | ||
} | ||
return null; | ||
} | ||
const normalizeArch = (arch: string): Arch => { | ||
@@ -751,4 +803,4 @@ // Node docs: | ||
const validatePositiveInteger = (name: string, n: number) => { | ||
if (!Number.isInteger(n)) { | ||
const validatePositiveInteger = (name: string, n: unknown): number => { | ||
if (typeof n !== 'number' || !Number.isInteger(n)) { | ||
throw new Error(`${name} must be an integer`); | ||
@@ -775,10 +827,12 @@ } | ||
* | ||
* Will return an empty string if the environment variable doesn't exist or cannot be accessed. | ||
* Will return undefined if the environment variable doesn't exist or cannot be accessed. | ||
*/ | ||
export const readEnv = (env: string): string | undefined => { | ||
if (typeof process === 'undefined') { | ||
return undefined; | ||
if (typeof process !== 'undefined') { | ||
return process.env?.[env] ?? undefined; | ||
} | ||
return process.env[env] ?? undefined; | ||
if (typeof Deno !== 'undefined') { | ||
return Deno.env?.get?.(env); | ||
} | ||
return undefined; | ||
}; | ||
@@ -850,2 +904,13 @@ | ||
export const isRunningInBrowser = () => { | ||
return ( | ||
// @ts-ignore | ||
typeof window !== 'undefined' && | ||
// @ts-ignore | ||
typeof window.document !== 'undefined' && | ||
// @ts-ignore | ||
typeof navigator !== 'undefined' | ||
); | ||
}; | ||
export interface HeadersProtocol { | ||
@@ -852,0 +917,0 @@ get: (header: string) => string | null | undefined; |
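As a rough illustration of the user-agent sniffing above (the UA string is an assumption, not part of this release), the `chrome` pattern is tried before `safari`, and its capture groups feed the `X-Stainless-Runtime-Version` header:

// Hypothetical Chrome user-agent string, for illustration only.
const ua =
  'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.5790.170 Safari/537.36';
const match = /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/.exec(ua);
console.log(match?.slice(1, 4)); // ['115', '0', '5790']
// so the platform headers would carry 'X-Stainless-Runtime': 'browser:chrome'
// and 'X-Stainless-Runtime-Version': '115.0.5790'.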
@@ -70,2 +70,8 @@ // File generated from our OpenAPI spec by Stainless. | ||
/** | ||
* By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. | ||
* Only set this option to `true` if you understand the risks and have appropriate mitigations in place. | ||
*/ | ||
dangerouslyAllowBrowser?: boolean; | ||
organization?: string | null; | ||
@@ -99,2 +105,8 @@ } | ||
if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { | ||
throw new Error( | ||
"It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers. \nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", | ||
); | ||
} | ||
super({ | ||
@@ -186,2 +198,4 @@ baseURL: options.baseURL!, | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
@@ -231,4 +245,6 @@ export import Chat = API.Chat; | ||
export import FineTuneListEventsParams = API.FineTuneListEventsParams; | ||
export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} | ||
export default OpenAI; |
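A minimal sketch of opting in to browser usage, assuming you have weighed the key-exposure risk the option documents (the key below is a placeholder):

import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'sk-placeholder', // never embed a real secret key in client-side code
  dangerouslyAllowBrowser: true, // without this flag the constructor throws in browser-like environments
});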
@@ -6,3 +6,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { Translations } from './translations'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
@@ -9,0 +9,0 @@ export class Audio extends APIResource { |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { type Uploadable, multipartFormRequestOptions } from '../../core'; | ||
@@ -8,0 +8,0 @@ |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { type Uploadable, multipartFormRequestOptions } from '../../core'; | ||
@@ -8,0 +8,0 @@ |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { Completions } from './completions'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
@@ -18,2 +18,4 @@ export class Chat extends APIResource { | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { Stream } from '../../streaming'; | ||
@@ -14,7 +14,7 @@ | ||
create( | ||
body: CompletionCreateParams.CreateChatCompletionRequestNonStreaming, | ||
body: CompletionCreateParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<ChatCompletion>>; | ||
create( | ||
body: CompletionCreateParams.CreateChatCompletionRequestStreaming, | ||
body: CompletionCreateParamsStreaming, | ||
options?: Core.RequestOptions, | ||
@@ -25,2 +25,6 @@ ): Promise<Core.APIResponse<Stream<ChatCompletionChunk>>>; | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<ChatCompletion | Stream<ChatCompletionChunk>>>; | ||
create( | ||
body: CompletionCreateParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<ChatCompletion | Stream<ChatCompletionChunk>>> { | ||
@@ -216,332 +220,187 @@ return this.post('/chat/completions', { body, ...options, stream: body.stream ?? false }); | ||
export type CompletionCreateParams = | ||
| CompletionCreateParams.CreateChatCompletionRequestNonStreaming | ||
| CompletionCreateParams.CreateChatCompletionRequestStreaming; | ||
export interface CompletionCreateParams { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
export namespace CompletionCreateParams { | ||
export interface CreateChatCompletionRequestNonStreaming { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between responding to the end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
*/ | ||
function_call?: 'none' | 'auto' | CompletionCreateParams.FunctionCallOption; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between responding to the end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
*/ | ||
function_call?: | ||
| 'none' | ||
| 'auto' | ||
| CompletionCreateParams.CreateChatCompletionRequestNonStreaming.FunctionCallOption; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.Function>; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Function>; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: boolean | null; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
export namespace CompletionCreateParams { | ||
export interface FunctionCallOption { | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
* The name of the function to call. | ||
*/ | ||
user?: string; | ||
name: string; | ||
} | ||
export namespace CreateChatCompletionRequestNonStreaming { | ||
export interface FunctionCallOption { | ||
/** | ||
* The name of the function to call. | ||
*/ | ||
name: string; | ||
} | ||
export interface Function { | ||
/** | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
name: string; | ||
/** | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
parameters: Record<string, unknown>; | ||
/** | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
description?: string; | ||
} | ||
} | ||
export interface CreateChatCompletionRequestStreaming { | ||
export interface Function { | ||
/** | ||
* A list of messages comprising the conversation so far. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
messages: Array<CreateChatCompletionRequestMessage>; | ||
name: string; | ||
/** | ||
* ID of the model to use. See the | ||
* [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table | ||
* for details on which models work with the Chat API. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'gpt-4' | ||
| 'gpt-4-0314' | ||
| 'gpt-4-0613' | ||
| 'gpt-4-32k' | ||
| 'gpt-4-32k-0314' | ||
| 'gpt-4-32k-0613' | ||
| 'gpt-3.5-turbo' | ||
| 'gpt-3.5-turbo-16k' | ||
| 'gpt-3.5-turbo-0301' | ||
| 'gpt-3.5-turbo-0613' | ||
| 'gpt-3.5-turbo-16k-0613'; | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
frequency_penalty?: number | null; | ||
parameters: Record<string, unknown>; | ||
/** | ||
* Controls how the model responds to function calls. "none" means the model does | ||
* not call a function, and responds to the end-user. "auto" means the model can | ||
* pick between responding to the end-user or calling a function. Specifying a particular function | ||
* via `{"name": "my_function"}` forces the model to call that function. "none" is | ||
* the default when no functions are present. "auto" is the default if functions | ||
* are present. | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
function_call?: | ||
| 'none' | ||
| 'auto' | ||
| CompletionCreateParams.CreateChatCompletionRequestStreaming.FunctionCallOption; | ||
/** | ||
* A list of functions the model may generate JSON inputs for. | ||
*/ | ||
functions?: Array<CompletionCreateParams.CreateChatCompletionRequestStreaming.Function>; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the | ||
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the | ||
* bias is added to the logits generated by the model prior to sampling. The exact | ||
* effect will vary per model, but values between -1 and 1 should decrease or | ||
* increase likelihood of selection; values like -100 or 100 should result in a ban | ||
* or exclusive selection of the relevant token. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the chat completion. | ||
* | ||
* The total length of input tokens and generated tokens is limited by the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number; | ||
/** | ||
* How many chat completion choices to generate for each input message. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
description?: string; | ||
} | ||
export namespace CreateChatCompletionRequestStreaming { | ||
export interface FunctionCallOption { | ||
/** | ||
* The name of the function to call. | ||
*/ | ||
name: string; | ||
} | ||
export type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
export interface Function { | ||
/** | ||
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain | ||
* underscores and dashes, with a maximum length of 64. | ||
*/ | ||
name: string; | ||
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParams { | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
} | ||
/** | ||
* The parameters the function accepts, described as a JSON Schema object. See the | ||
* [guide](/docs/guides/gpt/function-calling) for examples, and the | ||
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for | ||
* documentation about the format. | ||
* | ||
* To describe a function that accepts no parameters, provide the value | ||
* `{"type": "object", "properties": {}}`. | ||
*/ | ||
parameters: Record<string, unknown>; | ||
/** | ||
* A description of what the function does, used by the model to choose when and | ||
* how to call the function. | ||
*/ | ||
description?: string; | ||
} | ||
} | ||
export interface CompletionCreateParamsStreaming extends CompletionCreateParams { | ||
/** | ||
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
} | ||
@@ -554,2 +413,4 @@ | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} |
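With the flattened parameter interfaces, `create` narrows its return type on the literal type of `stream`. A usage sketch; the model name comes from the enum above, and its availability is an assumption:

import OpenAI from 'openai';

const openai = new OpenAI();

// Non-streaming: resolves to a single ChatCompletion.
const completion = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Say hello' }],
});
console.log(completion.choices[0]?.message?.content);

// Streaming: `stream: true` selects CompletionCreateParamsStreaming and returns Stream<ChatCompletionChunk>.
const chunks = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Say hello' }],
  stream: true,
});
for await (const chunk of chunks) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
}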
@@ -9,3 +9,5 @@ // File generated from our OpenAPI spec by Stainless. | ||
CompletionCreateParams, | ||
CompletionCreateParamsNonStreaming, | ||
CompletionCreateParamsStreaming, | ||
Completions, | ||
} from './completions'; |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { Stream } from '../streaming'; | ||
@@ -14,7 +14,7 @@ | ||
create( | ||
body: CompletionCreateParams.CreateCompletionRequestNonStreaming, | ||
body: CompletionCreateParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Completion>>; | ||
create( | ||
body: CompletionCreateParams.CreateCompletionRequestStreaming, | ||
body: CompletionCreateParamsStreaming, | ||
options?: Core.RequestOptions, | ||
@@ -25,2 +25,6 @@ ): Promise<Core.APIResponse<Stream<Completion>>>; | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Completion | Stream<Completion>>>; | ||
create( | ||
body: CompletionCreateParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<Completion | Stream<Completion>>> { | ||
@@ -77,316 +81,184 @@ return this.post('/completions', { body, ...options, stream: body.stream ?? false }); | ||
export type CompletionCreateParams = | ||
| CompletionCreateParams.CreateCompletionRequestNonStreaming | ||
| CompletionCreateParams.CreateCompletionRequestStreaming; | ||
export interface CompletionCreateParams { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
export namespace CompletionCreateParams { | ||
export interface CreateCompletionRequestNonStreaming { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: boolean | null; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
export namespace CompletionCreateParams { | ||
export type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} | ||
export interface CreateCompletionRequestStreaming { | ||
/** | ||
* ID of the model to use. You can use the | ||
* [List models](/docs/api-reference/models/list) API to see all of your available | ||
* models, or see our [Model overview](/docs/models/overview) for descriptions of | ||
* them. | ||
*/ | ||
model: | ||
| (string & {}) | ||
| 'text-davinci-003' | ||
| 'text-davinci-002' | ||
| 'text-davinci-001' | ||
| 'code-davinci-002' | ||
| 'text-curie-001' | ||
| 'text-babbage-001' | ||
| 'text-ada-001'; | ||
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParams { | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream?: false | null; | ||
} | ||
/** | ||
* The prompt(s) to generate completions for, encoded as a string, array of | ||
* strings, array of tokens, or array of token arrays. | ||
* | ||
* Note that <|endoftext|> is the document separator that the model sees during | ||
* training, so if a prompt is not specified the model will generate as if from the | ||
* beginning of a new document. | ||
*/ | ||
prompt: string | Array<string> | Array<number> | Array<Array<number>> | null; | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
/** | ||
* Generates `best_of` completions server-side and returns the "best" (the one with | ||
* the highest log probability per token). Results cannot be streamed. | ||
* | ||
* When used with `n`, `best_of` controls the number of candidate completions and | ||
* `n` specifies how many to return – `best_of` must be greater than `n`. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
best_of?: number | null; | ||
/** | ||
* Echo back the prompt in addition to the completion | ||
*/ | ||
echo?: boolean | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their | ||
* existing frequency in the text so far, decreasing the model's likelihood to | ||
* repeat the same line verbatim. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
frequency_penalty?: number | null; | ||
/** | ||
* Modify the likelihood of specified tokens appearing in the completion. | ||
* | ||
* Accepts a json object that maps tokens (specified by their token ID in the GPT | ||
* tokenizer) to an associated bias value from -100 to 100. You can use this | ||
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to | ||
* convert text to token IDs. Mathematically, the bias is added to the logits | ||
* generated by the model prior to sampling. The exact effect will vary per model, | ||
* but values between -1 and 1 should decrease or increase likelihood of selection; | ||
* values like -100 or 100 should result in a ban or exclusive selection of the | ||
* relevant token. | ||
* | ||
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token | ||
* from being generated. | ||
*/ | ||
logit_bias?: Record<string, number> | null; | ||
/** | ||
* Include the log probabilities on the `logprobs` most likely tokens, as well as the | ||
* chosen tokens. For example, if `logprobs` is 5, the API will return a list of | ||
* the 5 most likely tokens. The API will always return the `logprob` of the | ||
* sampled token, so there may be up to `logprobs+1` elements in the response. | ||
* | ||
* The maximum value for `logprobs` is 5. | ||
*/ | ||
logprobs?: number | null; | ||
/** | ||
* The maximum number of [tokens](/tokenizer) to generate in the completion. | ||
* | ||
* The token count of your prompt plus `max_tokens` cannot exceed the model's | ||
* context length. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) | ||
* for counting tokens. | ||
*/ | ||
max_tokens?: number | null; | ||
/** | ||
* How many completions to generate for each prompt. | ||
* | ||
* **Note:** Because this parameter generates many completions, it can quickly | ||
* consume your token quota. Use carefully and ensure that you have reasonable | ||
* settings for `max_tokens` and `stop`. | ||
*/ | ||
n?: number | null; | ||
/** | ||
* Number between -2.0 and 2.0. Positive values penalize new tokens based on | ||
* whether they appear in the text so far, increasing the model's likelihood to | ||
* talk about new topics. | ||
* | ||
* [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) | ||
*/ | ||
presence_penalty?: number | null; | ||
/** | ||
* Up to 4 sequences where the API will stop generating further tokens. The | ||
* returned text will not contain the stop sequence. | ||
*/ | ||
stop?: string | null | Array<string>; | ||
/** | ||
* The suffix that comes after a completion of inserted text. | ||
*/ | ||
suffix?: string | null; | ||
/** | ||
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will | ||
* make the output more random, while lower values like 0.2 will make it more | ||
* focused and deterministic. | ||
* | ||
* We generally recommend altering this or `top_p` but not both. | ||
*/ | ||
temperature?: number | null; | ||
/** | ||
* An alternative to sampling with temperature, called nucleus sampling, where the | ||
* model considers the results of the tokens with top_p probability mass. So 0.1 | ||
* means only the tokens comprising the top 10% probability mass are considered. | ||
* | ||
* We generally recommend altering this or `temperature` but not both. | ||
*/ | ||
top_p?: number | null; | ||
/** | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
*/ | ||
user?: string; | ||
} | ||
export interface CompletionCreateParamsStreaming extends CompletionCreateParams { | ||
/** | ||
* Whether to stream back partial progress. If set, tokens will be sent as | ||
* data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available, with the stream terminated by a `data: [DONE]` | ||
* message. | ||
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). | ||
*/ | ||
stream: true; | ||
} | ||
@@ -398,2 +270,4 @@ | ||
export import CompletionCreateParams = API.CompletionCreateParams; | ||
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; | ||
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; | ||
} |
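The legacy text-completions endpoint follows the same overload pattern. A sketch of the streaming variant (client setup and model availability are assumptions):

import OpenAI from 'openai';

const openai = new OpenAI();

const stream = await openai.completions.create({
  model: 'text-davinci-003',
  prompt: 'Write a one-line haiku about streams.',
  stream: true, // selects CompletionCreateParamsStreaming, returning Stream<Completion>
});
for await (const part of stream) {
  process.stdout.write(part.choices[0]?.text ?? '');
}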
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
@@ -8,0 +8,0 @@ export class Edits extends APIResource { |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
@@ -8,0 +8,0 @@ export class Embeddings extends APIResource { |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { type Uploadable, multipartFormRequestOptions } from '../core'; | ||
@@ -8,0 +8,0 @@ import { Page } from '../pagination'; |
@@ -6,3 +6,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import * as Files from './files'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { Page } from '../pagination'; | ||
@@ -52,3 +52,3 @@ import { Stream } from '../streaming'; | ||
fineTuneId: string, | ||
query?: FineTuneListEventsParams.ListEventsRequestNonStreaming, | ||
query?: FineTuneListEventsParamsNonStreaming, | ||
options?: Core.RequestOptions, | ||
@@ -58,3 +58,3 @@ ): Promise<Core.APIResponse<FineTuneEventsListResponse>>; | ||
fineTuneId: string, | ||
query: FineTuneListEventsParams.ListEventsRequestStreaming, | ||
query: FineTuneListEventsParamsStreaming, | ||
options?: Core.RequestOptions, | ||
@@ -64,2 +64,7 @@ ): Promise<Core.APIResponse<Stream<FineTuneEvent>>>; | ||
fineTuneId: string, | ||
query?: FineTuneListEventsParams, | ||
options?: Core.RequestOptions, | ||
): Promise<Core.APIResponse<FineTuneEventsListResponse | Stream<FineTuneEvent>>>; | ||
listEvents( | ||
fineTuneId: string, | ||
query?: FineTuneListEventsParams | undefined, | ||
@@ -273,34 +278,46 @@ options?: Core.RequestOptions, | ||
export type FineTuneListEventsParams = | ||
| FineTuneListEventsParams.ListEventsRequestNonStreaming | ||
| FineTuneListEventsParams.ListEventsRequestStreaming; | ||
export interface FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: boolean; | ||
} | ||
export namespace FineTuneListEventsParams { | ||
export interface ListEventsRequestNonStreaming { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: false; | ||
} | ||
export type FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
export type FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} | ||
export interface ListEventsRequestStreaming { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream: true; | ||
} | ||
export interface FineTuneListEventsParamsNonStreaming extends FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream?: false; | ||
} | ||
export interface FineTuneListEventsParamsStreaming extends FineTuneListEventsParams { | ||
/** | ||
* Whether to stream events for the fine-tune job. If set to true, events will be | ||
* sent as data-only | ||
* [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) | ||
* as they become available. The stream will terminate with a `data: [DONE]` | ||
* message when the job is finished (succeeded, cancelled, or failed). | ||
* | ||
* If set to false, only events generated so far will be returned. | ||
*/ | ||
stream: true; | ||
} | ||
export namespace FineTunes { | ||
@@ -313,2 +330,4 @@ export import FineTune = API.FineTune; | ||
export import FineTuneListEventsParams = API.FineTuneListEventsParams; | ||
export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming; | ||
export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming; | ||
} |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { type Uploadable, multipartFormRequestOptions } from '../core'; | ||
@@ -8,0 +8,0 @@ |
@@ -5,3 +5,10 @@ // File generated from our OpenAPI spec by Stainless. | ||
export { Chat } from './chat/chat'; | ||
export { Completion, CompletionChoice, CompletionCreateParams, Completions } from './completions'; | ||
export { | ||
Completion, | ||
CompletionChoice, | ||
CompletionCreateParams, | ||
CompletionCreateParamsNonStreaming, | ||
CompletionCreateParamsStreaming, | ||
Completions, | ||
} from './completions'; | ||
export { Edit, EditCreateParams, Edits } from './edits'; | ||
@@ -16,2 +23,4 @@ export { Embedding, EmbeddingCreateParams, Embeddings } from './embeddings'; | ||
FineTuneListEventsParams, | ||
FineTuneListEventsParamsNonStreaming, | ||
FineTuneListEventsParamsStreaming, | ||
FineTunesPage, | ||
@@ -18,0 +27,0 @@ FineTunes, |
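Because the streaming and non-streaming parameter interfaces are now re-exported, request payloads can be typed directly. A sketch; the subpath import is an assumption based on this package layout:

import type { CompletionCreateParamsStreaming } from 'openai/resources/completions';

const params: CompletionCreateParamsStreaming = {
  model: 'text-davinci-003',
  prompt: 'hello',
  stream: true, // the literal `true` is required by this interface
};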
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
import { Page } from '../pagination'; | ||
@@ -8,0 +8,0 @@ |
@@ -5,3 +5,3 @@ // File generated from our OpenAPI spec by Stainless. | ||
import { APIResource } from '../resource'; | ||
import * as API from '.'; | ||
import * as API from './index'; | ||
@@ -8,0 +8,0 @@ export class Moderations extends APIResource { |
import type { Response } from './_shims/fetch.js'; | ||
import { ReadableStream } from './_shims/ReadableStream.js'; | ||
@@ -81,2 +82,34 @@ import { APIResponse, Headers, createResponseHeaders } from './core'; | ||
} | ||
toReadableStream(): ReadableStream { | ||
const self = this; | ||
let iter: AsyncIterator<Item>; | ||
const encoder = new TextEncoder(); | ||
return new ReadableStream({ | ||
async start() { | ||
iter = self[Symbol.asyncIterator](); | ||
}, | ||
async pull(ctrl) { | ||
try { | ||
const { value, done } = await iter.next(); | ||
if (done) return ctrl.close(); | ||
const str = | ||
typeof value === 'string' ? value : ( | ||
// Add a newline after JSON to make it easier to parse newline-separated JSON on the frontend. | ||
JSON.stringify(value) + '\n' | ||
); | ||
const bytes = encoder.encode(str); | ||
ctrl.enqueue(bytes); | ||
} catch (err) { | ||
ctrl.error(err); | ||
} | ||
}, | ||
async cancel() { | ||
await iter.return?.(); | ||
}, | ||
}); | ||
} | ||
} | ||
@@ -83,0 +116,0 @@ |
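The new `toReadableStream()` method converts the SDK's async-iterable `Stream` into a web `ReadableStream` of newline-delimited JSON bytes, which is convenient for forwarding a streamed response to a browser. A sketch of a web-style request handler; the route shape and framework wiring are assumptions, not part of the diff:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

export async function POST(req: Request): Promise<Response> {
  const { prompt } = await req.json();
  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: prompt }],
    stream: true,
  });
  // Each chunk is serialized as newline-delimited JSON, so a browser client
  // can parse the response body line by line as it arrives.
  return new Response(stream.toReadableStream());
}
```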
import { type RequestOptions } from './core'; | ||
import { type Readable } from './_shims/node-readable'; | ||
import { type BodyInit } from './_shims/fetch.js'; | ||
import { FormData, File, type FilePropertyBag } from './_shims/formdata.js'; | ||
import { FormData, File, type Blob, type FilePropertyBag } from './_shims/formdata.js'; | ||
import { getMultipartRequestOptions } from './_shims/getMultipartRequestOptions'; | ||
@@ -11,3 +11,4 @@ import { fileFromPath } from './_shims/fileFromPath'; | ||
export type BlobPart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView; | ||
type BlobLikePart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView; | ||
export type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | Uint8Array | DataView; | ||
@@ -88,7 +89,7 @@ /** | ||
export type ToFileInput = Uploadable | Exclude<BlobPart, string> | AsyncIterable<BlobPart>; | ||
export type ToFileInput = Uploadable | Exclude<BlobLikePart, string> | AsyncIterable<BlobLikePart>; | ||
/** | ||
* Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobPart}, or {@link AsyncIterable} of {@link BlobPart}s | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s | ||
* @param {string=} name the name of the file. If omitted, toFile will try to determine a file name from bits if possible | ||
@@ -236,6 +237,6 @@ * @param {Object=} options additional properties | ||
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { | ||
form.append(key, value); | ||
form.append(key, String(value)); | ||
} else if (isUploadable(value)) { | ||
const file = await toFile(value); | ||
form.append(key, file); | ||
form.append(key, file as File); | ||
} else if (Array.isArray(value)) { | ||
@@ -242,0 +243,0 @@ await Promise.all(value.map((entry) => addFormValue(form, key + '[]', entry))); |
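The `toFile` helper documented above accepts a range of `BlobLikePart` inputs. A brief sketch of building an upload from an in-memory buffer; the top-level `toFile` re-export, file name, and purpose are assumptions for illustration:

```ts
import OpenAI, { toFile } from 'openai'; // toFile re-export path is an assumption

const openai = new OpenAI();

async function uploadTrainingFile(jsonl: string) {
  // Wrap raw bytes in a File-like object; 'training.jsonl' is an illustrative name.
  const file = await toFile(Buffer.from(jsonl), 'training.jsonl');
  return openai.files.create({ file, purpose: 'fine-tune' });
}
```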
@@ -1,1 +0,1 @@ | ||
export const VERSION = '4.0.0-beta.7'; | ||
export const VERSION = '4.0.0-beta.8'; |
import type { Response } from 'openai/_shims/fetch'; | ||
import { ReadableStream } from 'openai/_shims/ReadableStream'; | ||
import { APIResponse, Headers } from './core.js'; | ||
@@ -13,3 +14,4 @@ export declare class Stream<Item> implements AsyncIterable<Item>, APIResponse<Stream<Item>> { | ||
[Symbol.asyncIterator](): AsyncIterator<Item, any, undefined>; | ||
toReadableStream(): ReadableStream; | ||
} | ||
//# sourceMappingURL=streaming.d.ts.map |
'use strict'; | ||
Object.defineProperty(exports, '__esModule', { value: true }); | ||
exports.Stream = void 0; | ||
const ReadableStream_1 = require('openai/_shims/ReadableStream'); | ||
const core_1 = require('./core.js'); | ||
@@ -59,2 +60,31 @@ class Stream { | ||
} | ||
toReadableStream() { | ||
const self = this; | ||
let iter; | ||
const encoder = new TextEncoder(); | ||
return new ReadableStream_1.ReadableStream({ | ||
async start() { | ||
iter = self[Symbol.asyncIterator](); | ||
}, | ||
async pull(ctrl) { | ||
try { | ||
const { value, done } = await iter.next(); | ||
if (done) return ctrl.close(); | ||
const str = | ||
typeof value === 'string' ? value : ( | ||
// Add a newline after JSON to make it easier to parse newline-separated JSON on the frontend. | ||
JSON.stringify(value) + '\n' | ||
); | ||
const bytes = encoder.encode(str); | ||
ctrl.enqueue(bytes); | ||
} catch (err) { | ||
ctrl.error(err); | ||
} | ||
}, | ||
async cancel() { | ||
var _a; | ||
await ((_a = iter.return) === null || _a === void 0 ? void 0 : _a.call(iter)); | ||
}, | ||
}); | ||
} | ||
} | ||
@@ -61,0 +91,0 @@ exports.Stream = Stream; |
import { type RequestOptions } from './core.js'; | ||
import { type Readable } from 'openai/_shims/node-readable'; | ||
import { type BodyInit } from 'openai/_shims/fetch'; | ||
import { FormData, type FilePropertyBag } from 'openai/_shims/formdata'; | ||
import { FormData, type Blob, type FilePropertyBag } from 'openai/_shims/formdata'; | ||
import { fileFromPath } from 'openai/_shims/fileFromPath'; | ||
import { type FsReadStream } from 'openai/_shims/node-readable'; | ||
export { fileFromPath }; | ||
export type BlobPart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView; | ||
type BlobLikePart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView; | ||
export type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | Uint8Array | DataView; | ||
/** | ||
@@ -58,6 +59,6 @@ * Typically, this is a native "File" class. | ||
export declare const isUploadable: (value: any) => value is Uploadable; | ||
export type ToFileInput = Uploadable | Exclude<BlobPart, string> | AsyncIterable<BlobPart>; | ||
export type ToFileInput = Uploadable | Exclude<BlobLikePart, string> | AsyncIterable<BlobLikePart>; | ||
/** | ||
* Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobPart}, or {@link AsyncIterable} of {@link BlobPart}s | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s | ||
* @param {string=} name the name of the file. If omitted, toFile will try to determine a file name from bits if possible | ||
@@ -64,0 +65,0 @@ * @param {Object=} options additional properties |
@@ -61,3 +61,3 @@ 'use strict'; | ||
* Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobPart}, or {@link AsyncIterable} of {@link BlobPart}s | ||
* @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s | ||
* @param {string=} name the name of the file. If omitted, toFile will try to determine a file name from bits if possible | ||
@@ -193,3 +193,3 @@ * @param {Object=} options additional properties | ||
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { | ||
form.append(key, value); | ||
form.append(key, String(value)); | ||
} else if ((0, exports.isUploadable)(value)) { | ||
@@ -196,0 +196,0 @@ const file = await toFile(value); |
@@ -1,2 +0,2 @@ | ||
export declare const VERSION = '4.0.0-beta.7'; | ||
export declare const VERSION = '4.0.0-beta.8'; | ||
//# sourceMappingURL=version.d.ts.map |
'use strict'; | ||
Object.defineProperty(exports, '__esModule', { value: true }); | ||
exports.VERSION = void 0; | ||
exports.VERSION = '4.0.0-beta.7'; | ||
exports.VERSION = '4.0.0-beta.8'; | ||
//# sourceMappingURL=version.js.map |
Sorry, the diff of this file is not supported yet (this notice is repeated for 46 files in this release).
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 1 instance in 1 package