
openai

Maintainers: 5 · Versions: 209
openai npm package: compare versions

Comparing version 4.66.1 to 4.67.0


package.json
{
  "name": "openai",
- "version": "4.66.1",
+ "version": "4.67.0",
  "description": "The official TypeScript library for the OpenAI API",

@@ -5,0 +5,0 @@ "author": "OpenAI <support@openai.com>",

@@ -22,3 +22,3 @@ # OpenAI Node API Library

```ts
- import OpenAI from 'https://deno.land/x/openai@v4.66.1/mod.ts';
+ import OpenAI from 'https://deno.land/x/openai@v4.67.0/mod.ts';
```
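For orientation, here is a minimal usage sketch built on the Deno import above; the environment-variable key lookup and the model choice are assumptions for illustration, not something this diff prescribes:

```ts
// Minimal sketch (Deno): construct a client and make one chat completion call.
import OpenAI from 'https://deno.land/x/openai@v4.67.0/mod.ts';

const client = new OpenAI({
  apiKey: Deno.env.get('OPENAI_API_KEY'), // assumed key source for this example
});

const completion = await client.chat.completions.create({
  model: 'gpt-4o-mini', // placeholder model choice
  messages: [{ role: 'user', content: 'Say hello' }],
});

console.log(completion.choices[0].message.content);
```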

@@ -25,0 +25,0 @@

@@ -7,3 +7,3 @@ import { APIResource } from "../../resource.js";

}
- export type ChatModel = 'o1-preview' | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' | 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613';
+ export type ChatModel = 'o1-preview' | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' | 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'gpt-4o-realtime-preview-2024-10-01' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613';
export declare namespace Chat {
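The practical effect of this hunk is that the realtime preview snapshot is now a legal value of the ChatModel union. A small type-level sketch, assuming deep imports from the generated resources path resolve as in other 4.x releases:

```ts
// Sketch: this assignment type-checks on 4.67.0 and fails on 4.66.1.
import type { ChatModel } from 'openai/resources/chat/chat';

const model: ChatModel = 'gpt-4o-realtime-preview-2024-10-01';
```

This only reflects the type union; whether a given endpoint actually serves that snapshot is decided server-side, not by the SDK types.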

@@ -10,0 +10,0 @@ export import ChatModel = ChatAPI.ChatModel;

@@ -601,4 +601,8 @@ import { APIResource } from "../../resource.js";

/**
- * A list of messages comprising the conversation so far.
- * [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
+ * A list of messages comprising the conversation so far. Depending on the
+ * [model](https://platform.openai.com/docs/models) you use, different message
+ * types (modalities) are supported, like
+ * [text](https://platform.openai.com/docs/guides/text-generation),
+ * [images](https://platform.openai.com/docs/guides/vision), and
+ * [audio](https://platform.openai.com/docs/guides/audio).
*/
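The reworded comment points at multimodal message content. Purely as an illustration of what the existing ChatCompletionMessageParam types accept, a text-plus-image message might look like the sketch below; the client instance, model name, and image URL are placeholders:

```ts
// Illustration: one user message mixing a text part and an image part.
const completion = await client.chat.completions.create({
  model: 'gpt-4o', // placeholder; use a model that accepts image input
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this image?' },
        { type: 'image_url', image_url: { url: 'https://example.com/photo.png' } }, // placeholder URL
      ],
    },
  ],
});
```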

@@ -673,2 +677,7 @@ messages: Array<ChatCompletionMessageParam>;

/**
* Developer-defined tags and values used for filtering completions in the
* [dashboard](https://platform.openai.com/completions).
*/
metadata?: Record<string, string> | null;
/**
* How many chat completion choices to generate for each input message. Note that

@@ -747,2 +756,7 @@ * you will be charged based on the number of generated tokens across all of the

/**
* Whether or not to store the output of this completion request for traffic
* logging in the [dashboard](https://platform.openai.com/completions).
*/
store?: boolean | null;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be

@@ -749,0 +763,0 @@ * sent as data-only
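Taken together, the metadata and store fields added above are ordinary optional members of the chat completion create params. A hedged sketch of a request that sets both, assuming an existing client instance; the tag keys and values are arbitrary examples:

```ts
// Sketch: opt this request's output into storage and attach filterable tags.
const completion = await client.chat.completions.create({
  model: 'gpt-4o-mini', // placeholder model choice
  messages: [{ role: 'user', content: 'Summarize our conversation so far.' }],
  store: true, // new in 4.67.0
  metadata: { project: 'demo', environment: 'staging' }, // new in 4.67.0
});
```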

@@ -92,2 +92,6 @@ import { APIResource } from "../resource.js";

completion_tokens_details?: CompletionUsage.CompletionTokensDetails;
/**
* Breakdown of tokens used in the prompt.
*/
prompt_tokens_details?: CompletionUsage.PromptTokensDetails;
}

@@ -100,2 +104,6 @@ export declare namespace CompletionUsage {

/**
* Audio input tokens generated by the model.
*/
audio_tokens?: number;
/**
* Tokens generated by the model for reasoning.

@@ -105,2 +113,15 @@ */

}
/**
* Breakdown of tokens used in the prompt.
*/
interface PromptTokensDetails {
/**
* Audio input tokens present in the prompt.
*/
audio_tokens?: number;
/**
* Cached tokens present in the prompt.
*/
cached_tokens?: number;
}
}
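Both new breakdown objects are optional fields hanging off the response usage object, so reads should be guarded. A reading sketch, assuming a completion value returned by chat.completions.create:

```ts
// Sketch: all of these detail fields may be absent depending on the model and request.
const usage = completion.usage;

console.log('reasoning tokens:', usage?.completion_tokens_details?.reasoning_tokens ?? 0);
console.log('audio output tokens:', usage?.completion_tokens_details?.audio_tokens ?? 0);
console.log('cached prompt tokens:', usage?.prompt_tokens_details?.cached_tokens ?? 0);
console.log('audio prompt tokens:', usage?.prompt_tokens_details?.audio_tokens ?? 0);
```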

@@ -107,0 +128,0 @@ export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;

@@ -19,2 +19,3 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  | 'gpt-4o-2024-05-13'
+ | 'gpt-4o-realtime-preview-2024-10-01'
  | 'chatgpt-4o-latest'

@@ -21,0 +22,0 @@ | 'gpt-4o-mini'

@@ -730,4 +730,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

/**
- * A list of messages comprising the conversation so far.
- * [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
+ * A list of messages comprising the conversation so far. Depending on the
+ * [model](https://platform.openai.com/docs/models) you use, different message
+ * types (modalities) are supported, like
+ * [text](https://platform.openai.com/docs/guides/text-generation),
+ * [images](https://platform.openai.com/docs/guides/vision), and
+ * [audio](https://platform.openai.com/docs/guides/audio).
*/

@@ -811,2 +815,8 @@ messages: Array<ChatCompletionMessageParam>;

/**
* Developer-defined tags and values used for filtering completions in the
* [dashboard](https://platform.openai.com/completions).
*/
metadata?: Record<string, string> | null;
/**
* How many chat completion choices to generate for each input message. Note that

@@ -895,2 +905,8 @@ * you will be charged based on the number of generated tokens across all of the

/**
* Whether or not to store the output of this completion request for traffic
* logging in the [dashboard](https://platform.openai.com/completions).
*/
store?: boolean | null;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be

@@ -897,0 +913,0 @@ * sent as data-only

@@ -128,2 +128,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

completion_tokens_details?: CompletionUsage.CompletionTokensDetails;
/**
* Breakdown of tokens used in the prompt.
*/
prompt_tokens_details?: CompletionUsage.PromptTokensDetails;
}

@@ -137,2 +142,7 @@

/**
* Audio input tokens generated by the model.
*/
audio_tokens?: number;
/**
* Tokens generated by the model for reasoning.

@@ -142,2 +152,17 @@ */

}
/**
* Breakdown of tokens used in the prompt.
*/
export interface PromptTokensDetails {
/**
* Audio input tokens present in the prompt.
*/
audio_tokens?: number;
/**
* Cached tokens present in the prompt.
*/
cached_tokens?: number;
}
}

@@ -144,0 +169,0 @@

@@ -1,1 +0,1 @@

- export const VERSION = '4.66.1'; // x-release-please-version
+ export const VERSION = '4.67.0'; // x-release-please-version

@@ -1,2 +0,2 @@

- export declare const VERSION = "4.66.1";
+ export declare const VERSION = "4.67.0";
//# sourceMappingURL=version.d.ts.map
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.VERSION = void 0;
- exports.VERSION = '4.66.1'; // x-release-please-version
+ exports.VERSION = '4.67.0'; // x-release-please-version
//# sourceMappingURL=version.js.map

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

