@ai-sdk/openai - npm Package Compare versions

Comparing version 0.0.60 to 0.0.61


./dist/index.js

@@ -245,5 +245,6 @@ "use strict";
 responseFormat,
-seed
+seed,
+providerMetadata
 }) {
-var _a;
+var _a, _b, _c;
 const type = mode.type;

@@ -292,2 +293,4 @@ const warnings = [];
 seed,
+// openai specific settings:
+max_completion_tokens: (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.maxCompletionTokens) != null ? _b : void 0,
 // response format:

@@ -302,6 +305,6 @@ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
 if (this.modelId === "o1-preview" || this.modelId === "o1-mini") {
-baseArgs.temperature = 1;
-baseArgs.top_p = 1;
-baseArgs.frequency_penalty = 0;
-baseArgs.presence_penalty = 0;
+baseArgs.temperature = void 0;
+baseArgs.top_p = void 0;
+baseArgs.frequency_penalty = void 0;
+baseArgs.presence_penalty = void 0;
 }

@@ -331,3 +334,3 @@ switch (type) {
 strict: true,
-name: (_a = mode.name) != null ? _a : "response",
+name: (_c = mode.name) != null ? _c : "response",
 description: mode.description

@@ -382,3 +385,3 @@ }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
 const { args, warnings } = this.getArgs(options);

@@ -401,4 +404,9 @@ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
 const choice = response.choices[0];
+const providerMetadata = ((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? {
+openai: {
+reasoningTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens
+}
+} : void 0;
 return {
-text: (_a = choice.message.content) != null ? _a : void 0,
+text: (_e = choice.message.content) != null ? _e : void 0,
 toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [

@@ -411,3 +419,3 @@ {
 }
-] : (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+] : (_f = choice.message.tool_calls) == null ? void 0 : _f.map((toolCall) => {
 var _a2;

@@ -423,4 +431,4 @@ return {
 usage: {
-promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : NaN,
-completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : NaN
+promptTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : NaN,
+completionTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : NaN
 },

@@ -431,3 +439,4 @@ rawCall: { rawPrompt, rawSettings },
 warnings,
-logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
+logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
+providerMetadata
 };

@@ -622,3 +631,6 @@ }
 prompt_tokens: import_zod2.z.number().nullish(),
-completion_tokens: import_zod2.z.number().nullish()
+completion_tokens: import_zod2.z.number().nullish(),
+completion_tokens_details: import_zod2.z.object({
+reasoning_tokens: import_zod2.z.number().nullish()
+}).nullish()
 }).nullish();

@@ -625,0 +637,0 @@ var openAIChatResponseSchema = import_zod2.z.object({
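
A note on the o1-preview/o1-mini hunk above: the provider previously pinned temperature, top_p, frequency_penalty, and presence_penalty to their default values for these models; it now sets them to void 0 instead. Since JSON.stringify omits undefined properties, the parameters are dropped from the serialized request body entirely, and the o1 models only accept their default values for these settings. A minimal TypeScript sketch of the mechanism (illustrative, not code from the package):

// Illustrative: undefined properties disappear during JSON serialization,
// so the OpenAI API never sees the unsupported sampling parameters.
const baseArgs: { model: string; temperature?: number; top_p?: number } = {
  model: "o1-mini",
  temperature: void 0, // dropped by JSON.stringify
  top_p: void 0, // dropped by JSON.stringify
};
console.log(JSON.stringify(baseArgs)); // {"model":"o1-mini"}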

./CHANGELOG.md

 # @ai-sdk/openai

+## 0.0.61
+
+### Patch Changes
+
+- 8132a60: feat (provider/openai): support reasoning token usage and max_completion_tokens
+
 ## 0.0.60

@@ -4,0 +10,0 @@
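
Taken together, the two features in this patch let callers pass the OpenAI-specific maxCompletionTokens option into a request and read the reasoning token count back out of the result, both through provider metadata. A minimal sketch, assuming the contemporaneous ai package (3.3.x), which exposed this plumbing behind the experimental_providerMetadata option and result field; exact names may differ in other SDK versions:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("o1-mini"),
  prompt: "Solve 23 * 17 step by step.",
  // Forwarded by the provider as max_completion_tokens in the request body.
  experimental_providerMetadata: {
    openai: { maxCompletionTokens: 1000 },
  },
});

// Populated from response.usage.completion_tokens_details.reasoning_tokens.
console.log(result.experimental_providerMetadata?.openai?.reasoningTokens);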


@@ -242,5 +242,6 @@ "use strict";
 responseFormat,
-seed
+seed,
+providerMetadata
 }) {
-var _a;
+var _a, _b, _c;
 const type = mode.type;

@@ -289,2 +290,4 @@ const warnings = [];
 seed,
+// openai specific settings:
+max_completion_tokens: (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.maxCompletionTokens) != null ? _b : void 0,
 // response format:

@@ -299,6 +302,6 @@ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
 if (this.modelId === "o1-preview" || this.modelId === "o1-mini") {
-baseArgs.temperature = 1;
-baseArgs.top_p = 1;
-baseArgs.frequency_penalty = 0;
-baseArgs.presence_penalty = 0;
+baseArgs.temperature = void 0;
+baseArgs.top_p = void 0;
+baseArgs.frequency_penalty = void 0;
+baseArgs.presence_penalty = void 0;
 }

@@ -328,3 +331,3 @@ switch (type) {
 strict: true,
-name: (_a = mode.name) != null ? _a : "response",
+name: (_c = mode.name) != null ? _c : "response",
 description: mode.description

@@ -379,3 +382,3 @@ }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
 const { args, warnings } = this.getArgs(options);

@@ -398,4 +401,9 @@ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
 const choice = response.choices[0];
+const providerMetadata = ((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? {
+openai: {
+reasoningTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens
+}
+} : void 0;
 return {
-text: (_a = choice.message.content) != null ? _a : void 0,
+text: (_e = choice.message.content) != null ? _e : void 0,
 toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [

@@ -408,3 +416,3 @@ {
 }
-] : (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
+] : (_f = choice.message.tool_calls) == null ? void 0 : _f.map((toolCall) => {
 var _a2;

@@ -420,4 +428,4 @@ return {
 usage: {
-promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : NaN,
-completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : NaN
+promptTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : NaN,
+completionTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : NaN
 },

@@ -428,3 +436,4 @@ rawCall: { rawPrompt, rawSettings },
 warnings,
-logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs)
+logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
+providerMetadata
 };

@@ -619,3 +628,6 @@ }
 prompt_tokens: import_zod2.z.number().nullish(),
-completion_tokens: import_zod2.z.number().nullish()
+completion_tokens: import_zod2.z.number().nullish(),
+completion_tokens_details: import_zod2.z.object({
+reasoning_tokens: import_zod2.z.number().nullish()
+}).nullish()
 }).nullish();

@@ -622,0 +634,0 @@ var openAIChatResponseSchema = import_zod2.z.object({
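
For reference, the extended usage schema from the final hunk, restated standalone: every field is nullish, so responses from models that do not report completion_tokens_details still parse. A sketch using zod directly (equivalent to the bundled import_zod2 form; the parsed payload values are made up):

import { z } from "zod";

// Usage schema as extended in 0.0.61; all fields optional/nullable.
const usageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    completion_tokens_details: z
      .object({ reasoning_tokens: z.number().nullish() })
      .nullish(),
  })
  .nullish();

// Illustrative payload with made-up token counts:
usageSchema.parse({
  prompt_tokens: 11,
  completion_tokens: 460,
  completion_tokens_details: { reasoning_tokens: 384 },
});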

./package.json

 {
 "name": "@ai-sdk/openai",
-"version": "0.0.60",
+"version": "0.0.61",
 "license": "Apache-2.0",

@@ -5,0 +5,0 @@ "sideEffects": false,

./README.md

@@ -1,4 +1,4 @@
-# Vercel AI SDK - OpenAI Provider
+# AI SDK - OpenAI Provider
-The **[OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai)** for the [Vercel AI SDK](https://sdk.vercel.ai/docs)
+The **[OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai)** for the [AI SDK](https://sdk.vercel.ai/docs)
 contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.

@@ -5,0 +5,0 @@

