@ai-sdk/openai
Comparing version 1.0.8 to 1.0.9
# @ai-sdk/openai
+## 1.0.9
+### Patch Changes
+- 3fab0fb: feat (provider/openai): support reasoning_effort setting
+- e956eed: feat (provider/openai): update model list and add o1
+- 6faab13: feat (provider/openai): simulated streaming setting
## 1.0.8
@@ -4,0 +12,0 @@
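Taken together, the 1.0.9 patch adds `o1` (plus dated snapshots) to the chat model list, a `reasoningEffort` setting, and a `simulateStreaming` setting. A minimal sketch of how the new model id and effort setting combine, assuming the usual `createOpenAI` factory and `generateText` from the `ai` package (the prompt is illustrative):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Illustrative setup; the factory reads OPENAI_API_KEY by default.
const openai = createOpenAI();

const { text } = await generateText({
  // 'o1' is newly accepted as an OpenAIChatModelId in 1.0.9.
  model: openai('o1', {
    // New setting; maps to the reasoning_effort request field.
    reasoningEffort: 'low',
  }),
  prompt: 'Outline a migration plan from callbacks to async iterators.',
});
console.log(text);
```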
import { LanguageModelV1, ProviderV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
-type OpenAIChatModelId = 'o1-preview' | 'o1-mini' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
interface OpenAIChatSettings {
@@ -68,2 +68,13 @@ /**
downloadImages?: boolean;
+/**
+Simulates streaming by using a normal generate call and returning it as a stream.
+Enable this if the model that you are using does not support streaming.
+Defaults to `false`.
+*/
+simulateStreaming?: boolean;
+/**
+Reasoning effort for reasoning models. Defaults to `medium`.
+*/
+reasoningEffort?: 'low' | 'medium' | 'high';
}
@@ -70,0 +81,0 @@
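The new `simulateStreaming` flag exists because `streamText` consumers expect a stream even from models whose API rejects streaming requests; the `o1` models added in the same patch are the likely motivation. A hedged usage sketch, assuming `streamText` from the `ai` package:

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { streamText } from 'ai';

const openai = createOpenAI();

const result = streamText({
  // With simulateStreaming, doStream runs a single generate call
  // and replays the finished result as a one-chunk stream.
  model: openai('o1-mini', { simulateStreaming: true }),
  prompt: 'Explain backpressure in one paragraph.',
});

// Consumer code is unchanged; the whole text just arrives at once.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```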
@@ -379,3 +379,3 @@ "use strict";
}) {
-var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+var _a, _b, _c, _d, _e, _f, _g, _h;
const type = mode.type;
@@ -434,6 +434,7 @@ const warnings = [];
// openai specific settings:
-max_completion_tokens: (_c = (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens) != null ? _c : void 0,
-store: (_e = (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.store) != null ? _e : void 0,
-metadata: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.metadata) != null ? _g : void 0,
-prediction: (_i = (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.prediction) != null ? _i : void 0,
+max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
+store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
+metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
+reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
// messages:
@@ -478,3 +479,3 @@ messages: convertToOpenAIChatMessages({
strict: true,
-name: (_j = mode.name) != null ? _j : "response",
+name: (_h = mode.name) != null ? _h : "response",
description: mode.description
@@ -590,2 +591,38 @@ }
async doStream(options) {
+if (this.settings.simulateStreaming) {
+const result = await this.doGenerate(options);
+const simulatedStream = new ReadableStream({
+start(controller) {
+controller.enqueue({ type: "response-metadata", ...result.response });
+if (result.text) {
+controller.enqueue({
+type: "text-delta",
+textDelta: result.text
+});
+}
+if (result.toolCalls) {
+for (const toolCall of result.toolCalls) {
+controller.enqueue({
+type: "tool-call",
+...toolCall
+});
+}
+}
+controller.enqueue({
+type: "finish",
+finishReason: result.finishReason,
+usage: result.usage,
+logprobs: result.logprobs,
+providerMetadata: result.providerMetadata
+});
+controller.close();
+}
+});
+return {
+stream: simulatedStream,
+rawCall: result.rawCall,
+rawResponse: result.rawResponse,
+warnings: result.warnings
+};
+}
const { args, warnings } = this.getArgs(options);
@@ -592,0 +629,0 @@ const body = {
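The transpiled `_a`…`_h` chains above are hard to audit. De-sugared, the provider-option block is plain optional chaining: the pre-existing fields pass through unchanged, and the new `reasoning_effort` is the only one with a fallback to the constructor-level setting. A readable TypeScript rendering of that fragment (the wrapper function and parameter types are illustrative scaffolding, not the package's real signatures):

```ts
type ReasoningEffort = 'low' | 'medium' | 'high';

// Stand-ins for the class context in the compiled code:
// `providerMetadata` comes from the call options, `settings`
// from the OpenAIChatSettings passed at model construction.
function openaiSpecificArgs(
  providerMetadata: { openai?: Record<string, unknown> } | undefined,
  settings: { reasoningEffort?: ReasoningEffort },
) {
  const o = providerMetadata?.openai;
  return {
    max_completion_tokens: o?.maxCompletionTokens,
    store: o?.store,
    metadata: o?.metadata,
    prediction: o?.prediction,
    // New in 1.0.9: per-call metadata wins, then the chat setting;
    // if both are undefined the field is omitted from the request.
    reasoning_effort:
      (o?.reasoningEffort as ReasoningEffort | undefined) ??
      settings.reasoningEffort,
  };
}
```

The same hunks appear once more below: the package ships the compiled code in a second build artifact, so the identical additions repeat there.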
import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
-type OpenAIChatModelId = 'o1-preview' | 'o1-mini' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
interface OpenAIChatSettings {
@@ -68,2 +68,13 @@ /**
downloadImages?: boolean;
+/**
+Simulates streaming by using a normal generate call and returning it as a stream.
+Enable this if the model that you are using does not support streaming.
+Defaults to `false`.
+*/
+simulateStreaming?: boolean;
+/**
+Reasoning effort for reasoning models. Defaults to `medium`.
+*/
+reasoningEffort?: 'low' | 'medium' | 'high';
}
@@ -70,0 +81,0 @@
@@ -377,3 +377,3 @@ "use strict";
}) {
-var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+var _a, _b, _c, _d, _e, _f, _g, _h;
const type = mode.type;
@@ -432,6 +432,7 @@ const warnings = [];
// openai specific settings:
-max_completion_tokens: (_c = (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens) != null ? _c : void 0,
-store: (_e = (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.store) != null ? _e : void 0,
-metadata: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.metadata) != null ? _g : void 0,
-prediction: (_i = (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.prediction) != null ? _i : void 0,
+max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
+store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
+metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
+reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
// messages:
@@ -476,3 +477,3 @@ messages: convertToOpenAIChatMessages({
strict: true,
-name: (_j = mode.name) != null ? _j : "response",
+name: (_h = mode.name) != null ? _h : "response",
description: mode.description
@@ -588,2 +589,38 @@ }
async doStream(options) {
+if (this.settings.simulateStreaming) {
+const result = await this.doGenerate(options);
+const simulatedStream = new ReadableStream({
+start(controller) {
+controller.enqueue({ type: "response-metadata", ...result.response });
+if (result.text) {
+controller.enqueue({
+type: "text-delta",
+textDelta: result.text
+});
+}
+if (result.toolCalls) {
+for (const toolCall of result.toolCalls) {
+controller.enqueue({
+type: "tool-call",
+...toolCall
+});
+}
+}
+controller.enqueue({
+type: "finish",
+finishReason: result.finishReason,
+usage: result.usage,
+logprobs: result.logprobs,
+providerMetadata: result.providerMetadata
+});
+controller.close();
+}
+});
+return {
+stream: simulatedStream,
+rawCall: result.rawCall,
+rawResponse: result.rawResponse,
+warnings: result.warnings
+};
+}
const { args, warnings } = this.getArgs(options);
@@ -590,0 +627,0 @@ const body = {
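Because the compiled code consults `providerMetadata?.openai?.reasoningEffort` before falling back to the setting, effort can also be raised or lowered per request. A sketch assuming the `experimental_providerMetadata` call option that this generation of the AI SDK used for provider-specific fields (verify the option name against your installed `ai` version):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

const openai = createOpenAI();

const { text } = await generateText({
  // Model-level default for every call made with this model...
  model: openai('o1', { reasoningEffort: 'medium' }),
  prompt: 'Find the flaw in this proof sketch: ...',
  // ...overridden for just this request. Assumed option name for
  // this release line; per-call metadata takes precedence.
  experimental_providerMetadata: {
    openai: { reasoningEffort: 'high' },
  },
});
```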
{
"name": "@ai-sdk/openai",
-"version": "1.0.8",
+"version": "1.0.9",
"license": "Apache-2.0",
@@ -5,0 +5,0 @@ "sideEffects": false,