Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in | Demo | Install
Socket

@ai-sdk/openai

Package Overview
Dependencies
Maintainers
2
Versions
82
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@ai-sdk/openai - npm Package Compare versions

Comparing version 0.0.11 to 0.0.12

118

./dist/index.js

@@ -301,3 +301,6 @@ "use strict";

...args,
stream: true
stream: true,
stream_options: {
include_usage: true
}
},

@@ -730,3 +733,6 @@ failedResponseHandler: openaiFailedResponseHandler,

...this.getArgs(options),
stream: true
stream: true,
stream_options: {
include_usage: true
}
},

@@ -877,4 +883,97 @@ failedResponseHandler: openaiFailedResponseHandler,

// src/openai-provider.ts
var import_provider_utils7 = require("@ai-sdk/provider-utils");
// src/openai-embedding-model.ts
var import_provider4 = require("@ai-sdk/provider");
var import_provider_utils6 = require("@ai-sdk/provider-utils");
var import_zod4 = require("zod");
// Embedding model that posts string values to the OpenAI-compatible
// `/embeddings` endpoint. NOTE(review): this appears to be transpiled
// CommonJS build output (var/_a patterns, interop requires) — edit the
// TypeScript source (src/openai-embedding-model.ts), not this file.
var OpenAIEmbeddingModel = class {
  // modelId: embedding model identifier sent as `model` in the request body.
  // settings/config are stored as-is and read lazily by the getters below.
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  // Provider name supplied via config (e.g. "openai.embedding").
  get provider() {
    return this.config.provider;
  }
  // Upper bound on values per doEmbed call; defaults to 2048 unless
  // overridden via settings.maxEmbeddingsPerCall.
  get maxEmbeddingsPerCall() {
    var _a;
    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
  }
  // Whether callers may issue embedding requests in parallel; defaults to true.
  get supportsParallelCalls() {
    var _a;
    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
  }
  // Embeds `values` in a single POST to `${config.baseURL}/embeddings`.
  // Throws TooManyEmbeddingValuesForCallError when values.length exceeds
  // maxEmbeddingsPerCall. Returns { embeddings, rawResponse: { headers } }.
  // `abortSignal` is forwarded to the HTTP call for cancellation.
  async doEmbed({
    values,
    abortSignal
  }) {
    // Guard before any network I/O: reject oversized batches client-side.
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider4.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: `${this.config.baseURL}/embeddings`,
      headers: this.config.headers(),
      body: {
        model: this.modelId,
        input: values,
        // Request raw float vectors (vs. base64) so data[].embedding is number[].
        encoding_format: "float",
        // Optional: only honored by text-embedding-3+ models per settings docs.
        dimensions: this.settings.dimensions,
        user: this.settings.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      // Response is validated against the minimal zod schema below; fields
      // outside data[].embedding are ignored.
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      rawResponse: { headers: responseHeaders }
    };
  }
};
// Minimal zod schema for the embeddings response: only `data[].embedding`
// (number[]) is validated/consumed; all other response fields are ignored.
var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  data: import_zod4.z.array(
    import_zod4.z.object({
      embedding: import_zod4.z.array(import_zod4.z.number())
    })
  )
});
// src/openai-provider.ts
function createOpenAI(options = {}) {
const openai2 = new OpenAI(options);
var _a, _b;
const baseURL = (_b = (0, import_provider_utils7.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
const getHeaders = () => ({
Authorization: `Bearer ${(0, import_provider_utils7.loadApiKey)({
apiKey: options.apiKey,
environmentVariableName: "OPENAI_API_KEY",
description: "OpenAI"
})}`,
"OpenAI-Organization": options.organization,
"OpenAI-Project": options.project,
...options.headers
});
const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
provider: "openai.chat",
baseURL,
headers: getHeaders
});
const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
provider: "openai.completion",
baseURL,
headers: getHeaders
});
const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
provider: "openai.embedding",
baseURL,
headers: getHeaders
});
const provider = function(modelId, settings) {

@@ -887,9 +986,12 @@ if (new.target) {

if (modelId === "gpt-3.5-turbo-instruct") {
return openai2.completion(modelId, settings);
} else {
return openai2.chat(modelId, settings);
return createCompletionModel(
modelId,
settings
);
}
return createChatModel(modelId, settings);
};
provider.chat = openai2.chat.bind(openai2);
provider.completion = openai2.completion.bind(openai2);
provider.chat = createChatModel;
provider.completion = createCompletionModel;
provider.embedding = createEmbeddingModel;
return provider;

@@ -896,0 +998,0 @@ }

@@ -1,4 +0,4 @@

import { LanguageModelV1 } from '@ai-sdk/provider';
import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
type OpenAIChatModelId = 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613' | (string & {});
type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613' | (string & {});
interface OpenAIChatSettings {

@@ -119,7 +119,56 @@ /**

type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
interface OpenAIEmbeddingSettings {
/**
Override the maximum number of embeddings per call.
*/
maxEmbeddingsPerCall?: number;
/**
Override the parallelism of embedding calls.
*/
supportsParallelCalls?: boolean;
/**
The number of dimensions the resulting output embeddings should have.
Only supported in text-embedding-3 and later models.
*/
dimensions?: number;
/**
A unique identifier representing your end-user, which can help OpenAI to
monitor and detect abuse. Learn more.
*/
user?: string;
}
type OpenAIEmbeddingConfig = {
provider: string;
baseURL: string;
headers: () => Record<string, string | undefined>;
};
declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
readonly specificationVersion = "v1";
readonly modelId: OpenAIEmbeddingModelId;
private readonly config;
private readonly settings;
get provider(): string;
get maxEmbeddingsPerCall(): number;
get supportsParallelCalls(): boolean;
constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIEmbeddingConfig);
doEmbed({ values, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
}
interface OpenAIProvider {
(modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
/**
Creates an OpenAI chat model for text generation.
*/
chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
/**
Creates an OpenAI completion model for text generation.
*/
completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
/**
Creates a model for text embeddings.
*/
embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): OpenAIEmbeddingModel;
}

@@ -157,3 +206,3 @@ interface OpenAIProviderSettings {

/**
* Default OpenAI provider instance.
Default OpenAI provider instance.
*/

@@ -160,0 +209,0 @@ declare const openai: OpenAIProvider;

@@ -301,3 +301,6 @@ "use strict";

...args,
stream: true
stream: true,
stream_options: {
include_usage: true
}
},

@@ -730,3 +733,6 @@ failedResponseHandler: openaiFailedResponseHandler,

...this.getArgs(options),
stream: true
stream: true,
stream_options: {
include_usage: true
}
},

@@ -877,4 +883,97 @@ failedResponseHandler: openaiFailedResponseHandler,

// src/openai-provider.ts
var import_provider_utils7 = require("@ai-sdk/provider-utils");
// src/openai-embedding-model.ts
var import_provider4 = require("@ai-sdk/provider");
var import_provider_utils6 = require("@ai-sdk/provider-utils");
var import_zod4 = require("zod");
// Embedding model that posts string values to the OpenAI-compatible
// `/embeddings` endpoint. NOTE(review): this appears to be transpiled
// CommonJS build output (var/_a patterns, interop requires) — edit the
// TypeScript source (src/openai-embedding-model.ts), not this file.
var OpenAIEmbeddingModel = class {
  // modelId: embedding model identifier sent as `model` in the request body.
  // settings/config are stored as-is and read lazily by the getters below.
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  // Provider name supplied via config (e.g. "openai.embedding").
  get provider() {
    return this.config.provider;
  }
  // Upper bound on values per doEmbed call; defaults to 2048 unless
  // overridden via settings.maxEmbeddingsPerCall.
  get maxEmbeddingsPerCall() {
    var _a;
    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
  }
  // Whether callers may issue embedding requests in parallel; defaults to true.
  get supportsParallelCalls() {
    var _a;
    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
  }
  // Embeds `values` in a single POST to `${config.baseURL}/embeddings`.
  // Throws TooManyEmbeddingValuesForCallError when values.length exceeds
  // maxEmbeddingsPerCall. Returns { embeddings, rawResponse: { headers } }.
  // `abortSignal` is forwarded to the HTTP call for cancellation.
  async doEmbed({
    values,
    abortSignal
  }) {
    // Guard before any network I/O: reject oversized batches client-side.
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider4.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: `${this.config.baseURL}/embeddings`,
      headers: this.config.headers(),
      body: {
        model: this.modelId,
        input: values,
        // Request raw float vectors (vs. base64) so data[].embedding is number[].
        encoding_format: "float",
        // Optional: only honored by text-embedding-3+ models per settings docs.
        dimensions: this.settings.dimensions,
        user: this.settings.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      // Response is validated against the minimal zod schema below; fields
      // outside data[].embedding are ignored.
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      rawResponse: { headers: responseHeaders }
    };
  }
};
// Minimal zod schema for the embeddings response: only `data[].embedding`
// (number[]) is validated/consumed; all other response fields are ignored.
var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  data: import_zod4.z.array(
    import_zod4.z.object({
      embedding: import_zod4.z.array(import_zod4.z.number())
    })
  )
});
// src/openai-provider.ts
function createOpenAI(options = {}) {
const openai2 = new OpenAI(options);
var _a, _b;
const baseURL = (_b = (0, import_provider_utils7.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : options.baseUrl)) != null ? _b : "https://api.openai.com/v1";
const getHeaders = () => ({
Authorization: `Bearer ${(0, import_provider_utils7.loadApiKey)({
apiKey: options.apiKey,
environmentVariableName: "OPENAI_API_KEY",
description: "OpenAI"
})}`,
"OpenAI-Organization": options.organization,
"OpenAI-Project": options.project,
...options.headers
});
const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
provider: "openai.chat",
baseURL,
headers: getHeaders
});
const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
provider: "openai.completion",
baseURL,
headers: getHeaders
});
const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
provider: "openai.embedding",
baseURL,
headers: getHeaders
});
const provider = function(modelId, settings) {

@@ -887,9 +986,12 @@ if (new.target) {

if (modelId === "gpt-3.5-turbo-instruct") {
return openai2.completion(modelId, settings);
} else {
return openai2.chat(modelId, settings);
return createCompletionModel(
modelId,
settings
);
}
return createChatModel(modelId, settings);
};
provider.chat = openai2.chat.bind(openai2);
provider.completion = openai2.completion.bind(openai2);
provider.chat = createChatModel;
provider.completion = createCompletionModel;
provider.embedding = createEmbeddingModel;
return provider;

@@ -896,0 +998,0 @@ }

6

package.json
{
"name": "@ai-sdk/openai",
"version": "0.0.11",
"version": "0.0.12",
"license": "Apache-2.0",

@@ -21,4 +21,4 @@ "sideEffects": false,

"dependencies": {
"@ai-sdk/provider": "0.0.4",
"@ai-sdk/provider-utils": "0.0.7"
"@ai-sdk/provider": "0.0.5",
"@ai-sdk/provider-utils": "0.0.8"
},

@@ -25,0 +25,0 @@ "devDependencies": {

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc