genkitx-openai

genkitx-openai - npm Package Compare versions

Comparing version 0.20.2 to 0.20.3

lib/embedder-BUBoitMD.d.mts

lib/embedder.d.ts
import 'genkit';
-export { S as SUPPORTED_EMBEDDING_MODELS, T as TextEmbeddingConfigSchema, c as TextEmbeddingGeckoConfig, d as TextEmbeddingInputSchema, e as openaiEmbedder, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DXIcCYcu.js';
+export { S as SUPPORTED_EMBEDDING_MODELS, T as TextEmbeddingConfigSchema, c as TextEmbeddingGeckoConfig, d as TextEmbeddingInputSchema, e as openaiEmbedder, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DIOltsXz.js';
import 'genkit/plugin';
import 'openai';
import 'genkit/model';
import './dalle.js';
import 'genkit/model';
import './gpt.js';

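The embedder change above is just the bundled chunk being renamed (embedder-DXIcCYcu.js to embedder-DIOltsXz.js); the re-exported embedding surface (openaiEmbedder, textEmbedding3Small, textEmbedding3Large, textEmbeddingAda002) is unchanged. As a rough usage sketch only, assuming a recent Genkit setup, that the plugin accepts an apiKey option, and that ai.embed takes an embedder reference plus content (none of this is taken from the package's own docs):

import { genkit } from 'genkit';
import { openAI, textEmbedding3Small } from 'genkitx-openai';

// Register the plugin once, then reuse the ai instance for embed/generate calls.
const ai = genkit({
  plugins: [openAI({ apiKey: process.env.OPENAI_API_KEY })],
});

async function main() {
  // textEmbedding3Small is one of the embedder references re-exported above.
  const result = await ai.embed({
    embedder: textEmbedding3Small,
    content: 'Genkit plugin for the OpenAI API',
  });
  console.log(result); // result shape depends on the Genkit version in use
}

main().catch(console.error);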
@@ -8,0 +8,0 @@ import 'openai/resources/index.mjs';

@@ -305,2 +305,49 @@ import { CandidateData, GenerateResponseChunkData, ModelInfo, ModelAction } from 'genkit/model';

}>>;
declare const o3: ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
logProbs: z.ZodOptional<z.ZodBoolean>;
presencePenalty: z.ZodOptional<z.ZodNumber>;
seed: z.ZodOptional<z.ZodNumber>;
topLogProbs: z.ZodOptional<z.ZodNumber>;
user: z.ZodOptional<z.ZodString>;
visualDetailLevel: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
}>, "strip", z.ZodTypeAny, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}>>;
declare const o3Mini: ModelReference<z.ZodObject<z.objectUtil.extendShape<{

@@ -353,2 +400,49 @@ version: z.ZodOptional<z.ZodString>;

}>>;
declare const o4Mini: ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
logProbs: z.ZodOptional<z.ZodBoolean>;
presencePenalty: z.ZodOptional<z.ZodNumber>;
seed: z.ZodOptional<z.ZodNumber>;
topLogProbs: z.ZodOptional<z.ZodNumber>;
user: z.ZodOptional<z.ZodString>;
visualDetailLevel: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
}>, "strip", z.ZodTypeAny, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}>>;
declare const gpt4oMini: ModelReference<z.ZodObject<z.objectUtil.extendShape<{

@@ -542,2 +636,143 @@ version: z.ZodOptional<z.ZodString>;

}>>;
declare const gpt41: ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
logProbs: z.ZodOptional<z.ZodBoolean>;
presencePenalty: z.ZodOptional<z.ZodNumber>;
seed: z.ZodOptional<z.ZodNumber>;
topLogProbs: z.ZodOptional<z.ZodNumber>;
user: z.ZodOptional<z.ZodString>;
visualDetailLevel: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
}>, "strip", z.ZodTypeAny, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}>>;
declare const gpt41Mini: ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
logProbs: z.ZodOptional<z.ZodBoolean>;
presencePenalty: z.ZodOptional<z.ZodNumber>;
seed: z.ZodOptional<z.ZodNumber>;
topLogProbs: z.ZodOptional<z.ZodNumber>;
user: z.ZodOptional<z.ZodString>;
visualDetailLevel: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
}>, "strip", z.ZodTypeAny, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}>>;
declare const gpt41Nano: ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
frequencyPenalty: z.ZodOptional<z.ZodNumber>;
logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
logProbs: z.ZodOptional<z.ZodBoolean>;
presencePenalty: z.ZodOptional<z.ZodNumber>;
seed: z.ZodOptional<z.ZodNumber>;
topLogProbs: z.ZodOptional<z.ZodNumber>;
user: z.ZodOptional<z.ZodString>;
visualDetailLevel: z.ZodOptional<z.ZodEnum<["auto", "low", "high"]>>;
}>, "strip", z.ZodTypeAny, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}, {
user?: string | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
frequencyPenalty?: number | undefined;
logitBias?: Record<string, number> | undefined;
logProbs?: boolean | undefined;
presencePenalty?: number | undefined;
seed?: number | undefined;
topLogProbs?: number | undefined;
visualDetailLevel?: "auto" | "low" | "high" | undefined;
}>>;
declare const gpt35Turbo: ModelReference<z.ZodObject<z.objectUtil.extendShape<{

@@ -651,2 +886,2 @@ version: z.ZodOptional<z.ZodString>;

-export { OpenAiConfigSchema, SUPPORTED_GPT_MODELS, fromOpenAiChoice, fromOpenAiChunkChoice, fromOpenAiToolCall, gpt35Turbo, gpt4, gpt45, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, gptModel, gptRunner, o1, o1Mini, o1Preview, o3Mini, toOpenAIRole, toOpenAiMessages, toOpenAiRequestBody, toOpenAiTextAndMedia };
+export { OpenAiConfigSchema, SUPPORTED_GPT_MODELS, fromOpenAiChoice, fromOpenAiChunkChoice, fromOpenAiToolCall, gpt35Turbo, gpt4, gpt41, gpt41Mini, gpt41Nano, gpt45, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, gptModel, gptRunner, o1, o1Mini, o1Preview, o3, o3Mini, o4Mini, toOpenAIRole, toOpenAiMessages, toOpenAiRequestBody, toOpenAiTextAndMedia };

@@ -67,2 +67,5 @@ "use strict";

gpt4: () => gpt4,
gpt41: () => gpt41,
gpt41Mini: () => gpt41Mini,
gpt41Nano: () => gpt41Nano,
gpt45: () => gpt45,

@@ -78,3 +81,5 @@ gpt4Turbo: () => gpt4Turbo,

o1Preview: () => o1Preview,
o3: () => o3,
o3Mini: () => o3Mini,
o4Mini: () => o4Mini,
toOpenAIRole: () => toOpenAIRole,

@@ -189,2 +194,17 @@ toOpenAiMessages: () => toOpenAiMessages,

});
const o3 = (0, import_model.modelRef)({
name: "openai/o3",
info: {
versions: ["o3"],
label: "OpenAI - o3",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: false,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const o3Mini = (0, import_model.modelRef)({

@@ -205,2 +225,17 @@ name: "openai/o3-mini",

});
const o4Mini = (0, import_model.modelRef)({
name: "openai/o4-mini",
info: {
versions: ["o4-mini"],
label: "OpenAI - o4 Mini",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: false,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt4oMini = (0, import_model.modelRef)({

@@ -272,2 +307,47 @@ name: "openai/gpt-4o-mini",

});
const gpt41 = (0, import_model.modelRef)({
name: "openai/gpt-4.1",
info: {
versions: ["gpt-4.1"],
label: "OpenAI - GPT-4.1",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt41Mini = (0, import_model.modelRef)({
name: "openai/gpt-4.1-mini",
info: {
versions: ["gpt-4.1-mini"],
label: "OpenAI - GPT-4.1 Mini",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt41Nano = (0, import_model.modelRef)({
name: "openai/gpt-4.1-nano",
info: {
versions: ["gpt-4.1-nano"],
label: "OpenAI - GPT-4.1 Nano",
supports: {
multiturn: true,
tools: true,
media: false,
systemRole: true,
output: ["text", "json"]
}
},
configSchema: OpenAiConfigSchema
});
const gpt35Turbo = (0, import_model.modelRef)({

@@ -295,2 +375,5 @@ name: "openai/gpt-3.5-turbo",

"gpt-4": gpt4,
"gpt-4.1": gpt41,
"gpt-4.1-mini": gpt41Mini,
"gpt-4.1-nano": gpt41Nano,
"gpt-3.5-turbo": gpt35Turbo,

@@ -300,3 +383,5 @@ "o1-preview": o1Preview,

"o1-mini": o1Mini,
"o3-mini": o3Mini
o3,
"o3-mini": o3Mini,
"o4-mini": o4Mini
};

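The o3, o4-mini, and gpt-4.1 family references added above are ordinary Genkit modelRef entries (OpenAiConfigSchema config, registered via SUPPORTED_GPT_MODELS), so calling them should look the same as calling the existing gpt-4o references. A minimal sketch under the same assumptions as the embedding example (Genkit generate API, apiKey plugin option, response.text accessor; illustrative only):

import { genkit } from 'genkit';
import { openAI, gpt41Mini, o4Mini } from 'genkitx-openai';

const ai = genkit({
  plugins: [openAI({ apiKey: process.env.OPENAI_API_KEY })],
});

async function main() {
  // gpt41Mini resolves to "openai/gpt-4.1-mini"; config fields mirror
  // OpenAiConfigSchema (temperature, maxOutputTokens, seed, ...).
  const response = await ai.generate({
    model: gpt41Mini,
    prompt: 'Explain in one sentence what a Genkit model reference is.',
    config: { temperature: 0.2, maxOutputTokens: 128 },
  });
  console.log(response.text); // assumed accessor on the generate response

  // o4Mini ("openai/o4-mini") is declared without systemRole or media support,
  // so keep the prompt to plain user text.
  const reasoning = await ai.generate({
    model: o4Mini,
    prompt: 'List three risks of pinning a dependency range too loosely.',
  });
  console.log(reasoning.text);
}

main().catch(console.error);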
@@ -624,2 +709,5 @@ function toOpenAIRole(role) {

gpt4,
gpt41,
gpt41Mini,
gpt41Nano,
gpt45,

@@ -635,3 +723,5 @@ gpt4Turbo,

o1Preview,
o3,
o3Mini,
o4Mini,
toOpenAIRole,

@@ -638,0 +728,0 @@ toOpenAiMessages,

import 'genkit/plugin';
import 'openai';
import 'genkit/model';
export { dallE3 } from './dalle.js';
-export { M as ModelDefinition, P as PluginOptions, o as default, o as openAI, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DXIcCYcu.js';
-export { gpt35Turbo, gpt4, gpt45, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, o1, o1Mini, o1Preview, o3Mini } from './gpt.js';
-export { tts1, tts1Hd } from './tts.js';
-export { whisper1 } from './whisper.js';
import 'genkit/model';
+export { M as ModelDefinition, P as PluginOptions, o as default, o as openAI, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DIOltsXz.js';
+export { gpt35Turbo, gpt4, gpt41, gpt41Mini, gpt41Nano, gpt45, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, o1, o1Mini, o1Preview, o3, o3Mini, o4Mini } from './gpt.js';
+export { gpt4oMiniTts, tts1, tts1Hd } from './tts.js';
+export { gpt4oTranscribe, whisper1 } from './whisper.js';
import 'genkit';
import 'openai/resources/index.mjs';

@@ -45,2 +45,5 @@ "use strict";

gpt4: () => import_gpt.gpt4,
gpt41: () => import_gpt.gpt41,
gpt41Mini: () => import_gpt.gpt41Mini,
gpt41Nano: () => import_gpt.gpt41Nano,
gpt45: () => import_gpt.gpt45,

@@ -51,6 +54,10 @@ gpt4Turbo: () => import_gpt.gpt4Turbo,

gpt4oMini: () => import_gpt.gpt4oMini,
gpt4oMiniTts: () => import_tts.gpt4oMiniTts,
gpt4oTranscribe: () => import_whisper.gpt4oTranscribe,
o1: () => import_gpt.o1,
o1Mini: () => import_gpt.o1Mini,
o1Preview: () => import_gpt.o1Preview,
o3: () => import_gpt.o3,
o3Mini: () => import_gpt.o3Mini,
o4Mini: () => import_gpt.o4Mini,
openAI: () => openAI,

@@ -85,3 +92,5 @@ textEmbedding3Large: () => import_embedder.textEmbedding3Large,

(0, import_dalle.dallE3Model)(ai, client);
-(0, import_whisper.whisper1Model)(ai, client);
+for (const name of Object.keys(import_whisper.SUPPORTED_STT_MODELS)) {
+(0, import_whisper.sttModel)(ai, name, client);
+}
for (const name of Object.keys(import_tts.SUPPORTED_TTS_MODELS)) {

@@ -100,2 +109,5 @@ (0, import_tts.ttsModel)(ai, name, client);

gpt4,
gpt41,
gpt41Mini,
gpt41Nano,
gpt45,

@@ -106,6 +118,10 @@ gpt4Turbo,

gpt4oMini,
gpt4oMiniTts,
gpt4oTranscribe,
o1,
o1Mini,
o1Preview,
o3,
o3Mini,
o4Mini,
openAI,

@@ -112,0 +128,0 @@ textEmbedding3Large,

@@ -102,2 +102,34 @@ import * as genkit from 'genkit';

}>>;
declare const gpt4oMiniTts: genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
voice: z.ZodDefault<z.ZodOptional<z.ZodEnum<["alloy", "echo", "fable", "onyx", "nova", "shimmer"]>>>;
speed: z.ZodOptional<z.ZodNumber>;
response_format: z.ZodOptional<z.ZodEnum<["mp3", "opus", "aac", "flac", "wav", "pcm"]>>;
}>, "strip", z.ZodTypeAny, {
voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer";
response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
speed?: number | undefined;
}, {
response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined;
speed?: number | undefined;
}>>;
declare const SUPPORTED_TTS_MODELS: {

@@ -168,2 +200,34 @@ 'tts-1': genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{

}>>;
'gpt-4o-mini-tts': genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
voice: z.ZodDefault<z.ZodOptional<z.ZodEnum<["alloy", "echo", "fable", "onyx", "nova", "shimmer"]>>>;
speed: z.ZodOptional<z.ZodNumber>;
response_format: z.ZodOptional<z.ZodEnum<["mp3", "opus", "aac", "flac", "wav", "pcm"]>>;
}>, "strip", z.ZodTypeAny, {
voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer";
response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
speed?: number | undefined;
}, {
response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined;
speed?: number | undefined;
}>>;
};

@@ -180,2 +244,2 @@ declare const RESPONSE_FORMAT_MEDIA_TYPES: {

export { RESPONSE_FORMAT_MEDIA_TYPES, SUPPORTED_TTS_MODELS, TTSConfigSchema, tts1, tts1Hd, ttsModel };
export { RESPONSE_FORMAT_MEDIA_TYPES, SUPPORTED_TTS_MODELS, TTSConfigSchema, gpt4oMiniTts, tts1, tts1Hd, ttsModel };

@@ -61,2 +61,3 @@ "use strict";

TTSConfigSchema: () => TTSConfigSchema,
gpt4oMiniTts: () => gpt4oMiniTts,
tts1: () => tts1,

@@ -102,5 +103,20 @@ tts1Hd: () => tts1Hd,

});
const gpt4oMiniTts = (0, import_model.modelRef)({
name: "openai/gpt-4o-mini-tts",
info: {
label: "OpenAI - GPT-4o Mini Text-to-speech",
supports: {
media: false,
output: ["media"],
multiturn: false,
systemRole: false,
tools: false
}
},
configSchema: TTSConfigSchema
});
const SUPPORTED_TTS_MODELS = {
"tts-1": tts1,
"tts-1-hd": tts1Hd
"tts-1-hd": tts1Hd,
"gpt-4o-mini-tts": gpt4oMiniTts
};

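Because gpt-4o-mini-tts is added to SUPPORTED_TTS_MODELS and reuses TTSConfigSchema, requesting speech from it should mirror tts-1 usage, with the voice, speed, and response_format options shown above. A hedged sketch (same setup assumptions as the earlier examples; response.media is an assumed accessor for the returned audio part):

import { genkit } from 'genkit';
import { openAI, gpt4oMiniTts } from 'genkitx-openai';

const ai = genkit({
  plugins: [openAI({ apiKey: process.env.OPENAI_API_KEY })],
});

async function main() {
  // The model is declared with output: ["media"], so the interesting part of
  // the response is audio, not text.
  const response = await ai.generate({
    model: gpt4oMiniTts,
    prompt: 'genkitx-openai 0.20.3 adds new OpenAI models to the Genkit plugin.',
    config: { voice: 'nova', response_format: 'mp3' },
  });
  console.log(response.media); // assumed accessor for the generated audio
}

main().catch(console.error);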
@@ -178,2 +194,3 @@ const RESPONSE_FORMAT_MEDIA_TYPES = {

TTSConfigSchema,
gpt4oMiniTts,
tts1,

@@ -180,0 +197,0 @@ tts1Hd,

@@ -70,4 +70,102 @@ import * as genkit from 'genkit';

}>>;
declare function whisper1Model(ai: Genkit, client: OpenAI): ModelAction<typeof Whisper1ConfigSchema>;
declare const gpt4oTranscribe: genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
language: z.ZodOptional<z.ZodString>;
timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>;
response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>;
}>, "strip", z.ZodTypeAny, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}>>;
declare const SUPPORTED_STT_MODELS: {
'gpt-4o-transcribe': genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
language: z.ZodOptional<z.ZodString>;
timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>;
response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>;
}>, "strip", z.ZodTypeAny, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}>>;
'whisper-1': genkit.ModelReference<z.ZodObject<z.objectUtil.extendShape<{
version: z.ZodOptional<z.ZodString>;
temperature: z.ZodOptional<z.ZodNumber>;
maxOutputTokens: z.ZodOptional<z.ZodNumber>;
topK: z.ZodOptional<z.ZodNumber>;
topP: z.ZodOptional<z.ZodNumber>;
stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
}, {
language: z.ZodOptional<z.ZodString>;
timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>;
response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>;
}>, "strip", z.ZodTypeAny, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}, {
response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined;
version?: string | undefined;
temperature?: number | undefined;
maxOutputTokens?: number | undefined;
topK?: number | undefined;
topP?: number | undefined;
stopSequences?: string[] | undefined;
language?: string | undefined;
timestamp_granularities?: ("word" | "segment")[] | undefined;
}>>;
};
declare function sttModel(ai: Genkit, name: string, client: OpenAI): ModelAction<typeof Whisper1ConfigSchema>;
-export { Whisper1ConfigSchema, whisper1, whisper1Model };
+export { SUPPORTED_STT_MODELS, Whisper1ConfigSchema, gpt4oTranscribe, sttModel, whisper1 };

@@ -58,5 +58,7 @@ "use strict";

__export(whisper_exports, {
+SUPPORTED_STT_MODELS: () => SUPPORTED_STT_MODELS,
Whisper1ConfigSchema: () => Whisper1ConfigSchema,
-whisper1: () => whisper1,
-whisper1Model: () => whisper1Model
+gpt4oTranscribe: () => gpt4oTranscribe,
+sttModel: () => sttModel,
+whisper1: () => whisper1
});

@@ -85,2 +87,16 @@ module.exports = __toCommonJS(whisper_exports);

});
const gpt4oTranscribe = (0, import_model.modelRef)({
name: "openai/gpt-4o-transcribe",
info: {
label: "OpenAI - GPT-4o Transcribe",
supports: {
media: true,
output: ["text", "json"],
multiturn: false,
systemRole: false,
tools: false
}
},
configSchema: Whisper1ConfigSchema
});
function toWhisper1Request(request) {

@@ -146,13 +162,21 @@ var _a, _b, _c, _d, _e, _f;

}
-function whisper1Model(ai, client) {
+const SUPPORTED_STT_MODELS = {
+"gpt-4o-transcribe": gpt4oTranscribe,
+"whisper-1": whisper1
+};
+function sttModel(ai, name, client) {
+const modelId = `openai/${name}`;
+const model = SUPPORTED_STT_MODELS[name];
+if (!model) throw new Error(`Unsupported model: ${name}`);
return ai.defineModel(
__spreadProps(__spreadValues({
-name: whisper1.name
-}, whisper1.info), {
-configSchema: whisper1.configSchema
+name: modelId
+}, model.info), {
+configSchema: model.configSchema
}),
(request) => __async(this, null, function* () {
-const result = yield client.audio.transcriptions.create(
-toWhisper1Request(request)
-);
+const params = toWhisper1Request(request);
+const result = yield client.audio.transcriptions.create(__spreadProps(__spreadValues({}, params), {
+stream: false
+}));
return toGenerateResponse(result);

@@ -164,6 +188,8 @@ })

0 && (module.exports = {
+SUPPORTED_STT_MODELS,
Whisper1ConfigSchema,
-whisper1,
-whisper1Model
+gpt4oTranscribe,
+sttModel,
+whisper1
});
//# sourceMappingURL=whisper.js.map

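With whisper1Model generalized into sttModel above, gpt-4o-transcribe and whisper-1 share one registration path and the same Whisper1ConfigSchema options (language, timestamp_granularities, response_format). A rough transcription sketch, assuming Genkit accepts a media part in the prompt; the file name and data-URL handling are placeholders, not taken from the package:

import { readFileSync } from 'node:fs';
import { genkit } from 'genkit';
import { openAI, gpt4oTranscribe } from 'genkitx-openai';

const ai = genkit({
  plugins: [openAI({ apiKey: process.env.OPENAI_API_KEY })],
});

async function main() {
  // Placeholder audio file, passed as a base64 data URL media part.
  const audio = readFileSync('sample.wav').toString('base64');
  const response = await ai.generate({
    model: gpt4oTranscribe,
    prompt: [
      { media: { url: `data:audio/wav;base64,${audio}`, contentType: 'audio/wav' } },
    ],
    config: { language: 'en', response_format: 'json' },
  });
  console.log(response.text); // assumed accessor for the transcript
}

main().catch(console.error);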
@@ -17,3 +17,3 @@ {

],
"version": "0.20.2",
"version": "0.20.3",
"type": "commonjs",

@@ -28,3 +28,3 @@ "repository": {

"dependencies": {
"openai": "^4.47.1"
"openai": "^4.95.0"
},

@@ -67,3 +67,3 @@ "peerDependencies": {

},
"gitHead": "07d893c51ed1b64d2a33c7be7c6ad494bbe9a01d"
"gitHead": "6dcb587c5aa87b090c3313590f690521c9375f19"
}

Sorry, the diff of this file is not supported yet