@ai-sdk/google
Comparing version 1.1.5 to 1.1.6
 # @ai-sdk/google
+## 1.1.6
+### Patch Changes
+- e012cd8: feat (provider/google): add reasoning support
 ## 1.1.5
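The headline change in 1.1.6 is reasoning support for Google's thinking model. A minimal usage sketch, assuming a core `ai` package version recent enough to surface the provider's reasoning text on the result; the prompt and logging are illustrative, not part of this diff:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// gemini-2.0-flash-thinking-exp is the model id this release wires up for reasoning.
const result = await generateText({
  model: google('gemini-2.0-flash-thinking-exp'),
  prompt: 'Briefly explain why the sky is blue.',
});

console.log(result.reasoning); // the model's thought text, if the ai core version exposes it
console.log(result.text);      // the final answer text
```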
@@ -5,3 +5,3 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
-type GoogleGenerativeAIModelId = 'gemini-2.0-flash-exp' | 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-exp-0827' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-exp-0924' | 'gemini-1.5-flash-8b-exp-0827' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.5-pro-exp-0827' | 'gemini-1.0-pro' | (string & {});
+type GoogleGenerativeAIModelId = 'gemini-2.0-flash-thinking-exp' | 'gemini-2.0-flash-exp' | 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-exp-0827' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-exp-0924' | 'gemini-1.5-flash-8b-exp-0827' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.5-pro-exp-0827' | 'gemini-1.0-pro' | (string & {});
 interface GoogleGenerativeAISettings {
@@ -409,2 +409,6 @@ "use strict";
 audioTimestamp: this.settings.audioTimestamp
 },
+// reasoning models:
+...isReasoningModel(this.modelId) && {
+  thinking_config: { include_thoughts: true }
+}
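The conditional spread above only attaches a `thinking_config` field to the request when the model is the thinking model. A self-contained sketch of the idiom; the enclosing request object and its other fields are illustrative, and exactly where the spread lands in the real request body is not visible from this hunk:

```ts
const modelId = 'gemini-2.0-flash-thinking-exp';

const isReasoningModel = (id: string): boolean =>
  id === 'gemini-2.0-flash-thinking-exp';

// Spreading `false` into an object literal is a no-op, so the field
// only shows up for the thinking model.
const requestBody = {
  contents: [], // illustrative placeholder
  ...(isReasoningModel(modelId) && {
    thinking_config: { include_thoughts: true },
  }),
};
// => { contents: [], thinking_config: { include_thoughts: true } }
```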
@@ -491,6 +495,10 @@ };
 );
+let url = `${this.config.baseURL}/${getModelPath(
+  this.modelId
+)}:generateContent`;
+if (isReasoningModel(this.modelId)) {
+  url = url.replace("v1beta", "v1alpha");
+}
 const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
-url: `${this.config.baseURL}/${getModelPath(
-  this.modelId
-)}:generateContent`,
+url,
 headers: mergedHeaders,
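For reasoning models the generateContent URL is rewritten from the default `v1beta` API surface to `v1alpha`. A sketch of the resulting URL, assuming the provider's default base URL and a plain `models/<id>` model path (both assumptions, not shown in this diff):

```ts
const baseURL = 'https://generativelanguage.googleapis.com/v1beta'; // assumed default
const modelId = 'gemini-2.0-flash-thinking-exp';

let url = `${baseURL}/models/${modelId}:generateContent`;
if (modelId === 'gemini-2.0-flash-thinking-exp') {
  url = url.replace('v1beta', 'v1alpha');
}
// => https://generativelanguage.googleapis.com/v1alpha/models/gemini-2.0-flash-thinking-exp:generateContent
console.log(url);
```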
@@ -511,3 +519,10 @@ body: args,
 return {
-text: getTextFromParts((_d = (_c = candidate.content) == null ? void 0 : _c.parts) != null ? _d : []),
+text: getTextFromParts({
+  parts: (_c = candidate.content) == null ? void 0 : _c.parts,
+  isThought: false
+}),
+reasoning: getTextFromParts({
+  parts: (_d = candidate.content) == null ? void 0 : _d.parts,
+  isThought: true
+}),
 toolCalls,
@@ -541,6 +556,10 @@ finishReason: mapGoogleGenerativeAIFinishReason({
 );
+let url = `${this.config.baseURL}/${getModelPath(
+  this.modelId
+)}:streamGenerateContent?alt=sse`;
+if (isReasoningModel(this.modelId)) {
+  url = url.replace("v1beta", "v1alpha");
+}
 const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
-url: `${this.config.baseURL}/${getModelPath(
-  this.modelId
-)}:streamGenerateContent?alt=sse`,
+url,
 headers,
@@ -599,3 +618,6 @@ body: args,
 }
-const deltaText = getTextFromParts(content.parts);
+const deltaText = getTextFromParts({
+  parts: content.parts,
+  isThought: false
+});
 if (deltaText != null) {
@@ -607,2 +629,12 @@ controller.enqueue({
 }
+const reasoningText = getTextFromParts({
+  parts: content.parts,
+  isThought: true
+});
+if (reasoningText != null) {
+  controller.enqueue({
+    type: "reasoning",
+    textDelta: reasoningText
+  });
+}
 const toolCallDeltas = getToolCallsFromParts({
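While streaming, thought parts are forwarded as separate `reasoning` stream parts instead of being folded into the text deltas. A consumption sketch, assuming a core `ai` version whose `fullStream` forwards these part types; the prompt is illustrative:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

const result = streamText({
  model: google('gemini-2.0-flash-thinking-exp'),
  prompt: 'What is 17 * 24? Think it through step by step.',
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') {
    process.stdout.write(`[thinking] ${part.textDelta}`); // thought text
  } else if (part.type === 'text-delta') {
    process.stdout.write(part.textDelta); // answer text
  }
}
```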
@@ -663,4 +695,12 @@ parts: content.parts,
 }
-function getTextFromParts(parts) {
-  const textParts = parts.filter((part) => "text" in part);
+function getTextFromParts({
+  parts,
+  isThought
+}) {
+  const textParts = (parts != null ? parts : []).filter(
+    (part) => {
+      var _a;
+      return "text" in part && ((_a = part.thought) != null ? _a : false) === isThought;
+    }
+  );
 return textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
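The transpiled filter above is easier to follow in source form. An equivalent TypeScript sketch, with the part type reduced to just the fields the helper reads:

```ts
type ContentPart = { text?: string; thought?: boolean | null };

// Join the text of all parts whose `thought` flag matches the requested mode;
// return undefined when there are none.
function getTextFromParts({
  parts,
  isThought,
}: {
  parts: ContentPart[] | undefined;
  isThought: boolean;
}): string | undefined {
  const textParts = (parts ?? []).filter(
    (part): part is ContentPart & { text: string } =>
      'text' in part && (part.thought ?? false) === isThought,
  );
  return textParts.length === 0
    ? undefined
    : textParts.map((part) => part.text).join('');
}
```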
@@ -673,3 +713,4 @@ }
 import_zod2.z.object({
-  text: import_zod2.z.string()
+  text: import_zod2.z.string(),
+  thought: import_zod2.z.boolean().nullish()
 }),
@@ -762,2 +803,5 @@ import_zod2.z.object({
 });
+function isReasoningModel(modelId) {
+  return modelId === "gemini-2.0-flash-thinking-exp";
+}
@@ -5,3 +5,3 @@ import { LanguageModelV1 } from '@ai-sdk/provider';
-type GoogleGenerativeAIModelId = 'gemini-2.0-flash-exp' | 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-exp-0827' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-exp-0924' | 'gemini-1.5-flash-8b-exp-0827' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.5-pro-exp-0827' | 'gemini-1.0-pro' | (string & {});
+type GoogleGenerativeAIModelId = 'gemini-2.0-flash-thinking-exp' | 'gemini-2.0-flash-exp' | 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-exp-0827' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-exp-0924' | 'gemini-1.5-flash-8b-exp-0827' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.5-pro-exp-0827' | 'gemini-1.0-pro' | (string & {});
 interface GoogleGenerativeAISettings {
@@ -407,2 +407,6 @@ "use strict";
 audioTimestamp: this.settings.audioTimestamp
 },
+// reasoning models:
+...isReasoningModel(this.modelId) && {
+  thinking_config: { include_thoughts: true }
+}
@@ -489,6 +493,10 @@ };
 );
+let url = `${this.config.baseURL}/${getModelPath(
+  this.modelId
+)}:generateContent`;
+if (isReasoningModel(this.modelId)) {
+  url = url.replace("v1beta", "v1alpha");
+}
 const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
-url: `${this.config.baseURL}/${getModelPath(
-  this.modelId
-)}:generateContent`,
+url,
 headers: mergedHeaders,
@@ -509,3 +517,10 @@ body: args,
 return {
-text: getTextFromParts((_d = (_c = candidate.content) == null ? void 0 : _c.parts) != null ? _d : []),
+text: getTextFromParts({
+  parts: (_c = candidate.content) == null ? void 0 : _c.parts,
+  isThought: false
+}),
+reasoning: getTextFromParts({
+  parts: (_d = candidate.content) == null ? void 0 : _d.parts,
+  isThought: true
+}),
 toolCalls,
@@ -539,6 +554,10 @@ finishReason: mapGoogleGenerativeAIFinishReason({
 );
+let url = `${this.config.baseURL}/${getModelPath(
+  this.modelId
+)}:streamGenerateContent?alt=sse`;
+if (isReasoningModel(this.modelId)) {
+  url = url.replace("v1beta", "v1alpha");
+}
 const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
-url: `${this.config.baseURL}/${getModelPath(
-  this.modelId
-)}:streamGenerateContent?alt=sse`,
+url,
 headers,
@@ -597,3 +616,6 @@ body: args,
 }
-const deltaText = getTextFromParts(content.parts);
+const deltaText = getTextFromParts({
+  parts: content.parts,
+  isThought: false
+});
 if (deltaText != null) {
@@ -605,2 +627,12 @@ controller.enqueue({
 }
+const reasoningText = getTextFromParts({
+  parts: content.parts,
+  isThought: true
+});
+if (reasoningText != null) {
+  controller.enqueue({
+    type: "reasoning",
+    textDelta: reasoningText
+  });
+}
 const toolCallDeltas = getToolCallsFromParts({
@@ -661,4 +693,12 @@ parts: content.parts,
 }
-function getTextFromParts(parts) {
-  const textParts = parts.filter((part) => "text" in part);
+function getTextFromParts({
+  parts,
+  isThought
+}) {
+  const textParts = (parts != null ? parts : []).filter(
+    (part) => {
+      var _a;
+      return "text" in part && ((_a = part.thought) != null ? _a : false) === isThought;
+    }
+  );
 return textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
@@ -671,3 +711,4 @@ }
 import_zod2.z.object({
-  text: import_zod2.z.string()
+  text: import_zod2.z.string(),
+  thought: import_zod2.z.boolean().nullish()
 }),
@@ -760,2 +801,5 @@ import_zod2.z.object({
 });
+function isReasoningModel(modelId) {
+  return modelId === "gemini-2.0-flash-thinking-exp";
+}
 // Annotate the CommonJS export names for ESM import in node:
 {
   "name": "@ai-sdk/google",
-  "version": "1.1.5",
+  "version": "1.1.6",
   "license": "Apache-2.0",