You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a DemoSign in
Socket

@ai-sdk/google

Package Overview
Dependencies
Maintainers
3
Versions
418
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@ai-sdk/google - npm Package Compare versions

Comparing version
4.0.0-beta.14
to
4.0.0-beta.15
+11
-0
CHANGELOG.md
# @ai-sdk/google
## 4.0.0-beta.15
### Patch Changes
- 18c1970: feat(provider/google): Add multimodal tool-result support for Google function responses.
Tool results with `output.type = 'content'` now map media parts into
`functionResponse.parts` for Google models, including `image-data`,
`file-data`, and base64 `data:` URLs in URL-style content parts.
Remote HTTP(S) URLs in URL-style tool-result parts are not supported.
## 4.0.0-beta.14

@@ -4,0 +15,0 @@

+119
-35

@@ -178,4 +178,108 @@ // src/google-generative-ai-language-model.ts

import { convertToBase64 } from "@ai-sdk/provider-utils";
// Matches a base64-encoded `data:` URL; the `s` flag lets the payload span newlines.
var dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;
// Splits a base64 `data:` URL into its media type and base64 payload.
// Returns undefined when the value is not a base64 data URL.
function parseBase64DataUrl(value) {
  const match = dataUrlRegex.exec(value);
  return match == null
    ? void 0
    : { mediaType: match[1], data: match[2] };
}
// Wraps a base64 `data:` URL as an inlineData function-response part.
// Anything that is not a base64 data URL (e.g. a remote http/https URL)
// yields undefined so the caller can fall back to a textual representation.
function convertUrlToolResultPart(url) {
  const parsed = parseBase64DataUrl(url);
  if (parsed == null) {
    return void 0;
  }
  return {
    inlineData: { mimeType: parsed.mediaType, data: parsed.data }
  };
}
// Converts a tool result's content parts into one functionResponse entry.
// Text pieces are joined into the textual response; binary pieces become
// inlineData entries attached under functionResponse.parts; unconvertible or
// unknown pieces are serialized to JSON and folded into the text.
function appendToolResultParts(parts, toolName, outputValue) {
  const mediaParts = [];
  const textPieces = [];
  for (const item of outputValue) {
    if (item.type === "text") {
      textPieces.push(item.text);
    } else if (item.type === "image-data" || item.type === "file-data") {
      mediaParts.push({
        inlineData: { mimeType: item.mediaType, data: item.data }
      });
    } else if (item.type === "image-url" || item.type === "file-url") {
      const converted = convertUrlToolResultPart(item.url);
      if (converted != null) {
        mediaParts.push(converted);
      } else {
        // Non-data URLs cannot be inlined; fall back to JSON text.
        textPieces.push(JSON.stringify(item));
      }
    } else {
      textPieces.push(JSON.stringify(item));
    }
  }
  const response = {
    name: toolName,
    content: textPieces.length > 0 ? textPieces.join("\n") : "Tool executed successfully."
  };
  parts.push(
    mediaParts.length > 0
      ? { functionResponse: { name: toolName, response, parts: mediaParts } }
      : { functionResponse: { name: toolName, response } }
  );
}
// Emits tool-result content in the legacy (pre-Gemini 3) shape: each text
// piece becomes its own functionResponse part, each image-data piece becomes
// a standalone top-level inlineData part followed by a fixed text marker, and
// anything else is serialized to a JSON text part.
function appendLegacyToolResultParts(parts, toolName, outputValue) {
  for (const item of outputValue) {
    if (item.type === "text") {
      parts.push({
        functionResponse: {
          name: toolName,
          response: { name: toolName, content: item.text }
        }
      });
      continue;
    }
    if (item.type === "image-data") {
      parts.push({
        inlineData: {
          mimeType: String(item.mediaType),
          data: String(item.data)
        }
      });
      parts.push({
        text: "Tool executed successfully and returned this image as a response"
      });
      continue;
    }
    parts.push({ text: JSON.stringify(item) });
  }
}
function convertToGoogleGenerativeAIMessages(prompt, options) {
var _a, _b, _c;
var _a, _b, _c, _d;
const systemInstructionParts = [];

@@ -186,2 +290,3 @@ const contents = [];

const providerOptionsName = (_b = options == null ? void 0 : options.providerOptionsName) != null ? _b : "google";
const supportsFunctionResponseParts = (_c = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _c : true;
for (const { role, content } of prompt) {

@@ -234,4 +339,4 @@ switch (role) {

parts: content.map((part) => {
var _a2, _b2, _c2, _d;
const providerOpts = (_d = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
var _a2, _b2, _c2, _d2;
const providerOpts = (_d2 = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d2 : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
const thoughtSignature = (providerOpts == null ? void 0 : providerOpts.thoughtSignature) != null ? String(providerOpts.thoughtSignature) : void 0;

@@ -305,32 +410,6 @@ switch (part.type) {

if (output.type === "content") {
for (const contentPart of output.value) {
switch (contentPart.type) {
case "text":
parts.push({
functionResponse: {
name: part.toolName,
response: {
name: part.toolName,
content: contentPart.text
}
}
});
break;
case "image-data":
parts.push(
{
inlineData: {
mimeType: contentPart.mediaType,
data: contentPart.data
}
},
{
text: "Tool executed successfully and returned this image as a response"
}
);
break;
default:
parts.push({ text: JSON.stringify(contentPart) });
break;
}
if (supportsFunctionResponseParts) {
appendToolResultParts(parts, part.toolName, output.value);
} else {
appendLegacyToolResultParts(parts, part.toolName, output.value);
}

@@ -343,3 +422,3 @@ } else {

name: part.toolName,
content: output.type === "execution-denied" ? (_c = output.reason) != null ? _c : "Tool execution denied." : output.value
content: output.type === "execution-denied" ? (_d = output.reason) != null ? _d : "Tool execution denied." : output.value
}

@@ -812,5 +891,10 @@ }

const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
prompt,
{ isGemmaModel, providerOptionsName }
{
isGemmaModel,
providerOptionsName,
supportsFunctionResponseParts
}
);

@@ -817,0 +901,0 @@ const {

{
"name": "@ai-sdk/google",
"version": "4.0.0-beta.14",
"version": "4.0.0-beta.15",
"license": "Apache-2.0",

@@ -5,0 +5,0 @@ "sideEffects": false,

@@ -5,12 +5,167 @@ import {

} from '@ai-sdk/provider';
import { convertToBase64 } from '@ai-sdk/provider-utils';
import {
GoogleGenerativeAIContent,
GoogleGenerativeAIContentPart,
GoogleGenerativeAIFunctionResponsePart,
GoogleGenerativeAIPrompt,
} from './google-generative-ai-prompt';
import { convertToBase64 } from '@ai-sdk/provider-utils';
/** Matches a base64 `data:` URL; the `s` flag lets the payload span newlines. */
const dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;

/**
 * Splits a base64 `data:` URL into its media type and base64 payload.
 *
 * @param value - Candidate URL string.
 * @returns The parsed `{ mediaType, data }` pair, or `undefined` when the
 *   value is not a base64 data URL.
 */
function parseBase64DataUrl(
  value: string,
): { mediaType: string; data: string } | undefined {
  const match = dataUrlRegex.exec(value);
  return match == null ? undefined : { mediaType: match[1], data: match[2] };
}
function convertUrlToolResultPart(
url: string,
): GoogleGenerativeAIFunctionResponsePart | undefined {
// Per https://ai.google.dev/api/caching#FunctionResponsePart, only inline data is supported.
// https://docs.cloud.google.com/vertex-ai/generative-ai/docs/model-reference/function-calling#functionresponsepart suggests that this
// may be different for Vertex, but this needs to be confirmed and further tested for both APIs.
const parsedDataUrl = parseBase64DataUrl(url);
if (parsedDataUrl == null) {
return undefined;
}
return {
inlineData: {
mimeType: parsedDataUrl.mediaType,
data: parsedDataUrl.data,
},
};
}
/*
 * Appends tool result content parts to the message using the functionResponse
 * format with support for multimodal parts (e.g. inline images/files alongside
 * text). This format is supported by Gemini 3+ models.
 */
function appendToolResultParts(
  parts: GoogleGenerativeAIContentPart[],
  toolName: string,
  outputValue: Array<{
    type: string;
    [key: string]: unknown;
  }>,
): void {
  const mediaParts: GoogleGenerativeAIFunctionResponsePart[] = [];
  const textPieces: string[] = [];

  for (const item of outputValue) {
    if (item.type === 'text') {
      textPieces.push(item.text as string);
    } else if (item.type === 'image-data' || item.type === 'file-data') {
      mediaParts.push({
        inlineData: {
          mimeType: item.mediaType as string,
          data: item.data as string,
        },
      });
    } else if (item.type === 'image-url' || item.type === 'file-url') {
      const converted = convertUrlToolResultPart(item.url as string);
      if (converted != null) {
        mediaParts.push(converted);
      } else {
        // Non-data URLs cannot be inlined; fall back to JSON text.
        textPieces.push(JSON.stringify(item));
      }
    } else {
      // Unknown content part types are serialized into the textual response.
      textPieces.push(JSON.stringify(item));
    }
  }

  const response = {
    name: toolName,
    content:
      textPieces.length > 0
        ? textPieces.join('\n')
        : 'Tool executed successfully.',
  };
  parts.push(
    mediaParts.length > 0
      ? { functionResponse: { name: toolName, response, parts: mediaParts } }
      : { functionResponse: { name: toolName, response } },
  );
}
/*
 * Appends tool result content parts using a legacy format for pre-Gemini 3
 * models that do not support multimodal parts within functionResponse. Instead,
 * non-text content like images is sent as separate top-level inlineData parts.
 */
function appendLegacyToolResultParts(
  parts: GoogleGenerativeAIContentPart[],
  toolName: string,
  outputValue: Array<{
    type: string;
    [key: string]: unknown;
  }>,
): void {
  for (const item of outputValue) {
    if (item.type === 'text') {
      parts.push({
        functionResponse: {
          name: toolName,
          response: { name: toolName, content: item.text },
        },
      });
      continue;
    }
    if (item.type === 'image-data') {
      // Image bytes go out as a standalone inlineData part, followed by a
      // fixed text marker so the model sees an explicit tool outcome.
      parts.push({
        inlineData: {
          mimeType: String(item.mediaType),
          data: String(item.data),
        },
      });
      parts.push({
        text: 'Tool executed successfully and returned this image as a response',
      });
      continue;
    }
    // Everything else (including url-style parts) is serialized as JSON text.
    parts.push({ text: JSON.stringify(item) });
  }
}
export function convertToGoogleGenerativeAIMessages(
prompt: LanguageModelV4Prompt,
options?: { isGemmaModel?: boolean; providerOptionsName?: string },
options?: {
isGemmaModel?: boolean;
providerOptionsName?: string;
supportsFunctionResponseParts?: boolean;
},
): GoogleGenerativeAIPrompt {

@@ -22,2 +177,4 @@ const systemInstructionParts: Array<{ text: string }> = [];

const providerOptionsName = options?.providerOptionsName ?? 'google';
const supportsFunctionResponseParts =
options?.supportsFunctionResponseParts ?? true;

@@ -183,32 +340,6 @@ for (const { role, content } of prompt) {

if (output.type === 'content') {
for (const contentPart of output.value) {
switch (contentPart.type) {
case 'text':
parts.push({
functionResponse: {
name: part.toolName,
response: {
name: part.toolName,
content: contentPart.text,
},
},
});
break;
case 'image-data':
parts.push(
{
inlineData: {
mimeType: contentPart.mediaType,
data: contentPart.data,
},
},
{
text: 'Tool executed successfully and returned this image as a response',
},
);
break;
default:
parts.push({ text: JSON.stringify(contentPart) });
break;
}
if (supportsFunctionResponseParts) {
appendToolResultParts(parts, part.toolName, output.value);
} else {
appendLegacyToolResultParts(parts, part.toolName, output.value);
}

@@ -215,0 +346,0 @@ } else {

@@ -142,6 +142,11 @@ import {

const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');
const supportsFunctionResponseParts = this.modelId.startsWith('gemini-3');
const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
prompt,
{ isGemmaModel, providerOptionsName },
{
isGemmaModel,
providerOptionsName,
supportsFunctionResponseParts,
},
);

@@ -148,0 +153,0 @@

@@ -5,5 +5,5 @@ import {

UrlContextMetadataSchema,
type SafetyRatingSchema,
UsageMetadataSchema,
} from './google-generative-ai-language-model';
import { type SafetyRatingSchema } from './google-generative-ai-language-model';

@@ -28,5 +28,15 @@ export type GoogleGenerativeAIPrompt = {

| { functionCall: { name: string; args: unknown }; thoughtSignature?: string }
| { functionResponse: { name: string; response: unknown } }
| {
functionResponse: {
name: string;
response: unknown;
parts?: Array<GoogleGenerativeAIFunctionResponsePart>;
};
}
| { fileData: { mimeType: string; fileUri: string } };
export type GoogleGenerativeAIFunctionResponsePart = {
inlineData: { mimeType: string; data: string };
};
export type GoogleGenerativeAIGroundingMetadata = GroundingMetadataSchema;

@@ -33,0 +43,0 @@

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display