You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a DemoSign in
Socket

ai

Package Overview
Dependencies
Maintainers
5
Versions
1024
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

ai - npm Package Compare versions

Comparing version
7.0.0-beta.30
to
7.0.0-beta.32
+81
src/generate-text/invoke-tool-callbacks-from-stream.ts
import { UglyTransformedStreamTextPart } from './create-stream-text-part-transform';
import { ToolSet } from './tool-set';
import { ModelMessage } from '@ai-sdk/provider-utils';
/**
 * Wraps a stream of generation parts so that per-tool lifecycle callbacks
 * (`onInputStart`, `onInputDelta`, `onInputAvailable`) fire as the matching
 * chunks flow through. Every chunk is forwarded downstream unchanged; the
 * callback for a chunk is awaited before the next chunk is processed.
 *
 * @param stream - Incoming stream of transformed text parts.
 * @param tools - Tool set whose callbacks should be invoked; when undefined,
 *   the input stream is returned untouched.
 * @param stepInputMessages - Messages of the current step, passed to every
 *   callback invocation.
 * @param abortSignal - Optional abort signal forwarded to every callback.
 * @param experimental_context - Opaque user context forwarded to callbacks.
 * @returns A stream yielding exactly the input chunks, with tool-callback
 *   side effects interleaved.
 */
export function invokeToolCallbacksFromStream<TOOLS extends ToolSet>({
  stream,
  tools,
  stepInputMessages,
  abortSignal,
  experimental_context,
}: {
  stream: ReadableStream<UglyTransformedStreamTextPart<TOOLS>>;
  tools: TOOLS | undefined;
  stepInputMessages: Array<ModelMessage>;
  abortSignal: AbortSignal | undefined;
  experimental_context: unknown;
}): ReadableStream<UglyTransformedStreamTextPart<TOOLS>> {
  // Without tools there is nothing to invoke; skip the transform entirely.
  if (tools == null) {
    return stream;
  }

  // Maps an in-flight tool-call id to its tool name, because delta and
  // final tool-call chunks only carry the id, not the name.
  const activeToolNames = new Map<string, string>();

  return stream.pipeThrough(
    new TransformStream({
      async transform(part, controller) {
        // Enqueue first so the chunk is visible downstream even while its
        // callback is still running.
        controller.enqueue(part);

        if (part.type === 'tool-input-start') {
          activeToolNames.set(part.id, part.toolName);
          const tool = tools[part.toolName];
          if (tool?.onInputStart != null) {
            await tool.onInputStart({
              toolCallId: part.id,
              messages: stepInputMessages,
              abortSignal,
              experimental_context,
            });
          }
        } else if (part.type === 'tool-input-delta') {
          const toolName = activeToolNames.get(part.id);
          const tool = toolName != null ? tools[toolName] : undefined;
          if (tool?.onInputDelta != null) {
            await tool.onInputDelta({
              inputTextDelta: part.delta,
              toolCallId: part.id,
              messages: stepInputMessages,
              abortSignal,
              experimental_context,
            });
          }
        } else if (part.type === 'tool-call') {
          const toolName = activeToolNames.get(part.toolCallId);
          const tool = toolName != null ? tools[toolName] : undefined;
          // The call is complete; drop the bookkeeping entry either way.
          activeToolNames.delete(part.toolCallId);
          if (tool?.onInputAvailable != null) {
            await tool.onInputAvailable({
              input: part.input,
              toolCallId: part.toolCallId,
              messages: stepInputMessages,
              abortSignal,
              experimental_context,
            });
          }
        }
      },
    }),
  );
}
+2
-23

@@ -87,17 +87,3 @@ import { SystemModelMessage, ModelMessage, Tool } from '@ai-sdk/provider-utils';

/**
* Timeout configuration for API calls. Can be specified as:
* - A number representing milliseconds
* - An object with `totalMs` property for the total timeout in milliseconds
* - An object with `stepMs` property for the timeout of each step in milliseconds
* - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
*/
type TimeoutConfiguration<TOOLS extends ToolSet> = number | {
totalMs?: number;
stepMs?: number;
chunkMs?: number;
toolMs?: number;
tools?: Partial<Record<`${keyof TOOLS & string}Ms`, number>>;
};
type CallSettings<TOOLS extends ToolSet> = {
type CallSettings = {
/**

@@ -175,9 +161,2 @@ * Maximum number of tokens to generate.

/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with `totalMs`.
*/
timeout?: TimeoutConfiguration<TOOLS>;
/**
* Additional HTTP headers to be sent with the request.

@@ -311,3 +290,3 @@ * Only applicable for HTTP-based providers.

*/
declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, reasoning, }: Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'maxRetries'>;
declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, reasoning, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;

@@ -314,0 +293,0 @@ type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => PromiseLike<OUTPUT>;

@@ -87,17 +87,3 @@ import { SystemModelMessage, ModelMessage, Tool } from '@ai-sdk/provider-utils';

/**
* Timeout configuration for API calls. Can be specified as:
* - A number representing milliseconds
* - An object with `totalMs` property for the total timeout in milliseconds
* - An object with `stepMs` property for the timeout of each step in milliseconds
* - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
*/
type TimeoutConfiguration<TOOLS extends ToolSet> = number | {
totalMs?: number;
stepMs?: number;
chunkMs?: number;
toolMs?: number;
tools?: Partial<Record<`${keyof TOOLS & string}Ms`, number>>;
};
type CallSettings<TOOLS extends ToolSet> = {
type CallSettings = {
/**

@@ -175,9 +161,2 @@ * Maximum number of tokens to generate.

/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with `totalMs`.
*/
timeout?: TimeoutConfiguration<TOOLS>;
/**
* Additional HTTP headers to be sent with the request.

@@ -311,3 +290,3 @@ * Only applicable for HTTP-based providers.

*/
declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, reasoning, }: Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'maxRetries'>;
declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, reasoning, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;

@@ -314,0 +293,0 @@ type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => PromiseLike<OUTPUT>;

@@ -156,3 +156,3 @@ "use strict";

// src/version.ts
var VERSION = true ? "7.0.0-beta.30" : "0.0.0-test";
var VERSION = true ? "7.0.0-beta.32" : "0.0.0-test";

@@ -159,0 +159,0 @@ // src/util/download/download.ts

@@ -136,3 +136,3 @@ // internal/index.ts

// src/version.ts
var VERSION = true ? "7.0.0-beta.30" : "0.0.0-test";
var VERSION = true ? "7.0.0-beta.32" : "0.0.0-test";

@@ -139,0 +139,0 @@ // src/util/download/download.ts

{
"name": "ai",
"version": "7.0.0-beta.30",
"version": "7.0.0-beta.32",
"description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",

@@ -48,3 +48,3 @@ "license": "Apache-2.0",

"@opentelemetry/api": "1.9.0",
"@ai-sdk/gateway": "4.0.0-beta.17",
"@ai-sdk/gateway": "4.0.0-beta.18",
"@ai-sdk/provider": "4.0.0-beta.4",

@@ -51,0 +51,0 @@ "@ai-sdk/provider-utils": "5.0.0-beta.6"

@@ -20,3 +20,3 @@ import {

import { ToolSet } from '../generate-text/tool-set';
import { CallSettings } from '../prompt/call-settings';
import { CallSettings, TimeoutConfiguration } from '../prompt/call-settings';
import { Prompt } from '../prompt/prompt';

@@ -61,4 +61,12 @@ import { TelemetrySettings } from '../telemetry/telemetry-settings';

OUTPUT extends Output = never,
> = Omit<CallSettings<TOOLS>, 'abortSignal'> & {
> = Omit<CallSettings, 'abortSignal'> & {
/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with `totalMs`.
*/
timeout?: TimeoutConfiguration<TOOLS>;
/**
* The id of the agent.

@@ -65,0 +73,0 @@ */

@@ -121,3 +121,3 @@ import { JSONValue } from '@ai-sdk/provider';

>(
options: Omit<CallSettings<any>, 'stopSequences'> &
options: Omit<CallSettings, 'stopSequences'> &
Prompt &

@@ -124,0 +124,0 @@ (OUTPUT extends 'enum'

@@ -182,3 +182,3 @@ import {

>(
options: Omit<CallSettings<any>, 'stopSequences'> &
options: Omit<CallSettings, 'stopSequences'> &
Prompt &

@@ -404,3 +404,3 @@ (OUTPUT extends 'enum'

headers: Record<string, string | undefined> | undefined;
settings: Omit<CallSettings<any>, 'abortSignal' | 'headers'>;
settings: Omit<CallSettings, 'abortSignal' | 'headers'>;
maxRetries: number | undefined;

@@ -407,0 +407,0 @@ abortSignal: AbortSignal | undefined;

@@ -127,12 +127,2 @@ import { IdGenerator, ModelMessage } from '@ai-sdk/provider-utils';

if (tool.onInputAvailable != null) {
await tool.onInputAvailable({
input: chunk.input,
toolCallId: chunk.toolCallId,
messages,
abortSignal,
experimental_context,
});
}
if (

@@ -139,0 +129,0 @@ await isApprovalNeeded({

@@ -223,2 +223,13 @@ import {

case 'tool-input-start': {
const tool = tools?.[chunk.toolName];
controller.enqueue({
...chunk,
dynamic: chunk.dynamic ?? tool?.type === 'dynamic',
title: tool?.title,
});
break;
}
default:

@@ -225,0 +236,0 @@ controller.enqueue(chunk);

@@ -282,3 +282,3 @@ import {

...settings
}: CallSettings<TOOLS> &
}: CallSettings &
Prompt & {

@@ -291,2 +291,10 @@ /**

/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with `totalMs`.
*/
timeout?: TimeoutConfiguration<TOOLS>;
/**
* The tools that the model can call. The model needs to support calling tools.

@@ -293,0 +301,0 @@ */

import { LanguageModelV4CallOptions } from '@ai-sdk/provider';
import { ToolSet } from '../generate-text/tool-set';
import type { ToolSet } from '../generate-text/tool-set';

@@ -81,3 +81,3 @@ /**

export type CallSettings<TOOLS extends ToolSet> = {
export type CallSettings = {
/**

@@ -166,10 +166,2 @@ * Maximum number of tokens to generate.

/**
* Timeout in milliseconds. The call will be aborted if it takes longer
* than the specified timeout. Can be used alongside abortSignal.
*
* Can be specified as a number (milliseconds) or as an object with `totalMs`.
*/
timeout?: TimeoutConfiguration<TOOLS>;
/**
* Additional HTTP headers to be sent with the request.

@@ -176,0 +168,0 @@ * Only applicable for HTTP-based providers.

@@ -17,4 +17,4 @@ import { InvalidArgumentError } from '../error/invalid-argument-error';

reasoning,
}: Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<
CallSettings<any>,
}: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<
CallSettings,
'abortSignal' | 'headers' | 'maxRetries'

@@ -21,0 +21,0 @@ > {

import { Attributes, AttributeValue } from '@opentelemetry/api';
import { CallSettings, getTotalTimeoutMs } from '../prompt/call-settings';
import { CallSettings } from '../prompt/call-settings';
import { TelemetrySettings } from './telemetry-settings';

@@ -12,3 +12,3 @@

model: { modelId: string; provider: string };
settings: Omit<CallSettings<any>, 'abortSignal' | 'headers' | 'temperature'>;
settings: Omit<CallSettings, 'abortSignal' | 'headers' | 'temperature'>;
telemetry: TelemetrySettings | undefined;

@@ -23,13 +23,3 @@ headers: Record<string, string | undefined> | undefined;

...Object.entries(settings).reduce((attributes, [key, value]) => {
// Handle timeout specially since it can be a number or object
if (key === 'timeout') {
const totalTimeoutMs = getTotalTimeoutMs(
value as Parameters<typeof getTotalTimeoutMs>[0],
);
if (totalTimeoutMs != null) {
attributes[`ai.settings.${key}`] = totalTimeoutMs;
}
} else {
attributes[`ai.settings.${key}`] = value as AttributeValue;
}
attributes[`ai.settings.${key}`] = value as AttributeValue;
return attributes;

@@ -36,0 +26,0 @@ }, {} as Attributes),

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display