You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a Demo · Sign in
Socket

ai

Package Overview
Dependencies
Maintainers
5
Versions
1024
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

ai - npm package: compare versions

Comparing version
6.0.133
to
6.0.134
+1
-1
dist/internal/index.js

@@ -156,3 +156,3 @@ "use strict";

// src/version.ts
var VERSION = true ? "6.0.133" : "0.0.0-test";
var VERSION = true ? "6.0.134" : "0.0.0-test";

@@ -159,0 +159,0 @@ // src/util/download/download.ts

@@ -136,3 +136,3 @@ // internal/index.ts

// src/version.ts
var VERSION = true ? "6.0.133" : "0.0.0-test";
var VERSION = true ? "6.0.134" : "0.0.0-test";

@@ -139,0 +139,0 @@ // src/util/download/download.ts

{
"name": "ai",
"version": "6.0.133",
"description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",
"version": "6.0.134",
"description": "AI SDK by Vercel - build apps like ChatGPT, Claude, Gemini, and more with a single interface for any model using the Vercel AI Gateway or go direct to OpenAI, Anthropic, Google, or any other model provider.",
"license": "Apache-2.0",

@@ -57,3 +57,2 @@ "sideEffects": false,

"esbuild": "^0.24.2",
"eslint": "8.57.1",
"tsup": "^7.2.0",

@@ -64,4 +63,3 @@ "tsx": "^4.19.2",

"@ai-sdk/test-server": "1.0.3",
"@vercel/ai-tsconfig": "0.0.0",
"eslint-config-vercel-ai": "0.0.0"
"@vercel/ai-tsconfig": "0.0.0"
},

@@ -114,5 +112,3 @@ "peerDependencies": {

"clean": "del-cli dist docs *.tsbuildinfo",
"lint": "eslint \"./**/*.ts*\"",
"type-check": "tsc --build",
"prettier-check": "prettier --check \"./**/*.ts*\"",
"test": "pnpm test:node && pnpm test:edge",

@@ -119,0 +115,0 @@ "test:update": "pnpm test:node -u",

@@ -27,4 +27,3 @@ import { generateText } from '../generate-text/generate-text';

OUTPUT extends Output = never,
> implements Agent<CALL_OPTIONS, TOOLS, OUTPUT>
{
> implements Agent<CALL_OPTIONS, TOOLS, OUTPUT> {
readonly version = 'agent-v1';

@@ -31,0 +30,0 @@

@@ -1,6 +0,2 @@

import {
createIdGenerator,
ProviderOptions,
withUserAgentSuffix,
} from '@ai-sdk/provider-utils';
import { ProviderOptions, withUserAgentSuffix } from '@ai-sdk/provider-utils';
import { logWarnings } from '../logger/log-warnings';

@@ -16,15 +12,7 @@ import { resolveEmbeddingModel } from '../model/resolve-model';

import { Warning } from '../types/warning';
import { notify } from '../util/notify';
import { prepareRetries } from '../util/prepare-retries';
import { splitArray } from '../util/split-array';
import type { EmbedOnFinishEvent, EmbedOnStartEvent } from './embed-events';
import { EmbedManyResult } from './embed-many-result';
import { VERSION } from '../version';
import type { Listener } from '../util/notify';
const originalGenerateCallId = createIdGenerator({
prefix: 'call',
size: 24,
});
/**

@@ -63,5 +51,2 @@ * Embed several values using an embedding model. The type of the value is defined

experimental_telemetry: telemetry,
experimental_onStart: onStart,
experimental_onFinish: onFinish,
_internal: { generateCallId = originalGenerateCallId } = {},
}: {

@@ -114,21 +99,2 @@ /**

maxParallelCalls?: number;
/**
* Callback that is called when the embedMany operation begins,
* before the embedding model is called.
*/
experimental_onStart?: Listener<EmbedOnStartEvent>;
/**
* Callback that is called when the embedMany operation completes,
* after all embedding model calls return.
*/
experimental_onFinish?: Listener<EmbedOnFinishEvent>;
/**
* Internal. For test use only. May change without notice.
*/
_internal?: {
generateCallId?: () => string;
};
}): Promise<EmbedManyResult> {

@@ -147,24 +113,2 @@ const model = resolveEmbeddingModel(modelArg);

const callId = generateCallId();
const modelInfo = { provider: model.provider, modelId: model.modelId };
await notify({
event: {
callId,
operationId: 'ai.embedMany',
model: modelInfo,
value: values,
maxRetries,
abortSignal,
headers: headersWithUserAgent,
providerOptions,
isEnabled: telemetry?.isEnabled,
recordInputs: telemetry?.recordInputs,
recordOutputs: telemetry?.recordOutputs,
functionId: telemetry?.functionId,
metadata: telemetry?.metadata,
},
callbacks: [onStart],
});
const baseTelemetryAttributes = getBaseTelemetryAttributes({

@@ -278,22 +222,2 @@ model,

await notify({
event: {
callId,
operationId: 'ai.embedMany',
model: modelInfo,
value: values,
embedding: embeddings,
usage,
warnings,
providerMetadata,
response: [response],
isEnabled: telemetry?.isEnabled,
recordInputs: telemetry?.recordInputs,
recordOutputs: telemetry?.recordOutputs,
functionId: telemetry?.functionId,
metadata: telemetry?.metadata,
},
callbacks: [onFinish],
});
return new DefaultEmbedManyResult({

@@ -432,22 +356,2 @@ values,

await notify({
event: {
callId,
operationId: 'ai.embedMany',
model: modelInfo,
value: values,
embedding: embeddings,
usage: { tokens },
warnings,
providerMetadata,
response: responses,
isEnabled: telemetry?.isEnabled,
recordInputs: telemetry?.recordInputs,
recordOutputs: telemetry?.recordOutputs,
functionId: telemetry?.functionId,
metadata: telemetry?.metadata,
},
callbacks: [onFinish],
});
return new DefaultEmbedManyResult({

@@ -454,0 +358,0 @@ values,

@@ -1,6 +0,2 @@

import {
createIdGenerator,
ProviderOptions,
withUserAgentSuffix,
} from '@ai-sdk/provider-utils';
import { ProviderOptions, withUserAgentSuffix } from '@ai-sdk/provider-utils';
import { logWarnings } from '../logger/log-warnings';

@@ -15,14 +11,6 @@ import { resolveEmbeddingModel } from '../model/resolve-model';

import { EmbeddingModel } from '../types';
import { notify } from '../util/notify';
import { prepareRetries } from '../util/prepare-retries';
import type { EmbedOnFinishEvent, EmbedOnStartEvent } from './embed-events';
import { EmbedResult } from './embed-result';
import { VERSION } from '../version';
import type { Listener } from '../util/notify';
const originalGenerateCallId = createIdGenerator({
prefix: 'call',
size: 24,
});
/**

@@ -54,5 +42,2 @@ * Embed a value using an embedding model. The type of the value is defined by the embedding model.

experimental_telemetry: telemetry,
experimental_onStart: onStart,
experimental_onFinish: onFinish,
_internal: { generateCallId = originalGenerateCallId } = {},
}: {

@@ -98,21 +83,2 @@ /**

experimental_telemetry?: TelemetrySettings;
/**
* Callback that is called when the embed operation begins,
* before the embedding model is called.
*/
experimental_onStart?: Listener<EmbedOnStartEvent>;
/**
* Callback that is called when the embed operation completes,
* after the embedding model returns.
*/
experimental_onFinish?: Listener<EmbedOnFinishEvent>;
/**
* Internal. For test use only. May change without notice.
*/
_internal?: {
generateCallId?: () => string;
};
}): Promise<EmbedResult> {

@@ -131,24 +97,2 @@ const model = resolveEmbeddingModel(modelArg);

const callId = generateCallId();
const modelInfo = { provider: model.provider, modelId: model.modelId };
await notify({
event: {
callId,
operationId: 'ai.embed',
model: modelInfo,
value,
maxRetries,
abortSignal,
headers: headersWithUserAgent,
providerOptions,
isEnabled: telemetry?.isEnabled,
recordInputs: telemetry?.recordInputs,
recordOutputs: telemetry?.recordOutputs,
functionId: telemetry?.functionId,
metadata: telemetry?.metadata,
},
callbacks: [onStart],
});
const baseTelemetryAttributes = getBaseTelemetryAttributes({

@@ -242,22 +186,2 @@ model: model,

await notify({
event: {
callId,
operationId: 'ai.embed',
model: modelInfo,
value,
embedding,
usage,
warnings,
providerMetadata,
response,
isEnabled: telemetry?.isEnabled,
recordInputs: telemetry?.recordInputs,
recordOutputs: telemetry?.recordOutputs,
functionId: telemetry?.functionId,
metadata: telemetry?.metadata,
},
callbacks: [onFinish],
});
return new DefaultEmbedResult({

@@ -264,0 +188,0 @@ value,

// Public surface of the embed module: re-exports the embed/embedMany
// entry points, their result types, and the callback event types.
// NOTE(review): `export *` order can matter if sibling modules ever
// export the same name — keep additions alphabetized as here.
export * from './embed';
export * from './embed-events';
export * from './embed-many';
export * from './embed-many-result';
export * from './embed-result';

@@ -115,7 +115,4 @@ import { JSONValue } from '@ai-sdk/provider';

SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue>,
OUTPUT extends
| 'object'
| 'array'
| 'enum'
| 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object',
OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' =
InferSchema<SCHEMA> extends string ? 'enum' : 'object',
RESULT = OUTPUT extends 'array'

@@ -122,0 +119,0 @@ ? Array<InferSchema<SCHEMA>>

@@ -176,7 +176,4 @@ import {

SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue>,
OUTPUT extends
| 'object'
| 'array'
| 'enum'
| 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object',
OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' =
InferSchema<SCHEMA> extends string ? 'enum' : 'object',
RESULT = OUTPUT extends 'array'

@@ -357,5 +354,7 @@ ? Array<InferSchema<SCHEMA>>

class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
implements StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
{
class DefaultStreamObjectResult<
PARTIAL,
RESULT,
ELEMENT_STREAM,
> implements StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
private readonly _object = new DelayedPromise<RESULT>();

@@ -362,0 +361,0 @@ private readonly _usage = new DelayedPromise<LanguageModelUsage>();

@@ -19,3 +19,3 @@ import type { LanguageModelV3ToolChoice } from '@ai-sdk/provider';

*/
export interface ModelEventInfo {
export interface CallbackModelInfo {
/** The provider identifier (e.g., 'openai', 'anthropic'). */

@@ -38,3 +38,3 @@ readonly provider: string;

/** The model being used for generation. */
readonly model: ModelEventInfo;
readonly model: CallbackModelInfo;

@@ -142,3 +142,3 @@ /** The system message(s) provided to the model. */

/** The model being used for this step. */
readonly model: ModelEventInfo;
readonly model: CallbackModelInfo;

@@ -227,3 +227,3 @@ /**

/** The model being used for this step. */
readonly model: ModelEventInfo | undefined;
readonly model: CallbackModelInfo | undefined;

@@ -260,3 +260,3 @@ /** The full tool call object. */

/** The model being used for this step. */
readonly model: ModelEventInfo | undefined;
readonly model: CallbackModelInfo | undefined;

@@ -263,0 +263,0 @@ /** The full tool call object. */

@@ -1312,5 +1312,6 @@ import {

class DefaultGenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output>
implements GenerateTextResult<TOOLS, OUTPUT>
{
class DefaultGenerateTextResult<
TOOLS extends ToolSet,
OUTPUT extends Output,
> implements GenerateTextResult<TOOLS, OUTPUT> {
readonly steps: GenerateTextResult<TOOLS, OUTPUT>['steps'];

@@ -1317,0 +1318,0 @@ readonly totalUsage: LanguageModelUsage;

@@ -168,5 +168,5 @@ import { ReasoningPart } from '@ai-sdk/provider-utils';

export class DefaultStepResult<TOOLS extends ToolSet>
implements StepResult<TOOLS>
{
export class DefaultStepResult<
TOOLS extends ToolSet,
> implements StepResult<TOOLS> {
readonly stepNumber: StepResult<TOOLS>['stepNumber'];

@@ -173,0 +173,0 @@ readonly model: StepResult<TOOLS>['model'];

@@ -135,4 +135,3 @@ import {

SEPARATOR extends string,
> implements ProviderRegistryProvider<PROVIDERS, SEPARATOR>
{
> implements ProviderRegistryProvider<PROVIDERS, SEPARATOR> {
private providers: PROVIDERS = {} as PROVIDERS;

@@ -139,0 +138,0 @@ private separator: SEPARATOR;

@@ -39,5 +39,3 @@ import { Attributes, AttributeValue } from '@opentelemetry/api';

(attributes, [key, value]) => {
if (value != undefined) {
attributes[`ai.telemetry.metadata.${key}`] = value as AttributeValue;
}
attributes[`ai.telemetry.metadata.${key}`] = value;
return attributes;

@@ -44,0 +42,0 @@ },

import { AttributeValue, Tracer } from '@opentelemetry/api';
import type { JSONValue } from '@ai-sdk/provider';
import type { TelemetryIntegration } from './telemetry-integration';

@@ -40,3 +39,3 @@

*/
metadata?: Record<string, JSONValue>;
metadata?: Record<string, AttributeValue>;

@@ -43,0 +42,0 @@ /**

@@ -58,4 +58,3 @@ import { Output } from '../generate-text/output';

>,
> implements ChatTransport<UI_MESSAGE>
{
> implements ChatTransport<UI_MESSAGE> {
private readonly agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;

@@ -62,0 +61,0 @@ private readonly agentOptions: CALL_OPTIONS | undefined;

@@ -116,5 +116,5 @@ import {

export abstract class HttpChatTransport<UI_MESSAGE extends UIMessage>
implements ChatTransport<UI_MESSAGE>
{
export abstract class HttpChatTransport<
UI_MESSAGE extends UIMessage,
> implements ChatTransport<UI_MESSAGE> {
protected api: string;

@@ -121,0 +121,0 @@ protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];

import type { JSONValue } from '@ai-sdk/provider';
import type { ProviderOptions } from '@ai-sdk/provider-utils';
import type { ModelEventInfo } from '../generate-text/callback-events';
import type { Embedding, ProviderMetadata } from '../types';
import type { EmbeddingModelUsage } from '../types/usage';
import type { Warning } from '../types/warning';
/**
 * Payload delivered to the `onStart` callback of the embed and embedMany
 * operations.
 *
 * Emitted when the operation starts, before any call to the embedding
 * model is made.
 */
export interface EmbedOnStartEvent {
  /** Unique id for this embed call; correlates start and finish events. */
  readonly callId: string;
  /** Operation type identifier (e.g. 'ai.embed' or 'ai.embedMany'). */
  readonly operationId: string;
  /** The embedding model in use. */
  readonly model: ModelEventInfo;
  /** Input being embedded: a string for embed, a string array for embedMany. */
  readonly value: string | string[];
  /** Maximum retry count for failed requests. */
  readonly maxRetries: number;
  /** Signal that can cancel the operation, if one was provided. */
  readonly abortSignal: AbortSignal | undefined;
  /** Extra HTTP headers sent with the request, if any. */
  readonly headers: Record<string, string | undefined> | undefined;
  /** Provider-specific options, if any. */
  readonly providerOptions: ProviderOptions | undefined;
  /** Whether telemetry is enabled. */
  readonly isEnabled: boolean | undefined;
  /** Whether inputs are recorded in telemetry (on by default). */
  readonly recordInputs: boolean | undefined;
  /** Whether outputs are recorded in telemetry (on by default). */
  readonly recordOutputs: boolean | undefined;
  /** Telemetry function id used to group related operations. */
  readonly functionId: string | undefined;
  /** Extra metadata carried over from telemetry settings. */
  readonly metadata: Record<string, JSONValue> | undefined;
}
/**
 * Payload delivered to the `onFinish` callback of the embed and embedMany
 * operations.
 *
 * Emitted once the operation has completed, after the embedding model
 * has returned.
 */
export interface EmbedOnFinishEvent {
  /** Unique id for this embed call; correlates start and finish events. */
  readonly callId: string;
  /** Operation type identifier (e.g. 'ai.embed' or 'ai.embedMany'). */
  readonly operationId: string;
  /** The embedding model that handled the call. */
  readonly model: ModelEventInfo;
  /** Input that was embedded: a string for embed, a string array for embedMany. */
  readonly value: string | string[];
  /** Resulting embedding(s): one vector for embed, an array for embedMany. */
  readonly embedding: Embedding | Embedding[];
  /** Token usage reported for the embedding operation. */
  readonly usage: EmbeddingModelUsage;
  /** Model warnings, e.g. for unsupported settings. */
  readonly warnings: Warning[];
  /** Provider-specific metadata, if any. */
  readonly providerMetadata: ProviderMetadata | undefined;
  /**
   * Response data (headers/body). A single object for embed; an array —
   * possibly with undefined entries — for embedMany.
   */
  readonly response:
    | { headers?: Record<string, string>; body?: unknown }
    | Array<{ headers?: Record<string, string>; body?: unknown } | undefined>
    | undefined;
  /** Whether telemetry is enabled. */
  readonly isEnabled: boolean | undefined;
  /** Whether inputs are recorded in telemetry (on by default). */
  readonly recordInputs: boolean | undefined;
  /** Whether outputs are recorded in telemetry (on by default). */
  readonly recordOutputs: boolean | undefined;
  /** Telemetry function id used to group related operations. */
  readonly functionId: string | undefined;
  /** Extra metadata carried over from telemetry settings. */
  readonly metadata: Record<string, JSONValue> | undefined;
}

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display