Socket
Socket
Sign in · Demo · Install

ai

Package Overview
Dependencies
Maintainers
11
Versions
246
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

ai - npm Package Compare versions

Comparing version 3.3.8 to 3.3.9

16

package.json
{
"name": "ai",
"version": "3.3.8",
"version": "3.3.9",
"description": "Vercel AI SDK - The AI Toolkit for TypeScript and JavaScript",

@@ -61,9 +61,9 @@ "license": "Apache-2.0",

"dependencies": {
"@ai-sdk/provider": "0.0.19",
"@ai-sdk/provider-utils": "1.0.12",
"@ai-sdk/react": "0.0.44",
"@ai-sdk/solid": "0.0.35",
"@ai-sdk/svelte": "0.0.37",
"@ai-sdk/ui-utils": "0.0.32",
"@ai-sdk/vue": "0.0.36",
"@ai-sdk/provider": "0.0.20",
"@ai-sdk/provider-utils": "1.0.13",
"@ai-sdk/react": "0.0.45",
"@ai-sdk/solid": "0.0.36",
"@ai-sdk/svelte": "0.0.38",
"@ai-sdk/ui-utils": "0.0.33",
"@ai-sdk/vue": "0.0.37",
"@opentelemetry/api": "1.9.0",

@@ -70,0 +70,0 @@ "eventsource-parser": "1.1.2",

@@ -7,2 +7,4 @@ import { Message } from '@ai-sdk/ui-utils';

* @see https://docs.anthropic.com/claude/reference/getting-started-with-the-api
*
* @deprecated Will be removed. Use the new provider architecture instead.
*/

@@ -14,2 +16,4 @@ declare function experimental_buildAnthropicPrompt(messages: Pick<Message, 'content' | 'role'>[]): string;

* @see https://docs.anthropic.com/claude/reference/messages_post
*
* @deprecated Will be removed. Use the new provider architecture instead.
*/

@@ -27,3 +31,4 @@ declare function experimental_buildAnthropicMessages(messages: Pick<Message, 'content' | 'role'>[]): {

* Does not support `function` messages.
* @see https://huggingface.co/HuggingFaceH4/starchat-beta
*
* @deprecated Will be removed. Use the new provider architecture instead.
*/

@@ -34,3 +39,4 @@ declare function experimental_buildStarChatBetaPrompt(messages: Pick<Message, 'content' | 'role'>[]): string;

* Does not support `function` or `system` messages.
* @see https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5
*
* @deprecated Will be removed. Use the new provider architecture instead.
*/

@@ -42,5 +48,10 @@ declare function experimental_buildOpenAssistantPrompt(messages: Pick<Message, 'content' | 'role'>[]): string;

* @see https://huggingface.co/meta-llama/Llama-2-70b-chat-hf and https://huggingface.co/blog/llama2#how-to-prompt-llama-2
*
* @deprecated Will be removed. Use the new provider architecture instead.
*/
declare function experimental_buildLlama2Prompt(messages: Pick<Message, 'content' | 'role'>[]): string;
/**
 * Builds an array of OpenAI `ChatCompletionMessageParam`s from AI SDK
 * `Message`s (per the declared signature; implementation not visible here).
 *
 * @deprecated Will be removed. Use the new provider architecture instead.
 */
declare function experimental_buildOpenAIMessages(messages: Message[]): ChatCompletionMessageParam[];

@@ -47,0 +58,0 @@ type ChatCompletionMessageParam = ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam | ChatCompletionToolMessageParam | ChatCompletionFunctionMessageParam;

@@ -1,2 +0,2 @@

import { LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1 } from '@ai-sdk/provider';
import { LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1 } from '@ai-sdk/provider';
import { ReactNode } from 'react';

@@ -182,2 +182,56 @@ import { z } from 'zod';

/**
 * Token accounting for a single model call: prompt-side usage,
 * completion-side usage, and their sum.
 */
type CompletionTokenUsage = {
  /** Tokens consumed by the prompt. */
  promptTokens: number;
  /** Tokens produced in the completion. */
  completionTokens: number;
  /** Total tokens used (promptTokens + completionTokens). */
  totalTokens: number;
};
/**
 * Why the language model stopped generating. One of:
 * - `stop`: a stop sequence was produced
 * - `length`: the maximum token budget was reached
 * - `content-filter`: a content-filter violation halted generation
 * - `tool-calls`: the model invoked tool calls
 * - `error`: the model stopped because of an error
 * - `other`: the model stopped for any other reason
 */
type FinishReason = LanguageModelV1FinishReason;
/**
 * Non-fatal warning emitted by the provider for this call. Generation
 * still proceeds, but some requested settings may be unsupported,
 * which can lead to suboptimal results.
 */
type CallWarning = LanguageModelV1CallWarning;
/**
 * Provider-specific metadata passed through from the AI SDK to the
 * provider, enabling functionality that stays fully encapsulated in
 * the provider implementation.
 */
type ProviderMetadata = LanguageModelV1ProviderMetadata;
/**
 * Controls whether and how the model may call tools during generation:
 * - `auto` (default): the model decides whether and which tools to call
 * - `required`: the model must call some tool of its choosing
 * - `none`: the model must not call tools
 * - `{ type: 'tool', toolName }`: the model must call the named tool
 */
type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
  type: 'tool';
  toolName: keyof TOOLS;
};
/**
Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.

@@ -196,2 +250,8 @@ */

text: string;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
}

@@ -214,2 +274,8 @@ /**

mimeType?: string;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
}

@@ -255,2 +321,8 @@ /**

isError?: boolean;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
}

@@ -273,2 +345,8 @@

content: string;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
};

@@ -281,2 +359,8 @@ /**

content: UserContent;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
};

@@ -293,2 +377,8 @@ /**

content: AssistantContent;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
};

@@ -305,2 +395,8 @@ /**

content: ToolContent;
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
*/
experimental_providerMetadata?: ProviderMetadata;
};

@@ -330,50 +426,2 @@ /**

/**
 * Usage counters for one prompt/completion round trip.
 */
type CompletionTokenUsage = {
  /** Number of tokens in the prompt. */
  promptTokens: number;
  /** Number of tokens in the completion. */
  completionTokens: number;
  /** promptTokens + completionTokens. */
  totalTokens: number;
};
/**
 * Termination reason reported by the language model:
 * `stop` (stop sequence), `length` (token limit),
 * `content-filter` (filter violation), `tool-calls` (tool invocation),
 * `error` (model error), or `other`.
 */
type FinishReason = LanguageModelV1FinishReason;
/**
 * Provider warning for this call; the call proceeds, but e.g. some
 * settings might not be supported, which can degrade results.
 */
type CallWarning = LanguageModelV1CallWarning;
/**
 * Tool-choice setting for generation:
 * `auto` (default, model decides), `required` (must call some tool),
 * `none` (no tool calls), or `{ type: 'tool', toolName }` to force a
 * specific tool.
 */
type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
  type: 'tool';
  toolName: keyof TOOLS;
};
/** A renderable React node, available either synchronously or as a Promise. */
type Streamable$1 = ReactNode | Promise<ReactNode>;

@@ -380,0 +428,0 @@ type Renderer$1<T extends Array<any>> = (...args: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc