@ai-sdk/anthropic - npm Package Compare versions

Comparing version 0.0.0-85f9a635-20240518005312 to 0.0.0-fbda7b18-20240815003233


./dist/index.js

@@ -56,55 +56,90 @@ "use strict";

function convertToAnthropicMessagesPrompt(prompt) {
var _a;
const blocks = groupIntoBlocks(prompt);
let system = void 0;
const messages = [];
for (const { role, content } of prompt) {
switch (role) {
for (let i = 0; i < blocks.length; i++) {
const block = blocks[i];
const type = block.type;
switch (type) {
case "system": {
if (system != null) {
throw new import_provider.UnsupportedFunctionalityError({
functionality: "Multiple system messages"
functionality: "Multiple system messages that are separated by user/assistant messages"
});
}
system = content;
system = block.messages.map(({ content }) => content).join("\n");
break;
}
case "user": {
messages.push({
role: "user",
content: content.map((part) => {
var _a;
switch (part.type) {
case "text": {
return { type: "text", text: part.text };
}
case "image": {
if (part.image instanceof URL) {
throw new import_provider.UnsupportedFunctionalityError({
functionality: "URL image parts"
});
} else {
return {
type: "image",
source: {
type: "base64",
media_type: (_a = part.mimeType) != null ? _a : "image/jpeg",
data: (0, import_provider_utils2.convertUint8ArrayToBase64)(part.image)
const anthropicContent = [];
for (const { role, content } of block.messages) {
switch (role) {
case "user": {
for (const part of content) {
switch (part.type) {
case "text": {
anthropicContent.push({ type: "text", text: part.text });
break;
}
case "image": {
if (part.image instanceof URL) {
throw new import_provider.UnsupportedFunctionalityError({
functionality: "Image URLs in user messages"
});
}
};
anthropicContent.push({
type: "image",
source: {
type: "base64",
media_type: (_a = part.mimeType) != null ? _a : "image/jpeg",
data: (0, import_provider_utils2.convertUint8ArrayToBase64)(part.image)
}
});
break;
}
}
}
break;
}
})
});
case "tool": {
for (const part of content) {
anthropicContent.push({
type: "tool_result",
tool_use_id: part.toolCallId,
content: JSON.stringify(part.result),
is_error: part.isError
});
}
break;
}
default: {
const _exhaustiveCheck = role;
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
}
}
}
messages.push({ role: "user", content: anthropicContent });
break;
}
case "assistant": {
messages.push({
role: "assistant",
content: content.map((part) => {
const anthropicContent = [];
for (const { content } of block.messages) {
for (let j = 0; j < content.length; j++) {
const part = content[j];
switch (part.type) {
case "text": {
return { type: "text", text: part.text };
anthropicContent.push({
type: "text",
text: (
// trim the last text part if it's the last message in the block
// because Anthropic does not allow trailing whitespace
// in pre-filled assistant responses
i === blocks.length - 1 && j === block.messages.length - 1 ? part.text.trim() : part.text
)
});
break;
}
case "tool-call": {
return {
anthropicContent.push({
type: "tool_use",

@@ -114,19 +149,57 @@ id: part.toolCallId,

input: part.args
};
});
break;
}
}
})
});
}
}
messages.push({ role: "assistant", content: anthropicContent });
break;
}
default: {
const _exhaustiveCheck = type;
throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
}
}
}
return {
system,
messages
};
}
function groupIntoBlocks(prompt) {
const blocks = [];
let currentBlock = void 0;
for (const { role, content } of prompt) {
switch (role) {
case "system": {
if ((currentBlock == null ? void 0 : currentBlock.type) !== "system") {
currentBlock = { type: "system", messages: [] };
blocks.push(currentBlock);
}
currentBlock.messages.push({ role, content });
break;
}
case "assistant": {
if ((currentBlock == null ? void 0 : currentBlock.type) !== "assistant") {
currentBlock = { type: "assistant", messages: [] };
blocks.push(currentBlock);
}
currentBlock.messages.push({ role, content });
break;
}
case "user": {
if ((currentBlock == null ? void 0 : currentBlock.type) !== "user") {
currentBlock = { type: "user", messages: [] };
blocks.push(currentBlock);
}
currentBlock.messages.push({ role, content });
break;
}
case "tool": {
messages.push({
role: "user",
content: content.map((part) => ({
type: "tool_result",
tool_use_id: part.toolCallId,
content: JSON.stringify(part.result),
is_error: part.isError
}))
});
if ((currentBlock == null ? void 0 : currentBlock.type) !== "user") {
currentBlock = { type: "user", messages: [] };
blocks.push(currentBlock);
}
currentBlock.messages.push({ role, content });
break;

@@ -140,6 +213,3 @@ }

}
return {
system,
messages
};
return blocks;
}
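The rewritten converter above works in two passes: `groupIntoBlocks` collects consecutive `system`/`user`/`assistant`/`tool` messages into blocks (with `tool` messages joining `user` blocks), and the main loop then flattens each block into a single Anthropic message, turning tool results into `tool_result` parts. A hedged sketch of the resulting shape, hand-written to mirror the code above rather than captured from the package:

```ts
// Illustrative input in the AI SDK prompt format; the tool_use fields
// elided by the hunk above are abbreviated with "...".
const prompt = [
  { role: 'user', content: [{ type: 'text', text: 'Weather in Berlin?' }] },
  { role: 'assistant', content: [{ type: 'tool-call', toolCallId: 'c1', args: { city: 'Berlin' } }] },
  { role: 'tool', content: [{ type: 'tool-result', toolCallId: 'c1', result: { tempC: 21 }, isError: false }] },
];

// convertToAnthropicMessagesPrompt(prompt).messages becomes roughly:
// [
//   { role: 'user', content: [{ type: 'text', text: 'Weather in Berlin?' }] },
//   { role: 'assistant', content: [{ type: 'tool_use', id: 'c1', input: { city: 'Berlin' }, ... }] },
//   { role: 'user', content: [{ type: 'tool_result', tool_use_id: 'c1', content: '{"tempC":21}', is_error: false }] },
// ]
```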

@@ -158,3 +228,3 @@

default:
return "other";
return "unknown";
}

@@ -168,2 +238,3 @@ }

this.defaultObjectGenerationMode = "tool";
this.supportsImageUrls = false;
this.modelId = modelId;

@@ -176,3 +247,3 @@ this.settings = settings;

}
getArgs({
async getArgs({
mode,

@@ -183,7 +254,9 @@ prompt,

topP,
topK,
frequencyPenalty,
presencePenalty,
stopSequences,
responseFormat,
seed
}) {
var _a;
const type = mode.type;

@@ -209,2 +282,9 @@ const warnings = [];

}
if (responseFormat != null && responseFormat.type !== "text") {
warnings.push({
type: "unsupported-setting",
setting: "responseFormat",
details: "JSON response format is not supported."
});
}
const messagesPrompt = convertToAnthropicMessagesPrompt(prompt);

@@ -215,3 +295,3 @@ const baseArgs = {

// model specific settings:
top_k: this.settings.topK,
top_k: topK != null ? topK : this.settings.topK,
// standardized settings:

@@ -222,2 +302,3 @@ max_tokens: maxTokens != null ? maxTokens : 4096,

top_p: topP,
stop_sequences: stopSequences,
// prompt:
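The hunks above thread the new standardized call settings through `getArgs`: a per-call `topK` now takes precedence over the model-level `topK` setting, `stopSequences` maps to Anthropic's `stop_sequences`, and a non-text `responseFormat` only produces an `unsupported-setting` warning. A minimal usage sketch, assuming the standard `generateText` entry point from the `ai` package:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

const { text } = await generateText({
  model: anthropic('claude-3-haiku-20240307', { topK: 100 }),
  topK: 40, // the per-call value wins over the model-level setting above
  stopSequences: ['\n\n'], // forwarded as stop_sequences
  prompt: 'List three prime numbers, one per line.',
});
```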

@@ -229,12 +310,4 @@ system: messagesPrompt.system,

case "regular": {
const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
return {
args: {
...baseArgs,
tools: tools == null ? void 0 : tools.map((tool) => ({
name: tool.name,
description: tool.description,
input_schema: tool.parameters
}))
},
args: { ...baseArgs, ...prepareToolsAndToolChoice(mode) },
warnings

@@ -250,12 +323,7 @@ };

const { name, description, parameters } = mode.tool;
baseArgs.messages[baseArgs.messages.length - 1].content.push({
type: "text",
text: `
Use the '${name}' tool.`
});
return {
args: {
...baseArgs,
tools: [{ name, description, input_schema: parameters }]
tools: [{ name, description, input_schema: parameters }],
tool_choice: { type: "tool", name }
},
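In `object-tool` mode the provider previously appended a `Use the '<name>' tool.` instruction to the last message; it now forces the tool natively with `tool_choice: { type: "tool", name }`. At the SDK level this is the path that `generateObject` exercises; a hedged sketch, with an illustrative model id and schema:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { generateObject } from 'ai';
import { z } from 'zod';

// The forced tool call now rides on Anthropic's native tool_choice
// instead of an injected text instruction.
const { object } = await generateObject({
  model: anthropic('claude-3-5-sonnet-20240620'),
  schema: z.object({ name: z.string(), ingredients: z.array(z.string()) }),
  prompt: 'Generate a simple lasagna recipe.',
});
```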

@@ -265,7 +333,2 @@ warnings

}
case "object-grammar": {
throw new import_provider2.UnsupportedFunctionalityError({
functionality: "grammar-mode object generation"
});
}
default: {

@@ -278,6 +341,6 @@ const _exhaustiveCheck = type;

async doGenerate(options) {
const { args, warnings } = this.getArgs(options);
const { args, warnings } = await this.getArgs(options);
const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
url: `${this.config.baseURL}/messages`,
headers: this.config.headers(),
headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
body: args,

@@ -288,3 +351,4 @@ failedResponseHandler: anthropicFailedResponseHandler,

),
abortSignal: options.abortSignal
abortSignal: options.abortSignal,
fetch: this.config.fetch
});
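`doGenerate` now merges request-scoped headers into the provider headers via `combineHeaders` (previously `options.headers` was ignored) and forwards the configurable `fetch`. A hedged sketch of per-call headers; the beta flag shown is just an example value:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

const { text } = await generateText({
  model: anthropic('claude-3-haiku-20240307'),
  // Merged with the provider's own headers (x-api-key, anthropic-version, ...):
  headers: { 'anthropic-beta': 'prompt-caching-2024-07-31' }, // example value
  prompt: 'Say hello.',
});
```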

@@ -326,8 +390,19 @@ const { messages: rawPrompt, ...rawSettings } = args;

async doStream(options) {
const { args, warnings } = this.getArgs(options);
const { args, warnings } = await this.getArgs(options);
let { system, ...rest } = args;
if (system && system.startsWith("__cache__me__pls__")) {
system = [
{
type: "text",
text: system.replace("__cache__me__pls__", ""),
cache_control: { type: "ephemeral" }
}
];
}
const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
url: `${this.config.baseURL}/messages`,
headers: this.config.headers(),
headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
body: {
...args,
...rest,
system,
stream: true

@@ -339,6 +414,7 @@ },

),
abortSignal: options.abortSignal
abortSignal: options.abortSignal,
fetch: this.config.fetch
});
const { messages: rawPrompt, ...rawSettings } = args;
let finishReason = "other";
let finishReason = "unknown";
const usage = {
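Note that the `__cache__me__pls__` handling in `doStream` above is a custom patch in this snapshot build, not an official `@ai-sdk/anthropic` API: a system prompt carrying that prefix is rewritten into a text block with `cache_control: { type: "ephemeral" }`, opting it into Anthropic's prompt caching. A hedged usage sketch under that assumption:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { streamText } from 'ai';

// Illustrative only: the magic prefix is specific to this fork/snapshot.
const sharedInstructions = '...large, frequently reused system prompt...';

const result = await streamText({
  model: anthropic('claude-3-5-sonnet-20240620'),
  system: '__cache__me__pls__' + sharedInstructions,
  prompt: 'Answer using the cached instructions.',
});
```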

@@ -442,2 +518,6 @@ promptTokens: Number.NaN,

}
case "error": {
controller.enqueue({ type: "error", error: value.error });
return;
}
default: {

@@ -523,2 +603,9 @@ const _exhaustiveCheck = value;

import_zod2.z.object({
type: import_zod2.z.literal("error"),
error: import_zod2.z.object({
type: import_zod2.z.string(),
message: import_zod2.z.string()
})
}),
import_zod2.z.object({
type: import_zod2.z.literal("message_delta"),

@@ -535,2 +622,36 @@ delta: import_zod2.z.object({ stop_reason: import_zod2.z.string().optional().nullable() }),

]);
function prepareToolsAndToolChoice(mode) {
var _a;
const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
if (tools == null) {
return { tools: void 0, tool_choice: void 0 };
}
const mappedTools = tools.map((tool) => ({
name: tool.name,
description: tool.description,
input_schema: tool.parameters
}));
const toolChoice = mode.toolChoice;
if (toolChoice == null) {
return { tools: mappedTools, tool_choice: void 0 };
}
const type = toolChoice.type;
switch (type) {
case "auto":
return { tools: mappedTools, tool_choice: { type: "auto" } };
case "required":
return { tools: mappedTools, tool_choice: { type: "any" } };
case "none":
return { tools: void 0, tool_choice: void 0 };
case "tool":
return {
tools: mappedTools,
tool_choice: { type: "tool", name: toolChoice.toolName }
};
default: {
const _exhaustiveCheck = type;
throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
}
}
}
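The new `prepareToolsAndToolChoice` helper translates the SDK's standardized tool-choice values into Anthropic's `tool_choice` parameter; notably, `required` becomes Anthropic's `any`, and `none` simply omits the tools. A standalone restatement of that mapping (the type names here are illustrative, not package exports):

```ts
type SdkToolChoice =
  | { type: 'auto' | 'required' | 'none' }
  | { type: 'tool'; toolName: string };

type AnthropicToolChoice =
  | { type: 'auto' }
  | { type: 'any' }
  | { type: 'tool'; name: string }
  | undefined;

function mapToolChoice(choice?: SdkToolChoice): AnthropicToolChoice {
  if (choice == null) return undefined; // the model decides freely
  switch (choice.type) {
    case 'auto':
      return { type: 'auto' }; // the model may or may not call a tool
    case 'required':
      return { type: 'any' }; // Anthropic spells "required" as "any"
    case 'none':
      return undefined; // the provider omits the tools entirely instead
    case 'tool':
      return { type: 'tool', name: choice.toolName }; // force a specific tool
  }
}
```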

@@ -584,3 +705,2 @@ // src/anthropic-facade.ts

"anthropic-version": "2023-06-01",
"anthropic-beta": "tools-2024-05-16",
"x-api-key": (0, import_provider_utils5.loadApiKey)({

@@ -596,3 +716,4 @@ apiKey: options.apiKey,

baseURL,
headers: getHeaders
headers: getHeaders,
fetch: options.fetch
});

@@ -607,2 +728,3 @@ const provider = function(modelId, settings) {

};
provider.languageModel = createChatModel;
provider.chat = createChatModel;

@@ -609,0 +731,0 @@ provider.messages = createChatModel;
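The facade now registers the chat model factory under several aliases in addition to direct invocation. A small sketch, assuming the default exported `anthropic` provider instance:

```ts
import { anthropic } from '@ai-sdk/anthropic';

// All of these resolve to the same AnthropicMessagesLanguageModel factory:
const viaCall = anthropic('claude-3-haiku-20240307');
const viaChat = anthropic.chat('claude-3-haiku-20240307');
const viaLanguageModel = anthropic.languageModel('claude-3-haiku-20240307');
const viaMessages = anthropic.messages('claude-3-haiku-20240307');
```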

./dist/index.d.ts

import { LanguageModelV1 } from '@ai-sdk/provider';
type AnthropicMessagesModelId = 'claude-3-opus-20240229' | 'claude-3-sonnet-20240229' | 'claude-3-haiku-20240307' | (string & {});
type AnthropicMessagesModelId = 'claude-3-5-sonnet-20240620' | 'claude-3-opus-20240229' | 'claude-3-sonnet-20240229' | 'claude-3-haiku-20240307' | (string & {});
interface AnthropicMessagesSettings {

@@ -10,2 +10,4 @@ /**

Recommended for advanced use cases only. You usually only need to use temperature.
@deprecated use the topK setting on the request instead.
*/

@@ -19,2 +21,3 @@ topK?: number;

headers: () => Record<string, string | undefined>;
fetch?: typeof fetch;
};

@@ -24,2 +27,3 @@ declare class AnthropicMessagesLanguageModel implements LanguageModelV1 {

readonly defaultObjectGenerationMode = "tool";
readonly supportsImageUrls = false;
readonly modelId: AnthropicMessagesModelId;

@@ -43,2 +47,6 @@ readonly settings: AnthropicMessagesSettings;

*/
languageModel(modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings): AnthropicMessagesLanguageModel;
/**
Creates a model for text generation.
*/
chat(modelId: AnthropicMessagesModelId, settings?: AnthropicMessagesSettings): AnthropicMessagesLanguageModel;

@@ -69,2 +77,7 @@ /**

headers?: Record<string, string>;
/**
Custom fetch implementation. You can use it as a middleware to intercept requests,
or to provide a custom fetch implementation for e.g. testing.
*/
fetch?: typeof fetch;
generateId?: () => string;

@@ -71,0 +84,0 @@ }
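The declaration diff documents the new `fetch?: typeof fetch` option as a middleware hook. A hedged sketch of intercepting requests with it:

```ts
import { createAnthropic } from '@ai-sdk/anthropic';

// Wrap the global fetch to log outgoing requests; handy for debugging
// or for supplying a test double, per the doc comment above.
const anthropic = createAnthropic({
  fetch: async (input, init) => {
    console.log('anthropic request:', input);
    return fetch(input, init);
  },
});
```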


package.json

{
"name": "@ai-sdk/anthropic",
"version": "0.0.0-85f9a635-20240518005312",
"version": "0.0.0-fbda7b18-20240815003233",
"license": "Apache-2.0",

@@ -21,4 +21,4 @@ "sideEffects": false,

"dependencies": {
"@ai-sdk/provider": "0.0.0-85f9a635-20240518005312",
"@ai-sdk/provider-utils": "0.0.0-85f9a635-20240518005312"
"@ai-sdk/provider": "0.0.19",
"@ai-sdk/provider-utils": "1.0.11"
},

@@ -29,3 +29,3 @@ "devDependencies": {

"typescript": "5.1.3",
"zod": "3.22.4",
"zod": "3.23.8",
"@vercel/ai-tsconfig": "0.0.0"

@@ -36,7 +36,2 @@ },

},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
},
"engines": {

@@ -43,0 +38,0 @@ "node": ">=18"

README.md

# Vercel AI SDK - Anthropic Provider
The **[Anthropic provider](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic)** for the [Vercel AI SDK](https://sdk.vercel.ai/docs) contains language model support for the [Anthropic Messages API](https://docs.anthropic.com/claude/reference/messages_post). It creates language model objects that can be used with the `generateText`, `streamText` and `generateObject` AI functions.

> **Note: The Anthropic API does not support streaming tool calls.**
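Plain text streaming is unaffected by that limitation. A minimal sketch using `streamText` from the `ai` package:

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { streamText } from 'ai';

const { textStream } = await streamText({
  model: anthropic('claude-3-haiku-20240307'),
  prompt: 'Explain prompt caching in two sentences.',
});

for await (const chunk of textStream) {
  process.stdout.write(chunk);
}
```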
## Setup

@@ -24,54 +21,16 @@

## Example

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

const { text } = await generateText({
  model: anthropic('claude-3-haiku-20240307'),
  prompt: 'Write a vegetarian lasagna recipe for 4 people.',
});
```

If you need a customized setup, you can import `createAnthropic` from `@ai-sdk/anthropic` and create a provider instance with your settings:

```ts
import { createAnthropic } from '@ai-sdk/anthropic';

const anthropic = createAnthropic({
  // custom settings
});
```
You can use the following optional settings to customize the Anthropic provider instance (a combined sketch follows the list):
- **baseURL** _string_

  Use a different URL prefix for API calls, e.g. to use proxy servers.
  The default prefix is `https://api.anthropic.com/v1`.

- **apiKey** _string_

  API key that is sent using the `x-api-key` header.
  It defaults to the `ANTHROPIC_API_KEY` environment variable.

- **headers** _Record<string,string>_

  Custom headers to include in the requests.
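A minimal sketch combining these settings; the proxy URL, environment variable, and header below are placeholder values:

```ts
import { createAnthropic } from '@ai-sdk/anthropic';

const anthropic = createAnthropic({
  baseURL: 'https://my-proxy.example.com/v1', // placeholder proxy endpoint
  apiKey: process.env.MY_ANTHROPIC_KEY, // defaults to ANTHROPIC_API_KEY if omitted
  headers: { 'x-request-source': 'docs-example' }, // placeholder custom header
});
```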
## Models
You can create models that call the [Anthropic Messages API](https://docs.anthropic.com/claude/reference/messages_post) using the provider instance.
The first argument is the model id, e.g. `claude-3-haiku-20240307`.
Some models have multi-modal capabilities.
```ts
const model = anthropic('claude-3-haiku-20240307');
```
Anthropic Messages models also support some model-specific settings that are not part of the [standard call settings](/docs/ai-core/settings).
You can pass them as an options argument:
```ts
const model = anthropic('claude-3-haiku-20240307', {
topK: 0.2,
});
```
The following optional settings are available for Anthropic models:

- **topK** _number_

  Only sample from the top K options for each subsequent token.
  Used to remove "long tail" low probability responses.
  Recommended for advanced use cases only. You usually only need to use temperature.
## Documentation

Please check out the **[Anthropic provider documentation](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic)** for more information.
