@ai-sdk/anthropic
Advanced tools
Comparing version 1.1.0 to 1.1.1
# @ai-sdk/anthropic | ||
## 1.1.1 | ||
### Patch Changes | ||
- 858f934: feat (provider/anthropic): default cache-control on and mark model setting deprecated | ||
- b284e2c: feat (provider/google-vertex): support prompt caching for Anthropic Claude models | ||
- Updated dependencies [e7a9ec9] | ||
- Updated dependencies [0a699f1] | ||
- @ai-sdk/provider-utils@2.1.1 | ||
- @ai-sdk/provider@1.0.5 | ||
## 1.1.0 | ||
@@ -4,0 +15,0 @@ |
@@ -8,5 +8,8 @@ import { ProviderV1, LanguageModelV1 } from '@ai-sdk/provider'; | ||
/** | ||
Enable Anthropic cache control (beta feature). This will add the beta header and | ||
allow you to use provider-specific cacheControl metadata. | ||
*/ | ||
Enable Anthropic cache control. This will allow you to use provider-specific | ||
`cacheControl` metadata. | ||
@deprecated cache control is now enabled by default (meaning you are able to | ||
optionally mark content for caching) and this setting is no longer needed. | ||
*/ | ||
cacheControl?: boolean; | ||
@@ -13,0 +16,0 @@ } |
@@ -56,4 +56,3 @@ "use strict"; | ||
function convertToAnthropicMessagesPrompt({ | ||
prompt, | ||
cacheControl: isCacheControlEnabled | ||
prompt | ||
}) { | ||
@@ -67,5 +66,2 @@ var _a, _b, _c, _d; | ||
var _a2; | ||
if (isCacheControlEnabled === false) { | ||
return void 0; | ||
} | ||
const anthropic2 = providerMetadata == null ? void 0 : providerMetadata.anthropic; | ||
@@ -437,3 +433,2 @@ const cacheControlValue = (_a2 = anthropic2 == null ? void 0 : anthropic2.cacheControl) != null ? _a2 : anthropic2 == null ? void 0 : anthropic2.cache_control; | ||
}) { | ||
var _a; | ||
const type = mode.type; | ||
@@ -467,4 +462,3 @@ const warnings = []; | ||
const { prompt: messagesPrompt, betas: messagesBetas } = convertToAnthropicMessagesPrompt({ | ||
prompt, | ||
cacheControl: (_a = this.settings.cacheControl) != null ? _a : false | ||
prompt | ||
}); | ||
@@ -526,5 +520,2 @@ const baseArgs = { | ||
}) { | ||
if (this.settings.cacheControl) { | ||
betas.add("prompt-caching-2024-07-31"); | ||
} | ||
return (0, import_provider_utils3.combineHeaders)( | ||
@@ -594,3 +585,3 @@ await (0, import_provider_utils3.resolve)(this.config.headers), | ||
warnings, | ||
providerMetadata: this.settings.cacheControl === true ? { | ||
providerMetadata: { | ||
anthropic: { | ||
@@ -600,3 +591,3 @@ cacheCreationInputTokens: (_c = response.usage.cache_creation_input_tokens) != null ? _c : null, | ||
} | ||
} : void 0, | ||
}, | ||
request: { body: JSON.stringify(args) } | ||
@@ -711,10 +702,8 @@ }; | ||
usage.completionTokens = value.message.usage.output_tokens; | ||
if (self.settings.cacheControl === true) { | ||
providerMetadata = { | ||
anthropic: { | ||
cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null, | ||
cacheReadInputTokens: (_b = value.message.usage.cache_read_input_tokens) != null ? _b : null | ||
} | ||
}; | ||
} | ||
providerMetadata = { | ||
anthropic: { | ||
cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null, | ||
cacheReadInputTokens: (_b = value.message.usage.cache_read_input_tokens) != null ? _b : null | ||
} | ||
}; | ||
controller.enqueue({ | ||
@@ -721,0 +710,0 @@ type: "response-metadata", |
@@ -8,5 +8,8 @@ import { LanguageModelV1 } from '@ai-sdk/provider'; | ||
/** | ||
Enable Anthropic cache control (beta feature). This will add the beta header and | ||
allow you to use provider-specific cacheControl metadata. | ||
*/ | ||
Enable Anthropic cache control. This will allow you to use provider-specific | ||
`cacheControl` metadata. | ||
@deprecated cache control is now enabled by default (meaning you are able to | ||
optionally mark content for caching) and this setting is no longer needed. | ||
*/ | ||
cacheControl?: boolean; | ||
@@ -13,0 +16,0 @@ } |
@@ -52,4 +52,3 @@ "use strict"; | ||
function convertToAnthropicMessagesPrompt({ | ||
prompt, | ||
cacheControl: isCacheControlEnabled | ||
prompt | ||
}) { | ||
@@ -63,5 +62,2 @@ var _a, _b, _c, _d; | ||
var _a2; | ||
if (isCacheControlEnabled === false) { | ||
return void 0; | ||
} | ||
const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic; | ||
@@ -433,3 +429,2 @@ const cacheControlValue = (_a2 = anthropic == null ? void 0 : anthropic.cacheControl) != null ? _a2 : anthropic == null ? void 0 : anthropic.cache_control; | ||
}) { | ||
var _a; | ||
const type = mode.type; | ||
@@ -463,4 +458,3 @@ const warnings = []; | ||
const { prompt: messagesPrompt, betas: messagesBetas } = convertToAnthropicMessagesPrompt({ | ||
prompt, | ||
cacheControl: (_a = this.settings.cacheControl) != null ? _a : false | ||
prompt | ||
}); | ||
@@ -522,5 +516,2 @@ const baseArgs = { | ||
}) { | ||
if (this.settings.cacheControl) { | ||
betas.add("prompt-caching-2024-07-31"); | ||
} | ||
return (0, import_provider_utils3.combineHeaders)( | ||
@@ -590,3 +581,3 @@ await (0, import_provider_utils3.resolve)(this.config.headers), | ||
warnings, | ||
providerMetadata: this.settings.cacheControl === true ? { | ||
providerMetadata: { | ||
anthropic: { | ||
@@ -596,3 +587,3 @@ cacheCreationInputTokens: (_c = response.usage.cache_creation_input_tokens) != null ? _c : null, | ||
} | ||
} : void 0, | ||
}, | ||
request: { body: JSON.stringify(args) } | ||
@@ -707,10 +698,8 @@ }; | ||
usage.completionTokens = value.message.usage.output_tokens; | ||
if (self.settings.cacheControl === true) { | ||
providerMetadata = { | ||
anthropic: { | ||
cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null, | ||
cacheReadInputTokens: (_b = value.message.usage.cache_read_input_tokens) != null ? _b : null | ||
} | ||
}; | ||
} | ||
providerMetadata = { | ||
anthropic: { | ||
cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null, | ||
cacheReadInputTokens: (_b = value.message.usage.cache_read_input_tokens) != null ? _b : null | ||
} | ||
}; | ||
controller.enqueue({ | ||
@@ -717,0 +706,0 @@ type: "response-metadata", |
{ | ||
"name": "@ai-sdk/anthropic", | ||
"version": "1.1.0", | ||
"version": "1.1.1", | ||
"license": "Apache-2.0", | ||
@@ -29,4 +29,4 @@ "sideEffects": false, | ||
"dependencies": { | ||
"@ai-sdk/provider": "1.0.4", | ||
"@ai-sdk/provider-utils": "2.1.0" | ||
"@ai-sdk/provider": "1.0.5", | ||
"@ai-sdk/provider-utils": "2.1.1" | ||
}, | ||
@@ -33,0 +33,0 @@ "devDependencies": { |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
424455
4123
+ Added @ai-sdk/provider@1.0.5 (transitive)
+ Added @ai-sdk/provider-utils@2.1.1 (transitive)
- Removed @ai-sdk/provider@1.0.4 (transitive)
- Removed @ai-sdk/provider-utils@2.1.0 (transitive)
Updated @ai-sdk/provider@1.0.5
Updated @ai-sdk/provider-utils@2.1.1