@promptbook/anthropic-claude
Advanced tools
Comparing version 0.52.0-1 to 0.52.0-2
@@ -0,2 +1,325 @@ | ||
import Anthropic from '@anthropic-ai/sdk'; | ||
import colors from 'colors'; | ||
/*! ***************************************************************************** | ||
Copyright (c) Microsoft Corporation. | ||
Permission to use, copy, modify, and/or distribute this software for any | ||
purpose with or without fee is hereby granted. | ||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH | ||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY | ||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, | ||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM | ||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR | ||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR | ||
PERFORMANCE OF THIS SOFTWARE. | ||
***************************************************************************** */ | ||
/* global Reflect, Promise */ | ||
/* ----------------------------------------------------------------------------
 * TypeScript (tslib) runtime helpers inlined by the bundler.
 * They implement down-leveled class inheritance, object spread, async/await
 * and array destructuring for ES5 targets. Generated code - do not edit.
 * ------------------------------------------------------------------------- */
// Copies the prototype chain from `b` to `d`, picking the fastest mechanism
// available (Object.setPrototypeOf > __proto__ assignment > manual key copy).
var extendStatics = function(d, b) {
    extendStatics = Object.setPrototypeOf ||
        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
        function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
    return extendStatics(d, b);
};
// ES5 emulation of `class d extends b` (used below by PromptbookExecutionError).
function __extends(d, b) {
    if (typeof b !== "function" && b !== null)
        throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
    extendStatics(d, b);
    function __() { this.constructor = d; }
    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
}
// ES5 emulation of object spread / `Object.assign` (shallow copy; later sources win).
var __assign = function() {
    __assign = Object.assign || function __assign(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
// Drives a down-leveled `async` function: wraps the generator produced by
// __generator in a Promise and resumes it whenever an awaited value settles.
function __awaiter(thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
}
// State machine emulating a generator body for ES5 targets; `_.label` tracks
// the resume point and `_.trys`/`_.ops` model try/catch/finally regions.
function __generator(thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (_) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
}
// ES5 emulation of array destructuring over an iterable: reads up to `n`
// values and closes the iterator on early exit or error.
function __read(o, n) {
    var m = typeof Symbol === "function" && o[Symbol.iterator];
    if (!m) return o;
    var i = m.call(o), r, ar = [], e;
    try {
        while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
    }
    catch (error) { e = { error: error }; }
    finally {
        try {
            if (r && !r.done && (m = i["return"])) m.call(i);
        }
        finally { if (e) throw e.error; }
    }
    return ar;
}
/** | ||
* This error indicates errors during the execution of the promptbook | ||
*/ | ||
/**
 * This error indicates errors during the execution of the promptbook
 */
var PromptbookExecutionError = /** @class */ (function (_super) {
    __extends(PromptbookExecutionError, _super);
    /**
     * @param message human-readable description of what failed during execution
     */
    function PromptbookExecutionError(message) {
        var self = _super.call(this, message) || this;
        self.name = 'PromptbookExecutionError';
        // Restore the prototype chain that calling the native Error constructor breaks in ES5
        Object.setPrototypeOf(self, PromptbookExecutionError.prototype);
        return self;
    }
    return PromptbookExecutionError;
}(Error));
/** | ||
* Get current date in ISO 8601 format | ||
* | ||
* @private This is internal util of the promptbook | ||
*/ | ||
/**
 * Get current date in ISO 8601 format
 *
 * @returns the current moment as an ISO 8601 string (UTC, e.g. "2024-05-25T12:00:00.000Z")
 * @private This is internal util of the promptbook
 */
function getCurrentIsoDate() {
    var now = new Date();
    return now.toISOString();
}
/** | ||
* Function computeUsage will create price per one token based on the string value found on openai page | ||
* | ||
* @private within the library, used only as internal helper for `OPENAI_MODELS` | ||
*/ | ||
/**
 * Function computeUsage will create price per one token based on the pricing
 * string found on the OpenAI and Anthropic Claude pages, e.g. "$15.00 / 1M tokens".
 *
 * @param value pricing string in the form `$<price> / <n>M tokens`
 * @returns price in USD per single token
 * @throws {TypeError} when the value does not match the expected format
 * @private within the library, used only as internal helper for `OPENAI_MODELS` and `ANTHROPIC_CLAUDE_MODELS`
 */
function computeUsage(value) {
    var parts = value.split(' / ');
    var price = parts[0];
    var tokens = parts[1];
    var result = parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
    // Note: Guard against silently propagating NaN from a malformed pricing string
    if (Number.isNaN(result)) {
        throw new TypeError('Can not compute usage from "' + value + '"');
    }
    return result;
}
/** | ||
* List of available Anthropic Claude models with pricing | ||
* | ||
* Note: Done at 2024-05-25 | ||
* | ||
* @see https://docs.anthropic.com/en/docs/models-overview | ||
*/ | ||
/**
 * List of available Anthropic Claude models with pricing
 *
 * Note: Done at 2024-05-25
 *
 * @see https://docs.anthropic.com/en/docs/models-overview
 */
var ANTHROPIC_CLAUDE_MODELS = [
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Opus',
        modelName: 'claude-3-opus',
        pricing: {
            prompt: computeUsage("$15.00 / 1M tokens"),
            output: computeUsage("$75.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Sonnet',
        modelName: 'claude-3-sonnet',
        pricing: {
            prompt: computeUsage("$3.00 / 1M tokens"),
            output: computeUsage("$15.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Haiku',
        // Note: Fixed accidental leading space in the model name (' claude-3-haiku'), which the API would reject
        modelName: 'claude-3-haiku',
        pricing: {
            prompt: computeUsage("$0.25 / 1M tokens"),
            output: computeUsage("$1.25 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 2.1',
        modelName: 'claude-2.1',
        pricing: {
            prompt: computeUsage("$8.00 / 1M tokens"),
            output: computeUsage("$24.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 2',
        modelName: 'claude-2.0',
        pricing: {
            prompt: computeUsage("$8.00 / 1M tokens"),
            output: computeUsage("$24.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        // Note: Fixed accidental leading space in the title (' Claude Instant 1.2')
        modelTitle: 'Claude Instant 1.2',
        modelName: 'claude-instant-1.2',
        pricing: {
            prompt: computeUsage("$0.80 / 1M tokens"),
            output: computeUsage("$2.40 / 1M tokens"),
        },
    },
    // TODO: !!! Claude 1 and 2 have also completion versions - ask Hoagy
];
/** | ||
* TODO: [๐ง ] Some mechanism to propagate unsureness | ||
* TODO: [๐ง ][๐ฎโโ๏ธ] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,... | ||
* TODO: [๐] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing | ||
*/ | ||
/** | ||
* Execution Tools for calling Anthropic Claude API. | ||
*/ | ||
/**
 * Execution Tools for calling Anthropic Claude API.
 */
var AnthropicClaudeExecutionTools = /** @class */ (function () {
    /**
     * Creates Anthropic Claude Execution Tools.
     *
     * @param options which are relevant are directly passed to the Anthropic Claude client
     */
    function AnthropicClaudeExecutionTools(options) {
        this.options = options;
        // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
        var anthropicOptions = __assign({}, options);
        delete anthropicOptions.isVerbose;
        this.client = new Anthropic(anthropicOptions);
    }
    /**
     * Calls Anthropic Claude API to use a chat model.
     *
     * @param prompt object with `content` and `modelRequirements` (must be the CHAT variant)
     * @returns result with content, model name, timing and token usage
     * @throws {PromptbookExecutionError} when the variant is not CHAT or the API returns no usable content
     */
    AnthropicClaudeExecutionTools.prototype.gptChat = function (prompt) {
        return __awaiter(this, void 0, void 0, function () {
            var content, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        if (this.options.isVerbose) {
                            console.info('๐ฌ Anthropic Claude gptChat call');
                        }
                        content = prompt.content, modelRequirements = prompt.modelRequirements;
                        // TODO: [โ] Use here more modelRequirements
                        if (modelRequirements.modelVariant !== 'CHAT') {
                            throw new PromptbookExecutionError('Use gptChat only for CHAT variant');
                        }
                        rawRequest = {
                            model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                            max_tokens: modelRequirements.maxTokens || 10000,
                            // <- TODO: Make some global max cap for maxTokens
                            messages: [
                                {
                                    role: 'user',
                                    content: content,
                                },
                            ],
                            // TODO: Is here some equivalent of user identification?> user: this.options.user,
                        };
                        start = getCurrentIsoDate();
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                        }
                        return [4 /*yield*/, this.client.messages.create(rawRequest)];
                    case 1:
                        rawResponse = _a.sent();
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                        }
                        if (!rawResponse.content[0]) {
                            throw new PromptbookExecutionError('No content from Anthropic Claude');
                        }
                        if (rawResponse.content.length > 1) {
                            throw new PromptbookExecutionError('More than one content blocks from Anthropic Claude');
                        }
                        resultContent = rawResponse.content[0].text;
                        // eslint-disable-next-line prefer-const
                        complete = getCurrentIsoDate();
                        usage = {
                            price: 'UNKNOWN' /* <- TODO: [๐] Compute usage */,
                            inputTokens: rawResponse.usage.input_tokens,
                            outputTokens: rawResponse.usage.output_tokens,
                        };
                        if (!resultContent) {
                            throw new PromptbookExecutionError('No response message from Anthropic Claude');
                        }
                        return [2 /*return*/, {
                                content: resultContent,
                                modelName: rawResponse.model,
                                timing: {
                                    start: start,
                                    complete: complete,
                                },
                                usage: usage,
                                rawResponse: rawResponse,
                                // <- [๐คนโโ๏ธ]
                            }];
                }
            });
        });
    };
    /**
     * Calls Anthropic Claude API to use a complete model.
     *
     * @throws {PromptbookExecutionError} always - completion models are not implemented yet
     */
    AnthropicClaudeExecutionTools.prototype.gptComplete = function (prompt) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                // Note: Fixed typo ("complation ... implemented to") and switched from plain Error
                //       to PromptbookExecutionError for consistency with gptChat
                throw new PromptbookExecutionError('Anthropic completion models are not implemented in Promptbook yet');
            });
        });
    };
    /**
     * Default model for chat variant.
     *
     * @throws {PromptbookExecutionError} when the default model is missing from ANTHROPIC_CLAUDE_MODELS
     */
    AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
        var defaultModel = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
            var modelName = _a.modelName;
            return modelName === 'claude-3-opus';
        });
        // Note: Guard against `find` returning undefined so callers never dereference it
        if (!defaultModel) {
            throw new PromptbookExecutionError('Default chat model "claude-3-opus" is not available');
        }
        return defaultModel;
    };
    /**
     * List all available Anthropic Claude models that can be used
     */
    AnthropicClaudeExecutionTools.prototype.listModels = function () {
        return ANTHROPIC_CLAUDE_MODELS;
    };
    return AnthropicClaudeExecutionTools;
}());
/**
 * TODO: [๐][โ] Allow to list compatible models with each variant
 * TODO: Maybe create some common util for gptChat and gptComplete
 * TODO: Maybe make a custom AnthropicClaudeError (not "OpenaiError" - this package wraps Anthropic Claude, not OpenAI)
 */
export { AnthropicClaudeExecutionTools }; | ||
//# sourceMappingURL=index.es.js.map |
@@ -1,4 +0,4 @@ | ||
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models'; | ||
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models'; | ||
import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools'; | ||
import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions'; | ||
export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions }; |
@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>; | ||
/** | ||
* Mocks completion model | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -21,0 +21,0 @@ * List all available mocked-models that can be used |
@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>; | ||
/** | ||
* Fakes completion model | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -21,0 +21,0 @@ * List all available fake-models that can be used |
/** | ||
* String value found on openai page | ||
* String value found on OpenAI and Anthropic Claude page | ||
* | ||
* @see https://openai.com/api/pricing/ | ||
* @see https://docs.anthropic.com/en/docs/models-overview | ||
* | ||
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage` | ||
*/ | ||
type string_openai_price = `$${number}.${number} / ${number}M tokens`; | ||
type string_model_price = `$${number}.${number} / ${number}M tokens`; | ||
/** | ||
@@ -12,3 +15,3 @@ * Function computeUsage will create price per one token based on the string value found on openai page | ||
*/ | ||
export declare function computeUsage(value: string_openai_price): number; | ||
export declare function computeUsage(value: string_model_price): number; | ||
export {}; |
@@ -13,3 +13,3 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
private readonly openai; | ||
private readonly client; | ||
/** | ||
@@ -24,7 +24,7 @@ * Creates OpenAI Execution Tools. | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>; | ||
/** | ||
* Calls OpenAI API to use a complete model. | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -31,0 +31,0 @@ * Default model for chat variant. |
@@ -13,5 +13,7 @@ import type { ClientOptions } from 'openai'; | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
* and detect abuse. | ||
* | ||
* @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids | ||
*/ | ||
user?: string_token; | ||
}; |
@@ -26,3 +26,3 @@ import type { number_positive_or_zero, number_tokens, number_usd, string_date_iso8601, string_model_name } from '.././types/typeAliases'; | ||
*/ | ||
readonly model: string_model_name; | ||
readonly modelName: string_model_name; | ||
/** | ||
@@ -58,7 +58,7 @@ * Timing | ||
*/ | ||
inputTokens: number_tokens; | ||
inputTokens: number_tokens | 'UNKNOWN'; | ||
/** | ||
* Number of tokens used in the output aka. `completion_tokens` | ||
*/ | ||
outputTokens: number_tokens; | ||
outputTokens: number_tokens | 'UNKNOWN'; | ||
}; | ||
@@ -65,0 +65,0 @@ /** |
{ | ||
"name": "@promptbook/anthropic-claude", | ||
"version": "0.52.0-1", | ||
"version": "0.52.0-2", | ||
"description": "Library to supercharge your use of large language models", | ||
@@ -36,3 +36,6 @@ "private": false, | ||
"homepage": "https://www.npmjs.com/package/@promptbook/core", | ||
"dependencies": {}, | ||
"dependencies": { | ||
"@anthropic-ai/sdk": "^0.21.1", | ||
"colors": "1.4.0" | ||
}, | ||
"funding": [ | ||
@@ -49,3 +52,3 @@ { | ||
"peerDependencies": { | ||
"@promptbook/core": "0.52.0-1" | ||
"@promptbook/core": "0.52.0-2" | ||
}, | ||
@@ -52,0 +55,0 @@ "main": "./umd/index.umd.js", |
@@ -35,2 +35,5 @@ # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook | ||
--- | ||
@@ -333,3 +336,3 @@ | ||
- **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK | ||
- **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK | ||
- **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK | ||
- **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API | ||
@@ -487,4 +490,5 @@ - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK | ||
- `OpenAiExecutionTools` | ||
- _(Not implemented yet !!!!! )_ `AnthropicClaudeExecutionTools` | ||
- _(Not implemented yet !!!!! )_ `AzureOpenAiExecutionTools` | ||
- `AnthropicClaudeExecutionTools` | ||
- `AzureOpenAiExecutionTools` | ||
- `LangtailExecutionTools` | ||
- _(Not implemented yet)_ `BardExecutionTools` | ||
@@ -491,0 +495,0 @@ - _(Not implemented yet)_ `LamaExecutionTools` |
@@ -1,9 +0,337 @@ | ||
(function (factory) { | ||
typeof define === 'function' && define.amd ? define(factory) : | ||
factory(); | ||
})((function () { 'use strict'; | ||
(function (global, factory) { | ||
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@anthropic-ai/sdk'), require('colors')) : | ||
typeof define === 'function' && define.amd ? define(['exports', '@anthropic-ai/sdk', 'colors'], factory) : | ||
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-anthropic-claude"] = {}, global.Anthropic, global.colors)); | ||
})(this, (function (exports, Anthropic, colors) { 'use strict'; | ||
function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; } | ||
var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic); | ||
var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors); | ||
/*! ***************************************************************************** | ||
Copyright (c) Microsoft Corporation. | ||
Permission to use, copy, modify, and/or distribute this software for any | ||
purpose with or without fee is hereby granted. | ||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH | ||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY | ||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, | ||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM | ||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR | ||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR | ||
PERFORMANCE OF THIS SOFTWARE. | ||
***************************************************************************** */ | ||
/* global Reflect, Promise */ | ||
/* ----------------------------------------------------------------------------
 * TypeScript (tslib) runtime helpers inlined by the bundler (UMD build).
 * They implement down-leveled class inheritance, object spread, async/await
 * and array destructuring for ES5 targets. Generated code - do not edit.
 * ------------------------------------------------------------------------- */
// Copies the prototype chain from `b` to `d`, picking the fastest mechanism
// available (Object.setPrototypeOf > __proto__ assignment > manual key copy).
var extendStatics = function(d, b) {
    extendStatics = Object.setPrototypeOf ||
        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
        function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
    return extendStatics(d, b);
};
// ES5 emulation of `class d extends b` (used below by PromptbookExecutionError).
function __extends(d, b) {
    if (typeof b !== "function" && b !== null)
        throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
    extendStatics(d, b);
    function __() { this.constructor = d; }
    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
}
// ES5 emulation of object spread / `Object.assign` (shallow copy; later sources win).
var __assign = function() {
    __assign = Object.assign || function __assign(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
// Drives a down-leveled `async` function: wraps the generator produced by
// __generator in a Promise and resumes it whenever an awaited value settles.
function __awaiter(thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
}
// State machine emulating a generator body for ES5 targets; `_.label` tracks
// the resume point and `_.trys`/`_.ops` model try/catch/finally regions.
function __generator(thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (_) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
}
// ES5 emulation of array destructuring over an iterable: reads up to `n`
// values and closes the iterator on early exit or error.
function __read(o, n) {
    var m = typeof Symbol === "function" && o[Symbol.iterator];
    if (!m) return o;
    var i = m.call(o), r, ar = [], e;
    try {
        while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
    }
    catch (error) { e = { error: error }; }
    finally {
        try {
            if (r && !r.done && (m = i["return"])) m.call(i);
        }
        finally { if (e) throw e.error; }
    }
    return ar;
}
/** | ||
* This error indicates errors during the execution of the promptbook | ||
*/ | ||
/**
 * This error indicates errors during the execution of the promptbook
 */
var PromptbookExecutionError = /** @class */ (function (_super) {
    __extends(PromptbookExecutionError, _super);
    /**
     * @param message human-readable description of what failed during execution
     */
    function PromptbookExecutionError(message) {
        var self = _super.call(this, message) || this;
        self.name = 'PromptbookExecutionError';
        // Restore the prototype chain that calling the native Error constructor breaks in ES5
        Object.setPrototypeOf(self, PromptbookExecutionError.prototype);
        return self;
    }
    return PromptbookExecutionError;
}(Error));
/** | ||
* Get current date in ISO 8601 format | ||
* | ||
* @private This is internal util of the promptbook | ||
*/ | ||
/**
 * Get current date in ISO 8601 format
 *
 * @returns the current moment as an ISO 8601 string (UTC, e.g. "2024-05-25T12:00:00.000Z")
 * @private This is internal util of the promptbook
 */
function getCurrentIsoDate() {
    var now = new Date();
    return now.toISOString();
}
/** | ||
* Function computeUsage will create price per one token based on the string value found on openai page | ||
* | ||
* @private within the library, used only as internal helper for `OPENAI_MODELS` | ||
*/ | ||
/**
 * Function computeUsage will create price per one token based on the pricing
 * string found on the OpenAI and Anthropic Claude pages, e.g. "$15.00 / 1M tokens".
 *
 * @param value pricing string in the form `$<price> / <n>M tokens`
 * @returns price in USD per single token
 * @throws {TypeError} when the value does not match the expected format
 * @private within the library, used only as internal helper for `OPENAI_MODELS` and `ANTHROPIC_CLAUDE_MODELS`
 */
function computeUsage(value) {
    var parts = value.split(' / ');
    var price = parts[0];
    var tokens = parts[1];
    var result = parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
    // Note: Guard against silently propagating NaN from a malformed pricing string
    if (Number.isNaN(result)) {
        throw new TypeError('Can not compute usage from "' + value + '"');
    }
    return result;
}
/** | ||
* List of available Anthropic Claude models with pricing | ||
* | ||
* Note: Done at 2024-05-25 | ||
* | ||
* @see https://docs.anthropic.com/en/docs/models-overview | ||
*/ | ||
/**
 * List of available Anthropic Claude models with pricing
 *
 * Note: Done at 2024-05-25
 *
 * @see https://docs.anthropic.com/en/docs/models-overview
 */
var ANTHROPIC_CLAUDE_MODELS = [
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Opus',
        modelName: 'claude-3-opus',
        pricing: {
            prompt: computeUsage("$15.00 / 1M tokens"),
            output: computeUsage("$75.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Sonnet',
        modelName: 'claude-3-sonnet',
        pricing: {
            prompt: computeUsage("$3.00 / 1M tokens"),
            output: computeUsage("$15.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 3 Haiku',
        // Note: Fixed accidental leading space in the model name (' claude-3-haiku'), which the API would reject
        modelName: 'claude-3-haiku',
        pricing: {
            prompt: computeUsage("$0.25 / 1M tokens"),
            output: computeUsage("$1.25 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 2.1',
        modelName: 'claude-2.1',
        pricing: {
            prompt: computeUsage("$8.00 / 1M tokens"),
            output: computeUsage("$24.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Claude 2',
        modelName: 'claude-2.0',
        pricing: {
            prompt: computeUsage("$8.00 / 1M tokens"),
            output: computeUsage("$24.00 / 1M tokens"),
        },
    },
    {
        modelVariant: 'CHAT',
        // Note: Fixed accidental leading space in the title (' Claude Instant 1.2')
        modelTitle: 'Claude Instant 1.2',
        modelName: 'claude-instant-1.2',
        pricing: {
            prompt: computeUsage("$0.80 / 1M tokens"),
            output: computeUsage("$2.40 / 1M tokens"),
        },
    },
    // TODO: !!! Claude 1 and 2 have also completion versions - ask Hoagy
];
/** | ||
* TODO: [๐ง ] Some mechanism to propagate unsureness | ||
* TODO: [๐ง ][๐ฎโโ๏ธ] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,... | ||
* TODO: [๐] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing | ||
*/ | ||
/** | ||
* Execution Tools for calling Anthropic Claude API. | ||
*/ | ||
/**
 * Execution Tools for calling Anthropic Claude API.
 */
var AnthropicClaudeExecutionTools = /** @class */ (function () {
    /**
     * Creates Anthropic Claude Execution Tools.
     *
     * @param options which are relevant are directly passed to the Anthropic Claude client
     */
    function AnthropicClaudeExecutionTools(options) {
        this.options = options;
        // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
        var anthropicOptions = __assign({}, options);
        delete anthropicOptions.isVerbose;
        this.client = new Anthropic__default["default"](anthropicOptions);
    }
    /**
     * Calls Anthropic Claude API to use a chat model.
     *
     * @param prompt object with `content` and `modelRequirements` (must be the CHAT variant)
     * @returns result with content, model name, timing and token usage
     * @throws {PromptbookExecutionError} when the variant is not CHAT or the API returns no usable content
     */
    AnthropicClaudeExecutionTools.prototype.gptChat = function (prompt) {
        return __awaiter(this, void 0, void 0, function () {
            var content, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        if (this.options.isVerbose) {
                            console.info('๐ฌ Anthropic Claude gptChat call');
                        }
                        content = prompt.content, modelRequirements = prompt.modelRequirements;
                        // TODO: [โ] Use here more modelRequirements
                        if (modelRequirements.modelVariant !== 'CHAT') {
                            throw new PromptbookExecutionError('Use gptChat only for CHAT variant');
                        }
                        rawRequest = {
                            model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                            max_tokens: modelRequirements.maxTokens || 10000,
                            // <- TODO: Make some global max cap for maxTokens
                            messages: [
                                {
                                    role: 'user',
                                    content: content,
                                },
                            ],
                            // TODO: Is here some equivalent of user identification?> user: this.options.user,
                        };
                        start = getCurrentIsoDate();
                        if (this.options.isVerbose) {
                            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                        }
                        return [4 /*yield*/, this.client.messages.create(rawRequest)];
                    case 1:
                        rawResponse = _a.sent();
                        if (this.options.isVerbose) {
                            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                        }
                        if (!rawResponse.content[0]) {
                            throw new PromptbookExecutionError('No content from Anthropic Claude');
                        }
                        if (rawResponse.content.length > 1) {
                            throw new PromptbookExecutionError('More than one content blocks from Anthropic Claude');
                        }
                        resultContent = rawResponse.content[0].text;
                        // eslint-disable-next-line prefer-const
                        complete = getCurrentIsoDate();
                        usage = {
                            price: 'UNKNOWN' /* <- TODO: [๐] Compute usage */,
                            inputTokens: rawResponse.usage.input_tokens,
                            outputTokens: rawResponse.usage.output_tokens,
                        };
                        if (!resultContent) {
                            throw new PromptbookExecutionError('No response message from Anthropic Claude');
                        }
                        return [2 /*return*/, {
                                content: resultContent,
                                modelName: rawResponse.model,
                                timing: {
                                    start: start,
                                    complete: complete,
                                },
                                usage: usage,
                                rawResponse: rawResponse,
                                // <- [๐คนโโ๏ธ]
                            }];
                }
            });
        });
    };
    /**
     * Calls Anthropic Claude API to use a complete model.
     *
     * @throws {PromptbookExecutionError} always - completion models are not implemented yet
     */
    AnthropicClaudeExecutionTools.prototype.gptComplete = function (prompt) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                // Note: Fixed typo ("complation ... implemented to") and switched from plain Error
                //       to PromptbookExecutionError for consistency with gptChat
                throw new PromptbookExecutionError('Anthropic completion models are not implemented in Promptbook yet');
            });
        });
    };
    /**
     * Default model for chat variant.
     *
     * @throws {PromptbookExecutionError} when the default model is missing from ANTHROPIC_CLAUDE_MODELS
     */
    AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
        var defaultModel = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
            var modelName = _a.modelName;
            return modelName === 'claude-3-opus';
        });
        // Note: Guard against `find` returning undefined so callers never dereference it
        if (!defaultModel) {
            throw new PromptbookExecutionError('Default chat model "claude-3-opus" is not available');
        }
        return defaultModel;
    };
    /**
     * List all available Anthropic Claude models that can be used
     */
    AnthropicClaudeExecutionTools.prototype.listModels = function () {
        return ANTHROPIC_CLAUDE_MODELS;
    };
    return AnthropicClaudeExecutionTools;
}());
/**
 * TODO: [๐][โ] Allow to list compatible models with each variant
 * TODO: Maybe create some common util for gptChat and gptComplete
 * TODO: Maybe make a custom AnthropicClaudeError (not "OpenaiError" - this package wraps Anthropic Claude, not OpenAI)
 */
exports.AnthropicClaudeExecutionTools = AnthropicClaudeExecutionTools; | ||
Object.defineProperty(exports, '__esModule', { value: true }); | ||
})); | ||
//# sourceMappingURL=index.umd.js.map |
@@ -1,4 +0,4 @@ | ||
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models'; | ||
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models'; | ||
import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools'; | ||
import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions'; | ||
export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions }; |
@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>; | ||
/** | ||
* Mocks completion model | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -21,0 +21,0 @@ * List all available mocked-models that can be used |
@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>; | ||
/** | ||
* Fakes completion model | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -21,0 +21,0 @@ * List all available fake-models that can be used |
/** | ||
* String value found on openai page | ||
* String value found on OpenAI and Anthropic Claude page | ||
* | ||
* @see https://openai.com/api/pricing/ | ||
* @see https://docs.anthropic.com/en/docs/models-overview | ||
* | ||
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage` | ||
*/ | ||
type string_openai_price = `$${number}.${number} / ${number}M tokens`; | ||
type string_model_price = `$${number}.${number} / ${number}M tokens`; | ||
/** | ||
@@ -12,3 +15,3 @@ * Function computeUsage will create price per one token based on the string value found on openai page | ||
*/ | ||
export declare function computeUsage(value: string_openai_price): number; | ||
export declare function computeUsage(value: string_model_price): number; | ||
export {}; |
@@ -13,3 +13,3 @@ import type { Prompt } from '../../../../types/Prompt'; | ||
*/ | ||
private readonly openai; | ||
private readonly client; | ||
/** | ||
@@ -24,7 +24,7 @@ * Creates OpenAI Execution Tools. | ||
*/ | ||
gptChat(prompt: Prompt): Promise<PromptChatResult>; | ||
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>; | ||
/** | ||
* Calls OpenAI API to use a complete model. | ||
*/ | ||
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>; | ||
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>; | ||
/** | ||
@@ -31,0 +31,0 @@ * Default model for chat variant. |
@@ -13,5 +13,7 @@ import type { ClientOptions } from 'openai'; | ||
* A unique identifier representing your end-user, which can help OpenAI to monitor | ||
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). | ||
* and detect abuse. | ||
* | ||
* @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids | ||
*/ | ||
user?: string_token; | ||
}; |
@@ -26,3 +26,3 @@ import type { number_positive_or_zero, number_tokens, number_usd, string_date_iso8601, string_model_name } from '.././types/typeAliases'; | ||
*/ | ||
readonly model: string_model_name; | ||
readonly modelName: string_model_name; | ||
/** | ||
@@ -58,7 +58,7 @@ * Timing | ||
*/ | ||
inputTokens: number_tokens; | ||
inputTokens: number_tokens | 'UNKNOWN'; | ||
/** | ||
* Number of tokens used in the output aka. `completion_tokens` | ||
*/ | ||
outputTokens: number_tokens; | ||
outputTokens: number_tokens | 'UNKNOWN'; | ||
}; | ||
@@ -65,0 +65,0 @@ /** |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
464383
506
7754
712
3
+ Added@anthropic-ai/sdk@^0.21.1
+ Addedcolors@1.4.0
+ Added@anthropic-ai/sdk@0.21.1(transitive)
+ Added@promptbook/core@0.52.0-2(transitive)
+ Added@types/node@18.19.65(transitive)
+ Added@types/node-fetch@2.6.12(transitive)
+ Addedabort-controller@3.0.0(transitive)
+ Addedagentkeepalive@4.5.0(transitive)
+ Addedasynckit@0.4.0(transitive)
+ Addedcolors@1.4.0(transitive)
+ Addedcombined-stream@1.0.8(transitive)
+ Addeddelayed-stream@1.0.0(transitive)
+ Addedevent-target-shim@5.0.1(transitive)
+ Addedform-data@4.0.1(transitive)
+ Addedform-data-encoder@1.7.2(transitive)
+ Addedformdata-node@4.4.1(transitive)
+ Addedhumanize-ms@1.2.1(transitive)
+ Addedmime-db@1.52.0(transitive)
+ Addedmime-types@2.1.35(transitive)
+ Addedms@2.1.3(transitive)
+ Addednode-domexception@1.0.0(transitive)
+ Addednode-fetch@2.7.0(transitive)
+ Addedtr46@0.0.3(transitive)
+ Addedundici-types@5.26.5(transitive)
+ Addedweb-streams-polyfill@3.3.34.0.0-beta.3(transitive)
+ Addedwebidl-conversions@3.0.1(transitive)
+ Addedwhatwg-url@5.0.0(transitive)
- Removed@promptbook/core@0.52.0-1(transitive)