Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in | Demo | Install
Socket

@promptbook/anthropic-claude

Package Overview
Dependencies
Maintainers
0
Versions
281
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@promptbook/anthropic-claude - npm Package Compare versions

Comparing version 0.60.0-0 to 0.60.0-3

esm/typings/promptbook-collection/index.d.ts

30

esm/index.es.js

@@ -105,11 +105,11 @@ import Anthropic from '@anthropic-ai/sdk';

*/
var PromptbookExecutionError = /** @class */ (function (_super) {
__extends(PromptbookExecutionError, _super);
function PromptbookExecutionError(message) {
var ExecutionError = /** @class */ (function (_super) {
__extends(ExecutionError, _super);
function ExecutionError(message) {
var _this = _super.call(this, message) || this;
_this.name = 'PromptbookExecutionError';
Object.setPrototypeOf(_this, PromptbookExecutionError.prototype);
_this.name = 'ExecutionError';
Object.setPrototypeOf(_this, ExecutionError.prototype);
return _this;
}
return PromptbookExecutionError;
return ExecutionError;
}(Error));

@@ -490,3 +490,3 @@

*
* @private within the library, used only as internal helper for `OPENAI_MODELS`
* @private within the package, used only as internal helper for `OPENAI_MODELS`
*/

@@ -589,3 +589,3 @@ function computeUsage(value) {

*/
AnthropicClaudeExecutionTools.prototype.gptChat = function (prompt) {
AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
return __awaiter(this, void 0, void 0, function () {

@@ -597,3 +597,3 @@ var content, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;

if (this.options.isVerbose) {
console.info('💬 Anthropic Claude gptChat call');
console.info('💬 Anthropic Claude callChatModel call');
}

@@ -603,3 +603,3 @@ content = prompt.content, modelRequirements = prompt.modelRequirements;

if (modelRequirements.modelVariant !== 'CHAT') {
throw new PromptbookExecutionError('Use gptChat only for CHAT variant');
throw new ExecutionError('Use callChatModel only for CHAT variant');
}

@@ -629,6 +629,6 @@ rawRequest = {

if (!rawResponse.content[0]) {
throw new PromptbookExecutionError('No content from Anthropic Claude');
throw new ExecutionError('No content from Anthropic Claude');
}
if (rawResponse.content.length > 1) {
throw new PromptbookExecutionError('More than one content blocks from Anthropic Claude');
throw new ExecutionError('More than one content blocks from Anthropic Claude');
}

@@ -661,3 +661,3 @@ resultContent = rawResponse.content[0].text;

*/
AnthropicClaudeExecutionTools.prototype.gptComplete = function (prompt) {
AnthropicClaudeExecutionTools.prototype.callCompletionModel = function (prompt) {
return __awaiter(this, void 0, void 0, function () {

@@ -704,3 +704,3 @@ return __generator(this, function (_a) {

* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError

@@ -712,5 +712,5 @@ */

*/
var PROMPTBOOK_VERSION = '0.59.0';
var PROMPTBOOK_VERSION = '0.60.0-2';
export { AnthropicClaudeExecutionTools, PROMPTBOOK_VERSION };
//# sourceMappingURL=index.es.js.map

@@ -1,15 +0,15 @@

import { prettifyPromptbookString } from '../conversion/prettify/prettifyPromptbookString';
import { promptbookJsonToString } from '../conversion/promptbookJsonToString';
import { promptbookStringToJson } from '../conversion/promptbookStringToJson';
import { promptbookStringToJsonSync } from '../conversion/promptbookStringToJsonSync';
import { validatePromptbook } from '../conversion/validation/validatePromptbook';
import { ExpectError } from '../errors/_ExpectError';
import { PromptbookExecutionError } from '../errors/PromptbookExecutionError';
import { PromptbookLibraryError } from '../errors/PromptbookLibraryError';
import { PromptbookLogicError } from '../errors/PromptbookLogicError';
import { PromptbookNotFoundError } from '../errors/PromptbookNotFoundError';
import { PromptbookReferenceError } from '../errors/PromptbookReferenceError';
import { PromptbookSyntaxError } from '../errors/PromptbookSyntaxError';
import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
import { pipelineStringToJson } from '../conversion/pipelineStringToJson';
import { pipelineStringToJsonSync } from '../conversion/pipelineStringToJsonSync';
import { prettifyPipelineString } from '../conversion/prettify/prettifyPipelineString';
import { validatePipeline } from '../conversion/validation/validatePipeline';
import { CollectionError } from '../errors/CollectionError';
import { ExecutionError } from '../errors/ExecutionError';
import { NotFoundError } from '../errors/NotFoundError';
import { PipelineLogicError } from '../errors/PipelineLogicError';
import { ReferenceError } from '../errors/ReferenceError';
import { SyntaxError } from '../errors/SyntaxError';
import { TemplateError } from '../errors/TemplateError';
import { UnexpectedError } from '../errors/UnexpectedError';
import { ExpectError } from '../errors/_ExpectError';
import { assertsExecutionSuccessful } from '../execution/assertsExecutionSuccessful';

@@ -25,21 +25,21 @@ import { createPromptbookExecutor } from '../execution/createPromptbookExecutor';

import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
import { createLibraryFromJson } from '../library/constructors/createLibraryFromJson';
import { createLibraryFromPromise } from '../library/constructors/createLibraryFromPromise';
import { createLibraryFromUrl } from '../library/constructors/createLibraryFromUrl';
import { createSublibrary } from '../library/constructors/createSublibrary';
import { createCollectionFromJson } from '../library/constructors/createCollectionFromJson';
import { createCollectionFromPromise } from '../library/constructors/createCollectionFromPromise';
import { createCollectionFromUrl } from '../library/constructors/createCollectionFromUrl';
import { createSubcollection } from '../library/constructors/createSubcollection';
import { libraryToJson } from '../library/libraryToJson';
import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
import { ExecutionTypes } from '../types/ExecutionTypes';
import type { ExecutionReportStringOptions } from '../types/execution-report/ExecutionReportStringOptions';
import { ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';
import { ExecutionTypes } from '../types/ExecutionTypes';
import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION };
export { ExecutionTypes };
export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPromptbookString, usageToWorktime, };
export { createLibraryFromJson, createLibraryFromPromise, createLibraryFromUrl, createSublibrary, libraryToJson };
export { ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
export { createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection as createSublibrary, libraryToJson, };
export { SimplePromptInterfaceTools };
export { promptbookJsonToString, promptbookStringToJson, promptbookStringToJsonSync, validatePromptbook };
export { createPromptbookExecutor, MultipleLlmExecutionTools };
export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, validatePipeline };
export { MultipleLlmExecutionTools, createPromptbookExecutor };
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
export { ExpectError, PromptbookExecutionError, PromptbookLibraryError, PromptbookLogicError, PromptbookNotFoundError, PromptbookReferenceError, PromptbookSyntaxError, TemplateError, UnexpectedError, };
export { CollectionError, ExecutionError, ExpectError, NotFoundError, PipelineLogicError, ReferenceError, SyntaxError, TemplateError, UnexpectedError, };

@@ -1,4 +0,4 @@

import { createLibraryFromDirectory } from '../library/constructors/createLibraryFromDirectory';
import { createCollectionFromDirectory } from '../library/constructors/createCollectionFromDirectory';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION };
export { createLibraryFromDirectory };
export { createCollectionFromDirectory };

@@ -5,18 +5,18 @@ import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';

import type { AvailableModel, LlmExecutionTools } from '../execution/LlmExecutionTools';
import type { PromptEmbeddingResult, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, UncertainNumber } from '../execution/PromptResult';
import type { PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, UncertainNumber } from '../execution/PromptResult';
import type { PromptbookExecutor } from '../execution/PromptbookExecutor';
import type { ScriptExecutionTools, ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
import type { UserInterfaceTools, UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
import type { PromptbookLibrary } from '../library/PromptbookLibrary';
import type { PipelineCollection } from '../library/PipelineCollection';
import type { ExecutionType } from '../types/ExecutionTypes';
import type { ModelRequirements, ModelVariant } from '../types/ModelRequirements';
import type { Parameters } from '../types/Parameters';
import { KnowledgeJson } from '../types/PipelineJson/KnowledgeJson';
import { MaterialKnowledgePieceJson } from '../types/PipelineJson/MaterialKnowledgePieceJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExpectationAmount, ExpectationUnit, Expectations, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PipelineJson/PromptTemplateParameterJson';
import type { PipelineString } from '../types/PipelineString';
import type { Prompt } from '../types/Prompt';
import { KnowledgeJson } from '../types/PromptbookJson/KnowledgeJson';
import { MaterialKnowledgePieceJson } from '../types/PromptbookJson/MaterialKnowledgePieceJson';
import type { ExpectationAmount, Expectations, ExpectationUnit, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PromptbookJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PromptbookJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PromptbookJson/PromptTemplateParameterJson';
import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PromptbookString } from '../types/PromptbookString';
import type { ScriptLanguage } from '../types/ScriptLanguage';

@@ -26,3 +26,3 @@ import type { TaskProgress } from '../types/TaskProgress';

import type { string_char_emoji } from '../types/typeAliasEmoji';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_prompt, string_promptbook_url, string_promptbook_url_with_hashtemplate, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { FromtoItems } from '../utils/FromtoItems';

@@ -32,5 +32,5 @@ import { PROMPTBOOK_VERSION } from '../version';

export { EXPECTATION_UNITS };
export type { AvailableModel, client_id, CommonExecutionToolsOptions, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, Expectations, ExpectationUnit, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, Prompt, PromptbookExecutor, PromptbookJson, PromptbookLibrary, PromptbookString, PromptChatResult, PromptEmbeddingResult, EmbeddingVector, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_prompt, string_promptbook_url, string_promptbook_url_with_hashtemplate, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, };
export type { AvailableModel, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, PromptbookExecutor, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, };
/**
* TODO: Delete type aliases (from ../types/typeAliases) that are not exported here
*/
import { spaceTrim } from 'spacetrim';
import { renderPromptbookMermaid } from '../conversion/prettify/renderPromptbookMermaid';
import { renderPromptbookMermaid } from '../conversion/prettify/renderPipelineMermaidOptions';
import { extractParametersFromPromptTemplate } from '../conversion/utils/extractParametersFromPromptTemplate';

@@ -11,3 +11,2 @@ import { extractVariables } from '../conversion/utils/extractVariables';

import { isValidJsonString } from '../formats/json/utils/isValidJsonString';
import { CountUtils } from '../utils/expectation-counters/index';
import { countCharacters } from '../utils/expectation-counters/countCharacters';

@@ -17,5 +16,5 @@ import { countLines } from '../utils/expectation-counters/countLines';

import { countParagraphs } from '../utils/expectation-counters/countParagraphs';
import { countSentences } from '../utils/expectation-counters/countSentences';
import { splitIntoSentences } from '../utils/expectation-counters/countSentences';
import { countSentences, splitIntoSentences } from '../utils/expectation-counters/countSentences';
import { countWords } from '../utils/expectation-counters/countWords';
import { CountUtils } from '../utils/expectation-counters/index';
import { extractParameters } from '../utils/extractParameters';

@@ -27,7 +26,6 @@ import { extractAllBlocksFromMarkdown } from '../utils/markdown/extractAllBlocksFromMarkdown';

import { removeMarkdownFormatting } from '../utils/markdown/removeMarkdownFormatting';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';
import type { IKeywords, string_keyword } from '../utils/normalization/IKeywords';
import { capitalize } from '../utils/normalization/capitalize';
import { decapitalize } from '../utils/normalization/decapitalize';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';
import type { IKeywords } from '../utils/normalization/IKeywords';
import type { string_keyword } from '../utils/normalization/IKeywords';
import { isValidKeyword } from '../utils/normalization/isValidKeyword';

@@ -37,5 +35,5 @@ import { nameToUriPart } from '../utils/normalization/nameToUriPart';

import { normalizeToKebabCase } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case } from '../utils/normalization/normalizeTo_snake_case';

@@ -57,3 +55,3 @@ import { normalizeWhitespaces } from '../utils/normalization/normalizeWhitespaces';

import { PROMPTBOOK_VERSION } from '../version';
export { forEachAsync, PROMPTBOOK_VERSION };
export { PROMPTBOOK_VERSION, forEachAsync };
export { extractAllBlocksFromMarkdown, // <- [🌻]

@@ -63,3 +61,3 @@ extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]

removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
export { CountUtils, countCharacters, countLines, countPages, countParagraphs, countSentences, countWords };
export { splitIntoSentences };

@@ -73,3 +71,3 @@ export declare const normalizeTo: {

};
export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { DIACRITIC_VARIANTS_LETTERS, IKeywords, capitalize, decapitalize, isValidKeyword, nameToUriPart, nameToUriParts, normalizeToKebabCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_camelCase, normalizeTo_snake_case, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { extractParametersFromPromptTemplate, renameParameter, renderPromptbookMermaid };

@@ -76,0 +74,0 @@ export { difference, intersection, union };

@@ -10,4 +10,4 @@ /**

/**
* The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createLibraryFromDirectory`
* The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createCollectionFromDirectory`
*/
export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "index";
/**
* Options for `prettifyPromptbookString` function
* Options for `prettifyPipelineString` function
*/

@@ -4,0 +4,0 @@ export type PrettifyOptions = {

@@ -1,2 +0,2 @@

import type { PromptTemplateJson } from '../../types/PromptbookJson/PromptTemplateJson';
import type { PromptTemplateJson } from '../../types/PipelineJson/PromptTemplateJson';
import type { string_name } from '../../types/typeAliases';

@@ -8,3 +8,3 @@ /**

* @returns the set of parameter names
* @throws {PromptbookSyntaxError} if the script is invalid
* @throws {SyntaxError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'executionType' | 'content'>): Set<string_name>;

@@ -8,3 +8,3 @@ import type { string_javascript } from '../../types/typeAliases';

* @returns the list of variable names
* @throws {PromptbookSyntaxError} if the script is invalid
* @throws {SyntaxError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractVariables(script: string_javascript): Set<string_javascript_name>;

@@ -7,6 +7,6 @@ import type { Command } from '../../types/Command';

* @returns parsed command object
* @throws {PromptbookSyntaxError} if the command is invalid
* @throws {SyntaxError} if the command is invalid
*
* @private within the promptbookStringToJson
* @private within the pipelineStringToJson
*/
export declare function parseCommand(listItem: string_markdown_text): Command;

@@ -8,3 +8,3 @@ /**

* @returns parsed number
* @throws {PromptbookSyntaxError} if the value is not a number
* @throws {SyntaxError} if the value is not a number
*

@@ -11,0 +11,0 @@ * @private within the parseCommand

@@ -1,2 +0,2 @@

import type { PromptbookJson } from '../../types/PromptbookJson/PromptbookJson';
import type { PipelineJson } from '../../types/PipelineJson/PipelineJson';
import type { string_name } from '../../types/typeAliases';

@@ -8,3 +8,3 @@ type RenameParameterOptions = {

*/
promptbook: PromptbookJson;
promptbook: PipelineJson;
/**

@@ -23,5 +23,5 @@ * Original parameter name that should be replaced

*
* @throws {PromptbookLogicError} If the new parameter name is already used in the promptbook
* @throws {PipelineLogicError} If the new parameter name is already used in the promptbook
*/
export declare function renameParameter(options: RenameParameterOptions): PromptbookJson;
export declare function renameParameter(options: RenameParameterOptions): PipelineJson;
export {};
/**
* This error occurs when some expectation is not met in the execution of the pipeline
*
* @private Always catched and rethrown as `PromptbookExecutionError`
* Note: This is a kindof subtype of PromptbookExecutionError
* @private Always catched and rethrown as `ExecutionError`
* Note: This is a kindof subtype of ExecutionError
*/

@@ -7,0 +7,0 @@ export declare class ExpectError extends Error {

/**
* This error occurs during the parameter replacement in the template
*
* Note: This is a kindof subtype of PromptbookExecutionError because it occurs during the execution of the pipeline
* Note: This is a kindof subtype of ExecutionError because it occurs during the execution of the pipeline
*/

@@ -6,0 +6,0 @@ export declare class TemplateError extends Error {

@@ -6,3 +6,3 @@ import type { PromptbookExecutor } from './PromptbookExecutor';

* @param executionResult - The partial result of the promptnook execution
* @throws {PromptbookExecutionError} If the execution is not successful or if multiple errors occurred
* @throws {ExecutionError} If the execution is not successful or if multiple errors occurred
*/

@@ -9,0 +9,0 @@ export declare function assertsExecutionSuccessful(executionResult: Pick<Awaited<ReturnType<PromptbookExecutor>>, 'isSuccessful' | 'errors'>): void;

@@ -1,2 +0,2 @@

import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExecutionTools } from './ExecutionTools';

@@ -19,3 +19,3 @@ import type { PromptbookExecutor } from './PromptbookExecutor';

*/
readonly promptbook: PromptbookJson;
readonly promptbook: PipelineJson;
/**

@@ -34,3 +34,3 @@ * The execution tools to be used during the execution of the PROMPTBOOK

* @returns The executor function
* @throws {PromptbookLogicError} on logical error in the promptbook
* @throws {PipelineLogicError} on logical error in the promptbook
*/

@@ -37,0 +37,0 @@ export declare function createPromptbookExecutor(options: CreatePromptbookExecutorOptions): PromptbookExecutor;

@@ -5,3 +5,3 @@ import type { LlmExecutionTools } from './LlmExecutionTools';

/**
* All the tools needed to execute prompts (template pipelines).
* All the tools needed to execute pipelines.
*

@@ -8,0 +8,0 @@ * @see https://github.com/webgptorg/promptbook#execution-tools

@@ -19,7 +19,7 @@ import type { Promisable } from 'type-fest';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Use a completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**

@@ -50,4 +50,4 @@ * List all available models that can be used

* TODO: [🍓][♐] Some heuristic to pick the best model in listed models
* TODO: [🏳] gptChat -> chat, gptComplete -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both gptComplete and gptChat
* TODO: [🏳] callChatModel -> chat, callCompletionModel -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
*/
import type { KebabCase } from 'type-fest';
import type { ExpectationUnit } from '../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from '../types/PipelineJson/PromptTemplateJson';
import type { number_positive } from '../types/typeAliases';

@@ -4,0 +4,0 @@ import type { number_usd } from '../types/typeAliases';

@@ -5,4 +5,4 @@ /**

* @returns
* @throws {PromptbookExecutionError}
* @throws {ExecutionError}
*/
export declare function extractMultiplicatedOccurrence(message: string): string;

@@ -1,2 +0,2 @@

import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Function checkExpectations will check if the expectations on given value are met

import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { KnowledgeJson } from '../../../types/PromptbookJson/KnowledgeJson';
import type { KnowledgeJson } from '../../../types/PipelineJson/KnowledgeJson';
import type { string_markdown } from '../../../types/typeAliases';

@@ -4,0 +4,0 @@ type PrepareKnowledgeFromMarkdownOptions = {

import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { KnowledgeJson } from '../../../types/PromptbookJson/KnowledgeJson';
import type { KnowledgeJson } from '../../../types/PipelineJson/KnowledgeJson';
import type { string_base64 } from '../../../types/typeAliases';

@@ -4,0 +4,0 @@ type PrepareKnowledgeFromPdfOptions = {

@@ -1,8 +0,8 @@

import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PromptbookLibrary } from './PromptbookLibrary';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { PipelineCollection } from './PipelineCollection';
/**
* Converts PromptbookLibrary to serialized JSON
* Converts PipelineCollection to serialized JSON
*
* Note: Functions `libraryToJson` and `createLibraryFromJson` are complementary
* Note: Functions `libraryToJson` and `createCollectionFromJson` are complementary
*/
export declare function libraryToJson(library: PromptbookLibrary): Promise<Array<PromptbookJson>>;
export declare function libraryToJson(library: PipelineCollection): Promise<Array<PipelineJson>>;

@@ -25,7 +25,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls Anthropic Claude API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -47,4 +47,4 @@ * Get the model that should be used as default

* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
*/

@@ -25,7 +25,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls Azure OpenAI API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -41,4 +41,4 @@ * Changes Azure error (which is not propper Error but object) to propper Error

/**
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom AzureOpenaiError
*/
import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -4,0 +4,0 @@ * Gets the expectations and creates a fake text that meets the expectations

@@ -16,7 +16,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Mocks completion model
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -23,0 +23,0 @@ * List all available mocked-models that can be used

@@ -16,7 +16,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
/**
* Fakes completion model
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
/**

@@ -29,2 +29,2 @@ * List all available fake-models that can be used

* TODO: [🕵️‍♀️] Maybe just remove
*/
*/

@@ -23,11 +23,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls the best available completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls the best available model
*/
private gptCommon;
private callModelCommon;
/**

@@ -34,0 +34,0 @@ * List all available models that can be used

@@ -10,3 +10,3 @@ import type OpenAI from 'openai';

* @param rawResponse The raw response from OpenAI API
* @throws {PromptbookExecutionError} If the usage is not defined in the response from OpenAI
* @throws {ExecutionError} If the usage is not defined in the response from OpenAI
* @private internal util of `OpenAiExecutionTools`

@@ -13,0 +13,0 @@ */

@@ -7,3 +7,3 @@ /**

*
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
* @private within the package, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
*/

@@ -14,5 +14,5 @@ type string_model_price = `$${number}.${number} / ${number}M tokens`;

*
* @private within the library, used only as internal helper for `OPENAI_MODELS`
* @private within the package, used only as internal helper for `OPENAI_MODELS`
*/
export declare function computeUsage(value: string_model_price): number;
export {};

@@ -26,7 +26,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<PromptChatResult>;
/**
* Calls OpenAI API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -59,4 +59,4 @@ * Calls OpenAI API to use a embedding model

* TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
*/
import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { PromptbookLibrary } from '../../../library/PromptbookLibrary';
import type { PipelineCollection } from '../../../library/PipelineCollection';
import type { client_id } from '../../../types/typeAliases';

@@ -23,3 +23,3 @@ import type { string_uri } from '../../../types/typeAliases';

*/
readonly library: PromptbookLibrary;
readonly library: PipelineCollection;
/**

@@ -26,0 +26,0 @@ * Creates llm execution tools for each client

@@ -25,11 +25,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls remote proxy server to use a completion model.
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls remote proxy server to use both completion or chat model.
*/
private gptCommon;
private callModelCommon;
/**

@@ -36,0 +36,0 @@ * List all available models that can be used

@@ -13,3 +13,3 @@ import type { IDestroyable } from 'destroyable';

/**
* TODO: [⚖] Expose the library to be able to connect to same library via createLibraryFromUrl
* TODO: [⚖] Expose the library to be able to connect to same library via createCollectionFromUrl
* TODO: Handle progress - support streaming

@@ -16,0 +16,0 @@ * TODO: [🤹‍♂️] Do not hang up immediately but wait until client closes OR timeout

@@ -0,8 +1,8 @@

import type { ExecutionType } from './ExecutionTypes';
import type { ModelRequirements } from './ModelRequirements';
import type { ExpectationAmount } from './PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from './PipelineJson/PromptTemplateJson';
import type { string_markdown_text } from './typeAliases';
import type { string_name } from './typeAliases';
import type { string_version } from './typeAliases';
import type { ExecutionType } from './ExecutionTypes';
import type { ModelRequirements } from './ModelRequirements';
import type { ExpectationAmount } from './PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from './PromptbookJson/PromptTemplateJson';
/**

@@ -9,0 +9,0 @@ * Command is one piece of the prompt template which adds some logic to the prompt template or the whole pipeline.

@@ -5,4 +5,4 @@ import type { FromtoItems } from '../../utils/FromtoItems';

*
* @private within the library
* @private within the package
*/
export declare function countWorkingDuration(items: FromtoItems): number;
import type { PromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../Prompt';
import type { string_markdown_text } from '../typeAliases';
import type { string_promptbook_url } from '../typeAliases';
import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';

@@ -20,3 +20,3 @@ /**

*/
readonly promptbookUrl?: string_promptbook_url;
readonly promptbookUrl?: string_pipeline_url;
/**

@@ -23,0 +23,0 @@ * Title of from promptbook which was executed

/**
* Parameters of the prompt template (pipeline)
* Parameters of the pipeline
*

@@ -4,0 +4,0 @@ * There are three types of parameters:

import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
import type { ExpectFormatCommand } from './Command';
import type { ModelRequirements } from './ModelRequirements';
import type { Expectations } from './PromptbookJson/PromptTemplateJson';
import type { Expectations } from './PipelineJson/PromptTemplateJson';
import type { string_name } from './typeAliases';
import type { string_pipeline_url_with_hashtemplate } from './typeAliases';
import type { string_prompt } from './typeAliases';
import type { string_promptbook_url_with_hashtemplate } from './typeAliases';
import type { string_title } from './typeAliases';

@@ -55,3 +55,3 @@ /**

*/
readonly promptbookUrl: string_promptbook_url_with_hashtemplate;
readonly promptbookUrl: string_pipeline_url_with_hashtemplate;
/**

@@ -58,0 +58,0 @@ * Parameters used in the prompt

@@ -184,3 +184,3 @@ /**

*/
export type string_promptbook_url = string;
export type string_pipeline_url = string;
/**

@@ -191,3 +191,3 @@ * Semantic helper

*/
export type string_promptbook_url_with_hashtemplate = string;
export type string_pipeline_url_with_hashtemplate = string;
/**

@@ -194,0 +194,0 @@ * Semantic helper

@@ -8,3 +8,3 @@ import type { string_char_emoji } from '../types/typeAliasEmoji';

*
* @private within the library
* @private within the package
* @deprecated Use /\p{Extended_Pictographic}/ instead

@@ -17,3 +17,3 @@ */

*
* @private within the library
* @private within the package
* @deprecated Use /\p{Extended_Pictographic}/ instead

@@ -20,0 +20,0 @@ */

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of characters in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of lines in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of pages in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of paragraphs in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Split text into sentences

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of words in the text

@@ -1,3 +0,3 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -4,0 +4,0 @@ * Index of all counter functions

/**
* Format either small or big number
*
* @private within the library
* @private within the package
*/
export declare function formatNumber(value: number): string;

@@ -5,4 +5,4 @@ import type { MarkdownStructure } from './MarkdownStructure';

*
* @private within the library
* @private within the package
*/
export declare function countMarkdownStructureDeepness(markdownStructure: MarkdownStructure): number;
/**
* Represents the structure of a markdown file.
*
* @private within the library
* @private within the package
*/

@@ -6,0 +6,0 @@ export type MarkdownStructure = {

@@ -11,4 +11,4 @@ import type { MarkdownStructure } from './MarkdownStructure';

*
* @private within the library
* @private within the package
*/
export declare function markdownToMarkdownStructure(markdown: string): MarkdownStructure;

@@ -6,3 +6,3 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/

@@ -9,0 +9,0 @@ export declare function addAutoGeneratedSection(content: string_markdown, options: {

@@ -34,3 +34,3 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/

@@ -37,0 +37,0 @@ export declare function createMarkdownChart(options: CreateMarkdownChartOptions): string_markdown;

@@ -6,4 +6,4 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/
export declare function createMarkdownTable(table: Array<Array<string_markdown_text>>): string_markdown;
{
"name": "@promptbook/anthropic-claude",
"version": "0.60.0-0",
"description": "Library to supercharge your use of large language models",
"version": "0.60.0-3",
"description": "Supercharge your use of large language models",
"private": false,

@@ -52,3 +52,3 @@ "sideEffects": false,

"peerDependencies": {
"@promptbook/core": "0.60.0-0"
"@promptbook/core": "0.60.0-3"
},

@@ -55,0 +55,0 @@ "main": "./umd/index.umd.js",

# ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook
Library to supercharge your use of large language models
Supercharge your use of large language models

@@ -37,4 +37,4 @@

```typescript
import { createPromptbookExecutor, createLibraryFromDirectory, assertsExecutionSuccessful } from '@promptbook/core';
import { createLibraryFromDirectory } from '@promptbook/node';
import { createPromptbookExecutor, createCollectionFromDirectory, assertsExecutionSuccessful } from '@promptbook/core';
import { createCollectionFromDirectory } from '@promptbook/node';
import { JavascriptExecutionTools } from '@promptbook/execute-javascript';

@@ -44,6 +44,6 @@ import { OpenAiExecutionTools } from '@promptbook/openai';

// ▶ Create whole Promptbook library
const library = await createLibraryFromDirectory('./promptbook-library');
const library = await createCollectionFromDirectory('./promptbook-collection');
// ▶ Get one Promptbook
const promptbook = await library.getPromptbookByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);
const promptbook = await library.getPipelineByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);

@@ -81,3 +81,3 @@ // ▶ Prepare tools

```typescript
import { createPromptbookExecutor, createLibraryFromDirectory, assertsExecutionSuccessful } from '@promptbook/core';
import { createPromptbookExecutor, createCollectionFromDirectory, assertsExecutionSuccessful } from '@promptbook/core';
import { JavascriptExecutionTools } from '@promptbook/execute-javascript';

@@ -87,6 +87,6 @@ import { OpenAiExecutionTools } from '@promptbook/openai';

// ▶ Create whole Promptbook library
const library = await createLibraryFromDirectory('./promptbook-library');
const library = await createCollectionFromDirectory('./promptbook-collection');
// ▶ Get one Promptbook
const promptbook = await library.getPromptbookByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);
const promptbook = await library.getPipelineByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);

@@ -154,3 +154,3 @@ // ▶ Prepare tools

- **Separation of concerns** between prompt engineer and programmer; between code files and prompt files; and between prompts, templates, templating pipelines, and their execution logic.
- **Separation of concerns** between prompt engineer and programmer; between code files and prompt files; and between prompts and their execution logic.
- Set up a **common format** for prompts that is interchangeable between projects and language/technology stacks.

@@ -157,0 +157,0 @@ - **Preprocessing** and cleaning the input data from the user.

@@ -113,11 +113,11 @@ (function (global, factory) {

*/
var PromptbookExecutionError = /** @class */ (function (_super) {
__extends(PromptbookExecutionError, _super);
function PromptbookExecutionError(message) {
var ExecutionError = /** @class */ (function (_super) {
__extends(ExecutionError, _super);
function ExecutionError(message) {
var _this = _super.call(this, message) || this;
_this.name = 'PromptbookExecutionError';
Object.setPrototypeOf(_this, PromptbookExecutionError.prototype);
_this.name = 'ExecutionError';
Object.setPrototypeOf(_this, ExecutionError.prototype);
return _this;
}
return PromptbookExecutionError;
return ExecutionError;
}(Error));

@@ -498,3 +498,3 @@

*
* @private within the library, used only as internal helper for `OPENAI_MODELS`
* @private within the package, used only as internal helper for `OPENAI_MODELS`
*/

@@ -597,3 +597,3 @@ function computeUsage(value) {

*/
AnthropicClaudeExecutionTools.prototype.gptChat = function (prompt) {
AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
return __awaiter(this, void 0, void 0, function () {

@@ -605,3 +605,3 @@ var content, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;

if (this.options.isVerbose) {
console.info('💬 Anthropic Claude gptChat call');
console.info('💬 Anthropic Claude callChatModel call');
}

@@ -611,3 +611,3 @@ content = prompt.content, modelRequirements = prompt.modelRequirements;

if (modelRequirements.modelVariant !== 'CHAT') {
throw new PromptbookExecutionError('Use gptChat only for CHAT variant');
throw new ExecutionError('Use callChatModel only for CHAT variant');
}

@@ -637,6 +637,6 @@ rawRequest = {

if (!rawResponse.content[0]) {
throw new PromptbookExecutionError('No content from Anthropic Claude');
throw new ExecutionError('No content from Anthropic Claude');
}
if (rawResponse.content.length > 1) {
throw new PromptbookExecutionError('More than one content blocks from Anthropic Claude');
throw new ExecutionError('More than one content blocks from Anthropic Claude');
}

@@ -669,3 +669,3 @@ resultContent = rawResponse.content[0].text;

*/
AnthropicClaudeExecutionTools.prototype.gptComplete = function (prompt) {
AnthropicClaudeExecutionTools.prototype.callCompletionModel = function (prompt) {
return __awaiter(this, void 0, void 0, function () {

@@ -712,3 +712,3 @@ return __generator(this, function (_a) {

* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError

@@ -720,3 +720,3 @@ */

*/
var PROMPTBOOK_VERSION = '0.59.0';
var PROMPTBOOK_VERSION = '0.60.0-2';

@@ -723,0 +723,0 @@ exports.AnthropicClaudeExecutionTools = AnthropicClaudeExecutionTools;

@@ -1,15 +0,15 @@

import { prettifyPromptbookString } from '../conversion/prettify/prettifyPromptbookString';
import { promptbookJsonToString } from '../conversion/promptbookJsonToString';
import { promptbookStringToJson } from '../conversion/promptbookStringToJson';
import { promptbookStringToJsonSync } from '../conversion/promptbookStringToJsonSync';
import { validatePromptbook } from '../conversion/validation/validatePromptbook';
import { ExpectError } from '../errors/_ExpectError';
import { PromptbookExecutionError } from '../errors/PromptbookExecutionError';
import { PromptbookLibraryError } from '../errors/PromptbookLibraryError';
import { PromptbookLogicError } from '../errors/PromptbookLogicError';
import { PromptbookNotFoundError } from '../errors/PromptbookNotFoundError';
import { PromptbookReferenceError } from '../errors/PromptbookReferenceError';
import { PromptbookSyntaxError } from '../errors/PromptbookSyntaxError';
import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
import { pipelineStringToJson } from '../conversion/pipelineStringToJson';
import { pipelineStringToJsonSync } from '../conversion/pipelineStringToJsonSync';
import { prettifyPipelineString } from '../conversion/prettify/prettifyPipelineString';
import { validatePipeline } from '../conversion/validation/validatePipeline';
import { CollectionError } from '../errors/CollectionError';
import { ExecutionError } from '../errors/ExecutionError';
import { NotFoundError } from '../errors/NotFoundError';
import { PipelineLogicError } from '../errors/PipelineLogicError';
import { ReferenceError } from '../errors/ReferenceError';
import { SyntaxError } from '../errors/SyntaxError';
import { TemplateError } from '../errors/TemplateError';
import { UnexpectedError } from '../errors/UnexpectedError';
import { ExpectError } from '../errors/_ExpectError';
import { assertsExecutionSuccessful } from '../execution/assertsExecutionSuccessful';

@@ -25,21 +25,21 @@ import { createPromptbookExecutor } from '../execution/createPromptbookExecutor';

import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
import { createLibraryFromJson } from '../library/constructors/createLibraryFromJson';
import { createLibraryFromPromise } from '../library/constructors/createLibraryFromPromise';
import { createLibraryFromUrl } from '../library/constructors/createLibraryFromUrl';
import { createSublibrary } from '../library/constructors/createSublibrary';
import { createCollectionFromJson } from '../library/constructors/createCollectionFromJson';
import { createCollectionFromPromise } from '../library/constructors/createCollectionFromPromise';
import { createCollectionFromUrl } from '../library/constructors/createCollectionFromUrl';
import { createSubcollection } from '../library/constructors/createSubcollection';
import { libraryToJson } from '../library/libraryToJson';
import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
import { ExecutionTypes } from '../types/ExecutionTypes';
import type { ExecutionReportStringOptions } from '../types/execution-report/ExecutionReportStringOptions';
import { ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';
import { ExecutionTypes } from '../types/ExecutionTypes';
import { executionReportJsonToString } from '../types/execution-report/executionReportJsonToString';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION };
export { ExecutionTypes };
export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPromptbookString, usageToWorktime, };
export { createLibraryFromJson, createLibraryFromPromise, createLibraryFromUrl, createSublibrary, libraryToJson };
export { ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
export { createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection as createSublibrary, libraryToJson, };
export { SimplePromptInterfaceTools };
export { promptbookJsonToString, promptbookStringToJson, promptbookStringToJsonSync, validatePromptbook };
export { createPromptbookExecutor, MultipleLlmExecutionTools };
export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, validatePipeline };
export { MultipleLlmExecutionTools, createPromptbookExecutor };
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
export { ExpectError, PromptbookExecutionError, PromptbookLibraryError, PromptbookLogicError, PromptbookNotFoundError, PromptbookReferenceError, PromptbookSyntaxError, TemplateError, UnexpectedError, };
export { CollectionError, ExecutionError, ExpectError, NotFoundError, PipelineLogicError, ReferenceError, SyntaxError, TemplateError, UnexpectedError, };

@@ -1,4 +0,4 @@

import { createLibraryFromDirectory } from '../library/constructors/createLibraryFromDirectory';
import { createCollectionFromDirectory } from '../library/constructors/createCollectionFromDirectory';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION };
export { createLibraryFromDirectory };
export { createCollectionFromDirectory };

@@ -5,18 +5,18 @@ import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';

import type { AvailableModel, LlmExecutionTools } from '../execution/LlmExecutionTools';
import type { PromptEmbeddingResult, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, UncertainNumber } from '../execution/PromptResult';
import type { PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, UncertainNumber } from '../execution/PromptResult';
import type { PromptbookExecutor } from '../execution/PromptbookExecutor';
import type { ScriptExecutionTools, ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
import type { UserInterfaceTools, UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
import type { PromptbookLibrary } from '../library/PromptbookLibrary';
import type { PipelineCollection } from '../library/PipelineCollection';
import type { ExecutionType } from '../types/ExecutionTypes';
import type { ModelRequirements, ModelVariant } from '../types/ModelRequirements';
import type { Parameters } from '../types/Parameters';
import { KnowledgeJson } from '../types/PipelineJson/KnowledgeJson';
import { MaterialKnowledgePieceJson } from '../types/PipelineJson/MaterialKnowledgePieceJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExpectationAmount, ExpectationUnit, Expectations, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PipelineJson/PromptTemplateParameterJson';
import type { PipelineString } from '../types/PipelineString';
import type { Prompt } from '../types/Prompt';
import { KnowledgeJson } from '../types/PromptbookJson/KnowledgeJson';
import { MaterialKnowledgePieceJson } from '../types/PromptbookJson/MaterialKnowledgePieceJson';
import type { ExpectationAmount, Expectations, ExpectationUnit, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PromptbookJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PromptbookJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PromptbookJson/PromptTemplateParameterJson';
import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PromptbookString } from '../types/PromptbookString';
import type { ScriptLanguage } from '../types/ScriptLanguage';

@@ -26,3 +26,3 @@ import type { TaskProgress } from '../types/TaskProgress';

import type { string_char_emoji } from '../types/typeAliasEmoji';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_prompt, string_promptbook_url, string_promptbook_url_with_hashtemplate, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { FromtoItems } from '../utils/FromtoItems';

@@ -32,5 +32,5 @@ import { PROMPTBOOK_VERSION } from '../version';

export { EXPECTATION_UNITS };
export type { AvailableModel, client_id, CommonExecutionToolsOptions, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, Expectations, ExpectationUnit, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, Prompt, PromptbookExecutor, PromptbookJson, PromptbookLibrary, PromptbookString, PromptChatResult, PromptEmbeddingResult, EmbeddingVector, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_prompt, string_promptbook_url, string_promptbook_url_with_hashtemplate, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, };
export type { AvailableModel, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, PromptbookExecutor, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, };
/**
* TODO: Delete type aliases (from ../types/typeAliases) that are not exported here
*/
import { spaceTrim } from 'spacetrim';
import { renderPromptbookMermaid } from '../conversion/prettify/renderPromptbookMermaid';
import { renderPromptbookMermaid } from '../conversion/prettify/renderPipelineMermaidOptions';
import { extractParametersFromPromptTemplate } from '../conversion/utils/extractParametersFromPromptTemplate';

@@ -11,3 +11,2 @@ import { extractVariables } from '../conversion/utils/extractVariables';

import { isValidJsonString } from '../formats/json/utils/isValidJsonString';
import { CountUtils } from '../utils/expectation-counters/index';
import { countCharacters } from '../utils/expectation-counters/countCharacters';

@@ -17,5 +16,5 @@ import { countLines } from '../utils/expectation-counters/countLines';

import { countParagraphs } from '../utils/expectation-counters/countParagraphs';
import { countSentences } from '../utils/expectation-counters/countSentences';
import { splitIntoSentences } from '../utils/expectation-counters/countSentences';
import { countSentences, splitIntoSentences } from '../utils/expectation-counters/countSentences';
import { countWords } from '../utils/expectation-counters/countWords';
import { CountUtils } from '../utils/expectation-counters/index';
import { extractParameters } from '../utils/extractParameters';

@@ -27,7 +26,6 @@ import { extractAllBlocksFromMarkdown } from '../utils/markdown/extractAllBlocksFromMarkdown';

import { removeMarkdownFormatting } from '../utils/markdown/removeMarkdownFormatting';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';
import type { IKeywords, string_keyword } from '../utils/normalization/IKeywords';
import { capitalize } from '../utils/normalization/capitalize';
import { decapitalize } from '../utils/normalization/decapitalize';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';
import type { IKeywords } from '../utils/normalization/IKeywords';
import type { string_keyword } from '../utils/normalization/IKeywords';
import { isValidKeyword } from '../utils/normalization/isValidKeyword';

@@ -37,5 +35,5 @@ import { nameToUriPart } from '../utils/normalization/nameToUriPart';

import { normalizeToKebabCase } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case } from '../utils/normalization/normalizeTo_snake_case';

@@ -57,3 +55,3 @@ import { normalizeWhitespaces } from '../utils/normalization/normalizeWhitespaces';

import { PROMPTBOOK_VERSION } from '../version';
export { forEachAsync, PROMPTBOOK_VERSION };
export { PROMPTBOOK_VERSION, forEachAsync };
export { extractAllBlocksFromMarkdown, // <- [🌻]

@@ -63,3 +61,3 @@ extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]

removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
export { CountUtils, countCharacters, countLines, countPages, countParagraphs, countSentences, countWords };
export { splitIntoSentences };

@@ -73,3 +71,3 @@ export declare const normalizeTo: {

};
export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { DIACRITIC_VARIANTS_LETTERS, IKeywords, capitalize, decapitalize, isValidKeyword, nameToUriPart, nameToUriParts, normalizeToKebabCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_camelCase, normalizeTo_snake_case, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { extractParametersFromPromptTemplate, renameParameter, renderPromptbookMermaid };

@@ -76,0 +74,0 @@ export { difference, intersection, union };

@@ -10,4 +10,4 @@ /**

/**
* The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createLibraryFromDirectory`
* The name of the builded promptbook library made by CLI `promptbook make` and for lookup in `createCollectionFromDirectory`
*/
export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "index";
/**
* Options for `prettifyPromptbookString` function
* Options for `prettifyPipelineString` function
*/

@@ -4,0 +4,0 @@ export type PrettifyOptions = {

@@ -1,2 +0,2 @@

import type { PromptTemplateJson } from '../../types/PromptbookJson/PromptTemplateJson';
import type { PromptTemplateJson } from '../../types/PipelineJson/PromptTemplateJson';
import type { string_name } from '../../types/typeAliases';

@@ -8,3 +8,3 @@ /**

* @returns the set of parameter names
* @throws {PromptbookSyntaxError} if the script is invalid
* @throws {SyntaxError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'executionType' | 'content'>): Set<string_name>;

@@ -8,3 +8,3 @@ import type { string_javascript } from '../../types/typeAliases';

* @returns the list of variable names
* @throws {PromptbookSyntaxError} if the script is invalid
* @throws {SyntaxError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractVariables(script: string_javascript): Set<string_javascript_name>;

@@ -7,6 +7,6 @@ import type { Command } from '../../types/Command';

* @returns parsed command object
* @throws {PromptbookSyntaxError} if the command is invalid
* @throws {SyntaxError} if the command is invalid
*
* @private within the promptbookStringToJson
* @private within the pipelineStringToJson
*/
export declare function parseCommand(listItem: string_markdown_text): Command;

@@ -8,3 +8,3 @@ /**

* @returns parsed number
* @throws {PromptbookSyntaxError} if the value is not a number
* @throws {SyntaxError} if the value is not a number
*

@@ -11,0 +11,0 @@ * @private within the parseCommand

@@ -1,2 +0,2 @@

import type { PromptbookJson } from '../../types/PromptbookJson/PromptbookJson';
import type { PipelineJson } from '../../types/PipelineJson/PipelineJson';
import type { string_name } from '../../types/typeAliases';

@@ -8,3 +8,3 @@ type RenameParameterOptions = {

*/
promptbook: PromptbookJson;
promptbook: PipelineJson;
/**

@@ -23,5 +23,5 @@ * Original parameter name that should be replaced

*
* @throws {PromptbookLogicError} If the new parameter name is already used in the promptbook
* @throws {PipelineLogicError} If the new parameter name is already used in the promptbook
*/
export declare function renameParameter(options: RenameParameterOptions): PromptbookJson;
export declare function renameParameter(options: RenameParameterOptions): PipelineJson;
export {};
/**
* This error occurs when some expectation is not met in the execution of the pipeline
*
* @private Always catched and rethrown as `PromptbookExecutionError`
* Note: This is a kindof subtype of PromptbookExecutionError
* @private Always catched and rethrown as `ExecutionError`
* Note: This is a kindof subtype of ExecutionError
*/

@@ -7,0 +7,0 @@ export declare class ExpectError extends Error {

/**
* This error occurs during the parameter replacement in the template
*
* Note: This is a kindof subtype of PromptbookExecutionError because it occurs during the execution of the pipeline
* Note: This is a kindof subtype of ExecutionError because it occurs during the execution of the pipeline
*/

@@ -6,0 +6,0 @@ export declare class TemplateError extends Error {

@@ -6,3 +6,3 @@ import type { PromptbookExecutor } from './PromptbookExecutor';

* @param executionResult - The partial result of the promptnook execution
* @throws {PromptbookExecutionError} If the execution is not successful or if multiple errors occurred
* @throws {ExecutionError} If the execution is not successful or if multiple errors occurred
*/

@@ -9,0 +9,0 @@ export declare function assertsExecutionSuccessful(executionResult: Pick<Awaited<ReturnType<PromptbookExecutor>>, 'isSuccessful' | 'errors'>): void;

@@ -1,2 +0,2 @@

import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExecutionTools } from './ExecutionTools';

@@ -19,3 +19,3 @@ import type { PromptbookExecutor } from './PromptbookExecutor';

*/
readonly promptbook: PromptbookJson;
readonly promptbook: PipelineJson;
/**

@@ -34,3 +34,3 @@ * The execution tools to be used during the execution of the PROMPTBOOK

* @returns The executor function
* @throws {PromptbookLogicError} on logical error in the promptbook
* @throws {PipelineLogicError} on logical error in the promptbook
*/

@@ -37,0 +37,0 @@ export declare function createPromptbookExecutor(options: CreatePromptbookExecutorOptions): PromptbookExecutor;

@@ -5,3 +5,3 @@ import type { LlmExecutionTools } from './LlmExecutionTools';

/**
* All the tools needed to execute prompts (template pipelines).
* All the tools needed to execute pipelines.
*

@@ -8,0 +8,0 @@ * @see https://github.com/webgptorg/promptbook#execution-tools

@@ -19,7 +19,7 @@ import type { Promisable } from 'type-fest';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Use a completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**

@@ -50,4 +50,4 @@ * List all available models that can be used

* TODO: [🍓][♐] Some heuristic to pick the best model in listed models
* TODO: [🏳] gptChat -> chat, gptComplete -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both gptComplete and gptChat
* TODO: [🏳] callChatModel -> chat, callCompletionModel -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
*/
import type { KebabCase } from 'type-fest';
import type { ExpectationUnit } from '../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from '../types/PipelineJson/PromptTemplateJson';
import type { number_positive } from '../types/typeAliases';

@@ -4,0 +4,0 @@ import type { number_usd } from '../types/typeAliases';

@@ -5,4 +5,4 @@ /**

* @returns
* @throws {PromptbookExecutionError}
* @throws {ExecutionError}
*/
export declare function extractMultiplicatedOccurrence(message: string): string;

@@ -1,2 +0,2 @@

import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Function checkExpectations will check if the expectations on given value are met

import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { KnowledgeJson } from '../../../types/PromptbookJson/KnowledgeJson';
import type { KnowledgeJson } from '../../../types/PipelineJson/KnowledgeJson';
import type { string_markdown } from '../../../types/typeAliases';

@@ -4,0 +4,0 @@ type PrepareKnowledgeFromMarkdownOptions = {

import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { KnowledgeJson } from '../../../types/PromptbookJson/KnowledgeJson';
import type { KnowledgeJson } from '../../../types/PipelineJson/KnowledgeJson';
import type { string_base64 } from '../../../types/typeAliases';

@@ -4,0 +4,0 @@ type PrepareKnowledgeFromPdfOptions = {

@@ -1,8 +0,8 @@

import type { PromptbookJson } from '../types/PromptbookJson/PromptbookJson';
import type { PromptbookLibrary } from './PromptbookLibrary';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { PipelineCollection } from './PipelineCollection';
/**
* Converts PromptbookLibrary to serialized JSON
* Converts PipelineCollection to serialized JSON
*
* Note: Functions `libraryToJson` and `createLibraryFromJson` are complementary
* Note: Functions `libraryToJson` and `createCollectionFromJson` are complementary
*/
export declare function libraryToJson(library: PromptbookLibrary): Promise<Array<PromptbookJson>>;
export declare function libraryToJson(library: PipelineCollection): Promise<Array<PipelineJson>>;

@@ -25,7 +25,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls Anthropic Claude API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -47,4 +47,4 @@ * Get the model that should be used as default

* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
*/

@@ -25,7 +25,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls Azure OpenAI API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -41,4 +41,4 @@ * Changes Azure error (which is not propper Error but object) to propper Error

/**
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom AzureOpenaiError
*/
import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
import type { Expectations } from '../../types/PromptbookJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -4,0 +4,0 @@ * Gets the expectations and creates a fake text that meets the expectations

@@ -16,7 +16,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Mocks completion model
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -23,0 +23,0 @@ * List all available mocked-models that can be used

@@ -16,7 +16,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
/**
* Fakes completion model
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
/**

@@ -29,2 +29,2 @@ * List all available fake-models that can be used

* TODO: [🕵️‍♀️] Maybe just remove
*/
*/

@@ -23,11 +23,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls the best available completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls the best available model
*/
private gptCommon;
private callModelCommon;
/**

@@ -34,0 +34,0 @@ * List all available models that can be used

@@ -10,3 +10,3 @@ import type OpenAI from 'openai';

* @param rawResponse The raw response from OpenAI API
* @throws {PromptbookExecutionError} If the usage is not defined in the response from OpenAI
* @throws {ExecutionError} If the usage is not defined in the response from OpenAI
* @private internal util of `OpenAiExecutionTools`

@@ -13,0 +13,0 @@ */

@@ -7,3 +7,3 @@ /**

*
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
* @private within the package, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
*/

@@ -14,5 +14,5 @@ type string_model_price = `$${number}.${number} / ${number}M tokens`;

*
* @private within the library, used only as internal helper for `OPENAI_MODELS`
* @private within the package, used only as internal helper for `OPENAI_MODELS`
*/
export declare function computeUsage(value: string_model_price): number;
export {};

@@ -26,7 +26,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<PromptChatResult>;
callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<PromptChatResult>;
/**
* Calls OpenAI API to use a complete model.
*/
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -59,4 +59,4 @@ * Calls OpenAI API to use a embedding model

* TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
* TODO: Maybe Create some common util for gptChat and gptComplete
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
*/
import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
import type { PromptbookLibrary } from '../../../library/PromptbookLibrary';
import type { PipelineCollection } from '../../../library/PipelineCollection';
import type { client_id } from '../../../types/typeAliases';

@@ -23,3 +23,3 @@ import type { string_uri } from '../../../types/typeAliases';

*/
readonly library: PromptbookLibrary;
readonly library: PipelineCollection;
/**

@@ -26,0 +26,0 @@ * Creates llm execution tools for each client

@@ -25,11 +25,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls remote proxy server to use a completion model.
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls remote proxy server to use both completion or chat model.
*/
private gptCommon;
private callModelCommon;
/**

@@ -36,0 +36,0 @@ * List all available models that can be used

@@ -13,3 +13,3 @@ import type { IDestroyable } from 'destroyable';

/**
* TODO: [⚖] Expose the library to be able to connect to same library via createLibraryFromUrl
* TODO: [⚖] Expose the library to be able to connect to same library via createCollectionFromUrl
* TODO: Handle progress - support streaming

@@ -16,0 +16,0 @@ * TODO: [🤹‍♂️] Do not hang up immediately but wait until client closes OR timeout

@@ -0,8 +1,8 @@

import type { ExecutionType } from './ExecutionTypes';
import type { ModelRequirements } from './ModelRequirements';
import type { ExpectationAmount } from './PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from './PipelineJson/PromptTemplateJson';
import type { string_markdown_text } from './typeAliases';
import type { string_name } from './typeAliases';
import type { string_version } from './typeAliases';
import type { ExecutionType } from './ExecutionTypes';
import type { ModelRequirements } from './ModelRequirements';
import type { ExpectationAmount } from './PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from './PromptbookJson/PromptTemplateJson';
/**

@@ -9,0 +9,0 @@ * Command is one piece of the prompt template which adds some logic to the prompt template or the whole pipeline.

@@ -5,4 +5,4 @@ import type { FromtoItems } from '../../utils/FromtoItems';

*
* @private within the library
* @private within the package
*/
export declare function countWorkingDuration(items: FromtoItems): number;
import type { PromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../Prompt';
import type { string_markdown_text } from '../typeAliases';
import type { string_promptbook_url } from '../typeAliases';
import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';

@@ -20,3 +20,3 @@ /**

*/
readonly promptbookUrl?: string_promptbook_url;
readonly promptbookUrl?: string_pipeline_url;
/**

@@ -23,0 +23,0 @@ * Title of from promptbook which was executed

/**
* Parameters of the prompt template (pipeline)
* Parameters of the pipeline
*

@@ -4,0 +4,0 @@ * There are three types of parameters:

import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
import type { ExpectFormatCommand } from './Command';
import type { ModelRequirements } from './ModelRequirements';
import type { Expectations } from './PromptbookJson/PromptTemplateJson';
import type { Expectations } from './PipelineJson/PromptTemplateJson';
import type { string_name } from './typeAliases';
import type { string_pipeline_url_with_hashtemplate } from './typeAliases';
import type { string_prompt } from './typeAliases';
import type { string_promptbook_url_with_hashtemplate } from './typeAliases';
import type { string_title } from './typeAliases';

@@ -55,3 +55,3 @@ /**

*/
readonly promptbookUrl: string_promptbook_url_with_hashtemplate;
readonly promptbookUrl: string_pipeline_url_with_hashtemplate;
/**

@@ -58,0 +58,0 @@ * Parameters used in the prompt

@@ -184,3 +184,3 @@ /**

*/
export type string_promptbook_url = string;
export type string_pipeline_url = string;
/**

@@ -191,3 +191,3 @@ * Semantic helper

*/
export type string_promptbook_url_with_hashtemplate = string;
export type string_pipeline_url_with_hashtemplate = string;
/**

@@ -194,0 +194,0 @@ * Semantic helper

@@ -8,3 +8,3 @@ import type { string_char_emoji } from '../types/typeAliasEmoji';

*
* @private within the library
* @private within the package
* @deprecated Use /\p{Extended_Pictographic}/ instead

@@ -17,3 +17,3 @@ */

*
* @private within the library
* @private within the package
* @deprecated Use /\p{Extended_Pictographic}/ instead

@@ -20,0 +20,0 @@ */

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of characters in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of lines in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of pages in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of paragraphs in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Split text into sentences

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -3,0 +3,0 @@ * Counts number of words in the text

@@ -1,3 +0,3 @@

import type { ExpectationAmount } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PromptbookJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PipelineJson/PromptTemplateJson';
/**

@@ -4,0 +4,0 @@ * Index of all counter functions

/**
* Format either small or big number
*
* @private within the library
* @private within the package
*/
export declare function formatNumber(value: number): string;

@@ -5,4 +5,4 @@ import type { MarkdownStructure } from './MarkdownStructure';

*
* @private within the library
* @private within the package
*/
export declare function countMarkdownStructureDeepness(markdownStructure: MarkdownStructure): number;
/**
* Represents the structure of a markdown file.
*
* @private within the library
* @private within the package
*/

@@ -6,0 +6,0 @@ export type MarkdownStructure = {

@@ -11,4 +11,4 @@ import type { MarkdownStructure } from './MarkdownStructure';

*
* @private within the library
* @private within the package
*/
export declare function markdownToMarkdownStructure(markdown: string): MarkdownStructure;

@@ -6,3 +6,3 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/

@@ -9,0 +9,0 @@ export declare function addAutoGeneratedSection(content: string_markdown, options: {

@@ -34,3 +34,3 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/

@@ -37,0 +37,0 @@ export declare function createMarkdownChart(options: CreateMarkdownChartOptions): string_markdown;

@@ -6,4 +6,4 @@ import type { string_markdown } from '../../types/typeAliases';

*
* @private within the library
* @private within the package
*/
export declare function createMarkdownTable(table: Array<Array<string_markdown_text>>): string_markdown;

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc