Huge News!Announcing our $40M Series B led by Abstract Ventures.Learn More
Socket
Sign inDemoInstall
Socket

@promptbook/node

Package Overview
Dependencies
Maintainers
0
Versions
243
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@promptbook/node - npm Package Compare versions

Comparing version 0.60.1 to 0.61.0-0

esm/typings/src/_packages/markdown-utils.d.ts

65

esm/typings/promptbook-collection/index.d.ts

@@ -1,2 +0,2 @@

declare const _default: {
declare const _default: ({
title: string;

@@ -15,3 +15,3 @@ pipelineUrl: string;

dependentParameterNames: string[];
executionType: string;
blockType: string;
modelRequirements: {

@@ -24,4 +24,61 @@ modelVariant: string;

}[];
knowledge: never[];
}[];
knowledge: {
name: string;
title: string;
content: string;
keywords: string[];
index: {
modelName: string;
position: number[];
}[];
sources: {
title: string;
href: string;
}[];
}[];
sourceFile: string;
} | {
title: string;
pipelineUrl: string;
promptbookVersion: string;
parameters: {
name: string;
description: string;
isInput: boolean;
isOutput: boolean;
}[];
promptTemplates: {
name: string;
title: string;
dependentParameterNames: string[];
blockType: string;
expectations: {
words: {
min: number;
max: number;
};
};
modelRequirements: {
modelVariant: string;
modelName: string;
};
content: string;
resultingParameterName: string;
}[];
knowledge: {
name: string;
title: string;
content: string;
keywords: string[];
index: {
modelName: string;
position: number[];
}[];
sources: {
title: string;
href: string;
}[];
}[];
sourceFile: string;
})[];
export default _default;

16

esm/typings/src/_packages/core.index.d.ts

@@ -6,2 +6,4 @@ import { collectionToJson } from '../collection/collectionToJson';

import { createSubcollection } from '../collection/constructors/createSubcollection';
import { BlockTypes } from '../commands/BLOCK/BlockTypes';
import { RESERVED_PARAMETER_NAMES } from '../config';
import { pipelineJsonToString } from '../conversion/pipelineJsonToString';

@@ -13,8 +15,7 @@ import { pipelineStringToJson } from '../conversion/pipelineStringToJson';

import { CollectionError } from '../errors/CollectionError';
import { ExecutionError } from '../errors/ExecutionError';
import { NotFoundError } from '../errors/NotFoundError';
import { ParsingError } from '../errors/ParsingError';
import { PipelineExecutionError } from '../errors/PipelineExecutionError';
import { PipelineLogicError } from '../errors/PipelineLogicError';
import { ReferenceError } from '../errors/ReferenceError';
import { SyntaxError } from '../errors/SyntaxError';
import { TemplateError } from '../errors/TemplateError';
import { UnexpectedError } from '../errors/UnexpectedError';

@@ -32,4 +33,3 @@ import { ExpectError } from '../errors/_ExpectError';

import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
import { ExecutionTypes } from '../types/ExecutionTypes';
import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
import type { ExecutionReportStringOptions } from '../types/execution-report/ExecutionReportStringOptions';

@@ -40,3 +40,3 @@ import { ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';

export { PROMPTBOOK_VERSION };
export { ExecutionTypes };
export { BlockTypes, RESERVED_PARAMETER_NAMES };
export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };

@@ -46,4 +46,4 @@ export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };

export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, validatePipeline };
export { createPipelineExecutor, MultipleLlmExecutionTools };
export { createPipelineExecutor, joinLlmExecutionTools };
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
export { CollectionError, ExecutionError, ExpectError, NotFoundError, PipelineLogicError, ReferenceError, SyntaxError, TemplateError, UnexpectedError, };
export { CollectionError, ExpectError, NotFoundError, ParsingError, PipelineExecutionError, PipelineLogicError, ReferenceError, UnexpectedError, };
import type { PipelineCollection } from '../collection/PipelineCollection';
import type { BlockType } from '../commands/BLOCK/BlockTypes';
import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';

@@ -10,11 +11,15 @@ import { EmbeddingVector } from '../execution/EmbeddingVector';

import type { UserInterfaceTools, UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
import type { ExecutionType } from '../types/ExecutionTypes';
import type { ModelRequirements, ModelVariant } from '../types/ModelRequirements';
import type { Parameters } from '../types/Parameters';
import type { ExpectationAmount, ExpectationUnit, Expectations } from '../types/PipelineJson/Expectations';
import { EXPECTATION_UNITS } from '../types/PipelineJson/Expectations';
import { KnowledgeJson } from '../types/PipelineJson/KnowledgeJson';
import type { LlmTemplateJson } from '../types/PipelineJson/LlmTemplateJson';
import { MaterialKnowledgePieceJson } from '../types/PipelineJson/MaterialKnowledgePieceJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExpectationAmount, ExpectationUnit, Expectations, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptDialogJson } from '../types/PipelineJson/PromptDialogJson';
import type { PromptTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PipelineJson/PromptTemplateParameterJson';
import type { ScriptJson } from '../types/PipelineJson/ScriptJson';
import type { SimpleTemplateJson } from '../types/PipelineJson/SimpleTemplateJson';
import type { PipelineString } from '../types/PipelineString';

@@ -26,10 +31,10 @@ import type { Prompt } from '../types/Prompt';

import type { string_char_emoji } from '../types/typeAliasEmoji';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_semantic_version, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid } from '../types/typeAliases';
import type { FromtoItems } from '../utils/FromtoItems';
import { PROMPTBOOK_VERSION } from '../version';
import { PROMPTBOOK_VERSION, string_promptbook_version } from '../version';
export { PROMPTBOOK_VERSION };
export { EXPECTATION_UNITS };
export type { AvailableModel, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineExecutor, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, };
export type { AvailableModel, BlockType, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineExecutor, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_promptbook_version, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid, string_semantic_version as string_version, };
/**
* TODO: Delete type aliases (from ../types/typeAliases) that are not exported here
*/

@@ -5,3 +5,2 @@ import { spaceTrim } from 'spacetrim';

import { extractVariables } from '../conversion/utils/extractVariables';
import { parseNumber } from '../conversion/utils/parseNumber';
import { renameParameter } from '../conversion/utils/renameParameter';

@@ -20,7 +19,2 @@ import { titleToName } from '../conversion/utils/titleToName';

import { extractParameters } from '../utils/extractParameters';
import { extractAllBlocksFromMarkdown } from '../utils/markdown/extractAllBlocksFromMarkdown';
import { extractAllListItemsFromMarkdown } from '../utils/markdown/extractAllListItemsFromMarkdown';
import { extractOneBlockFromMarkdown } from '../utils/markdown/extractOneBlockFromMarkdown';
import { removeContentComments } from '../utils/markdown/removeContentComments';
import { removeMarkdownFormatting } from '../utils/markdown/removeMarkdownFormatting';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';

@@ -33,7 +27,7 @@ import type { IKeywords, string_keyword } from '../utils/normalization/IKeywords';

import { nameToUriParts } from '../utils/normalization/nameToUriParts';
import { normalizeToKebabCase } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case } from '../utils/normalization/normalizeTo_snake_case';
import { normalizeToKebabCase, string_kebab_case } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_PascalCase, string_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE, string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase, string_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case, string_snake_case } from '../utils/normalization/normalizeTo_snake_case';
import { normalizeWhitespaces } from '../utils/normalization/normalizeWhitespaces';

@@ -44,2 +38,3 @@ import { parseKeywords } from '../utils/normalization/parseKeywords';

import { searchKeywords } from '../utils/normalization/searchKeywords';
import { parseNumber } from '../utils/parseNumber';
import { extractBlock } from '../utils/postprocessing/extractBlock';

@@ -54,9 +49,17 @@ import { removeEmojis } from '../utils/removeEmojis';

import { unwrapResult } from '../utils/unwrapResult';
import { isValidFilePath } from '../utils/validators/filePath/isValidFilePath';
import { isValidJavascriptName } from '../utils/validators/javascriptName/isValidJavascriptName';
import { isValidPromptbookVersion } from '../utils/validators/semanticVersion/isValidPromptbookVersion';
import { isValidSemanticVersion } from '../utils/validators/semanticVersion/isValidSemanticVersion';
import { isHostnameOnPrivateNetwork } from '../utils/validators/url/isHostnameOnPrivateNetwork';
import { isUrlOnPrivateNetwork } from '../utils/validators/url/isUrlOnPrivateNetwork';
import { isValidPipelineUrl } from '../utils/validators/url/isValidPipelineUrl';
import { isValidUrl } from '../utils/validators/url/isValidUrl';
import { isValidUuid } from '../utils/validators/uuid/isValidUuid';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION, forEachAsync };
export { extractAllBlocksFromMarkdown, // <- [🌻]
extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]
extractOneBlockFromMarkdown, extractParameters, extractVariables, isValidJsonString, parseNumber, // <- [🌻]
removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { CountUtils, countCharacters, countLines, countPages, countParagraphs, countSentences, countWords };
export { forEachAsync, PROMPTBOOK_VERSION };
export { extractBlock, // <- [🌻] + maybe export through `@promptbook/markdown-utils`
extractParameters, extractVariables, isHostnameOnPrivateNetwork, isUrlOnPrivateNetwork, isValidFilePath, isValidJavascriptName, isValidJsonString, isValidPipelineUrl as isValidPipelineUrl, isValidPromptbookVersion, isValidSemanticVersion, isValidUrl, isValidUuid, parseNumber, // <- [🌻]
removeEmojis, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
export { splitIntoSentences };

@@ -70,8 +73,9 @@ export declare const normalizeTo: {

};
export { DIACRITIC_VARIANTS_LETTERS, IKeywords, capitalize, decapitalize, isValidKeyword, nameToUriPart, nameToUriParts, normalizeToKebabCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_camelCase, normalizeTo_snake_case, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export type { string_camelCase, string_kebab_case, string_PascalCase, string_SCREAMING_CASE, string_snake_case };
export { extractParametersFromPromptTemplate, renameParameter, renderPromptbookMermaid };
export { difference, intersection, union };
/**
* TODO: [🧠] Maybe create some independent package like `markdown-tools` from both here exported and @private utilities
* TODO: [🧠] Maybe create some independent package like `@promptbook/markdown-utils`
* Note: [🕙] It does not make sense to have simple lower / UPPER case normalization
*/

@@ -9,1 +9,4 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

export declare function collectionToJson(collection: PipelineCollection): Promise<Array<PipelineJson>>;
/**
* TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
*/
export {};
/**
* Note: [🐠] For example here URL https://example.com/pipeline.ptbk.md is not valid
* because it is on a private network BUT it's very hard to debug because
* there is no error message and the false return (the error) happens deep in:
* `isValidPipelineUrl` -> `isValidPipelineUrl` -> `isUrlOnPrivateNetwork`
*/

@@ -0,1 +1,2 @@

import type { PipelineStringToJsonOptions } from '../../conversion/pipelineStringToJson';
import type { string_folder_path } from '../../types/typeAliases';

@@ -6,3 +7,3 @@ import type { PipelineCollection } from '../PipelineCollection';

*/
type CreatePipelineCollectionFromDirectoryOptions = {
type CreatePipelineCollectionFromDirectoryOptions = PipelineStringToJsonOptions & {
/**

@@ -9,0 +10,0 @@ * If true, the directory is searched recursively for promptbooks

@@ -13,1 +13,5 @@ /**

export declare const PIPELINE_COLLECTION_BASE_FILENAME = "index";
/**
* The names of the parameters that are reserved for special purposes
*/
export declare const RESERVED_PARAMETER_NAMES: string[];

@@ -11,3 +11,5 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

/**
* TODO: !!!!! Implement new features and commands into `promptTemplateParameterJsonToString`
* TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
* TODO: Escape all
*/

@@ -7,3 +7,3 @@ import type { LlmExecutionTools } from '../execution/LlmExecutionTools';

*/
type PipelineStringToJsonOptions = {
export type PipelineStringToJsonOptions = {
/**

@@ -24,3 +24,3 @@ * Tools for processing required for knowledge processing *(not for actual execution)*

* @returns {Promptbook} compiled in JSON format (.ptbk.json)
* @throws {SyntaxError} if the promptbook string is not valid
* @throws {ParsingError} if the promptbook string is not valid
*

@@ -31,5 +31,4 @@ * Note: This function does not validate logic of the pipeline only the syntax

export declare function pipelineStringToJson(pipelineString: PipelineString, options?: PipelineStringToJsonOptions): Promise<PipelineJson>;
export {};
/**
* TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
*/

@@ -13,3 +13,3 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

* @returns {Promptbook} compiled in JSON format (.ptbk.json)
* @throws {SyntaxError} if the promptbook string is not valid
* @throws {ParsingError} if the promptbook string is not valid
*

@@ -21,5 +21,7 @@ * Note: This function does not validate logic of the pipeline only the syntax

/**
* TODO: Report here line/column of error
* TODO: !!!! Warn if used only sync version
* TODO: [🚞] Report here line/column of error
* TODO: Use spaceTrim more effectively
* TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
* TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
*/

@@ -8,7 +8,7 @@ import type { PromptTemplateJson } from '../../types/PipelineJson/PromptTemplateJson';

* @returns the set of parameter names
* @throws {SyntaxError} if the script is invalid
* @throws {ParsingError} if the script is invalid
*/
export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'executionType' | 'content'>): Set<string_name>;
export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'blockType' | 'content'>): Set<string_name>;
/**
* TODO: [🔣] If script require contentLanguage
*/

@@ -8,3 +8,3 @@ import type { string_javascript } from '../../types/typeAliases';

* @returns the list of variable names
* @throws {SyntaxError} if the script is invalid
* @throws {ParsingError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractVariables(script: string_javascript): Set<string_javascript_name>;

export {};
/**
* TODO: Include automatically all samples from logic errors folder (same with syntax errors)
*/
/**
* This error occurs when some expectation is not met in the execution of the pipeline
*
* @private Always caught and rethrown as `ExecutionError`
* Note: This is a kind of subtype of ExecutionError
* @private Always caught and rethrown as `PipelineExecutionError`
* Note: This is a kind of subtype of PipelineExecutionError
*/

@@ -7,0 +7,0 @@ export declare class ExpectError extends Error {

@@ -6,3 +6,3 @@ import type { PipelineExecutor } from './PipelineExecutor';

* @param executionResult - The partial result of the promptbook execution
* @throws {ExecutionError} If the execution is not successful or if multiple errors occurred
* @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
*/

@@ -9,0 +9,0 @@ export declare function assertsExecutionSuccessful(executionResult: Pick<Awaited<ReturnType<PipelineExecutor>>, 'isSuccessful' | 'errors'>): void;

@@ -0,1 +1,2 @@

import type { Arrayable } from '../types/Arrayable';
import type { LlmExecutionTools } from './LlmExecutionTools';

@@ -13,6 +14,6 @@ import type { ScriptExecutionTools } from './ScriptExecutionTools';

*
* Tip: Combine multiple LLM execution tools with `MultipleLlmExecutionTools`
* Tip: Combine multiple LLM execution tools - use array of LlmExecutionTools instead of single LlmExecutionTools
* @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#llm-execution-tools
*/
llm: LlmExecutionTools;
llm?: Arrayable<LlmExecutionTools>;
/**

@@ -25,3 +26,3 @@ * Tools for executing scripts

*/
script: Array<ScriptExecutionTools>;
script?: Arrayable<ScriptExecutionTools>;
/**

@@ -28,0 +29,0 @@ * Tools for interacting with the user

import type { Promisable } from 'type-fest';
import type { ModelVariant } from '../types/ModelRequirements';
import type { Prompt } from '../types/Prompt';
import type { string_markdown } from '../types/typeAliases';
import type { string_markdown_text } from '../types/typeAliases';
import type { string_model_name } from '../types/typeAliases';

@@ -8,2 +10,3 @@ import type { string_title } from '../types/typeAliases';

import type { PromptCompletionResult } from './PromptResult';
import type { PromptEmbeddingResult } from './PromptResult';
/**

@@ -18,10 +21,26 @@ * Container for all the tools needed to execute prompts to large language models like GPT-4

/**
* Use a chat model
* Title of the model provider
*
* @example "OpenAI"
*/
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
readonly title: string_title & string_markdown_text;
/**
* Use a completion model
* Description of the provider
*
* @example "Use all models from OpenAI"
*/
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
readonly description: string_markdown;
/**
* Calls a chat model
*/
callChatModel?(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls a completion model
*/
callCompletionModel?(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls an embedding model
*/
callEmbeddingModel?(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* List all available models that can be used

@@ -49,6 +68,6 @@ */

/**
* TODO: !!!! Translation model
* TODO: [🧠] Emulation of one type of model with another one - emulate chat with completion; emulate translation with chat
* TODO: [🍓][♐] Some heuristic to pick the best model in listed models
* TODO: [🏳] callChatModel -> chat, callCompletionModel -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
*/
import type { KebabCase } from 'type-fest';
import type { ExpectationUnit } from '../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
import type { number_positive } from '../types/typeAliases';

@@ -14,3 +14,3 @@ import type { number_usd } from '../types/typeAliases';

*/
export type PromptResult = PromptCompletionResult | PromptChatResult;
export type PromptResult = PromptCompletionResult | PromptChatResult | PromptEmbeddingResult;
/**

@@ -17,0 +17,0 @@ * Prompt completion result

@@ -5,4 +5,4 @@ /**

* @returns
* @throws {ExecutionError}
* @throws {PipelineExecutionError}
*/
export declare function extractMultiplicatedOccurrence(message: string): string;

@@ -1,2 +0,2 @@

import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Function checkExpectations will check if the expectations on given value are met

@@ -9,3 +9,3 @@ import type { Parameters } from '../../types/Parameters';

* @returns the template with replaced parameters
* @throws {TemplateError} if parameter is not defined, not closed, or not opened
* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
*

@@ -12,0 +12,0 @@ * @private within the createPipelineExecutor

@@ -11,2 +11,4 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';

* The LLM tools to use for the conversion and extraction of knowledge
*
* Note: If you want to use multiple LLMs, you can use `joinLlmExecutionTools` to join them first
*/

@@ -23,1 +25,4 @@ llmTools: LlmExecutionTools;

export {};
/**
* 11:11
*/

@@ -17,2 +17,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

/**
* Note: [🤖] Add models of new variant
* TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?

@@ -19,0 +20,0 @@ * TODO: [🧠] Some mechanism to propagate unsureness

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { PromptChatResult } from '../../execution/PromptResult';
import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';

@@ -22,2 +24,4 @@ /**

constructor(options?: AnthropicClaudeExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -28,6 +32,2 @@ * Calls Anthropic Claude API to use a chat model.

/**
* Calls Anthropic Claude API to use a complete model.
*/
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**
* Get the model that should be used as default

@@ -34,0 +34,0 @@ */

@@ -6,2 +6,5 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';

@@ -23,2 +26,4 @@ /**

constructor(options: AzureOpenAiExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -25,0 +30,0 @@ * Calls OpenAI API to use a chat model.

import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import { OpenAiExecutionTools } from '../openai/OpenAiExecutionTools';

@@ -7,2 +10,4 @@ /**

export declare class LangtailExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
get title(): string_title & string_markdown_text;
get description(): string_markdown;
}
import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/Expectations';
/**

@@ -4,0 +4,0 @@ * Gets the expectations and creates a fake text that meets the expectations

@@ -7,2 +7,5 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**

@@ -14,2 +17,4 @@ * Mocked execution Tools for just echoing the requests for testing purposes.

constructor(options?: CommonExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -16,0 +21,0 @@ * Mocks chat model

@@ -6,3 +6,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**

@@ -14,2 +18,4 @@ * Mocked execution Tools for just faking expected responses for testing purposes

constructor(options?: CommonExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -24,2 +30,6 @@ * Fakes chat model

/**
* Fakes embedding model
*/
callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptEmbeddingResult>;
/**
* List all available fake-models that can be used

@@ -26,0 +36,0 @@ */

@@ -5,7 +5,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**
* Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
*
* @see https://github.com/webgptorg/promptbook#multiple-server
* @private Internal utility of `joinLlmExecutionTools`
*/

@@ -21,2 +25,4 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {

constructor(...llmExecutionTools: Array<LlmExecutionTools>);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -31,2 +37,6 @@ * Calls the best available chat model

/**
* Calls the best available embedding model
*/
callEmbeddingModel(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* Calls the best available model

@@ -41,1 +51,4 @@ */

}
/**
 * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first available model BUT all of them
*/

@@ -10,3 +10,3 @@ import type OpenAI from 'openai';

* @param rawResponse The raw response from OpenAI API
* @throws {ExecutionError} If the usage is not defined in the response from OpenAI
* @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
* @private internal util of `OpenAiExecutionTools`

@@ -13,0 +13,0 @@ */

@@ -18,2 +18,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

/**
* Note: [🤖] Add models of new variant
* TODO: [🧠] Some mechanism to propagate unsureness

@@ -20,0 +21,0 @@ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing

@@ -7,2 +7,5 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';

@@ -24,2 +27,4 @@ /**

constructor(options?: OpenAiExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -36,3 +41,3 @@ * Calls OpenAI API to use a chat model.

*/
embed(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptEmbeddingResult>;
callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptEmbeddingResult>;
/**

@@ -39,0 +44,0 @@ * Get the model that should be used as default

@@ -5,3 +5,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { RemoteLlmExecutionToolsOptions } from './RemoteLlmExecutionToolsOptions';

@@ -19,2 +23,4 @@ /**

constructor(options: RemoteLlmExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -25,12 +31,16 @@ * Creates a connection to the remote proxy server.

/**
* Calls remote proxy server to use a chat model.
* Calls remote proxy server to use a chat model
*/
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls remote proxy server to use a completion model.
* Calls remote proxy server to use a completion model
*/
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls remote proxy server to use both completion or chat model.
* Calls remote proxy server to use an embedding model
*/
callEmbeddingModel(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* Calls remote proxy server to use both completion or chat model
*/
private callModelCommon;

@@ -37,0 +47,0 @@ /**

@@ -18,2 +18,3 @@ import type { IDestroyable } from 'destroyable';

* TODO: [🃏] Pass here some security token to prevent malicious usage and/or DDoS
* TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
*/

@@ -0,1 +1,2 @@

import type { really_any } from '../../../types/typeAliases';
/**

@@ -8,5 +9,5 @@ * Does nothing, but preserves the function in the bundle

*/
export declare function preserve(func: (...params: Array<any>) => unknown): void;
export declare function preserve(func: (...params: Array<really_any>) => unknown): void;
/**
 * TODO: !! [1] This may cause a memory leak
*/

@@ -5,3 +5,3 @@ import type { PromptResult } from '../../execution/PromptResult';

import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';
import type { string_semantic_version } from '../typeAliases';
/**

@@ -29,7 +29,7 @@ * ExecutionReport is result of executing one promptbook

*/
readonly promptbookUsedVersion: string_version;
readonly promptbookUsedVersion: string_semantic_version;
/**
* Version from promptbook which was requested by promptbook
*/
readonly promptbookRequestedVersion?: string_version;
readonly promptbookRequestedVersion?: string_semantic_version;
/**

@@ -36,0 +36,0 @@ * Description of the promptbook which was executed

@@ -0,4 +1,5 @@

import type { string_file_path } from '../typeAliases';
import type { string_markdown_text } from '../typeAliases';
import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';
import type { string_semantic_version } from '../typeAliases';
import type { KnowledgeJson } from './KnowledgeJson';

@@ -25,2 +26,6 @@ import type { PromptTemplateJson } from './PromptTemplateJson';

/**
* Internal helper for tracking the source `.ptbk.md` file of the pipeline
*/
readonly sourceFile?: string_file_path;
/**
* Title of the promptbook

@@ -33,3 +38,3 @@ * -It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure

*/
readonly promptbookVersion: string_version;
readonly promptbookVersion: string_semantic_version;
/**

@@ -54,2 +59,3 @@ * Description of the promptbook

/**
* TODO: !!!!! Implement new commands
* Note: There was a proposal for multiple types of promptbook objects 78816ff33e2705ee1a187aa2eb8affd976d4ea1a

@@ -56,0 +62,0 @@ * But then immediately reverted back to the single type

@@ -1,145 +0,9 @@

import type { ExpectFormatCommand } from '../Command';
import type { ExecutionType } from '../ExecutionTypes';
import type { ModelRequirements } from '../ModelRequirements';
import type { ScriptLanguage } from '../ScriptLanguage';
import type { number_integer } from '../typeAliases';
import type { number_positive } from '../typeAliases';
import type { string_javascript } from '../typeAliases';
import type { string_javascript_name } from '../typeAliases';
import type { string_markdown } from '../typeAliases';
import type { string_markdown_text } from '../typeAliases';
import type { string_name } from '../typeAliases';
import type { string_prompt } from '../typeAliases';
import type { string_template } from '../typeAliases';
import type { ___ } from '../typeAliases';
import type { LlmTemplateJson } from './LlmTemplateJson';
import type { PromptDialogJson } from './PromptDialogJson';
import type { ScriptJson } from './ScriptJson';
import type { SimpleTemplateJson } from './SimpleTemplateJson';
/**
* Describes one prompt template in the promptbook
*/
export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson;
/**
* Template for prompt to LLM
*/
export type LlmTemplateJson = PromptTemplateJsonCommon & {
readonly executionType: 'PROMPT_TEMPLATE';
/**
* Requirements for the model
* - This is required only for executionType PROMPT_TEMPLATE
*/
readonly modelRequirements: ModelRequirements;
};
/**
* Expect this amount of each unit in the answer
*
* For example 5 words, 3 sentences, 2 paragraphs, ...
*
* Note: Expectations are performed after all postprocessing steps
*/
export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
min?: ExpectationAmount;
max?: ExpectationAmount;
}>>;
/**
* Units of text measurement
*/
export declare const EXPECTATION_UNITS: readonly ["CHARACTERS", "WORDS", "SENTENCES", "LINES", "PARAGRAPHS", "PAGES"];
/**
* Unit of text measurement
*/
export type ExpectationUnit = typeof EXPECTATION_UNITS[number];
/**
* Amount of text measurement
*/
export type ExpectationAmount = number_integer & (number_positive | 0);
/**
* Template for simple concatenation of strings
*/
export interface SimpleTemplateJson extends PromptTemplateJsonCommon {
readonly executionType: 'SIMPLE_TEMPLATE';
}
/**
* Template for script execution
*/
export interface ScriptJson extends PromptTemplateJsonCommon {
readonly executionType: 'SCRIPT';
/**
* Language of the script
* - This is required only for executionType SCRIPT
*
*/
readonly contentLanguage?: ScriptLanguage;
}
/**
* Template for prompt to user
*/
export interface PromptDialogJson extends PromptTemplateJsonCommon {
readonly executionType: 'PROMPT_DIALOG';
}
/**
* Common properties of all prompt templates
*/
interface PromptTemplateJsonCommon {
/**
* Name of the template
* - It must be unique across the pipeline
* - It should start uppercase and contain letters and numbers
* - The pipelineUrl together with hash and name are used to identify the prompt template in the pipeline
*/
readonly name: string_name;
/**
* Title of the prompt template
* It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
*/
readonly title: string;
/**
* Description of the prompt template
* It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
*/
readonly description?: string_markdown_text;
/**
* List of parameter names that are used in the prompt template and must be defined before the prompt template is executed
*
* Note: Joker is one of the dependent parameters
*/
readonly dependentParameterNames: Array<string_name>;
/**
     * If these parameters meet the expectations requirements, they are used instead of executing this prompt template
*/
readonly jokers?: Array<string>;
/**
* Type of the execution
* This determines if the prompt template is send to LLM, user or some scripting evaluation
*/
readonly executionType: ExecutionType;
/**
* Content of the template with {placeholders} for parameters
*/
readonly content: (string_prompt | string_javascript | string_markdown) & string_template;
/**
* List of postprocessing steps that are executed after the prompt template
*/
readonly postprocessing?: Array<string_javascript_name>;
/**
* Expect this amount of each unit in the answer
*
* For example 5 words, 3 sentences, 2 paragraphs, ...
*
* Note: Expectations are performed after all postprocessing steps
*/
readonly expectations?: Expectations;
/**
* Expect this format of the answer
*
* Note: Expectations are performed after all postprocessing steps
* @deprecated [💝]
*/
readonly expectFormat?: ExpectFormatCommand['format'];
/**
* Name of the parameter that is the result of the prompt template
*/
readonly resultingParameterName: string_name;
}
export {};
/**
* TODO: [💝] Unite object for expecting amount and format - remove expectFormat
* TODO: use one helper type> (string_prompt | string_javascript | string_markdown) & string_template
* TODO: [👙][🧠] Just selecting gpt3 or gpt4 level of model
*/
export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson | ___ | ___ | ___ | ___;

@@ -0,5 +1,5 @@

import type { ExpectFormatCommand } from '../commands/EXPECT/ExpectFormatCommand';
import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
import type { ExpectFormatCommand } from './Command';
import type { ModelRequirements } from './ModelRequirements';
import type { Expectations } from './PipelineJson/PromptTemplateJson';
import type { Expectations } from './PipelineJson/Expectations';
import type { string_name } from './typeAliases';

@@ -55,3 +55,3 @@ import type { string_pipeline_url_with_hashtemplate } from './typeAliases';

*/
readonly pipelineUrl: string_pipeline_url_with_hashtemplate;
readonly pipelineUrl?: string_pipeline_url_with_hashtemplate;
/**

@@ -58,0 +58,0 @@ * Parameters used in the prompt

@@ -1,2 +0,2 @@

import type { ExecutionType } from './ExecutionTypes';
import type { BlockType } from '../commands/BLOCK/BlockTypes';
import type { string_markdown_text } from './typeAliases';

@@ -34,3 +34,3 @@ import type { string_name } from './typeAliases';

*/
readonly executionType: ExecutionType;
readonly blockType: BlockType;
/**

@@ -37,0 +37,0 @@ * The parameter name that is being processed.

@@ -107,4 +107,17 @@ /**

*
* Markdown text with exactly ONE heading on first line NO less NO more
*/
export type string_markdown_section = string;
/**
* Semantic helper
*
* Markdown without any headings like h1, h2
* BUT with formatting, lists, blockquotes, blocks, etc. is allowed
*/
export type string_markdown_section_content = string;
/**
* Semantic helper
*
* Markdown text without any structure like h1, h2, lists, blockquotes, blocks, etc.
* BUT with bold, italic, etc.
* BUT with bold, italic, etc. is allowed
*

@@ -236,6 +249,2 @@ * For example `"**Hello** World!"`

* Semantic helper
*/
export type string_protocol = 'http:' | 'https:';
/**
* Semantic helper
*

@@ -253,2 +262,6 @@ * For example `"localhost"` or `"collboard.com"`

* Semantic helper
*/
export type string_protocol = 'http:' | 'https:';
/**
* Semantic helper
*

@@ -259,2 +272,21 @@ * For example `"pavol@hejny.org"`

/**
* Semantic helper
*
* For example `"pavol@hejny.org, jirka@webgpt.cz"`
*/
export type string_emails = string;
/**
* Branded type for UUIDs version 4
* This will not allow to pass some random string where should be only a valid UUID
*
* Use utils:
* - `randomUuid` to generate
 * - `isValidUuid` to check validity
*
* For example `"5a0a153d-7be9-4018-9eda-e0e2e2b89bd9"`
*/
export type string_uuid = string & {
readonly _type: 'uuid';
};
/**
* Branded type client id

@@ -276,3 +308,3 @@ */

*/
export type string_version = string;
export type string_semantic_version = string;
/**

@@ -439,4 +471,16 @@ * Semantic helper

/**
* Formatting helper to put void to keep longer version of prettier
*/
export type ___ = never;
/**
* Organizational helper to better mark the place where the type is missing
*/
export type TODO = any;
/**
* Organizational helper to mark a place where to really use any
*/
export type really_any = any;
/**
* TODO: !! Cleanup
* TODO: !! Change to branded types
*/

@@ -21,3 +21,4 @@ import type { string_char_emoji } from '../types/typeAliasEmoji';

/**
* TODO: [💴] DRY - just one version of emojis.ts
* TODO: Mirror from Collboard or some common package
*/

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of characters in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of lines in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of pages in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of paragraphs in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Split text into sentences

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of words in the text

@@ -1,3 +0,3 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
import type { ExpectationUnit } from '../../types/PipelineJson/Expectations';
/**

@@ -4,0 +4,0 @@ * Index of all counter functions

@@ -7,3 +7,4 @@ import type { string_html } from '../../types/typeAliases';

* @returns formatted html code
* @private within the package because of HUGE size of prettier dependency
*/
export declare function prettifyMarkdown<TContent extends string_html>(content: TContent): TContent;

@@ -1,1 +0,8 @@

export declare function normalizeToKebabCase(sentence: string): string;
/**
* Semantic helper for kebab-case strings
*
* @example 'hello-world'
* @example 'i-love-promptbook'
*/
export type string_kebab_case = string;
export declare function normalizeToKebabCase(text: string): string_kebab_case;

@@ -1,4 +0,11 @@

export declare function normalizeTo_camelCase(sentence: string, __firstLetterCapital?: boolean): string;
/**
* Semantic helper for camelCase strings
*
* @example 'helloWorld'
* @example 'iLovePromptbook'
*/
export type string_camelCase = string;
export declare function normalizeTo_camelCase(text: string, _isFirstLetterCapital?: boolean): string_camelCase;
/**
* TODO: [🌺] Use some intermediate util splitWords
*/

@@ -1,1 +0,8 @@

export declare function normalizeTo_PascalCase(sentence: string): string;
/**
* Semantic helper for PascalCase strings
*
* @example 'HelloWorld'
* @example 'ILovePromptbook'
*/
export type string_PascalCase = string;
export declare function normalizeTo_PascalCase(text: string): string_PascalCase;

@@ -1,4 +0,16 @@

export declare function normalizeTo_SCREAMING_CASE(sentence: string): string;
/**
* Semantic helper for SCREAMING_CASE strings
*
* @example 'HELLO_WORLD'
* @example 'I_LOVE_PROMPTBOOK'
*/
export type string_SCREAMING_CASE = string;
export declare function normalizeTo_SCREAMING_CASE(text: string): string_SCREAMING_CASE;
/**
* TODO: Tests
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: 'Moje tabule' })).toEqual('/VtG7sR9rRJqwNEdM2/Moje tabule');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: 'ěščřžžýáíúů' })).toEqual('/VtG7sR9rRJqwNEdM2/escrzyaieuu');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: ' ahoj ' })).toEqual('/VtG7sR9rRJqwNEdM2/ahoj');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: ' ahoj_ahojAhoj ahoj ' })).toEqual('/VtG7sR9rRJqwNEdM2/ahoj-ahoj-ahoj-ahoj');
* TODO: [🌺] Use some intermediate util splitWords
*/

@@ -1,1 +0,8 @@

export declare function normalizeTo_snake_case(sentence: string): string;
/**
* Semantic helper for snake_case strings
*
* @example 'hello_world'
* @example 'i_love_promptbook'
*/
export type string_snake_case = string;
export declare function normalizeTo_snake_case(text: string): string_snake_case;
/**
* Create difference set of two sets.
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function difference<TItem>(a: Set<TItem>, b: Set<TItem>, isEqual?: (a: TItem, b: TItem) => boolean): Set<TItem>;
/**
* Creates a new set with all elements that are present in all sets
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function intersection<TItem>(...sets: Array<Set<TItem>>): Set<TItem>;
/**
* Creates a new set with all elements that are present in either set
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function union<TItem>(...sets: Array<Set<TItem>>): Set<TItem>;

@@ -6,3 +6,6 @@ import type { string_url } from '../../../types/typeAliases';

* Note: Dataurl are considered perfectly valid.
* Note: There are two similar functions:
* - `isValidUrl` which tests any URL
* - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
*/
export declare function isValidUrl(url: unknown): url is string_url;

@@ -1,5 +0,6 @@

import type { string_version } from './types/typeAliases';
import type { string_semantic_version } from './types/typeAliases';
/**
* The version of the Promptbook library
*/
export declare const PROMPTBOOK_VERSION: string_version;
export declare const PROMPTBOOK_VERSION: string_promptbook_version;
export type string_promptbook_version = string_semantic_version;
{
"name": "@promptbook/node",
"version": "0.60.1",
"version": "0.61.0-0",
"description": "Supercharge your use of large language models",

@@ -52,3 +52,3 @@ "private": false,

"peerDependencies": {
"@promptbook/core": "0.60.1"
"@promptbook/core": "0.61.0-0"
},

@@ -55,0 +55,0 @@ "main": "./umd/index.umd.js",

@@ -323,2 +323,3 @@ # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook

- ⭐ **[@promptbook/utils](https://www.npmjs.com/package/@promptbook/utils)** - Utility functions used in the library but also useful for individual use in preprocessing and postprocessing LLM inputs and outputs
- **[@promptbook/markdown-utils](https://www.npmjs.com/package/@promptbook/markdown-utils)** - Utility functions used for processing markdown
- _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line

@@ -397,3 +398,3 @@ - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks

### Execution type
### Block type

@@ -487,5 +488,4 @@ Each block of promptbook can have a different execution type.

- _(Not implemented yet)_ `GpuExecutionTools`
- And a special case are `MultipleLlmExecutionTools` that combines multiple execution tools together and tries to execute the prompt on the best one.
- Another special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
- The another special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
- Special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
- Another special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
- Another special case is `LogLlmExecutionToolsWrapper`, which is technically also an execution tool but is more of a proxy wrapper around other execution tools that logs all calls to them.

@@ -578,4 +578,2 @@

### Execution report

@@ -582,0 +580,0 @@

@@ -1,2 +0,2 @@

declare const _default: {
declare const _default: ({
title: string;

@@ -15,3 +15,3 @@ pipelineUrl: string;

dependentParameterNames: string[];
executionType: string;
blockType: string;
modelRequirements: {

@@ -24,4 +24,61 @@ modelVariant: string;

}[];
knowledge: never[];
}[];
knowledge: {
name: string;
title: string;
content: string;
keywords: string[];
index: {
modelName: string;
position: number[];
}[];
sources: {
title: string;
href: string;
}[];
}[];
sourceFile: string;
} | {
title: string;
pipelineUrl: string;
promptbookVersion: string;
parameters: {
name: string;
description: string;
isInput: boolean;
isOutput: boolean;
}[];
promptTemplates: {
name: string;
title: string;
dependentParameterNames: string[];
blockType: string;
expectations: {
words: {
min: number;
max: number;
};
};
modelRequirements: {
modelVariant: string;
modelName: string;
};
content: string;
resultingParameterName: string;
}[];
knowledge: {
name: string;
title: string;
content: string;
keywords: string[];
index: {
modelName: string;
position: number[];
}[];
sources: {
title: string;
href: string;
}[];
}[];
sourceFile: string;
})[];
export default _default;

@@ -6,2 +6,4 @@ import { collectionToJson } from '../collection/collectionToJson';

import { createSubcollection } from '../collection/constructors/createSubcollection';
import { BlockTypes } from '../commands/BLOCK/BlockTypes';
import { RESERVED_PARAMETER_NAMES } from '../config';
import { pipelineJsonToString } from '../conversion/pipelineJsonToString';

@@ -13,8 +15,7 @@ import { pipelineStringToJson } from '../conversion/pipelineStringToJson';

import { CollectionError } from '../errors/CollectionError';
import { ExecutionError } from '../errors/ExecutionError';
import { NotFoundError } from '../errors/NotFoundError';
import { ParsingError } from '../errors/ParsingError';
import { PipelineExecutionError } from '../errors/PipelineExecutionError';
import { PipelineLogicError } from '../errors/PipelineLogicError';
import { ReferenceError } from '../errors/ReferenceError';
import { SyntaxError } from '../errors/SyntaxError';
import { TemplateError } from '../errors/TemplateError';
import { UnexpectedError } from '../errors/UnexpectedError';

@@ -32,4 +33,3 @@ import { ExpectError } from '../errors/_ExpectError';

import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
import { ExecutionTypes } from '../types/ExecutionTypes';
import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
import type { ExecutionReportStringOptions } from '../types/execution-report/ExecutionReportStringOptions';

@@ -40,3 +40,3 @@ import { ExecutionReportStringOptionsDefaults } from '../types/execution-report/ExecutionReportStringOptions';

export { PROMPTBOOK_VERSION };
export { ExecutionTypes };
export { BlockTypes, RESERVED_PARAMETER_NAMES };
export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };

@@ -46,4 +46,4 @@ export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };

export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, validatePipeline };
export { createPipelineExecutor, MultipleLlmExecutionTools };
export { createPipelineExecutor, joinLlmExecutionTools };
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
export { CollectionError, ExecutionError, ExpectError, NotFoundError, PipelineLogicError, ReferenceError, SyntaxError, TemplateError, UnexpectedError, };
export { CollectionError, ExpectError, NotFoundError, ParsingError, PipelineExecutionError, PipelineLogicError, ReferenceError, UnexpectedError, };
import type { PipelineCollection } from '../collection/PipelineCollection';
import type { BlockType } from '../commands/BLOCK/BlockTypes';
import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';

@@ -10,11 +11,15 @@ import { EmbeddingVector } from '../execution/EmbeddingVector';

import type { UserInterfaceTools, UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
import type { ExecutionType } from '../types/ExecutionTypes';
import type { ModelRequirements, ModelVariant } from '../types/ModelRequirements';
import type { Parameters } from '../types/Parameters';
import type { ExpectationAmount, ExpectationUnit, Expectations } from '../types/PipelineJson/Expectations';
import { EXPECTATION_UNITS } from '../types/PipelineJson/Expectations';
import { KnowledgeJson } from '../types/PipelineJson/KnowledgeJson';
import type { LlmTemplateJson } from '../types/PipelineJson/LlmTemplateJson';
import { MaterialKnowledgePieceJson } from '../types/PipelineJson/MaterialKnowledgePieceJson';
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { ExpectationAmount, ExpectationUnit, Expectations, LlmTemplateJson, PromptDialogJson, PromptTemplateJson, ScriptJson, SimpleTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import { EXPECTATION_UNITS } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptDialogJson } from '../types/PipelineJson/PromptDialogJson';
import type { PromptTemplateJson } from '../types/PipelineJson/PromptTemplateJson';
import type { PromptTemplateParameterJson } from '../types/PipelineJson/PromptTemplateParameterJson';
import type { ScriptJson } from '../types/PipelineJson/ScriptJson';
import type { SimpleTemplateJson } from '../types/PipelineJson/SimpleTemplateJson';
import type { PipelineString } from '../types/PipelineString';

@@ -26,10 +31,10 @@ import type { Prompt } from '../types/Prompt';

import type { string_char_emoji } from '../types/typeAliasEmoji';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version } from '../types/typeAliases';
import type { client_id, string_char, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_semantic_version, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid } from '../types/typeAliases';
import type { FromtoItems } from '../utils/FromtoItems';
import { PROMPTBOOK_VERSION } from '../version';
import { PROMPTBOOK_VERSION, string_promptbook_version } from '../version';
export { PROMPTBOOK_VERSION };
export { EXPECTATION_UNITS };
export type { AvailableModel, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExecutionType, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineExecutor, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_version, };
export type { AvailableModel, BlockType, CommonExecutionToolsOptions, EmbeddingVector, ExecutionReportJson, ExecutionTools, ExpectationAmount, ExpectationUnit, Expectations, FromtoItems, KnowledgeJson, LlmExecutionTools, LlmTemplateJson, MaterialKnowledgePieceJson, ModelRequirements, ModelVariant, Parameters, PipelineCollection, PipelineExecutor, PipelineJson, PipelineString, Prompt, PromptChatResult, PromptCommonResult, PromptCompletionResult, PromptDialogJson, PromptEmbeddingResult, PromptResult, PromptResultUsage, PromptResultUsageCounts, PromptTemplateJson, PromptTemplateParameterJson, ScriptExecutionTools, ScriptExecutionToolsExecuteOptions, ScriptJson, ScriptLanguage, SimpleTemplateJson, TaskProgress, UncertainNumber, UserInterfaceTools, UserInterfaceToolsPromptDialogOptions, client_id, string_char, string_char_emoji, string_chat_prompt, string_completion_prompt, string_data_url, string_domain, string_email, string_emails, string_file_absolute_path, string_file_extension, string_file_path, string_file_relative_path, string_filename, string_folder_absolute_path, string_folder_path, string_folder_relative_path, string_host, string_hostname, string_href, string_html, string_javascript, string_javascript_name, string_license, string_markdown, string_markdown_text, string_mime_type, string_mime_type_with_wildcard, string_model_name, string_name, string_person_fullname, string_pipeline_url, string_pipeline_url_with_hashtemplate, string_prompt, string_promptbook_version, string_script, string_sha256, string_tdl, string_template, string_text_prompt, string_title, string_token, string_translate_language, string_uri, string_uri_part, string_url, string_url_image, string_uuid, string_semantic_version as string_version, };
/**
* TODO: Delete type aliases (from ../types/typeAliases) that are not exported here
*/

@@ -5,3 +5,2 @@ import { spaceTrim } from 'spacetrim';

import { extractVariables } from '../conversion/utils/extractVariables';
import { parseNumber } from '../conversion/utils/parseNumber';
import { renameParameter } from '../conversion/utils/renameParameter';

@@ -20,7 +19,2 @@ import { titleToName } from '../conversion/utils/titleToName';

import { extractParameters } from '../utils/extractParameters';
import { extractAllBlocksFromMarkdown } from '../utils/markdown/extractAllBlocksFromMarkdown';
import { extractAllListItemsFromMarkdown } from '../utils/markdown/extractAllListItemsFromMarkdown';
import { extractOneBlockFromMarkdown } from '../utils/markdown/extractOneBlockFromMarkdown';
import { removeContentComments } from '../utils/markdown/removeContentComments';
import { removeMarkdownFormatting } from '../utils/markdown/removeMarkdownFormatting';
import { DIACRITIC_VARIANTS_LETTERS } from '../utils/normalization/DIACRITIC_VARIANTS_LETTERS';

@@ -33,7 +27,7 @@ import type { IKeywords, string_keyword } from '../utils/normalization/IKeywords';

import { nameToUriParts } from '../utils/normalization/nameToUriParts';
import { normalizeToKebabCase } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case } from '../utils/normalization/normalizeTo_snake_case';
import { normalizeToKebabCase, string_kebab_case } from '../utils/normalization/normalize-to-kebab-case';
import { normalizeTo_PascalCase, string_PascalCase } from '../utils/normalization/normalizeTo_PascalCase';
import { normalizeTo_SCREAMING_CASE, string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_SCREAMING_CASE';
import { normalizeTo_camelCase, string_camelCase } from '../utils/normalization/normalizeTo_camelCase';
import { normalizeTo_snake_case, string_snake_case } from '../utils/normalization/normalizeTo_snake_case';
import { normalizeWhitespaces } from '../utils/normalization/normalizeWhitespaces';

@@ -44,2 +38,3 @@ import { parseKeywords } from '../utils/normalization/parseKeywords';

import { searchKeywords } from '../utils/normalization/searchKeywords';
import { parseNumber } from '../utils/parseNumber';
import { extractBlock } from '../utils/postprocessing/extractBlock';

@@ -54,9 +49,17 @@ import { removeEmojis } from '../utils/removeEmojis';

import { unwrapResult } from '../utils/unwrapResult';
import { isValidFilePath } from '../utils/validators/filePath/isValidFilePath';
import { isValidJavascriptName } from '../utils/validators/javascriptName/isValidJavascriptName';
import { isValidPromptbookVersion } from '../utils/validators/semanticVersion/isValidPromptbookVersion';
import { isValidSemanticVersion } from '../utils/validators/semanticVersion/isValidSemanticVersion';
import { isHostnameOnPrivateNetwork } from '../utils/validators/url/isHostnameOnPrivateNetwork';
import { isUrlOnPrivateNetwork } from '../utils/validators/url/isUrlOnPrivateNetwork';
import { isValidPipelineUrl } from '../utils/validators/url/isValidPipelineUrl';
import { isValidUrl } from '../utils/validators/url/isValidUrl';
import { isValidUuid } from '../utils/validators/uuid/isValidUuid';
import { PROMPTBOOK_VERSION } from '../version';
export { PROMPTBOOK_VERSION, forEachAsync };
export { extractAllBlocksFromMarkdown, // <- [🌻]
extractAllListItemsFromMarkdown, extractBlock, // <- [🌻]
extractOneBlockFromMarkdown, extractParameters, extractVariables, isValidJsonString, parseNumber, // <- [🌻]
removeContentComments, removeEmojis, removeMarkdownFormatting, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { CountUtils, countCharacters, countLines, countPages, countParagraphs, countSentences, countWords };
export { forEachAsync, PROMPTBOOK_VERSION };
export { extractBlock, // <- [🌻] + maybe export through `@promptbook/markdown-utils`
extractParameters, extractVariables, isHostnameOnPrivateNetwork, isUrlOnPrivateNetwork, isValidFilePath, isValidJavascriptName, isValidJsonString, isValidPipelineUrl as isValidPipelineUrl, isValidPromptbookVersion, isValidSemanticVersion, isValidUrl, isValidUuid, parseNumber, // <- [🌻]
removeEmojis, removeQuotes, replaceParameters, spaceTrim, trimCodeBlock, trimEndOfCodeBlock, unwrapResult, };
export { countCharacters, countLines, countPages, countParagraphs, countSentences, CountUtils, countWords };
export { splitIntoSentences };

@@ -70,8 +73,9 @@ export declare const normalizeTo: {

};
export { DIACRITIC_VARIANTS_LETTERS, IKeywords, capitalize, decapitalize, isValidKeyword, nameToUriPart, nameToUriParts, normalizeToKebabCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_camelCase, normalizeTo_snake_case, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export { capitalize, decapitalize, DIACRITIC_VARIANTS_LETTERS, IKeywords, isValidKeyword, nameToUriPart, nameToUriParts, normalizeTo_camelCase, normalizeTo_PascalCase, normalizeTo_SCREAMING_CASE, normalizeTo_snake_case, normalizeToKebabCase, normalizeWhitespaces, parseKeywords, parseKeywordsFromString, removeDiacritics, searchKeywords, string_keyword, titleToName, };
export type { string_camelCase, string_kebab_case, string_PascalCase, string_SCREAMING_CASE, string_snake_case };
export { extractParametersFromPromptTemplate, renameParameter, renderPromptbookMermaid };
export { difference, intersection, union };
/**
* TODO: [🧠] Maybe create some indipendent package like `markdown-tools` from both here exported and @private utilities
* TODO: [🧠] Maybe create some indipendent package like `@promptbook/markdown-utils`
* Note: [🕙] It does not make sence to have simple lower / UPPER case normalization
*/

@@ -9,1 +9,4 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

export declare function collectionToJson(collection: PipelineCollection): Promise<Array<PipelineJson>>;
/**
* TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
*/
export {};
/**
* Note: [🐠] For example here URL https://example.com/pipeline.ptbk.md is not valid
* because it is on private network BUT its very hard to debug because
* there is no error message and false return (the error) happen deep in:
* `isValidPipelineUrl` -> `isValidPipelineUrl` -> `isUrlOnPrivateNetwork`
*/

@@ -0,1 +1,2 @@

import type { PipelineStringToJsonOptions } from '../../conversion/pipelineStringToJson';
import type { string_folder_path } from '../../types/typeAliases';

@@ -6,3 +7,3 @@ import type { PipelineCollection } from '../PipelineCollection';

*/
type CreatePipelineCollectionFromDirectoryOptions = {
type CreatePipelineCollectionFromDirectoryOptions = PipelineStringToJsonOptions & {
/**

@@ -9,0 +10,0 @@ * If true, the directory is searched recursively for promptbooks

@@ -13,1 +13,5 @@ /**

export declare const PIPELINE_COLLECTION_BASE_FILENAME = "index";
/**
* The names of the parameters that are reserved for special purposes
*/
export declare const RESERVED_PARAMETER_NAMES: string[];

@@ -11,3 +11,5 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

/**
* TODO: !!!!! Implement new features and commands into `promptTemplateParameterJsonToString`
* TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
* TODO: Escape all
*/

@@ -7,3 +7,3 @@ import type { LlmExecutionTools } from '../execution/LlmExecutionTools';

*/
type PipelineStringToJsonOptions = {
export type PipelineStringToJsonOptions = {
/**

@@ -24,3 +24,3 @@ * Tools for processing required for knowledge processing *(not for actual execution)*

* @returns {Promptbook} compiled in JSON format (.ptbk.json)
* @throws {SyntaxError} if the promptbook string is not valid
* @throws {ParsingError} if the promptbook string is not valid
*

@@ -31,5 +31,4 @@ * Note: This function does not validate logic of the pipeline only the syntax

export declare function pipelineStringToJson(pipelineString: PipelineString, options?: PipelineStringToJsonOptions): Promise<PipelineJson>;
export {};
/**
* TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
*/

@@ -13,3 +13,3 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

* @returns {Promptbook} compiled in JSON format (.ptbk.json)
* @throws {SyntaxError} if the promptbook string is not valid
* @throws {ParsingError} if the promptbook string is not valid
*

@@ -21,5 +21,7 @@ * Note: This function does not validate logic of the pipeline only the syntax

/**
* TODO: Report here line/column of error
* TODO: !!!! Warn if used only sync version
* TODO: [🚞] Report here line/column of error
* TODO: Use spaceTrim more effectively
* TODO: [🧠] Parameter flags - isInput, isOutput, isInternal
* TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
*/

@@ -8,7 +8,7 @@ import type { PromptTemplateJson } from '../../types/PipelineJson/PromptTemplateJson';

* @returns the set of parameter names
* @throws {SyntaxError} if the script is invalid
* @throws {ParsingError} if the script is invalid
*/
export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'executionType' | 'content'>): Set<string_name>;
export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'blockType' | 'content'>): Set<string_name>;
/**
* TODO: [🔣] If script require contentLanguage
*/

@@ -8,3 +8,3 @@ import type { string_javascript } from '../../types/typeAliases';

* @returns the list of variable names
* @throws {SyntaxError} if the script is invalid
* @throws {ParsingError} if the script is invalid
*/

@@ -11,0 +11,0 @@ export declare function extractVariables(script: string_javascript): Set<string_javascript_name>;

export {};
/**
* TODO: Include automatically all samples from logic errors folder (same with syntax errors)
*/
/**
* This error occurs when some expectation is not met in the execution of the pipeline
*
* @private Always catched and rethrown as `ExecutionError`
* Note: This is a kindof subtype of ExecutionError
* @private Always catched and rethrown as `PipelineExecutionError`
* Note: This is a kindof subtype of PipelineExecutionError
*/

@@ -7,0 +7,0 @@ export declare class ExpectError extends Error {

@@ -6,3 +6,3 @@ import type { PipelineExecutor } from './PipelineExecutor';

* @param executionResult - The partial result of the promptnook execution
* @throws {ExecutionError} If the execution is not successful or if multiple errors occurred
* @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
*/

@@ -9,0 +9,0 @@ export declare function assertsExecutionSuccessful(executionResult: Pick<Awaited<ReturnType<PipelineExecutor>>, 'isSuccessful' | 'errors'>): void;

@@ -0,1 +1,2 @@

import type { Arrayable } from '../types/Arrayable';
import type { LlmExecutionTools } from './LlmExecutionTools';

@@ -13,6 +14,6 @@ import type { ScriptExecutionTools } from './ScriptExecutionTools';

*
* Tip: Combine multiple LLM execution tools with `MultipleLlmExecutionTools`
* Tip: Combine multiple LLM execution tools - use array of LlmExecutionTools instead of single LlmExecutionTools
* @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#llm-execution-tools
*/
llm: LlmExecutionTools;
llm?: Arrayable<LlmExecutionTools>;
/**

@@ -25,3 +26,3 @@ * Tools for executing scripts

*/
script: Array<ScriptExecutionTools>;
script?: Arrayable<ScriptExecutionTools>;
/**

@@ -28,0 +29,0 @@ * Tools for interacting with the user

import type { Promisable } from 'type-fest';
import type { ModelVariant } from '../types/ModelRequirements';
import type { Prompt } from '../types/Prompt';
import type { string_markdown } from '../types/typeAliases';
import type { string_markdown_text } from '../types/typeAliases';
import type { string_model_name } from '../types/typeAliases';

@@ -8,2 +10,3 @@ import type { string_title } from '../types/typeAliases';

import type { PromptCompletionResult } from './PromptResult';
import type { PromptEmbeddingResult } from './PromptResult';
/**

@@ -18,10 +21,26 @@ * Container for all the tools needed to execute prompts to large language models like GPT-4

/**
* Use a chat model
* Title of the model provider
*
* @example "OpenAI"
*/
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
readonly title: string_title & string_markdown_text;
/**
* Use a completion model
* Description of the provider
*
* @example "Use all models from OpenAI"
*/
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
readonly description: string_markdown;
/**
* Calls a chat model
*/
callChatModel?(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls a completion model
*/
callCompletionModel?(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls an embedding model
*/
callEmbeddingModel?(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* List all available models that can be used

@@ -49,6 +68,6 @@ */

/**
* TODO: !!!! Translation model
* TODO: [🧠] Emulation of one type of model with another one - emuate chat with completion; emulate translation with chat
* TODO: [🍓][♐] Some heuristic to pick the best model in listed models
* TODO: [🏳] callChatModel -> chat, callCompletionModel -> complete, translate
* TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
*/
import type { KebabCase } from 'type-fest';
import type { ExpectationUnit } from '../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
import type { number_positive } from '../types/typeAliases';

@@ -14,3 +14,3 @@ import type { number_usd } from '../types/typeAliases';

*/
export type PromptResult = PromptCompletionResult | PromptChatResult;
export type PromptResult = PromptCompletionResult | PromptChatResult | PromptEmbeddingResult;
/**

@@ -17,0 +17,0 @@ * Prompt completion result

@@ -5,4 +5,4 @@ /**

* @returns
* @throws {ExecutionError}
* @throws {PipelineExecutionError}
*/
export declare function extractMultiplicatedOccurrence(message: string): string;

@@ -1,2 +0,2 @@

import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Function checkExpectations will check if the expectations on given value are met

@@ -9,3 +9,3 @@ import type { Parameters } from '../../types/Parameters';

* @returns the template with replaced parameters
* @throws {TemplateError} if parameter is not defined, not closed, or not opened
* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
*

@@ -12,0 +12,0 @@ * @private within the createPipelineExecutor

@@ -11,2 +11,4 @@ import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';

* The LLM tools to use for the conversion and extraction of knowledge
*
* Note: If you want to use multiple LLMs, you can use `joinLlmExecutionTools` to join them first
*/

@@ -23,1 +25,4 @@ llmTools: LlmExecutionTools;

export {};
/**
* 11:11
*/

@@ -17,2 +17,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

/**
* Note: [🤖] Add models of new variant
* TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?

@@ -19,0 +20,0 @@ * TODO: [🧠] Some mechanism to propagate unsureness

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { PromptChatResult } from '../../execution/PromptResult';
import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';

@@ -22,2 +24,4 @@ /**

constructor(options?: AnthropicClaudeExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -28,6 +32,2 @@ * Calls Anthropic Claude API to use a chat model.

/**
* Calls Anthropic Claude API to use a complete model.
*/
callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**
* Get the model that should be used as default

@@ -34,0 +34,0 @@ */

@@ -6,2 +6,5 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';

@@ -23,2 +26,4 @@ /**

constructor(options: AzureOpenAiExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -25,0 +30,0 @@ * Calls OpenAI API to use a chat model.

import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import { OpenAiExecutionTools } from '../openai/OpenAiExecutionTools';

@@ -7,2 +10,4 @@ /**

export declare class LangtailExecutionTools extends OpenAiExecutionTools implements LlmExecutionTools {
get title(): string_title & string_markdown_text;
get description(): string_markdown;
}
import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
import type { Expectations } from '../../types/PipelineJson/PromptTemplateJson';
import type { Expectations } from '../../types/PipelineJson/Expectations';
/**

@@ -4,0 +4,0 @@ * Gets the expectations and creates a fake text that meets the expectations

@@ -7,2 +7,5 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**

@@ -14,2 +17,4 @@ * Mocked execution Tools for just echoing the requests for testing purposes.

constructor(options?: CommonExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -16,0 +21,0 @@ * Mocks chat model

@@ -6,3 +6,7 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**

@@ -14,2 +18,4 @@ * Mocked execution Tools for just faking expected responses for testing purposes

constructor(options?: CommonExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -24,2 +30,6 @@ * Fakes chat model

/**
* Fakes embedding model
*/
callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptEmbeddingResult>;
/**
* List all available fake-models that can be used

@@ -26,0 +36,0 @@ */

@@ -5,7 +5,11 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
/**
* Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
*
* @see https://github.com/webgptorg/promptbook#multiple-server
* @private Internal utility of `joinLlmExecutionTools`
*/

@@ -21,2 +25,4 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {

constructor(...llmExecutionTools: Array<LlmExecutionTools>);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -31,2 +37,6 @@ * Calls the best available chat model

/**
* Calls the best available embedding model
*/
callEmbeddingModel(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* Calls the best available model

@@ -41,1 +51,4 @@ */

}
/**
* TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
*/

@@ -10,3 +10,3 @@ import type OpenAI from 'openai';

* @param rawResponse The raw response from OpenAI API
* @throws {ExecutionError} If the usage is not defined in the response from OpenAI
* @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
* @private internal util of `OpenAiExecutionTools`

@@ -13,0 +13,0 @@ */

@@ -18,2 +18,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

/**
* Note: [🤖] Add models of new variant
* TODO: [🧠] Some mechanism to propagate unsureness

@@ -20,0 +21,0 @@ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing

@@ -7,2 +7,5 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';

@@ -24,2 +27,4 @@ /**

constructor(options?: OpenAiExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -36,3 +41,3 @@ * Calls OpenAI API to use a chat model.

*/
embed(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptEmbeddingResult>;
callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptEmbeddingResult>;
/**

@@ -39,0 +44,0 @@ * Get the model that should be used as default

@@ -5,3 +5,7 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';

import type { PromptCompletionResult } from '../../execution/PromptResult';
import type { PromptEmbeddingResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown } from '../../types/typeAliases';
import type { string_markdown_text } from '../../types/typeAliases';
import type { string_title } from '../../types/typeAliases';
import type { RemoteLlmExecutionToolsOptions } from './RemoteLlmExecutionToolsOptions';

@@ -19,2 +23,4 @@ /**

constructor(options: RemoteLlmExecutionToolsOptions);
get title(): string_title & string_markdown_text;
get description(): string_markdown;
/**

@@ -25,12 +31,16 @@ * Creates a connection to the remote proxy server.

/**
* Calls remote proxy server to use a chat model.
* Calls remote proxy server to use a chat model
*/
callChatModel(prompt: Prompt): Promise<PromptChatResult>;
/**
* Calls remote proxy server to use a completion model.
* Calls remote proxy server to use a completion model
*/
callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
/**
* Calls remote proxy server to use both completion or chat model.
* Calls remote proxy server to use a embedding model
*/
callEmbeddingModel(prompt: Prompt): Promise<PromptEmbeddingResult>;
/**
* Calls remote proxy server to use both completion or chat model
*/
private callModelCommon;

@@ -37,0 +47,0 @@ /**

@@ -18,2 +18,3 @@ import type { IDestroyable } from 'destroyable';

* TODO: [🃏] Pass here some security token to prevent malitious usage and/or DDoS
* TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
*/

@@ -0,1 +1,2 @@

import type { really_any } from '../../../types/typeAliases';
/**

@@ -8,5 +9,5 @@ * Does nothing, but preserves the function in the bundle

*/
export declare function preserve(func: (...params: Array<any>) => unknown): void;
export declare function preserve(func: (...params: Array<really_any>) => unknown): void;
/**
* TODO: !! [1] This maybe does memory leak
*/

@@ -5,3 +5,3 @@ import type { PromptResult } from '../../execution/PromptResult';

import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';
import type { string_semantic_version } from '../typeAliases';
/**

@@ -29,7 +29,7 @@ * ExecutionReport is result of executing one promptbook

*/
readonly promptbookUsedVersion: string_version;
readonly promptbookUsedVersion: string_semantic_version;
/**
* Version from promptbook which was requested by promptbook
*/
readonly promptbookRequestedVersion?: string_version;
readonly promptbookRequestedVersion?: string_semantic_version;
/**

@@ -36,0 +36,0 @@ * Description of the promptbook which was executed

@@ -0,4 +1,5 @@

import type { string_file_path } from '../typeAliases';
import type { string_markdown_text } from '../typeAliases';
import type { string_pipeline_url } from '../typeAliases';
import type { string_version } from '../typeAliases';
import type { string_semantic_version } from '../typeAliases';
import type { KnowledgeJson } from './KnowledgeJson';

@@ -25,2 +26,6 @@ import type { PromptTemplateJson } from './PromptTemplateJson';

/**
* Internal helper for tracking the source `.ptbk.md` file of the pipeline
*/
readonly sourceFile?: string_file_path;
/**
* Title of the promptbook

@@ -33,3 +38,3 @@ * -It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure

*/
readonly promptbookVersion: string_version;
readonly promptbookVersion: string_semantic_version;
/**

@@ -54,2 +59,3 @@ * Description of the promptbook

/**
* TODO: !!!!! Implement new commands
* Note: There was a proposal for multiple types of promptbook objects 78816ff33e2705ee1a187aa2eb8affd976d4ea1a

@@ -56,0 +62,0 @@ * But then immediately reverted back to the single type

@@ -1,145 +0,9 @@

import type { ExpectFormatCommand } from '../Command';
import type { ExecutionType } from '../ExecutionTypes';
import type { ModelRequirements } from '../ModelRequirements';
import type { ScriptLanguage } from '../ScriptLanguage';
import type { number_integer } from '../typeAliases';
import type { number_positive } from '../typeAliases';
import type { string_javascript } from '../typeAliases';
import type { string_javascript_name } from '../typeAliases';
import type { string_markdown } from '../typeAliases';
import type { string_markdown_text } from '../typeAliases';
import type { string_name } from '../typeAliases';
import type { string_prompt } from '../typeAliases';
import type { string_template } from '../typeAliases';
import type { ___ } from '../typeAliases';
import type { LlmTemplateJson } from './LlmTemplateJson';
import type { PromptDialogJson } from './PromptDialogJson';
import type { ScriptJson } from './ScriptJson';
import type { SimpleTemplateJson } from './SimpleTemplateJson';
/**
* Describes one prompt template in the promptbook
*/
export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson;
/**
* Template for prompt to LLM
*/
export type LlmTemplateJson = PromptTemplateJsonCommon & {
readonly executionType: 'PROMPT_TEMPLATE';
/**
* Requirements for the model
* - This is required only for executionType PROMPT_TEMPLATE
*/
readonly modelRequirements: ModelRequirements;
};
/**
* Expect this amount of each unit in the answer
*
* For example 5 words, 3 sentences, 2 paragraphs, ...
*
* Note: Expectations are performed after all postprocessing steps
*/
export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
min?: ExpectationAmount;
max?: ExpectationAmount;
}>>;
/**
* Units of text measurement
*/
export declare const EXPECTATION_UNITS: readonly ["CHARACTERS", "WORDS", "SENTENCES", "LINES", "PARAGRAPHS", "PAGES"];
/**
* Unit of text measurement
*/
export type ExpectationUnit = typeof EXPECTATION_UNITS[number];
/**
* Amount of text measurement
*/
export type ExpectationAmount = number_integer & (number_positive | 0);
/**
* Template for simple concatenation of strings
*/
export interface SimpleTemplateJson extends PromptTemplateJsonCommon {
readonly executionType: 'SIMPLE_TEMPLATE';
}
/**
* Template for script execution
*/
export interface ScriptJson extends PromptTemplateJsonCommon {
readonly executionType: 'SCRIPT';
/**
* Language of the script
* - This is required only for executionType SCRIPT
*
*/
readonly contentLanguage?: ScriptLanguage;
}
/**
* Template for prompt to user
*/
export interface PromptDialogJson extends PromptTemplateJsonCommon {
readonly executionType: 'PROMPT_DIALOG';
}
/**
* Common properties of all prompt templates
*/
interface PromptTemplateJsonCommon {
/**
* Name of the template
* - It must be unique across the pipeline
* - It should start uppercase and contain letters and numbers
* - The pipelineUrl together with hash and name are used to identify the prompt template in the pipeline
*/
readonly name: string_name;
/**
* Title of the prompt template
* It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
*/
readonly title: string;
/**
* Description of the prompt template
* It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
*/
readonly description?: string_markdown_text;
/**
* List of parameter names that are used in the prompt template and must be defined before the prompt template is executed
*
* Note: Joker is one of the dependent parameters
*/
readonly dependentParameterNames: Array<string_name>;
/**
     * If these parameters meet the expectations requirements, they are used instead of executing this prompt template
*/
readonly jokers?: Array<string>;
/**
* Type of the execution
     * This determines if the prompt template is sent to the LLM, user or some scripting evaluation
*/
readonly executionType: ExecutionType;
/**
* Content of the template with {placeholders} for parameters
*/
readonly content: (string_prompt | string_javascript | string_markdown) & string_template;
/**
* List of postprocessing steps that are executed after the prompt template
*/
readonly postprocessing?: Array<string_javascript_name>;
/**
* Expect this amount of each unit in the answer
*
* For example 5 words, 3 sentences, 2 paragraphs, ...
*
* Note: Expectations are performed after all postprocessing steps
*/
readonly expectations?: Expectations;
/**
* Expect this format of the answer
*
* Note: Expectations are performed after all postprocessing steps
* @deprecated [💝]
*/
readonly expectFormat?: ExpectFormatCommand['format'];
/**
* Name of the parameter that is the result of the prompt template
*/
readonly resultingParameterName: string_name;
}
export {};
/**
* TODO: [💝] Unite object for expecting amount and format - remove expectFormat
* TODO: use one helper type> (string_prompt | string_javascript | string_markdown) & string_template
* TODO: [👙][🧠] Just selecting gpt3 or gpt4 level of model
*/
export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson | ___ | ___ | ___ | ___;

@@ -0,5 +1,5 @@

import type { ExpectFormatCommand } from '../commands/EXPECT/ExpectFormatCommand';
import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
import type { ExpectFormatCommand } from './Command';
import type { ModelRequirements } from './ModelRequirements';
import type { Expectations } from './PipelineJson/PromptTemplateJson';
import type { Expectations } from './PipelineJson/Expectations';
import type { string_name } from './typeAliases';

@@ -55,3 +55,3 @@ import type { string_pipeline_url_with_hashtemplate } from './typeAliases';

*/
readonly pipelineUrl: string_pipeline_url_with_hashtemplate;
readonly pipelineUrl?: string_pipeline_url_with_hashtemplate;
/**

@@ -58,0 +58,0 @@ * Parameters used in the prompt

@@ -1,2 +0,2 @@

import type { ExecutionType } from './ExecutionTypes';
import type { BlockType } from '../commands/BLOCK/BlockTypes';
import type { string_markdown_text } from './typeAliases';

@@ -34,3 +34,3 @@ import type { string_name } from './typeAliases';

*/
readonly executionType: ExecutionType;
readonly blockType: BlockType;
/**

@@ -37,0 +37,0 @@ * The parameter name that is being processed.

@@ -107,4 +107,17 @@ /**

*
* Markdown text with exactly ONE heading on first line NO less NO more
*/
export type string_markdown_section = string;
/**
* Semantic helper
*
* Markdown without any headings like h1, h2
* BUT with formatting, lists, blockquotes, blocks, etc. is allowed
*/
export type string_markdown_section_content = string;
/**
* Semantic helper
*
* Markdown text without any structure like h1, h2, lists, blockquotes, blocks, etc.
* BUT with bold, italic, etc.
* BUT with bold, italic, etc. is allowed
*

@@ -236,6 +249,2 @@ * For example `"**Hello** World!"`

* Semantic helper
*/
export type string_protocol = 'http:' | 'https:';
/**
* Semantic helper
*

@@ -253,2 +262,6 @@ * For example `"localhost"` or `"collboard.com"`

* Semantic helper
*/
export type string_protocol = 'http:' | 'https:';
/**
* Semantic helper
*

@@ -259,2 +272,21 @@ * For example `"pavol@hejny.org"`

/**
* Semantic helper
*
* For example `"pavol@hejny.org, jirka@webgpt.cz"`
*/
export type string_emails = string;
/**
* Branded type for UUIDs version 4
* This will not allow to pass some random string where should be only a valid UUID
*
* Use utils:
* - `randomUuid` to generate
 * - `isValidUuid` to check validity
*
* For example `"5a0a153d-7be9-4018-9eda-e0e2e2b89bd9"`
*/
export type string_uuid = string & {
readonly _type: 'uuid';
};
/**
* Branded type client id

@@ -276,3 +308,3 @@ */

*/
export type string_version = string;
export type string_semantic_version = string;
/**

@@ -439,4 +471,16 @@ * Semantic helper

/**
* Formatting helper to put void to keep longer version of prettier
*/
export type ___ = never;
/**
* Organizational helper to better mark the place where the type is missing
*/
export type TODO = any;
/**
* Organizational helper to mark a place where to really use any
*/
export type really_any = any;
/**
* TODO: !! Cleanup
* TODO: !! Change to branded types
*/

@@ -21,3 +21,4 @@ import type { string_char_emoji } from '../types/typeAliasEmoji';

/**
* TODO: [💴] DRY - just one version of emojis.ts
* TODO: Mirror from Collboard or some common package
*/

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of characters in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of lines in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of pages in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of paragraphs in the text

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Split text into sentences

@@ -1,2 +0,2 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
/**

@@ -3,0 +3,0 @@ * Counts number of words in the text

@@ -1,3 +0,3 @@

import type { ExpectationAmount } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationUnit } from '../../types/PipelineJson/PromptTemplateJson';
import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
import type { ExpectationUnit } from '../../types/PipelineJson/Expectations';
/**

@@ -4,0 +4,0 @@ * Index of all counter functions

@@ -7,3 +7,4 @@ import type { string_html } from '../../types/typeAliases';

* @returns formatted html code
 * @private within the package because of HUGE size of prettier dependency
*/
export declare function prettifyMarkdown<TContent extends string_html>(content: TContent): TContent;

@@ -1,1 +0,8 @@

export declare function normalizeToKebabCase(sentence: string): string;
/**
* Semantic helper for kebab-case strings
*
* @example 'hello-world'
* @example 'i-love-promptbook'
*/
export type string_kebab_case = string;
export declare function normalizeToKebabCase(text: string): string_kebab_case;

@@ -1,4 +0,11 @@

export declare function normalizeTo_camelCase(sentence: string, __firstLetterCapital?: boolean): string;
/**
* Semantic helper for camelCase strings
*
* @example 'helloWorld'
* @example 'iLovePromptbook'
*/
export type string_camelCase = string;
export declare function normalizeTo_camelCase(text: string, _isFirstLetterCapital?: boolean): string_camelCase;
/**
* TODO: [🌺] Use some intermediate util splitWords
*/

@@ -1,1 +0,8 @@

export declare function normalizeTo_PascalCase(sentence: string): string;
/**
* Semantic helper for PascalCase strings
*
* @example 'HelloWorld'
* @example 'ILovePromptbook'
*/
export type string_PascalCase = string;
export declare function normalizeTo_PascalCase(text: string): string_PascalCase;

@@ -1,4 +0,16 @@

export declare function normalizeTo_SCREAMING_CASE(sentence: string): string;
/**
* Semantic helper for SCREAMING_CASE strings
*
* @example 'HELLO_WORLD'
* @example 'I_LOVE_PROMPTBOOK'
*/
export type string_SCREAMING_CASE = string;
export declare function normalizeTo_SCREAMING_CASE(text: string): string_SCREAMING_CASE;
/**
* TODO: Tests
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: 'Moje tabule' })).toEqual('/VtG7sR9rRJqwNEdM2/Moje tabule');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: 'ěščřžžýáíúů' })).toEqual('/VtG7sR9rRJqwNEdM2/escrzyaieuu');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: ' ahoj ' })).toEqual('/VtG7sR9rRJqwNEdM2/ahoj');
* > expect(encodeRoutePath({ uriId: 'VtG7sR9rRJqwNEdM2', name: ' ahoj_ahojAhoj ahoj ' })).toEqual('/VtG7sR9rRJqwNEdM2/ahoj-ahoj-ahoj-ahoj');
* TODO: [🌺] Use some intermediate util splitWords
*/

@@ -1,1 +0,8 @@

export declare function normalizeTo_snake_case(sentence: string): string;
/**
* Semantic helper for snake_case strings
*
* @example 'hello_world'
* @example 'i_love_promptbook'
*/
export type string_snake_case = string;
export declare function normalizeTo_snake_case(text: string): string_snake_case;
/**
* Create difference set of two sets.
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function difference<TItem>(a: Set<TItem>, b: Set<TItem>, isEqual?: (a: TItem, b: TItem) => boolean): Set<TItem>;
/**
* Creates a new set with all elements that are present in all sets
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function intersection<TItem>(...sets: Array<Set<TItem>>): Set<TItem>;
/**
* Creates a new set with all elements that are present in either set
*
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
*/
export declare function union<TItem>(...sets: Array<Set<TItem>>): Set<TItem>;

@@ -6,3 +6,6 @@ import type { string_url } from '../../../types/typeAliases';

* Note: Dataurl are considered perfectly valid.
 * Note: There are two similar functions:
* - `isValidUrl` which tests any URL
* - `isValidPipelineUrl` *(this one)* which tests just promptbook URL
*/
export declare function isValidUrl(url: unknown): url is string_url;

@@ -1,5 +0,6 @@

import type { string_version } from './types/typeAliases';
import type { string_semantic_version } from './types/typeAliases';
/**
* The version of the Promptbook library
*/
export declare const PROMPTBOOK_VERSION: string_version;
export declare const PROMPTBOOK_VERSION: string_promptbook_version;
export type string_promptbook_version = string_semantic_version;

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc