Socket
Socket
Sign in · Demo · Install

@promptbook/remote-client

Package Overview
Dependencies
Maintainers
0
Versions
401
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@promptbook/remote-client - npm Package Compare versions

Comparing version 0.66.0-1 to 0.66.0-4

esm/typings/src/execution/AvailableModel.d.ts

2

esm/index.es.js

@@ -7,3 +7,3 @@ import { io } from 'socket.io-client';

*/
var PROMPTBOOK_VERSION = '0.66.0-0';
var PROMPTBOOK_VERSION = '0.66.0-3';
// TODO: !!!! List here all the versions and annotate + put into script

@@ -10,0 +10,0 @@

@@ -8,2 +8,3 @@ import { PROMPTBOOK_VERSION } from '../version';

import { createAnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools';
import { _AnthropicClaudeRegistration } from '../llm-providers/anthropic-claude/register-constructor';
import { _ } from '../llm-providers/anthropic-claude/register1';

@@ -17,2 +18,3 @@ export { PROMPTBOOK_VERSION };

export { createAnthropicClaudeExecutionTools };
export { _AnthropicClaudeRegistration };
export { _ };

@@ -20,3 +20,2 @@ import { PROMPTBOOK_VERSION } from '../version';

import { DEFAULT_REMOTE_URL_PATH } from '../config';
import { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ } from '../config';
import { pipelineJsonToString } from '../conversion/pipelineJsonToString';

@@ -52,2 +51,4 @@ import type { PipelineStringToJsonOptions } from '../conversion/pipelineStringToJson';

import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
import { $llmToolsConfigurationBoilerplatesRegister } from '../llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister';
import { $llmToolsRegister } from '../llm-providers/_common/$llmToolsRegister';
import { createLlmToolsFromConfiguration } from '../llm-providers/_common/createLlmToolsFromConfiguration';

@@ -57,3 +58,5 @@ import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';

import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
import { _AnthropicClaudeConfigurationRegistration } from '../llm-providers/anthropic-claude/register-configuration';
import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
import { _OpenAiConfigurationRegistration } from '../llm-providers/openai/register-configuration';
import { preparePersona } from '../personas/preparePersona';

@@ -90,3 +93,2 @@ import { isPipelinePrepared } from '../prepare/isPipelinePrepared';

export { DEFAULT_REMOTE_URL_PATH };
export { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ };
export { pipelineJsonToString };

@@ -122,2 +124,4 @@ export type { PipelineStringToJsonOptions };

export { prepareKnowledgeFromMarkdown };
export { $llmToolsConfigurationBoilerplatesRegister };
export { $llmToolsRegister };
export { createLlmToolsFromConfiguration };

@@ -127,3 +131,5 @@ export { cacheLlmTools };

export { limitTotalUsage };
export { _AnthropicClaudeConfigurationRegistration };
export { joinLlmExecutionTools };
export { _OpenAiConfigurationRegistration };
export { preparePersona };

@@ -130,0 +136,0 @@ export { isPipelinePrepared };

import { PROMPTBOOK_VERSION } from '../version';
import { createOpenAiExecutionTools } from '../llm-providers/openai/createOpenAiExecutionTools';
import { OPENAI_MODELS } from '../llm-providers/openai/openai-models';
import { OpenAiExecutionTools } from '../llm-providers/openai/OpenAiExecutionTools';
import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAiExecutionToolsOptions';
import { _OpenAiRegistration } from '../llm-providers/openai/register-constructor';
export { PROMPTBOOK_VERSION };
export { createOpenAiExecutionTools };
export { OPENAI_MODELS };
export { OpenAiExecutionTools };
export type { OpenAiExecutionToolsOptions };
export { _OpenAiRegistration };

@@ -13,2 +13,3 @@ import type { PipelineCollection } from '../collection/PipelineCollection';

import type { renderPipelineMermaidOptions } from '../conversion/prettify/renderPipelineMermaidOptions';
import type { AvailableModel } from '../execution/AvailableModel';
import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';

@@ -18,3 +19,3 @@ import type { EmbeddingVector } from '../execution/EmbeddingVector';

import type { LlmExecutionTools } from '../execution/LlmExecutionTools';
import type { AvailableModel } from '../execution/LlmExecutionTools';
import type { LlmExecutionToolsConstructor } from '../execution/LlmExecutionToolsConstructor';
import type { PipelineExecutor } from '../execution/PipelineExecutor';

@@ -217,2 +218,3 @@ import type { PipelineExecutorResult } from '../execution/PipelineExecutor';

import type { TODO_any } from '../utils/organization/TODO_any';
import type { Registered } from '../utils/Register';
import type { string_promptbook_version } from '../version';

@@ -231,2 +233,3 @@ export type { PipelineCollection };

export type { renderPipelineMermaidOptions };
export type { AvailableModel };
export type { CommonExecutionToolsOptions };

@@ -236,3 +239,3 @@ export type { EmbeddingVector };

export type { LlmExecutionTools };
export type { AvailableModel };
export type { LlmExecutionToolsConstructor };
export type { PipelineExecutor };

@@ -435,2 +438,3 @@ export type { PipelineExecutorResult };

export type { TODO_any };
export type { Registered };
export type { string_promptbook_version };

@@ -1,2 +0,1 @@

import type { LlmToolsConfiguration } from './llm-providers/_common/LlmToolsConfiguration';
/**

@@ -126,8 +125,2 @@ * Warning message for the generated sections and files files

*
* @public exported from `@promptbook/core`
*/
export declare const BOILERPLATE_LLM_TOOLS_CONFIGURATION_: LlmToolsConfiguration;
/**
* @@@
*
* @private within the repository

@@ -134,0 +127,0 @@ */

import type { Promisable } from 'type-fest';
import type { ModelVariant } from '../types/ModelVariant';
import type { Prompt } from '../types/Prompt';
import type { string_markdown } from '../types/typeAliases';
import type { string_markdown_text } from '../types/typeAliases';
import type { string_model_name } from '../types/typeAliases';
import type { string_title } from '../types/typeAliases';
import type { AvailableModel } from './AvailableModel';
import type { ChatPromptResult } from './PromptResult';

@@ -55,19 +54,2 @@ import type { CompletionPromptResult } from './PromptResult';

/**
* Represents a model that can be used for prompt execution
*/
export type AvailableModel = {
/**
* The model title
*/
readonly modelTitle: string_title;
/**
* The model name available
*/
readonly modelName: string_model_name;
/**
* Variant of the model
*/
readonly modelVariant: ModelVariant;
};
/**
* TODO: Implement destroyable pattern to free resources

@@ -74,0 +56,0 @@ * TODO: [🏳] Add `callTranslationModel`

import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { TODO_any } from '../../utils/organization/TODO_any';
/**
* @@@
*
* TODO: !!!!!! Not centralized - register each provider to each package
*
* @private internal type for `createLlmToolsFromConfiguration`

@@ -5,0 +9,0 @@ */

import type { string_title } from '../../types/typeAliases';
import type { TODO_object } from '../../utils/organization/TODO_object';
import type { TODO_string } from '../../utils/organization/TODO_string';
import type { Registered } from '../../utils/Register';
/**
* @@@
*/
export type LlmToolsConfiguration = Array<{
export type LlmToolsConfiguration = Array<Registered & {
/**

@@ -15,10 +15,2 @@ * @@@

*/
packageName: TODO_string;
/**
* @@@
*/
className: TODO_string;
/**
* @@@
*/
options: TODO_object;

@@ -25,0 +17,0 @@ }>;

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { number_usd } from '../../types/typeAliases';

@@ -3,0 +3,0 @@ /**

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -50,3 +50,3 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
* TODO: Maybe make custom OpenAiError
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options

@@ -53,0 +53,0 @@ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes

@@ -17,3 +17,3 @@ import type Anthropic from '@anthropic-ai/sdk';

/**
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
*/
export {};
/**
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
*/

@@ -9,7 +9,12 @@ import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';

*/
export declare function createAnthropicClaudeExecutionTools(options: AnthropicClaudeExecutionToolsOptions): AnthropicClaudeExecutionTools | RemoteLlmExecutionTools;
export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicClaudeExecutionToolsOptions) => AnthropicClaudeExecutionTools | RemoteLlmExecutionTools) & {
packageName: string;
className: string;
};
/**
* TODO: [🧠] !!!! Make anonymous this with all LLM providers
* TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
* TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
* TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
* TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
* TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
*/

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -48,5 +48,5 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom AzureOpenaiError
* TODO: Maybe make custom AzureOpenAiError
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
*/

@@ -0,3 +1,3 @@

import type { AvailableModel } from '../../execution/AvailableModel';
import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -4,0 +4,0 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

@@ -0,3 +1,3 @@

import type { AvailableModel } from '../../execution/AvailableModel';
import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -4,0 +4,0 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -3,0 +3,0 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { number_usd } from '../../types/typeAliases';

@@ -3,0 +3,0 @@ /**

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -12,3 +12,3 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

/**
* Execution Tools for calling OpenAI API.
* Execution Tools for calling OpenAI API
*

@@ -67,5 +67,5 @@ * @public exported from `@promptbook/openai`

* TODO: Maybe Create some common util for callChatModel and callCompletionModel
* TODO: Maybe make custom OpenaiError
* TODO: Maybe make custom OpenAiError
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
*/

@@ -0,3 +1,3 @@

import type { AvailableModel } from '../../../execution/AvailableModel';
import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
import type { AvailableModel } from '../../../execution/LlmExecutionTools';
import type { client_id } from '../../../types/typeAliases';

@@ -56,2 +56,2 @@ import type { string_base_url } from '../../../types/typeAliases';

* TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
*/
*/

@@ -1,2 +0,2 @@

import type { AvailableModel } from '../../execution/LlmExecutionTools';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

@@ -3,0 +3,0 @@ import type { ChatPromptResult } from '../../execution/PromptResult';

{
"name": "@promptbook/remote-client",
"version": "0.66.0-1",
"version": "0.66.0-4",
"description": "Supercharge your use of large language models",

@@ -50,3 +50,3 @@ "private": false,

"peerDependencies": {
"@promptbook/core": "0.66.0-1"
"@promptbook/core": "0.66.0-4"
},

@@ -53,0 +53,0 @@ "dependencies": {

@@ -11,3 +11,3 @@ (function (global, factory) {

*/
var PROMPTBOOK_VERSION = '0.66.0-0';
var PROMPTBOOK_VERSION = '0.66.0-3';
// TODO: !!!! List here all the versions and annotate + put into script

@@ -14,0 +14,0 @@

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc