Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@promptbook/remote-client

Package Overview
Dependencies
Maintainers
1
Versions
466
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@promptbook/remote-client - npm Package Compare versions

Comparing version 0.52.0-1 to 0.52.0-2

esm/typings/_packages/anthropic-claude.index.d.ts

2

esm/index.es.js

@@ -144,3 +144,3 @@ import { io } from 'socket.io-client';

return [2 /*return*/, [
/* !!!!! */
/* !!! */
]];

@@ -147,0 +147,0 @@ });

@@ -1,4 +0,4 @@

import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };

@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Mocks completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -21,0 +21,0 @@ * List all available mocked-models that can be used

@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
/**
* Fakes completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
/**

@@ -21,0 +21,0 @@ * List all available fake-models that can be used

/**
* String value found on openai page
* String value found on OpenAI and Anthropic Claude page
*
* @see https://openai.com/api/pricing/
* @see https://docs.anthropic.com/en/docs/models-overview
*
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
*/
type string_openai_price = `$${number}.${number} / ${number}M tokens`;
type string_model_price = `$${number}.${number} / ${number}M tokens`;
/**

@@ -12,3 +15,3 @@ * Function computeUsage will create price per one token based on the string value found on openai page

*/
export declare function computeUsage(value: string_openai_price): number;
export declare function computeUsage(value: string_model_price): number;
export {};

@@ -13,3 +13,3 @@ import type { Prompt } from '../../../../types/Prompt';

*/
private readonly openai;
private readonly client;
/**

@@ -24,7 +24,7 @@ * Creates OpenAI Execution Tools.

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls OpenAI API to use a complete model.
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -31,0 +31,0 @@ * Default model for chat variant.

@@ -13,5 +13,7 @@ import type { ClientOptions } from 'openai';

* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* and detect abuse.
*
* @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
*/
user?: string_token;
};

@@ -26,3 +26,3 @@ import type { number_positive_or_zero, number_tokens, number_usd, string_date_iso8601, string_model_name } from '.././types/typeAliases';

*/
readonly model: string_model_name;
readonly modelName: string_model_name;
/**

@@ -58,7 +58,7 @@ * Timing

*/
inputTokens: number_tokens;
inputTokens: number_tokens | 'UNKNOWN';
/**
* Number of tokens used in the output aka. `completion_tokens`
*/
outputTokens: number_tokens;
outputTokens: number_tokens | 'UNKNOWN';
};

@@ -65,0 +65,0 @@ /**

{
"name": "@promptbook/remote-client",
"version": "0.52.0-1",
"version": "0.52.0-2",
"description": "Library to supercharge your use of large language models",

@@ -50,3 +50,3 @@ "private": false,

"peerDependencies": {
"@promptbook/core": "0.52.0-1"
"@promptbook/core": "0.52.0-2"
},

@@ -53,0 +53,0 @@ "main": "./umd/index.umd.js",

@@ -332,3 +332,3 @@ # ![Promptbook logo - cube with letters P and B](./other/design/logo-h1.png) Promptbook

- **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
- **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
- **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
- **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API

@@ -486,4 +486,5 @@ - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK

- `OpenAiExecutionTools`
- _(Not implemented yet !!!!! )_ `AnthropicClaudeExecutionTools`
- _(Not implemented yet !!!!! )_ `AzureOpenAiExecutionTools`
- `AnthropicClaudeExecutionTools`
- `AzureOpenAiExecutionTools`
- `LangtailExecutionTools`
- _(Not implemented yet)_ `BardExecutionTools`

@@ -490,0 +491,0 @@ - _(Not implemented yet)_ `LamaExecutionTools`

@@ -148,3 +148,3 @@ (function (global, factory) {

return [2 /*return*/, [
/* !!!!! */
/* !!! */
]];

@@ -151,0 +151,0 @@ });

@@ -1,4 +0,4 @@

import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };

@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Mocks completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -21,0 +21,0 @@ * List all available mocked-models that can be used

@@ -14,7 +14,7 @@ import type { Prompt } from '../../../../types/Prompt';

*/
gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
/**
* Fakes completion model
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
/**

@@ -21,0 +21,0 @@ * List all available fake-models that can be used

/**
* String value found on openai page
* String value found on OpenAI and Anthropic Claude page
*
* @see https://openai.com/api/pricing/
* @see https://docs.anthropic.com/en/docs/models-overview
*
* @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
*/
type string_openai_price = `$${number}.${number} / ${number}M tokens`;
type string_model_price = `$${number}.${number} / ${number}M tokens`;
/**

@@ -12,3 +15,3 @@ * Function computeUsage will create price per one token based on the string value found on openai page

*/
export declare function computeUsage(value: string_openai_price): number;
export declare function computeUsage(value: string_model_price): number;
export {};

@@ -13,3 +13,3 @@ import type { Prompt } from '../../../../types/Prompt';

*/
private readonly openai;
private readonly client;
/**

@@ -24,7 +24,7 @@ * Creates OpenAI Execution Tools.

*/
gptChat(prompt: Prompt): Promise<PromptChatResult>;
gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
/**
* Calls OpenAI API to use a complete model.
*/
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
/**

@@ -31,0 +31,0 @@ * Default model for chat variant.

@@ -13,5 +13,7 @@ import type { ClientOptions } from 'openai';

* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* and detect abuse.
*
* @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
*/
user?: string_token;
};

@@ -26,3 +26,3 @@ import type { number_positive_or_zero, number_tokens, number_usd, string_date_iso8601, string_model_name } from '.././types/typeAliases';

*/
readonly model: string_model_name;
readonly modelName: string_model_name;
/**

@@ -58,7 +58,7 @@ * Timing

*/
inputTokens: number_tokens;
inputTokens: number_tokens | 'UNKNOWN';
/**
* Number of tokens used in the output aka. `completion_tokens`
*/
outputTokens: number_tokens;
outputTokens: number_tokens | 'UNKNOWN';
};

@@ -65,0 +65,0 @@ /**

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc