@markprompt/core - npm Package Compare versions

Comparing version 0.27.0 to 0.28.0


dist/chat.d.ts

@@ -7,45 +7,43 @@ import type { BaseOptions, Chat, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMetadata, ChatCompletionTool, ChatCompletionToolChoiceOption, OpenAIModelId } from './types.js';
 }
-export interface SubmitChatOptions {
+export interface PoliciesOptions {
     /**
-     * Conversation ID. Returned with the first response of a conversation. Used to continue a conversation.
-     * @default undefined
-     */
-    conversationId?: string;
-    /**
-     * Conversation metadata. An arbitrary JSON payload to attach to the conversation.
-     * @default undefined
-     */
-    conversationMetadata?: any;
-    /**
-     * Enabled debug mode. This will log debug and error information to the console.
-     * @default false
-     */
-    debug?: boolean;
-    /**
-     * Message returned when the model does not have an answer
-     * @default "Sorry, I am not sure how to answer that."
+     * If true, enable the use of policies.
+     * @default true
     **/
-    iDontKnowMessage?: string;
+    enabled?: boolean;
     /**
-     * Whether or not to inject context relevant to the query.
-     * @default false
+     * If true, use all policies added in the project.
+     * Otherwise, only use the ones excplicitly specified
+     * in the `ids` list.
+     * @default true
     **/
-    doNotInjectContext?: boolean;
+    useAll?: boolean;
     /**
-     * If true, the bot may encourage the user to ask a follow-up question, for instance to gather additional information. Default `true`.
+     * Only use specific policies for retrieval.
+     * @default []
     **/
-    allowFollowUpQuestions?: boolean;
+    ids?: string[];
+}
+export interface RetrievalOptions {
     /**
-     * Whether or not to include message in insights.
-     * @default false
+     * If true, enable retrieval.
+     * @default true
     **/
-    excludeFromInsights?: boolean;
+    enabled?: boolean;
     /**
-     * The OpenAI model to use
-     * @default "gpt-3.5-turbo"
+     * If true, use all sources connected in the project.
+     * Otherwise, only use the ones excplicitly specified
+     * in the `ids` list.
+     * @default true
     **/
-    model?: OpenAIModelId;
+    useAll?: boolean;
     /**
-     * The system prompt
+     * Only use specific sources for retrieval.
+     * @default []
+    **/
+    ids?: string[];
+}
+export interface SubmitChatOptions {
+    /**
+     * The system prompt.
     * @default "You are a very enthusiastic company representative who loves to help people!"

@@ -55,3 +53,36 @@ **/
     /**
-     * The model temperature
+     * Context to use for template variable replacements in the system prompt.
+     * @default {}
+     **/
+    context?: any;
+    /**
+     * The OpenAI model to use.
+     * @default "gpt-4-turbo-preview"
+     **/
+    model?: OpenAIModelId;
+    /**
+     * Options for the use of policies.
+     **/
+    policiesOptions?: PoliciesOptions;
+    /**
+     * Options for retrieval.
+     **/
+    retrievalOptions?: RetrievalOptions;
+    /**
+     * The output format of the response.
+     * @default "markdown"
+     */
+    outputFormat?: 'markdown' | 'slack' | 'html';
+    /**
+     * If true, output the response in JSON format.
+     * @default false
+     */
+    jsonOutput?: boolean;
+    /**
+     * Remove PII from chat messages.
+     * @default false
+     */
+    redact?: boolean;
+    /**
+     * The model temperature.
     * @default 0.1

@@ -61,3 +92,3 @@ **/
     /**
-     * The model top P
+     * The model top P.
     * @default 1

@@ -67,3 +98,3 @@ **/
     /**
-     * The model frequency penalty
+     * The model frequency penalty.
     * @default 0

@@ -73,3 +104,3 @@ **/
     /**
-     * The model present penalty
+     * The model present penalty.
     * @default 0

@@ -79,3 +110,3 @@ **/
     /**
-     * The max number of tokens to include in the response
+     * The max number of tokens to include in the response.
     * @default 500

@@ -85,3 +116,3 @@ * */
     /**
-     * The number of sections to include in the prompt context
+     * The number of sections to include in the prompt context.
     * @default 10

@@ -91,3 +122,3 @@ * */
     /**
-     * The similarity threshold between the input question and selected sections
+     * The similarity threshold between the input question and selected sections.
     * @default 0.5

@@ -97,10 +128,6 @@ * */
     /**
-     * AbortController signal
+     * Thread ID. Returned with the first, and every subsequent, chat response. Used to continue a thread.
     * @default undefined
     **/
-    signal?: AbortSignal;
-    /**
-     * Disable streaming and return the entire response at once.
-     */
-    stream?: boolean;
+    threadId?: string;
     /**

@@ -118,15 +145,40 @@ * A list of tools the model may call. Currently, only functions are
     * `{"type: "function", "function": {"name": "my_function"}}` forces the
-     * model to call that function.
-     *
-     * `none` is the default when no functions are present. `auto` is the default if functions are present.
+     * model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present.
     */
-    tool_choice?: ChatCompletionToolChoiceOption;
+    toolChoice?: ChatCompletionToolChoiceOption;
     /**
-     * The output format of the response
-     */
-    outputFormat?: 'slack' | 'markdown';
-    /**
-     * Remove PII from chat messages.
-     * @default false
-     */
-    redact?: boolean;
+     * Whether or not to inject context relevant to the query.
+     * @default false
+     **/
+    doNotInjectContext?: boolean;
+    /**
+     * If true, the bot may encourage the user to ask a follow-up question, for instance to gather additional information.
+     * @default true
+     **/
+    allowFollowUpQuestions?: boolean;
+    /**
+     * Whether or not to include message in insights.
+     * @default false
+     **/
+    excludeFromInsights?: boolean;
+    /**
+     * AbortController signal.
+     * @default undefined
+     **/
+    signal?: AbortSignal;
+    /**
+     * Enabled debug mode. This will log debug and error information to the console.
+     * @default false
+     */
+    debug?: boolean;
+    /**
+     * Message returned when the model does not have an answer.
+     * @default "Sorry, I am not sure how to answer that."
+     * @deprecated Will be removed.
+     **/
+    iDontKnowMessage?: string;
+    /**
+     * Disable streaming and return the entire response at once.
+     */
+    stream?: boolean;
 }

@@ -133,0 +185,0 @@ export declare const DEFAULT_SUBMIT_CHAT_OPTIONS: {
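
The net effect is that policy and retrieval behavior now live in dedicated option groups. Below is a minimal sketch of a 0.28.0 options object, assuming `SubmitChatOptions` is re-exported from the package root; all values and IDs are illustrative placeholders, not taken from this diff.

import type { SubmitChatOptions } from '@markprompt/core';

// Illustrative values only; 'source-id' is a placeholder, not a real source.
const options: SubmitChatOptions = {
  model: 'gpt-4-turbo-preview', // the new default per this diff
  outputFormat: 'markdown',
  policiesOptions: { enabled: true, useAll: true },
  retrievalOptions: { enabled: true, useAll: false, ids: ['source-id'] },
  // threadId is returned with every chat response; pass it back to
  // continue the same thread.
  threadId: undefined,
};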

@@ -29,4 +29,3 @@ import defaults from 'defaults';
     'allowFollowUpQuestions',
-    'conversationId',
-    'conversationMetadata',
+    'context',
     'debug',

@@ -37,6 +36,10 @@ 'doNotInjectContext',
     'iDontKnowMessage',
+    'jsonOutput',
     'maxTokens',
     'model',
     'outputFormat',
+    'policiesOptions',
     'presencePenalty',
+    'redact',
+    'retrievalOptions',
     'sectionsMatchCount',

@@ -47,6 +50,6 @@ 'sectionsMatchThreshold',
     'temperature',
-    'tool_choice',
+    'threadId',
+    'toolChoice',
     'tools',
     'topP',
-    'redact',
 ];

@@ -72,4 +75,4 @@ const isValidSubmitChatOptionsKey = (key) => {
     const validOptions = Object.fromEntries(Object.entries(options).filter(([key]) => isValidSubmitChatOptionsKey(key)));
-    const { signal, tools, ...cloneableOpts } = validOptions;
-    const { debug, ...resolvedOptions } = defaults({
+    const { signal, tools, toolChoice, ...cloneableOpts } = validOptions;
+    const { debug, policiesOptions, retrievalOptions, ...resolvedOptions } = defaults({
         ...cloneableOpts,

@@ -81,2 +84,3 @@ // only include known tool properties
         })),
+        toolChoice: toolChoice,
     }, { ...DEFAULT_OPTIONS, ...DEFAULT_SUBMIT_CHAT_OPTIONS });

@@ -94,2 +98,4 @@ const res = await fetch(`${resolvedOptions.apiUrl}/chat`, {
             ...resolvedOptions,
+            policies: policiesOptions,
+            retrieval: retrievalOptions,
         }),

@@ -96,0 +102,0 @@ signal,
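
Note how the grouped options cross the wire: `policiesOptions` and `retrievalOptions` are destructured out of the resolved options and posted to `/chat` under the shorter keys `policies` and `retrieval`. A schematic of the resulting request body, with placeholder values:

// Schematic shape of the JSON body posted to `${apiUrl}/chat`.
const chatRequestBody = JSON.stringify({
  model: 'gpt-4-turbo-preview', // ...resolvedOptions (temperature, topP, etc.)
  policies: { enabled: true, useAll: true }, // from options.policiesOptions
  retrieval: { enabled: true, useAll: true }, // from options.retrievalOptions
});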

@@ -6,3 +6,3 @@ import type { PromptFeedback, CSAT, BaseOptions } from './types.js';
     /** ID of the prompt for which feedback is being submitted. */
-    promptId: string;
+    messageId: string;
 }

@@ -9,0 +9,0 @@ export interface SubmitFeedbackOptions {

@@ -16,3 +16,3 @@ import defaults from 'defaults';
     try {
-        const response = await fetch(`${resolvedOptions.apiUrl}/messages/${body.promptId}`, {
+        const response = await fetch(`${resolvedOptions.apiUrl}/messages/${body.messageId}`, {
             method: 'POST',

@@ -19,0 +19,0 @@ headers: new Headers({
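
Feedback is therefore now posted to a message-scoped URL keyed by `messageId`. Here is a hedged sketch of the equivalent raw request; the API URL and any body fields beyond the vote are assumptions, not shown in this diff:

// Hypothetical raw call mirroring the fetch above.
const apiUrl = 'https://api.markprompt.com'; // assumed default API URL
const messageId = 'your-message-id'; // returned in ChatCompletionMetadata
await fetch(`${apiUrl}/messages/${messageId}`, {
  method: 'POST',
  headers: new Headers({ 'Content-Type': 'application/json' }),
  // Vote values per the README below: "1" | "-1" | "escalated".
  body: JSON.stringify({ feedback: { vote: '1' } }),
});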

@@ -83,4 +83,4 @@ import type { DocSearchHit } from './docsearch.js';
 export interface ChatCompletionMetadata {
-    conversationId?: string;
-    promptId?: string;
+    threadId?: string;
+    messageId?: string;
     references?: FileSectionReference[];

@@ -87,0 +87,0 @@ }
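
Client code that stored `conversationId` and `promptId` from the response metadata needs the matching rename. A small migration sketch, importing the type the way these files do; the `save*` helpers are hypothetical:

import type { ChatCompletionMetadata } from './types.js';

// Hypothetical persistence helpers, for illustration only.
declare function saveThreadId(id: string): void;
declare function saveMessageId(id: string): void;

// Before: metadata.conversationId / metadata.promptId
// After:  metadata.threadId / metadata.messageId
function onMetadata(metadata: ChatCompletionMetadata): void {
  if (metadata.threadId) {
    // Pass back as `threadId` in the next chat call to continue the thread.
    saveThreadId(metadata.threadId);
  }
  if (metadata.messageId) {
    // Needed to submit feedback for this message.
    saveMessageId(metadata.messageId);
  }
}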

@@ -38,4 +38,4 @@ export const getErrorMessage = async (res) => {
     json !== null &&
-        (('conversationId' in json && typeof json.conversationId === 'string') ||
-            ('promptId' in json && typeof json.promptId === 'string') ||
+        (('threadId' in json && typeof json.threadId === 'string') ||
+            ('messageId' in json && typeof json.messageId === 'string') ||
             ('references' in json && isFileSectionReferences(json.references))));

@@ -42,0 +42,0 @@ }

package.json

 {
   "name": "@markprompt/core",
-  "version": "0.27.0",
+  "version": "0.28.0",
   "repository": {

@@ -5,0 +5,0 @@ "type": "git",

README.md

@@ -70,3 +70,3 @@ # Markprompt Core
-- `conversationId` (`string`): Conversation ID
+- `threadId` (`string`): Thread ID
 - `iDontKnowMessage` (`string`): Message returned when the model does not have

@@ -86,3 +86,3 @@ an answer
 - `tools`: (`OpenAI.ChatCompletionTool[]`): A list of tools the model may call
-- `tool_choice`: (`OpenAI.ChatCompletionToolChoiceOption`): Controls which (if
+- `toolChoice`: (`OpenAI.ChatCompletionToolChoiceOption`): Controls which (if
   any) function is called by the model

@@ -122,3 +122,3 @@
 - `feedback.feedback.vote` (`"1" | "-1" | "escalated"`): Vote
-- `feedback.promptId` (`string`): Prompt ID
+- `feedback.messageId` (`string`): Message ID
 - `projectKey` (`string`): Project key for the project

@@ -125,0 +125,0 @@ - `options` (`object`): Optional parameters
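
The `chat.d.ts` doc comment above spells out the `toolChoice` semantics. A short sketch using the `my_function` placeholder from that comment, with types imported from the `openai` package the README already references; the function schema is illustrative:

import type {
  ChatCompletionTool,
  ChatCompletionToolChoiceOption,
} from 'openai/resources/chat/completions';

// `my_function` is the placeholder name from the doc comment.
const tools: ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'my_function',
      description: 'A hypothetical function the model may call.',
      parameters: { type: 'object', properties: {} },
    },
  },
];

// Forces the model to call my_function. With tools present and no explicit
// choice, 'auto' is the default; with no tools, 'none' is.
const toolChoice: ChatCompletionToolChoiceOption = {
  type: 'function',
  function: { name: 'my_function' },
};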

Sorry, the diffs of the remaining files are not supported yet
