@waylaidwanderer/chatgpt-api
Comparing version 1.6.2 to 1.7.0
  {
    "name": "@waylaidwanderer/chatgpt-api",
-   "version": "1.6.2",
+   "version": "1.7.0",
    "description": "A ChatGPT implementation using the official ChatGPT model via OpenAI's API.",
@@ -5,0 +5,0 @@ "main": "index.js",
@@ -66,4 +66,4 @@ <p align="center">
  },
- // (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
- // promptPrefix: 'You are not ChatGPT...\n\n',
+ // (Optional) Set custom instructions instead of "You are ChatGPT...".
+ // promptPrefix: 'You are Bob, a cowboy in Western times...',
  // (Optional) Set a custom name for the user
@@ -115,4 +115,4 @@ // userLabel: 'User',
  },
- // (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
- // promptPrefix: 'You are not ChatGPT...\n\n',
+ // (Optional) Set custom instructions instead of "You are ChatGPT...".
+ // promptPrefix: 'You are Bob, a cowboy in Western times...',
  // (Optional) Set a custom name for the user
@@ -119,0 +119,0 @@ // userLabel: 'User',
@@ -11,4 +11,4 @@ export default {
  },
- // (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
- // promptPrefix: 'You are not ChatGPT...\n\n',
+ // (Optional) Set custom instructions instead of "You are ChatGPT...".
+ // promptPrefix: 'You are Bob, a cowboy in Western times...',
  // (Optional) Set a custom name for the user
@@ -15,0 +15,0 @@ // userLabel: 'User',
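For context, here is a minimal sketch of how the reworked option reads in practice. Only `promptPrefix`, `userLabel`, and `chatGptLabel` come from the diff above; the surrounding shape is illustrative, not the package's exact schema.

```js
// Illustrative only: option names other than promptPrefix/userLabel/chatGptLabel are assumptions.
const clientOptions = {
    // Since 1.7.0, promptPrefix supplies custom instructions that replace the default
    // "You are ChatGPT..." preamble instead of being stacked in front of it.
    promptPrefix: 'You are Bob, a cowboy in Western times...',
    // Optional display names for the two speakers in the generated transcript.
    userLabel: 'User',
    chatGptLabel: 'ChatGPT',
};
```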
@@ -22,3 +22,3 @@ import fetch from 'node-fetch';
  model: modelOptions.model || CHATGPT_MODEL,
- temperature: typeof modelOptions.temperature === 'undefined' ? 0.9 : modelOptions.temperature,
+ temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
  top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
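The defaulting pattern in this hunk is worth spelling out. The `withDefault` helper below is purely illustrative and not part of the library; it shows why a `typeof` check is used here instead of `||`.

```js
// A `typeof`-based default keeps falsy-but-intentional overrides such as 0,
// which `modelOptions.temperature || 0.8` would silently discard.
function withDefault(value, fallback) {
    return typeof value === 'undefined' ? fallback : value;
}

const modelOptions = { temperature: 0 };                  // caller wants deterministic sampling
console.log(withDefault(modelOptions.temperature, 0.8)); // 0, not 0.8
console.log(withDefault(modelOptions.top_p, 1));         // 1 (not set, so the fallback applies)
```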
@@ -29,2 +29,5 @@ presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 0.6 : modelOptions.presence_penalty,
+ this.userLabel = this.options.userLabel || 'User';
+ this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
  if (this.modelOptions.model.startsWith('text-chat')) {
@@ -44,2 +47,5 @@ this.endToken = '<|im_end|>';
  }
+ this.modelOptions.stop.push(`\n\n${this.userLabel}:`);
+ this.modelOptions.stop.push(`\n\nInstructions:`);
+ // I chose not to do one for `chatGptLabel` because I've never seen it happen, plus there's a max of 4 stops
  }
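To show what those two `push` calls amount to, here is a sketch of the resulting stop list. Whether the model's end token is already in `modelOptions.stop` depends on code outside this diff and is assumed here, based on the `<|im_end|>` context line in the hunk; the OpenAI completions API accepts at most four stop sequences, which is why no entry is added for `chatGptLabel`.

```js
// Sketch of the effective stop list for the default labels (assumptions noted above).
const userLabel = 'User';
const stop = ['<|im_end|>'];     // model end-of-message token for text-chat-* models (assumed present)
stop.push(`\n\n${userLabel}:`);  // cut generation off if the model starts writing the user's next turn
stop.push('\n\nInstructions:');  // cut generation off if the model opens a new Instructions block
// stop.length must stay <= 4: the completions API accepts at most four stop sequences.
```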
@@ -106,2 +112,3 @@
  console.debug(JSON.stringify(result));
+ console.debug();
  }
@@ -145,3 +152,3 @@
  if (this.options.promptPrefix) {
- promptPrefix = this.options.promptPrefix;
+ promptPrefix = this.options.promptPrefix.trim();
  // If the prompt prefix doesn't end with the separator token, add it.
@@ -151,2 +158,3 @@ if (!promptPrefix.endsWith(`${this.separatorToken}\n\n`)) {
  }
+ promptPrefix = `\nInstructions:\n${promptPrefix}`;
  } else {
@@ -158,10 +166,7 @@ const currentDateString = new Date().toLocaleDateString(
- promptPrefix = `You are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.endToken}\n\n`
+ promptPrefix = `\nInstructions:\nYou are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.separatorToken}\n\n`
  }
- const userLabel = this.options.userLabel || 'User';
- const chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
- const promptSuffix = `${chatGptLabel}:\n`; // Prompt ChatGPT to respond.
+ const promptSuffix = `${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.
  let currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
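Putting the prefix and suffix changes together, this is roughly what a single-turn prompt looks like after this release. The persona text and user message are invented examples, and `<|im_end|>` stands in for the separator token of a text-chat-* model.

```js
// Illustrative single-turn prompt under 1.7.0 (values are made up).
const separator = '<|im_end|>';
const prompt =
    '\nInstructions:\n' +
    `You are Bob, a cowboy in Western times...${separator}\n\n` + // promptPrefix
    `User:\nHowdy! Who are you?${separator}\n` +                  // latest user message
    'ChatGPT:\n';                                                 // promptSuffix cues the reply
```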
@@ -174,5 +179,14 @@ let promptBody = '';
  const message = orderedMessages.pop();
- const roleLabel = message.role === 'User' ? userLabel : chatGptLabel;
+ const roleLabel = message.role === 'User' ? this.userLabel : this.chatGptLabel;
  const messageString = `${roleLabel}:\n${message.message}${this.separatorToken}\n`;
- const newPromptBody = `${messageString}${promptBody}`;
+ let newPromptBody;
+ if (promptBody) {
+     newPromptBody = `${messageString}${promptBody}`;
+ } else {
+     // Always insert prompt prefix before the last user message.
+     // This makes the AI obey the prompt instructions better, which is important for custom instructions.
+     // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
+     // like "what's the last thing I wrote?".
+     newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
+ }
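A self-contained sketch of this ordering logic follows. It is a simplification, not the library's exact code: role labels are taken directly from the message and the token-budget check the real loop also applies is omitted. It only shows where the prefix ends up.

```js
// Simplified re-implementation of the idea above. Messages are consumed newest-first
// and prepended, so the prefix lands directly before the latest message rather than
// at the very top of the prompt.
function buildPromptBody(orderedMessages, promptPrefix, separatorToken) {
    const remaining = [...orderedMessages]; // oldest -> newest
    let promptBody = '';
    while (remaining.length > 0) {
        const message = remaining.pop(); // newest message still unprocessed
        const messageString = `${message.role}:\n${message.message}${separatorToken}\n`;
        promptBody = promptBody
            ? `${messageString}${promptBody}`    // older turns stack above what we already have
            : `${promptPrefix}${messageString}`; // first (newest) turn gets the prefix
    }
    return promptBody;
}

const body = buildPromptBody(
    [
        { role: 'User', message: 'Howdy!' },
        { role: 'ChatGPT', message: 'Well howdy, partner.' },
        { role: 'User', message: 'Who are you?' },
    ],
    '\nInstructions:\nYou are Bob, a cowboy in Western times...<|im_end|>\n\n',
    '<|im_end|>',
);
// body: the User/ChatGPT history first, then "Instructions:", then the final "Who are you?" turn.
```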
@@ -193,3 +207,3 @@ // The reason I don't simply get the token count of the messageString and add it to currentTokenCount is because
- const prompt = `${promptPrefix}${promptBody}${promptSuffix}`;
+ const prompt = `${promptBody}${promptSuffix}`;
@@ -196,0 +210,0 @@ const numTokens = this.getTokenCount(prompt);
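The final assembly drops `promptPrefix` because the loop above already folded it into `promptBody`. `getTokenCount` itself is not shown in this diff; the sketch below assumes a typical implementation on top of the `gpt-3-encoder` package, which may differ from what the library actually uses.

```js
// Assumed implementation of getTokenCount, not confirmed by this diff.
import { encode } from 'gpt-3-encoder';

function getTokenCount(text) {
    return encode(text).length; // number of BPE tokens the prompt will consume
}

// With the prefix already inside promptBody, appending it again here would
// duplicate the instructions and overstate the token count.
const promptBody = '...';          // built by the loop above (already contains the prefix)
const promptSuffix = 'ChatGPT:\n';
const numTokens = getTokenCount(`${promptBody}${promptSuffix}`);
```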
License Policy Violation
This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package