chatgpt-optimized-official
Comparing version 1.1.2 to 1.1.3
@@ -19,2 +19,3 @@ import Usage from "../models/chatgpt-usage.js";
askStream(data: (arg0: string) => void, usage: (usage: Usage) => void, prompt: string, conversationId?: string, userName?: string): Promise<string>;
askPost(data: (arg0: string) => void, usage: (usage: Usage) => void, prompt: string, conversationId?: string, userName?: string, type?: number): Promise<any>;
moderate(prompt: string, key: string): Promise<boolean>;
@@ -21,0 +22,0 @@ private generatePrompt;
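The typings hunk above is the visible API change in 1.1.3: askPost and moderate are now declared alongside askStream. A minimal usage sketch based only on these declarations, assuming the package is imported from its root entry point ("chatgpt-optimized-official" per the package.json further down); the conversation id and prompt are placeholders:

import { ChatGPT } from "chatgpt-optimized-official";

const bot = new ChatGPT(process.env.OPENAI_API_KEY ?? "");

async function demo() {
    // Per the implementation later in this diff, askPost resolves with the raw choice
    // object and also passes it, JSON-stringified, to the data callback.
    const choice = await bot.askPost(
        (chunk) => console.log("data callback:", chunk), // data: (arg0: string) => void
        (usage) => console.log("usage:", usage),         // usage: (usage: Usage) => void
        "Hello there",                                   // prompt
        "demo-conversation",                             // conversationId (optional)
        "User"                                           // userName (optional)
    );
    console.log(choice);
}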
@@ -49,2 +49,4 @@ import axios from "axios";
moderation: options?.moderation || false,
functions: options?.functions || [],
function_call: options?.function_call || "",
};
@@ -113,3 +115,4 @@ }
resetConversation(conversationId) {
let conversation = this.db.conversations.Where((conversation) => conversation.id === conversationId).FirstOrDefault();
let conversation = this.db.conversations.Where((conversation) => conversation.id == conversationId).FirstOrDefault();
console.log(conversation);
if (conversation) {
@@ -138,2 +141,3 @@ conversation.messages = [];
let promptStr = this.generatePrompt(conversation, prompt);
console.log(promptStr);
let prompt_tokens = this.countTokens(promptStr);
@@ -150,2 +154,4 @@ try {
stream: true,
functions: this.options.functions,
function_call: this.options.function_call,
}, {
@@ -160,7 +166,18 @@ responseType: "stream",
let responseStr = "";
let responseArg = "";
let responseNameFunction = "";
for await (const message of this.streamCompletion(response.data)) {
try {
const parsed = JSON.parse(message);
const { content } = parsed.choices[0].delta;
if (content) {
const { delta, finish_reason } = parsed.choices[0];
const { content, function_call } = delta;
if (function_call) {
responseNameFunction += function_call.name;
responseArg += function_call.arguments;
}
if (finish_reason === "function_call") {
responseStr = JSON.stringify({ "name": responseNameFunction, "arguments": responseArg });
data(responseStr);
}
else if (content) {
responseStr += content;
@@ -209,2 +226,63 @@ data(content);
}
async askPost(data, usage, prompt, conversationId = "default", userName = "User", type = MessageType.User) {
let oAIKey = this.getOpenAIKey();
let conversation = this.getConversation(conversationId, userName);
if (this.options.moderation) {
let flagged = await this.moderate(prompt, oAIKey.key);
if (flagged) {
return { message: "Your message was flagged as inappropriate and was not sent." };
}
}
let promptStr = this.generatePrompt(conversation, prompt, type);
let prompt_tokens = this.countTokens(promptStr);
try {
const response = await axios.post(this.options.endpoint, {
model: this.options.model,
messages: promptStr,
temperature: this.options.temperature,
max_tokens: this.options.max_tokens,
top_p: this.options.top_p,
frequency_penalty: this.options.frequency_penalty,
presence_penalty: this.options.presence_penalty,
stream: false,
functions: this.options.functions,
function_call: this.options.function_call,
}, {
responseType: "json",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
Authorization: `Bearer ${oAIKey.key}`,
},
});
let completion_tokens = response.data.usage['completion_tokens'];
let usageData = {
key: oAIKey.key,
prompt_tokens: prompt_tokens,
completion_tokens: completion_tokens,
total_tokens: prompt_tokens + completion_tokens,
};
if (this.onUsage)
this.onUsage(usageData);
oAIKey.tokens += usageData.total_tokens;
oAIKey.balance = (oAIKey.tokens / 1000) * this.options.price;
oAIKey.queries++;
conversation.messages.push({
id: randomUUID(),
content: response.data.choices[0]['message']['content'] ? response.data.choices[0]['message']['content'] : "",
type: MessageType.Assistant,
date: Date.now(),
});
data(JSON.stringify(response.data.choices[0]));
return response.data.choices[0];
}
catch (error) {
if (error.response && error.response.data && error.response.headers["content-type"] === "application/json") {
throw new Error(error.response.data.error.message);
}
else {
throw new Error(error.message);
}
}
}
async moderate(prompt, key) {
@@ -222,7 +300,7 @@ try {
}
generatePrompt(conversation, prompt) {
generatePrompt(conversation, prompt, type = MessageType.User) {
conversation.messages.push({
id: randomUUID(),
content: prompt,
type: MessageType.User,
type: type,
date: Date.now(),
@@ -229,0 +307,0 @@ });
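With the compiled askStream change above, the data callback can now receive two kinds of payloads: plain content chunks, or, once finish_reason === "function_call", a single JSON string of the form {"name": "...", "arguments": "..."}. A sketch of telling them apart on the caller side; the try/parse heuristic here is an assumption of mine, not something the library provides:

// Caller-side handler for askStream's data callback (assumes a `bot` instance as in the example file below).
function handleChunk(chunk: string): void {
    try {
        const maybeCall = JSON.parse(chunk) as { name?: string; arguments?: string };
        if (typeof maybeCall.name === "string" && typeof maybeCall.arguments === "string") {
            // `arguments` is the JSON string accumulated from the model's streamed deltas.
            console.log("function call:", maybeCall.name, JSON.parse(maybeCall.arguments));
            return;
        }
    } catch {
        // Not JSON: treat it as an ordinary streamed content chunk.
    }
    process.stdout.write(chunk);
}

// bot.askStream(handleChunk, () => { }, "I'd like to book a test drive", "demo-conversation");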
@@ -13,3 +13,5 @@ interface ChatGPTOptions {
endpoint?: string;
functions?: [];
function_call?: string;
}
export default ChatGPTOptions;
@@ -15,3 +15,5 @@ interface Options {
max_conversation_tokens?: number;
functions?: [];
function_call?: string;
}
export default Options;
@@ -9,8 +9,46 @@ import { ChatGPT } from "../dist/index.js";
let bot = new ChatGPT(process.env.OPENAI_API_KEY);
let bot = new ChatGPT("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", {
temperature: 0.7, // OpenAI parameter
max_tokens: 256, // OpenAI parameter [Max response size by tokens]
top_p: 0.9, // OpenAI parameter
frequency_penalty: 0, // OpenAI parameter
presence_penalty: 0, // OpenAI parameter
instructions: "Eres un asistente de Mg Motors tu trabajo es ayudar a los clientes a resolver sus dudas sobre nuestros autos. y a la vez ofrecer pruebas de manejo para esto ultimo debes solicitar al cliente su nombre, email y correo",
model: "gpt-3.5-turbo-0613", // OpenAI parameter `gpt-3.5-turbo` is PAID
functions: [
{
"name": "saveDataUser",
"description": "Guardar los datos del usuario para solicitar un prueba de manejo",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Nombre del usuario",
},
"email": {
"type": "string",
"description": "Correo del usuario",
},
"phone": {
"type": "string",
"description": "Telefono del usuario",
}
},
"required": ["name", "email", "phone"],
},
}
],
function_call: "auto",
}
);
// bot.onUsage = console.log;
async function main() {
bot.resetConversation("16");
while (true) {
let prompt = await new Promise((resolve) => {
@@ -23,5 +61,5 @@ rl.question("You: ", (answer) => {
process.stdout.write("ChatGPT: ");
await bot.askStream(res => {
await bot.askPost(res => {
process.stdout.write(res.toString());
}, _ => { }, prompt);
}, _ => { }, prompt, "16");
console.log();
@@ -28,0 +66,0 @@ }
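The updated example registers a saveDataUser function schema and simply prints whatever askPost hands back. A hedged sketch of what actually dispatching that function call could look like; the local saveDataUser implementation and the finish_reason / message.function_call checks are assumptions based on the OpenAI chat-completions response shape, not code shipped with this package:

// Hypothetical local handler matching the schema registered above.
function saveDataUser(args: { name: string; email: string; phone: string }): void {
    console.log("Saving test-drive request:", args);
}

// Inside main(), reusing `bot` and `prompt` from the example:
const choice = await bot.askPost(_ => { }, _ => { }, prompt, "16");
if (choice.finish_reason === "function_call" && choice.message?.function_call) {
    const { name, arguments: rawArgs } = choice.message.function_call;
    if (name === "saveDataUser") {
        saveDataUser(JSON.parse(rawArgs)); // `arguments` arrives as a JSON-encoded string
    }
} else if (choice.message?.content) {
    console.log("ChatGPT:", choice.message.content);
}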
{
"name": "chatgpt-optimized-official",
"version": "1.1.2",
"version": "1.1.3",
"description": "ChatGPT Client using official OpenAI API",
@@ -5,0 +5,0 @@ "main": "dist/index.js",
@@ -12,2 +12,3 @@ import axios from "axios";
import { Configuration, OpenAIApi } from "openai";
import { type } from "os";
@@ -54,2 +55,4 @@ class ChatGPT {
moderation: options?.moderation || false,
functions: options?.functions || [],
function_call: options?.function_call || "",
};
@@ -131,3 +134,4 @@ }
public resetConversation(conversationId: string) {
let conversation = this.db.conversations.Where((conversation) => conversation.id === conversationId).FirstOrDefault();
let conversation = this.db.conversations.Where((conversation) => conversation.id == conversationId).FirstOrDefault();
//console.log(conversation);
if (conversation) {
@@ -143,4 +147,4 @@ conversation.messages = [];
return await this.askStream(
(data) => {},
(data) => {},
(data) => { },
(data) => { },
prompt,
@@ -155,3 +159,2 @@ conversationId,
let conversation = this.getConversation(conversationId, userName);
if (this.options.moderation) {
@@ -169,2 +172,3 @@ let flagged = await this.moderate(prompt, oAIKey.key);
let promptStr = this.generatePrompt(conversation, prompt);
console.log(promptStr);
let prompt_tokens = this.countTokens(promptStr);
@@ -183,2 +187,4 @@ try {
stream: true,
functions: this.options.functions,
function_call: this.options.function_call,
},
@@ -194,13 +200,26 @@ {
);
// console.log("Stream message:", response.data)
let responseStr = "";
let responseArg = "";
let responseNameFunction = "";
for await (const message of this.streamCompletion(response.data)) {
try {
const parsed = JSON.parse(message);
const { content } = parsed.choices[0].delta;
if (content) {
responseStr += content;
data(content);
const { delta, finish_reason } = parsed.choices[0];
const { content, function_call } = delta;
if (function_call) {
responseNameFunction += function_call.name;
responseArg += function_call.arguments;
}
//console.log("Stream message:", parsed.choices[0])
if (finish_reason === "function_call") {
responseStr = JSON.stringify({ "name": responseNameFunction, "arguments": responseArg });
data(responseStr);
} else
if (content) {
responseStr += content;
data(content);
}
} catch (error) {
@@ -250,3 +269,80 @@ console.error("Could not JSON parse stream message", message, error);
}
public async askV1(prompt: string, conversationId: string = "default", userName: string = "User", type:number=1) {
return await this.askPost(
(data) => { },
(data) => { },
prompt,
conversationId,
userName,
type
);
}
public async askPost(data: (arg0: string) => void, usage: (usage: Usage) => void, prompt: string, conversationId: string = "default", userName: string = "User", type: number = MessageType.User) {
let oAIKey = this.getOpenAIKey();
let conversation = this.getConversation(conversationId, userName);
if (this.options.moderation) {
let flagged = await this.moderate(prompt, oAIKey.key);
if (flagged) {
return { message: "Your message was flagged as inappropriate and was not sent." };
}
}
let promptStr = this.generatePrompt(conversation, prompt, type);
let prompt_tokens = this.countTokens(promptStr);
try {
const response = await axios.post(
this.options.endpoint,
{
model: this.options.model,
messages: promptStr,
temperature: this.options.temperature,
max_tokens: this.options.max_tokens,
top_p: this.options.top_p,
frequency_penalty: this.options.frequency_penalty,
presence_penalty: this.options.presence_penalty,
stream: false, // Note this
functions: this.options.functions,
function_call: this.options.function_call,
},
{
responseType: "json", // Note this
headers: {
Accept: "application/json", // Note this
"Content-Type": "application/json",
Authorization: `Bearer ${oAIKey.key}`,
},
},
);
// console.log("Stream message:", response.data.choices[0])
let completion_tokens = response.data.usage['completion_tokens'];
let usageData = {
key: oAIKey.key,
prompt_tokens: prompt_tokens,
completion_tokens: completion_tokens,
total_tokens: prompt_tokens + completion_tokens,
};
if (this.onUsage) this.onUsage(usageData);
oAIKey.tokens += usageData.total_tokens;
oAIKey.balance = (oAIKey.tokens / 1000) * this.options.price;
oAIKey.queries++;
conversation.messages.push({
id: randomUUID(),
content: response.data.choices[0]['message']['content'] ? response.data.choices[0]['message']['content'] : "",
type: MessageType.Assistant,
date: Date.now(),
});
data(JSON.stringify(response.data.choices[0]))
return response.data.choices[0]; // return the full response
} catch (error: any) {
if (error.response && error.response.data && error.response.headers["content-type"] === "application/json") {
throw new Error(error.response.data.error.message);
} else {
throw new Error(error.message);
}
}
}
public async moderate(prompt: string, key: string) {
@@ -264,7 +360,7 @@ try {
private generatePrompt(conversation: Conversation, prompt: string): Message[] {
private generatePrompt(conversation: Conversation, prompt: string, type: number = MessageType.User): Message[] {
conversation.messages.push({
id: randomUUID(),
content: prompt,
type: MessageType.User,
type: type,
date: Date.now(),
@@ -271,0 +367,0 @@ });
@@ -13,4 +13,6 @@ interface ChatGPTOptions {
endpoint?: string;
}
functions?:[];
function_call?: string;
}
export default ChatGPTOptions;
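One note on the option typings above: in TypeScript, functions?: [] declares an empty-tuple type, so a non-empty functions array such as the one in the example would not satisfy it under strict checking. A wider declaration along these lines would express the intent; this is an illustrative alternative, not what the package ships:

// Sketch of a wider type for OpenAI function definitions (field names follow the chat completions API).
interface FunctionDefinition {
    name: string;
    description?: string;
    parameters?: Record<string, unknown>; // JSON Schema object
}

interface ChatGPTOptionsSketch {
    functions?: FunctionDefinition[];
    function_call?: "none" | "auto" | { name: string };
}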
@@ -15,4 +15,6 @@ interface Options {
max_conversation_tokens?: number;
functions?:[];
function_call?: string;
}
export default Options;
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package