Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a DemoInstallSign in
Socket

gpt-query

Package Overview
Dependencies
Maintainers
0
Versions
10
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

gpt-query - npm Package Compare versions

Comparing version
0.0.0-beta2
to
0.0.0-beta3
+2
-2
dist/cjs/index.js

@@ -32,4 +32,4 @@ "use strict";

*/
processPrompt(prompt_1) {
return __awaiter(this, arguments, void 0, function* (prompt, context = '', options = {}) {
processPrompt(prompt, context, options) {
return __awaiter(this, void 0, void 0, function* () {
try {

@@ -36,0 +36,0 @@ return yield this.promptProcessor.processPrompt(prompt, context, options);

@@ -14,3 +14,3 @@ "use strict";

class PromptProcessor {
constructor({ openai, lightweightModel, finalModel, toolExecutor, }) {
constructor({ openai, lightweightModel = 'gpt-4o-mini', finalModel = 'gpt-4o', toolExecutor, }) {
this.openai = openai;

@@ -24,9 +24,7 @@ this.lightweightModel = lightweightModel;

*/
decideSearchAndDetectLanguage(prompt) {
decideSearchAndDetectLanguage(prompt, options) {
return __awaiter(this, void 0, void 0, function* () {
var _a, _b, _c;
try {
const response = yield this.openai.chat.completions.create({
model: this.lightweightModel,
messages: [
const response = yield this.openai.chat.completions.create(Object.assign(Object.assign({}, options), { max_tokens: options.max_tokens || 100, model: this.lightweightModel, messages: [
{

@@ -37,4 +35,3 @@ role: 'system',

{ role: 'user', content: prompt },
],
tools: [
], tools: [
{

@@ -62,7 +59,3 @@ type: 'function',

},
],
tool_choice: 'auto',
max_tokens: 50,
temperature: 0,
});
], tool_choice: 'auto', stream: false }));
const toolCall = (_c = (_b = (_a = response.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.tool_calls) === null || _c === void 0 ? void 0 : _c[0];

@@ -87,11 +80,13 @@ if (toolCall && toolCall.function.name === 'decide_search_and_language') {

*/
processPrompt(prompt, userContext, options) {
return __awaiter(this, void 0, void 0, function* () {
var _a, _b, _c, _d, _e, _f, _g, _h, _j;
processPrompt(prompt_1, userContext_1) {
return __awaiter(this, arguments, void 0, function* (prompt, userContext, options = {
lightweightModel: this.lightweightModel,
finalModel: this.finalModel,
}) {
var _a, _b, _c, _d;
try {
const { searchQuery, language } = yield this.decideSearchAndDetectLanguage(prompt);
const { searchQuery, language } = yield this.decideSearchAndDetectLanguage(prompt, options.lightWeightModelOptions || {});
const searchResults = searchQuery
? yield this.toolExecutor.performSearch(searchQuery)
: '';
// Step 5: Prepare final messages
const finalMessages = [{ role: 'system', content: userContext }];

@@ -105,13 +100,4 @@ if (searchResults) {

}, { role: 'user', content: prompt });
// Step 6: Generate the final response
const finalResponse = yield this.openai.chat.completions.create({
model: this.finalModel,
messages: finalMessages,
frequency_penalty: (_a = options.frequencyPenalty) !== null && _a !== void 0 ? _a : 0,
presence_penalty: (_b = options.presencePenalty) !== null && _b !== void 0 ? _b : 0,
max_tokens: (_c = options.maxTokens) !== null && _c !== void 0 ? _c : 200,
temperature: (_d = options.temperature) !== null && _d !== void 0 ? _d : 0.5,
top_p: (_e = options.topP) !== null && _e !== void 0 ? _e : 1,
});
return ((_j = (_h = (_g = (_f = finalResponse.choices[0]) === null || _f === void 0 ? void 0 : _f.message) === null || _g === void 0 ? void 0 : _g.content) === null || _h === void 0 ? void 0 : _h.trim()) !== null && _j !== void 0 ? _j : 'No response generated.');
const finalResponse = yield this.openai.chat.completions.create(Object.assign(Object.assign({}, options.finalModelOptions), { stream: false, model: this.finalModel, messages: finalMessages }));
return ((_d = (_c = (_b = (_a = finalResponse.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.trim()) !== null && _d !== void 0 ? _d : 'No response generated.');
}

@@ -118,0 +104,0 @@ catch (error) {

@@ -29,4 +29,4 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {

*/
processPrompt(prompt_1) {
return __awaiter(this, arguments, void 0, function* (prompt, context = '', options = {}) {
processPrompt(prompt, context, options) {
return __awaiter(this, void 0, void 0, function* () {
try {

@@ -33,0 +33,0 @@ return yield this.promptProcessor.processPrompt(prompt, context, options);

@@ -11,3 +11,3 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {

export class PromptProcessor {
constructor({ openai, lightweightModel, finalModel, toolExecutor, }) {
constructor({ openai, lightweightModel = 'gpt-4o-mini', finalModel = 'gpt-4o', toolExecutor, }) {
this.openai = openai;

@@ -21,9 +21,7 @@ this.lightweightModel = lightweightModel;

*/
decideSearchAndDetectLanguage(prompt) {
decideSearchAndDetectLanguage(prompt, options) {
return __awaiter(this, void 0, void 0, function* () {
var _a, _b, _c;
try {
const response = yield this.openai.chat.completions.create({
model: this.lightweightModel,
messages: [
const response = yield this.openai.chat.completions.create(Object.assign(Object.assign({}, options), { max_tokens: options.max_tokens || 100, model: this.lightweightModel, messages: [
{

@@ -34,4 +32,3 @@ role: 'system',

{ role: 'user', content: prompt },
],
tools: [
], tools: [
{

@@ -59,7 +56,3 @@ type: 'function',

},
],
tool_choice: 'auto',
max_tokens: 50,
temperature: 0,
});
], tool_choice: 'auto', stream: false }));
const toolCall = (_c = (_b = (_a = response.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.tool_calls) === null || _c === void 0 ? void 0 : _c[0];

@@ -84,11 +77,13 @@ if (toolCall && toolCall.function.name === 'decide_search_and_language') {

*/
processPrompt(prompt, userContext, options) {
return __awaiter(this, void 0, void 0, function* () {
var _a, _b, _c, _d, _e, _f, _g, _h, _j;
processPrompt(prompt_1, userContext_1) {
return __awaiter(this, arguments, void 0, function* (prompt, userContext, options = {
lightweightModel: this.lightweightModel,
finalModel: this.finalModel,
}) {
var _a, _b, _c, _d;
try {
const { searchQuery, language } = yield this.decideSearchAndDetectLanguage(prompt);
const { searchQuery, language } = yield this.decideSearchAndDetectLanguage(prompt, options.lightWeightModelOptions || {});
const searchResults = searchQuery
? yield this.toolExecutor.performSearch(searchQuery)
: '';
// Step 5: Prepare final messages
const finalMessages = [{ role: 'system', content: userContext }];

@@ -102,13 +97,4 @@ if (searchResults) {

}, { role: 'user', content: prompt });
// Step 6: Generate the final response
const finalResponse = yield this.openai.chat.completions.create({
model: this.finalModel,
messages: finalMessages,
frequency_penalty: (_a = options.frequencyPenalty) !== null && _a !== void 0 ? _a : 0,
presence_penalty: (_b = options.presencePenalty) !== null && _b !== void 0 ? _b : 0,
max_tokens: (_c = options.maxTokens) !== null && _c !== void 0 ? _c : 200,
temperature: (_d = options.temperature) !== null && _d !== void 0 ? _d : 0.5,
top_p: (_e = options.topP) !== null && _e !== void 0 ? _e : 1,
});
return ((_j = (_h = (_g = (_f = finalResponse.choices[0]) === null || _f === void 0 ? void 0 : _f.message) === null || _g === void 0 ? void 0 : _g.content) === null || _h === void 0 ? void 0 : _h.trim()) !== null && _j !== void 0 ? _j : 'No response generated.');
const finalResponse = yield this.openai.chat.completions.create(Object.assign(Object.assign({}, options.finalModelOptions), { stream: false, model: this.finalModel, messages: finalMessages }));
return ((_d = (_c = (_b = (_a = finalResponse.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.content) === null || _c === void 0 ? void 0 : _c.trim()) !== null && _d !== void 0 ? _d : 'No response generated.');
}

@@ -115,0 +101,0 @@ catch (error) {

@@ -20,4 +20,4 @@ import type OpenAI from 'openai';

*/
processPrompt(prompt: string, context?: string, options?: PromptOptions): Promise<string>;
processPrompt(prompt: string, context: string, options: PromptOptions): Promise<string>;
}
export default GPTQuery;

@@ -22,3 +22,3 @@ import type OpenAI from 'openai';

*/
processPrompt(prompt: string, userContext: string, options: PromptOptions): Promise<string>;
processPrompt(prompt: string, userContext: string, options?: PromptOptions): Promise<string>;
}

@@ -0,1 +1,2 @@

import type { ChatCompletionCreateParamsBase } from 'openai/resources/chat/completions';
export type GoogleParams = {

@@ -5,8 +6,8 @@ key: string;

};
export type PartialChatCompletionCreateParams = Omit<ChatCompletionCreateParamsBase, 'model' | 'messages'>;
export type PromptOptions = {
maxTokens?: number;
temperature?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
lightweightModel?: string;
lightWeightModelOptions?: PartialChatCompletionCreateParams;
finalModel?: string;
finalModelOptions?: PartialChatCompletionCreateParams;
};
{
"name": "gpt-query",
"version": "0.0.0-beta2",
"version": "0.0.0-beta3",
"keywords": [],

@@ -5,0 +5,0 @@ "author": "",