New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

node-red-contrib-openai-ubos

Package Overview
Dependencies
Maintainers
1
Versions
9
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

node-red-contrib-openai-ubos - npm Package Compare versions

Comparing version 1.0.1 to 1.0.2

2

package.json
{
"name": "node-red-contrib-openai-ubos",
"version": "1.0.1",
"version": "1.0.2",
"description": "",

@@ -5,0 +5,0 @@ "main": "subflow.js",

@@ -231,3 +231,3 @@ {

"name": "",
"func": "/*\n model: \"text-davinci-003\" | \"text-davinci-002\" | \"gpt-3.5-turbo\" | \"gpt-4\"\n*/\nconst model = msg.model || env.get(\"model\");\n\nif (!env.get(\"OPENAI_API_KEY\") && !msg.OPENAI_API_KEY) {\n msg.payload = \"Enter OPENAI_API_KEY\";\n \n return [null, msg];\n}\n\nmsg.headers = {\n 'Content-Type': 'application/json',\n 'Authorization': `Bearer ${env.get(\"OPENAI_API_KEY\") || msg.OPENAI_API_KEY}`\n};\n\nconst stopReq = msg.stop || env.get(\"stop\");\nconst baseRequestBody = {\n model,\n temperature: msg.temperature || env.get(\"temperature\"),\n max_tokens: msg.max_tokens || env.get(\"max_tokens\"),\n top_p: msg.top_p || env.get(\"top_p\"),\n frequency_penalty: msg.frequency_penalty || env.get(\"frequency_penalty\"),\n presence_penalty: msg.presence_penalty || env.get(\"presence_penalty\"),\n stop: stopReq && stopReq.length === 0 ? null : stopReq,\n}\n\nif (model === \"text-davinci-003\" || model === \"text-davinci-002\") {\n const prompt = msg.prompt || env.get(\"prompt\");\n\n if (prompt) {\n msg.url = 'https://api.openai.com/v1/completions';\n\n msg.payload = {\n ...baseRequestBody,\n prompt,\n };\n\n return [msg, null]\n } else {\n msg.payload = \"Enter prompt\";\n return [null, msg]\n }\n}\n\nif (model === \"gpt-3.5-turbo\") {\n if (msg.messages) {\n msg.url = 'https://api.openai.com/v1/chat/completions';\n msg.payload = {\n ...baseRequestBody,\n messages: msg.messages\n }\n\n return [msg, null]\n } else {\n msg.payload = \"Enter valid prompt\";\n return [null, msg]\n }\n}\n\nif (model === \"gpt-4\") {\n if (msg.messages) {\n msg.url = 'https://api.openai.com/v1/chat/completions';\n msg.payload = {\n ...baseRequestBody,\n messages: msg.messages\n }\n \n return [msg, null]\n } else {\n msg.payload = \"Enter valid prompt\";\n return [null, msg]\n }\n}\n\nmsg.payload = \"Enter an existing model\";\nreturn [null, msg];",
"func": "/*\n model: \"text-davinci-003\" | \"text-davinci-002\" | \"gpt-3.5-turbo\" | \"gpt-4\"\n*/\nconst model = msg.model || env.get(\"model\");\n\nif (!env.get(\"OPENAI_API_KEY\") && !msg.OPENAI_API_KEY) {\n msg.payload = \"Enter OPENAI_API_KEY\";\n \n return [null, msg];\n}\n\nmsg.headers = {\n 'Content-Type': 'application/json',\n 'Authorization': `Bearer ${env.get(\"OPENAI_API_KEY\") || msg.OPENAI_API_KEY}`\n};\n\nconst stopReq = msg.stop || env.get(\"stop\");\nconst baseRequestBody = {\n model,\n temperature: msg.temperature || env.get(\"temperature\"),\n max_tokens: msg.max_tokens || env.get(\"max_tokens\"),\n top_p: msg.top_p || env.get(\"top_p\"),\n frequency_penalty: msg.frequency_penalty || env.get(\"frequency_penalty\"),\n presence_penalty: msg.presence_penalty || env.get(\"presence_penalty\"),\n stop: stopReq && stopReq.length === 0 ? null : stopReq,\n}\n\nif (model === \"text-davinci-003\" || model === \"text-davinci-002\") {\n const prompt = msg.prompt || env.get(\"prompt\");\n\n if (prompt) {\n msg.url = 'https://api.openai.com/v1/completions';\n\n msg.payload = {\n ...baseRequestBody,\n prompt,\n };\n\n return [msg, null]\n } else {\n msg.payload = \"Enter prompt\";\n return [null, msg]\n }\n}\n\nif (model === \"gpt-3.5-turbo\") {\n if (msg.messages) {\n msg.url = 'https://api.openai.com/v1/chat/completions';\n msg.payload = {\n ...baseRequestBody,\n messages: msg.messages\n }\n\n return [msg, null]\n } else {\n msg.payload = \"Enter valid prompt\";\n return [null, msg]\n }\n}\n\nif (model === \"gpt-4\") {\n if (msg.messages) {\n msg.url = 'https://api.openai.com/v1/chat/completions';\n msg.payload = {\n ...baseRequestBody,\n messages: msg.messages\n }\n \n return [msg, null]\n } else {\n msg.payload = \"Enter valid prompt\";\n return [null, msg]\n }\n}\n\nif (model && model.length > 0) {\n if (msg.messages) {\n msg.url = \"https://api.openai.com/v1/chat/completions\";\n msg.payload = {\n ...baseRequestBody,\n messages: msg.messages\n }\n\n 
return [msg, null]\n } else {\n msg.payload = \"Enter valid prompt\";\n return [null, msg]\n }\n}\n\nmsg.payload = \"Enter an existing model\";\nreturn [null, msg];",
"outputs": 2,

@@ -234,0 +234,0 @@ "noerr": 0,

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc