Socket
Socket
Sign in · Demo · Install

aigc

Package Overview
Dependencies
Maintainers
1
Versions
4
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

aigc - npm Package Compare versions

Comparing version 1.0.1 to 1.0.2

dist/cli-image.js

19

CHANGELOG.md

@@ -0,1 +1,20 @@

## [1.0.2](https://github.com/durkajs/aigc/compare/v1.0.1...v1.0.2) (2023-03-01)
### Bug Fixes
* update error message to indicate correct ENV variable for OpenAI API key ([f4c3312](https://github.com/durkajs/aigc/commit/f4c331213f98a1c42c710f2c295968e4f78d101a))
### Features
* add --ada option for fastest model ([9934406](https://github.com/durkajs/aigc/commit/993440632f82f35287d909c6ac2b898a05ede6b6))
* add command 'aigc image' ([ee494bc](https://github.com/durkajs/aigc/commit/ee494bc6451ee6d94093ca112a334d4be23782d5))
* add examples to README.md ([e7f42a0](https://github.com/durkajs/aigc/commit/e7f42a0e8f77223c99623352ea6cd7abc08b45ac))
* add explains for more models ([7e8335d](https://github.com/durkajs/aigc/commit/7e8335d5af01baffefcd3d43ae584de33d5fcab9))
* add more short options for cli ([e1dc2f1](https://github.com/durkajs/aigc/commit/e1dc2f1fa1a89742403eb1bb3fc7c9c9bc0e37fa))
* support reading prompt from environment ([cfcb744](https://github.com/durkajs/aigc/commit/cfcb744ae080809a864542753e335bfbd927fc23))
## 1.0.1 (2023-03-01)

@@ -2,0 +21,0 @@

12

dist/cli-list-models.js

@@ -1,3 +0,3 @@

import { _ as __awaiter, a as __generator, t as tryRun, o as openai } from './tryRun-9177684b.js';
import { c as cmd } from './cmder-f903ee2b.js';
import { _ as __awaiter, a as __generator, t as tryRun, o as openai } from './tryRun-f2a8444a.js';
import { c as cmd } from './cmder-d3b1d5d5.js';
import ora from 'ora';

@@ -19,4 +19,8 @@ import 'http';

var EXPLAIN = {
'code-davinci-002': 'Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code.',
'code-cushman-001': 'Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications.',
'code-davinci-002': 'Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code',
'code-cushman-001': 'Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications',
'text-davinci-003': 'Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text',
'text-curie-001': 'Very capable, but faster and lower cost than Davinci',
'text-babbage-001': 'Capable of straightforward tasks, very fast, and lower cost',
'text-ada-001': 'Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost',
};

@@ -23,0 +27,0 @@ var cliListModels = cmd({}, function (ctx) { return __awaiter(void 0, void 0, void 0, function () {

@@ -1,2 +0,2 @@

import { c as cmd } from './cmder-f903ee2b.js';
import { c as cmd } from './cmder-d3b1d5d5.js';
import { encode, decode } from 'gpt-3-encoder';

@@ -3,0 +3,0 @@ import 'os';

@@ -1,6 +0,7 @@

import { _ as __awaiter, a as __generator, t as tryRun, b as __rest, c as aigc, d as __assign } from './tryRun-9177684b.js';
import { _ as __awaiter, a as __generator, t as tryRun, b as __rest, c as aigc, d as __assign } from './tryRun-f2a8444a.js';
import fs from 'fs';
import ora from 'ora';
import clip from 'clipboardy';
import { c as cmd, o as opt, e as env, s as sub } from './cmder-f903ee2b.js';
import { c as cmd, o as opt, a as env, s as sub } from './cmder-d3b1d5d5.js';
import { C as COMMAND } from './constant-0fcefa10.js';
import 'http';

@@ -19,4 +20,2 @@ import 'https';

var COMMAND = 'aigc';
var cli = cmd({

@@ -31,3 +30,3 @@ usage: "".concat(COMMAND, " [options] path/to/file"),

],
version: '1.0.1',
version: '1.0.2',
options: {

@@ -37,10 +36,11 @@ spinner: opt('boolean', 'Display spinner or not, enabled by default. You can disable it by using `--spinner=false` {{ true }}'),

echo: opt('boolean', 'Echo the prompt in terminal'),
code: opt('boolean', '<c> Setting the GPT model to "code-davinci-002" is equivalent to using the option `--model=code-davinci-002`'),
code: opt('boolean', 'Setting the GPT model to "code-davinci-002" (codex model) is equivalent to using the option `--model=code-davinci-002`'),
ada: opt('boolean', 'Setting the GPT model to "text-ada-001" (fastest model) is equivalent to using the option `--model=text-ada-001`'),
temperature: opt('number', '[GPT] <t> Between 0 and 2. Higher values make the output more random, lower values make it more focused and deterministic {{ 0.7 }}'),
model: opt('string', "[GPT] ID of the model to use, using `".concat(COMMAND, " models` show currently available models {{ \"text-davinci-003\" }}")),
prompt: opt('string', '[GPT] <p> The prompt to generate completions for'),
model: opt('string', "[GPT] <m> ID of the model to use, using `".concat(COMMAND, " models` show currently available models {{ \"text-davinci-003\" }}")),
prompt: opt('string', '[GPT] <p> The prompt to generate completions for, or the key of the prompt saved in the environment variable'),
presence_penalty: opt('number', '[GPT] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far {{ 0 }}'),
frequency_penalty: opt('number', '[GPT] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far {{ 0 }}'),
max_tokens: opt('number', '[GPT] The maximum number of tokens to generate in the completion {{ 1000 }}'),
top_p: opt('number', '[GPT] An alternative to sampling with temperature {{ 1 }}'),
max_tokens: opt('number', '[GPT] <M> The maximum number of tokens to generate in the completion {{ 1000 }}'),
top_p: opt('number', '[GPT] <P> An alternative to sampling with temperature {{ 1 }}'),
n: opt('number', '[GPT] How many completions to generate for each prompt {{ 1 }}'),

@@ -60,13 +60,16 @@ best_of: opt('number', '[GPT] Generates `best_of` completions and returns the "best"'),

'<tokenizer> Convert text to token IDs': sub('./cli-tokenizer.js'),
'<image> Creates an image given a prompt': sub('./cli-image.js'),
},
}, function (ctx) { return __awaiter(void 0, void 0, void 0, function () {
var _a, _b, prompt, stdin, enableSpinner, echo, code, opts, additional, newPrompt;
return __generator(this, function (_c) {
switch (_c.label) {
var _a, _b, prompt, stdin, enableSpinner, echo, code, ada, opts, newPrompt, _c;
return __generator(this, function (_d) {
switch (_d.label) {
case 0:
_a = ctx.options, _b = _a.prompt, prompt = _b === void 0 ? '' : _b, stdin = _a.stdin, enableSpinner = _a.spinner, echo = _a.echo, code = _a.code, opts = __rest(_a, ["prompt", "stdin", "spinner", "echo", "code"]);
_a = ctx.options, _b = _a.prompt, prompt = _b === void 0 ? '' : _b, stdin = _a.stdin, enableSpinner = _a.spinner, echo = _a.echo, code = _a.code, ada = _a.ada, opts = __rest(_a, ["prompt", "stdin", "spinner", "echo", "code", "ada"]);
_c = [prompt && process.env[prompt] ? process.env[prompt] : prompt];
return [4 /*yield*/, (stdin ? readContentFromStdin() : readContentFromFiles(ctx.args))];
case 1:
additional = _c.sent();
newPrompt = [prompt, additional].filter(function (str) { return !!str.trim(); }).join('\n\n');
newPrompt = _c.concat([
_d.sent()
]).filter(function (str) { return !!str.trim(); }).join('\n\n');
if (echo)

@@ -85,2 +88,4 @@ console.log(newPrompt + '\n');

opts.model = 'code-davinci-002';
else if (ada)
opts.model = 'text-ada-001';
return [4 /*yield*/, aigc(__assign(__assign({}, opts), { stop: stop, logit_bias: logit_bias, prompt: "".concat(newPrompt) }))

@@ -87,0 +92,0 @@ // stop spinner and output result

import { fileURLToPath } from 'url';
import { g as getBoolEnv, _ as __spreadArray, a as __rest, b as __assign, i as infoExports } from './cmder-f903ee2b.js';
import { g as getBoolEnv, _ as __spreadArray, b as __rest, d as __assign, i as infoExports } from './cmder-d3b1d5d5.js';
import { Buffer as Buffer$1 } from 'node:buffer';

@@ -21,3 +21,3 @@ import path$4 from 'node:path';

import 'module';
import './tryRun-9177684b.js';
import './tryRun-f2a8444a.js';
import 'http';

@@ -28,2 +28,3 @@ import 'https';

import 'clipboardy';
import './constant-0fcefa10.js';

@@ -30,0 +31,0 @@ var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};

{
"name": "aigc",
"version": "1.0.1",
"version": "1.0.2",
"description": "Generate content for you with OpenAI",

@@ -5,0 +5,0 @@ "bin": {

@@ -32,5 +32,16 @@ # aigc

aigc --prompt "Write unit test for the code below" path/to/your/codeFile
# create image
aigc image "cute cat"
```
#### Example 1: Generate document comment for function
![Generate document comment](https://mdn.alipayobjects.com/huamei_am125r/afts/img/A*_gezSIVeBHoAAAAAAAAAAAAADjp-AQ/original)
#### Example 2: Generate unit test for function
![Generate unit test](https://mdn.alipayobjects.com/huamei_am125r/afts/img/A*ng82S7WiUEsAAAAAAAAAAAAADjp-AQ/original)
<!--

@@ -37,0 +48,0 @@ ## Roadmap

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc