Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

heyi

Package Overview
Dependencies
Maintainers
1
Versions
6
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

heyi - npm Package Compare versions

Comparing version
1.1.0
to
2.0.0
+11
src/utils/argv.js
/**
 * Determine whether any of the given flag spellings appear in process.argv,
 * either as a standalone argument (e.g. `--model`) or in `--flag=value`
 * form (e.g. `--model=gpt-4`).
 *
 * @param {string[]} flagNames - Flag spellings to look for (e.g. ['--model', '-m'])
 * @returns {boolean} True if at least one of the flags is present
 */
export const hasFlag = (flagNames) => {
  const matchesFlag = (arg) => {
    if (flagNames.includes(arg)) {
      return true
    }
    // Also match the `--flag=value` form
    return flagNames.some((name) => arg.startsWith(`${name}=`))
  }
  return process.argv.some(matchesFlag)
}
import { readFile } from 'node:fs/promises'
import { z } from 'zod'
// Shape of a preset JSON file. `prompt` is the only required field;
// `files` and `urls` default to empty arrays so downstream code can
// iterate them without null checks.
const presetSchema = z.object({
prompt: z.string(),
model: z.string().optional(),
format: z.enum(['string', 'number', 'object', 'array']).optional(),
schema: z.string().optional(),
files: z.array(z.string()).default([]),
urls: z.array(z.string()).default([]),
})
/**
 * Read a preset JSON file from disk, parse it, and validate it against
 * the preset schema.
 *
 * @param {string} filePath - Path to the preset JSON file
 * @returns {Promise<object>} The validated preset configuration
 * @throws {Error} When the file is missing, is not valid JSON, or fails schema validation
 */
export const loadPreset = async (filePath) => {
  try {
    const raw = await readFile(filePath, 'utf8')
    return presetSchema.parse(JSON.parse(raw))
  } catch (error) {
    // A missing file gets a dedicated message; read, JSON, and schema
    // failures all surface as a generic parse error. The original error
    // is always preserved as the cause.
    const message =
      error.code === 'ENOENT'
        ? `Preset file '${filePath}' not found`
        : `Error while parsing preset file '${filePath}'`
    throw new Error(message, { cause: error })
  }
}
import { fetchUrlContent, readFileContent } from './input.js'
/**
 * Build a prompt with context by combining the prompt with file and URL contexts.
 *
 * File reads and URL fetches are independent of each other, so all of them
 * are performed concurrently instead of one at a time.
 *
 * @param {string} prompt - The prompt
 * @param {string[]} filePaths - Array of file paths to include as context
 * @param {string[]} urls - Array of URLs to include as context
 * @returns {Promise<string>} The final prompt with all contexts combined
 */
export const buildPrompt = async (prompt, filePaths = [], urls = []) => {
  // Load every file and URL context in parallel; `map` preserves input order
  const [fileContents, urlContents] = await Promise.all([
    Promise.all(
      filePaths.map(async (filePath) => ({ path: filePath, content: await readFileContent(filePath) })),
    ),
    Promise.all(urls.map(async (url) => ({ path: url, content: await fetchUrlContent(url) }))),
  ])
  // Combine file and URL contexts (files first, matching flag order)
  const allContexts = [...fileContents, ...urlContents]
  if (allContexts.length === 0) {
    return prompt
  }
  const contextItems = allContexts.map(({ path, content }) => `Source: ${path}\n${content}`).join('\n\n---\n\n')
  const contextLabel = allContexts.length === 1 ? 'Context from source:' : 'Context from sources:'
  return `${prompt}\n\n${contextLabel}\n${contextItems}`
}
/**
 * Replace variables in a prompt string.
 *
 * @param {string} prompt - The prompt with variables in {{variable}} format
 * @param {object} variables - Object with variable names as keys and replacement values as values
 * @returns {string} The prompt with variables replaced
 */
export const replaceVariables = (prompt, variables = {}) => {
  let result = prompt
  for (const [variable, value] of Object.entries(variables)) {
    // Escape regex metacharacters so variable names are always matched literally
    const escapedVariable = variable.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
    const pattern = new RegExp(`{{\\s*${escapedVariable}\\s*}}`, 'g')
    // Use a function replacer so `$&`, `$1`, etc. in the value are
    // inserted verbatim instead of being expanded as replacement patterns
    result = result.replace(pattern, () => value)
  }
  return result
}
+196
-52
#!/usr/bin/env node
import { Command } from 'commander'
import { z } from 'zod'
import pkg from '../package.json' with { type: 'json' }
import { executePrompt } from '../src/index.js'
import { hasStdinData, readFileContent, readStdin } from '../src/utils/input.js'
import { hasFlag } from '../src/utils/argv.js'
import { hasStdinData, readStdin } from '../src/utils/input.js'
import { loadPreset } from '../src/utils/preset.js'
import { buildPrompt } from '../src/utils/prompt.js'
import { replaceVariables } from '../src/utils/variables.js'
// Default model used when neither --model nor the MODEL env variable is set
const DEFAULT_MODEL = 'openai/gpt-4o-mini'
// Shared commander option tuples, spread into both the `prompt` and
// `preset` commands via .option(...flag)
const modelFlag = ['-m, --model <model>', 'AI model to use', process.env.MODEL ?? DEFAULT_MODEL]
const formatFlag = ['-f, --format <format>', 'Output format: string, number, object, array', 'string']
const schemaFlag = [
'-s, --schema <schema>',
'Zod schema for object/array format (required when format is object or array)',
]
const fileFlag = [
'--file <path>',
'Read content from file and include as context (can be used multiple times)',
// Commander collector: accumulate repeated --file values into an array
(value, previous) => {
return previous ? [...previous, value] : [value]
},
]
const urlFlag = [
'--url <url>',
'Fetch content from URL and include as context (can be used multiple times)',
// Commander collector: accumulate repeated --url values into an array
(value, previous) => {
return previous ? [...previous, value] : [value]
},
]
const varFlag = [
  '--var <variable=value>',
  'Define variables for replacement in prompt using {{variable}} syntax (can be used multiple times)',
  // Commander collector: parse `variable=value` pairs into an object.
  // Only the first '=' separates name from value, so values may contain '='.
  (value, previous) => {
    const separatorIndex = value.indexOf('=')
    // Reject input with no '=' at all (previously it silently stored an
    // empty-string value) and input with an empty variable name.
    if (separatorIndex < 1) {
      throw new Error(`Invalid --var format: '${value}'. Expected format: variable=value`)
    }
    const variable = value.slice(0, separatorIndex)
    const variableValue = value.slice(separatorIndex + 1)
    return { ...previous, [variable]: variableValue }
  },
]
// Scan process.argv up-front to record which of these flags the user passed
// explicitly. mergeOptionsWithPreset uses this to decide whether a CLI value
// should override the preset's value or was merely a default.
const hasModelFlag = hasFlag(['--model', '-m'])
const hasFormatFlag = hasFlag(['--format', '-f'])
const hasSchemaFlag = hasFlag(['--schema', '-s'])
const program = new Command()

@@ -14,36 +58,98 @@

Examples:
$ heyi "What is the capital of France?"
$ heyi "What is quantum computing?" --model google/gemini-2.5-pro
# Prompts
$ heyi prompt "What is the capital of France?"
$ heyi prompt "What is quantum computing?" --model google/gemini-2.5-pro
$ heyi help prompt
# Presets
$ heyi preset file.json
$ heyi preset file.json --model google/gemini-2.5-pro
$ heyi help preset
`
const promptHelpText = `
Examples:
$ heyi prompt "What is the capital of France?"
$ heyi prompt "What is quantum computing?" --model google/gemini-2.5-pro
# Different output formats
$ heyi "List 5 programming languages" --format array --schema "z.string()"
$ heyi "Analyze this data" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
$ heyi "List 3 countries" --format array --schema "z.object({name:z.string(),capital:z.string()})"
$ heyi prompt "List 5 programming languages" --format array --schema "z.string()"
$ heyi prompt "Analyze this data" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
$ heyi prompt "List 3 countries" --format array --schema "z.object({name:z.string(),capital:z.string()})"
# Variable replacement
$ heyi prompt "Preset in {{language}}" --var language="German"
# Environment variables
$ MODEL=perplexity/sonar heyi "Explain AI"
$ API_KEY=your-key heyi "Hello, AI!"
$ MODEL=perplexity/sonar heyi prompt "Explain AI"
$ API_KEY=your-key heyi prompt "Hello, AI!"
# Input from stdin or file
$ heyi "Summarize this content" --file input.txt
$ heyi "Compare these files" --file a.txt --file b.txt
$ cat prompt.txt | heyi
# Attach context
$ heyi prompt "Summarize this content" --file input.txt
$ heyi prompt "Compare these files" --file a.txt --file b.txt
$ heyi prompt "Summarize this article" --url https://example.com/article.html
# Input from stdin
$ cat prompt.txt | heyi prompt
`
const action = async (prompt, options) => {
try {
// Validate that schema is provided for object/array formats
if ((options.format === 'object' || options.format === 'array') && !options.schema) {
throw new Error(`--schema or -s is required when format is '${options.format}'`)
}
const presetHelpText = `
Examples:
$ heyi preset file.json
$ heyi preset file.json --model google/gemini-2.5-pro
// Handle file content as context
const fileContents = []
if (options.file) {
for (const filePath of options.file) {
const content = await readFileContent(filePath)
fileContents.push({ path: filePath, content })
}
}
# Overwrite options from preset
$ heyi preset file.json --model openai/gpt-4
$ heyi preset file.json --format array --schema "z.string()"
# Variable replacement
$ heyi preset file.json --var language=german
# Attach additional context
$ heyi preset file.json --file additional.txt
$ heyi preset file.json --url https://example.com/additional.html
`
// Validated, normalized option set shared by both commands. Mirrors the CLI
// flags: arrays and vars default to empty, and a schema is only mandatory
// for the structured (object/array) output formats.
const optionsSchema = z
.object({
model: z.string(),
format: z.enum(['string', 'number', 'object', 'array']),
schema: z.string().optional(),
files: z.array(z.string()).default([]),
urls: z.array(z.string()).default([]),
vars: z.record(z.string(), z.string()).default({}),
})
.refine((data) => !['object', 'array'].includes(data.format) || data.schema, {
message: '--schema or -s is required when format is object or array',
path: ['schema'],
})
/**
 * Convert raw commander flags into a validated options object.
 * Renames the singular flag keys (`file`, `url`, `var`) to the plural
 * names the options schema expects.
 *
 * @param {object} flags - Parsed commander option values
 * @returns {object} Validated options
 */
const flagsToOptions = (flags) => {
  const { model, format, schema, file, url, var: vars } = flags
  return optionsSchema.parse({
    model,
    format,
    schema,
    files: file,
    urls: url,
    vars,
  })
}
/**
 * Merge CLI options with a loaded preset and re-validate the result.
 * Explicit CLI flags win over preset values; files and URLs from both
 * sources are combined (preset entries first); vars come from the CLI only.
 *
 * @param {object} options - Validated options built from CLI flags
 * @param {object} presetContent - Parsed preset file content
 * @returns {object} Validated merged options
 */
const mergeOptionsWithPreset = (options, presetContent) => {
  // A flag value wins only when the flag was passed explicitly; otherwise
  // the preset value applies, falling back to the flag's default.
  const pick = (flagWasSet, flagValue, presetValue) => (flagWasSet ? flagValue : (presetValue ?? flagValue))
  return optionsSchema.parse({
    model: pick(hasModelFlag, options.model, presetContent.model),
    format: pick(hasFormatFlag, options.format, presetContent.format),
    schema: pick(hasSchemaFlag, options.schema, presetContent.schema),
    files: [...presetContent.files, ...options.files],
    urls: [...presetContent.urls, ...options.urls],
    vars: options.vars,
  })
}
const executePromptAction = async (prompt, flags) => {
try {
// Handle stdin input

@@ -57,13 +163,44 @@ let stdinContent = null

if (!prompt && !stdinContent) {
throw new Error('A prompt is required. Provide it as an argument or via stdin.')
throw new Error('A prompt is required either as an argument or via stdin')
}
// Build the final prompt
let finalPrompt = prompt ?? stdinContent
if (fileContents.length > 0) {
const fileContexts = fileContents.map(({ path, content }) => `File: ${path}\n${content}`).join('\n\n---\n\n')
const contextLabel = fileContents.length === 1 ? 'Context from file:' : 'Context from files:'
finalPrompt = `${finalPrompt}\n\n${contextLabel}\n${fileContexts}`
// Build options from flags
const options = flagsToOptions(flags)
// Build the prompt and prefer the argument over stdin
const userPrompt = replaceVariables(prompt ?? stdinContent, options.vars)
const finalPrompt = await buildPrompt(userPrompt, options.files, options.urls)
const result = await executePrompt(finalPrompt, {
model: options.model,
format: options.format,
schema: options.schema,
})
console.log(result)
} catch (error) {
console.error(error)
process.exit(1)
}
}
const executePresetAction = async (preset, flags) => {
try {
// Validate that preset file is provided
if (!preset) {
throw new Error('Preset file path is required when using "preset" command')
}
// Load preset and use prompt from it
const presetContent = await loadPreset(preset)
const prompt = presetContent.prompt
// Build options from flags and merge with preset
const options = mergeOptionsWithPreset(flagsToOptions(flags), presetContent)
// Build the prompt
const userPrompt = replaceVariables(prompt, options.vars)
const finalPrompt = await buildPrompt(userPrompt, options.files, options.urls)
const result = await executePrompt(finalPrompt, {

@@ -77,5 +214,3 @@ model: options.model,

} catch (error) {
const relevantFields = Object.keys(error).filter((key) => ['stack', 'isRetryable', 'data'].includes(key) === false)
const relevantError = Object.fromEntries(relevantFields.map((key) => [key, error[key]]))
console.error(relevantError)
console.error(error)

@@ -86,19 +221,28 @@ process.exit(1)

program.name(pkg.name).description(pkg.description).version(pkg.version).addHelpText('after', helpText)
program
.name(pkg.name)
.description(pkg.description)
.version(pkg.version)
.command('prompt')
.argument('[prompt]', 'The AI prompt to execute (optional when using stdin)')
.option('-m, --model <model>', 'AI model to use', process.env.MODEL ?? DEFAULT_MODEL)
.option('-f, --format <format>', 'Output format: string, number, object, array', 'string')
.option('-s, --schema <schema>', 'Zod schema for object/array format (required when format is object or array)')
.option(
'--file <path>',
'Read content from file and include as context (can be used multiple times)',
(value, previous) => {
return previous ? [...previous, value] : [value]
},
)
.addHelpText('after', helpText)
.action(action)
.parse()
.option(...modelFlag)
.option(...formatFlag)
.option(...schemaFlag)
.option(...fileFlag)
.option(...urlFlag)
.option(...varFlag)
.addHelpText('after', promptHelpText)
.action(executePromptAction)
program
.command('preset')
.argument('[file]', 'Path to preset JSON file')
.option(...modelFlag)
.option(...formatFlag)
.option(...schemaFlag)
.option(...fileFlag)
.option(...urlFlag)
.option(...varFlag)
.addHelpText('after', presetHelpText)
.action(executePresetAction)
program.parse()
{
"name": "heyi",
"version": "1.1.0",
"version": "2.0.0",
"description": "CLI tool to execute AI prompts with flexible output formatting",

@@ -12,2 +12,7 @@ "keywords": [

],
"homepage": "https://github.com/electerious/heyi",
"repository": {
"type": "git",
"url": "https://github.com/electerious/heyi.git"
},
"license": "MIT",

@@ -29,7 +34,8 @@ "type": "module",

"dependencies": {
"@openrouter/ai-sdk-provider": "^1.5.3",
"ai": "^5.0.113",
"@openrouter/ai-sdk-provider": "^1.5.4",
"ai": "^5.0.121",
"commander": "^14.0.2",
"dotenv": "^14.3.2",
"zod": "^4.2.0"
"dotenv": "^16.6.1",
"sanitize-html": "^2.17.0",
"zod": "^4.3.5"
},

@@ -36,0 +42,0 @@ "devDependencies": {

+131
-16

@@ -18,3 +18,4 @@ # heyi

```sh
heyi [prompt] [options]
heyi prompt [prompt] [options]
heyi preset [file] [options]
```

@@ -28,2 +29,4 @@

- `--file <path>` - Read content from file and include as context (can be used multiple times)
- `--url <url>` - Fetch content from URL and include as context (can be used multiple times)
- `--var <key=value>` - Define variables for replacement in prompt using `{{key}}` syntax (can be used multiple times)
- `-h, --help` - Display help information

@@ -41,37 +44,149 @@ - `-V, --version` - Display version number

# Simple text prompt
heyi "What is the capital of France?"
heyi prompt "What is the capital of France?"
# Use a different model
heyi "Explain quantum computing" --model google/gemini-2.0-flash-exp
heyi prompt "Explain quantum computing" --model google/gemini-2.0-flash-exp
# Get structured output as array of strings
heyi "List 5 programming languages" --format array --schema "z.string()"
heyi prompt "List 5 programming languages" --format array --schema "z.string()"
# Get structured output as array of objects
heyi "List 3 countries with their capitals" --format array --schema "z.object({name:z.string(),capital:z.string()})"
heyi prompt "List 3 countries with their capitals" --format array --schema "z.object({name:z.string(),capital:z.string()})"
# Get structured output as single object
heyi "Analyze: revenue 100k, costs 60k" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
heyi prompt "Analyze: revenue 100k, costs 60k" --format object --schema "z.object({revenue:z.number(),costs:z.number()})"
# Complex nested schema
heyi "Analyze top 3 tech companies" --format array --schema "z.object({name:z.string(),founded:z.number(),products:z.array(z.string())})"
heyi prompt "Analyze top 3 tech companies" --format array --schema "z.object({name:z.string(),founded:z.number(),products:z.array(z.string())})"
# Variable replacement in prompts
heyi prompt "Preset in {{language}}" --var language="German"
heyi prompt "Preset in {{input}} and output in {{output}}" --var input="German" --var output="English"
# Variable replacement with stdin
echo "Translate to {{language}}" | heyi prompt --var language="Spanish"
# Set default model via environment variable
MODEL=perplexity/sonar heyi "Explain AI"
MODEL=perplexity/sonar heyi prompt "Explain AI"
# Set API key via environment variable
API_KEY=your-key heyi "Hello, AI!"
API_KEY=your-key heyi prompt "Hello, AI!"
# Input from file as context
heyi "Summarize this content" --file input.txt
heyi prompt "Summarize this content" --file input.txt
# Input from multiple files as context
heyi "Compare these files" --file file1.txt --file file2.txt
heyi "Analyze all these documents" --file doc1.md --file doc2.md --file doc3.md
heyi prompt "Compare these files" --file file1.txt --file file2.txt
heyi prompt "Analyze all these documents" --file doc1.md --file doc2.md --file doc3.md
# Input from URL as context
heyi prompt "Summarize this article" --url https://example.com/article.html
# Input from multiple URLs as context
heyi prompt "Compare these articles" --url https://example.com/article1.html --url https://example.com/article2.html
# Mix files and URLs as context
heyi prompt "Compare local and remote content" --file local.txt --url https://example.com/remote.txt
# Input from stdin
cat article.md | heyi "Extract all URLs mentioned"
echo "Analyze this text" | heyi
cat article.md | heyi prompt "Extract all URLs mentioned"
echo "Analyze this text" | heyi prompt
# Preset files
heyi preset file.json
heyi preset file.json --var language=german
heyi preset file.json --model openai/gpt-4o
heyi preset file.json --file additional.txt --url https://example.com
```
## Preset Files
Preset files allow you to define reusable configurations with prompts, models, files, and URLs. Create a JSON file with the following structure:
```json
{
"prompt": "Your prompt with {{variables}}",
"model": "openai/gpt-4o-mini",
"format": "array",
"schema": "z.string()",
"files": ["path/to/file1.txt", "path/to/file2.txt"],
"urls": ["https://example.com/page.html"]
}
```
### Preset Configuration
- **prompt**: The AI prompt to execute. Supports variable replacement using `{{variable}}` syntax.
- **model** (optional): AI model to use (e.g., `openai/gpt-4o-mini`, `google/gemini-2.0-flash-exp`).
- **format** (optional): Output format: `string`, `number`, `object`, `array` (default: `string`).
- **schema** (optional): Zod schema for object/array format (required when format is `object` or `array`).
- **files** (optional): Array of file paths to include as context.
- **urls** (optional): Array of URLs to fetch and include as context.
### Preset Examples
**Basic preset with variables:**
```json
{
"prompt": "Explain {{topic}} in {{language}}"
}
```
```sh
heyi preset explain.json --var topic="quantum computing" --var language="simple terms"
```
**Preset with files and URLs:**
```json
{
"prompt": "Analyze and compare the following documents",
"model": "google/gemini-2.0-flash-exp",
"files": ["report1.txt", "report2.txt"],
"urls": ["https://example.com/data.html"]
}
```
```sh
heyi preset analyze.json
```
**Preset with structured output:**
```json
{
"prompt": "List programming languages mentioned in these files",
"format": "array",
"schema": "z.string()",
"files": ["code1.js", "code2.py"]
}
```
```sh
heyi preset languages.json
```
### CLI Override Behavior
- **Model override**: Using `--model` flag overrides the model specified in the preset file.
- **Format override**: Using `--format` flag overrides the format specified in the preset file.
- **Schema override**: Using `--schema` flag overrides the schema specified in the preset file.
- **Files and URLs append**: Using `--file` or `--url` flags adds additional context to the preset's files and URLs.
- **Variables**: Use `--var` to replace variables in the preset's prompt.
```sh
# Override model from preset
heyi preset file.json --model openai/gpt-4o
# Override format from preset
heyi preset file.json --format object --schema "z.object({name:z.string()})"
# Add additional files to preset's files
heyi preset file.json --file extra.txt
# Replace variables in preset prompt
heyi preset file.json --var name="Alice" --var role="developer"
```
## Output Formats

@@ -106,6 +221,6 @@

# Run the CLI in development
npm start -- "Your prompt here"
npm start -- prompt "Your prompt here"
# Or run directly
./bin/index.js "Your prompt here"
./bin/index.js prompt "Your prompt here"
```

@@ -112,0 +227,0 @@

import { readFile } from 'node:fs/promises'
import { createInterface } from 'node:readline'
import sanitizeHtml from 'sanitize-html'

@@ -57,1 +58,27 @@ /**

}
/**
 * Fetch content from a URL and reduce it to plain text.
 *
 * @param {string} url - URL to fetch content from
 * @returns {Promise<string>} The trimmed text content of the response
 * @throws {Error} When the request fails or the response is not OK
 */
export const fetchUrlContent = async (url) => {
  try {
    const response = await fetch(url)
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }
    const html = await response.text()
    // Strip every tag, attribute, and scheme so only plain text remains,
    // keeping the context passed to the model small
    const sanitizeOptions = {
      allowedTags: [],
      allowedAttributes: {},
      allowedSchemes: [],
      allowedSchemesAppliedToAttributes: [],
    }
    return sanitizeHtml(html, sanitizeOptions).trim()
  } catch (error) {
    throw new Error(`Failed to fetch URL '${url}'`, { cause: error })
  }
}