ai-renamer
Comparing version 1.0.20 to 1.0.21
{
-  "version": "1.0.20",
+  "version": "1.0.21",
  "license": "GPL-3.0",
@@ -4,0 +4,0 @@ "name": "ai-renamer",
@@ -38,3 +38,3 @@ # ai-renamer
-Ollama Usage
+## Ollama Usage
@@ -47,3 +47,3 @@ Ollama is the default provider so you don't have to do anything. You can just run `npx ai-renamer /images`. At the first launch it will try to auto-select the Llava model but if it couldn't do that you can specify the model.
-LM Studio Usage
+## LM Studio Usage
@@ -56,2 +56,12 @@ You need to set the provider as `lm-studio` and it will auto-select the loaded model in LM Studio.
+## OpenAI Usage
+You need to set the provider to `openai` and set `--api-key` to your API key; it will auto-select the gpt-4o model, but you can assign any model with the `--model` flag.
+```bash
+npx ai-renamer /path --provider=openai --api-key=OPENAI_API_KEY
+```
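For illustration only (this command is not part of the diff): the `--model` flag from the help output below can point the openai provider at another model name, such as the gpt-4 entry that appears in the provider's model list further down.
```bash
# Hypothetical invocation: override the auto-selected gpt-4o with gpt-4
npx ai-renamer /path --provider=openai --api-key=OPENAI_API_KEY --model=gpt-4
```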
## Custom Ports
If you're using a different port in Ollama or LM Studio, you can simply specify the base URL.
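As a sketch (the port number here is a made-up example, not a value from the diff), a custom port is passed through the `-u, --base-url` flag shown in the help output:
```bash
# Assumed example: Ollama listening on a non-default port instead of 11434
npx ai-renamer /images --provider=ollama --base-url=http://127.0.0.1:11500
```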
@@ -64,6 +74,6 @@
## Params
The values of the flags are saved to disk when you use them. You can find the config file at `~/ai-renamer.json`; if you're using a Mac it's `/Users/your-user-name/ai-renamer.json`. Once you set a flag you don't have to pass it again, because the script reads the values from this config file.
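As a rough sketch of what that file can hold (the `default*` key names appear in the source diff further down; the values below are only illustrative placeholders):
```bash
# Print the saved config; keys such as defaultProvider, defaultModel, defaultCase and
# defaultApiKey come from the source, the values shown are example placeholders only
cat ~/ai-renamer.json
# {"defaultProvider":"openai","defaultModel":"gpt-4o","defaultCase":"kebabCase","defaultApiKey":"OPENAI_API_KEY"}
```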
```bash
@@ -74,8 +84,10 @@ npx ai-renamer --help
      --version    Show version number                                [boolean]
-  -p, --provider   Set the provider (e.g. ollama, lm-studio)          [string]
+  -p, --provider   Set the provider (e.g. ollama, openai, lm-studio)  [string]
+  -a, --api-key    Set the API key if you're using openai as provider [string]
  -u, --base-url   Set the API base URL (e.g. http://127.0.0.1:11434 for ollama)
                                                                       [string]
-  -m, --model      Set the model to use (e.g. gemma2, llama3)         [string]
+  -m, --model      Set the model to use (e.g. gemma2, llama3, gpt-4o) [string]
  -c, --case       Set the case style (e.g. camelCase, pascalCase,
@@ -82,0 +94,0 @@ snakeCase, kebabCase) [string]
@@ -1,2 +0,1 @@
-const fs = require('fs')
const axios = require('axios')
@@ -40,2 +39,8 @@
  return lmStudioApis(options)
+} else if (provider === 'openai') {
+  return [
+    { name: 'gpt-4o' },
+    { name: 'gpt-4' },
+    { name: 'gpt-3.5-turbo' }
+  ]
} else {
@@ -42,0 +47,0 @@ throw new Error('🔴 No supported provider found')
@@ -34,4 +34,9 @@ const os = require('os')
    type: 'string',
-    description: 'Set the provider (e.g. ollama, lm-studio)'
+    description: 'Set the provider (e.g. ollama, openai, lm-studio)'
  })
+  .option('api-key', {
+    alias: 'a',
+    type: 'string',
+    description: 'Set the API key if you\'re using openai as provider'
+  })
  .option('base-url', {
@@ -45,3 +50,3 @@ alias: 'u',
    type: 'string',
-    description: 'Set the model to use (e.g. gemma2, llama3)'
+    description: 'Set the model to use (e.g. gemma2, llama3, gpt-4o)'
  })
@@ -79,2 +84,7 @@ .option('case', {
+  if (argv['api-key']) {
+    config.defaultApiKey = argv['api-key']
+    await saveConfig({ config })
+  }
  if (argv['base-url']) {
@@ -81,0 +91,0 @@ config.defaultBaseURL = argv['base-url']
const fs = require('fs')
const axios = require('axios')
-const ollamaApis = async ({ model, prompt, images: _images, baseURL }) => {
+const ollamaApis = async ({ model, prompt, images, baseURL }) => {
  try {
@@ -14,4 +14,4 @@ const url = `${baseURL}/api/generate`
-    if (_images && _images.length > 0) {
-      const imageData = await fs.readFileSync(_images[0])
+    if (images && images.length > 0) {
+      const imageData = await fs.readFileSync(images[0])
      data.images = [imageData.toString('base64')]
@@ -29,7 +29,7 @@ }
  } catch (err) {
-    throw new Error(err?.response?.data?.error || err.message)
+    throw new Error(err?.response?.data?.error?.message || err?.response?.data?.error || err.message)
  }
}
-const lmStudioApis = async ({ model, prompt, images: _images, baseURL }) => {
+const openaiApis = async ({ model, prompt, images, apiKey, baseURL }) => {
  try {
@@ -50,4 +50,4 @@ const url = `${baseURL}/v1/chat/completions`
-    if (_images && _images.length > 0) {
-      const imageData = await fs.readFileSync(_images[0])
+    if (images && images.length > 0) {
+      const imageData = await fs.readFileSync(images[0])
      messages[0].content.push({
@@ -65,3 +65,6 @@ type: 'image_url',
      method: 'post',
-      headers: { 'Content-Type': 'application/json' }
+      headers: {
+        'Content-Type': 'application/json',
+        ...(apiKey && { Authorization: `Bearer ${apiKey}` })
+      }
    })
@@ -71,3 +74,3 @@
  } catch (err) {
-    throw new Error(err?.response?.data?.error || err.message)
+    throw new Error(err?.response?.data?.error?.message || err?.response?.data?.error || err.message)
  }
@@ -82,4 +85,4 @@ }
    return ollamaApis(options)
-  } else if (provider === 'lm-studio') {
-    return lmStudioApis(options)
+  } else if (provider === 'openai' || provider === 'lm-studio') {
+    return openaiApis(options)
  } else {
@@ -86,0 +89,0 @@ throw new Error('🔴 No supported provider found')
const changeCase = require('./changeCase')
const getModelResponse = require('./getModelResponse')
-module.exports = async ({ model, _case, chars, images, content, baseURL, language, provider, relativeFilePath }) => {
+module.exports = async options => {
+  const { _case, chars, content, language, relativeFilePath } = options
  try {
@@ -24,3 +26,3 @@ const promptLines = [
-    const modelResult = await getModelResponse({ model, prompt, images, baseURL, provider })
+    const modelResult = await getModelResponse({ ...options, prompt })
@@ -27,0 +29,0 @@ const maxChars = chars + 10
@@ -9,4 +9,6 @@ const path = require('path')
-module.exports = async ({ model, _case, chars, baseURL, language, provider, filePath, inputPath }) => {
+module.exports = async options => {
  try {
+    const { filePath, inputPath } = options
    const fileName = path.basename(filePath)
@@ -35,13 +37,3 @@ const ext = path.extname(filePath).toLowerCase()
-    const newName = await getNewName({
-      model,
-      _case,
-      chars,
-      images,
-      content,
-      baseURL,
-      language,
-      provider,
-      relativeFilePath
-    })
+    const newName = await getNewName({ ...options, images, content, relativeFilePath })
    if (!newName) return
@@ -48,0 +40,0 @@
@@ -7,3 +7,3 @@ const fs = require('fs').promises
-module.exports = async ({ inputPath, defaultCase, defaultModel, defaultChars, defaultBaseURL, defaultLanguage, defaultProvider, defaultIncludeSubdirectories }) => {
+module.exports = async ({ inputPath, defaultCase, defaultModel, defaultChars, defaultApiKey, defaultBaseURL, defaultLanguage, defaultProvider, defaultIncludeSubdirectories }) => {
  try {
@@ -13,2 +13,7 @@ const provider = defaultProvider || 'ollama'
+    const apiKey = defaultApiKey
+    if (apiKey) {
+      console.log('⚪ API key: **********')
+    }
    let baseURL = defaultBaseURL
@@ -19,2 +24,4 @@ if (provider === 'ollama' && !baseURL) {
      baseURL = 'http://127.0.0.1:1234'
+    } else if (provider === 'openai' && !baseURL) {
+      baseURL = 'https://api.openai.com'
    }
@@ -45,2 +52,3 @@ console.log(`⚪ Base URL: ${baseURL}`)
      chars,
+      apiKey,
      baseURL,
@@ -47,0 +55,0 @@ language,
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package