llm-interface - npm Package Compare versions

Comparing version 2.0.14 to 2.0.141

package.json

 {
   "name": "llm-interface",
-  "version": "2.0.14",
+  "version": "2.0.141",
   "main": "src/index.js",

@@ -5,0 +5,0 @@ "description": "A simple, unified NPM-based interface for interacting with multiple Large Language Model (LLM) APIs, including OpenAI, AI21 Studio, Anthropic, Cloudflare AI, Cohere, Fireworks AI, Google Gemini, Goose AI, Groq, Hugging Face, Mistral AI, Perplexity, Reka AI, watsonx.ai, and LLaMA.cpp.",

@@ -5,3 +5,3 @@ # llm-interface

-![Version 2.0.14](https://img.shields.io/badge/Version-2.0.14-blue) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Built with Node.js](https://img.shields.io/badge/Built%20with-Node.js-green)](https://nodejs.org/)
+![Version 2.0.141](https://img.shields.io/badge/Version-2.0.141-blue) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Built with Node.js](https://img.shields.io/badge/Built%20with-Node.js-green)](https://nodejs.org/)

@@ -29,3 +29,3 @@ ## Introduction

 - **Unified Interface**: `LLMInterface.sendMessage` is a single, consistent interface to interact with **36 different LLM APIs** (34 hosted LLM providers and 2 local LLM providers).
-- **Chat Completion, Streaming and Embeddings**: Supports [chat completion, streaming, and embeddings](docs/providers.md) (with failover).
+- **Chat Completion, Streaming and Embeddings**: Supports [chat completion, streaming, and embeddings](docs/providers/README.md) (with failover).
 - **Dynamic Module Loading**: Automatically loads and manages LLM interfaces only when they are invoked, minimizing resource usage.

@@ -32,0 +32,0 @@ - **Error Handling**: Robust error handling mechanisms to ensure reliable API interactions.
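
As orientation for the source hunks that follow, the `LLMInterface.sendMessage` call named in the feature list above can be exercised in a few lines. The sketch below is illustrative, not an excerpt from the package: the provider id, prompt, option names, and the `setApiKey` setup call are assumptions based on the README wording, so check the package docs for exact signatures.

// Minimal usage sketch, assuming the package is installed as `llm-interface`.
const { LLMInterface } = require('llm-interface');

async function demo() {
  // Assumed setup call; the real key-registration API may differ.
  LLMInterface.setApiKey({ openai: process.env.OPENAI_API_KEY });

  const response = await LLMInterface.sendMessage(
    'openai',                                        // one of the 36 supported providers
    'Summarize retry with backoff in one sentence.', // the prompt
    { max_tokens: 50 },                              // per-request options (assumed shape)
  );
  // The diffs below gate caching on response?.results, so the payload
  // is expected under response.results.
  console.log(response.results);
}

demo().catch(console.error);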

@@ -143,9 +143,15 @@ /**

+  let response = {};
   try {
-    const response = await retryWithBackoff(
+    response = await retryWithBackoff(
       embeddingsWithRetries,
       interfaceOptions,
       'EmbeddingsError',
     );
-    if (LLMInterface && LLMInterface.cacheManagerInstance && response) {
+  } catch (error) {
+    throw error;
+  }
+  if (LLMInterface && LLMInterface.cacheManagerInstance && response?.results) {
     try {
       const { cacheManagerInstance } = LLMInterface;

@@ -162,12 +168,8 @@

       }
     } catch (error) {
       throw error;
     }
   }
-    return response;
-  } catch (error) {
-    throw new EmbeddingsError(
-      `Failed to generate embeddings using LLM ${interfaceName}:`,
-      error.message,
-      error.stack,
-    );
-  }
+  return response;
 }
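
The two hunks above refactor the embeddings path: `retryWithBackoff` is isolated in its own try/catch, the cache write now runs only when `response?.results` is present, and the outer catch that re-wrapped every failure in an `EmbeddingsError` is removed. The sendMessage hunks below apply the identical change. Condensed, the new control flow looks like this (a paraphrase of the diff with the cache body elided, not the package source):

// Post-change shape shared by the embeddings and sendMessage functions.
let response = {};
try {
  // Errors thrown by the retry layer now propagate to the caller as-is.
  response = await retryWithBackoff(embeddingsWithRetries, interfaceOptions, 'EmbeddingsError');
} catch (error) {
  throw error;
}
if (LLMInterface && LLMInterface.cacheManagerInstance && response?.results) {
  // ...write the successful result to the cache; cache errors are rethrown...
}
// retryWithBackoff can now return a structured failure object instead of
// throwing (see the hunk at @@ -87,2 +87,18 @@ further down).
return response;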

@@ -174,0 +176,0 @@

@@ -132,9 +132,15 @@ /**

+  let response = {};
   try {
-    const response = await retryWithBackoff(
+    response = await retryWithBackoff(
       sendMessageWithRetries,
       interfaceOptions,
       'SendMessageError',
     );
-    if (LLMInterface && LLMInterface.cacheManagerInstance && response) {
+  } catch (error) {
+    throw error;
+  }
+  if (LLMInterface && LLMInterface.cacheManagerInstance && response?.results) {
     try {
       const { cacheManagerInstance } = LLMInterface;

@@ -151,11 +157,8 @@

       }
     } catch (error) {
       throw error;
     }
   }
-    return response;
-  } catch (error) {
-    throw new SendMessageError(
-      `Failed to send message using LLM interfaceName ${interfaceName}: ${error.message}`,
-      error.stack,
-    );
-  }
+  return response;
 }
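
With the outer catch gone from both functions, a caller that previously relied on every failure surfacing as a `SendMessageError` or `EmbeddingsError` may now also receive a structured failure object. A hedged sketch of defensive handling under that assumption (the field names `results`, `success`, `error`, and `retries` come from the hunks in this comparison; everything else is illustrative):

// Defensive caller sketch for the post-2.0.141 behavior.
try {
  const response = await LLMInterface.sendMessage('openai', 'Hello', {});
  if (response?.results) {
    console.log(response.results); // normal success path
  } else if (response?.success === false) {
    // Exhausted retries now come back as data rather than an exception.
    console.warn(`Retries exhausted after ${response.retries} attempts:`, response.error);
  }
} catch (error) {
  // Retry-layer errors (e.g. non-retryable HTTP statuses) still throw.
  console.error('Request failed outright:', error.message);
}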

@@ -162,0 +165,0 @@

@@ -15,3 +15,3 @@ const { SendMessageError, EmbeddingsError } = require('./errors.js');

  * @returns {Promise<any>} - The result of the function call.
- * @throws {SendMessageError|EmbeddingsError} - Throws an error if all retry attempts fail.
+ * @throws {SendMessageError|EmbeddingsError} - Throws an error if all retry attempts fail or on specific HTTP errors.
  */

@@ -22,6 +22,7 @@ async function retryWithBackoff(fn, options, errorType) {

   let currentRetry = 0;
+  let lastError;
   while (retryAttempts > 0) {
     try {
-      log.log(`retryWithBackoff:${retryAttempts}`);
+      log.log(`retryWithBackoff: Attempt ${currentRetry + 1}`);
       let response = await fn();

@@ -32,5 +33,7 @@ if (response?.results) {

         response.total_time = milliseconds.toFixed(5);
+        response.retries = currentRetry;
         return response;
       }
     } catch (error) {
+      lastError = error;
       const statusCode = error.response?.status;

@@ -51,3 +54,2 @@ const delayTime = (currentRetry + 1) * retryMultiplier * 1000 + 500;

           throw createError(errorType, statusCode, error);
-          break;

@@ -58,3 +60,2 @@ case 429:

       const retryAfter = error.response?.headers['retry-after'];
       if (retryAfter) {

@@ -79,3 +80,2 @@ log.log(

           throw createError(errorType, statusCode || 'Unknown', error);
-          break;
       }

@@ -87,2 +87,18 @@ }

+  // If all retries are exhausted without specific HTTP errors, return the last response with additional info
+  if (lastError) {
+    const end = hrtime(start);
+    const milliseconds = end[0] * 1e3 + end[1] / 1e6;
+    const results = {
+      total_time: milliseconds.toFixed(5),
+      retries: currentRetry,
+      success: false,
+      // statusCode is block-scoped to the catch above, so read it from
+      // lastError here; the stray trailing brace in the extracted text is dropped.
+      error: `HTTP ${lastError.response?.status || 'Unknown'}: ${
+        lastError.response?.statusText || lastError.message
+      }`,
+    };
+    return results; // Return the last error with total_time and retries
+  }
   if (errorType === 'SendMessageError') {

@@ -89,0 +105,0 @@ throw new SendMessageError('All retry attempts failed');
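
Two behaviors are visible in the context lines of the retryWithBackoff hunks above: the delay between attempts grows linearly, `delayTime = (currentRetry + 1) * retryMultiplier * 1000 + 500`, and for HTTP 429 the code prefers the server's `retry-after` header when present. A standalone sketch of the delay arithmetic (the `retryMultiplier` default of 1 is an assumption; the real value arrives via the caller's options):

// Illustrates the linear backoff schedule from the hunk context above.
const retryMultiplier = 1; // assumed default; supplied via interfaceOptions in the package

for (let currentRetry = 0; currentRetry < 3; currentRetry++) {
  const delayTime = (currentRetry + 1) * retryMultiplier * 1000 + 500;
  console.log(`attempt ${currentRetry + 1}: wait ${delayTime} ms`);
}
// Prints: 1500 ms, 2500 ms, 3500 ms.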
