@empiricalrun/llm
Comparing version 0.4.1 to 0.4.2
CHANGELOG.md

 # @empiricalrun/llm
+## 0.4.2
+### Patch Changes
+- a9423b7: feat: add support for retries in llm calls
 ## 0.4.1
dist/index.js

@@ -7,2 +7,3 @@ "use strict";
 exports.langfuseInstance = exports.getPrompt = exports.flushAllTraces = exports.getLLMResult = void 0;
+const async_retry_1 = __importDefault(require("async-retry"));
 const openai_1 = __importDefault(require("openai"));
@@ -29,3 +30,3 @@ const portkey_ai_1 = require("portkey-ai");
 });
-const completion = await openai.chat.completions.create({
+const completion = await (0, async_retry_1.default)(async () => await openai.chat.completions.create({
 messages,
@@ -37,2 +38,8 @@ model,
 stream: false,
-});
+}), {
+    retries: 5,
+    factor: 3,
+    minTimeout: 1000,
+    maxTimeout: 60000,
+    randomize: true,
+});
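The compiled output above is what tsc emits; below is a minimal sketch of the TypeScript source it plausibly corresponds to. The function name `getCompletionWithRetry`, its signature, and the client construction are assumptions for illustration — only the retry options and the `chat.completions.create` call are taken from the diff.

```typescript
// Sketch of the likely source for the compiled diff above.
// `getCompletionWithRetry` and its signature are illustrative assumptions.
import retry from "async-retry";
import OpenAI from "openai";
import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function getCompletionWithRetry(
  model: string,
  messages: ChatCompletionMessageParam[],
) {
  const completion = await retry(
    // Any rejection here (rate limit, network error, 5xx) triggers a retry.
    async () =>
      await openai.chat.completions.create({
        messages,
        model,
        stream: false,
      }),
    {
      retries: 5,        // up to 5 retries after the initial attempt
      factor: 3,         // each delay is 3x the previous one
      minTimeout: 1000,  // first delay: ~1 second
      maxTimeout: 60000, // delays are capped at 60 seconds
      randomize: true,   // multiply each delay by a random factor in [1, 2)
    },
  );
  return completion.choices[0]?.message;
}
```

Assuming `async-retry` follows the backoff formula of the underlying `retry` package (delay = random × minTimeout × factor^attempt, capped at maxTimeout), the nominal delays are roughly 1s, 3s, 9s, 27s, and 60s (the fifth would be 81s before the cap), so a call that exhausts all five retries spends on the order of 100-140 seconds waiting, depending on jitter.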
package.json

 {
   "name": "@empiricalrun/llm",
-  "version": "0.4.1",
+  "version": "0.4.2",
   "main": "dist/index.js",
@@ -15,2 +15,3 @@ "publishConfig": {
   "dependencies": {
+    "async-retry": "^1.3.3",
     "langfuse": "^3.11.2",
@@ -20,2 +21,5 @@ "openai": "^4.47.2",
   },
+  "devDependencies": {
+    "@types/async-retry": "^1.4.8"
+  },
   "scripts": {
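A note on where the new entries land in the manifest: `async-retry` is a runtime dependency because the compiled `dist/index.js` requires it at import time, while `@types/async-retry` goes into `devDependencies` since the type definitions are only consumed by `tsc` during the build and are not needed by consumers installing the published package.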
+ Added async-retry@^1.3.3
+ Added async-retry@1.3.3 (transitive)
+ Added retry@0.13.1 (transitive)