@empiricalrun/llm - npm package version comparison

Comparing version 0.16.0 to 0.16.1

CHANGELOG.md
# @empiricalrun/llm
## 0.16.1
### Patch Changes
- 79857b3: feat: enhance GeminiChatModel to filter pending tool calls by ID
## 0.16.0

@@ -4,0 +10,0 @@

dist/chat/claude/index.d.ts

@@ -26,3 +26,3 @@ import type { Anthropic } from "@anthropic-ai/sdk";

         assistantMessage: Anthropic.Beta.Messages.BetaMessage | undefined;
-    } | undefined>;
+    }>;
     getHumanReadableLatestMessage(): {

@@ -29,0 +29,0 @@ role: string;

@@ -76,3 +76,3 @@ "use strict";

         }
-        return undefined;
+        throw error;
     }

@@ -79,0 +79,0 @@ }

@@ -130,3 +130,3 @@ "use strict";

         }
-        return undefined;
+        throw error;
     }

@@ -217,3 +217,3 @@ }

     const matchIndex = functionCallRequests.findIndex((call) => {
-        return call.name === response.name;
+        return call.id === response.id;
     });

@@ -220,0 +220,0 @@ if (matchIndex !== -1) {
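
The hunk above implements the 0.16.1 changelog entry: pending Gemini tool calls are now matched to their responses by id instead of by name. A minimal TypeScript sketch of why that matters (the shapes and values here are illustrative, not taken from the package): matching by name picks the wrong request as soon as the model issues two calls to the same tool.

// Hypothetical request/response shapes, loosely modeled on the diff above.
interface FunctionCallRequest {
  id: string;
  name: string;
  args: Record<string, unknown>;
}
interface FunctionCallResponse {
  id: string;
  name: string;
  result: unknown;
}

// Two pending calls to the same tool, distinguishable only by id.
const functionCallRequests: FunctionCallRequest[] = [
  { id: "call_1", name: "search", args: { q: "weather" } },
  { id: "call_2", name: "search", args: { q: "news" } },
];
const response: FunctionCallResponse = { id: "call_2", name: "search", result: [] };

// Matching by name finds the first "search" request (call_1), which is the wrong pairing.
const byName = functionCallRequests.findIndex((call) => call.name === response.name);
// Matching by id pairs the response with the exact call that produced it.
const byId = functionCallRequests.findIndex((call) => call.id === response.id);

console.log(byName, byId); // 0, 1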

@@ -7,2 +7,3 @@ "use strict";

 exports.OpenAIChatModel = void 0;
+const async_retry_1 = __importDefault(require("async-retry"));
 const openai_1 = __importDefault(require("openai"));

@@ -67,12 +68,34 @@ const __1 = require("../..");

     : new openai_1.default();
-// TODO: Add async-retry
-const response = await openai.responses.create({
-    model: selectedModel,
-    store: false,
-    parallel_tool_calls: false,
-    tools: params.tools.map(chatCompletionToolToFunctionTool),
-    instructions: systemPrompt,
-    input,
-    truncation: "auto",
-});
+const response = await (0, async_retry_1.default)(async (bail) => {
+    try {
+        return await openai.responses.create({
+            model: selectedModel,
+            store: false,
+            parallel_tool_calls: false,
+            tools: params.tools.map(chatCompletionToolToFunctionTool),
+            instructions: systemPrompt,
+            input,
+            truncation: "auto",
+        });
+    }
+    catch (err) {
+        if (err instanceof openai_1.default.APIError &&
+            err.status &&
+            err.status >= 400 &&
+            err.status < 500) {
+            bail(err);
+            return;
+        }
+        throw err;
+    }
+}, {
+    retries: 3,
+    factor: 2,
+    minTimeout: 1000,
+    maxTimeout: 5000,
+    randomize: true,
+});
+if (!response) {
+    throw new Error("No response from OpenAI after retries");
+}
 const usage = response.usage;

@@ -79,0 +102,0 @@ this.tokensUsedSoFar.input += usage?.input_tokens ?? 0;
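
As a rough guide to what the retry options above mean in practice, the snippet below (not part of the package) replays the backoff formula that async-retry inherits from the retry module: each delay is minTimeout * factor^attempt, capped at maxTimeout, with randomize applying a jitter factor between 1 and 2, and bail() skipping the remaining attempts for 4xx client errors.

// Illustrative only: approximate delays for the options used in the diff above.
const opts = { retries: 3, factor: 2, minTimeout: 1000, maxTimeout: 5000, randomize: true };

for (let attempt = 0; attempt < opts.retries; attempt++) {
  // retry's documented formula: Math.min(random * minTimeout * factor ** attempt, maxTimeout)
  const random = opts.randomize ? 1 + Math.random() : 1;
  const delayMs = Math.min(random * opts.minTimeout * opts.factor ** attempt, opts.maxTimeout);
  console.log(`retry #${attempt + 1} after ~${Math.round(delayMs)} ms`);
}
// Roughly 1-2 s, then 2-4 s, then 4-5 s (capped); a 4xx error bails immediately with no further attempts.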

@@ -26,4 +26,5 @@ "use strict";

 const response = await this.internalModel.getLLMResponse(params);
-if (!response)
-    return undefined;
+if (!response) {
+    throw new Error("Error getting LLM response");
+}
 const canonical = this.internalToCanonical(response, this.messages);

@@ -30,0 +31,0 @@ return canonical;
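
Together with the earlier return undefined / throw error hunks, this changes the failure contract of getLLMResponse: a missing response now surfaces as an exception rather than an undefined value. A hypothetical caller (names and shapes below are illustrative, not from the package) would migrate from an undefined check to a try/catch:

// Before (0.16.0-style contract): the result could be undefined.
async function runTurnBefore(
  model: { getLLMResponse(params: unknown): Promise<{ content: string } | undefined> },
  params: unknown,
): Promise<string> {
  const response = await model.getLLMResponse(params);
  if (!response) {
    return "no response";
  }
  return response.content;
}

// After (0.16.1-style contract): failures propagate as exceptions, so wrap the call instead.
async function runTurnAfter(
  model: { getLLMResponse(params: unknown): Promise<{ content: string }> },
  params: unknown,
): Promise<string> {
  try {
    const response = await model.getLLMResponse(params);
    return response.content;
  } catch (error) {
    return `request failed: ${String(error)}`;
  }
}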

package.json

 {
   "name": "@empiricalrun/llm",
-  "version": "0.16.0",
+  "version": "0.16.1",
   "main": "dist/index.js",

@@ -51,3 +51,3 @@ "exports": {

"@types/async-retry": "^1.4.8",
"@empiricalrun/shared-types": "0.1.0"
"@empiricalrun/shared-types": "0.2.0"
},

@@ -54,0 +54,0 @@ "scripts": {
