@empiricalrun/ai - npm Package Compare versions

Comparing version 0.2.0 to 0.3.0


CHANGELOG.md
# @empiricalrun/ai
+## 0.3.0
+### Minor Changes
+- a94aa16: feat: add stop reason and token usage metrics to run output
## 0.2.0

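The 0.3.0 entry summarizes the changes visible in the file diffs below: every provider now reports OpenAI-style token usage, a normalized stop reason, and the latency of the call. A sketch of the resulting output shape, as a JSDoc typedef (the typedef name is hypothetical; field names and timestamp units come from the diffs):

/**
 * Hypothetical typedef; field names come from the diffs below.
 * @typedef {Object} RunOutput
 * @property {string} id
 * @property {number} created  Unix timestamp (seconds for anthropic, ms for google)
 * @property {{ total_tokens: number, prompt_tokens: number, completion_tokens: number }} usage
 * @property {Array<{ finish_reason: string, index: number, message: { role: "assistant", content: string } }>} choices
 * @property {number} latency  wall-clock milliseconds around the provider call
 */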


dist/providers/anthropic/index.js

@@ -12,3 +12,3 @@ "use strict";

const batchTaskManager = new utils_1.BatchTaskManager(5);
-const finishReaonReverseMap = new Map([
+const finishReasonReverseMap = new Map([
["end_turn", "stop"],

@@ -55,2 +55,3 @@ ["max_tokens", "length"],

try {
+const startedAt = Date.now();
const response = await (0, promise_retry_1.default)((retry) => {

@@ -87,2 +88,5 @@ return anthropic.messages

executionDone();
+const latency = Date.now() - startedAt;
+// renaming to terms used by openai, mistral
+const { input_tokens: prompt_tokens, output_tokens: completion_tokens } = response.usage;
return {

@@ -93,5 +97,10 @@ id: response.id,

created: Date.now() / 1000,
+usage: {
+total_tokens: prompt_tokens + completion_tokens,
+completion_tokens,
+prompt_tokens,
+},
choices: [
{
-finish_reason: finishReaonReverseMap.get(response.stop_reason) || "stop",
+finish_reason: finishReasonReverseMap.get(response.stop_reason) || "stop",
index: 0,

@@ -105,2 +114,3 @@ message: {

],
+latency,
};

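The anthropic change does two things: it fixes the finishReaonReverseMap typo while mapping Anthropic stop reasons onto OpenAI-style finish reasons, and it renames Anthropic's input_tokens/output_tokens usage fields to the prompt_tokens/completion_tokens terms used by openai and mistral. A standalone sketch of that normalization, assuming a Messages-API-shaped response; only the two mappings visible in the hunks are included, and the full map may contain more:

// Standalone sketch of the normalization added above.
const finishReasonReverseMap = new Map([
  ["end_turn", "stop"],
  ["max_tokens", "length"],
]);

function normalizeAnthropicResponse(response) {
  // Rename Anthropic's usage fields to the terms used by openai, mistral.
  const { input_tokens: prompt_tokens, output_tokens: completion_tokens } =
    response.usage;
  return {
    usage: {
      total_tokens: prompt_tokens + completion_tokens,
      prompt_tokens,
      completion_tokens,
    },
    // Unmapped stop reasons fall back to "stop", as in the diff.
    finish_reason: finishReasonReverseMap.get(response.stop_reason) || "stop",
  };
}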

dist/providers/google/index.js

@@ -53,2 +53,3 @@ "use strict";

try {
+const startedAt = Date.now();
const completion = await (0, promise_retry_1.default)((retry) => {

@@ -67,2 +68,18 @@ // TODO: move to model.startChat which support model config (e.g. temperature)

executionDone();
+const latency = Date.now() - startedAt;
+const responseContent = completion.response.text();
+let totalTokens = 0, promptTokens = 0, completionTokens = 0;
+try {
+[{ totalTokens: completionTokens }, { totalTokens: promptTokens }] =
+await Promise.all([
+modelInstance.countTokens(responseContent),
+modelInstance.countTokens({
+contents,
+}),
+]);
+totalTokens = completionTokens + promptTokens;
+}
+catch (e) {
+console.warn(`Failed to fetch token usage for google:${model}`);
+}
const response = {

@@ -75,3 +92,3 @@ id: crypto_1.default.randomUUID(),

message: {
-content: completion.response.text(),
+content: responseContent,
role: "assistant",

@@ -84,3 +101,9 @@ },

created: Date.now(),
+usage: {
+total_tokens: totalTokens,
+prompt_tokens: promptTokens,
+completion_tokens: completionTokens,
+},
model,
+latency,
};

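Gemini completions are not returned with usage attached here, so the google provider derives counts from two parallel countTokens calls, one over the response text and one over the request contents, and falls back to zeroed usage if counting fails. A minimal sketch of that pattern, assuming countTokens resolves to { totalTokens } as the destructuring in the diff implies:

// Sketch of the token-usage fallback added above.
async function countGoogleUsage(modelInstance, responseContent, contents, model) {
  let totalTokens = 0, promptTokens = 0, completionTokens = 0;
  try {
    // Count completion and prompt tokens in parallel.
    [{ totalTokens: completionTokens }, { totalTokens: promptTokens }] =
      await Promise.all([
        modelInstance.countTokens(responseContent),
        modelInstance.countTokens({ contents }),
      ]);
    totalTokens = completionTokens + promptTokens;
  } catch (e) {
    // Usage stays zeroed; the run itself still succeeds.
    console.warn(`Failed to fetch token usage for google:${model}`);
  }
  return {
    total_tokens: totalTokens,
    prompt_tokens: promptTokens,
    completion_tokens: completionTokens,
  };
}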


dist/providers/mistral/index.js

@@ -22,2 +22,3 @@ "use strict";

const mistralMessages = messages;
+const startedAt = Date.now();
// no retry needed as mistral internally handles it well

@@ -35,5 +36,6 @@ const completions = await mistralai.chat({

executionDone();
+const latency = Date.now() - startedAt;
// typecasting as the only difference present in mistral interface is the it doesnt contain logprobs.
// currently its not being used. hence typecasting it for now.
-return completions;
+return { ...completions, latency };
}

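The mistral change is the smallest instance of the latency pattern applied to all four providers: record Date.now() before the call, compute the delta after it resolves, and spread it onto the otherwise-unchanged completions object. A hypothetical generic helper capturing the pattern (the package inlines it per provider rather than sharing a helper):

// Hypothetical helper illustrating the latency pattern from the diffs.
async function withLatency(call) {
  const startedAt = Date.now();
  const completions = await call();
  // Spread keeps the provider response intact and adds the new field.
  return { ...completions, latency: Date.now() - startedAt };
}

// e.g. const completions = await withLatency(() => mistralai.chat(params));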

dist/providers/openai/index.js

@@ -19,2 +19,3 @@ "use strict";

try {
+const startedAt = Date.now();
const completions = await (0, promise_retry_1.default)((retry) => {

@@ -39,3 +40,4 @@ return openai.chat.completions.create(body).catch((err) => {

});
-return completions;
+const latency = Date.now() - startedAt;
+return { ...completions, latency };
}

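Both the openai and anthropic providers issue the request through promise-retry (the compiled (0, promise_retry_1.default)(...) call above). A sketch of the wrapper as typically used; the retry condition is an assumption, since the hunks show only the wrapper and the .catch handler:

const promiseRetry = require("promise-retry");

// Sketch only: the retry condition below is assumed, not taken from the diff.
async function createWithRetry(openai, body) {
  return promiseRetry((retry) =>
    openai.chat.completions.create(body).catch((err) => {
      // Assumption: retry on rate limits (HTTP 429), rethrow everything else.
      if (err && err.status === 429) {
        retry(err);
      }
      throw err;
    })
  );
}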

package.json

{
"name": "@empiricalrun/ai",
"version": "0.2.0",
"version": "0.3.0",
"publishConfig": {

@@ -9,3 +9,3 @@ "registry": "https://registry.npmjs.org/",

"main": "dist/index.js",
"author": "Empirical.run Team <hey@empirical.run>",
"author": "Empirical Team <hey@empirical.run>",
"license": "MIT",

@@ -18,3 +18,3 @@ "repository": {

"@types/promise-retry": "^1.1.6",
"@empiricalrun/types": "0.2.0"
"@empiricalrun/types": "0.3.1"
},


