autoevals - npm Package Compare versions

Comparing version 0.0.25 to 0.0.26
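
In short: this release migrates autoevals from the openai v3 SDK to v4 (new `OpenAI` client class, `chat.completions.create`, types under `openai/resources`), wraps every completion call in a Braintrust tracing span, makes LLM classifiers return named scorer functions, and reads the model's rationale from a structured `reasons` array in the function-call arguments.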

jsdist/util.d.ts

jsdist/bundle.js

@@ -38,6 +38,3 @@ var __defProp = Object.defineProperty;

 // js/oai.ts
-import {
-  Configuration,
-  OpenAIApi
-} from "openai";
+import { OpenAI } from "openai";
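
This import swap is the openai v3-to-v4 SDK migration: v4 replaces the `Configuration`/`OpenAIApi` pair with a single `OpenAI` client. A minimal sketch of the before/after construction (the v3 half shown as comments for contrast):

    // openai v3 (removed): wrap a Configuration in an OpenAIApi instance
    // import { Configuration, OpenAIApi } from "openai";
    // const openai = new OpenAIApi(
    //   new Configuration({ apiKey: process.env.OPENAI_API_KEY })
    // );

    // openai v4 (added): a single client, configured directly
    import { OpenAI } from "openai";

    const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });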

@@ -49,21 +46,70 @@ // js/env.ts

+// js/util.ts
+var NoopSpan = class {
+  constructor() {
+    this.kind = "span";
+    this.id = "";
+    this.span_id = "";
+    this.root_span_id = "";
+  }
+  log(_) {
+  }
+  startSpan(_0, _1) {
+    return this;
+  }
+  traced(_0, callback, _1) {
+    return callback(this);
+  }
+  end(args) {
+    var _a;
+    return (_a = args == null ? void 0 : args.endTime) != null ? _a : (/* @__PURE__ */ new Date()).getTime() / 1e3;
+  }
+  close(args) {
+    return this.end(args);
+  }
+};
+function currentSpan() {
+  if (globalThis.__inherited_braintrust_state) {
+    return globalThis.__inherited_braintrust_state.currentSpan.getStore();
+  } else {
+    return new NoopSpan();
+  }
+}
 // js/oai.ts
 async function cachedChatCompletion(params, options) {
   const { cache, openAiApiKey, openAiOrganizationId } = options;
-  const cached = await (cache == null ? void 0 : cache.get(params));
-  if (cached) {
-    return cached;
-  }
-  const config = new Configuration({
-    apiKey: openAiApiKey || Env.OPENAI_API_KEY,
-    organization: openAiOrganizationId
-  });
+  return await currentSpan().traced("OpenAI Completion", async (span) => {
+    var _b, _c, _d;
+    let cached = false;
+    let ret = await (cache == null ? void 0 : cache.get(params));
+    if (ret) {
+      cached = true;
+    } else {
+      const openai = new OpenAI({
+        apiKey: openAiApiKey || Env.OPENAI_API_KEY,
+        organization: openAiOrganizationId
+      });
+      if (openai === null) {
+        throw new Error("OPENAI_API_KEY not set");
+      }
+      const completion = await openai.chat.completions.create(params);
+      await (cache == null ? void 0 : cache.set(params, completion));
+      ret = completion;
+    }
+    const _a = params, { messages } = _a, rest = __objRest(_a, ["messages"]);
+    span.log({
+      input: messages,
+      metadata: __spreadProps(__spreadValues({}, rest), {
+        cached
+      }),
+      output: ret.choices[0],
+      metrics: {
+        tokens: (_b = ret.usage) == null ? void 0 : _b.total_tokens,
+        prompt_tokens: (_c = ret.usage) == null ? void 0 : _c.prompt_tokens,
+        completion_tokens: (_d = ret.usage) == null ? void 0 : _d.completion_tokens
+      }
+    });
+    return ret;
+  });
-  const openai = new OpenAIApi(config);
-  if (openai === null) {
-    throw new Error("OPENAI_API_KEY not set");
-  }
-  const completion = await openai.createChatCompletion(params);
-  const data = completion.data;
-  await (cache == null ? void 0 : cache.set(params, data));
-  return data;
 }
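
The added `NoopSpan`/`currentSpan` shim makes tracing optional: when the host process has Braintrust state installed on `globalThis.__inherited_braintrust_state`, `cachedChatCompletion` logs its input messages, a `cached` flag, the first choice, and token metrics to the active span; otherwise everything routes through the do-nothing `NoopSpan`. A hedged sketch of the resulting call pattern (argument values are illustrative):

    // Illustrative call into the function above; the cache is optional, and
    // the span logging happens inside whether or not the result was cached.
    const completion = await cachedChatCompletion(
      {
        model: "gpt-3.5-turbo",
        messages: [{ role: "user", content: "Is the answer correct?" }],
      },
      { openAiApiKey: process.env.OPENAI_API_KEY }
    );
    console.log(completion.choices[0]);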

@@ -210,2 +256,12 @@

   expected,
+  openAiApiKey,
+  openAiOrganizationId
+} = _a, remaining = __objRest(_a, [
+  "name",
+  "output",
+  "expected",
+  "openAiApiKey",
+  "openAiOrganizationId"
+]);
+const _b = remaining, {
   messages: messagesArg,

@@ -217,9 +273,4 @@ model,

   temperature,
-  cache,
-  openAiApiKey,
-  openAiOrganizationId
-} = _a, remainingRenderArgs = __objRest(_a, [
-  "name",
-  "output",
-  "expected",
+  cache
+} = _b, remainingRenderArgs = __objRest(_b, [
   "messages",

@@ -231,5 +282,3 @@ "model",

   "temperature",
-  "cache",
-  "openAiApiKey",
-  "openAiOrganizationId"
+  "cache"
 ]);

@@ -259,2 +308,4 @@ let found = false;

 }));
+let ret = null;
+let validityScore = 1;
 try {

@@ -275,3 +326,3 @@ const resp = await cachedChatCompletion(

 if (resp.choices.length > 0) {
-  return __spreadValues({
+  ret = __spreadValues({
     name

@@ -283,3 +334,4 @@ }, parseResponse(resp.choices[0].message, choiceScores));

 } catch (error) {
-  return {
+  validityScore = 0;
+  ret = {
     name,

@@ -290,4 +342,6 @@ score: 0,

   }
+  return ret;
 }
 function parseResponse(resp, choiceScores) {
+  var _a;
   let score = 0;

@@ -297,4 +351,5 @@ let error = void 0;

   try {
-    metadata["rationale"] = `${resp.content}`;
-    const choice = JSON.parse(resp.function_call.arguments)["choice"].trim();
+    const args = JSON.parse(resp.function_call.arguments);
+    metadata["rationale"] = (_a = args["reasons"]) == null ? void 0 : _a.join("\n");
+    const choice = args["choice"].trim();
     metadata["choice"] = choice;
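
`parseResponse` used to take the rationale from the message's free-text `content` and only the `choice` from the function-call arguments; with chain-of-thought enabled, both now arrive inside the JSON arguments, with the reasoning as a `reasons` array. A hypothetical payload in the new shape, parsed the same way the code above does:

    // Hypothetical function_call arguments returned by the classifier model:
    const rawArguments = JSON.stringify({
      reasons: [
        "The output matches the expected answer in substance.",
        "No contradictions were found.",
      ],
      choice: "A",
    });

    const args = JSON.parse(rawArguments);
    const rationale = args["reasons"]?.join("\n"); // becomes metadata["rationale"]
    const choice = args["choice"].trim();          // looked up in choiceScores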

@@ -325,3 +380,3 @@ if (choiceScores[choice] !== void 0) {

   const choiceStrings = Object.keys(choiceScores);
-  return async (runtimeArgs) => {
+  const ret = async (runtimeArgs) => {
     var _a, _b;

@@ -352,2 +407,7 @@ const useCoT = (_b = (_a = runtimeArgs.useCoT) != null ? _a : useCoTArg) != null ? _b : true;

   };
+  Object.defineProperty(ret, "name", {
+    value: name,
+    configurable: true
+  });
+  return ret;
 }
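
Returning `ret` through `Object.defineProperty` gives the scorer a proper `name`. `Function.prototype.name` is non-writable (plain assignment fails silently, or throws in strict mode) but configurable, so this is the standard way to name a dynamically created function. A standalone sketch:

    function makeScorer(name: string) {
      const ret = async (_runtimeArgs: Record<string, unknown>) => {
        return { name, score: 1 }; // scoring logic elided
      };
      // "name" is non-writable but configurable, so defineProperty succeeds
      // where `ret.name = name` would not.
      Object.defineProperty(ret, "name", { value: name, configurable: true });
      return ret;
    }

    console.log(makeScorer("Battle").name); // "Battle"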

@@ -354,0 +414,0 @@ function LLMClassifierFromSpec(name, spec) {

 import { Score, Scorer, ScorerArgs } from "./base.js";
-import { ChatCompletionFunctions, ChatCompletionRequestMessage } from "openai";
 import { ChatCache } from "./oai.js";
 import { templates } from "./templates.js";
+import { ChatCompletionCreateParams, ChatCompletionMessage } from "openai/resources/index.mjs";
 interface LLMArgs {

@@ -30,5 +30,5 @@ maxTokens?: number;

   model: string;
-  messages: ChatCompletionRequestMessage[];
+  messages: ChatCompletionMessage[];
   choiceScores: Record<string, number>;
-  classificationFunctions: ChatCompletionFunctions[];
+  classificationFunctions: ChatCompletionCreateParams.Function[];
   cache?: ChatCache;

@@ -35,0 +35,0 @@ } & LLMArgs & RenderArgs;
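
The typings follow the same v4 migration: request/response types move from the package root to `openai/resources/index.mjs`, `ChatCompletionRequestMessage` becomes `ChatCompletionMessage`, and `ChatCompletionFunctions` becomes the namespaced `ChatCompletionCreateParams.Function`. For example, against openai ^4.12 (field values are illustrative):

    import {
      ChatCompletionCreateParams,
      ChatCompletionMessage,
    } from "openai/resources/index.mjs";

    const messages: ChatCompletionMessage[] = [
      { role: "system", content: "You are an evaluator." },
    ];

    const classifierFunction: ChatCompletionCreateParams.Function = {
      name: "select_choice",
      parameters: { type: "object", properties: { choice: { type: "string" } } },
    };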

@@ -91,3 +91,4 @@ "use strict";

 return __awaiter(this, void 0, void 0, function* () {
-    const { name, output, expected, messages: messagesArg, model, choiceScores, classificationFunctions, maxTokens, temperature, cache, openAiApiKey, openAiOrganizationId } = args, remainingRenderArgs = __rest(args, ["name", "output", "expected", "messages", "model", "choiceScores", "classificationFunctions", "maxTokens", "temperature", "cache", "openAiApiKey", "openAiOrganizationId"]);
+    const { name, output, expected, openAiApiKey, openAiOrganizationId } = args, remaining = __rest(args, ["name", "output", "expected", "openAiApiKey", "openAiOrganizationId"]);
+    const { messages: messagesArg, model, choiceScores, classificationFunctions, maxTokens, temperature, cache } = remaining, remainingRenderArgs = __rest(remaining, ["messages", "model", "choiceScores", "classificationFunctions", "maxTokens", "temperature", "cache"]);
     let found = false;

@@ -110,2 +111,4 @@ for (const m of SUPPORTED_MODELS) {

 const messages = messagesArg.map((m) => (Object.assign(Object.assign({}, m), { content: m.content && mustache_1.default.render(m.content, renderArgs) })));
+let ret = null;
+let validityScore = 1;
 try {

@@ -119,3 +122,3 @@ const resp = yield (0, oai_js_1.cachedChatCompletion)(Object.assign({ model,

 if (resp.choices.length > 0) {
-    return Object.assign({ name }, parseResponse(resp.choices[0].message, choiceScores));
+    ret = Object.assign({ name }, parseResponse(resp.choices[0].message, choiceScores));
 }

@@ -127,3 +130,4 @@ else {

 catch (error) {
-    return {
+    validityScore = 0;
+    ret = {
       name,

@@ -134,2 +138,3 @@ score: 0,

 }
+return ret;
 });

@@ -139,2 +144,3 @@ }

 function parseResponse(resp, choiceScores) {
+    var _a;
     let score = 0;

@@ -144,4 +150,5 @@ let error = undefined;

 try {
-    metadata["rationale"] = `${resp.content}`;
-    const choice = JSON.parse(resp.function_call.arguments)["choice"].trim();
+    const args = JSON.parse(resp.function_call.arguments);
+    metadata["rationale"] = (_a = args["reasons"]) === null || _a === void 0 ? void 0 : _a.join("\n");
+    const choice = args["choice"].trim();
     metadata["choice"] = choice;

@@ -167,3 +174,3 @@ if (choiceScores[choice] !== undefined) {

 const choiceStrings = Object.keys(choiceScores);
-return (runtimeArgs) => __awaiter(this, void 0, void 0, function* () {
+const ret = (runtimeArgs) => __awaiter(this, void 0, void 0, function* () {
     var _a, _b;

@@ -188,2 +195,7 @@ const useCoT = (_b = (_a = runtimeArgs.useCoT) !== null && _a !== void 0 ? _a : useCoTArg) !== null && _b !== void 0 ? _b : true;

 });
+Object.defineProperty(ret, "name", {
+    value: name,
+    configurable: true,
+});
+return ret;
 }

@@ -190,0 +202,0 @@ exports.LLMClassifierFromTemplate = LLMClassifierFromTemplate;

@@ -43,24 +43,72 @@ var __defProp = Object.defineProperty;

 // js/oai.ts
-import {
-  Configuration,
-  OpenAIApi
-} from "openai";
+import { OpenAI } from "openai";
+// js/util.ts
+var NoopSpan = class {
+  constructor() {
+    this.kind = "span";
+    this.id = "";
+    this.span_id = "";
+    this.root_span_id = "";
+  }
+  log(_) {
+  }
+  startSpan(_0, _1) {
+    return this;
+  }
+  traced(_0, callback, _1) {
+    return callback(this);
+  }
+  end(args) {
+    var _a;
+    return (_a = args == null ? void 0 : args.endTime) != null ? _a : (/* @__PURE__ */ new Date()).getTime() / 1e3;
+  }
+  close(args) {
+    return this.end(args);
+  }
+};
+function currentSpan() {
+  if (globalThis.__inherited_braintrust_state) {
+    return globalThis.__inherited_braintrust_state.currentSpan.getStore();
+  } else {
+    return new NoopSpan();
+  }
+}
 // js/oai.ts
 async function cachedChatCompletion(params, options) {
   const { cache, openAiApiKey, openAiOrganizationId } = options;
-  const cached = await (cache == null ? void 0 : cache.get(params));
-  if (cached) {
-    return cached;
-  }
-  const config = new Configuration({
-    apiKey: openAiApiKey || Env.OPENAI_API_KEY,
-    organization: openAiOrganizationId
-  });
+  return await currentSpan().traced("OpenAI Completion", async (span) => {
+    var _b, _c, _d;
+    let cached = false;
+    let ret = await (cache == null ? void 0 : cache.get(params));
+    if (ret) {
+      cached = true;
+    } else {
+      const openai = new OpenAI({
+        apiKey: openAiApiKey || Env.OPENAI_API_KEY,
+        organization: openAiOrganizationId
+      });
+      if (openai === null) {
+        throw new Error("OPENAI_API_KEY not set");
+      }
+      const completion = await openai.chat.completions.create(params);
+      await (cache == null ? void 0 : cache.set(params, completion));
+      ret = completion;
+    }
+    const _a = params, { messages } = _a, rest = __objRest(_a, ["messages"]);
+    span.log({
+      input: messages,
+      metadata: __spreadProps(__spreadValues({}, rest), {
+        cached
+      }),
+      output: ret.choices[0],
+      metrics: {
+        tokens: (_b = ret.usage) == null ? void 0 : _b.total_tokens,
+        prompt_tokens: (_c = ret.usage) == null ? void 0 : _c.prompt_tokens,
+        completion_tokens: (_d = ret.usage) == null ? void 0 : _d.completion_tokens
+      }
+    });
+    return ret;
+  });
-  const openai = new OpenAIApi(config);
-  if (openai === null) {
-    throw new Error("OPENAI_API_KEY not set");
-  }
-  const completion = await openai.createChatCompletion(params);
-  const data = completion.data;
-  await (cache == null ? void 0 : cache.set(params, data));
-  return data;
 }

@@ -207,2 +255,12 @@

   expected,
+  openAiApiKey,
+  openAiOrganizationId
+} = _a, remaining = __objRest(_a, [
+  "name",
+  "output",
+  "expected",
+  "openAiApiKey",
+  "openAiOrganizationId"
+]);
+const _b = remaining, {
   messages: messagesArg,

@@ -214,9 +272,4 @@ model,

   temperature,
-  cache,
-  openAiApiKey,
-  openAiOrganizationId
-} = _a, remainingRenderArgs = __objRest(_a, [
-  "name",
-  "output",
-  "expected",
+  cache
+} = _b, remainingRenderArgs = __objRest(_b, [
   "messages",

@@ -228,5 +281,3 @@ "model",

   "temperature",
-  "cache",
-  "openAiApiKey",
-  "openAiOrganizationId"
+  "cache"
 ]);

@@ -256,2 +307,4 @@ let found = false;

 }));
+let ret = null;
+let validityScore = 1;
 try {

@@ -272,3 +325,3 @@ const resp = await cachedChatCompletion(

 if (resp.choices.length > 0) {
-  return __spreadValues({
+  ret = __spreadValues({
     name

@@ -280,3 +333,4 @@ }, parseResponse(resp.choices[0].message, choiceScores));

 } catch (error) {
-  return {
+  validityScore = 0;
+  ret = {
     name,

@@ -287,4 +341,6 @@ score: 0,

   }
+  return ret;
 }
 function parseResponse(resp, choiceScores) {
+  var _a;
   let score = 0;

@@ -294,4 +350,5 @@ let error = void 0;

   try {
-    metadata["rationale"] = `${resp.content}`;
-    const choice = JSON.parse(resp.function_call.arguments)["choice"].trim();
+    const args = JSON.parse(resp.function_call.arguments);
+    metadata["rationale"] = (_a = args["reasons"]) == null ? void 0 : _a.join("\n");
+    const choice = args["choice"].trim();
     metadata["choice"] = choice;

@@ -322,3 +379,3 @@ if (choiceScores[choice] !== void 0) {

   const choiceStrings = Object.keys(choiceScores);
-  return async (runtimeArgs) => {
+  const ret = async (runtimeArgs) => {
     var _a, _b;

@@ -349,2 +406,7 @@ const useCoT = (_b = (_a = runtimeArgs.useCoT) != null ? _a : useCoTArg) != null ? _b : true;

   };
+  Object.defineProperty(ret, "name", {
+    value: name,
+    configurable: true
+  });
+  return ret;
 }

@@ -351,0 +413,0 @@ function LLMClassifierFromSpec(name, spec) {

@@ -1,7 +0,7 @@

-import { ChatCompletionFunctions, ChatCompletionRequestMessage, CreateChatCompletionRequestFunctionCall, CreateChatCompletionResponse } from "openai";
+import { ChatCompletion, ChatCompletionCreateParams, ChatCompletionMessage } from "openai/resources/index.mjs";
 export interface CachedLLMParams {
   model: string;
-  messages: ChatCompletionRequestMessage[];
-  functions?: ChatCompletionFunctions[];
-  function_call?: CreateChatCompletionRequestFunctionCall;
+  messages: ChatCompletionMessage[];
+  functions?: ChatCompletionCreateParams.Function[];
+  function_call?: ChatCompletionCreateParams.FunctionCallOption;
   temperature?: number;

@@ -11,4 +11,4 @@ max_tokens?: number;

 export interface ChatCache {
-  get(params: CachedLLMParams): Promise<CreateChatCompletionResponse | null>;
-  set(params: CachedLLMParams, response: CreateChatCompletionResponse): Promise<void>;
+  get(params: CachedLLMParams): Promise<ChatCompletion | null>;
+  set(params: CachedLLMParams, response: ChatCompletion): Promise<void>;
 }
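
`ChatCache` stays a two-method async interface keyed by the request params, now typed against the v4 `ChatCompletion` response. A minimal in-memory sketch, assuming the `CachedLLMParams` and `ChatCache` declarations above (the import path is illustrative):

    import { ChatCompletion } from "openai/resources/index.mjs";
    import type { CachedLLMParams, ChatCache } from "./oai"; // hypothetical path

    class MemoryChatCache implements ChatCache {
      private store = new Map<string, ChatCompletion>();

      async get(params: CachedLLMParams): Promise<ChatCompletion | null> {
        // JSON.stringify as the key works for identical param objects, but is
        // sensitive to property order; a real cache may want a stable serializer.
        return this.store.get(JSON.stringify(params)) ?? null;
      }

      async set(params: CachedLLMParams, response: ChatCompletion): Promise<void> {
        this.store.set(JSON.stringify(params), response);
      }
    }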

@@ -21,2 +21,2 @@ export interface OpenAIAuth {

   cache?: ChatCache;
-} & OpenAIAuth): Promise<CreateChatCompletionResponse>;
+} & OpenAIAuth): Promise<ChatCompletion>;

@@ -11,2 +11,13 @@ "use strict";

 };
+var __rest = (this && this.__rest) || function (s, e) {
+    var t = {};
+    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
+        t[p] = s[p];
+    if (s != null && typeof Object.getOwnPropertySymbols === "function")
+        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
+            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
+                t[p[i]] = s[p[i]];
+        }
+    return t;
+};
Object.defineProperty(exports, "__esModule", { value: true });
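
`__rest` is TypeScript's downlevel helper for object rest destructuring; it gets injected here because the compiled output now uses patterns like `const { messages } = params, rest = __rest(params, ["messages"])` in the hunk below. At the source level that corresponds to:

    // Source-level TypeScript whose compiled output (for pre-ES2018 targets)
    // calls the __rest helper shown above:
    function splitParams(params: { messages: unknown[]; model: string }) {
      const { messages, ...rest } = params; // __rest(params, ["messages"])
      return { messages, rest };
    }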

@@ -16,23 +27,40 @@ exports.cachedChatCompletion = void 0;

 const env_js_1 = require("./env.js");
+const util_js_1 = require("./util.js");
 function cachedChatCompletion(params, options) {
     return __awaiter(this, void 0, void 0, function* () {
         const { cache, openAiApiKey, openAiOrganizationId } = options;
-        const cached = yield (cache === null || cache === void 0 ? void 0 : cache.get(params));
-        if (cached) {
-            return cached;
-        }
-        const config = new openai_1.Configuration({
-            apiKey: openAiApiKey || env_js_1.Env.OPENAI_API_KEY,
-            organization: openAiOrganizationId,
-        });
-        const openai = new openai_1.OpenAIApi(config);
-        if (openai === null) {
-            throw new Error("OPENAI_API_KEY not set");
-        }
-        const completion = yield openai.createChatCompletion(params);
-        const data = completion.data;
-        yield (cache === null || cache === void 0 ? void 0 : cache.set(params, data));
-        return data;
+        return yield (0, util_js_1.currentSpan)().traced("OpenAI Completion", (span) => __awaiter(this, void 0, void 0, function* () {
+            var _a, _b, _c;
+            let cached = false;
+            let ret = yield (cache === null || cache === void 0 ? void 0 : cache.get(params));
+            if (ret) {
+                cached = true;
+            }
+            else {
+                const openai = new openai_1.OpenAI({
+                    apiKey: openAiApiKey || env_js_1.Env.OPENAI_API_KEY,
+                    organization: openAiOrganizationId,
+                });
+                if (openai === null) {
+                    throw new Error("OPENAI_API_KEY not set");
+                }
+                const completion = yield openai.chat.completions.create(params);
+                yield (cache === null || cache === void 0 ? void 0 : cache.set(params, completion));
+                ret = completion;
+            }
+            const { messages } = params, rest = __rest(params, ["messages"]);
+            span.log({
+                input: messages,
+                metadata: Object.assign(Object.assign({}, rest), { cached }),
+                output: ret.choices[0],
+                metrics: {
+                    tokens: (_a = ret.usage) === null || _a === void 0 ? void 0 : _a.total_tokens,
+                    prompt_tokens: (_b = ret.usage) === null || _b === void 0 ? void 0 : _b.prompt_tokens,
+                    completion_tokens: (_c = ret.usage) === null || _c === void 0 ? void 0 : _c.completion_tokens,
+                },
+            });
+            return ret;
+        }));
     });
 }
 exports.cachedChatCompletion = cachedChatCompletion;
exports.cachedChatCompletion = cachedChatCompletion;

@@ -0,1 +1,2 @@

+import { ChatCompletionMessage } from "openai/resources/index.mjs";
 import {

@@ -7,3 +8,2 @@ Battle,

 } from "../js/llm";
-import { ChatCompletionRequestMessage } from "openai";
 import { ChatCache } from "../js/oai";

@@ -22,3 +22,3 @@

-const messages: ChatCompletionRequestMessage[] = [
+const messages: ChatCompletionMessage[] = [
   {

@@ -25,0 +25,0 @@ role: "system",

 {
   "name": "autoevals",
-  "version": "0.0.25",
+  "version": "0.0.26",
   "description": "Universal library for evaluating AI models",

@@ -53,5 +53,5 @@ "main": "jsdist/bundle.js",

"mustache": "^4.2.0",
"openai": "^3.3.0",
"openai": "^4.12.1",
"tsx": "^3.12.7"
}
}
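
The only dependency change is `openai` from `^3.3.0` to `^4.12.1`, the SDK major-version migration behind every code change above; the package itself moves just a patch version, from 0.0.25 to 0.0.26.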

