
@empiricalrun/llm - npm Package Compare versions

Comparing version 0.9.3 to 0.9.4

CHANGELOG.md

```diff
 # @empiricalrun/llm
 
+## 0.9.4
+
+### Patch Changes
+
+- fcacf06: feat: add support for poms in master agent
+
 ## 0.9.3
```

dist/index.d.ts

```diff
@@ -11,3 +11,2 @@ import { LangfuseGenerationClient, LangfuseSpanClient, LangfuseTraceClient } from "langfuse";
     private _providerApiKey;
-    private _traceName;
     private _usedTokens;
@@ -18,5 +17,4 @@ private _defaultModel;
     promptTokens: number;
-    constructor({ trace, provider, providerApiKey, traceName, maxTokens, defaultModel, }: {
+    constructor({ trace, provider, providerApiKey, maxTokens, defaultModel, }: {
         trace?: TraceClient;
-        traceName?: string;
         provider: LLMProvider;
@@ -27,3 +25,3 @@ providerApiKey?: string;
     });
-    createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, }: {
+    createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, traceName, }: {
         tools?: OpenAI.Chat.Completions.ChatCompletionTool[];
@@ -34,2 +32,3 @@ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
         trace?: TraceClient;
+        traceName?: string;
         responseFormat?: OpenAI.ChatCompletionCreateParamsNonStreaming["response_format"];
@@ -36,0 +35,0 @@ }): Promise<OpenAI.Chat.Completions.ChatCompletionMessage | undefined>;
```
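Taken together, these type changes move the `traceName` option from the client constructor to each `createChatCompletion` call. A minimal before/after sketch, assuming the class is exported as `LLM` and that `"openai"` is a valid `LLMProvider` value (neither is shown in this diff):

```ts
import { LLM } from "@empiricalrun/llm"; // assumed export name; the diff only shows the class shape

// 0.9.3: the trace name was fixed for the client at construction time
const clientOld = new LLM({
  provider: "openai", // hypothetical LLMProvider value
  traceName: "checkout-smoke-test", // no longer accepted in 0.9.4
});

// 0.9.4: the trace name is chosen per call and defaults to "get-llm-result"
const client = new LLM({ provider: "openai" });
const message = await client.createChatCompletion({
  messages: [{ role: "user", content: "Summarize the last run" }],
  traceName: "checkout-smoke-test", // new optional per-call field
});
```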

dist/index.js

```diff
@@ -19,3 +19,2 @@ "use strict";
     _providerApiKey;
-    _traceName;
     _usedTokens = 0;
@@ -27,11 +26,10 @@ _defaultModel;
     promptTokens = 0;
-    constructor({ trace, provider, providerApiKey, traceName = "get-llm-result", maxTokens, defaultModel, }) {
+    constructor({ trace, provider, providerApiKey, maxTokens, defaultModel, }) {
         this._trace = trace;
         this._provider = provider;
         this._providerApiKey = providerApiKey;
-        this._traceName = traceName;
         this._maxTokens = maxTokens ?? 1000000;
         this._defaultModel = defaultModel;
     }
-    async createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, }) {
+    async createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, traceName = "get-llm-result", }) {
         if (this._usedTokens >= this._maxTokens) {
@@ -50,3 +48,3 @@ throw new Error(`Exceeded max tokens limit of ${this._maxTokens} tokens. Please try again later.`);
     const generation = (trace || this._trace)?.generation({
-        name: this._traceName,
+        name: traceName,
         model,
@@ -53,0 +51,0 @@ modelParameters,
```
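The compiled output also shows the client enforcing a cumulative token budget: `maxTokens` defaults to 1000000, and `createChatCompletion` throws once `_usedTokens` reaches it. A hedged sketch of handling that error; the message text comes from the diff above, the rest is illustrative:

```ts
try {
  await client.createChatCompletion({
    messages: [{ role: "user", content: "Summarize the last run" }],
  });
} catch (err) {
  // Error text taken from dist/index.js above; matching on it is a heuristic.
  if (err instanceof Error && err.message.startsWith("Exceeded max tokens limit")) {
    // Budget exhausted: create a fresh client, or construct one with a larger maxTokens.
  } else {
    throw err;
  }
}
```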

package.json

```diff
 {
     "name": "@empiricalrun/llm",
-    "version": "0.9.3",
+    "version": "0.9.4",
     "main": "dist/index.js",
@@ -5,0 +5,0 @@ "exports": {
```

