@empiricalrun/llm
Comparing version 0.9.3 to 0.9.4
# @empiricalrun/llm

## 0.9.4

### Patch Changes

- fcacf06: feat: add support for poms in master agent

## 0.9.3
The shipped type declarations drop `traceName` from the constructor and add it to `createChatCompletion`:

```diff
@@ -11,3 +11,2 @@ import { LangfuseGenerationClient, LangfuseSpanClient, LangfuseTraceClient } from "langfuse";
     private _providerApiKey;
-    private _traceName;
     private _usedTokens;
@@ -18,5 +17,4 @@ private _defaultModel;
     promptTokens: number;
-    constructor({ trace, provider, providerApiKey, traceName, maxTokens, defaultModel, }: {
+    constructor({ trace, provider, providerApiKey, maxTokens, defaultModel, }: {
         trace?: TraceClient;
-        traceName?: string;
         provider: LLMProvider;
@@ -27,3 +25,3 @@ providerApiKey?: string;
     });
-    createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, }: {
+    createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, traceName, }: {
         tools?: OpenAI.Chat.Completions.ChatCompletionTool[];
@@ -34,2 +32,3 @@ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
         trace?: TraceClient;
+        traceName?: string;
         responseFormat?: OpenAI.ChatCompletionCreateParamsNonStreaming["response_format"];
     }): Promise<OpenAI.Chat.Completions.ChatCompletionMessage | undefined>;
```
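Taken together, these declaration changes move trace naming from object construction to each completion call, so one shared client can label its Langfuse generations differently per call site. A minimal sketch of the 0.9.4 shape; the exported class name (`LLM`), the provider value, and the model name here are assumptions, since the diff shows only the member signatures:

```typescript
// Hypothetical usage sketch of the 0.9.4 API. The class name "LLM", the
// provider string "openai", and the model name are assumptions; the diff
// above shows only the member signatures.
import { LLM } from "@empiricalrun/llm";

const llm = new LLM({
  provider: "openai", // LLMProvider; the exact union members are not shown in the diff
  providerApiKey: process.env.OPENAI_API_KEY,
  maxTokens: 500000, // optional; the implementation defaults to 1000000
  // traceName is no longer accepted here as of 0.9.4
});

// traceName now travels with each call instead of being fixed at construction.
const reply = await llm.createChatCompletion({
  messages: [{ role: "user", content: "Hello" }],
  model: "gpt-4o",
  traceName: "my-call-site", // optional; defaults to "get-llm-result"
});
```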
The compiled implementation changes to match, moving the `"get-llm-result"` default onto the method parameter:

```diff
@@ -19,3 +19,2 @@ "use strict";
     _providerApiKey;
-    _traceName;
     _usedTokens = 0;
@@ -27,11 +26,10 @@ _defaultModel;
     promptTokens = 0;
-    constructor({ trace, provider, providerApiKey, traceName = "get-llm-result", maxTokens, defaultModel, }) {
+    constructor({ trace, provider, providerApiKey, maxTokens, defaultModel, }) {
         this._trace = trace;
         this._provider = provider;
         this._providerApiKey = providerApiKey;
-        this._traceName = traceName;
         this._maxTokens = maxTokens ?? 1000000;
         this._defaultModel = defaultModel;
     }
-    async createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, }) {
+    async createChatCompletion({ messages, modelParameters, model, tools, trace, responseFormat, traceName = "get-llm-result", }) {
         if (this._usedTokens >= this._maxTokens) {
@@ -50,3 +48,3 @@ throw new Error(`Exceeded max tokens limit of ${this._maxTokens} tokens. Please try again later.`);
     const generation = (trace || this._trace)?.generation({
-        name: this._traceName,
+        name: traceName,
         model,
         modelParameters,
```
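Note that the compiled output keeps the same `"get-llm-result"` default, now applied as a parameter default on `createChatCompletion` rather than stored on the instance, so callers that never set `traceName` see identical Langfuse generation names in 0.9.3 and 0.9.4.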
Finally, the package manifest bumps the version:

```diff
 {
     "name": "@empiricalrun/llm",
-    "version": "0.9.3",
+    "version": "0.9.4",
     "main": "dist/index.js",
     "exports": {
```
License Policy Violation: this package is not allowed per your license policy. Review the package's license to ensure compliance. Found 1 instance in 1 package.