@empiricalrun/llm
Comparing version 0.9.25 to 0.9.26
# @empiricalrun/llm

## 0.9.26

### Patch Changes

- bd5c945: fix: run update scenario prompts on claude

## 0.9.25
Type declarations:

```diff
@@ -7,3 +7,3 @@ import OpenAI from "openai";
 export type LLMProvider = "openai" | "google" | "anthropic";
-export type LLMModel = "gpt-3.5-turbo" | "gpt-4" | "gpt-4o" | "gpt-4o-mini" | "gpt-4o-2024-08-06" | "claude-3-5-sonnet-20240620" | "gemini-1.5-flash-latest" | "gemini-1.5-pro-latest" | "o1-preview" | "o1-mini";
+export type LLMModel = "gpt-3.5-turbo" | "gpt-4" | "gpt-4o" | "gpt-4o-mini" | "gpt-4o-2024-08-06" | "claude-3-5-sonnet-latest" | "gemini-1.5-flash-latest" | "gemini-1.5-pro-latest" | "o1-preview" | "o1-mini";
 export interface ModelParameters {
@@ -10,0 +10,0 @@ frequency_penalty?: number | null;
```
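The only API-visible change in 0.9.26 is the Claude entry in the `LLMModel` union: the dated snapshot id `claude-3-5-sonnet-20240620` is replaced by the rolling alias `claude-3-5-sonnet-latest`, so consumer code that pins the old string literal will no longer type-check. The sketch below is illustrative only and is not the package's runtime API: the two unions are copied from the declarations above, and `inferProvider` is a hypothetical helper added here to show how a model id might be mapped to its provider.

```typescript
// Illustrative sketch only. LLMProvider and LLMModel are copied from the
// 0.9.26 type declarations; inferProvider is a hypothetical helper and is
// not part of @empiricalrun/llm.
type LLMProvider = "openai" | "google" | "anthropic";
type LLMModel =
  | "gpt-3.5-turbo"
  | "gpt-4"
  | "gpt-4o"
  | "gpt-4o-mini"
  | "gpt-4o-2024-08-06"
  | "claude-3-5-sonnet-latest"
  | "gemini-1.5-flash-latest"
  | "gemini-1.5-pro-latest"
  | "o1-preview"
  | "o1-mini";

function inferProvider(model: LLMModel): LLMProvider {
  if (model.startsWith("claude-")) return "anthropic";
  if (model.startsWith("gemini-")) return "google";
  return "openai"; // gpt-* and o1-* ids
}

// const old: LLMModel = "claude-3-5-sonnet-20240620"; // no longer compiles on 0.9.26
const model: LLMModel = "claude-3-5-sonnet-latest";
console.log(inferProvider(model)); // "anthropic"
```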
package.json:

```diff
 {
   "name": "@empiricalrun/llm",
-  "version": "0.9.25",
+  "version": "0.9.26",
   "main": "dist/index.js",
@@ -5,0 +5,0 @@ "exports": {
```
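Aside from the type change, the release is just the version bump shown above. A consumer that wants the Claude prompt fix can pick it up by updating the dependency range; a minimal example, assuming an exact pin is wanted:

```json
{
  "dependencies": {
    "@empiricalrun/llm": "0.9.26"
  }
}
```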