@empiricalrun/llm
Comparing version 0.9.5 to 0.9.6
CHANGELOG.md

 # @empiricalrun/llm
+## 0.9.6
+### Patch Changes
+- e20abfb: feat: query now returns structured outputs
 ## 0.9.5
@@ -4,0 +10,0 @@
index.d.ts

@@ -1,2 +0,5 @@
-export declare function query(base64Image: string, instruction: string): Promise<string>;
+import { z, ZodType } from "zod";
+type ExtractType<T> = T extends ZodType ? z.infer<T> : never;
+export declare function query<T extends z.ZodType>(base64Image: string, instruction: string, responseFormat?: T): Promise<ExtractType<T>>;
+export {};
 //# sourceMappingURL=index.d.ts.map
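The third parameter is optional; at runtime it defaults to `z.string()` (see the compiled implementation below), so existing call sites keep getting a plain string back. A usage sketch against the new signature; the import path and the example schema are assumptions, not part of this diff:

```ts
import { z } from "zod";
// Assumed import path; use wherever @empiricalrun/llm actually exports query().
import { query } from "@empiricalrun/llm/vision";

// Example schema describing the structured answer we want back.
const buttonState = z.object({
  label: z.string(),
  enabled: z.boolean(),
});

async function inspect(base64Image: string) {
  // Without a schema, responseFormat defaults to z.string(),
  // so a plain string comes back as in 0.9.5.
  const caption = await query(base64Image, "Describe this screenshot in one sentence");

  // With a schema, the promise resolves to z.infer<typeof buttonState>.
  const state = await query(
    base64Image,
    "Return the label and enabled state of the submit button",
    buttonState
  );
  console.log(caption, state.label, state.enabled);
}
```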
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.query = void 0; | ||
const zod_1 = require("openai/helpers/zod"); | ||
const zod_2 = require("zod"); | ||
const __1 = require("../.."); | ||
const utils_1 = require("../utils"); | ||
async function query(base64Image, instruction) { | ||
async function query(base64Image, instruction, responseFormat = zod_2.z.string()) { | ||
const llm = new __1.LLM({ | ||
@@ -13,2 +15,16 @@ provider: "openai", | ||
think it through, and then respond with the "answer".`; | ||
const isResponseString = responseFormat instanceof zod_2.z.ZodString; | ||
let extendedResponseFormat; | ||
if (isResponseString) { | ||
extendedResponseFormat = zod_2.z.object({ | ||
explanation: zod_2.z.string(), | ||
answer: zod_2.z.string(), | ||
}); | ||
} | ||
else { | ||
extendedResponseFormat = zod_2.z.object({ | ||
explanation: zod_2.z.string(), | ||
answer: responseFormat, | ||
}); | ||
} | ||
const llmResponse = await llm.createChatCompletion({ | ||
@@ -39,38 +55,12 @@ messages: [
             temperature: 0.5,
-            tool_choice: {
-                type: "function",
-                function: { name: "send_response" },
-            },
         },
-        tools: [
-            {
-                type: "function",
-                function: {
-                    name: "send_response",
-                    description: "Use this tool to send your response.",
-                    parameters: {
-                        type: "object",
-                        properties: {
-                            explanation: {
-                                type: "string",
-                                description: "Your explanation to find the answer.",
-                            },
-                            answer: {
-                                type: "string",
-                                description: "A precise and succinct answer to the extract text instruction.",
-                            },
-                        },
-                        required: ["explanation", "answer"],
-                    },
-                },
-            },
-        ],
+        responseFormat: (0, zod_1.zodResponseFormat)(extendedResponseFormat, "your_response"),
     });
-    if (!llmResponse) {
-        throw new Error("Failed to extract text from image");
+    if (!llmResponse || !llmResponse.content) {
+        throw new Error("Query failed: no response content from LLM");
     }
-    const response = llmResponse.tool_calls[0];
-    const { answer } = JSON.parse(response.function.arguments);
-    return answer;
+    const response = llmResponse.content;
+    const jsonData = JSON.parse(response);
+    return responseFormat.parse(jsonData.answer);
 }
 exports.query = query;
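In short, the 0.9.6 build drops the `send_response` tool call, wraps whatever schema the caller passes in an `{ explanation, answer }` object, requests structured output via OpenAI's `zodResponseFormat` helper, and validates only the `answer` field against the caller's schema before returning it. A minimal standalone sketch of that wrap-and-validate pattern, with hypothetical helper names rather than the package's internals:

```ts
import { z, ZodType } from "zod";

// Wrap the caller's schema so the model also has to return its reasoning.
function buildExtendedSchema<T extends ZodType>(answerSchema: T) {
  return z.object({
    explanation: z.string(),
    answer: answerSchema,
  });
}

// Parse the model's JSON content and validate only the answer field,
// mirroring `responseFormat.parse(jsonData.answer)` in the compiled code above.
function parseAnswer<T extends ZodType>(content: string, answerSchema: T): z.infer<T> {
  const parsed = JSON.parse(content) as { explanation: string; answer: unknown };
  return answerSchema.parse(parsed.answer);
}
```

One consequence of this design: callers that pass `z.string()` (or nothing) keep the 0.9.5 string behaviour, while any other zod schema is enforced at runtime, so a malformed model response throws instead of silently returning untyped JSON.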
package.json

 {
   "name": "@empiricalrun/llm",
-  "version": "0.9.5",
+  "version": "0.9.6",
   "main": "dist/index.js",
@@ -29,3 +29,4 @@ "exports": {
     "openai": "^4.67.0",
-    "portkey-ai": "^1.3.2"
+    "portkey-ai": "^1.3.2",
+    "zod": "^3.23.8"
   },
@@ -32,0 +33,0 @@ "devDependencies": {
+ Added zod@^3.23.8
+ Added zod@3.24.1 (transitive)