Comparing axiom-ai version 1.1.0 to 2.0.0
@@ -7,3 +7,7 @@ import { OpenAIApi } from "openai";
   excludeChoices?: boolean;
+  sendType?: "batch" | "immediate";
 }
-export declare function withAxiom(openai: OpenAIApi, opts?: WithAxiomOptions): OpenAIApi;
+export declare function withAxiom(openai: OpenAIApi, opts?: WithAxiomOptions): {
+    openai: OpenAIApi;
+    flush: Function;
+};
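This declaration change is breaking: `withAxiom` no longer returns the patched `OpenAIApi` directly, but an object carrying the client plus a `flush` function. A minimal migration sketch for a call site (the call site itself is not part of this diff; names follow the examples further down):

```ts
import { Configuration, OpenAIApi } from "openai";
import { withAxiom } from "axiom-ai";

const configuration = new Configuration({ apiKey: process.env.OPENAI_API_KEY });

// v1.1.0: const openai = withAxiom(new OpenAIApi(configuration));
// v2.0.0: destructure the client and the new flush function.
const { openai, flush } = withAxiom(new OpenAIApi(configuration));

// With the default batched sending, buffered events must be flushed
// before the process exits.
await flush();
```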
@@ -11,11 +11,14 @@ "use strict";
 };
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.withAxiom = void 0;
-const axiom_node_1 = __importDefault(require("@axiomhq/axiom-node"));
+const shared_1 = require("./shared");
 function withAxiom(openai, opts) {
-    const axiom = new axiom_node_1.default({ token: opts === null || opts === void 0 ? void 0 : opts.token });
     const dataset = (opts === null || opts === void 0 ? void 0 : opts.dataset) || process.env.AXIOM_DATASET;
+    let axiom;
+    if ((opts === null || opts === void 0 ? void 0 : opts.sendType) === "immediate") {
+        axiom = new shared_1.ImmediateAxiomClient(opts === null || opts === void 0 ? void 0 : opts.token, dataset);
+    }
+    else {
+        axiom = new shared_1.BatchedAxiomClient(opts === null || opts === void 0 ? void 0 : opts.token, dataset);
+    }
     const createCompletion = openai.createCompletion;
@@ -35,3 +38,3 @@ openai.createCompletion = (request, options) => __awaiter(this, void 0, void 0, function* () {
         catch (e) {
-            yield axiom.ingestEvents(dataset, {
+            yield axiom.ingestEvents({
                 _time: start.toISOString(),
@@ -50,3 +53,3 @@ type: "completion",
         transformedResponse.created = new Date(transformedResponse.created * 1000).toISOString();
-        yield axiom.ingestEvents(dataset, {
+        yield axiom.ingestEvents({
             _time: start.toISOString(),
@@ -74,3 +77,3 @@ type: "completion",
         catch (e) {
-            yield axiom.ingestEvents(dataset, {
+            yield axiom.ingestEvents({
                 _time: start.toISOString(),
@@ -89,3 +92,3 @@ type: "chatCompletion",
         transformedResponse.created = new Date(transformedResponse.created * 1000).toISOString();
-        yield axiom.ingestEvents(dataset, {
+        yield axiom.ingestEvents({
             _time: start.toISOString(),
@@ -99,4 +102,4 @@ type: "chatCompletion",
     });
-    return openai;
+    return { openai, flush: axiom.flush.bind(axiom) };
 }
 exports.withAxiom = withAxiom;
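The `./shared` module that both builds now import is not included in this diff. Judging only from the call sites above (dataset bound at construction, `ingestEvents` taking just the events, `flush` awaited before exit), its surface is presumably close to the following sketch; the class internals here are assumptions, not the package's actual code:

```ts
import Client from "@axiomhq/axiom-node";

// Presumed common interface: the dataset is fixed at construction time,
// so ingestEvents no longer takes a dataset argument.
export interface AxiomClient {
  ingestEvents(events: object | object[]): Promise<void>;
  flush(): Promise<void>;
}

// Sends every event to Axiom as soon as it arrives; flush is a no-op.
export class ImmediateAxiomClient implements AxiomClient {
  private client: Client;
  constructor(token: string | undefined, private dataset: string) {
    this.client = new Client({ token });
  }
  async ingestEvents(events: object | object[]): Promise<void> {
    await this.client.ingestEvents(this.dataset, events);
  }
  async flush(): Promise<void> {}
}

// Buffers events in memory and ships them in one request on flush.
export class BatchedAxiomClient implements AxiomClient {
  private client: Client;
  private buffer: object[] = [];
  constructor(token: string | undefined, private dataset: string) {
    this.client = new Client({ token });
  }
  async ingestEvents(events: object | object[]): Promise<void> {
    this.buffer.push(...(Array.isArray(events) ? events : [events]));
  }
  async flush(): Promise<void> {
    if (this.buffer.length === 0) return;
    await this.client.ingestEvents(this.dataset, this.buffer);
    this.buffer = [];
  }
}
```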
@@ -11,3 +11,7 @@ import * as dotenv from 'dotenv';
 });
-const openai = withAxiom(new OpenAIApi(configuration));
+const { openai, flush } = withAxiom(new OpenAIApi(configuration));
+process.on("beforeExit", async () => {
+  await flush()
+  process.exit(0);
+});
 const completion = await openai.createCompletion({
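One caveat about the example's shutdown hook: Node's `beforeExit` event does not fire on an explicit `process.exit()` or on abrupt termination, so batched events can still be dropped on those paths. When durability matters more than batching throughput, the new `sendType` option can be set to `"immediate"` instead; a sketch of that variant of the example (same assumed surroundings):

```ts
const { openai } = withAxiom(new OpenAIApi(configuration), {
  sendType: "immediate", // each event is sent as soon as it is recorded
});
// No flush handler needed: nothing is buffered.
```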
 import { OpenAIApi, CreateCompletionRequest, CreateChatCompletionRequest } from "openai";
 import { AxiosRequestConfig } from "axios";
-import Client from "@axiomhq/axiom-node";
+import { AxiomClient, BatchedAxiomClient, ImmediateAxiomClient } from './shared';
@@ -10,8 +10,15 @@ export interface WithAxiomOptions {
   excludeChoices?: boolean;
+  sendType?: "batch"|"immediate";
 }
-export function withAxiom(openai: OpenAIApi, opts?: WithAxiomOptions): OpenAIApi {
-  const axiom = new Client({ token: opts?.token });
+export function withAxiom(openai: OpenAIApi, opts?: WithAxiomOptions): { openai: OpenAIApi, flush: Function } {
   const dataset = opts?.dataset || process.env.AXIOM_DATASET;
+  let axiom: AxiomClient;
+  if (opts?.sendType === "immediate") {
+    axiom = new ImmediateAxiomClient(opts?.token, dataset!);
+  } else {
+    axiom = new BatchedAxiomClient(opts?.token, dataset!);
+  }
   const createCompletion = openai.createCompletion;
@@ -32,3 +39,3 @@ openai.createCompletion = async (request: CreateCompletionRequest, options?: AxiosRequestConfig<any>) => {
     } catch (e: any) {
-      await axiom.ingestEvents(dataset!, {
+      await axiom.ingestEvents({
        _time: start.toISOString(),
@@ -49,3 +56,3 @@ type: "completion",
     transformedResponse.created = new Date(transformedResponse.created * 1000).toISOString();
-    await axiom.ingestEvents(dataset!, {
+    await axiom.ingestEvents({
      _time: start.toISOString(),
@@ -76,3 +83,3 @@ type: "completion",
     } catch (e: any) {
-      await axiom.ingestEvents(dataset!, {
+      await axiom.ingestEvents({
       _time: start.toISOString(),
@@ -93,3 +100,3 @@ type: "chatCompletion",
     transformedResponse.created = new Date(transformedResponse.created * 1000).toISOString();
-    await axiom.ingestEvents(dataset!, {
+    await axiom.ingestEvents({
      _time: start.toISOString(),
@@ -105,3 +112,4 @@ type: "chatCompletion",
   });
-  return openai;
+  return { openai, flush: axiom.flush.bind(axiom) };
 }
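A small typing note on the new return value: `flush` is declared as the loose `Function` type. The examples `await` it, which suggests it resolves a promise; consumers who want that reflected in the types can narrow it at the call site (a sketch under that assumption):

```ts
const { openai, flush } = withAxiom(new OpenAIApi(configuration)) as {
  openai: OpenAIApi;
  flush: () => Promise<void>;
};
```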
 {
   "name": "axiom-ai",
-  "version": "1.1.0",
+  "version": "2.0.0",
   "description": "The official package to send events from AI libraries to Axiom.",
@@ -45,2 +45,2 @@ "scripts": {
   }
 }
@@ -36,7 +36,16 @@ <picture>
 });
-const openai = withAxiom(new OpenAIApi(configuration), {
+const { openai, flush } = withAxiom(new OpenAIApi(configuration), {
   token: process.env.AXIOM_TOKEN,
   dataset: process.env.AXIOM_DATASET,
   // excludePromptOrMessages: false,
   // excludeChoices: false,
+  // sendType: "batch", // or "immediate" for sending events synchronously
 });
+// We need to flush events before exit
+process.on("beforeExit", async () => {
+  await flush()
+  process.exit(0);
+});
 const completion = await openai.createCompletion({
@@ -81,5 +90,1 @@ model: "text-davinci-003",
 ```
-If you pass `excludePromptOrMessages: true` and/or `excludeChoices: true` to
-the `withAxiom` options it won't send the prompt/messages or choices,
-respectively.