Socket
Socket
Sign in · Demo · Install

@ai-d/aid

Package Overview
Dependencies
31
Maintainers
1
Versions
9
Alerts
File Explorer

Advanced tools

Install Socket

Detect and block malicious and high-risk dependencies

Install

Comparing version 0.0.1 to 0.0.2

38

dist/index.d.ts
import { OpenAI } from 'openai';
import { z, ZodIssue } from 'zod';
import { ZodIssue, z } from 'zod';

@@ -9,10 +9,11 @@ type PromiseOr<T> = T | Promise<T>;

}[]) => PromiseOr<string>;
type AidInput = string;
/**
* Options for a custom Aid Task.
*/
interface AidTaskOptions<In extends string, Out> {
interface AidTaskOptions<Out> {
/**
* The few-shot prompt examples.
*/
examples?: [string, Out][];
examples?: [AidInput, Out][];
/**

@@ -28,3 +29,16 @@ * The output schema strategy.

check?: boolean;
/**
* The default input (last user message) if no input is provided.
*/
default?: AidInput;
}
type AidTaskRunner<Out> = (input?: AidInput) => Promise<{
result: Out;
errors: ZodIssue[];
}>;
type MessageRole = "system" | "user" | "assistant";
type Message<R extends MessageRole = MessageRole> = {
role: R;
content: string;
};

@@ -34,9 +48,17 @@ declare class Aid {

constructor(q: LLMQuery);
task<In extends string, Out>(goal: string, expected: z.ZodType<Out>, opt?: AidTaskOptions<In, Out>): (input: In) => Promise<{
result: Out;
errors: ZodIssue[];
}>;
task<Out>(goal: string, expected: z.ZodType<Out>, opt?: AidTaskOptions<Out>): AidTaskRunner<Out>;
static from(openai: OpenAI, param: Omit<OpenAI.Chat.ChatCompletionCreateParamsNonStreaming, "messages">): Aid;
}
export { Aid, type AidTaskOptions, type LLMQuery, type PromiseOr };
interface CohereQueryOptions {
model: string;
temperature: number;
connectors: unknown[];
documents: unknown[];
prompt_truncation: string;
}
declare const CohereQuery: (token: string, opt: Partial<CohereQueryOptions>) => LLMQuery;
declare const OpenAIQuery: (openai: OpenAI, param: Omit<OpenAI.Chat.ChatCompletionCreateParamsNonStreaming, "messages">) => LLMQuery;
export { Aid, type AidInput, type AidTaskOptions, type AidTaskRunner, CohereQuery, type CohereQueryOptions, type LLMQuery, type Message, type MessageRole, OpenAIQuery, type PromiseOr };

@@ -33,3 +33,5 @@ "use strict";

__export(src_exports, {
Aid: () => Aid
Aid: () => Aid,
CohereQuery: () => CohereQuery,
OpenAIQuery: () => OpenAIQuery
});

@@ -39,6 +41,32 @@ module.exports = __toCommonJS(src_exports);

// src/aid.ts
var import_debug = __toESM(require("debug"));
var import_debug2 = __toESM(require("debug"));
var import_zod_to_json_schema = require("zod-to-json-schema");
var import_zod_to_ts = require("zod-to-ts");
var log = (0, import_debug.default)("aid");
// src/query/openai.ts
var import_debug = __toESM(require("debug"));
var log = (0, import_debug.default)("aid:openai");
var OpenAIQuery = (openai, param) => {
const q = async (messages) => {
const payload = {
temperature: 0,
...param,
messages,
response_format: { type: "json_object" }
};
log("payload", payload);
const res = await openai.chat.completions.create(payload);
log({ id: res.id, fingerprint: res.system_fingerprint, usage: res.usage });
const text = res.choices[0].message.content;
if (!text) {
throw new Error("No text returned from OpenAI");
}
log("text", text);
return text;
};
return q;
};
// src/aid.ts
var log2 = (0, import_debug2.default)("aid");
var Aid = class _Aid {

@@ -49,3 +77,4 @@ constructor(q) {

task(goal, expected, opt) {
return async (input) => {
const default_input = (opt == null ? void 0 : opt.default) ?? "Follows the instruction and give me the disired output.";
return async (input = default_input) => {
const messages = [

@@ -64,5 +93,5 @@ {

messages.push({ role: "user", content: input });
log("messages", messages);
log2("messages", messages);
const output = await this.q(messages);
log("output", output);
log2("output", output);
const json = JSON.parse(output);

@@ -81,23 +110,48 @@ if (!(opt == null ? void 0 : opt.check)) {

static from(openai, param) {
const q = async (messages) => {
const payload = {
temperature: 0,
...param,
messages,
response_format: { type: "json_object" }
};
const res = await openai.chat.completions.create(payload);
log({ fingerprint: res.system_fingerprint, usage: res.usage });
const text = res.choices[0].message.content;
if (!text) {
throw new Error("No text returned from OpenAI");
}
return text;
};
const q = OpenAIQuery(openai, param);
return new _Aid(q);
}
};
// src/query/cohere.ts
var import_debug3 = __toESM(require("debug"));
var log3 = (0, import_debug3.default)("aid:cohere");
var CohereQuery = (token, opt) => {
const q = async (messages) => {
const message = messages[messages.length - 1].content;
const chat_history = messages.slice(0, -1).map((message2) => ({
role: message2.role === "user" ? "User" : "Chatbot",
message: message2.content
}));
const payload = {
model: "command",
temperature: 0,
connectors: [],
documents: [],
prompt_truncation: "auto",
...opt,
message,
chat_history
};
log3("payload", payload);
const res = await fetch("https://api.cohere.ai/v1/chat", {
method: "POST",
headers: {
Authorization: `Bearer ${token}`,
"Content-Type": "application/json"
},
body: JSON.stringify(payload)
});
log3("status", res.status);
const result = await res.json();
log3("result", result);
return result.text;
};
return q;
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
Aid
Aid,
CohereQuery,
OpenAIQuery
});
{
"name": "@ai-d/aid",
"description": "Aid provides a structured and type-safe way to interact with LLMs.",
"version": "0.0.1",
"version": "0.0.2",
"author": "JacobLinCool <jacoblincool@gmail.com> (https://jacoblin.cool)",

@@ -6,0 +6,0 @@ "license": "MIT",

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc