New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details
Socket
Book a DemoSign in
Socket

@aria-cli/types

Package Overview
Dependencies
Maintainers
1
Versions
22
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@aria-cli/types - npm Package Compare versions

Comparing version
1.0.19
to
1.0.31
+49
dist-cjs/errors.js
"use strict";
/**
* Error narrowing utilities for replacing `catch (err: any)` patterns.
* @module @aria/types/errors
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.getErrorMessage = getErrorMessage;
exports.getErrorStatusCode = getErrorStatusCode;
exports.isNodeError = isNodeError;
/**
* Extract a human-readable message from an unknown caught value.
*
* Replaces the pattern:
* `catch (err: any) { ... err.message ... }`
* with:
* `catch (err: unknown) { ... getErrorMessage(err) ... }`
*/
function getErrorMessage(err) {
if (err instanceof Error)
return err.message;
if (typeof err === "string")
return err;
return String(err);
}
/**
* Extract an HTTP-style status code from an unknown caught value.
*
* Replaces the pattern:
* `(error as any).statusCode`
* with:
* `getErrorStatusCode(error)`
*/
function getErrorStatusCode(err) {
if (typeof err === "object" &&
err !== null &&
"statusCode" in err &&
typeof err.statusCode === "number") {
return err.statusCode;
}
return undefined;
}
/**
* Type guard: is this caught value an Error with a `.code` property?
* Common for Node.js system errors (ENOENT, ECONNREFUSED, etc.)
*/
function isNodeError(err) {
return (err instanceof Error && "code" in err && typeof err.code === "string");
}
//# sourceMappingURL=errors.js.map
"use strict";
/**
* Lightweight leveled logger for ARIA.
*
* Respects `ARIA_LOG_LEVEL` env var (debug | info | warn | error | silent).
* Default level: "info" — debug messages are suppressed in normal usage.
*
* Architecture: logs dispatch through pluggable sinks. The default console
* sink writes to stdout/stderr with ANSI styling. Execution contexts that
* lack a terminal (daemon, headless server) replace or supplement it with
* durable sinks (e.g., JSONL file sink). This decoupling makes it impossible
* for diagnostic output to silently go to /dev/null — every context
* configures the sink that matches its output channel.
*
* Usage:
* import { log } from "@aria-cli/types";
* log.debug("[PhaseTimer] ..."); // only prints when ARIA_LOG_LEVEL=debug
* log.info("[runner] ..."); // prints at info level and above
* log.warn("[runner] ..."); // prints at warn level and above
* log.error("[runner] ..."); // always prints (unless level > error)
*
* Sink configuration (daemon example):
* import { log } from "@aria-cli/types";
* log.addSink(myFileLogSink);
* log.removeConsoleSink(); // stdio is /dev/null in detached mode
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.log = void 0;
const DIM_GRAY = "\x1b[2m\x1b[90m";
const CYAN = "\x1b[36m";
const RESET = "\x1b[0m";
const LEVEL_ORDER = {
debug: 0,
info: 1,
warn: 2,
error: 3,
silent: 4,
};
function resolveLevel() {
const env = typeof process !== "undefined" ? process.env.ARIA_LOG_LEVEL : undefined;
if (env && env in LEVEL_ORDER)
return env;
return "info";
}
let currentLevel = resolveLevel();
function enabled(level) {
return LEVEL_ORDER[level] >= LEVEL_ORDER[currentLevel];
}
function isBrokenPipeError(error) {
if (!error || typeof error !== "object") {
return false;
}
const maybe = error;
return maybe.code === "EPIPE" || maybe.code === "ERR_STREAM_DESTROYED";
}
function writeSafely(method, args) {
try {
method(...args);
}
catch (error) {
if (isBrokenPipeError(error)) {
return;
}
throw error;
}
}
function styleSystemDebugArgs(args) {
if (args.length === 0)
return args;
const [first, ...rest] = args;
if (typeof first !== "string")
return args;
if (!first.startsWith("["))
return args;
return [`${DIM_GRAY}${first}${RESET}`, ...rest];
}
function styleSystemInfoArgs(args) {
if (args.length === 0)
return args;
const [first, ...rest] = args;
if (typeof first !== "string")
return args;
if (!first.startsWith("["))
return args;
return [`${CYAN}${first}${RESET}`, ...rest];
}
// ── Sinks ────────────────────────────────────────────────────────
/** Default console sink — applies ANSI styling for terminal readability. */
const consoleSink = {
write(level, args) {
let styled;
let method;
switch (level) {
case "debug":
styled = styleSystemDebugArgs(args);
method = console.debug;
break;
case "info":
styled = styleSystemInfoArgs(args);
method = console.info;
break;
case "warn":
styled = args;
method = console.warn;
break;
default:
styled = args;
method = console.error;
break;
}
writeSafely(method, styled);
},
};
const sinks = [consoleSink];
// ── Public API ───────────────────────────────────────────────────
exports.log = {
debug(...args) {
if (enabled("debug"))
for (const s of sinks)
s.write("debug", args);
},
info(...args) {
if (enabled("info"))
for (const s of sinks)
s.write("info", args);
},
warn(...args) {
if (enabled("warn"))
for (const s of sinks)
s.write("warn", args);
},
error(...args) {
if (enabled("error"))
for (const s of sinks)
s.write("error", args);
},
setLevel(level) {
currentLevel = level;
},
getLevel() {
return currentLevel;
},
/** Add a log sink. All subsequent log calls dispatch to it. */
addSink(sink) {
sinks.push(sink);
},
/** Remove a previously added sink. */
removeSink(sink) {
const idx = sinks.indexOf(sink);
if (idx >= 0)
sinks.splice(idx, 1);
},
/**
* Remove the built-in console sink.
*
* Use this in execution contexts where stdio is unavailable (detached
* daemon, headless server) AFTER adding a durable file sink. Prevents
* wasted console.* calls that go to /dev/null.
*/
removeConsoleSink() {
const idx = sinks.indexOf(consoleSink);
if (idx >= 0)
sinks.splice(idx, 1);
},
};
//# sourceMappingURL=logger.js.map
"use strict";
/**
* Minimal IMemoria interface for consumers that need memory capabilities
* without depending on the concrete @aria/memoria package.
*
* Structural types (MemoryItem, ToolItem, RecallResult) replace bare
* `unknown` returns so consumers get usable shapes without coupling to
* the full Memoria domain model.
*/
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=memoria.js.map
"use strict";
/**
* Types for ARIA's multi-model system
* @module @aria/types/models
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.isFunctionToolSchema = isFunctionToolSchema;
exports.isNativeToolSchema = isNativeToolSchema;
/**
* Type guard for function tools
*/
function isFunctionToolSchema(tool) {
return tool.kind === "function";
}
/**
* Type guard for native tools
*/
function isNativeToolSchema(tool) {
return tool.kind === "native";
}
//# sourceMappingURL=models.js.map
"use strict";
/**
* Native tool types and type guards for provider-native capabilities
* @module @aria/types/native-tools
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.isFunctionTool = isFunctionTool;
exports.isNativeTool = isNativeTool;
/**
* Type guard to check if a tool call is a function/custom tool
* (as opposed to a native provider tool like search or code execution)
*/
function isFunctionTool(toolCall) {
// Function tools are the default - they don't have special name prefixes
return !isNativeTool(toolCall);
}
/**
* Type guard to check if a tool call is a native provider tool
* Native tools have special name prefixes like "brave_search", "computer", etc.
*/
function isNativeTool(toolCall) {
const nativePrefixes = [
"brave_search",
"computer",
"code_interpreter",
"text_editor",
"bash",
"dalle",
"file_search",
];
return nativePrefixes.some((prefix) => toolCall.name.startsWith(prefix));
}
//# sourceMappingURL=native-tools.js.map
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RESTART_KIND_ENV = exports.RESUME_ARION_ENV = exports.RESUME_SESSION_ENV = exports.NO_RELAUNCH_ENV = exports.RELAUNCH_EXIT_CODE = void 0;
exports.getRelaunchMarkerDir = getRelaunchMarkerDir;
exports.getRelaunchMarkerPath = getRelaunchMarkerPath;
exports.writeRelaunchMarker = writeRelaunchMarker;
exports.readRelaunchMarker = readRelaunchMarker;
exports.clearRelaunchMarker = clearRelaunchMarker;
const node_fs_1 = require("node:fs");
const node_os_1 = require("node:os");
const node_path_1 = require("node:path");
/**
* Magic exit code that tells the parent supervisor to respawn the child.
*/
exports.RELAUNCH_EXIT_CODE = 199;
/**
* Environment variables used by relaunch/supervisor flows.
*/
exports.NO_RELAUNCH_ENV = "ARIA_NO_RELAUNCH";
exports.RESUME_SESSION_ENV = "ARIA_RESUME_SESSION_ID";
exports.RESUME_ARION_ENV = "ARIA_RESUME_ARION";
exports.RESTART_KIND_ENV = "ARIA_RESTART_KIND";
function getAriaDir() {
const ariaHome = process.env.ARIA_HOME?.trim();
if (ariaHome)
return ariaHome;
return (0, node_path_1.join)((0, node_os_1.homedir)(), ".aria");
}
function getRelaunchMarkerDir() {
return (0, node_path_1.join)(getAriaDir(), "relaunch-pending");
}
function getRelaunchMarkerPath(pid = process.pid) {
return (0, node_path_1.join)(getRelaunchMarkerDir(), `${pid}.json`);
}
function writeRelaunchMarker(marker) {
try {
const markerPath = getRelaunchMarkerPath(marker.pid);
(0, node_fs_1.mkdirSync)((0, node_path_1.dirname)(markerPath), { recursive: true });
(0, node_fs_1.writeFileSync)(markerPath, JSON.stringify(marker), "utf-8");
}
catch {
// Best-effort — must never prevent restart
}
}
function readRelaunchMarker(pid = process.pid) {
try {
const markerPath = getRelaunchMarkerPath(pid);
if (!(0, node_fs_1.existsSync)(markerPath))
return null;
const raw = (0, node_fs_1.readFileSync)(markerPath, "utf-8");
return JSON.parse(raw);
}
catch {
return null;
}
}
function clearRelaunchMarker(pid = process.pid) {
try {
const markerPath = getRelaunchMarkerPath(pid);
if ((0, node_fs_1.existsSync)(markerPath))
(0, node_fs_1.unlinkSync)(markerPath);
}
catch {
// Best-effort
}
}
//# sourceMappingURL=relaunch.js.map
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.setGlobalStallPhase = setGlobalStallPhase;
exports.clearGlobalStallPhase = clearGlobalStallPhase;
function getGlobalPhaseStore() {
return globalThis;
}
function setGlobalStallPhase(label) {
const store = getGlobalPhaseStore();
store.__aria_stall_phase = label;
store.__aria_stall_phase_ts = performance.now();
}
function clearGlobalStallPhase() {
const store = getGlobalPhaseStore();
store.__aria_stall_phase = undefined;
store.__aria_stall_phase_ts = undefined;
}
//# sourceMappingURL=stall-phase.js.map
"use strict";
/** Canonical output shapes for tools with dedicated renderers.
* Executors MUST return these shapes. Renderers consume them directly. */
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=tool-outputs.js.map
/**
 * Error narrowing utilities for replacing `catch (err: any)` patterns.
 * @module @aria/types/errors
 */
/**
 * Extract a human-readable message from an unknown caught value.
 *
 * Replaces the pattern:
 * `catch (err: any) { ... err.message ... }`
 * with:
 * `catch (err: unknown) { ... getErrorMessage(err) ... }`
 *
 * Also reads a string `message` off plain (non-Error) objects — a common
 * shape for rejected promises and SDK error payloads — which previously
 * collapsed to "[object Object]" via String(err).
 */
export function getErrorMessage(err) {
    if (err instanceof Error)
        return err.message;
    if (typeof err === "string")
        return err;
    // Non-Error objects that still carry a usable string message.
    if (typeof err === "object" &&
        err !== null &&
        "message" in err &&
        typeof err.message === "string") {
        return err.message;
    }
    return String(err);
}
/**
 * Extract an HTTP-style status code from an unknown caught value.
 *
 * Replaces the pattern:
 * `(error as any).statusCode`
 * with:
 * `getErrorStatusCode(error)`
 *
 * Returns the numeric status code, or undefined when the value has no
 * numeric `statusCode` property.
 */
export function getErrorStatusCode(err) {
    if (typeof err === "object" &&
        err !== null &&
        "statusCode" in err &&
        typeof err.statusCode === "number") {
        return err.statusCode;
    }
    return undefined;
}
/**
 * Type guard: is this caught value an Error with a string `.code` property?
 * Common for Node.js system errors (ENOENT, ECONNREFUSED, etc.)
 */
export function isNodeError(err) {
    return (err instanceof Error && "code" in err && typeof err.code === "string");
}
//# sourceMappingURL=errors.js.map
/**
 * Lightweight leveled logger for ARIA.
 *
 * The level comes from the `ARIA_LOG_LEVEL` env var
 * (debug | info | warn | error | silent) and defaults to "info", so
 * debug output is suppressed in normal usage.
 *
 * Log calls fan out through pluggable sinks. The built-in console sink
 * applies ANSI styling for terminal readability; execution contexts
 * without a terminal (detached daemon, headless server) add a durable
 * sink (e.g. a JSONL file sink) and may drop the console sink so
 * diagnostic output can never silently vanish into /dev/null.
 *
 * Usage:
 * import { log } from "@aria-cli/types";
 * log.debug("[PhaseTimer] ..."); // only prints when ARIA_LOG_LEVEL=debug
 * log.info("[runner] ...");      // prints at info level and above
 * log.warn("[runner] ...");      // prints at warn level and above
 * log.error("[runner] ...");     // always prints (unless level > error)
 *
 * Sink configuration (daemon example):
 * import { log } from "@aria-cli/types";
 * log.addSink(myFileLogSink);
 * log.removeConsoleSink(); // stdio is /dev/null in detached mode
 */
// ANSI escape sequences used by the default console sink.
const DIM_GRAY = "\x1b[2m\x1b[90m";
const CYAN = "\x1b[36m";
const RESET = "\x1b[0m";
// Severity ranking; a message is emitted when its rank >= the current rank.
const LEVEL_ORDER = {
    debug: 0,
    info: 1,
    warn: 2,
    error: 3,
    silent: 4,
};
// Initial level from ARIA_LOG_LEVEL when it names a valid level; else "info".
function resolveLevel() {
    const fromEnv = typeof process !== "undefined" ? process.env.ARIA_LOG_LEVEL : undefined;
    return fromEnv && fromEnv in LEVEL_ORDER ? fromEnv : "info";
}
// Module-level mutable state; adjusted at runtime via log.setLevel().
let currentLevel = resolveLevel();
function enabled(level) {
    return LEVEL_ORDER[level] >= LEVEL_ORDER[currentLevel];
}
// Broken-pipe style failures are expected when the reader goes away
// (e.g. piping into `head`); these are the only errors we swallow.
function isBrokenPipeError(error) {
    if (!error || typeof error !== "object") {
        return false;
    }
    const candidate = error;
    return candidate.code === "EPIPE" || candidate.code === "ERR_STREAM_DESTROYED";
}
// Invoke a console method; suppress broken-pipe failures, rethrow the rest.
function writeSafely(method, args) {
    try {
        method(...args);
    }
    catch (error) {
        if (!isBrokenPipeError(error)) {
            throw error;
        }
    }
}
// Colorize the leading argument when it is a "[tag]"-style string;
// otherwise return the args untouched.
function colorizeTag(args, color) {
    const [head, ...tail] = args;
    if (typeof head !== "string" || !head.startsWith("[")) {
        return args;
    }
    return [`${color}${head}${RESET}`, ...tail];
}
// ── Sinks ────────────────────────────────────────────────────────
/** Default console sink — applies ANSI styling for terminal readability. */
const consoleSink = {
    write(level, args) {
        if (level === "debug") {
            writeSafely(console.debug, colorizeTag(args, DIM_GRAY));
        }
        else if (level === "info") {
            writeSafely(console.info, colorizeTag(args, CYAN));
        }
        else if (level === "warn") {
            writeSafely(console.warn, args);
        }
        else {
            // "error" (and any unknown level) goes to console.error, unstyled.
            writeSafely(console.error, args);
        }
    },
};
// Registered sinks; every log call fans out to each of these in order.
const sinks = [consoleSink];
// Fan a single log call out to every registered sink (raw, unstyled args).
function dispatch(level, args) {
    if (!enabled(level)) {
        return;
    }
    for (const sink of sinks) {
        sink.write(level, args);
    }
}
// ── Public API ───────────────────────────────────────────────────
export const log = {
    debug(...args) {
        dispatch("debug", args);
    },
    info(...args) {
        dispatch("info", args);
    },
    warn(...args) {
        dispatch("warn", args);
    },
    error(...args) {
        dispatch("error", args);
    },
    setLevel(level) {
        currentLevel = level;
    },
    getLevel() {
        return currentLevel;
    },
    /** Add a log sink. All subsequent log calls dispatch to it. */
    addSink(sink) {
        sinks.push(sink);
    },
    /** Remove a previously added sink. */
    removeSink(sink) {
        const position = sinks.indexOf(sink);
        if (position >= 0) {
            sinks.splice(position, 1);
        }
    },
    /**
     * Remove the built-in console sink.
     *
     * Use this in execution contexts where stdio is unavailable (detached
     * daemon, headless server) AFTER adding a durable file sink. Prevents
     * wasted console.* calls that go to /dev/null.
     */
    removeConsoleSink() {
        const position = sinks.indexOf(consoleSink);
        if (position >= 0) {
            sinks.splice(position, 1);
        }
    },
};
//# sourceMappingURL=logger.js.map
/**
 * Minimal IMemoria interface for consumers that need memory capabilities
 * without depending on the concrete @aria/memoria package.
 *
 * Structural types (MemoryItem, ToolItem, RecallResult) replace bare
 * `unknown` returns so consumers get usable shapes without coupling to
 * the full Memoria domain model.
 */
// Type-only module: every declaration is erased at compile time, so the
// emitted JS is just an empty export marking this file as an ES module.
export {};
//# sourceMappingURL=memoria.js.map
/**
 * Types for ARIA's multi-model system
 * @module @aria/types/models
 */
/**
 * Type guard for function tools — narrows on the discriminant `kind` tag.
 */
export function isFunctionToolSchema(tool) {
    const { kind } = tool;
    return kind === "function";
}
/**
 * Type guard for native tools — narrows on the discriminant `kind` tag.
 */
export function isNativeToolSchema(tool) {
    const { kind } = tool;
    return kind === "native";
}
//# sourceMappingURL=models.js.map
/**
 * Native tool types and type guards for provider-native capabilities
 * @module @aria/types/native-tools
 */
// Name prefixes that mark a tool call as provider-native.
// NOTE(review): startsWith matching also classifies names like
// "bash_runner" or "computer_vision" as native — presumably intentional,
// but confirm against the tool-naming convention.
const NATIVE_TOOL_PREFIXES = [
    "brave_search",
    "computer",
    "code_interpreter",
    "text_editor",
    "bash",
    "dalle",
    "file_search",
];
/**
 * Type guard to check if a tool call is a function/custom tool
 * (as opposed to a native provider tool like search or code execution).
 * Function tools are the default — anything not native is a function tool.
 */
export function isFunctionTool(toolCall) {
    return !isNativeTool(toolCall);
}
/**
 * Type guard to check if a tool call is a native provider tool.
 * Native tools have special name prefixes like "brave_search", "computer", etc.
 */
export function isNativeTool(toolCall) {
    const { name } = toolCall;
    return NATIVE_TOOL_PREFIXES.some((prefix) => name.startsWith(prefix));
}
//# sourceMappingURL=native-tools.js.map
import { existsSync, mkdirSync, readFileSync, unlinkSync, writeFileSync } from "node:fs";
import { homedir } from "node:os";
import { dirname, join } from "node:path";
/**
 * Magic exit code that tells the parent supervisor to respawn the child.
 */
export const RELAUNCH_EXIT_CODE = 199;
/**
 * Environment variables used by relaunch/supervisor flows.
 */
export const NO_RELAUNCH_ENV = "ARIA_NO_RELAUNCH";
export const RESUME_SESSION_ENV = "ARIA_RESUME_SESSION_ID";
export const RESUME_ARION_ENV = "ARIA_RESUME_ARION";
export const RESTART_KIND_ENV = "ARIA_RESTART_KIND";
// ARIA's data root: $ARIA_HOME (trimmed) when set, otherwise ~/.aria.
// Re-read on every call so env changes take effect immediately.
function getAriaDir() {
    const override = process.env.ARIA_HOME?.trim();
    return override ? override : join(homedir(), ".aria");
}
/** Directory that holds pending relaunch markers. */
export function getRelaunchMarkerDir() {
    return join(getAriaDir(), "relaunch-pending");
}
/** Marker file path for a pid (defaults to the current process). */
export function getRelaunchMarkerPath(pid = process.pid) {
    return join(getRelaunchMarkerDir(), `${pid}.json`);
}
/**
 * Persist a relaunch marker keyed by `marker.pid`.
 * Best-effort: every filesystem failure is ignored, so a failed write can
 * never block the restart itself.
 */
export function writeRelaunchMarker(marker) {
    try {
        const target = getRelaunchMarkerPath(marker.pid);
        mkdirSync(dirname(target), { recursive: true });
        writeFileSync(target, JSON.stringify(marker), "utf-8");
    }
    catch {
        // Best-effort — must never prevent restart
    }
}
/**
 * Load the relaunch marker for `pid`, or null when the marker is missing
 * or unreadable (any failure maps to null).
 */
export function readRelaunchMarker(pid = process.pid) {
    try {
        const source = getRelaunchMarkerPath(pid);
        if (!existsSync(source)) {
            return null;
        }
        return JSON.parse(readFileSync(source, "utf-8"));
    }
    catch {
        return null;
    }
}
/** Delete the marker for `pid`, ignoring any filesystem failure. */
export function clearRelaunchMarker(pid = process.pid) {
    try {
        const target = getRelaunchMarkerPath(pid);
        if (existsSync(target)) {
            unlinkSync(target);
        }
    }
    catch {
        // Best-effort
    }
}
//# sourceMappingURL=relaunch.js.map
// The stall phase is parked directly on globalThis so watchdog code can
// read the most recent phase without importing this module (this also
// works across duplicated bundled copies).
function getGlobalPhaseStore() {
    return globalThis;
}
/** Record `label` as the current stall phase with a high-res timestamp. */
export function setGlobalStallPhase(label) {
    const g = getGlobalPhaseStore();
    g.__aria_stall_phase = label;
    g.__aria_stall_phase_ts = performance.now();
}
/** Clear both the stall-phase label and its timestamp. */
export function clearGlobalStallPhase() {
    const g = getGlobalPhaseStore();
    g.__aria_stall_phase = undefined;
    g.__aria_stall_phase_ts = undefined;
}
//# sourceMappingURL=stall-phase.js.map
/** Canonical output shapes for tools with dedicated renderers.
 * Executors MUST return these shapes. Renderers consume them directly. */
// Type-only module: interfaces are erased at compile time, so the emitted
// JS is just an empty export marking this file as an ES module.
export {};
//# sourceMappingURL=tool-outputs.js.map
+29
-1

@@ -1,1 +0,29 @@

var{defineProperty:X,getOwnPropertyNames:F,getOwnPropertyDescriptor:H}=Object,U=Object.prototype.hasOwnProperty;function j(_){return this[_]}var M=(_)=>{var p=($??=new WeakMap).get(_),G;if(p)return p;if(p=X({},"__esModule",{value:!0}),_&&typeof _==="object"||typeof _==="function"){for(var q of F(_))if(!U.call(p,q))X(p,q,{get:j.bind(_,q),enumerable:!(G=H(_,q))||G.enumerable})}return $.set(_,p),p},$;var V=(_)=>_;function A(_,p){this[_]=V.bind(null,p)}var f=(_,p)=>{for(var G in p)X(_,G,{get:p[G],enumerable:!0,configurable:!0,set:A.bind(p,G)})};var i={};f(i,{writeRelaunchMarker:()=>c,setGlobalStallPhase:()=>l,readRelaunchMarker:()=>m,log:()=>B,isNodeError:()=>w,isNativeToolSchema:()=>P,isNativeTool:()=>b,isFunctionToolSchema:()=>u,isFunctionTool:()=>C,getRelaunchMarkerPath:()=>K,getRelaunchMarkerDir:()=>I,getErrorStatusCode:()=>S,getErrorMessage:()=>N,clearRelaunchMarker:()=>v,clearGlobalStallPhase:()=>k,RESUME_SESSION_ENV:()=>R,RESUME_ARION_ENV:()=>E,RESTART_KIND_ENV:()=>y,RELAUNCH_EXIT_CODE:()=>D,NO_RELAUNCH_ENV:()=>L});module.exports=M(i);function u(_){return _.kind==="function"}function P(_){return _.kind==="native"}function N(_){if(_ instanceof Error)return _.message;if(typeof _==="string")return _;return String(_)}function S(_){if(typeof _==="object"&&_!==null&&"statusCode"in _&&typeof _.statusCode==="number")return _.statusCode;return}function w(_){return _ instanceof Error&&"code"in _&&typeof _.code==="string"}function C(_){return!b(_)}function b(_){return["brave_search","computer","code_interpreter","text_editor","bash","dalle","file_search"].some((G)=>_.name.startsWith(G))}var W=require("node:fs"),x=require("node:os"),J=require("node:path"),D=199,L="ARIA_NO_RELAUNCH",R="ARIA_RESUME_SESSION_ID",E="ARIA_RESUME_ARION",y="ARIA_RESTART_KIND";function h(){let _=process.env.ARIA_HOME?.trim();if(_)return _;return J.join(x.homedir(),".aria")}function I(){return J.join(h(),"relaunch-pending")}function K(_=process.pid){return J.join(I(),`${_}.json`)}function 
c(_){try{let p=K(_.pid);W.mkdirSync(J.dirname(p),{recursive:!0}),W.writeFileSync(p,JSON.stringify(_),"utf-8")}catch{}}function m(_=process.pid){try{let p=K(_);if(!W.existsSync(p))return null;let G=W.readFileSync(p,"utf-8");return JSON.parse(G)}catch{return null}}function v(_=process.pid){try{let p=K(_);if(W.existsSync(p))W.unlinkSync(p)}catch{}}function O(){return globalThis}function l(_){let p=O();p.__aria_stall_phase=_,p.__aria_stall_phase_ts=performance.now()}function k(){let _=O();_.__aria_stall_phase=void 0,_.__aria_stall_phase_ts=void 0}var Y={debug:0,info:1,warn:2,error:3,silent:4};function o(){let _=typeof process<"u"?process.env.ARIA_LOG_LEVEL:void 0;if(_&&_ in Y)return _;return"info"}var Z=o();function Q(_){return Y[_]>=Y[Z]}function t(_){if(!_||typeof _!=="object")return!1;let p=_;return p.code==="EPIPE"||p.code==="ERR_STREAM_DESTROYED"}function d(_,p){try{_(...p)}catch(G){if(t(G))return;throw G}}function g(_){if(_.length===0)return _;let[p,...G]=_;if(typeof p!=="string")return _;if(!p.startsWith("["))return _;return[`\x1B[2m\x1B[90m${p}\x1B[0m`,...G]}function n(_){if(_.length===0)return _;let[p,...G]=_;if(typeof p!=="string")return _;if(!p.startsWith("["))return _;return[`\x1B[36m${p}\x1B[0m`,...G]}var T={write(_,p){let G,q;switch(_){case"debug":G=g(p),q=console.debug;break;case"info":G=n(p),q=console.info;break;case"warn":G=p,q=console.warn;break;default:G=p,q=console.error;break}d(q,G)}},z=[T],B={debug(..._){if(Q("debug"))for(let p of z)p.write("debug",_)},info(..._){if(Q("info"))for(let p of z)p.write("info",_)},warn(..._){if(Q("warn"))for(let p of z)p.write("warn",_)},error(..._){if(Q("error"))for(let p of z)p.write("error",_)},setLevel(_){Z=_},getLevel(){return Z},addSink(_){z.push(_)},removeSink(_){let p=z.indexOf(_);if(p>=0)z.splice(p,1)},removeConsoleSink(){let _=z.indexOf(T);if(_>=0)z.splice(_,1)}};
"use strict";
/**
* @aria/types - Minimal shared types
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __exportStar = (this && this.__exportStar) || function(m, exports) {
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.log = void 0;
__exportStar(require("./models.js"), exports);
__exportStar(require("./memoria.js"), exports);
__exportStar(require("./errors.js"), exports);
__exportStar(require("./native-tools.js"), exports);
__exportStar(require("./relaunch.js"), exports);
__exportStar(require("./stall-phase.js"), exports);
var logger_js_1 = require("./logger.js");
Object.defineProperty(exports, "log", { enumerable: true, get: function () { return logger_js_1.log; } });
//# sourceMappingURL=index.js.map

@@ -1,1 +0,11 @@

function P(_){return _.kind==="function"}function N(_){return _.kind==="native"}function w(_){if(_ instanceof Error)return _.message;if(typeof _==="string")return _;return String(_)}function C(_){if(typeof _==="object"&&_!==null&&"statusCode"in _&&typeof _.statusCode==="number")return _.statusCode;return}function D(_){return _ instanceof Error&&"code"in _&&typeof _.code==="string"}function R(_){return!b(_)}function b(_){return["brave_search","computer","code_interpreter","text_editor","bash","dalle","file_search"].some((G)=>_.name.startsWith(G))}import{existsSync as Y,mkdirSync as x,readFileSync as I,unlinkSync as O,writeFileSync as T}from"node:fs";import{homedir as B}from"node:os";import{dirname as F,join as J}from"node:path";var m=199,v="ARIA_NO_RELAUNCH",l="ARIA_RESUME_SESSION_ID",k="ARIA_RESUME_ARION",o="ARIA_RESTART_KIND";function H(){let _=process.env.ARIA_HOME?.trim();if(_)return _;return J(B(),".aria")}function U(){return J(H(),"relaunch-pending")}function K(_=process.pid){return J(U(),`${_}.json`)}function t(_){try{let p=K(_.pid);x(F(p),{recursive:!0}),T(p,JSON.stringify(_),"utf-8")}catch{}}function d(_=process.pid){try{let p=K(_);if(!Y(p))return null;let G=I(p,"utf-8");return JSON.parse(G)}catch{return null}}function g(_=process.pid){try{let p=K(_);if(Y(p))O(p)}catch{}}function Z(){return globalThis}function i(_){let p=Z();p.__aria_stall_phase=_,p.__aria_stall_phase_ts=performance.now()}function a(){let _=Z();_.__aria_stall_phase=void 0,_.__aria_stall_phase_ts=void 0}var Q={debug:0,info:1,warn:2,error:3,silent:4};function j(){let _=typeof process<"u"?process.env.ARIA_LOG_LEVEL:void 0;if(_&&_ in Q)return _;return"info"}var X=j();function z(_){return Q[_]>=Q[X]}function M(_){if(!_||typeof _!=="object")return!1;let p=_;return p.code==="EPIPE"||p.code==="ERR_STREAM_DESTROYED"}function V(_,p){try{_(...p)}catch(G){if(M(G))return;throw G}}function A(_){if(_.length===0)return _;let[p,...G]=_;if(typeof p!=="string")return _;if(!p.startsWith("["))return 
_;return[`\x1B[2m\x1B[90m${p}\x1B[0m`,...G]}function f(_){if(_.length===0)return _;let[p,...G]=_;if(typeof p!=="string")return _;if(!p.startsWith("["))return _;return[`\x1B[36m${p}\x1B[0m`,...G]}var $={write(_,p){let G,q;switch(_){case"debug":G=A(p),q=console.debug;break;case"info":G=f(p),q=console.info;break;case"warn":G=p,q=console.warn;break;default:G=p,q=console.error;break}V(q,G)}},W=[$],u={debug(..._){if(z("debug"))for(let p of W)p.write("debug",_)},info(..._){if(z("info"))for(let p of W)p.write("info",_)},warn(..._){if(z("warn"))for(let p of W)p.write("warn",_)},error(..._){if(z("error"))for(let p of W)p.write("error",_)},setLevel(_){X=_},getLevel(){return X},addSink(_){W.push(_)},removeSink(_){let p=W.indexOf(_);if(p>=0)W.splice(p,1)},removeConsoleSink(){let _=W.indexOf($);if(_>=0)W.splice(_,1)}};export{t as writeRelaunchMarker,i as setGlobalStallPhase,d as readRelaunchMarker,u as log,D as isNodeError,N as isNativeToolSchema,b as isNativeTool,P as isFunctionToolSchema,R as isFunctionTool,K as getRelaunchMarkerPath,U as getRelaunchMarkerDir,C as getErrorStatusCode,w as getErrorMessage,g as clearRelaunchMarker,a as clearGlobalStallPhase,l as RESUME_SESSION_ENV,k as RESUME_ARION_ENV,o as RESTART_KIND_ENV,m as RELAUNCH_EXIT_CODE,v as NO_RELAUNCH_ENV};
/**
 * @aria/types - Minimal shared types
 *
 * Barrel module: flattens the public surface of each sub-module so
 * consumers can `import { ... } from "@aria-cli/types"` directly.
 */
export * from "./models.js";
export * from "./memoria.js";
export * from "./errors.js";
export * from "./native-tools.js";
export * from "./relaunch.js";
export * from "./stall-phase.js";
// The logger singleton carries module state (level, sinks) shared by all importers.
export { log } from "./logger.js";
//# sourceMappingURL=index.js.map
+4
-2
{
"name": "@aria-cli/types",
"version": "1.0.19",
"version": "1.0.31",
"type": "module",

@@ -26,3 +26,5 @@ "main": "./dist/index.js",

"!**/*.map",
"!**/*.tsbuildinfo"
"!**/*.tsbuildinfo",
"!**/*.d.ts",
"!**/*.d.ts.map"
],

@@ -29,0 +31,0 @@ "engines": {

/**
* Error narrowing utilities for replacing `catch (err: any)` patterns.
* @module @aria/types/errors
*/
/**
* Extract a human-readable message from an unknown caught value.
*
* Replaces the pattern:
* `catch (err: any) { ... err.message ... }`
* with:
* `catch (err: unknown) { ... getErrorMessage(err) ... }`
*/
export declare function getErrorMessage(err: unknown): string;
/**
* Extract an HTTP-style status code from an unknown caught value.
*
* Replaces the pattern:
* `(error as any).statusCode`
* with:
* `getErrorStatusCode(error)`
*/
export declare function getErrorStatusCode(err: unknown): number | undefined;
/**
* Type guard: is this caught value an Error with a `.code` property?
* Common for Node.js system errors (ENOENT, ECONNREFUSED, etc.)
*/
export declare function isNodeError(err: unknown): err is Error & {
code: string;
};
/**
* @aria/types - Minimal shared types
*/
export * from "./models.js";
export * from "./memoria.js";
export * from "./errors.js";
export * from "./native-tools.js";
export * from "./relaunch.js";
export * from "./stall-phase.js";
export { log, type LogLevel, type LogSink } from "./logger.js";
export type { EditFileOutput, WriteFileOutput, BashToolOutput, NotebookEditOutput, } from "./tool-outputs.js";
/**
* Lightweight leveled logger for ARIA.
*
* Respects `ARIA_LOG_LEVEL` env var (debug | info | warn | error | silent).
* Default level: "info" — debug messages are suppressed in normal usage.
*
* Architecture: logs dispatch through pluggable sinks. The default console
* sink writes to stdout/stderr with ANSI styling. Execution contexts that
* lack a terminal (daemon, headless server) replace or supplement it with
* durable sinks (e.g., JSONL file sink). This decoupling makes it impossible
* for diagnostic output to silently go to /dev/null — every context
* configures the sink that matches its output channel.
*
* Usage:
* import { log } from "@aria-cli/types";
* log.debug("[PhaseTimer] ..."); // only prints when ARIA_LOG_LEVEL=debug
* log.info("[runner] ..."); // prints at info level and above
* log.warn("[runner] ..."); // prints at warn level and above
* log.error("[runner] ..."); // always prints (unless level > error)
*
* Sink configuration (daemon example):
* import { log } from "@aria-cli/types";
* log.addSink(myFileLogSink);
* log.removeConsoleSink(); // stdio is /dev/null in detached mode
*/
export type LogLevel = "debug" | "info" | "warn" | "error" | "silent";
/**
* A log sink receives raw (unstyled) log arguments and writes them
* to a destination. Each sink decides its own formatting.
*/
export interface LogSink {
write(level: LogLevel, args: unknown[]): void;
close?(): void;
}
export declare const log: {
debug(...args: unknown[]): void;
info(...args: unknown[]): void;
warn(...args: unknown[]): void;
error(...args: unknown[]): void;
setLevel(level: LogLevel): void;
getLevel(): LogLevel;
/** Add a log sink. All subsequent log calls dispatch to it. */
addSink(sink: LogSink): void;
/** Remove a previously added sink. */
removeSink(sink: LogSink): void;
/**
* Remove the built-in console sink.
*
* Use this in execution contexts where stdio is unavailable (detached
* daemon, headless server) AFTER adding a durable file sink. Prevents
* wasted console.* calls that go to /dev/null.
*/
removeConsoleSink(): void;
};
/**
* Minimal IMemoria interface for consumers that need memory capabilities
* without depending on the concrete @aria/memoria package.
*
* Structural types (MemoryItem, ToolItem, RecallResult) replace bare
* `unknown` returns so consumers get usable shapes without coupling to
* the full Memoria domain model.
*/
/** A single stored memory. */
export interface MemoryItem {
    /** Unique memory identifier. */
    id: string;
    /** Raw memory text. */
    content: string;
    /** Optional condensed form of `content`. */
    summary?: string;
    /** Memory network this item belongs to (e.g. "episodes", "beliefs"). */
    network?: string;
    /** Importance score — range not specified here; presumably 0-1. TODO confirm. */
    importance?: number;
    /** Confidence score; soft-invalidation sets it to 0 (see IMemoria.invalidateMemoryInPlace). */
    confidence?: number;
    createdAt?: Date;
    /** Free-form per-memory metadata (patched via IMemoria.updateMemoryMetadata). */
    metadata?: Record<string, unknown>;
}
/** A single tool-store entry. */
export interface ToolItem {
    id: string;
    name: string;
    description: string;
    /** Provenance of the tool (built-in, external, or organically learned). */
    source?: ToolSourceItem;
    importance?: number;
    confidence?: number;
    tags?: string[];
    createdAt?: Date;
    updatedAt?: Date;
    /** Last read time — recall mutates this unless `updateAccessStats` is false. */
    accessedAt?: Date;
    accessCount?: number;
    /** IDs of supporting evidence records — presumably memory/episode IDs; TODO confirm. */
    evidenceIds?: string[];
    category?: "filesystem" | "code" | "shell" | "web" | "data" | "memory" | "meta" | "arion";
    /** Parameter description for the tool — shape unspecified here. */
    parameters?: Record<string, unknown>;
    riskLevel?: "safe" | "moderate" | "dangerous";
    responseTemplate?: string;
    knowledge?: string;
    usageHint?: string;
    /** Numeric timestamps (unlike the Date fields above) — presumably epoch ms; TODO confirm units. */
    adoptedAt?: number;
    lastUsedAt?: number;
    usageCount?: number;
    schema?: Record<string, unknown>;
    capabilities?: string[];
    metrics?: Record<string, unknown>;
}
/** Provenance of a tool-store entry, discriminated by `type`. */
export type ToolSourceItem = {
    /** Shipped with ARIA. */
    type: "built-in";
} | {
    /** Imported from an external reference in one of the listed formats. */
    type: "external";
    ref: string;
    format: "skill-file" | "markdown" | "json" | "web-search";
} | {
    /** Learned organically from agent activity. */
    type: "organic";
    method: "execution" | "observation" | "feedback";
    episodeId?: string;
};
/** Result shape returned by observation query primitives. */
export interface ObservationQueryResult {
    /** Active observation text for the session. */
    observations: string;
    currentTask: string | null;
    suggestedResponse: string | null;
    sessionId: string;
    /** Last-update timestamp — presumably epoch ms; TODO confirm units. */
    updatedAt: number;
}
/** Result envelope returned by the APR pipeline. */
export interface RecallResult {
    memories: MemoryItem[];
    /** @deprecated Legacy alias retained for compatibility with older callers. */
    data?: MemoryItem[];
    /** Echo of the query that produced this result. */
    query?: string;
    totalCount?: number;
    /** How much of the pipeline actually ran ("full" = nothing skipped). */
    degradationLevel?: "full" | "reduced" | "minimal";
    /** Names of pipeline stages skipped under degradation. */
    skippedStages?: string[];
    warnings?: string[];
}
/** Result from lightweight FTS5-only recall. */
export interface LightweightRecallResult {
    memories: MemoryItem[];
    /** Pre-rendered context string for prompt injection. */
    formattedContext: {
        context: string;
    };
    query: string;
}
/** One memory extracted from a conversation turn. */
export interface ExtractedConversationMemoryItem {
    content: string;
    /** Target network the extracted memory was routed to. */
    network: "world" | "episodes" | "beliefs" | "entities" | "procedures" | "strategies";
    /** Optional durability marker describing how the memory was routed */
    durability?: "permanent" | "session";
}
/** Outcome of conversation extraction; `error` is set on failure. */
export interface ConversationExtractionResult {
    learned: ExtractedConversationMemoryItem[];
    error?: string;
}
/** A condition under which a skill should activate. */
export interface SkillTriggerItem {
    type: "keyword" | "intent" | "context" | "entity";
    /** Pattern matched against input — exact matching semantics unspecified here. */
    pattern: string;
    confidence: number;
}
/** Environment prerequisites a skill needs to run. */
export interface SkillRequirementsItem {
    /** Required executables on PATH. */
    bins?: string[];
    /** Required environment variables. */
    env?: string[];
    /** Supported operating systems (Node `process.platform` values). */
    os?: ("darwin" | "linux" | "win32")[];
}
/** Provenance of a skill, discriminated by `type`. */
export type SkillSourceItem = {
    /** Learned automatically from a recorded tool-use sequence. */
    type: "auto-learned";
    sequenceId: string;
} | {
    /** Loaded from a skill file on disk. */
    type: "file";
    path: string;
    format: "skill-file";
} | {
    /** Registered via a CLI command. */
    type: "cli";
    command: string;
} | {
    /** Imported from a web URL. */
    type: "web";
    url: string;
} | {
    /** Provided directly by the user. */
    type: "user";
    ref?: string;
};
/** A stored skill: reusable procedure with triggers, requirements, and usage stats. */
export interface SkillItem {
    id: string;
    name: string;
    description: string;
    /** The skill body/instructions. */
    content: string;
    /** Tool-store entries this skill uses. */
    toolIds: string[];
    triggers: SkillTriggerItem[];
    requires?: SkillRequirementsItem;
    tags: string[];
    source: SkillSourceItem;
    /** Lifetime execution count; `successCount` is the successful subset. */
    executionCount: number;
    successCount: number;
    /** Null until the skill has been executed at least once. */
    lastExecuted: Date | null;
    averageDurationMs: number | null;
    confidence: number;
    importance: number;
    createdAt: Date;
    updatedAt: Date;
    accessedAt: Date;
    accessCount: number;
    /** Set when the skill has been archived (soft-removed). */
    archivedAt?: Date;
}
/** One recorded execution of a skill (see IMemoria.recordSkillExecution). */
export interface SkillExecutionRecord {
    id: string;
    skillId: string;
    success: boolean;
    durationMs?: number;
    notes?: string;
    /** Episode the execution was part of, when known. */
    episodeId?: string;
    timestamp: Date;
}
/** One tool invocation within a recorded tool-use sequence. */
export interface ToolUseSequenceRecord {
    tool: string;
    input?: Record<string, unknown>;
    /** Tool output, serialized to string. */
    output?: string;
    success: boolean;
    timestamp: Date;
}
/**
 * Minimal observation engine interface for consumers that need observation
 * capabilities without depending on the concrete @aria/memoria package.
 *
 * Matches the public surface of ObservationEngine from @aria/memoria.
 */
export interface IObservationEngine {
    /** Prepare context before model call — may trigger observation and filter messages. */
    prepareContext(opts: {
        sessionId: string;
        /** Conversation messages; shape intentionally opaque at this layer. */
        messages: unknown[];
        originalSystemPrompt: string;
        /** When true, must not mutate observation state. */
        readOnly?: boolean;
        signal?: AbortSignal;
    }): Promise<{
        /** Messages remaining after observation-based filtering. */
        messagesToSend: unknown[];
        /** IDs of messages removed from the outgoing context. */
        filteredMessageIds: Set<string>;
        /** Whether this call triggered an observation pass. */
        observationTriggered: boolean;
    }>;
    /** Track new message IDs added to the conversation since last observation. */
    trackNewMessages(messageIds: string[]): void;
    /** Force observation of remaining unobserved messages at session end. */
    finalObservation(sessionId: string, messages?: unknown[], signal?: AbortSignal): Promise<void>;
    /** Observe messages — used for explicit observation triggers. */
    observe(sessionId: string, messages: unknown[], signal?: AbortSignal, opts?: {
        /** When true, observe even if the engine would normally skip. */
        force?: boolean;
    }): Promise<void>;
    /** Get the observation record for a session, or null. */
    getRecord(sessionId: string): unknown;
    /** Get the active observation text for a session. */
    getActiveObservations(sessionId: string): string;
    /** Drain pending observation work (wait for in-flight observations to complete). */
    drain(): Promise<void>;
}
/**
 * Minimal memory-system contract. Members marked optional (`?`) are newer
 * additions kept optional for backward compatibility with older implementations.
 */
export interface IMemoria {
    /** Store a memory. Returns an object with `id` on success, or null. */
    remember(content: string, options?: Record<string, unknown>): Promise<{
        id: string;
    } | null>;
    /**
     * Internal fast-path for storing execution-trace episodes without the full
     * entity extraction pipeline. Optional for backward compatibility.
     */
    storeEpisode?(content: string): Promise<{
        id: string;
    } | null>;
    /** Retrieve memories matching a query. Includes degradation metadata when available. */
    recall(query: string, options?: Record<string, unknown>): Promise<RecallResult>;
    /** Lightweight FTS5-only recall for session seeding. No LLM calls. */
    recallLightweight(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<LightweightRecallResult>;
    /**
     * Planner-first unified recall: 1 LLM call for classification + plan + expansion.
     * Routes to direct lookup (skipping APR) or mixed APR + primitive execution.
     * This is the preferred recall path — recallWithAPR delegates to this.
     */
    recallUnified?(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        /** Pre-rendered context for prompt injection, when available. */
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        } | null;
        /** Query classification produced by the planner call. */
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        /** Primitive execution plan chosen by the planner. */
        plan?: Array<{
            primitive: string;
            args: Record<string, unknown>;
        }>;
        planReasoning?: string;
        /** Raw per-primitive results, tagged with their source name. */
        primitiveResults?: Array<{
            source: string;
            data: unknown;
        }>;
    }>;
    /** Retrieve memories via the Adaptive Parallel Retrieval pipeline. */
    recallWithAPR(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        };
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        query?: string;
        totalCount?: number;
        degradationLevel?: "full" | "reduced" | "minimal";
        skippedStages?: string[];
        warnings?: string[];
    }>;
    /**
     * Lightweight vector-only recall with network filtering. Bypasses the full APR
     * pipeline (no classification, no multi-index fusion, no diversity). Designed
     * for time-critical paths like session bootstrap.
     */
    recallDirect(query: string, options: {
        networks: string[];
        limit: number;
        /** Minimum similarity cutoff — presumably 0-1; TODO confirm scale. */
        threshold?: number;
    }): Promise<MemoryItem[]>;
    /** Recall memories by network type. Direct SQL query, no LLM classification. */
    recallByNetwork(network: string, options?: {
        limit?: number;
    }): Promise<MemoryItem[]>;
    /** Recall recent memories, optionally filtered by network. Direct SQL query sorted by created_at DESC. */
    recallRecent(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Recall memories by vector similarity. Uses embedding model only (0 LLM calls). */
    recallSimilar(query: string, options?: {
        /** Number of nearest neighbours to return. */
        k?: number;
        threshold?: number;
    }): Promise<MemoryItem[]>;
    /** Store a tool with semantic indexing. Returns the generated id. */
    rememberTool(tool: {
        name: string;
        description: string;
    } & Record<string, unknown>): Promise<string>;
    /** Query the tool store. */
    recallTools(options: {
        query: string;
        limit?: number;
        /** Optional offset used for paged list retrieval (matchAll mode). */
        offset?: number;
        minConfidence?: number;
        /** When true, bypasses semantic/keyword matching and lists stored tools. */
        matchAll?: boolean;
        /** When false, recall becomes read-only and does not mutate access stats. */
        updateAccessStats?: boolean;
    }): Promise<ToolItem[]>;
    /** Retrieve a single tool entry by ID. */
    getToolById(id: string): Promise<ToolItem | null>;
    /** Delete a tool entry by ID. Returns true if deleted, false if not found. */
    forgetTool(id: string): Promise<boolean>;
    /** Store a skill directly. Returns the generated skill ID. */
    rememberSkill(skill: {
        name: string;
        description: string;
        content: string;
        toolIds?: string[];
        triggers?: SkillTriggerItem[];
        requires?: SkillRequirementsItem;
        tags?: string[];
        source: SkillSourceItem;
        importance?: number;
        confidence?: number;
    }): Promise<string>;
    /** Skill retrieval API. */
    recallSkills(options?: {
        query?: string;
        /** Restrict to skills with at least one trigger of this type. */
        triggerType?: SkillTriggerItem["type"];
        limit?: number;
    }): Promise<SkillItem[]>;
    /** Skill lookup API. Accepts either a skill ID or a skill name. */
    getSkill(idOrName: string): Promise<SkillItem | null>;
    /** Record skill execution metrics. */
    recordSkillExecution(options: {
        skillId: string;
        success: boolean;
        durationMs?: number;
        notes?: string;
        episodeId?: string;
    }): Promise<SkillExecutionRecord>;
    /** Start recording a tool-use sequence for procedural learning. */
    startToolSequence?(task: string, options?: {
        sessionId?: string;
    }): void;
    /** Record a single tool execution within the active sequence. */
    recordToolUse?(record: ToolUseSequenceRecord, options?: {
        sessionId?: string;
    }): void;
    /** Finalize the active tool-use sequence and trigger learning. */
    endToolSequence?(success: boolean, options?: {
        sessionId?: string;
    }): Promise<void>;
    /** Delete a single memory by ID. */
    deleteMemory(id: string): Promise<boolean>;
    /**
     * Merge a JSON patch into a memory's metadata.
     * NOTE(review): synchronous (returns void) unlike the other mutators here — confirm intentional.
     */
    updateMemoryMetadata(id: string, patch: Record<string, unknown>): void;
    /** Soft-invalidate a memory in place (sets confidence to 0, marks reason). Allows recovery. */
    invalidateMemoryInPlace(memoryId: string, reason?: string): Promise<void>;
    /** Requeue failed embedding jobs and return how many were scheduled. */
    retryFailedEmbeddings?(): Promise<number>;
    /** Extract memories from a user/assistant conversation turn. */
    extractFromConversation(userMessage: string, assistantResponse: string, options?: {
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /** Observation engine for Mastra-style observation/reflection memory layer. */
    readonly observationEngine?: IObservationEngine;
    /**
     * Extract durable knowledge from observation text into the knowledge graph.
     * Optional for backward compatibility — always present on concrete Memoria instances.
     */
    extractFromObservations?(observations: string, options?: {
        sessionId?: string;
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /**
     * Return the most recently updated observation session ID.
     * When `arionId` is provided, scope to that arion's observation records.
     */
    getMostRecentSessionId?(options?: {
        arionId?: string;
    }): Promise<string | null>;
    /**
     * Fetch persisted active observations for a session.
     * Returns null when no observation record exists for the session.
     */
    getSessionObservations?(sessionId: string): Promise<string | null>;
    /**
     * Full-text search across observation records (LIKE on observations, current_task, suggested_response).
     * Returns matching records ordered by recency.
     */
    searchObservations?(query: string, limit?: number): Promise<ObservationQueryResult[]>;
    /**
     * Retrieve a specific field from the most recent observation record.
     * Valid fields: "suggested_response", "current_task", "active_observations".
     */
    getObservationField?(field: "suggested_response" | "current_task" | "active_observations"): Promise<string | null>;
    /**
     * Retrieve the N most recent observation records, ordered by updated_at DESC.
     */
    getRecentObservations?(limit?: number): Promise<ObservationQueryResult[]>;
    /** Shut down the memory system and release resources. */
    close(): Promise<void>;
    /**
     * Entity-first recall — fast-path for entity/self queries.
     * Returns a formatted string when the query matches a known entity or
     * self-query pattern. Returns null to signal fallback to APR.
     */
    entityRecall(query: string): Promise<string | null>;
    /** Whether this instance has been closed. */
    readonly isClosed: boolean;
    /** Return the total number of stored memories. */
    count(): Promise<number>;
    /** List stored memories. */
    list(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Get a config value by key. Returns null if not found. */
    getConfig(key: string): Promise<string | null>;
    /** Set a config value by key. Creates or overwrites. */
    setConfig(key: string, value: string): Promise<void>;
}
/**
* Types for ARIA's multi-model system
* @module @aria/types/models
*/
import type { NativeToolMetadata } from "./native-tools.js";
/**
 * Supported model providers.
 * "local" targets a self-hosted endpoint (see ModelConfig.endpoint);
 * "bedrock-converse" is presumably the AWS Bedrock Converse API — TODO confirm.
 */
export type ModelProvider = "anthropic" | "openai" | "github-copilot" | "openai-codex" | "google" | "local" | "bedrock" | "bedrock-converse";
/**
 * Model tier for task routing
 * - 'fast': Quick responses, simple tasks (e.g., Haiku, GPT-4o-mini)
 * - 'balanced': Good balance of speed and capability (e.g., Sonnet, GPT-5.2)
 * - 'powerful': Maximum capability for complex tasks (e.g., Opus, o1)
 * - 'ensemble': Multiple models working together
 */
export type ModelTier = "fast" | "balanced" | "powerful" | "ensemble";
/**
 * Configuration for a specific model
 */
export interface ModelConfig {
    /** Model provider */
    provider: ModelProvider;
    /** Model identifier (e.g., 'claude-opus-4-6') */
    modelId: string;
    /** Human-readable name */
    name: string;
    /** Model tier classification */
    tier: ModelTier;
    /** Maximum context window size in tokens */
    maxContextTokens: number;
    /** Maximum output tokens */
    maxOutputTokens: number;
    /** Cost per million input tokens (USD) */
    inputCostPerMillion: number;
    /** Cost per million output tokens (USD) */
    outputCostPerMillion: number;
    /** Capabilities this model supports */
    capabilities: {
        /** Supports tool/function calling */
        tools: boolean;
        /** Supports vision/image input */
        vision: boolean;
        /** Supports structured output (JSON mode) */
        structuredOutput: boolean;
        /** Supports streaming responses */
        streaming: boolean;
    };
    /** API endpoint (for local/custom providers) */
    endpoint?: string;
    /** Additional provider-specific options */
    options?: Record<string, unknown>;
}
/**
 * Configuration for the model router
 */
export interface ModelRouterConfig {
    /** Default model tier to use */
    defaultTier: ModelTier;
    /** Available models by tier */
    models: {
        fast: ModelConfig[];
        balanced: ModelConfig[];
        powerful: ModelConfig[];
        ensemble?: ModelConfig[];
    };
    /** Rules for automatic tier selection */
    routingRules: {
        /** Use powerful tier for tasks with risk above this level — scale unspecified here; TODO confirm. */
        riskThreshold: number;
        /** Use powerful tier for tasks with complexity above this score — scale unspecified here; TODO confirm. */
        complexityThreshold: number;
        /** Maximum cost per request before requiring approval (USD) */
        costThreshold: number;
    };
    /** Fallback model if primary fails */
    fallbackModel?: ModelConfig;
}
/**
 * Role of a message in a conversation
 */
export type MessageRole = "system" | "user" | "assistant" | "tool";
/**
 * A message in a conversation
 */
export interface Message {
    /** Role of the message sender */
    role: MessageRole;
    /** Text content of the message */
    content: string;
    /** Name of the sender (optional) */
    name?: string;
    /** Tool call ID this message is responding to (for tool role) */
    toolCallId?: string;
    /** Tool calls requested by the assistant for execution (present on assistant messages with tool_use blocks) */
    toolCalls?: ToolCall[];
    /** Image attachments (for vision models) */
    images?: {
        /** Base64 encoded image data or URL */
        data: string;
        /** MIME type of the image */
        mimeType: string;
    }[];
}
/**
 * JSON Schema for function tool parameters.
 * The index signature admits additional JSON-Schema keywords beyond the listed ones.
 */
export interface FunctionToolParameters {
    type: "object";
    properties: Record<string, unknown>;
    required?: string[];
    [key: string]: unknown;
}
/**
 * Function tool schema (standard ARIA tool).
 * The `never`-typed fields make this union member mutually exclusive with
 * NativeToolSchema so excess keys are rejected at compile time.
 */
export interface FunctionToolSchema {
    kind: "function";
    name: string;
    description: string;
    parameters: FunctionToolParameters;
    /** When true, the provider defers loading this tool until needed */
    defer_loading?: boolean;
    /** Native-only fields are forbidden on function tools */
    provider?: never;
    capability?: never;
    config?: never;
    suppresses?: never;
}
/**
 * Provider-native passthrough tool schema
 */
export interface NativeToolSchema {
    kind: "native";
    /** Provider that supplies this native tool */
    provider: "anthropic" | "openai" | "google";
    /** Native capability identifier */
    capability: string;
    /** Provider-specific native tool payload */
    config: Record<string, unknown>;
    /** ARIA function tools suppressed by this native tool */
    suppresses?: string[];
    /** Function-only fields are forbidden on native tools */
    name?: never;
    description?: never;
    parameters?: never;
    defer_loading?: never;
}
/**
 * Tool schema for model requests (discriminated by `kind`)
 */
export type ToolSchema = FunctionToolSchema | NativeToolSchema;
/**
 * Type guard for function tools
 */
export declare function isFunctionToolSchema(tool: ToolSchema): tool is FunctionToolSchema;
/**
 * Type guard for native tools
 */
export declare function isNativeToolSchema(tool: ToolSchema): tool is NativeToolSchema;
/**
 * A tool call requested by the model
 */
export interface ToolCall {
    /** Unique identifier for this tool call */
    id: string;
    /** Name of the tool to call */
    name: string;
    /** Arguments to pass to the tool (already parsed into an object, not a JSON string) */
    arguments: Record<string, unknown>;
    /** Gemini thought signature — must be replayed verbatim in conversation history */
    thoughtSignature?: string;
}
/**
 * Thinking mode for models that support extended reasoning.
 * - 'adaptive': Model decides when and how much to think (Opus 4.6+, recommended)
 * - 'enabled': Explicit thinking with a token budget (Sonnet 4.5, Opus 4.5)
 */
export type ThinkingMode = "adaptive" | "enabled";
/**
 * Configuration for extended thinking/reasoning
 */
export interface ThinkingConfig {
    /** Thinking mode */
    mode: ThinkingMode;
    /** Token budget for thinking (required for 'enabled' mode, ignored for 'adaptive') */
    budgetTokens?: number;
}
/**
 * Effort level controlling reasoning depth vs speed/cost.
 * - 'low': Fastest, cheapest. Skips thinking for simple tasks.
 * - 'medium': Balanced. May skip thinking for very simple queries.
 * - 'high': Default. Deep reasoning on complex tasks.
 * - 'max': Maximum capability, no constraints. Opus 4.6 only.
 */
export type EffortLevel = "low" | "medium" | "high" | "max";
/**
 * A thinking block from a model's internal reasoning process
 */
export interface ThinkingBlock {
    /** The thinking text content (empty string if redacted) */
    thinking: string;
    /** Cryptographic signature for verification (required for tool-use continuations) */
    signature?: string;
    /** Whether this block was redacted by safety systems */
    redacted?: boolean;
}
/**
 * Request to a model
 */
export interface ModelRequest {
    /** Messages in the conversation */
    messages: Message[];
    /** System prompt (if not in messages) */
    systemPrompt?: string;
    /** Available tools */
    tools?: ToolSchema[];
    /** Temperature for sampling (0-2) */
    temperature?: number;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Stop sequences */
    stopSequences?: string[];
    /** Whether to stream the response */
    stream?: boolean;
    /** Force structured JSON output */
    jsonMode?: boolean;
    /** Extended thinking configuration */
    thinking?: ThinkingConfig;
    /** Effort level for response quality vs speed/cost tradeoff */
    effort?: EffortLevel;
    /** Tool choice strategy (e.g. "auto", "none", or a specific tool name) — stringly-typed by design */
    tool_choice?: string;
    /** Request metadata */
    metadata?: {
        /** Task ID for tracking */
        taskId?: string;
        /** User ID for billing */
        userId?: string;
        /** Request priority */
        priority?: "low" | "normal" | "high";
    };
    /** Callback for streaming thinking blocks as they arrive (Gemini 2.5+/3.x).
     * Allows the runner to yield thinking events during model streaming
     * instead of waiting for the full response. */
    onThinking?: (text: string) => void;
    /** AbortSignal for cancelling in-flight HTTP requests.
     * Passed through to provider fetch() calls so Ctrl+C cancels immediately
     * instead of waiting for the next network chunk. */
    abortSignal?: AbortSignal;
}
/** Capabilities that a model provider supports */
export interface ProviderCapabilities {
    /** Whether the provider supports structured JSON output */
    jsonMode: boolean;
    /** Whether the provider supports extended thinking */
    thinking: boolean;
    /** Whether the provider supports tool/function calling */
    toolUse: boolean;
    /** Whether the provider supports streaming responses */
    streaming: boolean;
    /** Whether the provider supports native defer_loading on tool schemas */
    nativeDeferredLoading: boolean;
    /** Whether the provider supports native web search */
    nativeSearch: boolean;
    /** Whether the provider supports native web fetch */
    nativeFetch: boolean;
    /** Whether the provider supports native code execution */
    nativeCodeExec: boolean;
    /** Whether the provider supports native computer use */
    nativeComputerUse: boolean;
    /** Whether the provider supports native image generation */
    nativeImageGen: boolean;
    /** Whether the provider supports native file search */
    nativeFileSearch: boolean;
}
/**
 * Usage statistics from a model response
 */
export interface ModelUsage {
    /** Input tokens consumed (TOTAL context window — includes cached tokens) */
    inputTokens: number;
    /** Output tokens generated */
    outputTokens: number;
    /** Total tokens (input + output) */
    totalTokens: number;
    /** Estimated cost in USD */
    estimatedCost: number;
    /** Thinking tokens consumed (subset of outputTokens, if available) */
    thinkingTokens?: number;
    /** Tokens read from prompt cache (subset of inputTokens) */
    cacheReadTokens?: number;
    /** Tokens written to prompt cache (subset of inputTokens) */
    cacheCreationTokens?: number;
}
/**
 * Response from a model
 */
export interface ModelResponse {
    /** The model's text response */
    content: string;
    /** Tool calls requested by the model */
    toolCalls?: ToolCall[];
    /** Why the model stopped generating */
    stopReason: "end" | "max_tokens" | "tool_call" | "stop_sequence" | "aborted";
    /** Token usage statistics */
    usage: ModelUsage;
    /** Model that generated the response */
    model: string;
    /** Response generation time in milliseconds */
    latency: number;
    /** Unique ID for this response */
    responseId: string;
    /** Thinking blocks from the model's reasoning process */
    thinking?: ThinkingBlock[];
    /** Raw content blocks from the API (for tool-use continuations with thinking) */
    rawContentBlocks?: unknown[];
    /** Provider that generated the response */
    provider?: ModelProvider;
    /** Native tool metadata (e.g., search results, code exec output) */
    nativeToolMetadata?: NativeToolMetadata;
}
/**
* Native tool types and type guards for provider-native capabilities
* @module @aria/types/native-tools
*/
import type { ToolCall } from "./models.js";
/**
 * Type guard to check if a tool call is a function/custom tool
 * (as opposed to a native provider tool like search or code execution).
 * NOTE(review): returns plain `boolean`, not a TS type predicate — ToolCall has
 * no variant to narrow to, so this is a runtime predicate only.
 */
export declare function isFunctionTool(toolCall: ToolCall): boolean;
/**
 * Type guard to check if a tool call is a native provider tool
 * Native tools have special name prefixes like "brave_search", "computer", etc.
 */
export declare function isNativeTool(toolCall: ToolCall): boolean;
/**
 * A single source from a search result
 */
export interface NormalizedSource {
    /** URL of the source */
    url: string;
    /** Title of the source */
    title: string;
    /** Optional snippet/excerpt from the source */
    snippet?: string;
    /** Confidence score (0-1) if provided by the search API */
    confidence?: number;
    /** Which segments of the response cited this source */
    citedSegments?: Array<{
        /** Cited text, with its character offsets into the grounded response. */
        text: string;
        startIndex: number;
        endIndex: number;
    }>;
}
/**
 * Normalized metadata for native search tool calls
 * Abstracts over provider-specific search APIs (Anthropic Brave, Google Search, etc.)
 */
export interface NormalizedSearchMetadata {
    /** Provider that executed the search */
    provider: "anthropic" | "openai" | "google";
    /** Search queries executed (may be multiple if refined) */
    queries: string[];
    /** Sources returned by the search */
    sources: NormalizedSource[];
    /** The grounded response generated from search results (if available) */
    groundedResponse?: string;
}
/**
 * Metadata attached to ModelResponse when native tools are used
 */
export interface NativeToolMetadata {
    /** Search metadata (if native search was used) */
    search?: NormalizedSearchMetadata;
    /** Raw provider-specific metadata for other native tools */
    raw?: unknown;
}
/**
 * Magic exit code that tells the parent supervisor to respawn the child.
 */
export declare const RELAUNCH_EXIT_CODE = 199;
/**
 * Environment variables used by relaunch/supervisor flows.
 */
export declare const NO_RELAUNCH_ENV = "ARIA_NO_RELAUNCH";
export declare const RESUME_SESSION_ENV = "ARIA_RESUME_SESSION_ID";
export declare const RESUME_ARION_ENV = "ARIA_RESUME_ARION";
export declare const RESTART_KIND_ENV = "ARIA_RESTART_KIND";
/** State persisted across a relaunch so the respawned child can resume. */
export interface RelaunchMarker {
    /** Session to resume, or null when there is none. */
    sessionId: string | null;
    arionName: string;
    /** PID of the process that wrote the marker. */
    pid: number;
    /** Marker creation time — presumably an ISO-8601 string; TODO confirm format. */
    timestamp: string;
}
/** Directory where relaunch markers are stored. */
export declare function getRelaunchMarkerDir(): string;
/** Path to the marker file for `pid` — presumably defaults to the current process when omitted; TODO confirm. */
export declare function getRelaunchMarkerPath(pid?: number): string;
/** Persist a relaunch marker to disk. */
export declare function writeRelaunchMarker(marker: RelaunchMarker): void;
/** Read a relaunch marker; null when no marker exists for the pid. */
export declare function readRelaunchMarker(pid?: number): RelaunchMarker | null;
/** Delete the relaunch marker for the pid, if present. */
export declare function clearRelaunchMarker(pid?: number): void;
/**
 * Error narrowing utilities for replacing `catch (err: any)` patterns.
 * @module @aria-cli/types/errors
 */
/**
 * Extract a human-readable message from an unknown caught value.
 * Falls back to String(err) for non-Error, non-string values.
 *
 * Replaces the pattern:
 * `catch (err: any) { ... err.message ... }`
 * with:
 * `catch (err: unknown) { ... getErrorMessage(err) ... }`
 */
export declare function getErrorMessage(err: unknown): string;
/**
 * Extract an HTTP-style status code from an unknown caught value.
 * Returns undefined when no numeric `statusCode` property is present.
 *
 * Replaces the pattern:
 * `(error as any).statusCode`
 * with:
 * `getErrorStatusCode(error)`
 */
export declare function getErrorStatusCode(err: unknown): number | undefined;
/**
 * Type guard: is this caught value an Error with a `.code` property?
 * Common for Node.js system errors (ENOENT, ECONNREFUSED, etc.)
 */
export declare function isNodeError(err: unknown): err is Error & {
    code: string;
};
/**
 * @aria-cli/types - Minimal shared types
 *
 * Barrel module: re-exports the package's public surface from its submodules.
 */
export * from "./models.js";
export * from "./memoria.js";
export * from "./errors.js";
export * from "./native-tools.js";
export * from "./relaunch.js";
export { log, type LogLevel } from "./logger.js";
export type { EditFileOutput, WriteFileOutput, BashToolOutput, NotebookEditOutput, } from "./tool-outputs.js";
/**
 * Lightweight leveled logger for ARIA.
 *
 * NOTE(review): this section re-declares LogLevel/log already defined earlier
 * in this view — likely two build artifacts (package versions) concatenated;
 * confirm which copy is current. This copy lacks the sink API.
 *
 * Respects `ARIA_LOG_LEVEL` env var (debug | info | warn | error | silent).
 * Default level: "info" — debug messages are suppressed in normal usage.
 *
 * Usage:
 *   import { log } from "@aria-cli/types";
 *   log.debug("[PhaseTimer] ..."); // only prints when ARIA_LOG_LEVEL=debug
 *   log.info("[runner] ...");      // prints at info level and above
 *   log.warn("[runner] ...");      // prints at warn level and above
 *   log.error("[runner] ...");     // always prints (unless level > error)
 */
export type LogLevel = "debug" | "info" | "warn" | "error" | "silent";
/** Shared leveled-logger singleton (no sink API in this version). */
export declare const log: {
    debug(...args: unknown[]): void;
    info(...args: unknown[]): void;
    warn(...args: unknown[]): void;
    error(...args: unknown[]): void;
    /** Change the active log level at runtime. */
    setLevel(level: LogLevel): void;
    /** Return the currently active log level. */
    getLevel(): LogLevel;
};
/**
 * Minimal IMemoria interface for consumers that need memory capabilities
 * without depending on the concrete @aria-cli/memoria package.
 *
 * NOTE(review): re-declares MemoryItem/ToolItem/ToolSourceItem already defined
 * earlier in this view — likely two build artifacts concatenated; confirm.
 *
 * Structural types (MemoryItem, ToolItem, RecallResult) replace bare
 * `unknown` returns so consumers get usable shapes without coupling to
 * the full Memoria domain model.
 */
/** A single stored memory. */
export interface MemoryItem {
    /** Unique memory identifier. */
    id: string;
    /** Raw memory text. */
    content: string;
    summary?: string;
    network?: string;
    importance?: number;
    confidence?: number;
    createdAt?: Date;
    metadata?: Record<string, unknown>;
}
/** A single tool-store entry. */
export interface ToolItem {
    id: string;
    name: string;
    description: string;
    /** Provenance of the tool (built-in, external, or organically learned). */
    source?: ToolSourceItem;
    importance?: number;
    confidence?: number;
    tags?: string[];
    createdAt?: Date;
    updatedAt?: Date;
    accessedAt?: Date;
    accessCount?: number;
    evidenceIds?: string[];
    category?: "filesystem" | "code" | "shell" | "web" | "data" | "memory" | "meta" | "arion";
    parameters?: Record<string, unknown>;
    riskLevel?: "safe" | "moderate" | "dangerous";
    responseTemplate?: string;
    knowledge?: string;
    usageHint?: string;
    /** Numeric timestamps — presumably epoch ms; TODO confirm units. */
    adoptedAt?: number;
    lastUsedAt?: number;
    usageCount?: number;
    schema?: Record<string, unknown>;
    capabilities?: string[];
    metrics?: Record<string, unknown>;
}
/** Provenance of a tool-store entry, discriminated by `type`. */
export type ToolSourceItem = {
    type: "built-in";
} | {
    type: "external";
    ref: string;
    format: "skill-file" | "markdown" | "json" | "web-search";
} | {
    type: "organic";
    method: "execution" | "observation" | "feedback";
    episodeId?: string;
};
/** Result shape returned by observation query primitives. */
export interface ObservationQueryResult {
    /** Active observation text for the session. */
    observations: string;
    currentTask: string | null;
    suggestedResponse: string | null;
    sessionId: string;
    /** Last-update timestamp — presumably epoch ms; TODO confirm units. */
    updatedAt: number;
}
/** Result envelope returned by the APR pipeline. */
export interface RecallResult {
    memories: MemoryItem[];
    /** @deprecated Legacy alias retained for compatibility with older callers. */
    data?: MemoryItem[];
    query?: string;
    totalCount?: number;
    /** How much of the pipeline actually ran ("full" = nothing skipped). */
    degradationLevel?: "full" | "reduced" | "minimal";
    skippedStages?: string[];
    warnings?: string[];
}
/** Result from lightweight FTS5-only recall. */
export interface LightweightRecallResult {
    memories: MemoryItem[];
    /** Pre-rendered context string for prompt injection. */
    formattedContext: {
        context: string;
    };
    query: string;
}
/** One memory extracted from a conversation turn. */
export interface ExtractedConversationMemoryItem {
    content: string;
    /** Target network the extracted memory was routed to. */
    network: "world" | "episodes" | "beliefs" | "entities" | "procedures" | "strategies";
    /** Optional durability marker describing how the memory was routed */
    durability?: "permanent" | "session";
}
/** Outcome of conversation extraction; `error` is set on failure. */
export interface ConversationExtractionResult {
    learned: ExtractedConversationMemoryItem[];
    error?: string;
}
/** A condition under which a skill should activate. */
export interface SkillTriggerItem {
    type: "keyword" | "intent" | "context" | "entity";
    pattern: string;
    confidence: number;
}
/** Environment prerequisites a skill needs to run. */
export interface SkillRequirementsItem {
    /** Required executables on PATH. */
    bins?: string[];
    /** Required environment variables. */
    env?: string[];
    /** Supported operating systems (Node `process.platform` values). */
    os?: ("darwin" | "linux" | "win32")[];
}
/** Provenance of a skill, discriminated by `type`. */
export type SkillSourceItem = {
    /** Learned automatically from a recorded tool-use sequence. */
    type: "auto-learned";
    sequenceId: string;
} | {
    /** Loaded from a skill file on disk. */
    type: "file";
    path: string;
    format: "skill-file";
} | {
    /** Registered via a CLI command. */
    type: "cli";
    command: string;
} | {
    /** Imported from a web URL. */
    type: "web";
    url: string;
} | {
    /** Provided directly by the user. */
    type: "user";
    ref?: string;
};
/** A stored skill: reusable procedure with triggers, requirements, and usage stats. */
export interface SkillItem {
    id: string;
    name: string;
    description: string;
    /** The skill body/instructions. */
    content: string;
    toolIds: string[];
    triggers: SkillTriggerItem[];
    requires?: SkillRequirementsItem;
    tags: string[];
    source: SkillSourceItem;
    /** Lifetime execution count; `successCount` is the successful subset. */
    executionCount: number;
    successCount: number;
    /** Null until the skill has been executed at least once. */
    lastExecuted: Date | null;
    averageDurationMs: number | null;
    confidence: number;
    importance: number;
    createdAt: Date;
    updatedAt: Date;
    accessedAt: Date;
    accessCount: number;
    /** Set when the skill has been archived (soft-removed). */
    archivedAt?: Date;
}
/** One recorded execution of a skill. */
export interface SkillExecutionRecord {
    id: string;
    skillId: string;
    success: boolean;
    durationMs?: number;
    notes?: string;
    episodeId?: string;
    timestamp: Date;
}
/** One tool invocation within a recorded tool-use sequence. */
export interface ToolUseSequenceRecord {
    tool: string;
    input?: Record<string, unknown>;
    /** Tool output, serialized to string. */
    output?: string;
    success: boolean;
    timestamp: Date;
}
/**
 * Minimal observation engine interface for consumers that need observation
 * capabilities without depending on the concrete @aria-cli/memoria package.
 *
 * Matches the public surface of ObservationEngine from @aria-cli/memoria.
 */
export interface IObservationEngine {
    /** Prepare context before model call — may trigger observation and filter messages. */
    prepareContext(opts: {
        sessionId: string;
        /** Conversation messages; shape intentionally opaque at this layer. */
        messages: unknown[];
        originalSystemPrompt: string;
        /** When true, must not mutate observation state. */
        readOnly?: boolean;
        signal?: AbortSignal;
    }): Promise<{
        /** Messages remaining after observation-based filtering. */
        messagesToSend: unknown[];
        /** IDs of messages removed from the outgoing context. */
        filteredMessageIds: Set<string>;
        /** Whether this call triggered an observation pass. */
        observationTriggered: boolean;
    }>;
    /** Track new message IDs added to the conversation since last observation. */
    trackNewMessages(messageIds: string[]): void;
    /** Force observation of remaining unobserved messages at session end. */
    finalObservation(sessionId: string, messages?: unknown[], signal?: AbortSignal): Promise<void>;
    /** Observe messages — used for explicit observation triggers. */
    observe(sessionId: string, messages: unknown[], signal?: AbortSignal, opts?: {
        /** When true, observe even if the engine would normally skip. */
        force?: boolean;
    }): Promise<void>;
    /** Get the observation record for a session, or null. */
    getRecord(sessionId: string): unknown;
    /** Get the active observation text for a session. */
    getActiveObservations(sessionId: string): string;
    /** Drain pending observation work (wait for in-flight observations to complete). */
    drain(): Promise<void>;
}
/**
 * Minimal memory-system interface for consumers that need memory capabilities
 * without depending on the concrete @aria-cli/memoria package.
 *
 * Optional (`?`) members are newer additions kept optional for backward
 * compatibility with older implementations.
 */
export interface IMemoria {
    /** Store a memory. Returns an object with `id` on success, or null. */
    remember(content: string, options?: Record<string, unknown>): Promise<{
        id: string;
    } | null>;
    /**
     * Internal fast-path for storing execution-trace episodes without the full
     * entity extraction pipeline. Optional for backward compatibility.
     */
    storeEpisode?(content: string): Promise<{
        id: string;
    } | null>;
    /** Retrieve memories matching a query. Includes degradation metadata when available. */
    recall(query: string, options?: Record<string, unknown>): Promise<RecallResult>;
    /** Lightweight FTS5-only recall for session seeding. No LLM calls. */
    recallLightweight(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<LightweightRecallResult>;
    /**
     * Planner-first unified recall: 1 LLM call for classification + plan + expansion.
     * Routes to direct lookup (skipping APR) or mixed APR + primitive execution.
     * This is the preferred recall path — recallWithAPR delegates to this.
     */
    recallUnified?(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        } | null;
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        plan?: Array<{
            primitive: string;
            args: Record<string, unknown>;
        }>;
        planReasoning?: string;
        primitiveResults?: Array<{
            source: string;
            data: unknown;
        }>;
    }>;
    /** Retrieve memories via the Adaptive Parallel Retrieval pipeline. */
    recallWithAPR(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        };
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        query?: string;
        totalCount?: number;
        degradationLevel?: "full" | "reduced" | "minimal";
        skippedStages?: string[];
        warnings?: string[];
    }>;
    /** Recall memories by network type. Direct SQL query, no LLM classification. */
    recallByNetwork(network: string, options?: {
        limit?: number;
    }): Promise<MemoryItem[]>;
    /** Recall recent memories, optionally filtered by network. Direct SQL query sorted by created_at DESC. */
    recallRecent(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Recall memories by vector similarity. Uses embedding model only (0 LLM calls). */
    recallSimilar(query: string, options?: {
        k?: number;
        threshold?: number;
    }): Promise<MemoryItem[]>;
    /** Store a tool with semantic indexing. Returns the generated id. */
    rememberTool(tool: {
        name: string;
        description: string;
    } & Record<string, unknown>): Promise<string>;
    /** Query the tool store. */
    recallTools(options: {
        query: string;
        limit?: number;
        /** Optional offset used for paged list retrieval (matchAll mode). */
        offset?: number;
        minConfidence?: number;
        /** When true, bypasses semantic/keyword matching and lists stored tools. */
        matchAll?: boolean;
        /** When false, recall becomes read-only and does not mutate access stats. */
        updateAccessStats?: boolean;
    }): Promise<ToolItem[]>;
    /** Retrieve a single tool entry by ID. */
    getToolById(id: string): Promise<ToolItem | null>;
    /** Delete a tool entry by ID. Returns true if deleted, false if not found. */
    forgetTool(id: string): Promise<boolean>;
    /** Store a skill directly. Returns the generated skill ID. */
    rememberSkill(skill: {
        name: string;
        description: string;
        content: string;
        toolIds?: string[];
        triggers?: SkillTriggerItem[];
        requires?: SkillRequirementsItem;
        tags?: string[];
        source: SkillSourceItem;
        importance?: number;
        confidence?: number;
    }): Promise<string>;
    /** Skill retrieval API. */
    recallSkills(options?: {
        query?: string;
        triggerType?: SkillTriggerItem["type"];
        limit?: number;
    }): Promise<SkillItem[]>;
    /** Skill lookup API. */
    getSkill(idOrName: string): Promise<SkillItem | null>;
    /** Record skill execution metrics. */
    recordSkillExecution(options: {
        skillId: string;
        success: boolean;
        durationMs?: number;
        notes?: string;
        episodeId?: string;
    }): Promise<SkillExecutionRecord>;
    /** Start recording a tool-use sequence for procedural learning. */
    startToolSequence?(task: string, options?: {
        sessionId?: string;
    }): void;
    /** Record a single tool execution within the active sequence. */
    recordToolUse?(record: ToolUseSequenceRecord, options?: {
        sessionId?: string;
    }): void;
    /** Finalize the active tool-use sequence and trigger learning. */
    endToolSequence?(success: boolean, options?: {
        sessionId?: string;
    }): Promise<void>;
    /** Delete a single memory by ID. */
    deleteMemory(id: string): Promise<boolean>;
    /** Merge a JSON patch into a memory's metadata. */
    updateMemoryMetadata(id: string, patch: Record<string, unknown>): void;
    /** Soft-invalidate a memory in place (sets confidence to 0, marks reason). Allows recovery. */
    invalidateMemoryInPlace(memoryId: string, reason?: string): Promise<void>;
    /** Requeue failed embedding jobs and return how many were scheduled. */
    retryFailedEmbeddings?(): Promise<number>;
    /** Extract memories from a user/assistant conversation turn. */
    extractFromConversation(userMessage: string, assistantResponse: string, options?: {
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /** Observation engine for Mastra-style observation/reflection memory layer. */
    readonly observationEngine?: IObservationEngine;
    /**
     * Extract durable knowledge from observation text into the knowledge graph.
     * Optional for backward compatibility — always present on concrete Memoria instances.
     */
    extractFromObservations?(observations: string, options?: {
        sessionId?: string;
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /**
     * Return the most recently updated observation session ID.
     * When `arionId` is provided, scope to that arion's observation records.
     */
    getMostRecentSessionId?(options?: {
        arionId?: string;
    }): Promise<string | null>;
    /**
     * Fetch persisted active observations for a session.
     * Returns null when no observation record exists for the session.
     */
    getSessionObservations?(sessionId: string): Promise<string | null>;
    /**
     * Full-text search across observation records (LIKE on observations, current_task, suggested_response).
     * Returns matching records ordered by recency.
     */
    searchObservations?(query: string, limit?: number): Promise<ObservationQueryResult[]>;
    /**
     * Retrieve a specific field from the most recent observation record.
     * Valid fields: "suggested_response", "current_task", "active_observations".
     */
    getObservationField?(field: "suggested_response" | "current_task" | "active_observations"): Promise<string | null>;
    /**
     * Retrieve the N most recent observation records, ordered by updated_at DESC.
     */
    getRecentObservations?(limit?: number): Promise<ObservationQueryResult[]>;
    /** Shut down the memory system and release resources. */
    close(): Promise<void>;
    /**
     * Entity-first recall — fast-path for entity/self queries.
     * Returns a formatted string when the query matches a known entity or
     * self-query pattern. Returns null to signal fallback to APR.
     */
    entityRecall(query: string): Promise<string | null>;
    /** Whether this instance has been closed. */
    readonly isClosed: boolean;
    /** Return the total number of stored memories. */
    count(): Promise<number>;
    /** List stored memories. */
    list(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Get a config value by key. Returns null if not found. */
    getConfig(key: string): Promise<string | null>;
    /** Set a config value by key. Creates or overwrites. */
    setConfig(key: string, value: string): Promise<void>;
}
/**
* Types for ARIA's multi-model system
* @module @aria-cli/types/models
*/
import type { NativeToolMetadata } from "./native-tools.js";
/**
 * Supported model providers
 */
export type ModelProvider = "anthropic" | "openai" | "github-copilot" | "openai-codex" | "google" | "local" | "bedrock" | "bedrock-converse";
/**
 * Model tier for task routing
 * - 'fast': Quick responses, simple tasks (e.g., Haiku, GPT-4o-mini)
 * - 'balanced': Good balance of speed and capability (e.g., Sonnet, GPT-5.2)
 * - 'powerful': Maximum capability for complex tasks (e.g., Opus, o1)
 * - 'ensemble': Multiple models working together
 */
export type ModelTier = "fast" | "balanced" | "powerful" | "ensemble";
/**
 * Configuration for a specific model
 */
export interface ModelConfig {
    /** Model provider */
    provider: ModelProvider;
    /** Model identifier (e.g., 'claude-opus-4-6') */
    modelId: string;
    /** Human-readable name */
    name: string;
    /** Model tier classification */
    tier: ModelTier;
    /** Maximum context window size in tokens */
    maxContextTokens: number;
    /** Maximum output tokens */
    maxOutputTokens: number;
    /** Cost per million input tokens (USD) */
    inputCostPerMillion: number;
    /** Cost per million output tokens (USD) */
    outputCostPerMillion: number;
    /** Capabilities this model supports */
    capabilities: {
        /** Supports tool/function calling */
        tools: boolean;
        /** Supports vision/image input */
        vision: boolean;
        /** Supports structured output (JSON mode) */
        structuredOutput: boolean;
        /** Supports streaming responses */
        streaming: boolean;
    };
    /** API endpoint (for local/custom providers) */
    endpoint?: string;
    /** Additional provider-specific options */
    options?: Record<string, unknown>;
}
/**
 * Configuration for the model router
 */
export interface ModelRouterConfig {
    /** Default model tier to use */
    defaultTier: ModelTier;
    /** Available models by tier */
    models: {
        fast: ModelConfig[];
        balanced: ModelConfig[];
        powerful: ModelConfig[];
        /** Ensemble tier is optional — routers need not configure it. */
        ensemble?: ModelConfig[];
    };
    /** Rules for automatic tier selection */
    routingRules: {
        /** Use powerful tier for tasks with risk above this level */
        riskThreshold: number;
        /** Use powerful tier for tasks with complexity above this score */
        complexityThreshold: number;
        /** Maximum cost per request before requiring approval (USD) */
        costThreshold: number;
    };
    /** Fallback model if primary fails */
    fallbackModel?: ModelConfig;
}
/**
 * Role of a message in a conversation
 */
export type MessageRole = "system" | "user" | "assistant" | "tool";
/**
 * A message in a conversation
 */
export interface Message {
    /** Role of the message sender */
    role: MessageRole;
    /** Text content of the message */
    content: string;
    /** Name of the sender (optional) */
    name?: string;
    /** Tool call ID this message is responding to (for tool role) */
    toolCallId?: string;
    /** Tool calls requested by the assistant for execution (present on assistant messages with tool_use blocks) */
    toolCalls?: ToolCall[];
    /** Image attachments (for vision models) */
    images?: {
        /** Base64 encoded image data or URL */
        data: string;
        /** MIME type of the image */
        mimeType: string;
    }[];
}
/**
 * JSON Schema for function tool parameters
 */
export interface FunctionToolParameters {
    /** Always "object" — tool parameters are a JSON Schema object. */
    type: "object";
    /** JSON Schema property definitions keyed by parameter name. */
    properties: Record<string, unknown>;
    /** Names of required parameters. */
    required?: string[];
    /** Additional JSON Schema keywords are allowed. */
    [key: string]: unknown;
}
/**
 * Function tool schema (standard ARIA tool)
 */
export interface FunctionToolSchema {
    kind: "function";
    name: string;
    description: string;
    parameters: FunctionToolParameters;
    /** When true, the provider defers loading this tool until needed */
    defer_loading?: boolean;
    /** Native-only fields are forbidden on function tools */
    provider?: never;
    capability?: never;
    config?: never;
    suppresses?: never;
}
/**
 * Provider-native passthrough tool schema
 */
export interface NativeToolSchema {
    kind: "native";
    /** Provider that supplies this native tool */
    provider: "anthropic" | "openai" | "google";
    /** Native capability identifier */
    capability: string;
    /** Provider-specific native tool payload */
    config: Record<string, unknown>;
    /** ARIA function tools suppressed by this native tool */
    suppresses?: string[];
    /** Function-only fields are forbidden on native tools */
    name?: never;
    description?: never;
    parameters?: never;
    defer_loading?: never;
}
/**
 * Tool schema for model requests (discriminated by `kind`)
 */
export type ToolSchema = FunctionToolSchema | NativeToolSchema;
/**
 * Type guard for function tools
 *
 * @returns true when `tool.kind` identifies a function tool, narrowing to FunctionToolSchema.
 */
export declare function isFunctionToolSchema(tool: ToolSchema): tool is FunctionToolSchema;
/**
 * Type guard for native tools
 *
 * @returns true when `tool.kind` identifies a native tool, narrowing to NativeToolSchema.
 */
export declare function isNativeToolSchema(tool: ToolSchema): tool is NativeToolSchema;
/**
 * A tool call requested by the model
 */
export interface ToolCall {
    /** Unique identifier for this tool call */
    id: string;
    /** Name of the tool to call */
    name: string;
    /** Arguments to pass to the tool */
    arguments: Record<string, unknown>;
    /** Gemini thought signature — must be replayed verbatim in conversation history */
    thoughtSignature?: string;
}
/**
 * Thinking mode for models that support extended reasoning.
 * - 'adaptive': Model decides when and how much to think (Opus 4.6+, recommended)
 * - 'enabled': Explicit thinking with a token budget (Sonnet 4.5, Opus 4.5)
 */
export type ThinkingMode = "adaptive" | "enabled";
/**
 * Configuration for extended thinking/reasoning
 *
 * @see ThinkingBlock for the response-side representation of thinking output.
 */
export interface ThinkingConfig {
    /** Thinking mode */
    mode: ThinkingMode;
    /** Token budget for thinking (required for 'enabled' mode, ignored for 'adaptive') */
    budgetTokens?: number;
}
/**
 * Effort level controlling reasoning depth vs speed/cost.
 * - 'low': Fastest, cheapest. Skips thinking for simple tasks.
 * - 'medium': Balanced. May skip thinking for very simple queries.
 * - 'high': Default. Deep reasoning on complex tasks.
 * - 'max': Maximum capability, no constraints. Opus 4.6 only.
 */
export type EffortLevel = "low" | "medium" | "high" | "max";
/**
 * A thinking block from a model's internal reasoning process
 */
export interface ThinkingBlock {
    /** The thinking text content (empty string if redacted) */
    thinking: string;
    /** Cryptographic signature for verification (required for tool-use continuations) */
    signature?: string;
    /** Whether this block was redacted by safety systems */
    redacted?: boolean;
}
/**
 * Request to a model
 *
 * NOTE(review): unsupported optional fields are presumably ignored by
 * providers that lack the capability — confirm against ProviderCapabilities
 * handling in the provider implementations.
 */
export interface ModelRequest {
    /** Messages in the conversation */
    messages: Message[];
    /** System prompt (if not in messages) */
    systemPrompt?: string;
    /** Available tools */
    tools?: ToolSchema[];
    /** Temperature for sampling (0-2) */
    temperature?: number;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Stop sequences */
    stopSequences?: string[];
    /** Whether to stream the response */
    stream?: boolean;
    /** Force structured JSON output */
    jsonMode?: boolean;
    /** Extended thinking configuration */
    thinking?: ThinkingConfig;
    /** Effort level for response quality vs speed/cost tradeoff */
    effort?: EffortLevel;
    /** Tool choice strategy (e.g. "auto", "none", or a specific tool name) */
    tool_choice?: string;
    /** Request metadata */
    metadata?: {
        /** Task ID for tracking */
        taskId?: string;
        /** User ID for billing */
        userId?: string;
        /** Request priority */
        priority?: "low" | "normal" | "high";
    };
    /** Callback for streaming thinking blocks as they arrive (Gemini 2.5+/3.x).
     * Allows the runner to yield thinking events during model streaming
     * instead of waiting for the full response. */
    onThinking?: (text: string) => void;
    /** AbortSignal for cancelling in-flight HTTP requests.
     * Passed through to provider fetch() calls so Ctrl+C cancels immediately
     * instead of waiting for the next network chunk. */
    abortSignal?: AbortSignal;
}
/** Capabilities that a model provider supports */
export interface ProviderCapabilities {
    /** Whether the provider supports structured JSON output */
    jsonMode: boolean;
    /** Whether the provider supports extended thinking */
    thinking: boolean;
    /** Whether the provider supports tool/function calling */
    toolUse: boolean;
    /** Whether the provider supports streaming responses */
    streaming: boolean;
    /** Whether the provider supports native defer_loading on tool schemas */
    nativeDeferredLoading: boolean;
    /** Whether the provider supports native web search */
    nativeSearch: boolean;
    /** Whether the provider supports native web fetch */
    nativeFetch: boolean;
    /** Whether the provider supports native code execution */
    nativeCodeExec: boolean;
    /** Whether the provider supports native computer use */
    nativeComputerUse: boolean;
    /** Whether the provider supports native image generation */
    nativeImageGen: boolean;
    /** Whether the provider supports native file search */
    nativeFileSearch: boolean;
}
/**
 * Usage statistics from a model response
 */
export interface ModelUsage {
    /** Input tokens consumed (TOTAL context window — includes cached tokens) */
    inputTokens: number;
    /** Output tokens generated */
    outputTokens: number;
    /** Total tokens (input + output) */
    totalTokens: number;
    /** Estimated cost in USD */
    estimatedCost: number;
    /** Thinking tokens consumed (subset of outputTokens, if available) */
    thinkingTokens?: number;
    /** Tokens read from prompt cache (subset of inputTokens) */
    cacheReadTokens?: number;
    /** Tokens written to prompt cache (subset of inputTokens) */
    cacheCreationTokens?: number;
}
/**
 * Response from a model
 */
export interface ModelResponse {
    /** The model's text response */
    content: string;
    /** Tool calls requested by the model */
    toolCalls?: ToolCall[];
    /** Why the model stopped generating.
     * NOTE(review): "aborted" presumably corresponds to abortSignal cancellation — confirm in providers. */
    stopReason: "end" | "max_tokens" | "tool_call" | "stop_sequence" | "aborted";
    /** Token usage statistics */
    usage: ModelUsage;
    /** Model that generated the response */
    model: string;
    /** Response generation time in milliseconds */
    latency: number;
    /** Unique ID for this response */
    responseId: string;
    /** Thinking blocks from the model's reasoning process */
    thinking?: ThinkingBlock[];
    /** Raw content blocks from the API (for tool-use continuations with thinking) */
    rawContentBlocks?: unknown[];
    /** Provider that generated the response */
    provider?: ModelProvider;
    /** Native tool metadata (e.g., search results, code exec output) */
    nativeToolMetadata?: NativeToolMetadata;
}
/**
* Native tool types and type guards for provider-native capabilities
* @module @aria-cli/types/native-tools
*/
import type { ToolCall } from "./models.js";
/**
 * Type guard to check if a tool call is a function/custom tool
 * (as opposed to a native provider tool like search or code execution)
 */
export declare function isFunctionTool(toolCall: ToolCall): boolean;
/**
 * Type guard to check if a tool call is a native provider tool
 * Native tools have special name prefixes like "brave_search", "computer", etc.
 */
export declare function isNativeTool(toolCall: ToolCall): boolean;
/**
 * A single source from a search result
 */
export interface NormalizedSource {
    /** URL of the source */
    url: string;
    /** Title of the source */
    title: string;
    /** Optional snippet/excerpt from the source */
    snippet?: string;
    /** Confidence score (0-1) if provided by the search API */
    confidence?: number;
    /** Which segments of the response cited this source */
    citedSegments?: Array<{
        /** Cited text span. */
        text: string;
        /** Start offset of the span (presumably into the grounded response — confirm). */
        startIndex: number;
        /** End offset of the span. */
        endIndex: number;
    }>;
}
/**
 * Normalized metadata for native search tool calls
 * Abstracts over provider-specific search APIs (Anthropic Brave, Google Search, etc.)
 */
export interface NormalizedSearchMetadata {
    /** Provider that executed the search */
    provider: "anthropic" | "openai" | "google";
    /** Search queries executed (may be multiple if refined) */
    queries: string[];
    /** Sources returned by the search */
    sources: NormalizedSource[];
    /** The grounded response generated from search results (if available) */
    groundedResponse?: string;
}
/**
 * Metadata attached to ModelResponse when native tools are used
 */
export interface NativeToolMetadata {
    /** Search metadata (if native search was used) */
    search?: NormalizedSearchMetadata;
    /** Raw provider-specific metadata for other native tools */
    raw?: unknown;
}
/**
 * Magic exit code that tells the parent supervisor to respawn the child.
 */
export declare const RELAUNCH_EXIT_CODE = 199;
/**
 * Environment variables used by relaunch/supervisor flows.
 */
export declare const NO_RELAUNCH_ENV = "ARIA_NO_RELAUNCH";
export declare const RESUME_SESSION_ENV = "ARIA_RESUME_SESSION_ID";
export declare const RESUME_ARION_ENV = "ARIA_RESUME_ARION";
export declare const RESTART_KIND_ENV = "ARIA_RESTART_KIND";
/** State persisted across a relaunch so the respawned child can resume. */
export interface RelaunchMarker {
    /** Session to resume, or null when none. */
    sessionId: string | null;
    arionName: string;
    /** Process ID recorded in the marker. */
    pid: number;
    timestamp: string;
}
/** Directory where relaunch markers are stored. */
export declare function getRelaunchMarkerDir(): string;
/** Path to the relaunch marker for `pid` (presumably defaults to the current process — confirm). */
export declare function getRelaunchMarkerPath(pid?: number): string;
/** Persist a relaunch marker. */
export declare function writeRelaunchMarker(marker: RelaunchMarker): void;
/** Read a relaunch marker, or null when none exists. */
export declare function readRelaunchMarker(pid?: number): RelaunchMarker | null;
/** Remove the relaunch marker for `pid`, if present. */
export declare function clearRelaunchMarker(pid?: number): void;
/** Canonical output shapes for tools with dedicated renderers.
 * Executors MUST return these shapes. Renderers consume them directly. */
export interface EditFileOutput {
    /** Path of the edited file. */
    filePath: string;
    /** Unified-diff-style hunks describing the change. */
    structuredPatch: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
    /** Number of replacements applied. */
    replacements: number;
    /** Matching strategy that produced the edit. */
    strategy: string;
}
/** Output of a file-write tool. */
export interface WriteFileOutput {
    /** Path of the written file. */
    filePath: string;
    /** How the write affected the file. */
    action: "created" | "overwritten" | "appended";
    /** Number of bytes written. */
    bytesWritten: number;
    /** Optional unified-diff-style hunks for rendering the change. */
    structuredPatch?: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
}
/** Output of the bash tool: captured streams plus exit code. */
export interface BashToolOutput {
    stdout: string;
    stderr: string;
    exitCode: number;
}
/** Output of a notebook cell edit. */
export interface NotebookEditOutput {
    cellNumber: number;
    newSource: string;
    language: string;
    /** Error message when the edit failed. */
    error?: string;
}
/** Set the global stall-phase label.
 * NOTE(review): semantics inferred from the name — presumably tags the
 * currently running phase for stall diagnostics; confirm in stall-phase.js. */
export declare function setGlobalStallPhase(label: string): void;
/** Clear any previously set global stall-phase label. */
export declare function clearGlobalStallPhase(): void;
/** Canonical output shapes for tools with dedicated renderers.
 * Executors MUST return these shapes. Renderers consume them directly. */
export interface EditFileOutput {
    /** Path of the edited file. */
    filePath: string;
    /** Unified-diff-style hunks describing the change. */
    structuredPatch: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
    /** Number of replacements applied. */
    replacements: number;
    /** Matching strategy that produced the edit. */
    strategy: string;
}
/** Output of a file-write tool. */
export interface WriteFileOutput {
    /** Path of the written file. */
    filePath: string;
    /** How the write affected the file. */
    action: "created" | "overwritten" | "appended";
    /** Number of bytes written. */
    bytesWritten: number;
    /** Optional unified-diff-style hunks for rendering the change. */
    structuredPatch?: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
}
/** Output of the bash tool: captured streams plus exit code. */
export interface BashToolOutput {
    stdout: string;
    stderr: string;
    exitCode: number;
}
/** Output of a notebook cell edit. */
export interface NotebookEditOutput {
    cellNumber: number;
    newSource: string;
    language: string;
    /** Error message when the edit failed. */
    error?: string;
}
/** NOTE(review): this default export is the declaration emitted from a
 * vite.config.ts — it appears to be a build artifact concatenated into this
 * declaration bundle rather than part of the public API; confirm. */
declare const _default: import("vite").UserConfig;
export default _default;
/**
 * Error narrowing utilities for replacing `catch (err: any)` patterns.
 * @module @aria-cli/types/errors
 */
/**
 * Extract a human-readable message from an unknown caught value.
 *
 * Replaces the pattern:
 * `catch (err: any) { ... err.message ... }`
 * with:
 * `catch (err: unknown) { ... getErrorMessage(err) ... }`
 *
 * @param err - Any caught value.
 * @returns `err.message` for Error instances, the string itself for strings,
 *   otherwise `String(err)`.
 */
export declare function getErrorMessage(err: unknown): string;
/**
 * Extract an HTTP-style status code from an unknown caught value.
 *
 * Replaces the pattern:
 * `(error as any).statusCode`
 * with:
 * `getErrorStatusCode(error)`
 *
 * @param err - Any caught value.
 * @returns The numeric `statusCode` property when present, otherwise undefined.
 */
export declare function getErrorStatusCode(err: unknown): number | undefined;
/**
 * Type guard: is this caught value an Error with a `.code` property?
 * Common for Node.js system errors (ENOENT, ECONNREFUSED, etc.)
 *
 * @param err - Any caught value.
 * @returns true when `err` is an Error carrying a string `code`.
 */
export declare function isNodeError(err: unknown): err is Error & {
    code: string;
};
/**
 * @aria-cli/types - Minimal shared types
 *
 * Barrel module re-exporting the package's public surface.
 */
export * from "./models.js";
export * from "./memoria.js";
export * from "./errors.js";
export * from "./native-tools.js";
export * from "./relaunch.js";
export * from "./stall-phase.js";
export { log, type LogLevel, type LogSink } from "./logger.js";
export type { EditFileOutput, WriteFileOutput, BashToolOutput, NotebookEditOutput, } from "./tool-outputs.js";
/**
 * Lightweight leveled logger for ARIA.
 *
 * Respects `ARIA_LOG_LEVEL` env var (debug | info | warn | error | silent).
 * Default level: "info" — debug messages are suppressed in normal usage.
 *
 * Architecture: logs dispatch through pluggable sinks. The default console
 * sink writes to stdout/stderr with ANSI styling. Execution contexts that
 * lack a terminal (daemon, headless server) replace or supplement it with
 * durable sinks (e.g., JSONL file sink). This decoupling makes it impossible
 * for diagnostic output to silently go to /dev/null — every context
 * configures the sink that matches its output channel.
 *
 * Usage:
 * import { log } from "@aria-cli/types";
 * log.debug("[PhaseTimer] ..."); // only prints when ARIA_LOG_LEVEL=debug
 * log.info("[runner] ..."); // prints at info level and above
 * log.warn("[runner] ..."); // prints at warn level and above
 * log.error("[runner] ..."); // always prints (unless level > error)
 *
 * Sink configuration (daemon example):
 * import { log } from "@aria-cli/types";
 * log.addSink(myFileLogSink);
 * log.removeConsoleSink(); // stdio is /dev/null in detached mode
 */
export type LogLevel = "debug" | "info" | "warn" | "error" | "silent";
/**
 * A log sink receives raw (unstyled) log arguments and writes them
 * to a destination. Each sink decides its own formatting.
 */
export interface LogSink {
    /** Write one log entry at the given level. */
    write(level: LogLevel, args: unknown[]): void;
    /** Optional cleanup hook (e.g., flush/close a file handle). */
    close?(): void;
}
export declare const log: {
    /** Log at debug level (suppressed unless level is "debug"). */
    debug(...args: unknown[]): void;
    /** Log at info level and above. */
    info(...args: unknown[]): void;
    /** Log at warn level and above. */
    warn(...args: unknown[]): void;
    /** Log at error level (prints unless level is "silent"). */
    error(...args: unknown[]): void;
    /** Override the current log level at runtime. */
    setLevel(level: LogLevel): void;
    /** Current effective log level. */
    getLevel(): LogLevel;
    /** Add a log sink. All subsequent log calls dispatch to it. */
    addSink(sink: LogSink): void;
    /** Remove a previously added sink. */
    removeSink(sink: LogSink): void;
    /**
     * Remove the built-in console sink.
     *
     * Use this in execution contexts where stdio is unavailable (detached
     * daemon, headless server) AFTER adding a durable file sink. Prevents
     * wasted console.* calls that go to /dev/null.
     */
    removeConsoleSink(): void;
};
/**
 * Minimal IMemoria interface for consumers that need memory capabilities
 * without depending on the concrete @aria-cli/memoria package.
 *
 * Structural types (MemoryItem, ToolItem, RecallResult) replace bare
 * `unknown` returns so consumers get usable shapes without coupling to
 * the full Memoria domain model.
 */
/** A single stored memory. */
export interface MemoryItem {
    id: string;
    /** The stored memory text. */
    content: string;
    /** Optional short summary of the content. */
    summary?: string;
    /** Memory network this item belongs to (e.g. "episodes", "beliefs"). */
    network?: string;
    importance?: number;
    confidence?: number;
    createdAt?: Date;
    /** Arbitrary metadata attached to the memory. */
    metadata?: Record<string, unknown>;
}
/** A single tool-store entry. */
export interface ToolItem {
    id: string;
    name: string;
    description: string;
    /** Provenance of the tool entry. */
    source?: ToolSourceItem;
    importance?: number;
    confidence?: number;
    tags?: string[];
    createdAt?: Date;
    updatedAt?: Date;
    accessedAt?: Date;
    accessCount?: number;
    /** IDs of memories that serve as evidence for this tool. */
    evidenceIds?: string[];
    category?: "filesystem" | "code" | "shell" | "web" | "data" | "memory" | "meta" | "arion";
    parameters?: Record<string, unknown>;
    riskLevel?: "safe" | "moderate" | "dangerous";
    responseTemplate?: string;
    knowledge?: string;
    usageHint?: string;
    /** Numeric timestamps (presumably epoch milliseconds — confirm in store). */
    adoptedAt?: number;
    lastUsedAt?: number;
    usageCount?: number;
    schema?: Record<string, unknown>;
    capabilities?: string[];
    metrics?: Record<string, unknown>;
}
/** Provenance of a tool-store entry. */
export type ToolSourceItem = {
    type: "built-in";
} | {
    type: "external";
    ref: string;
    format: "skill-file" | "markdown" | "json" | "web-search";
} | {
    type: "organic";
    method: "execution" | "observation" | "feedback";
    episodeId?: string;
};
/** Result shape returned by observation query primitives. */
export interface ObservationQueryResult {
    /** Active observation text for the session. */
    observations: string;
    currentTask: string | null;
    suggestedResponse: string | null;
    sessionId: string;
    /** Last update time (numeric timestamp). */
    updatedAt: number;
}
/** Result envelope returned by the APR pipeline. */
export interface RecallResult {
    memories: MemoryItem[];
    /** @deprecated Legacy alias retained for compatibility with older callers. */
    data?: MemoryItem[];
    query?: string;
    totalCount?: number;
    /** How much of the pipeline ran (see skippedStages for details). */
    degradationLevel?: "full" | "reduced" | "minimal";
    skippedStages?: string[];
    warnings?: string[];
}
/** Result from lightweight FTS5-only recall. */
export interface LightweightRecallResult {
    /** Memories matched by the FTS5 query. */
    memories: MemoryItem[];
    /** Formatted context derived from the matched memories. */
    formattedContext: {
        context: string;
    };
    /** The query string that produced these results. */
    query: string;
}
/** A single memory item extracted from a conversation turn. */
export interface ExtractedConversationMemoryItem {
    /** The extracted memory text. */
    content: string;
    /** Memory network this item was routed to. */
    network: "world" | "episodes" | "beliefs" | "entities" | "procedures" | "strategies";
    /** Optional durability marker describing how the memory was routed */
    durability?: "permanent" | "session";
}
/** Result of extracting memories from a conversation. */
export interface ConversationExtractionResult {
    /** Memory items learned from the conversation. */
    learned: ExtractedConversationMemoryItem[];
    /** Error message if extraction failed. */
    error?: string;
}
/** A pattern that can trigger activation of a skill. */
export interface SkillTriggerItem {
    /** Kind of signal the pattern matches against. */
    type: "keyword" | "intent" | "context" | "entity";
    /** The pattern text to match. */
    pattern: string;
    /** Confidence score for this trigger. */
    confidence: number;
}
/** Environment prerequisites a skill needs in order to run. */
export interface SkillRequirementsItem {
    /** Required executables (binaries). */
    bins?: string[];
    /** Required environment variables. */
    env?: string[];
    /** Operating systems the skill supports. */
    os?: ("darwin" | "linux" | "win32")[];
}
/** Provenance of a skill — how it entered the skill store. */
export type SkillSourceItem = {
    type: "auto-learned";
    /** ID of the tool-use sequence the skill was learned from. */
    sequenceId: string;
} | {
    type: "file";
    path: string;
    format: "skill-file";
} | {
    type: "cli";
    command: string;
} | {
    type: "web";
    url: string;
} | {
    type: "user";
    ref?: string;
};
/** A stored skill with its triggers, provenance, and usage metrics. */
export interface SkillItem {
    id: string;
    name: string;
    description: string;
    /** The skill body/content. */
    content: string;
    /** IDs of associated tools. */
    toolIds: string[];
    triggers: SkillTriggerItem[];
    requires?: SkillRequirementsItem;
    tags: string[];
    source: SkillSourceItem;
    /** Total number of recorded executions. */
    executionCount: number;
    /** Number of recorded successful executions. */
    successCount: number;
    lastExecuted: Date | null;
    averageDurationMs: number | null;
    confidence: number;
    importance: number;
    createdAt: Date;
    updatedAt: Date;
    accessedAt: Date;
    accessCount: number;
    /** Set when the skill has been archived. */
    archivedAt?: Date;
}
/** A single recorded execution of a skill. */
export interface SkillExecutionRecord {
    id: string;
    skillId: string;
    success: boolean;
    durationMs?: number;
    notes?: string;
    /** Episode memory linked to this execution, if any. */
    episodeId?: string;
    timestamp: Date;
}
/** One tool invocation recorded within a tool-use sequence. */
export interface ToolUseSequenceRecord {
    /** Name of the tool that was invoked. */
    tool: string;
    input?: Record<string, unknown>;
    output?: string;
    success: boolean;
    timestamp: Date;
}
/**
 * Minimal observation engine interface for consumers that need observation
 * capabilities without depending on the concrete @aria-cli/memoria package.
 *
 * Matches the public surface of ObservationEngine from @aria-cli/memoria.
 */
export interface IObservationEngine {
    /** Prepare context before model call — may trigger observation and filter messages. */
    prepareContext(opts: {
        sessionId: string;
        messages: unknown[];
        originalSystemPrompt: string;
        readOnly?: boolean;
        signal?: AbortSignal;
    }): Promise<{
        /** Messages to forward to the model after filtering. */
        messagesToSend: unknown[];
        /** IDs of messages removed from the outgoing set. */
        filteredMessageIds: Set<string>;
        /** True when this call triggered an observation pass. */
        observationTriggered: boolean;
    }>;
    /** Track new message IDs added to the conversation since last observation. */
    trackNewMessages(messageIds: string[]): void;
    /** Force observation of remaining unobserved messages at session end. */
    finalObservation(sessionId: string, messages?: unknown[], signal?: AbortSignal): Promise<void>;
    /** Observe messages — used for explicit observation triggers. */
    observe(sessionId: string, messages: unknown[], signal?: AbortSignal, opts?: {
        force?: boolean;
    }): Promise<void>;
    /** Get the observation record for a session, or null. */
    getRecord(sessionId: string): unknown;
    /** Get the active observation text for a session. */
    getActiveObservations(sessionId: string): string;
    /** Drain pending observation work (wait for in-flight observations to complete). */
    drain(): Promise<void>;
}
/**
 * Public surface of the ARIA memory system ("Memoria").
 *
 * Covers memory storage and the various recall paths (APR, unified, direct,
 * lightweight), the tool and skill stores, procedural tool-sequence learning,
 * observation access, and config key/value storage.
 *
 * Optional members (marked `?`) may be absent on older implementations and
 * must be feature-checked before use.
 */
export interface IMemoria {
    /** Store a memory. Returns an object with `id` on success, or null. */
    remember(content: string, options?: Record<string, unknown>): Promise<{
        id: string;
    } | null>;
    /**
     * Internal fast-path for storing execution-trace episodes without the full
     * entity extraction pipeline. Optional for backward compatibility.
     */
    storeEpisode?(content: string): Promise<{
        id: string;
    } | null>;
    /** Retrieve memories matching a query. Includes degradation metadata when available. */
    recall(query: string, options?: Record<string, unknown>): Promise<RecallResult>;
    /** Lightweight FTS5-only recall for session seeding. No LLM calls. */
    recallLightweight(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<LightweightRecallResult>;
    /**
     * Planner-first unified recall: 1 LLM call for classification + plan + expansion.
     * Routes to direct lookup (skipping APR) or mixed APR + primitive execution.
     * This is the preferred recall path — recallWithAPR delegates to this.
     */
    recallUnified?(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        } | null;
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        plan?: Array<{
            primitive: string;
            args: Record<string, unknown>;
        }>;
        planReasoning?: string;
        primitiveResults?: Array<{
            source: string;
            data: unknown;
        }>;
    }>;
    /** Retrieve memories via the Adaptive Parallel Retrieval pipeline. */
    recallWithAPR(query: string, options?: {
        limit?: number;
        networks?: string[];
    }): Promise<{
        memories: MemoryItem[];
        formattedContext?: {
            context?: string;
            text?: string;
            tokenCount?: number;
        };
        intent?: {
            type?: string;
            confidence?: number;
            entities?: string[];
            temporalFocus?: string | null;
            extractedEntities?: string[];
            extractedTimeframe?: Record<string, unknown>;
        };
        sourceStats?: unknown;
        query?: string;
        totalCount?: number;
        degradationLevel?: "full" | "reduced" | "minimal";
        skippedStages?: string[];
        warnings?: string[];
    }>;
    /**
     * Lightweight vector-only recall with network filtering. Bypasses the full APR
     * pipeline (no classification, no multi-index fusion, no diversity). Designed
     * for time-critical paths like session bootstrap.
     */
    recallDirect(query: string, options: {
        networks: string[];
        limit: number;
        threshold?: number;
    }): Promise<MemoryItem[]>;
    /** Recall memories by network type. Direct SQL query, no LLM classification. */
    recallByNetwork(network: string, options?: {
        limit?: number;
    }): Promise<MemoryItem[]>;
    /** Recall recent memories, optionally filtered by network. Direct SQL query sorted by created_at DESC. */
    recallRecent(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Recall memories by vector similarity. Uses embedding model only (0 LLM calls). */
    recallSimilar(query: string, options?: {
        k?: number;
        threshold?: number;
    }): Promise<MemoryItem[]>;
    /** Store a tool with semantic indexing. Returns the generated id. */
    rememberTool(tool: {
        name: string;
        description: string;
    } & Record<string, unknown>): Promise<string>;
    /** Query the tool store. */
    recallTools(options: {
        query: string;
        limit?: number;
        /** Optional offset used for paged list retrieval (matchAll mode). */
        offset?: number;
        minConfidence?: number;
        /** When true, bypasses semantic/keyword matching and lists stored tools. */
        matchAll?: boolean;
        /** When false, recall becomes read-only and does not mutate access stats. */
        updateAccessStats?: boolean;
    }): Promise<ToolItem[]>;
    /** Retrieve a single tool entry by ID. */
    getToolById(id: string): Promise<ToolItem | null>;
    /** Delete a tool entry by ID. Returns true if deleted, false if not found. */
    forgetTool(id: string): Promise<boolean>;
    /** Store a skill directly. Returns the generated skill ID. */
    rememberSkill(skill: {
        name: string;
        description: string;
        content: string;
        toolIds?: string[];
        triggers?: SkillTriggerItem[];
        requires?: SkillRequirementsItem;
        tags?: string[];
        source: SkillSourceItem;
        importance?: number;
        confidence?: number;
    }): Promise<string>;
    /** Skill retrieval API. */
    recallSkills(options?: {
        query?: string;
        triggerType?: SkillTriggerItem["type"];
        limit?: number;
    }): Promise<SkillItem[]>;
    /** Skill lookup API. */
    getSkill(idOrName: string): Promise<SkillItem | null>;
    /** Record skill execution metrics. */
    recordSkillExecution(options: {
        skillId: string;
        success: boolean;
        durationMs?: number;
        notes?: string;
        episodeId?: string;
    }): Promise<SkillExecutionRecord>;
    /** Start recording a tool-use sequence for procedural learning. */
    startToolSequence?(task: string, options?: {
        sessionId?: string;
    }): void;
    /** Record a single tool execution within the active sequence. */
    recordToolUse?(record: ToolUseSequenceRecord, options?: {
        sessionId?: string;
    }): void;
    /** Finalize the active tool-use sequence and trigger learning. */
    endToolSequence?(success: boolean, options?: {
        sessionId?: string;
    }): Promise<void>;
    /** Delete a single memory by ID. */
    deleteMemory(id: string): Promise<boolean>;
    /**
     * Merge a JSON patch into a memory's metadata.
     * Note: synchronous (returns void), unlike most mutators on this interface.
     */
    updateMemoryMetadata(id: string, patch: Record<string, unknown>): void;
    /** Soft-invalidate a memory in place (sets confidence to 0, marks reason). Allows recovery. */
    invalidateMemoryInPlace(memoryId: string, reason?: string): Promise<void>;
    /** Requeue failed embedding jobs and return how many were scheduled. */
    retryFailedEmbeddings?(): Promise<number>;
    /** Extract memories from a user/assistant conversation turn. */
    extractFromConversation(userMessage: string, assistantResponse: string, options?: {
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /** Observation engine for Mastra-style observation/reflection memory layer. */
    readonly observationEngine?: IObservationEngine;
    /**
     * Extract durable knowledge from observation text into the knowledge graph.
     * Optional for backward compatibility — always present on concrete Memoria instances.
     */
    extractFromObservations?(observations: string, options?: {
        sessionId?: string;
        signal?: AbortSignal;
    }): Promise<ConversationExtractionResult>;
    /**
     * Return the most recently updated observation session ID.
     * When `arionId` is provided, scope to that arion's observation records.
     */
    getMostRecentSessionId?(options?: {
        arionId?: string;
    }): Promise<string | null>;
    /**
     * Fetch persisted active observations for a session.
     * Returns null when no observation record exists for the session.
     */
    getSessionObservations?(sessionId: string): Promise<string | null>;
    /**
     * Full-text search across observation records (LIKE on observations, current_task, suggested_response).
     * Returns matching records ordered by recency.
     */
    searchObservations?(query: string, limit?: number): Promise<ObservationQueryResult[]>;
    /**
     * Retrieve a specific field from the most recent observation record.
     * Valid fields: "suggested_response", "current_task", "active_observations".
     */
    getObservationField?(field: "suggested_response" | "current_task" | "active_observations"): Promise<string | null>;
    /**
     * Retrieve the N most recent observation records, ordered by updated_at DESC.
     */
    getRecentObservations?(limit?: number): Promise<ObservationQueryResult[]>;
    /** Shut down the memory system and release resources. */
    close(): Promise<void>;
    /**
     * Entity-first recall — fast-path for entity/self queries.
     * Returns a formatted string when the query matches a known entity or
     * self-query pattern. Returns null to signal fallback to APR.
     */
    entityRecall(query: string): Promise<string | null>;
    /** Whether this instance has been closed. */
    readonly isClosed: boolean;
    /** Return the total number of stored memories. */
    count(): Promise<number>;
    /** List stored memories. */
    list(options?: {
        limit?: number;
        network?: string;
    }): Promise<MemoryItem[]>;
    /** Get a config value by key. Returns null if not found. */
    getConfig(key: string): Promise<string | null>;
    /** Set a config value by key. Creates or overwrites. */
    setConfig(key: string, value: string): Promise<void>;
}
/**
* Types for ARIA's multi-model system
* @module @aria/types/models
*/
import type { NativeToolMetadata } from "./native-tools.js";
/**
 * Supported model providers.
 * "local" typically pairs with a custom `endpoint` on ModelConfig.
 */
export type ModelProvider = "anthropic" | "openai" | "github-copilot" | "openai-codex" | "google" | "local" | "bedrock" | "bedrock-converse";
/**
 * Model tier for task routing
 * - 'fast': Quick responses, simple tasks (e.g., Haiku, GPT-4o-mini)
 * - 'balanced': Good balance of speed and capability (e.g., Sonnet, GPT-5.2)
 * - 'powerful': Maximum capability for complex tasks (e.g., Opus, o1)
 * - 'ensemble': Multiple models working together
 */
export type ModelTier = "fast" | "balanced" | "powerful" | "ensemble";
/**
 * Configuration for a specific model
 */
export interface ModelConfig {
    /** Model provider */
    provider: ModelProvider;
    /** Model identifier (e.g., 'claude-opus-4-6') */
    modelId: string;
    /** Human-readable name */
    name: string;
    /** Model tier classification */
    tier: ModelTier;
    /** Maximum context window size in tokens */
    maxContextTokens: number;
    /** Maximum output tokens */
    maxOutputTokens: number;
    /** Cost per million input tokens (USD) */
    inputCostPerMillion: number;
    /** Cost per million output tokens (USD) */
    outputCostPerMillion: number;
    /** Capabilities this model supports */
    capabilities: {
        /** Supports tool/function calling */
        tools: boolean;
        /** Supports vision/image input */
        vision: boolean;
        /** Supports structured output (JSON mode) */
        structuredOutput: boolean;
        /** Supports streaming responses */
        streaming: boolean;
    };
    /** API endpoint (for local/custom providers) */
    endpoint?: string;
    /** Additional provider-specific options */
    options?: Record<string, unknown>;
}
/**
 * Configuration for the model router
 */
export interface ModelRouterConfig {
    /** Default model tier to use */
    defaultTier: ModelTier;
    /** Available models by tier */
    models: {
        fast: ModelConfig[];
        balanced: ModelConfig[];
        powerful: ModelConfig[];
        /** Optional — only needed when the "ensemble" tier is used. */
        ensemble?: ModelConfig[];
    };
    /** Rules for automatic tier selection */
    routingRules: {
        /** Use powerful tier for tasks with risk above this level */
        riskThreshold: number;
        /** Use powerful tier for tasks with complexity above this score */
        complexityThreshold: number;
        /** Maximum cost per request before requiring approval (USD) */
        costThreshold: number;
    };
    /** Fallback model if primary fails */
    fallbackModel?: ModelConfig;
}
/**
 * Role of a message in a conversation
 */
export type MessageRole = "system" | "user" | "assistant" | "tool";
/**
 * A message in a conversation
 */
export interface Message {
    /** Role of the message sender */
    role: MessageRole;
    /** Text content of the message */
    content: string;
    /** Name of the sender (optional) */
    name?: string;
    /** Tool call ID this message is responding to (for tool role) */
    toolCallId?: string;
    /** Tool calls requested by the assistant for execution (present on assistant messages with tool_use blocks) */
    toolCalls?: ToolCall[];
    /** Image attachments (for vision models) */
    images?: {
        /** Base64 encoded image data or URL */
        data: string;
        /** MIME type of the image */
        mimeType: string;
    }[];
}
/**
 * JSON Schema for function tool parameters.
 * Mirrors a JSON Schema object-type schema node.
 */
export interface FunctionToolParameters {
    /** The root schema must describe an object. */
    type: "object";
    /** Per-parameter JSON Schema definitions, keyed by parameter name. */
    properties: Record<string, unknown>;
    /** Names of required parameters. */
    required?: string[];
    /** Additional JSON Schema keywords (e.g. additionalProperties). */
    [key: string]: unknown;
}
/**
 * Function tool schema (standard ARIA tool)
 */
export interface FunctionToolSchema {
    kind: "function";
    name: string;
    description: string;
    parameters: FunctionToolParameters;
    /** When true, the provider defers loading this tool until needed */
    defer_loading?: boolean;
    /** Native-only fields are forbidden on function tools */
    provider?: never;
    capability?: never;
    config?: never;
    suppresses?: never;
}
/**
 * Provider-native passthrough tool schema
 */
export interface NativeToolSchema {
    kind: "native";
    /** Provider that supplies this native tool */
    provider: "anthropic" | "openai" | "google";
    /** Native capability identifier */
    capability: string;
    /** Provider-specific native tool payload */
    config: Record<string, unknown>;
    /** ARIA function tools suppressed by this native tool */
    suppresses?: string[];
    /** Function-only fields are forbidden on native tools */
    name?: never;
    description?: never;
    parameters?: never;
    defer_loading?: never;
}
/**
 * Tool schema for model requests (discriminated by `kind`).
 * Narrow with isFunctionToolSchema / isNativeToolSchema.
 */
export type ToolSchema = FunctionToolSchema | NativeToolSchema;
/**
 * Type guard for function tools (narrows on `kind === "function"`)
 */
export declare function isFunctionToolSchema(tool: ToolSchema): tool is FunctionToolSchema;
/**
 * Type guard for native tools (narrows on `kind === "native"`)
 */
export declare function isNativeToolSchema(tool: ToolSchema): tool is NativeToolSchema;
/**
 * A tool call requested by the model
 */
export interface ToolCall {
    /** Unique identifier for this tool call */
    id: string;
    /** Name of the tool to call */
    name: string;
    /** Arguments to pass to the tool */
    arguments: Record<string, unknown>;
    /** Gemini thought signature — must be replayed verbatim in conversation history */
    thoughtSignature?: string;
}
/**
 * Thinking mode for models that support extended reasoning.
 * - 'adaptive': Model decides when and how much to think (Opus 4.6+, recommended)
 * - 'enabled': Explicit thinking with a token budget (Sonnet 4.5, Opus 4.5)
 *
 * See ThinkingConfig.budgetTokens for the 'enabled' budget.
 */
export type ThinkingMode = "adaptive" | "enabled";
/**
 * Configuration for extended thinking/reasoning
 */
export interface ThinkingConfig {
    /** Thinking mode */
    mode: ThinkingMode;
    /** Token budget for thinking (required for 'enabled' mode, ignored for 'adaptive') */
    budgetTokens?: number;
}
/**
 * Effort level controlling reasoning depth vs speed/cost.
 * - 'low': Fastest, cheapest. Skips thinking for simple tasks.
 * - 'medium': Balanced. May skip thinking for very simple queries.
 * - 'high': Default. Deep reasoning on complex tasks.
 * - 'max': Maximum capability, no constraints. Opus 4.6 only.
 */
export type EffortLevel = "low" | "medium" | "high" | "max";
/**
 * A thinking block from a model's internal reasoning process
 */
export interface ThinkingBlock {
    /** The thinking text content (empty string if redacted) */
    thinking: string;
    /** Cryptographic signature for verification (required for tool-use continuations) */
    signature?: string;
    /** Whether this block was redacted by safety systems */
    redacted?: boolean;
}
/**
 * Request to a model.
 * Only `messages` is required; everything else is optional tuning/metadata.
 */
export interface ModelRequest {
    /** Messages in the conversation */
    messages: Message[];
    /** System prompt (if not in messages) */
    systemPrompt?: string;
    /** Available tools */
    tools?: ToolSchema[];
    /** Temperature for sampling (0-2) */
    temperature?: number;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Stop sequences */
    stopSequences?: string[];
    /** Whether to stream the response */
    stream?: boolean;
    /** Force structured JSON output */
    jsonMode?: boolean;
    /** Extended thinking configuration */
    thinking?: ThinkingConfig;
    /** Effort level for response quality vs speed/cost tradeoff */
    effort?: EffortLevel;
    /** Tool choice strategy (e.g. "auto", "none", or a specific tool name) */
    tool_choice?: string;
    /** Request metadata */
    metadata?: {
        /** Task ID for tracking */
        taskId?: string;
        /** User ID for billing */
        userId?: string;
        /** Request priority */
        priority?: "low" | "normal" | "high";
    };
    /** Callback for streaming thinking blocks as they arrive (Gemini 2.5+/3.x).
     * Allows the runner to yield thinking events during model streaming
     * instead of waiting for the full response. */
    onThinking?: (text: string) => void;
    /** AbortSignal for cancelling in-flight HTTP requests.
     * Passed through to provider fetch() calls so Ctrl+C cancels immediately
     * instead of waiting for the next network chunk. */
    abortSignal?: AbortSignal;
}
/** Static feature flags describing what a model provider supports. */
export interface ProviderCapabilities {
    /** Whether the provider supports structured JSON output */
    jsonMode: boolean;
    /** Whether the provider supports extended thinking */
    thinking: boolean;
    /** Whether the provider supports tool/function calling */
    toolUse: boolean;
    /** Whether the provider supports streaming responses */
    streaming: boolean;
    /** Whether the provider supports native defer_loading on tool schemas */
    nativeDeferredLoading: boolean;
    /** Whether the provider supports native web search */
    nativeSearch: boolean;
    /** Whether the provider supports native web fetch */
    nativeFetch: boolean;
    /** Whether the provider supports native code execution */
    nativeCodeExec: boolean;
    /** Whether the provider supports native computer use */
    nativeComputerUse: boolean;
    /** Whether the provider supports native image generation */
    nativeImageGen: boolean;
    /** Whether the provider supports native file search */
    nativeFileSearch: boolean;
}
/**
 * Usage statistics from a model response
 */
export interface ModelUsage {
    /** Input tokens consumed (TOTAL context window — includes cached tokens) */
    inputTokens: number;
    /** Output tokens generated */
    outputTokens: number;
    /** Total tokens (input + output) */
    totalTokens: number;
    /** Estimated cost in USD (presumably derived from ModelConfig per-million rates — confirm) */
    estimatedCost: number;
    /** Thinking tokens consumed (subset of outputTokens, if available) */
    thinkingTokens?: number;
    /** Tokens read from prompt cache (subset of inputTokens) */
    cacheReadTokens?: number;
    /** Tokens written to prompt cache (subset of inputTokens) */
    cacheCreationTokens?: number;
}
/**
 * Response from a model
 */
export interface ModelResponse {
    /** The model's text response */
    content: string;
    /** Tool calls requested by the model */
    toolCalls?: ToolCall[];
    /** Why the model stopped generating ("aborted" pairs with ModelRequest.abortSignal) */
    stopReason: "end" | "max_tokens" | "tool_call" | "stop_sequence" | "aborted";
    /** Token usage statistics */
    usage: ModelUsage;
    /** Model that generated the response */
    model: string;
    /** Response generation time in milliseconds */
    latency: number;
    /** Unique ID for this response */
    responseId: string;
    /** Thinking blocks from the model's reasoning process */
    thinking?: ThinkingBlock[];
    /** Raw content blocks from the API (for tool-use continuations with thinking) */
    rawContentBlocks?: unknown[];
    /** Provider that generated the response */
    provider?: ModelProvider;
    /** Native tool metadata (e.g., search results, code exec output) */
    nativeToolMetadata?: NativeToolMetadata;
}
/**
* Native tool types and type guards for provider-native capabilities
* @module @aria/types/native-tools
*/
import type { ToolCall } from "./models.js";
/**
 * Type guard to check if a tool call is a function/custom tool
 * (as opposed to a native provider tool like search or code execution)
 */
export declare function isFunctionTool(toolCall: ToolCall): boolean;
/**
 * Type guard to check if a tool call is a native provider tool
 * Native tools have special name prefixes like "brave_search", "computer", etc.
 */
export declare function isNativeTool(toolCall: ToolCall): boolean;
/**
 * A single source from a search result
 */
export interface NormalizedSource {
    /** URL of the source */
    url: string;
    /** Title of the source */
    title: string;
    /** Optional snippet/excerpt from the source */
    snippet?: string;
    /** Confidence score (0-1) if provided by the search API */
    confidence?: number;
    /** Which segments of the response cited this source */
    citedSegments?: Array<{
        /** The cited response text. */
        text: string;
        /** Segment start offset into the response — NOTE(review): confirm units (chars vs bytes). */
        startIndex: number;
        /** Segment end offset into the response — NOTE(review): confirm inclusivity. */
        endIndex: number;
    }>;
}
/**
 * Normalized metadata for native search tool calls
 * Abstracts over provider-specific search APIs (Anthropic Brave, Google Search, etc.)
 */
export interface NormalizedSearchMetadata {
    /** Provider that executed the search */
    provider: "anthropic" | "openai" | "google";
    /** Search queries executed (may be multiple if refined) */
    queries: string[];
    /** Sources returned by the search */
    sources: NormalizedSource[];
    /** The grounded response generated from search results (if available) */
    groundedResponse?: string;
}
/**
 * Metadata attached to ModelResponse when native tools are used
 */
export interface NativeToolMetadata {
    /** Search metadata (if native search was used) */
    search?: NormalizedSearchMetadata;
    /** Raw provider-specific metadata for other native tools */
    raw?: unknown;
}
/**
 * Magic exit code that tells the parent supervisor to respawn the child.
 */
export declare const RELAUNCH_EXIT_CODE = 199;
/**
 * Environment variables used by relaunch/supervisor flows.
 */
export declare const NO_RELAUNCH_ENV = "ARIA_NO_RELAUNCH";
export declare const RESUME_SESSION_ENV = "ARIA_RESUME_SESSION_ID";
export declare const RESUME_ARION_ENV = "ARIA_RESUME_ARION";
export declare const RESTART_KIND_ENV = "ARIA_RESTART_KIND";
/** State persisted across a relaunch so the respawned process can resume. */
export interface RelaunchMarker {
    /** Session to resume after relaunch, or null when there is none. */
    sessionId: string | null;
    /** Name of the arion being relaunched. */
    arionName: string;
    /** PID of the process the marker belongs to. */
    pid: number;
    /** When the marker was written (string-encoded; presumably ISO-8601 — confirm). */
    timestamp: string;
}
/** Directory where relaunch markers are stored. */
export declare function getRelaunchMarkerDir(): string;
/** Marker path for `pid` (NOTE(review): presumably defaults to the current process — confirm). */
export declare function getRelaunchMarkerPath(pid?: number): string;
/** Persist a relaunch marker. */
export declare function writeRelaunchMarker(marker: RelaunchMarker): void;
/** Read the marker for `pid`, or null when none exists. */
export declare function readRelaunchMarker(pid?: number): RelaunchMarker | null;
/** Remove the marker for `pid`, if present. */
export declare function clearRelaunchMarker(pid?: number): void;
/** Record a global stall-phase label (NOTE(review): presumably for stall/hang diagnostics — confirm). */
export declare function setGlobalStallPhase(label: string): void;
/** Clear the global stall-phase label. */
export declare function clearGlobalStallPhase(): void;
/** Canonical output shapes for tools with dedicated renderers.
 * Executors MUST return these shapes. Renderers consume them directly. */
export interface EditFileOutput {
    /** Path of the file that was edited. */
    filePath: string;
    /** Diff hunks describing the change (shape matches jsdiff's structured patch — confirm). */
    structuredPatch: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
    /** Number of replacements applied. */
    replacements: number;
    /** Which edit strategy produced the result. */
    strategy: string;
}
export interface WriteFileOutput {
    /** Path of the file that was written. */
    filePath: string;
    /** What the write did to the target file. */
    action: "created" | "overwritten" | "appended";
    /** Number of bytes written. */
    bytesWritten: number;
    /** Diff hunks for the write, when available (same shape as EditFileOutput.structuredPatch). */
    structuredPatch?: Array<{
        oldStart: number;
        oldLines: number;
        newStart: number;
        newLines: number;
        lines: string[];
    }>;
}
/** Result of a shell command execution. */
export interface BashToolOutput {
    /** Captured standard output. */
    stdout: string;
    /** Captured standard error. */
    stderr: string;
    /** Process exit code. */
    exitCode: number;
}
/** Result of editing a notebook cell. */
export interface NotebookEditOutput {
    /** Index of the edited cell. */
    cellNumber: number;
    /** New source content of the cell. */
    newSource: string;
    /** Language of the cell. */
    language: string;
    /** Error message when the edit failed, if any. */
    error?: string;
}