| """Bounded mutation target for the smoke experiment.""" | ||
def count_words(text: str) -> int:
    """Count whitespace-separated words in *text*; blank input yields zero."""
    stripped = text.strip()
    return len(stripped.split()) if stripped else 0
| """Shared keep/revert loop for a bounded local experiment.""" | ||
| from __future__ import annotations | ||
| import argparse | ||
| import csv | ||
| import json | ||
| import os | ||
| from pathlib import Path | ||
| import shutil | ||
| import subprocess | ||
| import sys | ||
| from datetime import datetime, timezone | ||
| EXPERIMENT_DIR = Path(__file__).resolve().parent | ||
| DEFAULT_TARGET = EXPERIMENT_DIR / "candidate.py" | ||
| DEFAULT_MEASURE = EXPERIMENT_DIR / "measure.py" | ||
| DEFAULT_RESULTS = EXPERIMENT_DIR / "results.tsv" | ||
def run_measure(measure_path: Path) -> dict:
    """Run the measurement script inside the experiment directory.

    Returns the parsed JSON payload the script prints to stdout.
    Raises CalledProcessError if the script exits non-zero.
    """
    command = [sys.executable, str(measure_path)]
    completed = subprocess.run(
        command,
        cwd=str(EXPERIMENT_DIR),
        text=True,
        capture_output=True,
        check=True,
    )
    payload = completed.stdout.strip()
    return json.loads(payload)
def append_result(results_path: Path, row: dict) -> None:
    """Append one trial row to the TSV log, writing the header on first use."""
    # De Morgan of "not exists or empty": header is needed unless the file
    # already exists and has content.
    needs_header = not (results_path.exists() and results_path.stat().st_size > 0)
    columns = [
        "timestamp",
        "trial",
        "status",
        "old_score",
        "new_score",
        "proposal",
        "description",
    ]
    with results_path.open("a", newline="", encoding="utf-8") as out:
        writer = csv.DictWriter(out, fieldnames=columns, delimiter="\t")
        if needs_header:
            writer.writeheader()
        writer.writerow(row)
def restore_backup(backup_path: Path, target_path: Path) -> None:
    """Overwrite *target_path* with the backup copy, then discard the backup."""
    # copy2 preserves file metadata along with the contents.
    shutil.copy2(str(backup_path), str(target_path))
    backup_path.unlink(missing_ok=True)
def main() -> int:
    """Run the keep/revert loop over the proposal scripts given on the CLI.

    For each proposal: back up the target, run the proposal script,
    re-measure, keep the mutation only on a strict score improvement,
    otherwise restore the backup. Every trial is appended to results.tsv.
    Returns 0 unconditionally; per-trial failures are logged, not fatal.
    """
    parser = argparse.ArgumentParser(description="Run a bounded keep/revert experiment.")
    # Repeatable flag: each --proposal adds one script; they run in order.
    parser.add_argument("--proposal", action="append", default=[])
    args = parser.parse_args()
    target_path = DEFAULT_TARGET.resolve()
    measure_path = DEFAULT_MEASURE.resolve()
    results_path = DEFAULT_RESULTS.resolve()
    baseline = run_measure(measure_path)
    current_score = float(baseline["score"])
    print(f"BASELINE {current_score:.4f}")
    for trial_index, proposal in enumerate(args.proposal, start=1):
        proposal_path = Path(proposal).resolve()
        # Per-trial backup so any bad mutation can be rolled back cleanly.
        backup_path = target_path.with_suffix(target_path.suffix + f".trial{trial_index}.bak")
        shutil.copy2(target_path, backup_path)
        status = "error"
        old_score = current_score
        new_score = current_score
        description = ""
        try:
            # The proposal script mutates the file named by EXPERIMENT_TARGET.
            proc = subprocess.run(
                [sys.executable, str(proposal_path)],
                cwd=str(EXPERIMENT_DIR),
                capture_output=True,
                text=True,
                check=True,
                env={**os.environ, "EXPERIMENT_TARGET": str(target_path)},
            )
            if proc.stdout.strip():
                # Keep only the last stdout line, truncated, as the trial note.
                description = proc.stdout.strip().splitlines()[-1][:200]
            measured = run_measure(measure_path)
            new_score = float(measured["score"])
            if new_score > current_score:
                # Strict improvement: keep the mutation and drop the backup.
                status = "kept"
                current_score = new_score
                backup_path.unlink(missing_ok=True)
            else:
                # Tie or regression: put the previous target back.
                status = "reverted"
                restore_backup(backup_path, target_path)
        except subprocess.CalledProcessError as exc:
            # Proposal (or measure) crashed: roll back, log last output line.
            restore_backup(backup_path, target_path)
            stderr = (exc.stderr or exc.stdout or "").strip()
            description = (stderr.splitlines()[-1] if stderr else "proposal failed")[:200]
            status = "error"
        append_result(
            results_path,
            {
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "trial": trial_index,
                "status": status,
                "old_score": f"{old_score:.4f}",
                "new_score": f"{new_score:.4f}",
                "proposal": proposal_path.name,
                "description": description,
            },
        )
        print(f"TRIAL {trial_index} {status.upper()} score={new_score:.4f} proposal={proposal_path.name}")
    final_measure = run_measure(measure_path)
    print(f"FINAL {final_measure['score']:.4f}")
    return 0
| if __name__ == "__main__": | ||
| raise SystemExit(main()) |
| """Objective metric for the smoke keep/revert example.""" | ||
| from __future__ import annotations | ||
| import json | ||
| from pathlib import Path | ||
| import sys | ||
| EXPERIMENT_DIR = Path(__file__).resolve().parent | ||
| if str(EXPERIMENT_DIR) not in sys.path: | ||
| sys.path.insert(0, str(EXPERIMENT_DIR)) | ||
| from candidate import count_words | ||
| CASES = [ | ||
| ("", 0), | ||
| ("one", 1), | ||
| ("two words", 2), | ||
| (" three spaced words ", 3), | ||
| ("punctuation, still counts", 3), | ||
| ] | ||
def main() -> int:
    """Score count_words against the fixed cases and print a JSON summary."""
    total = len(CASES)
    passed = sum(1 for text, expected in CASES if count_words(text) == expected)
    score = passed / total if total else 0.0
    report = {
        "score": round(score, 4),
        "passed": passed,
        "total": total,
        "status": "pass" if passed == total else "fail",
    }
    print(json.dumps(report))
    return 0
| if __name__ == "__main__": | ||
| raise SystemExit(main()) |
| # Program | ||
| Prove the keep/revert loop in the smallest honest way. |
| """A deliberately bad mutation that should be reverted.""" | ||
| from pathlib import Path | ||
| import os | ||
| TARGET = Path(os.environ["EXPERIMENT_TARGET"]) | ||
| TARGET.write_text( | ||
| '''"""Bounded mutation target for the smoke experiment.""" | ||
| def count_words(text: str) -> int: | ||
| return 0 | ||
| ''', | ||
| encoding="utf-8", | ||
| ) | ||
| print("applied bad proposal") |
| """A good mutation that should be kept.""" | ||
| from pathlib import Path | ||
| import os | ||
| TARGET = Path(os.environ["EXPERIMENT_TARGET"]) | ||
| TARGET.write_text( | ||
| '''"""Bounded mutation target for the smoke experiment.""" | ||
| def count_words(text: str) -> int: | ||
| cleaned = text.strip() | ||
| if not cleaned: | ||
| return 0 | ||
| return len(cleaned.split()) | ||
| ''', | ||
| encoding="utf-8", | ||
| ) | ||
| print("applied good proposal") |
| # smoke-keep-revert | ||
| Smallest honest example of the framework. | ||
|  | ||
| ## Files | ||
| ```text | ||
| candidate.py -> bounded target | ||
| measure.py -> hard score | ||
| loop.py -> keep/revert engine | ||
| reset.py -> restore baseline | ||
| results.tsv -> trial log | ||
| proposals/ -> bad patch + good patch | ||
| ``` | ||
| ## Flow | ||
| ```text | ||
| candidate.py is wrong | ||
| ↓ | ||
| measure.py scores baseline | ||
| ↓ | ||
| loop.py applies bad_patch.py | ||
| ↓ | ||
| score does not improve | ||
| ↓ | ||
| loop.py reverts the change | ||
| ↓ | ||
| loop.py applies fix_patch.py | ||
| ↓ | ||
| score improves | ||
| ↓ | ||
| loop.py keeps the change | ||
| ``` | ||
| ## Run | ||
| ```bash | ||
| python reset.py | ||
| python loop.py \ | ||
| --proposal proposals/bad_patch.py \ | ||
| --proposal proposals/fix_patch.py | ||
| ``` |
| """Restore the smoke example to its baseline.""" | ||
| from pathlib import Path | ||
| TARGET = Path(__file__).resolve().parent / "candidate.py" | ||
| TARGET.write_text( | ||
| '''"""Bounded mutation target for the smoke experiment.""" | ||
| def count_words(text: str) -> int: | ||
| cleaned = text.strip() | ||
| if not cleaned: | ||
| return 0 | ||
| return len(cleaned) | ||
| ''', | ||
| encoding="utf-8", | ||
| ) | ||
| print("reset smoke-keep-revert to baseline") |
| timestamp trial status old_score new_score proposal description | ||
| 2026-03-11T11:05:17.887045+00:00 1 reverted 0.2000 0.2000 bad_patch.py applied bad proposal | ||
| 2026-03-11T11:05:17.920737+00:00 2 kept 0.2000 1.0000 fix_patch.py applied good proposal | ||
| 2026-03-11T11:05:40.063680+00:00 1 reverted 0.2000 0.2000 bad_patch.py applied bad proposal | ||
| 2026-03-11T11:05:40.097842+00:00 2 kept 0.2000 1.0000 fix_patch.py applied good proposal |
| <svg width="980" height="260" viewBox="0 0 980 260" fill="none" xmlns="http://www.w3.org/2000/svg"> | ||
| <rect width="980" height="260" fill="#F7F5EF"/> | ||
| <text x="40" y="42" font-family="Helvetica, Arial, sans-serif" font-size="28" font-weight="700" fill="#111111">Smoke Keep/Revert</text> | ||
| <text x="40" y="68" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#4B5563">One bounded target. One hard metric. Reject the loser. Keep the winner.</text> | ||
| <rect x="40" y="110" width="150" height="88" rx="16" fill="#FFF7ED" stroke="#C2410C" stroke-width="2"/> | ||
| <text x="115" y="140" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#9A3412">Broken Target</text> | ||
| <text x="115" y="166" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#7C2D12">candidate.py</text> | ||
| <text x="115" y="186" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#7C2D12">buggy on purpose</text> | ||
| <rect x="220" y="110" width="150" height="88" rx="16" fill="#EFF6FF" stroke="#1D4ED8" stroke-width="2"/> | ||
| <text x="295" y="140" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#1E3A8A">Measure</text> | ||
| <text x="295" y="166" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#1D4ED8">score = 0.2</text> | ||
| <text x="295" y="186" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#1D4ED8">baseline truth</text> | ||
| <rect x="400" y="38" width="160" height="72" rx="16" fill="#FEF2F2" stroke="#DC2626" stroke-width="2"/> | ||
| <text x="480" y="66" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#991B1B">Bad Patch</text> | ||
| <text x="480" y="92" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#B91C1C">score falls to 0.0</text> | ||
| <rect x="400" y="150" width="160" height="72" rx="16" fill="#ECFDF5" stroke="#059669" stroke-width="2"/> | ||
| <text x="480" y="178" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#065F46">Good Patch</text> | ||
| <text x="480" y="204" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#047857">score rises to 1.0</text> | ||
| <rect x="610" y="38" width="150" height="72" rx="16" fill="#FEE2E2" stroke="#DC2626" stroke-width="2"/> | ||
| <text x="685" y="66" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#991B1B">REVERT</text> | ||
| <text x="685" y="92" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#B91C1C">reject loser</text> | ||
| <rect x="610" y="150" width="150" height="72" rx="16" fill="#DCFCE7" stroke="#16A34A" stroke-width="2"/> | ||
| <text x="685" y="178" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#166534">KEEP</text> | ||
| <text x="685" y="204" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#15803D">accept winner</text> | ||
| <rect x="800" y="110" width="140" height="88" rx="16" fill="#F0FDF4" stroke="#16A34A" stroke-width="2"/> | ||
| <text x="870" y="140" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="18" font-weight="700" fill="#166534">Final State</text> | ||
| <text x="870" y="166" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#15803D">fixed target</text> | ||
| <text x="870" y="186" text-anchor="middle" font-family="Helvetica, Arial, sans-serif" font-size="14" fill="#15803D">score = 1.0</text> | ||
| <path d="M190 154H220" stroke="#6B7280" stroke-width="3"/> | ||
| <path d="M365 154H385" stroke="#6B7280" stroke-width="3"/> | ||
| <path d="M560 74H600" stroke="#DC2626" stroke-width="3"/> | ||
| <path d="M560 186H600" stroke="#16A34A" stroke-width="3"/> | ||
| <path d="M760 154H790" stroke="#6B7280" stroke-width="3"/> | ||
| <path d="M480 110V138" stroke="#6B7280" stroke-width="3" stroke-dasharray="8 8"/> | ||
| <path d="M295 154C340 154 350 74 400 74" stroke="#DC2626" stroke-width="3" fill="none"/> | ||
| <path d="M295 154C340 154 350 186 400 186" stroke="#16A34A" stroke-width="3" fill="none"/> | ||
| <polygon points="220,154 210,148 210,160" fill="#6B7280"/> | ||
| <polygon points="385,154 375,148 375,160" fill="#6B7280"/> | ||
| <polygon points="600,74 590,68 590,80" fill="#DC2626"/> | ||
| <polygon points="600,186 590,180 590,192" fill="#16A34A"/> | ||
| <polygon points="790,154 780,148 780,160" fill="#6B7280"/> | ||
| </svg> |
| print("ok") |
| # Program | ||
| This pack is invalid because the folder name is bad and files are missing. |
| timestamp trial status old_score new_score proposal description |
| # Program | ||
| This program is intentionally too long. | ||
| aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | ||
| aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa |
| timestamp trial status old_score new_score proposal description |
| # Program | ||
| Keep this experiment small and measurable. |
| timestamp trial status old_score new_score proposal description |
| """Minimal keep/revert loop template.""" | ||
| print("Replace this template with a bounded keep/revert loop.") |
| """Return a machine-readable score for the experiment.""" | ||
| import json | ||
| payload = { | ||
| "score": 0.0, | ||
| "passed": 0, | ||
| "total": 0, | ||
| "status": "fail", | ||
| } | ||
| print(json.dumps(payload)) |
| # Program | ||
| State the outcome you want to improve in one short paragraph. |
| """Restore the experiment pack to baseline.""" | ||
| print("Implement baseline reset here.") |
| timestamp trial status old_score new_score proposal description |
| """Runtime benchmark for example experiment packs.""" | ||
| from __future__ import annotations | ||
| import json | ||
| from pathlib import Path | ||
| import subprocess | ||
| import sys | ||
| ROOT = Path(__file__).resolve().parent | ||
| EXAMPLES_DIR = ROOT / "_examples" | ||
| CASES = [ | ||
| { | ||
| "name": "smoke-keep-revert", | ||
| "baseline_below": 1.0, | ||
| "expected_final": 1.0, | ||
| "proposals": ["proposals/bad_patch.py", "proposals/fix_patch.py"], | ||
| }, | ||
| ] | ||
def run_python(script: Path, *args: str) -> subprocess.CompletedProcess[str]:
    """Invoke *script* with the current interpreter from the script's folder.

    Raises CalledProcessError on a non-zero exit status.
    """
    command = [sys.executable, str(script), *args]
    return subprocess.run(
        command,
        cwd=str(script.parent),
        text=True,
        capture_output=True,
        check=True,
    )
def run_measure(exp_dir: Path) -> dict:
    """Run an experiment pack's measure.py and decode its JSON stdout."""
    output = run_python(exp_dir / "measure.py").stdout
    return json.loads(output.strip())
def main() -> int:
    """Exercise each example pack end-to-end and report a pass/fail score."""
    failures: list[str] = []
    passed = 0
    for case in CASES:
        exp_dir = EXAMPLES_DIR / case["name"]
        # Start from a known-broken baseline before every case.
        run_python(exp_dir / "reset.py")
        baseline = run_measure(exp_dir)
        if float(baseline["score"]) >= case["baseline_below"]:
            failures.append(f"{case['name']}: baseline too high ({baseline['score']})")
            continue
        proposal_args: list[str] = []
        for rel in case["proposals"]:
            proposal_args += ["--proposal", str(exp_dir / rel)]
        run_python(exp_dir / "loop.py", *proposal_args)
        final = run_measure(exp_dir)
        if float(final["score"]) != case["expected_final"]:
            failures.append(
                f"{case['name']}: final score {final['score']} != {case['expected_final']}"
            )
            continue
        passed += 1
    total = len(CASES)
    score = passed / total if total else 0.0
    print(f"SCORE {score:.4f} ({passed}/{total})")
    if failures:
        for failure in failures:
            print(f"FAIL {failure}")
        return 1
    print("PASS benchmark_runtime")
    return 0
| if __name__ == "__main__": | ||
| raise SystemExit(main()) |
| """Benchmark the validator against fixed good/bad fixtures.""" | ||
| from __future__ import annotations | ||
| from pathlib import Path | ||
| import sys | ||
| ROOT = Path(__file__).resolve().parent | ||
| if str(ROOT) not in sys.path: | ||
| sys.path.insert(0, str(ROOT)) | ||
| from validate import validate_experiment | ||
| FIXTURES_DIR = ROOT / "_fixtures" | ||
| CASES = [ | ||
| { | ||
| "path": FIXTURES_DIR / "valid" / "good-experiment", | ||
| "expect_ok": True, | ||
| "must_contain": [], | ||
| }, | ||
| { | ||
| "path": FIXTURES_DIR / "invalid" / "BadName", | ||
| "expect_ok": False, | ||
| "must_contain": ["invalid folder name", "missing required file measure.py"], | ||
| }, | ||
| { | ||
| "path": FIXTURES_DIR / "invalid" / "bloated-context", | ||
| "expect_ok": False, | ||
| "must_contain": ["program.md too long"], | ||
| }, | ||
| ] | ||
def main() -> int:
    """Run the validator over each fixture and check the expected outcome."""
    failures: list[str] = []
    passed = 0
    for case in CASES:
        issues = validate_experiment(case["path"])
        actually_ok = len(issues) == 0
        if actually_ok != case["expect_ok"]:
            failures.append(
                f"{case['path'].name}: expected ok={case['expect_ok']} got ok={actually_ok}"
            )
            continue
        # Every expected substring must appear in at least one reported issue.
        unmatched = [
            needle
            for needle in case["must_contain"]
            if not any(needle in issue for issue in issues)
        ]
        if unmatched:
            failures.append(
                f"{case['path'].name}: missing expected issue(s): {', '.join(unmatched)}"
            )
            continue
        passed += 1
    total = len(CASES)
    score = passed / total if total else 0.0
    print(f"SCORE {score:.4f} ({passed}/{total})")
    if failures:
        for failure in failures:
            print(f"FAIL {failure}")
        return 1
    print("PASS benchmark_validate")
    return 0
| if __name__ == "__main__": | ||
| raise SystemExit(main()) |
| # experiments | ||
| Karpathy-style experiment framework for Atris workspaces. | ||
| This folder defines the schema, validation rules, and benchmark harness for self-improvement loops. | ||
| Live experiment packs belong directly inside `atris/experiments/`. | ||
| ## What This Is | ||
| An experiment is not "the agent rewrote its prompt and said it improved." | ||
| An experiment is: | ||
| 1. one bounded target | ||
| 2. one external metric | ||
| 3. one keep/revert loop | ||
| 4. one append-only log | ||
| If the metric goes up, keep the change. | ||
| If it does not, revert it. | ||
| ## Schema | ||
| ```text | ||
| atris/experiments/ | ||
| ├── README.md | ||
| ├── validate.py | ||
| ├── benchmark_validate.py | ||
| ├── benchmark_runtime.py | ||
| ├── _template/ # packaged scaffolds | ||
| ├── _examples/ # packaged smoke examples | ||
| ├── _fixtures/ # validator benchmark cases | ||
| └── <experiment-slug>/ | ||
| ├── program.md | ||
| ├── measure.py | ||
| ├── loop.py | ||
| ├── results.tsv | ||
| ├── reset.py # preferred | ||
| ├── proposals/ # optional | ||
| └── <bounded-target> # candidate.py, system_prompt.txt, etc. | ||
| ``` | ||
| ## Rules | ||
| 1. One bounded mutation target per experiment. | ||
| 2. `measure.py` must use an external metric the agent cannot fake. | ||
| 3. `loop.py` must keep only improvements and revert regressions. | ||
| 4. `program.md` stays short and task-specific. | ||
| 5. `results.tsv` stays append-only. | ||
| ## Repo Contents | ||
| - `_template/pack/` - starter files for a new experiment | ||
| - `validate.py` - structural and bloat checks | ||
| - `benchmark_validate.py` - validator benchmark on fixed good/bad fixtures | ||
| - `benchmark_runtime.py` - runtime benchmark on packaged example packs | ||
| - `_examples/` - tiny reference implementation | ||
| ## Example | ||
| Start with the smallest honest pack: | ||
| ```text | ||
| _examples/smoke-keep-revert/ | ||
| ├── candidate.py | ||
| ├── measure.py | ||
| ├── loop.py | ||
| ├── reset.py | ||
| ├── results.tsv | ||
| └── proposals/ | ||
| ├── bad_patch.py | ||
| └── fix_patch.py | ||
| ``` | ||
| What it does: | ||
| - `candidate.py` starts broken on purpose | ||
| - `measure.py` scores it on a fixed word-count test | ||
| - `bad_patch.py` makes it worse | ||
| - `fix_patch.py` actually fixes it | ||
| - `loop.py` keeps only the fix | ||
| Run it: | ||
| ```bash | ||
| python _examples/smoke-keep-revert/reset.py | ||
| python _examples/smoke-keep-revert/loop.py \ | ||
| --proposal _examples/smoke-keep-revert/proposals/bad_patch.py \ | ||
| --proposal _examples/smoke-keep-revert/proposals/fix_patch.py | ||
| ``` | ||
| Visual: | ||
| ```text | ||
| broken target | ||
| ↓ | ||
| score = 0.2 | ||
| ↓ | ||
| bad patch | ||
| ↓ | ||
| score = 0.0 | ||
| ↓ | ||
| REVERT | ||
| ↓ | ||
| good patch | ||
| ↓ | ||
| score = 1.0 | ||
| ↓ | ||
| KEEP | ||
| ``` | ||
| ## Commands | ||
| ```bash | ||
| python validate.py . | ||
| python benchmark_validate.py | ||
| python benchmark_runtime.py | ||
| ``` |
| """Validate experiments for structure and context hygiene.""" | ||
| from __future__ import annotations | ||
| import argparse | ||
| import re | ||
| from pathlib import Path | ||
| REQUIRED_FILES = ("program.md", "measure.py", "loop.py", "results.tsv") | ||
| MAX_PROGRAM_CHARS = 1200 | ||
| MAX_RESULTS_BYTES = 64_000 | ||
| SLUG_RE = re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$") | ||
def find_experiments(root: Path) -> list[Path]:
    """List candidate pack directories, skipping hidden and underscored names."""
    candidates = [
        entry
        for entry in root.iterdir()
        if entry.is_dir() and not entry.name.startswith((".", "_"))
    ]
    return sorted(candidates)
def resolve_experiments(root: Path) -> list[Path]:
    """Return packs to validate: *root* itself if it is a pack, else its children."""
    if not (root.exists() and root.is_dir()):
        return []
    # A directory holding any required file is treated as a single pack,
    # so `validate.py path/to/pack` works as well as a parent directory.
    looks_like_pack = any((root / name).exists() for name in REQUIRED_FILES)
    if looks_like_pack:
        return [root]
    return find_experiments(root)
def validate_experiment(path: Path) -> list[str]:
    """Check one experiment pack and return human-readable issues.

    Checks: folder-name slug format, presence of each required file,
    program.md length, and results.tsv size. An empty list means valid.
    """
    issues: list[str] = []
    if not SLUG_RE.match(path.name):
        issues.append(f"{path.name}: invalid folder name, use lowercase-hyphen slug")
    for filename in REQUIRED_FILES:
        if not (path / filename).exists():
            # Name the missing file so the message is actionable; the
            # benchmark fixtures match on e.g. "missing required file measure.py".
            issues.append(f"{path.name}: missing required file {filename}")
    program_path = path / "program.md"
    if program_path.exists():
        size = len(program_path.read_text(encoding="utf-8"))
        if size > MAX_PROGRAM_CHARS:
            issues.append(
                f"{path.name}: program.md too long ({size} chars > {MAX_PROGRAM_CHARS})"
            )
    results_path = path / "results.tsv"
    if results_path.exists():
        size = results_path.stat().st_size
        if size > MAX_RESULTS_BYTES:
            issues.append(
                f"{path.name}: results.tsv too large ({size} bytes > {MAX_RESULTS_BYTES})"
            )
    return issues
def main() -> int:
    """CLI entry point: validate every pack reachable from the given root."""
    parser = argparse.ArgumentParser(description="Validate experiment packs.")
    parser.add_argument("root", nargs="?", default=".", help="Directory containing experiment packs")
    args = parser.parse_args()
    root = Path(args.root).resolve()
    experiments = resolve_experiments(root)
    if not experiments:
        print("FAIL: no experiments found")
        return 1
    all_issues: list[str] = []
    for experiment in experiments:
        all_issues.extend(validate_experiment(experiment))
    if all_issues:
        print("FAIL")
        for issue in all_issues:
            print(f"- {issue}")
        return 1
    print(f"PASS: {len(experiments)} experiment(s) valid")
    for experiment in experiments:
        print(f"- {experiment.name}")
    return 0
| if __name__ == "__main__": | ||
| raise SystemExit(main()) |
| --- | ||
| name: apps | ||
| description: View, manage, and trigger Atris apps. Use when user asks about their apps, app status, runs, data, or wants to trigger an app. | ||
| version: 1.0.0 | ||
| tags: | ||
| - apps | ||
| - atris | ||
| - management | ||
| --- | ||
| # Apps | ||
| View and manage your Atris apps — status, runs, data, secrets, members. | ||
| ## Bootstrap | ||
| ```bash | ||
| TOKEN=$(node -e "console.log(require('$HOME/.atris/credentials.json').token)" 2>/dev/null \ | ||
| || python3 -c "import json,os; print(json.load(open(os.path.expanduser('~/.atris/credentials.json')))['token'])" 2>/dev/null) | ||
| if [ -z "$TOKEN" ]; then echo "Not logged in. Run: atris login"; exit 1; fi | ||
| echo "Ready." | ||
| ``` | ||
| Base URL: `https://api.atris.ai/api/apps` | ||
| Auth: `-H "Authorization: Bearer $TOKEN"` | ||
| --- | ||
| ## List My Apps | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| Returns all apps you own with id, name, slug, description, template, status. | ||
| ### Filter Apps | ||
| ```bash | ||
| # Template apps only | ||
| curl -s "https://api.atris.ai/api/apps?filter=template" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| # Paid apps | ||
| curl -s "https://api.atris.ai/api/apps?filter=paid" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| # Free apps | ||
| curl -s "https://api.atris.ai/api/apps?filter=free" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| --- | ||
| ## App Details | ||
| ### Get App Status | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/status" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| Returns: last run, next run, health, active members. | ||
| ### Get App Runs | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/runs?limit=10" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| ### Get Single Run | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/runs/{run_id}" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| --- | ||
| ## App Data | ||
| ### Read All Data | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/data" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| ### Read Specific Collection | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/data/{collection}" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| ### Push Data In | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/{slug}/ingest" \ | ||
| -H "Authorization: Bearer $TOKEN" \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{"collection": "leads", "data": {"name": "Acme", "score": 85}}' | ||
| ``` | ||
| --- | ||
| ## Secrets | ||
| ### List Secret Keys (names + storage tier) | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/secrets" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| Returns key names and where they're stored: | ||
| - `"storage_tier": "cloud"` — encrypted in Atris vault | ||
| - `"storage_tier": "local"` — on your machine at `~/.atris/secrets/{slug}/` | ||
| ### Store Secret (cloud) | ||
| ```bash | ||
| curl -s -X PUT "https://api.atris.ai/api/apps/{slug}/secrets/{key}" \ | ||
| -H "Authorization: Bearer $TOKEN" \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{"value": "sk-secret-value"}' | ||
| ``` | ||
| ### Register Local Secret (manifest only) | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/{slug}/secrets/{key}/register-local" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| No value sent. Just tells the web UI "this key exists on my machine." | ||
| ### Delete Secret | ||
| ```bash | ||
| curl -s -X DELETE "https://api.atris.ai/api/apps/{slug}/secrets/{key}" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| --- | ||
| ## Members | ||
| ### List App Members | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/members" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| ### Add Member (agent operator) | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/{slug}/members" \ | ||
| -H "Authorization: Bearer $TOKEN" \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{"agent_id": "AGENT_ID", "role": "operator"}' | ||
| ``` | ||
| ### Remove Member | ||
| ```bash | ||
| curl -s -X DELETE "https://api.atris.ai/api/apps/{slug}/members/{agent_id}" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
| --- | ||
| ## Trigger | ||
| ### Run App Now | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/{slug}/trigger" \ | ||
| -H "Authorization: Bearer $TOKEN" \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{"trigger_type": "manual"}' | ||
| ``` | ||
| --- | ||
| ## App Manifest (for published apps) | ||
| ```bash | ||
| curl -s "https://api.atris.ai/api/apps/{slug}/manifest" | ||
| ``` | ||
| No auth needed. Returns name, description, required secrets, schedule. | ||
| ### Install a Published App | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/{slug}/install" \ | ||
| -H "Authorization: Bearer $TOKEN" \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{}' | ||
| ``` | ||
| --- | ||
| ## Workflows | ||
| ### "What apps do I have?" | ||
| 1. List apps: `GET /api/apps` | ||
| 2. Display: name, slug, template, last run status | ||
| ### "How is my app doing?" | ||
| 1. Get status: `GET /api/apps/{slug}/status` | ||
| 2. Get recent runs: `GET /api/apps/{slug}/runs?limit=5` | ||
| 3. Show: health, last run time, success/failure, output | ||
| ### "Check my app's secrets" | ||
| 1. List secrets: `GET /api/apps/{slug}/secrets` | ||
| 2. Show each key with storage tier (cloud/local) | ||
| 3. If required secrets are missing, tell the user how to add them | ||
| ### "Run my app" | ||
| 1. Trigger: `POST /api/apps/{slug}/trigger` | ||
| 2. Poll status: `GET /api/apps/{slug}/status` (wait for completion) | ||
| 3. Show run result: `GET /api/apps/{slug}/runs?limit=1` | ||
| --- | ||
| ## Quick Reference | ||
| ```bash | ||
| TOKEN=$(node -e "console.log(require('$HOME/.atris/credentials.json').token)") | ||
| # List all my apps | ||
| curl -s "https://api.atris.ai/api/apps" -H "Authorization: Bearer $TOKEN" | ||
| # App status | ||
| curl -s "https://api.atris.ai/api/apps/SLUG/status" -H "Authorization: Bearer $TOKEN" | ||
| # Recent runs | ||
| curl -s "https://api.atris.ai/api/apps/SLUG/runs?limit=5" -H "Authorization: Bearer $TOKEN" | ||
| # Trigger a run | ||
| curl -s -X POST "https://api.atris.ai/api/apps/SLUG/trigger" \ | ||
| -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \ | ||
| -d '{"trigger_type":"manual"}' | ||
| # Read app data | ||
| curl -s "https://api.atris.ai/api/apps/SLUG/data" -H "Authorization: Bearer $TOKEN" | ||
| # List secrets (with storage tier) | ||
| curl -s "https://api.atris.ai/api/apps/SLUG/secrets" -H "Authorization: Bearer $TOKEN" | ||
| ``` |
| --- | ||
| name: autoresearch | ||
| description: Karpathy-style keep/revert experiment loop for Atris experiment packs. Use when improving prompts, tools, workers, or bounded repo targets. | ||
| version: 1.0.0 | ||
| tags: | ||
| - experiments | ||
| - keep-revert | ||
| - optimization | ||
| - metrics | ||
| --- | ||
| # Autoresearch Skill | ||
| Autoresearch means one bounded target, one external metric, one keep/revert loop, one append-only log. | ||
| ## When to use | ||
| - prompt optimization | ||
| - worker routing | ||
| - tool behavior | ||
| - evaluation harnesses | ||
| - any repo-local target that can be measured honestly | ||
| ## Process | ||
| 1. Read `atris/experiments/<slug>/program.md` | ||
| 2. Confirm the target is bounded | ||
| 3. Run the baseline with `measure.py` | ||
| 4. Apply one candidate change | ||
| 5. Rerun the metric | ||
| 6. Keep only if the score improves | ||
| 7. Write the outcome to `results.tsv` | ||
| 8. Revert losses | ||
| ## Rules | ||
| - external metric only | ||
| - no unlogged keeps | ||
| - no broad refactors inside an experiment | ||
| - one experiment pack = one target | ||
| - if variance exists, define the keep margin first | ||
| ## Commands | ||
| ```bash | ||
| atris experiments init <slug> | ||
| atris experiments validate | ||
| atris experiments benchmark | ||
| ``` | ||
| ## Good output | ||
| - short `program.md` | ||
| - honest `measure.py` | ||
| - deterministic `loop.py` | ||
| - append-only `results.tsv` | ||
| ## Bad output | ||
| - "felt better" | ||
| - changed three things at once | ||
| - kept a patch without a measured win | ||
| - no reset/revert path |
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { loadCredentials } = require('../utils/auth'); | ||
| const { apiRequestJson } = require('../utils/api'); | ||
| function getBusinessConfigPath() { | ||
| const home = require('os').homedir(); | ||
| const dir = path.join(home, '.atris'); | ||
| if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); | ||
| return path.join(dir, 'businesses.json'); | ||
| } | ||
// Read the connected-business registry from disk.
// Returns an empty object when the file is missing or unparseable.
function loadBusinesses() {
  const file = getBusinessConfigPath();
  if (!fs.existsSync(file)) {
    return {};
  }
  try {
    return JSON.parse(fs.readFileSync(file, 'utf8'));
  } catch {
    return {};
  }
}
// Persist the business registry as pretty-printed JSON.
function saveBusinesses(data) {
  const serialized = JSON.stringify(data, null, 2);
  fs.writeFileSync(getBusinessConfigPath(), serialized);
}
// Connect a business to this machine by slug.
//
// Resolution order: direct lookup via /businesses/by-slug/<slug>; if that
// fails, list all businesses and match by slug or case-insensitive name.
// On success the business is recorded in ~/.atris/businesses.json under the
// user-supplied slug. Exits the process with code 1 on a missing slug,
// missing login, or no match.
async function addBusiness(slug) {
  if (!slug) {
    console.error('Usage: atris business add <slug>');
    process.exit(1);
  }
  const creds = loadCredentials();
  if (!creds || !creds.token) {
    console.error('Not logged in. Run: atris login');
    process.exit(1);
  }
  // Shared save path — the two resolution branches previously duplicated this.
  const record = (biz) => {
    const businesses = loadBusinesses();
    businesses[slug] = {
      business_id: biz.id,
      workspace_id: biz.workspace_id,
      name: biz.name,
      slug: biz.slug,
      added_at: new Date().toISOString(),
    };
    saveBusinesses(businesses);
    console.log(`\nAdded "${biz.name}" (${biz.slug})`);
  };
  // Resolve slug to business
  const result = await apiRequestJson(`/businesses/by-slug/${slug}`, {
    method: 'GET',
    token: creds.token,
  });
  if (result.ok) {
    record(result.data);
    return;
  }
  // Fallback: list everything and match on slug or (case-insensitive) name.
  const listResult = await apiRequestJson('/businesses/', { method: 'GET', token: creds.token });
  if (listResult.ok && Array.isArray(listResult.data)) {
    const match = listResult.data.find(
      // Guard: entries may lack `name`; don't crash on .toLowerCase().
      b => b.slug === slug || (b.name || '').toLowerCase() === slug.toLowerCase()
    );
    if (match) {
      record(match);
      return;
    }
  }
  console.error(`Business "${slug}" not found.`);
  process.exit(1);
}
// Print every business recorded in ~/.atris/businesses.json, or a hint
// for connecting one when the registry is empty.
async function listBusinesses() {
  const businesses = loadBusinesses();
  const slugs = Object.keys(businesses);
  if (slugs.length === 0) {
    console.log('\nNo businesses connected. Run: atris business add <slug>');
    return;
  }
  console.log('\nConnected businesses:\n');
  for (const slug of slugs) {
    const b = businesses[slug];
    // Older registry entries may lack name/slug fields — fall back to the key.
    console.log(` ${b.name || slug} (${b.slug || slug})`);
    console.log(` ID: ${b.business_id}`);
    console.log(` Added: ${b.added_at || 'unknown'}`);
    console.log('');
  }
}
// Disconnect a business from this machine's registry.
// Exits with code 1 when no slug is given or the slug is not connected.
async function removeBusiness(slug) {
  if (!slug) {
    console.error('Usage: atris business remove <slug>');
    process.exit(1);
  }
  const registry = loadBusinesses();
  const entry = registry[slug];
  if (!entry) {
    console.error(`Business "${slug}" not connected.`);
    process.exit(1);
  }
  const displayName = entry.name || slug;
  delete registry[slug];
  saveBusinesses(registry);
  console.log(`\nRemoved "${displayName}"`);
}
// CLI entry point: dispatch `atris business <subcommand> [slug]`.
async function businessCommand(subcommand, ...args) {
  if (subcommand === 'add') {
    await addBusiness(args[0]);
  } else if (subcommand === 'list' || subcommand === 'ls') {
    await listBusinesses();
  } else if (subcommand === 'remove' || subcommand === 'rm') {
    await removeBusiness(args[0]);
  } else {
    console.log('Usage: atris business <add|list|remove> [slug]');
  }
}
// Public surface: the CLI dispatcher plus registry helpers reused by other commands.
module.exports = { businessCommand, loadBusinesses, saveBusinesses, getBusinessConfigPath };
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const os = require('os'); | ||
| const { spawn, spawnSync } = require('child_process'); | ||
| const readline = require('readline'); | ||
| // ── Context Gathering ────────────────────────────────────────────── | ||
// Build a snapshot of the Atris workspace rooted at <workspaceDir>/atris:
// installed skills (with frontmatter descriptions), team member names,
// backlog task count, today's journal path, and a persona excerpt.
// All fields are best-effort; missing pieces default to empty/null.
function gatherAtrisContext(workspaceDir) {
  const atrisDir = path.join(workspaceDir, 'atris');
  const ctx = {
    hasAtris: fs.existsSync(atrisDir),
    skills: [],
    teamMembers: [],
    backlogCount: 0,
    todayJournal: null,
    persona: null,
  };
  if (!ctx.hasAtris) return ctx;
  // Skills — scan atris/skills/ and .claude/skills/
  for (const skillsRoot of [
    path.join(atrisDir, 'skills'),
    path.join(workspaceDir, '.claude', 'skills'),
  ]) {
    if (!fs.existsSync(skillsRoot)) continue;
    for (const name of fs.readdirSync(skillsRoot)) {
      const skillMd = path.join(skillsRoot, name, 'SKILL.md');
      // Resolve symlinks so SKILL.md is read from the real skill directory.
      let resolvedPath = skillMd;
      try {
        const fullDir = path.join(skillsRoot, name);
        const stat = fs.lstatSync(fullDir);
        if (stat.isSymbolicLink()) {
          resolvedPath = path.join(fs.realpathSync(fullDir), 'SKILL.md');
        }
      } catch {}
      if (!fs.existsSync(resolvedPath)) continue;
      // Extract name + description from frontmatter
      try {
        const raw = fs.readFileSync(resolvedPath, 'utf8');
        const fmMatch = raw.match(/^---\n([\s\S]*?)\n---/);
        let desc = name;
        if (fmMatch) {
          const descLine = fmMatch[1].match(/description:\s*(.+)/);
          if (descLine) desc = descLine[1].trim();
        }
        ctx.skills.push({ name, description: desc, path: resolvedPath });
      } catch {
        // Unreadable SKILL.md — fall back to the directory name as description.
        ctx.skills.push({ name, description: name, path: resolvedPath });
      }
    }
  }
  // Dedupe by name — scan order means atris/skills/ wins over .claude/skills/.
  const seen = new Set();
  ctx.skills = ctx.skills.filter(s => {
    if (seen.has(s.name)) return false;
    seen.add(s.name);
    return true;
  });
  // Team members — scan atris/team/; "_"-prefixed directories are skipped.
  const teamDir = path.join(atrisDir, 'team');
  if (fs.existsSync(teamDir)) {
    for (const name of fs.readdirSync(teamDir)) {
      if (name.startsWith('_')) continue;
      const memberMd = path.join(teamDir, name, 'MEMBER.md');
      if (fs.existsSync(memberMd)) {
        ctx.teamMembers.push(name);
      }
    }
  }
  // Backlog count from TODO.md: "- " bullets under the "## Backlog" heading.
  const todoFile = path.join(atrisDir, 'TODO.md');
  if (fs.existsSync(todoFile)) {
    const content = fs.readFileSync(todoFile, 'utf8');
    const backlogMatch = content.match(/## Backlog\n([\s\S]*?)(?=\n## |$)/);
    if (backlogMatch) {
      ctx.backlogCount = (backlogMatch[1].match(/^- /gm) || []).length;
    }
  }
  // Today's journal at atris/logs/<year>/<yyyy-mm-dd>.md (local time).
  const now = new Date();
  const y = now.getFullYear();
  const m = String(now.getMonth() + 1).padStart(2, '0');
  const d = String(now.getDate()).padStart(2, '0');
  const journalPath = path.join(atrisDir, 'logs', String(y), `${y}-${m}-${d}.md`);
  if (fs.existsSync(journalPath)) {
    ctx.todayJournal = journalPath;
  }
  // Persona — first 2000 chars of atris/PERSONA.md (kept short for the prompt).
  const personaPath = path.join(atrisDir, 'PERSONA.md');
  if (fs.existsSync(personaPath)) {
    ctx.persona = fs.readFileSync(personaPath, 'utf8').slice(0, 2000);
  }
  return ctx;
}
// Render the gathered workspace context (see gatherAtrisContext) into a
// markdown system prompt for the wrapped coding agent. Sections for persona,
// team, skills, backlog, and journal are included only when present.
function buildSystemPrompt(ctx) {
  const sections = [
    '# Atris Console',
    'You are running inside the Atris Console — an AI workspace operating system.',
    '',
  ];
  if (ctx.persona) {
    sections.push('## Persona', ctx.persona, '');
  }
  if (ctx.teamMembers.length > 0) {
    sections.push(
      '## Team Members',
      `Available: ${ctx.teamMembers.join(', ')}`,
      'Each has a MEMBER.md in atris/team/<name>/ defining their role.',
      ''
    );
  }
  if (ctx.skills.length > 0) {
    sections.push(
      '## Skills',
      'Before replying, scan these skills. If one applies, read its SKILL.md then follow it.',
      ''
    );
    for (const skill of ctx.skills) {
      sections.push(`- **${skill.name}**: ${skill.description} (${skill.path})`);
    }
    sections.push('');
  }
  if (ctx.backlogCount > 0) {
    sections.push(
      `## Current Work`,
      `${ctx.backlogCount} task${ctx.backlogCount > 1 ? 's' : ''} in backlog. Check atris/TODO.md for details.`,
      ''
    );
  }
  if (ctx.todayJournal) {
    sections.push(`## Journal`, `Today's journal: ${ctx.todayJournal}`, '');
  }
  return sections.join('\n');
}
| // ── TUI Rendering ────────────────────────────────────────────────── | ||
// ANSI escape sequences used by the console TUI renderer below.
const COLORS = {
  reset: '\x1b[0m',
  bold: '\x1b[1m',
  dim: '\x1b[2m',
  cyan: '\x1b[36m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  magenta: '\x1b[35m',
  white: '\x1b[37m',
  bgBlack: '\x1b[40m',
  gray: '\x1b[90m',
};
// Draw the boxed console header: a centered title plus a status row showing
// the engine name and skill/team/task counts. Box width follows the terminal
// width, capped at 70 columns (60 when the width is unknown).
function renderHeader(ctx, backend) {
  const c = COLORS;
  const width = Math.min(process.stdout.columns || 60, 70);
  const line = '─'.repeat(width);
  console.log('');
  console.log(`${c.cyan}${c.bold} ┌${line}┐${c.reset}`);
  console.log(`${c.cyan}${c.bold} │${''.padEnd(width)}│${c.reset}`);
  // Title — padded left and right so it sits centered inside the box.
  const title = 'A T R I S C O N S O L E';
  const pad = Math.max(0, Math.floor((width - title.length) / 2));
  console.log(`${c.cyan}${c.bold} │${' '.repeat(pad)}${c.white}${title}${' '.repeat(width - pad - title.length)}${c.cyan}│${c.reset}`);
  console.log(`${c.cyan}${c.bold} │${''.padEnd(width)}│${c.reset}`);
  console.log(`${c.cyan} ├${line}┤${c.reset}`);
  // Status row — same centering scheme as the title.
  const engine = `Engine: ${backend}`;
  const skills = `Skills: ${ctx.skills.length}`;
  const team = `Team: ${ctx.teamMembers.length}`;
  const tasks = `Tasks: ${ctx.backlogCount}`;
  const statusItems = [engine, skills, team, tasks].join(' │ ');
  const statusPad = Math.max(0, Math.floor((width - statusItems.length) / 2));
  console.log(`${c.cyan} │${c.reset}${' '.repeat(statusPad)}${c.dim}${statusItems}${' '.repeat(Math.max(0, width - statusPad - statusItems.length))}${c.cyan}│${c.reset}`);
  console.log(`${c.cyan} └${line}┘${c.reset}`);
  console.log('');
}
// Print a one-line list of skill slash-commands under the header.
// Shows at most 8 names and summarizes the remainder; no-op with no skills.
function renderSkillsBar(ctx) {
  if (ctx.skills.length === 0) return;
  const c = COLORS;
  const names = ctx.skills.map(s => s.name).slice(0, 8);
  console.log(`${c.dim} Skills: ${names.map(n => `${c.magenta}/${n}${c.dim}`).join(' ')}${c.reset}`);
  if (ctx.skills.length > 8) {
    console.log(`${c.dim} ... and ${ctx.skills.length - 8} more${c.reset}`);
  }
  console.log('');
}
| // ── Backend Detection & Auth ─────────────────────────────────────── | ||
| function detectBackend(requested) { | ||
| const hasClaude = spawnSync('which', ['claude'], { stdio: 'pipe' }).status === 0; | ||
| const hasCodex = spawnSync('which', ['codex'], { stdio: 'pipe' }).status === 0; | ||
| if (requested) { | ||
| const installed = requested === 'claude' ? hasClaude : hasCodex; | ||
| if (!installed) return { backend: requested, installed: false, hasClaude, hasCodex }; | ||
| return { backend: requested, installed: true, hasClaude, hasCodex }; | ||
| } | ||
| if (hasClaude) return { backend: 'claude', installed: true, hasClaude, hasCodex }; | ||
| if (hasCodex) return { backend: 'codex', installed: true, hasClaude, hasCodex }; | ||
| return { backend: null, installed: false, hasClaude, hasCodex }; | ||
| } | ||
// Interactively offer to `npm install -g` the missing backend CLI.
// On "y": installs synchronously, then invokes callback to continue booting;
// on install failure exits 1. Any other answer exits 0.
function offerInstall(target, callback) {
  const pkg = target === 'claude'
    ? '@anthropic-ai/claude-code'
    : '@openai/codex';
  console.log(` ${target} is not installed.\n`);
  if (target === 'claude') {
    console.log(' Install: npm install -g @anthropic-ai/claude-code');
    console.log(' Auth: claude (follow login prompts)\n');
  } else {
    console.log(' Install: npm install -g @openai/codex');
    console.log(' Auth: export OPENAI_API_KEY=sk-...\n');
  }
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  rl.question(` Install ${target} now? (y/N) `, (answer) => {
    rl.close();
    if (answer.trim().toLowerCase() === 'y') {
      console.log(`\n Installing ${pkg}...\n`);
      const install = spawnSync('npm', ['install', '-g', pkg], {
        stdio: 'inherit',
        env: process.env,
      });
      if (install.status !== 0) {
        console.error(`\n✗ Install failed. Try: npm install -g ${pkg}`);
        process.exit(1);
      }
      console.log(`\n✓ ${target} installed.\n`);
      callback();
    } else {
      // Declined — nothing to launch, exit cleanly.
      process.exit(0);
    }
  });
}
// Gate launch on backend credentials.
// claude: always allowed — it runs its own interactive login flow.
// codex: requires OPENAI_API_KEY; exits 1 with setup instructions when absent.
// Unknown backends pass through.
function checkAuth(backend) {
  if (backend === 'claude') {
    // Claude handles its own auth interactively — always allow
    return true;
  }
  if (backend === 'codex') {
    if (!process.env.OPENAI_API_KEY) {
      console.error('\n ✗ OPENAI_API_KEY not set.\n');
      console.error(' Codex requires an OpenAI API key:');
      console.error(' export OPENAI_API_KEY=sk-...\n');
      console.error(' Get one at: https://platform.openai.com/api-keys\n');
      process.exit(1);
      return false; // unreachable after exit; kept for clarity
    }
    return true;
  }
  return true;
}
| // ── Launch ────────────────────────────────────────────────────────── | ||
// Launch Claude Code in the foreground with the Atris system prompt appended.
// Blocks until the child exits, then exits this process with the same status.
function launchClaude(systemPrompt, extraArgs) {
  const args = [
    '--dangerously-skip-permissions',
    '--append-system-prompt', systemPrompt,
    ...extraArgs,
  ];
  const child = spawnSync('claude', args, {
    cwd: process.cwd(),
    stdio: 'inherit',
    // Clear CLAUDECODE so the child doesn't think it's nested inside an
    // existing session — NOTE(review): presumed intent, confirm.
    env: { ...process.env, CLAUDECODE: undefined },
  });
  if (child.error) {
    console.error(`✗ Failed to start claude: ${child.error.message}`);
    process.exit(1);
  }
  process.exit(child.status ?? 0);
}
// Launch Codex with the Atris context prepended to ~/.codex/instructions.md.
// The pre-existing instructions file is restored (or removed if it didn't
// exist) once the session ends; exits this process with the child's status.
// NOTE(review): if the process dies mid-session the modified instructions
// file is left behind — no try/finally here; confirm acceptable.
function launchCodex(systemPrompt, extraArgs) {
  // Codex uses ~/.codex/instructions.md for system instructions
  // Write a temporary instructions file and point to it
  const codexDir = path.join(os.homedir(), '.codex');
  const instructionsPath = path.join(codexDir, 'instructions.md');
  // Preserve existing instructions
  let existingInstructions = '';
  if (fs.existsSync(instructionsPath)) {
    existingInstructions = fs.readFileSync(instructionsPath, 'utf8');
  }
  // Prepend Atris context
  const combined = systemPrompt + '\n\n---\n\n' + existingInstructions;
  // Write combined, launch, restore
  if (!fs.existsSync(codexDir)) fs.mkdirSync(codexDir, { recursive: true });
  fs.writeFileSync(instructionsPath, combined, 'utf8');
  const child = spawnSync('codex', ['--full-auto', ...extraArgs], {
    cwd: process.cwd(),
    stdio: 'inherit',
    env: process.env,
  });
  // Restore original
  if (existingInstructions) {
    fs.writeFileSync(instructionsPath, existingInstructions, 'utf8');
  } else {
    try { fs.unlinkSync(instructionsPath); } catch {}
  }
  if (child.error) {
    console.error(`✗ Failed to start codex: ${child.error.message}`);
    process.exit(1);
  }
  process.exit(child.status ?? 0);
}
| // ── Main ──────────────────────────────────────────────────────────── | ||
// Entry point for `atris console [claude|codex] [...args]`.
// Validates the requested backend, offers to install a missing CLI, then
// gathers workspace context, renders the TUI header, and hands off to the
// chosen agent (this process is replaced via process.exit in the launchers).
function consoleCommand() {
  // argv[0..2] are node, script, and the "console" subcommand.
  const args = process.argv.slice(3);
  let requested = args[0];
  if (requested === '--help' || requested === 'help') {
    console.log('Usage: atris console [claude|codex] [...args]\n');
    console.log('Launch a coding agent wrapped in Atris context.\n');
    console.log('The console injects your team, skills, tasks, and persona');
    console.log('into the agent session. Everything Atris knows, it knows.\n');
    console.log('Examples:');
    console.log(' atris console Auto-detect and launch');
    console.log(' atris console claude Launch with Claude Code');
    console.log(' atris console codex Launch with Codex');
    process.exit(0);
  }
  if (requested && !['claude', 'codex'].includes(requested)) {
    console.error(`✗ Unknown backend: ${requested}`);
    console.error(' Usage: atris console [claude|codex]');
    process.exit(1);
  }
  const detection = detectBackend(requested);
  // Deferred so offerInstall can run it after a successful install.
  function boot() {
    const backend = detection.backend || 'claude';
    checkAuth(backend);
    // Gather Atris context
    const ctx = gatherAtrisContext(process.cwd());
    // Render TUI header
    renderHeader(ctx, backend);
    renderSkillsBar(ctx);
    // Build system prompt
    const systemPrompt = buildSystemPrompt(ctx);
    // Extra args (skip backend name if it was first arg)
    const extraArgs = requested === args[0] ? args.slice(1) : args;
    // Launch
    if (backend === 'claude') {
      launchClaude(systemPrompt, extraArgs);
    } else {
      launchCodex(systemPrompt, extraArgs);
    }
  }
  if (!detection.installed) {
    const target = detection.backend || 'claude';
    offerInstall(target, boot);
  } else {
    boot();
  }
}
// consoleCommand is the CLI entry; the helpers are exported for reuse/testing.
module.exports = { consoleCommand, gatherAtrisContext, buildSystemPrompt };
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { spawnSync } = require('child_process'); | ||
// Experiment slugs: lowercase alphanumeric runs separated by single hyphens.
const SLUG_RE = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
// Framework scripts copied into atris/experiments/ from the packaged copy.
const ROOT_FILES = ['README.md', 'validate.py', 'benchmark_validate.py', 'benchmark_runtime.py'];
// Shared support directories synced alongside the framework scripts.
const SUPPORT_DIRS = ['_fixtures', '_template', '_examples'];
// Require an atris/ folder under workspaceDir; return its path.
// Exits the process with code 1 (and a hint to run `atris init`) when absent.
function ensureAtrisWorkspace(workspaceDir = process.cwd()) {
  const atrisDir = path.join(workspaceDir, 'atris');
  if (fs.existsSync(atrisDir)) {
    return atrisDir;
  }
  console.error('✗ Error: atris/ folder not found. Run "atris init" first.');
  process.exit(1);
}
| function copyRecursive(src, dest) { | ||
| fs.mkdirSync(dest, { recursive: true }); | ||
| const entries = fs.readdirSync(src, { withFileTypes: true }); | ||
| for (const entry of entries) { | ||
| if (entry.name === '.DS_Store' || entry.name === '__pycache__') continue; | ||
| const srcPath = path.join(src, entry.name); | ||
| const destPath = path.join(dest, entry.name); | ||
| if (entry.isDirectory()) { | ||
| copyRecursive(srcPath, destPath); | ||
| continue; | ||
| } | ||
| if (!fs.existsSync(destPath)) { | ||
| fs.copyFileSync(srcPath, destPath); | ||
| } | ||
| } | ||
| } | ||
// Idempotently materialize the experiments framework under atris/experiments/:
// copies packaged root scripts and support directories without overwriting
// anything the user already has. Returns { atrisDir, experimentsDir, created }
// where `created` lists the paths added this run (for the summary line).
function ensureExperimentsFramework(workspaceDir = process.cwd(), { silent = false } = {}) {
  const atrisDir = ensureAtrisWorkspace(workspaceDir);
  // Source of truth ships inside this npm package.
  const packageExperimentsDir = path.join(__dirname, '..', 'atris', 'experiments');
  const experimentsDir = path.join(atrisDir, 'experiments');
  const created = [];
  if (!fs.existsSync(experimentsDir)) {
    fs.mkdirSync(experimentsDir, { recursive: true });
    created.push('atris/experiments/');
  }
  // Root scripts: copy only files that are both packaged and not yet present.
  for (const file of ROOT_FILES) {
    const src = path.join(packageExperimentsDir, file);
    const dest = path.join(experimentsDir, file);
    if (!fs.existsSync(dest) && fs.existsSync(src)) {
      fs.copyFileSync(src, dest);
      created.push(`atris/experiments/${file}`);
    }
  }
  // Support dirs: copyRecursive merges without overwriting existing files.
  for (const dirName of SUPPORT_DIRS) {
    const src = path.join(packageExperimentsDir, dirName);
    const dest = path.join(experimentsDir, dirName);
    if (fs.existsSync(src)) {
      const hadDest = fs.existsSync(dest);
      copyRecursive(src, dest);
      if (!hadDest) {
        created.push(`atris/experiments/${dirName}/`);
      }
    }
  }
  if (!silent) {
    if (created.length > 0) {
      console.log(`✓ Prepared atris/experiments/ (${created.length} item${created.length === 1 ? '' : 's'})`);
    } else {
      console.log('✓ atris/experiments/ already ready');
    }
  }
  return { atrisDir, experimentsDir, created };
}
| function resolvePython() { | ||
| const candidates = [ | ||
| process.env.ATRIS_EXPERIMENTS_PYTHON, | ||
| 'python3', | ||
| 'python', | ||
| ].filter(Boolean); | ||
| for (const candidate of candidates) { | ||
| const probe = spawnSync(candidate, ['--version'], { encoding: 'utf8' }); | ||
| if (!probe.error && probe.status === 0) { | ||
| return candidate; | ||
| } | ||
| } | ||
| return null; | ||
| } | ||
// Run a Python script with inherited stdio.
// Exits 1 when no interpreter is found; propagates the script's nonzero exit
// status as this process's exit code; rethrows spawn-level errors.
function runPython(scriptPath, args = [], cwd = process.cwd()) {
  const python = resolvePython();
  if (!python) {
    console.error('✗ Error: Python not found. Set ATRIS_EXPERIMENTS_PYTHON or install python3.');
    process.exit(1);
  }
  const result = spawnSync(python, [scriptPath, ...args], {
    cwd,
    stdio: 'inherit',
    env: {
      ...process.env,
      // Keep experiment directories free of __pycache__/.pyc clutter.
      PYTHONDONTWRITEBYTECODE: '1',
    },
  });
  if (result.error) {
    throw result.error;
  }
  if (typeof result.status === 'number' && result.status !== 0) {
    process.exit(result.status);
  }
}
// Scaffold a new experiment pack, or just ensure the framework exists.
//
// With no name: prepares atris/experiments/ and prints the next step.
// With a slug: copies the packaged _template/pack into a new pack directory.
// Exits 1 on an invalid slug, an already-existing pack, or a missing template.
function experimentsInit(name) {
  const { experimentsDir } = ensureExperimentsFramework();
  if (!name) {
    console.log('');
    console.log('Experiments framework ready.');
    console.log('Next: atris experiments init <slug>');
    console.log('');
    return;
  }
  if (!SLUG_RE.test(name)) {
    console.error('✗ Invalid experiment name. Use lowercase-hyphen slug, for example: self-heal');
    process.exit(1);
  }
  const targetDir = path.join(experimentsDir, name);
  if (fs.existsSync(targetDir)) {
    console.error(`✗ Experiment "${name}" already exists at atris/experiments/${name}/`);
    process.exit(1);
  }
  const templateDir = path.join(experimentsDir, '_template', 'pack');
  // Guard: without this check copyRecursive would throw a raw ENOENT from
  // fs.readdirSync instead of giving the user an actionable message.
  if (!fs.existsSync(templateDir)) {
    console.error('✗ Template not found at atris/experiments/_template/pack/. Reinstall or re-run "atris experiments init".');
    process.exit(1);
  }
  copyRecursive(templateDir, targetDir);
  console.log(`✓ Created atris/experiments/${name}/`);
  console.log(' Files: program.md, measure.py, loop.py, results.tsv, reset.py');
}
// Run the structural validator over all packs, or one pack/path when given.
function experimentsValidate(rootArg) {
  const { experimentsDir } = ensureExperimentsFramework();
  const validatorArgs = rootArg ? [rootArg] : [];
  runPython(path.join(experimentsDir, 'validate.py'), validatorArgs, experimentsDir);
}
// Run the benchmark harness: 'validate', 'runtime', or both via 'all'.
// Unknown kinds print usage and exit 1.
function experimentsBenchmark(kind = 'all') {
  const { experimentsDir } = ensureExperimentsFramework();
  const harness = {
    validate: { banner: 'Running experiment validator benchmark...', script: 'benchmark_validate.py' },
    runtime: { banner: 'Running experiment runtime benchmark...', script: 'benchmark_runtime.py' },
  };
  const modes = kind === 'all' ? ['validate', 'runtime'] : [kind];
  for (const mode of modes) {
    const entry = harness[mode];
    if (!entry) {
      console.error('Usage: atris experiments benchmark [validate|runtime|all]');
      process.exit(1);
    }
    console.log(entry.banner);
    runPython(path.join(experimentsDir, entry.script), [], experimentsDir);
  }
}
// CLI entry point: dispatch `atris experiments <subcommand> [name]`.
// Unknown subcommands print the usage/help text.
function experimentsCommand(subcommand, ...args) {
  switch (subcommand) {
    case 'init':
    case 'new':
      return experimentsInit(args[0]);
    case 'validate':
      return experimentsValidate(args[0]);
    case 'benchmark':
      return experimentsBenchmark(args[0] || 'all');
    default:
      console.log('');
      console.log('Usage: atris experiments <subcommand> [name]');
      console.log('');
      console.log('Subcommands:');
      console.log(' init [slug] Prepare atris/experiments/ or scaffold a new pack');
      console.log(' validate [path|slug] Run structural validation on packs or a single pack');
      console.log(' benchmark [mode] Run validate/runtime/all benchmark harness');
      console.log('');
      console.log('Examples:');
      console.log(' atris experiments init');
      console.log(' atris experiments init self-heal');
      console.log(' atris experiments validate');
      console.log(' atris experiments benchmark runtime');
      console.log('');
  }
}
// Public surface for the `atris experiments` command.
module.exports = {
  experimentsCommand,
  ensureExperimentsFramework,
};
+311
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { loadCredentials } = require('../utils/auth'); | ||
| const { apiRequestJson } = require('../utils/api'); | ||
| const { findAllMembers } = require('./member'); | ||
| const { loadConfig } = require('../utils/config'); | ||
| const { getLogPath } = require('../lib/file-ops'); | ||
| const { parseJournalSections, mergeSections, reconstructJournal } = require('../lib/journal'); | ||
| const { loadBusinesses } = require('./business'); | ||
// Entry point for `atris pull [business-slug]`.
// With an argument: delegates to pullBusiness for a workspace snapshot pull.
// Without: syncs the general journal (last 3 days) and every team member's
// journal from the cloud into the local atris/ folder, then prints a summary.
async function pullAtris() {
  // argv[3] is the first argument after the "pull" subcommand.
  const arg = process.argv[3];
  // If a business name is given, do a business pull
  if (arg && arg !== '--help') {
    return pullBusiness(arg);
  }
  // Otherwise, do the existing journal pull
  const targetDir = path.join(process.cwd(), 'atris');
  if (!fs.existsSync(targetDir)) {
    console.error('atris/ folder not found. Run "atris init" first.');
    process.exit(1);
  }
  const creds = loadCredentials();
  if (!creds || !creds.token) {
    console.error('Not logged in. Run: atris login');
    process.exit(1);
  }
  console.log('');
  console.log('Pulling from cloud...');
  console.log('');
  let totalSynced = 0;
  // --- 1. General journal sync --- (needs an agent selected in local config)
  const config = loadConfig();
  if (config.agent_id) {
    const journalSynced = await pullGeneralJournal(creds.token, config.agent_id);
    totalSynced += journalSynced;
  } else {
    console.log(' Skip general journal (no agent selected, run "atris agent")');
  }
  // --- 2. Member journal sync --- only members pushed to the cloud have an agent-id.
  const teamDir = path.join(targetDir, 'team');
  const members = findAllMembers(teamDir);
  const membersWithAgents = members.filter(m => m.frontmatter && m.frontmatter['agent-id']);
  if (membersWithAgents.length === 0) {
    console.log(' No members with cloud agents (run "atris member push <name>")');
  } else {
    for (const member of membersWithAgents) {
      const agentId = member.frontmatter['agent-id'];
      const synced = await pullMemberJournal(creds.token, agentId, member.name, member.dir);
      totalSynced += synced;
    }
  }
  // --- Summary ---
  console.log('');
  if (totalSynced > 0) {
    console.log(`Done. ${totalSynced} file${totalSynced > 1 ? 's' : ''} synced.`);
  } else {
    console.log('Everything up to date.');
  }
}
// Pull a business workspace snapshot into a local directory.
//
// Output dir: `--into <path>` if given; otherwise atris/<slug>/ when an
// atris/ folder exists in cwd, else ./<slug>/. The business is resolved
// from the local registry first, then by slug or case-insensitive name via
// the API (and cached back into the registry). Text files are written only
// when their content changed; binary or contentless entries are skipped.
// Exits 1 on auth, resolution, or snapshot failures.
async function pullBusiness(slug) {
  const creds = loadCredentials();
  if (!creds || !creds.token) {
    console.error('Not logged in. Run: atris login');
    process.exit(1);
  }
  // Determine output directory
  const intoIdx = process.argv.indexOf('--into');
  let outputDir;
  if (intoIdx !== -1 && process.argv[intoIdx + 1]) {
    outputDir = path.resolve(process.argv[intoIdx + 1]);
  } else {
    // Default: atris/{slug}/ in current directory, or just {slug}/ if no atris/ folder
    const atrisDir = path.join(process.cwd(), 'atris');
    if (fs.existsSync(atrisDir)) {
      outputDir = path.join(atrisDir, slug);
    } else {
      outputDir = path.join(process.cwd(), slug);
    }
  }
  // Resolve business ID — check local config first, then API
  let businessId, workspaceId, businessName;
  const businesses = loadBusinesses();
  if (businesses[slug]) {
    businessId = businesses[slug].business_id;
    workspaceId = businesses[slug].workspace_id;
    businessName = businesses[slug].name || slug;
  } else {
    // Try to find by slug via API
    const listResult = await apiRequestJson('/businesses/', { method: 'GET', token: creds.token });
    if (!listResult.ok) {
      console.error(`Failed to fetch businesses: ${listResult.errorMessage || listResult.status}`);
      process.exit(1);
    }
    // NOTE(review): b.name.toLowerCase() throws if the API omits `name` — confirm schema.
    const match = (listResult.data || []).find(
      b => b.slug === slug || b.name.toLowerCase() === slug.toLowerCase()
    );
    if (!match) {
      console.error(`Business "${slug}" not found.`);
      process.exit(1);
    }
    businessId = match.id;
    workspaceId = match.workspace_id;
    businessName = match.name;
    // Auto-save for next time
    businesses[slug] = {
      business_id: businessId,
      workspace_id: workspaceId,
      name: businessName,
      slug: match.slug,
      added_at: new Date().toISOString(),
    };
    // Required lazily to avoid a circular require at module load time.
    const { saveBusinesses } = require('./business');
    saveBusinesses(businesses);
  }
  if (!workspaceId) {
    console.error(`Business "${slug}" has no workspace. Set one up first.`);
    process.exit(1);
  }
  console.log('');
  console.log(`Pulling ${businessName}...`);
  // Snapshot — one API call gets everything
  const result = await apiRequestJson(
    `/businesses/${businessId}/workspaces/${workspaceId}/snapshot?include_content=true`,
    { method: 'GET', token: creds.token }
  );
  if (!result.ok) {
    // Map known HTTP statuses to friendlier messages.
    const msg = result.errorMessage || `HTTP ${result.status}`;
    if (result.status === 409) {
      console.error(`\nComputer is sleeping. Wake it first, then pull again.`);
    } else if (result.status === 403) {
      console.error(`\nAccess denied. You're not a member of "${slug}".`);
    } else if (result.status === 404) {
      console.error(`\nBusiness "${slug}" not found.`);
    } else {
      console.error(`\nPull failed: ${msg}`);
    }
    process.exit(1);
  }
  const files = result.data.files || [];
  if (files.length === 0) {
    console.log(' Workspace is empty.');
    return;
  }
  // Write files to local directory
  let written = 0;
  let skipped = 0;
  for (const file of files) {
    if (!file.path || file.content === null || file.content === undefined) {
      skipped++;
      continue;
    }
    if (file.binary) {
      skipped++;
      continue;
    }
    // Strip any leading slash so the path joins under outputDir.
    const localPath = path.join(outputDir, file.path.replace(/^\//, ''));
    const localDir = path.dirname(localPath);
    // Check if unchanged
    if (fs.existsSync(localPath)) {
      const existing = fs.readFileSync(localPath, 'utf8');
      if (existing === file.content) {
        skipped++;
        continue;
      }
    }
    fs.mkdirSync(localDir, { recursive: true });
    fs.writeFileSync(localPath, file.content);
    written++;
  }
  console.log('');
  if (written > 0) {
    console.log(` ${written} file${written > 1 ? 's' : ''} pulled to ${outputDir}`);
  }
  if (skipped > 0) {
    console.log(` ${skipped} unchanged`);
  }
  console.log(`\n Total: ${files.length} files (${result.data.total_size} bytes)`);
}
/**
 * Pull the shared (general) journal for today and the two previous days,
 * merging remote content into the local log files.
 *
 * Per day: remote-only content is written directly; when local and remote
 * both exist and differ, a section-level merge is attempted. On merge
 * conflicts or parse failures the local copy is kept and the user is told
 * to run "atris log sync".
 *
 * @param {string} token   API auth token.
 * @param {string} agentId Agent whose journal endpoint is queried.
 * @returns {Promise<number>} Number of days written or merged.
 */
async function pullGeneralJournal(token, agentId) {
  // Last three days as YYYY-MM-DD strings (today first).
  const now = new Date();
  const targetDates = Array.from({ length: 3 }, (_, offset) => {
    const day = new Date(now);
    day.setDate(day.getDate() - offset);
    return day.toISOString().split('T')[0];
  });
  let pulledCount = 0;
  for (const date of targetDates) {
    const response = await apiRequestJson(`/agents/${agentId}/journal/${date}`, {
      method: 'GET',
      token,
    });
    if (!response.ok || !response.data || !response.data.content) continue;
    const remoteContent = response.data.content;
    const { logFile, yearDir } = getLogPath(date);
    if (!fs.existsSync(yearDir)) {
      fs.mkdirSync(yearDir, { recursive: true });
    }
    const localContent = fs.existsSync(logFile) ? fs.readFileSync(logFile, 'utf8') : '';
    // Identical ignoring surrounding whitespace — nothing to do.
    if (localContent.trim() === remoteContent.trim()) continue;
    if (!localContent || localContent.trim() === '') {
      // No local — just write remote
      fs.writeFileSync(logFile, remoteContent);
      console.log(`  Journal ${date} pulled`);
      pulledCount++;
      continue;
    }
    // Both sides have content and they differ — attempt a section merge.
    try {
      const ours = parseJournalSections(localContent);
      const theirs = parseJournalSections(remoteContent);
      const { merged, conflicts } = mergeSections(ours, theirs);
      if (conflicts.length === 0) {
        fs.writeFileSync(logFile, reconstructJournal(merged));
        console.log(`  Journal ${date} merged`);
        pulledCount++;
      } else {
        // Conflicts — keep local, warn
        console.log(`  Journal ${date} has conflicts (kept local, run "atris log sync" to resolve)`);
      }
    } catch {
      console.log(`  Journal ${date} differs (run "atris log sync" to resolve)`);
    }
  }
  if (pulledCount === 0) {
    console.log('  General journal: up to date');
  }
  return pulledCount;
}
/**
 * Pull one member's exported journal files into their local directory.
 *
 * Fetches the agent's full journal export and writes every file whose
 * content differs from the local copy. Paths from the server are treated
 * as untrusted: anything resolving outside memberDir is skipped.
 *
 * @param {string} token      API auth token.
 * @param {string} agentId    Agent whose journal export is fetched.
 * @param {string} memberName Display name used in console output.
 * @param {string} memberDir  Root directory the files are written under.
 * @returns {Promise<number>} Number of files written.
 */
async function pullMemberJournal(token, agentId, memberName, memberDir) {
  const result = await apiRequestJson(`/agent/${agentId}/export-journal`, {
    method: 'GET',
    token,
  });
  if (!result.ok || !result.data || !result.data.files) {
    console.log(`  ${memberName}: no journal entries`);
    return 0;
  }
  const files = result.data.files;
  const baseDir = path.resolve(memberDir);
  let synced = 0;
  for (const file of files) {
    if (!file.path || !file.content) continue;
    const localPath = path.resolve(baseDir, file.path);
    // Containment check via path.relative: the previous startsWith prefix
    // test wrongly accepted sibling directories such as "<memberDir>-evil",
    // so a crafted "../" path could escape memberDir.
    const rel = path.relative(baseDir, localPath);
    if (rel.startsWith('..') || path.isAbsolute(rel)) continue;
    const localContent = fs.existsSync(localPath) ? fs.readFileSync(localPath, 'utf8') : '';
    if (localContent.trim() === file.content.trim()) continue;
    fs.mkdirSync(path.dirname(localPath), { recursive: true });
    fs.writeFileSync(localPath, file.content);
    synced++;
  }
  if (synced > 0) {
    console.log(`  ${memberName}: ${synced} journal ${synced === 1 ? 'entry' : 'entries'} pulled`);
  } else {
    console.log(`  ${memberName}: up to date`);
  }
  return synced;
}
| module.exports = { pullAtris }; |
+170
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { loadCredentials } = require('../utils/auth'); | ||
| const { apiRequestJson } = require('../utils/api'); | ||
| const { loadBusinesses, saveBusinesses } = require('./business'); | ||
/**
 * Push local files to a Business Computer workspace.
 *
 * CLI entry for `atris push <business-slug> [--from <path>]`. Resolves the
 * source directory (--from flag, then atris/<slug>/, then ./<slug>/),
 * resolves the business from the local cache or the API (caching the API
 * result for next time), walks the directory collecting text files, and
 * syncs them in a single POST. Exits the process with code 1 on failure.
 */
async function pushAtris() {
  const slug = process.argv[3];
  if (!slug || slug === '--help') {
    console.log('Usage: atris push <business-slug> [--from <path>]');
    console.log('');
    console.log('Push local files to a Business Computer.');
    console.log('');
    console.log('Examples:');
    console.log(' atris push pallet Push from atris/pallet/ or ./pallet/');
    console.log(' atris push pallet --from ./my-dir/ Push from a custom directory');
    process.exit(0);
  }
  const creds = loadCredentials();
  if (!creds || !creds.token) {
    console.error('Not logged in. Run: atris login');
    process.exit(1);
  }
  // Determine source directory: explicit --from wins, otherwise probe the
  // two conventional locations.
  const fromIdx = process.argv.indexOf('--from');
  let sourceDir;
  if (fromIdx !== -1 && process.argv[fromIdx + 1]) {
    sourceDir = path.resolve(process.argv[fromIdx + 1]);
  } else {
    const atrisDir = path.join(process.cwd(), 'atris', slug);
    const cwdDir = path.join(process.cwd(), slug);
    if (fs.existsSync(atrisDir)) {
      sourceDir = atrisDir;
    } else if (fs.existsSync(cwdDir)) {
      sourceDir = cwdDir;
    } else {
      console.error(`No local folder found for "${slug}".`);
      console.error(`Expected: atris/${slug}/ or ./${slug}/`);
      console.error('Or specify: atris push pallet --from ./path/to/folder');
      process.exit(1);
    }
  }
  if (!fs.existsSync(sourceDir)) {
    console.error(`Source directory not found: ${sourceDir}`);
    process.exit(1);
  }
  // Resolve business ID: local cache first, then API lookup by slug/name.
  let businessId, workspaceId, businessName;
  const businesses = loadBusinesses();
  if (businesses[slug]) {
    businessId = businesses[slug].business_id;
    workspaceId = businesses[slug].workspace_id;
    businessName = businesses[slug].name || slug;
  } else {
    // Try to find by slug via API
    const listResult = await apiRequestJson('/businesses/', { method: 'GET', token: creds.token });
    if (!listResult.ok) {
      console.error(`Failed to fetch businesses: ${listResult.errorMessage || listResult.status}`);
      process.exit(1);
    }
    const match = (listResult.data || []).find(
      b => b.slug === slug || b.name.toLowerCase() === slug.toLowerCase()
    );
    if (!match) {
      console.error(`Business "${slug}" not found.`);
      process.exit(1);
    }
    businessId = match.id;
    workspaceId = match.workspace_id;
    businessName = match.name;
    // Auto-save so the next push skips the API lookup
    businesses[slug] = {
      business_id: businessId,
      workspace_id: workspaceId,
      name: businessName,
      slug: match.slug,
      added_at: new Date().toISOString(),
    };
    saveBusinesses(businesses);
  }
  if (!workspaceId) {
    console.error(`Business "${slug}" has no workspace.`);
    process.exit(1);
  }
  // Walk local directory and collect files
  const files = [];
  const SKIP_DIRS = new Set(['node_modules', '__pycache__', '.git', 'venv', '.venv', 'lost+found', '.cache']);
  // Recursively collect pushable text files under dir. Dotfiles and
  // SKIP_DIRS are ignored; binary files are skipped.
  function walkDir(dir) {
    const entries = fs.readdirSync(dir, { withFileTypes: true });
    for (const entry of entries) {
      if (entry.name.startsWith('.')) continue;
      const fullPath = path.join(dir, entry.name);
      if (entry.isDirectory()) {
        if (SKIP_DIRS.has(entry.name)) continue;
        walkDir(fullPath);
      } else if (entry.isFile()) {
        // API paths always use forward slashes, even on Windows.
        const relPath = '/' + path.relative(sourceDir, fullPath).split(path.sep).join('/');
        try {
          // Read a Buffer first: readFileSync(..., 'utf8') never throws on
          // binary input (invalid bytes become U+FFFD), so the old utf8 read
          // pushed mangled binary content instead of skipping it. A NUL byte
          // is the standard cheap binary heuristic.
          const raw = fs.readFileSync(fullPath);
          if (raw.includes(0)) continue; // binary — skip
          files.push({ path: relPath, content: raw.toString('utf8') });
        } catch {
          // Unreadable file — skip
        }
      }
    }
  }
  walkDir(sourceDir);
  if (files.length === 0) {
    console.log(`\nNo files to push from ${sourceDir}`);
    return;
  }
  console.log('');
  console.log(`Pushing ${files.length} files to ${businessName}...`);
  // Sync — one API call pushes everything
  const result = await apiRequestJson(
    `/businesses/${businessId}/workspaces/${workspaceId}/sync`,
    {
      method: 'POST',
      token: creds.token,
      body: { files },
    }
  );
  if (!result.ok) {
    const msg = result.errorMessage || `HTTP ${result.status}`;
    if (result.status === 409) {
      console.error(`\nComputer is sleeping. Wake it first, then push.`);
    } else if (result.status === 403) {
      console.error(`\nAccess denied: ${msg}`);
    } else {
      console.error(`\nPush failed: ${msg}`);
    }
    process.exit(1);
  }
  const data = result.data;
  console.log('');
  if (data.written > 0) {
    console.log(` ${data.written} file${data.written > 1 ? 's' : ''} written`);
  }
  if (data.unchanged > 0) {
    console.log(` ${data.unchanged} unchanged`);
  }
  if (data.errors > 0) {
    console.log(` ${data.errors} error${data.errors > 1 ? 's' : ''}`);
    for (const r of (data.results || [])) {
      if (r.status === 'error') {
        console.log(` ${r.path}: ${r.error}`);
      }
    }
  }
  console.log(`\n Synced to ${businessName}.`);
}
| module.exports = { pushAtris }; |
+366
| /** | ||
| * Atris Run — Auto-chain plan → do → review cycles | ||
| * | ||
| * The ignition switch. Reads inbox/backlog, loops autonomously | ||
| * until work is done or max cycles reached. | ||
| * | ||
| * Uses claude -p (subprocess) — no auth required. | ||
| */ | ||
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { execSync } = require('child_process'); | ||
| const { getLogPath, ensureLogDirectory, createLogFile } = require('../lib/journal'); | ||
| const { parseTodo } = require('../lib/todo'); | ||
| const { cleanAtris } = require('./clean'); | ||
| const pkg = require('../package.json'); | ||
| const DEFAULT_MAX_CYCLES = 5; | ||
| const PHASE_TIMEOUT = 600000; // 10 min per phase | ||
| /** | ||
| * Build prompt for each phase with full context | ||
| */ | ||
| function buildRunPrompt(phase, context) { | ||
| const { mapPath, todoPath, personaPath, lessonsPath, journalPath } = context; | ||
| const readFiles = [ | ||
| personaPath && `- ${personaPath}`, | ||
| mapPath && `- ${mapPath}`, | ||
| todoPath && `- ${todoPath}`, | ||
| lessonsPath && `- ${lessonsPath}`, | ||
| journalPath && `- ${journalPath}`, | ||
| ].filter(Boolean).join('\n'); | ||
| if (phase === 'plan') { | ||
| return `You are the Navigator agent. Your job is to plan work from the inbox. | ||
| Read these files first: | ||
| ${readFiles} | ||
| Workflow: | ||
| 1. Read the journal's ## Inbox section for ideas/tasks | ||
| 2. Read MAP.md for codebase navigation (file:line references) | ||
| 3. Read lessons.md for past learnings (if it exists) | ||
| 4. For each inbox item, create a task in TODO.md under ## Backlog | ||
| Format: - **T#:** Description [execute] | ||
| 5. Keep tasks small and specific (one function, one file, one fix) | ||
| 6. Do NOT write code. Planning only. | ||
| If inbox is empty but TODO.md has backlog tasks, skip planning — tasks already exist. | ||
| If both inbox and backlog are empty, reply: [NOTHING_TO_DO] | ||
| Reply [PLAN_COMPLETE] when done.`; | ||
| } | ||
| if (phase === 'do') { | ||
| return `You are the Executor agent. Your job is to build tasks from the backlog. | ||
| Read these files first: | ||
| ${readFiles} | ||
| Workflow: | ||
| 1. Read TODO.md — pick the first task from ## Backlog | ||
| 2. Move it to ## In Progress with: **Claimed by:** Executor at ${new Date().toISOString()} | ||
| 3. Read MAP.md to find exact file:line locations | ||
| 4. Implement the task step by step | ||
| 5. After implementation, verify the changes work | ||
| 6. Update MAP.md if you changed function locations or added new functions | ||
| 7. Commit changes: git add <specific-files> && git commit -m "feat: <description>" | ||
| Do NOT skip steps. Verify before marking complete. | ||
| Reply [DO_COMPLETE] when the task is built and committed.`; | ||
| } | ||
| if (phase === 'review') { | ||
| return `You are the Validator agent. Your job is to verify work quality. | ||
| Read these files first: | ||
| ${readFiles} | ||
| Workflow: | ||
| 1. Read TODO.md — find the task in ## In Progress | ||
| 2. Review the implementation: | ||
| - Does it actually work? Test it if possible. | ||
| - Does it follow existing patterns? (check MAP.md) | ||
| - Any bugs, edge cases, or security issues? | ||
| 3. If tests exist, run them | ||
| 4. If issues found: fix them, then continue | ||
| 5. When satisfied: | ||
| a. Delete the task from TODO.md (target state = 0) | ||
| b. Move the inbox item to ## Completed in today's journal | ||
| Format: - **C#:** Description [reviewed] | ||
| c. If you learned something, append to lessons.md | ||
| 6. Run: atris clean --dry-run (to check MAP.md refs) | ||
| Reply [REVIEW_COMPLETE] when validation passes. | ||
| Reply [REVIEW_FAILED] reason if something is broken.`; | ||
| } | ||
| return ''; | ||
| } | ||
| /** | ||
| * Execute a phase using claude -p | ||
| */ | ||
| function executePhase(phase, context, options = {}) { | ||
| const { verbose = false, timeout = PHASE_TIMEOUT } = options; | ||
| const prompt = buildRunPrompt(phase, context); | ||
| const tmpFile = path.join(process.cwd(), '.run-prompt.tmp'); | ||
| fs.writeFileSync(tmpFile, prompt); | ||
| try { | ||
| const cmd = `claude -p "$(cat '${tmpFile.replace(/'/g, "'\\''")}')" --allowedTools "Bash,Read,Write,Edit,Glob,Grep"`; | ||
| // Strip CLAUDECODE env var to allow spawning from within a Claude Code session | ||
| const env = { ...process.env }; | ||
| delete env.CLAUDECODE; | ||
| const output = execSync(cmd, { | ||
| cwd: process.cwd(), | ||
| encoding: 'utf8', | ||
| timeout, | ||
| stdio: verbose ? 'inherit' : 'pipe', | ||
| maxBuffer: 10 * 1024 * 1024, | ||
| env | ||
| }); | ||
| try { fs.unlinkSync(tmpFile); } catch {} | ||
| return output || ''; | ||
| } catch (err) { | ||
| try { fs.unlinkSync(tmpFile); } catch {} | ||
| if (err.killed) { | ||
| throw new Error(`${phase} timed out after ${timeout / 1000}s`); | ||
| } | ||
| // execSync throws on non-zero exit but may still have output | ||
| if (err.stdout) return err.stdout; | ||
| throw err; | ||
| } | ||
| } | ||
| /** | ||
| * Check if there's work to do (inbox items or backlog tasks) | ||
| */ | ||
| function hasWork(atrisDir) { | ||
| // Check backlog tasks | ||
| const todoPath = path.join(atrisDir, 'TODO.md'); | ||
| const todo = parseTodo(todoPath); | ||
| if (todo.backlog.length > 0 || todo.inProgress.length > 0) return true; | ||
| // Check inbox | ||
| const { logFile } = getLogPath(); | ||
| if (fs.existsSync(logFile)) { | ||
| const content = fs.readFileSync(logFile, 'utf8'); | ||
| const inboxMatch = content.match(/## Inbox\n([\s\S]*?)(?=\n##|$)/); | ||
| if (inboxMatch && inboxMatch[1].trim()) { | ||
| const items = inboxMatch[1].trim().split('\n').filter(l => l.trim().startsWith('-')); | ||
| if (items.length > 0) return true; | ||
| } | ||
| } | ||
| return false; | ||
| } | ||
| /** | ||
| * Log completion to journal | ||
| */ | ||
| function logRunCompletion(cycles, startTime, cycleTimings = []) { | ||
| ensureLogDirectory(); | ||
| const { logFile, dateFormatted } = getLogPath(); | ||
| if (!fs.existsSync(logFile)) { | ||
| createLogFile(logFile, dateFormatted); | ||
| } | ||
| let content = fs.readFileSync(logFile, 'utf8'); | ||
| const duration = Math.round((Date.now() - startTime) / 1000); | ||
| let timingLines = ''; | ||
| if (cycleTimings.length > 0) { | ||
| timingLines = cycleTimings.map((t, i) => | ||
| `- Cycle ${i + 1}: plan ${Math.round(t.plan / 1000)}s, do ${Math.round(t.do / 1000)}s, review ${Math.round(t.review / 1000)}s` | ||
| ).join('\n'); | ||
| timingLines = '\n' + timingLines; | ||
| } | ||
| const entry = `\n### Atris Run — ${new Date().toLocaleTimeString()}\n- Cycles: ${cycles}\n- Duration: ${duration}s${timingLines}\n`; | ||
| if (content.includes('## Notes')) { | ||
| content = content.replace(/(## Notes[^\n]*\n)/, `$1${entry}\n`); | ||
| } else { | ||
| content += `\n## Notes\n${entry}\n`; | ||
| } | ||
| fs.writeFileSync(logFile, content); | ||
| } | ||
| /** | ||
| * Main run function — the ignition switch | ||
| */ | ||
| async function runAtris(options = {}) { | ||
| const { | ||
| maxCycles = DEFAULT_MAX_CYCLES, | ||
| verbose = false, | ||
| dryRun = false, | ||
| once = false, | ||
| push = true, | ||
| timeout = PHASE_TIMEOUT | ||
| } = options; | ||
| const cycles = once ? 1 : maxCycles; | ||
| const atrisDir = path.join(process.cwd(), 'atris'); | ||
| if (!fs.existsSync(atrisDir)) { | ||
| console.error('atris/ folder not found. Run "atris init" first.'); | ||
| process.exit(1); | ||
| } | ||
| // Check claude CLI is available | ||
| try { | ||
| execSync('which claude', { stdio: 'pipe' }); | ||
| } catch { | ||
| console.error('claude CLI not found. Install Claude Code first.'); | ||
| process.exit(1); | ||
| } | ||
| console.log(''); | ||
| console.log('┌─────────────────────────────────────────────────────────────┐'); | ||
| console.log(`│ Atris Run v${pkg.version} — autonomous plan → do → review │`); | ||
| console.log('└─────────────────────────────────────────────────────────────┘'); | ||
| console.log(''); | ||
| console.log(`Max cycles: ${cycles}`); | ||
| console.log(`Phase timeout: ${timeout / 1000}s`); | ||
| console.log(`Verbose: ${verbose}`); | ||
| console.log(''); | ||
| // Build context paths | ||
| const context = { | ||
| mapPath: fs.existsSync(path.join(atrisDir, 'MAP.md')) ? 'atris/MAP.md' : null, | ||
| todoPath: fs.existsSync(path.join(atrisDir, 'TODO.md')) ? 'atris/TODO.md' : null, | ||
| personaPath: fs.existsSync(path.join(atrisDir, 'PERSONA.md')) ? 'atris/PERSONA.md' : null, | ||
| lessonsPath: fs.existsSync(path.join(atrisDir, 'lessons.md')) ? 'atris/lessons.md' : null, | ||
| journalPath: (() => { const { logFile } = getLogPath(); return fs.existsSync(logFile) ? path.relative(process.cwd(), logFile) : null; })(), | ||
| }; | ||
| if (dryRun) { | ||
| console.log('[DRY RUN] Would execute:'); | ||
| console.log(` ${cycles} cycles of plan → do → review`); | ||
| console.log(' Context:', JSON.stringify(context, null, 2)); | ||
| return; | ||
| } | ||
| const startTime = Date.now(); | ||
| const cycleTimings = []; | ||
| let completedCycles = 0; | ||
| for (let cycle = 1; cycle <= cycles; cycle++) { | ||
| console.log(`\n${'━'.repeat(60)}`); | ||
| console.log(`CYCLE ${cycle}/${cycles}`); | ||
| console.log(`${'━'.repeat(60)}`); | ||
| // Check if there's work | ||
| if (!hasWork(atrisDir)) { | ||
| console.log('\nInbox empty. Backlog empty. Nothing to do.'); | ||
| break; | ||
| } | ||
| const timing = { plan: 0, do: 0, review: 0 }; | ||
| try { | ||
| // PLAN | ||
| console.log('\n[1/3] PLAN — reading inbox, creating tasks...'); | ||
| let phaseStart = Date.now(); | ||
| const planOutput = executePhase('plan', context, { verbose, timeout }); | ||
| timing.plan = Date.now() - phaseStart; | ||
| if (planOutput.includes('[NOTHING_TO_DO]')) { | ||
| console.log('Nothing to do. Stopping.'); | ||
| break; | ||
| } | ||
| console.log(`✓ Plan complete (${Math.round(timing.plan / 1000)}s)`); | ||
| // Check if plan created tasks | ||
| if (!hasWork(atrisDir)) { | ||
| console.log('No tasks created. Stopping.'); | ||
| break; | ||
| } | ||
| // DO | ||
| console.log('\n[2/3] DO — building task...'); | ||
| phaseStart = Date.now(); | ||
| executePhase('do', context, { verbose, timeout }); | ||
| timing.do = Date.now() - phaseStart; | ||
| console.log(`✓ Build complete (${Math.round(timing.do / 1000)}s)`); | ||
| // REVIEW | ||
| console.log('\n[3/3] REVIEW — validating...'); | ||
| phaseStart = Date.now(); | ||
| const reviewOutput = executePhase('review', context, { verbose, timeout }); | ||
| timing.review = Date.now() - phaseStart; | ||
| if (reviewOutput.includes('[REVIEW_FAILED]')) { | ||
| console.log('⚠ Review found issues. Stopping for manual check.'); | ||
| cycleTimings.push(timing); | ||
| completedCycles++; | ||
| break; | ||
| } | ||
| console.log(`✓ Review complete (${Math.round(timing.review / 1000)}s)`); | ||
| cycleTimings.push(timing); | ||
| completedCycles++; | ||
| // Self-heal MAP.md refs after each cycle | ||
| console.log('\n[+] CLEAN — healing MAP.md refs...'); | ||
| try { | ||
| cleanAtris({ dryRun: false }); | ||
| } catch (cleanErr) { | ||
| console.log(`⚠ Clean failed: ${cleanErr.message}`); | ||
| } | ||
| // Auto-push if not disabled | ||
| if (push) { | ||
| console.log('\n[+] PUSH — pushing to remote...'); | ||
| try { | ||
| execSync('git push', { cwd: process.cwd(), encoding: 'utf8', stdio: 'pipe' }); | ||
| console.log('✓ Pushed to remote'); | ||
| } catch (pushErr) { | ||
| console.log(`⚠ Push failed: ${pushErr.message.split('\n')[0]}`); | ||
| } | ||
| } | ||
| console.log(`\n✓ Cycle ${cycle} done`); | ||
| } catch (err) { | ||
| console.error(`\n✗ Cycle ${cycle} failed: ${err.message}`); | ||
| break; | ||
| } | ||
| } | ||
| const elapsed = Math.round((Date.now() - startTime) / 1000); | ||
| // Log to journal | ||
| logRunCompletion(completedCycles, startTime, cycleTimings); | ||
| console.log(''); | ||
| console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'); | ||
| console.log(`Run complete. ${elapsed}s elapsed.`); | ||
| // Print phase duration summary table | ||
| if (cycleTimings.length > 0) { | ||
| console.log(''); | ||
| console.log(' Cycle │ Plan │ Do │ Review'); | ||
| console.log(' ───────┼─────────┼─────────┼────────'); | ||
| cycleTimings.forEach((t, i) => { | ||
| const p = `${Math.round(t.plan / 1000)}s`.padStart(5); | ||
| const d = `${Math.round(t.do / 1000)}s`.padStart(5); | ||
| const r = `${Math.round(t.review / 1000)}s`.padStart(5); | ||
| console.log(` ${String(i + 1).padStart(2)} │ ${p} │ ${d} │ ${r}`); | ||
| }); | ||
| } | ||
| console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'); | ||
| console.log(''); | ||
| } | ||
| module.exports = { runAtris }; |
@@ -55,2 +55,9 @@ # atris-design.md — frontend aesthetics | ||
| **specific anti-patterns:** | ||
| - cursor-following lines or elements | ||
| - meteor/particle effects shooting across screen | ||
| - buttons that follow the cursor (harder to click, not clever) | ||
| - FAQ/content that breaks if you scroll past before the fade-in finishes | ||
| - animations that swap styles endlessly without purpose (rotating shapes, morphing buttons) | ||
| **the move:** one well-timed animation beats ten scattered ones. page load with staggered reveals (animation-delay) creates more impact than hover effects on every button. | ||
@@ -62,2 +69,26 @@ | ||
| ## hover states | ||
| **avoid:** | ||
| - elements that fade out or disappear on hover | ||
| - nav items that shift position or slide horizontally on hover | ||
| - arrows/icons that move backwards or vertically on hover | ||
| - hiding critical info or functionality behind hover (hover doesn't exist on mobile) | ||
| **the move:** hover should make elements feel "lickable" — inviting to click. slightly brighten, scale up (1.02-1.05), or add a subtle glow. the user should feel pulled toward clicking, not confused about what happened. | ||
| test every hover on mobile. if something only works on hover, it's broken for half your users. | ||
| --- | ||
| ## scroll behavior | ||
| **avoid:** scrolljacking — never override native browser scroll with custom scroll logic. it feels like "moving through molasses" and users hate it. | ||
| **the move:** let the browser handle scrolling. if you want scroll-triggered effects, use intersection observer to trigger animations as sections enter the viewport — but don't mess with scroll speed or direction. | ||
| use the "peeking" technique: let a few pixels of the next section peek above the fold instead of full-screen heroes with "scroll down" arrows. this naturally signals more content below. | ||
| --- | ||
| ## backgrounds | ||
@@ -87,2 +118,33 @@ | ||
| ## information hierarchy | ||
| **avoid:** mixing 4-5 competing text styles on one page. labels, headers, subheaders, badges, and body text all fighting for attention. | ||
| **the move:** pick 2-3 levels max. one dominant style, one supporting, one accent. if you add a new style, ask: does this earn its place or is it clutter? | ||
| --- | ||
| ## hero section (the H1 test) | ||
| your hero must answer four questions in seconds: | ||
| 1. **what is it?** — clear product description | ||
| 2. **who is it for?** — the target user | ||
| 3. **to what end?** — why should they care | ||
| 4. **what's the CTA?** — one clear next step | ||
| if a stranger can't answer all four in 5 seconds of looking at your hero, rewrite it. | ||
| --- | ||
| ## assets | ||
| **avoid:** | ||
| - blurry or low-res screenshots | ||
| - "fake dashboard" mockups with Fisher-Price primary colors (red/yellow/green/blue) | ||
| - non-system emojis used as decoration (lazy AI tell) | ||
| **the move:** real product screenshots at high resolution. if you don't have a product yet, use a well-designed mockup — but make it sharp and believable. | ||
| --- | ||
| ## before shipping | ||
@@ -94,2 +156,6 @@ | ||
| - does the background have depth? | ||
| - do hover states feel inviting, not confusing? | ||
| - does scrolling feel native? | ||
| - does the hero pass the H1 test (what/who/why/CTA)? | ||
| - are all screenshots/assets crisp? | ||
| - would a designer immediately clock this as ai-generated? | ||
@@ -96,0 +162,0 @@ |
@@ -94,5 +94,8 @@ --- | ||
| **macOS/Linux:** | ||
| ```bash | ||
| mkdir -p ~/.atris/secrets/SLUG | ||
| ``` | ||
| For each key: | ||
| ```bash | ||
| read -s -p "Enter KEY_NAME: " secret_val | ||
@@ -105,12 +108,6 @@ printf '%s' "$secret_val" > ~/.atris/secrets/SLUG/KEY_NAME | ||
| **Windows (PowerShell):** | ||
| ```powershell | ||
| $dir = "$env:USERPROFILE\.atris\secrets\SLUG" | ||
| New-Item -ItemType Directory -Force -Path $dir | Out-Null | ||
| $val = Read-Host -Prompt "Enter KEY_NAME" -AsSecureString | ||
| $plain = [Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($val)) | ||
| Set-Content -Path "$dir\KEY_NAME" -Value $plain -NoNewline | ||
| icacls "$dir\KEY_NAME" /inheritance:r /grant:r "${env:USERNAME}:(R,W)" | Out-Null | ||
| Remove-Variable plain, val | ||
| Write-Host "Saved locally." | ||
| Register the key in the web UI (manifest only — no value sent): | ||
| ```bash | ||
| curl -s -X POST "https://api.atris.ai/api/apps/SLUG/secrets/KEY_NAME/register-local" \ | ||
| -H "Authorization: Bearer $TOKEN" | ||
| ``` | ||
@@ -120,4 +117,3 @@ | ||
| ```bash | ||
| ls ~/.atris/secrets/SLUG/ # macOS/Linux | ||
| dir $env:USERPROFILE\.atris\secrets\SLUG # Windows | ||
| ls ~/.atris/secrets/SLUG/ | ||
| ``` | ||
@@ -124,0 +120,0 @@ |
@@ -30,6 +30,16 @@ --- | ||
| **Motion:** one well-timed animation beats ten scattered ones. 200-300ms ease-out. | ||
| **Motion:** one well-timed animation beats ten scattered ones. 200-300ms ease-out. no cursor-following lines, no meteor effects, no buttons that chase the cursor. | ||
| **Hover:** make elements feel inviting on hover (brighten, subtle scale). never fade out, shift, or hide content behind hover. hover doesn't exist on mobile. | ||
| **Scroll:** never override native scroll. use "peeking" (show a few px of next section) instead of full-screen hero + scroll arrow. | ||
| **Hero (H1 test):** must answer in 5 seconds — what is it, who is it for, why care, what's the CTA. | ||
| **Assets:** high-res screenshots only. no fake dashboards with primary colors. no decorative non-system emojis. | ||
| **Backgrounds:** add depth. gradients, patterns, mesh effects. flat = boring. | ||
| **Hierarchy:** 2-3 text levels max. don't mix 5 competing styles. | ||
| ## Before Shipping Checklist | ||
@@ -42,2 +52,6 @@ | ||
| - background has depth? | ||
| - hover states feel inviting, not confusing? | ||
| - scrolling feels native? | ||
| - hero passes H1 test (what/who/why/CTA)? | ||
| - all assets crisp? | ||
| - would a designer clock this as ai-generated? | ||
@@ -44,0 +58,0 @@ |
@@ -30,2 +30,3 @@ # Atris Skills | ||
| | autopilot | PRD-driven autonomous execution | — | | ||
| | autoresearch | Bounded keep/revert experiment loops via `atris/experiments/` | — | | ||
| | backend | Backend architecture anti-patterns | `policies/atris-backend.md` | | ||
@@ -32,0 +33,0 @@ | design | Frontend aesthetics policy | `policies/atris-design.md` | |
+76
-5
@@ -167,2 +167,10 @@ #!/usr/bin/env node | ||
| function consoleCmd() { | ||
| const extractedCommand = path.join(__dirname, '..', 'commands', 'console.js'); | ||
| if (fs.existsSync(extractedCommand)) { | ||
| const loaded = require('../commands/console'); | ||
| if (loaded && typeof loaded.consoleCommand === 'function') { | ||
| return loaded.consoleCommand(); | ||
| } | ||
| } | ||
| const workspace = process.cwd(); | ||
@@ -215,2 +223,3 @@ const daemonScript = path.join(workspace, 'cli', 'atrisd.sh'); | ||
| console.log(' review - Validate work (tests, safety checks, docs)'); | ||
| console.log(' run - Auto-chain plan→do→review (autonomous loop, auto-pushes)'); | ||
| console.log(''); | ||
@@ -220,3 +229,3 @@ console.log('Context & tracking:'); | ||
| console.log(' activate - Load Atris context'); | ||
| console.log(' status - See active work and completions'); | ||
| console.log(' status - See active work and completions (--json for machine output)'); | ||
| console.log(' analytics - Show recent productivity from journals'); | ||
@@ -232,2 +241,7 @@ console.log(' search - Search journal history (atris search <keyword>)'); | ||
| console.log(''); | ||
| console.log('Experiments:'); | ||
| console.log(' experiments init [slug] - Prepare atris/experiments/ or scaffold a pack'); | ||
| console.log(' experiments validate - Validate experiment packs'); | ||
| console.log(' experiments benchmark [m] - Run validate/runtime experiment benchmarks'); | ||
| console.log(''); | ||
| console.log('Quick commands:'); | ||
@@ -237,2 +251,5 @@ console.log(' atris - Load context and start (natural language)'); | ||
| console.log(''); | ||
| console.log('Sync:'); | ||
| console.log(' pull - Pull journals + member data from cloud'); | ||
| console.log(''); | ||
| console.log('Cloud & agents:'); | ||
@@ -374,5 +391,5 @@ console.log(' console - Start/attach always-on coding console (tmux daemon)'); | ||
| // Check if this is a known command or natural language input | ||
| const knownCommands = ['init', 'log', 'status', 'analytics', 'visualize', 'brainstorm', 'autopilot', 'plan', 'do', 'review', | ||
| const knownCommands = ['init', 'log', 'status', 'analytics', 'visualize', 'brainstorm', 'autopilot', 'run', 'plan', 'do', 'review', | ||
| 'activate', 'agent', 'chat', 'console', 'login', 'logout', 'whoami', 'switch', 'accounts', 'update', 'upgrade', 'version', 'help', 'next', 'atris', | ||
| 'clean', 'verify', 'search', 'skill', 'member', 'plugin', 'sync', | ||
| 'clean', 'verify', 'search', 'skill', 'member', 'plugin', 'experiments', 'pull', 'sync', | ||
| 'gmail', 'calendar', 'twitter', 'slack', 'integrations']; | ||
@@ -724,2 +741,38 @@ | ||
| require('../commands/visualize').visualizeAtris(); | ||
| } else if (command === 'run') { | ||
| const args = process.argv.slice(3); | ||
| if (args.includes('--help') || args.includes('-h')) { | ||
| console.log(''); | ||
| console.log('Usage: atris run [options]'); | ||
| console.log(''); | ||
| console.log('Auto-chain plan → do → review cycles autonomously.'); | ||
| console.log('Reads inbox ideas, creates tasks, builds them, validates, repeats.'); | ||
| console.log(''); | ||
| console.log('Options:'); | ||
| console.log(' --cycles=N Max cycles (default: 5)'); | ||
| console.log(' --once Single plan→do→review cycle'); | ||
| console.log(' --verbose Show claude -p output'); | ||
| console.log(' --dry-run Preview without executing'); | ||
| console.log(' --timeout=N Phase timeout in seconds (default: 600)'); | ||
| console.log(' --push Auto-push after each cycle (default: true)'); | ||
| console.log(' --no-push Skip auto-push after each cycle'); | ||
| console.log(''); | ||
| process.exit(0); | ||
| } | ||
| const verbose = args.includes('--verbose') || args.includes('-v'); | ||
| const dryRun = args.includes('--dry-run'); | ||
| const once = args.includes('--once'); | ||
| const push = !args.includes('--no-push'); | ||
| const cyclesArg = args.find(a => a.startsWith('--cycles=')); | ||
| const maxCycles = cyclesArg ? parseInt(cyclesArg.split('=')[1]) : 5; | ||
| const timeoutArg = args.find(a => a.startsWith('--timeout=')); | ||
| const timeout = timeoutArg ? parseInt(timeoutArg.split('=')[1]) * 1000 : undefined; | ||
| require('../commands/run').runAtris({ maxCycles, verbose, dryRun, once, push, timeout }) | ||
| .then(() => process.exit(0)) | ||
| .catch((error) => { | ||
| console.error(`\u2717 Run failed: ${error.message || error}`); | ||
| process.exit(1); | ||
| }); | ||
| } else if (command === 'autopilot') { | ||
@@ -823,3 +876,4 @@ const args = process.argv.slice(3); | ||
| const isQuick = process.argv.includes('--quick') || process.argv.includes('-q'); | ||
| statusCmd(isQuick); | ||
| const isJson = process.argv.includes('--json'); | ||
| statusCmd(isQuick, isJson); | ||
| } else if (command === 'analytics') { | ||
@@ -877,2 +931,16 @@ require('../commands/analytics').analyticsAtris(); | ||
| require('../commands/member').memberCommand(subcommand, ...args); | ||
| } else if (command === 'pull') { | ||
| require('../commands/pull').pullAtris() | ||
| .then(() => process.exit(0)) | ||
| .catch((err) => { console.error(`\n✗ Error: ${err.message || err}`); process.exit(1); }); | ||
| } else if (command === 'push') { | ||
| require('../commands/push').pushAtris() | ||
| .then(() => process.exit(0)) | ||
| .catch((err) => { console.error(`\n✗ Error: ${err.message || err}`); process.exit(1); }); | ||
| } else if (command === 'business') { | ||
| const subcommand = process.argv[3]; | ||
| const args = process.argv.slice(4); | ||
| require('../commands/business').businessCommand(subcommand, ...args) | ||
| .then(() => process.exit(0)) | ||
| .catch((err) => { console.error(`\n✗ Error: ${err.message || err}`); process.exit(1); }); | ||
| } else if (command === 'plugin') { | ||
@@ -882,2 +950,6 @@ const subcommand = process.argv[3] || 'build'; | ||
| require('../commands/plugin').pluginCommand(subcommand, ...args); | ||
| } else if (command === 'experiments') { | ||
| const subcommand = process.argv[3]; | ||
| const args = process.argv.slice(4); | ||
| require('../commands/experiments').experimentsCommand(subcommand, ...args); | ||
| } else { | ||
@@ -1305,2 +1377,1 @@ console.log(`Unknown command: ${command}`); | ||
| } | ||
+112
-69
@@ -165,3 +165,4 @@ const fs = require('fs'); | ||
| /** | ||
| * Find and heal broken MAP.md references | ||
| * Find and heal broken MAP.md references (single-line AND range refs) | ||
| * Detects both out-of-bounds AND drift (symbol moved to different line) | ||
| * Returns { healed: number, unhealable: array } | ||
@@ -177,6 +178,21 @@ */ | ||
| // Match patterns like `file.js:123` with surrounding context | ||
| // Capture: full match, file path, extension, line number, and context after | ||
| const refPattern = /(`?)([a-zA-Z0-9_\-./\\]+\.(js|ts|py|go|rs|rb|java|c|cpp|h|hpp|md|json|yaml|yml)):(\d+)(`?)(\s*[\(\[—\-]?\s*([^)\]\n]+))?/g; | ||
| // Match both `file.js:123` and `file.js:123-456` with surrounding context | ||
| // [^\S\n] = horizontal whitespace only (no newlines) | ||
| // Required delimiter [(,[,—,-] prevents bleeding into adjacent refs on same line | ||
| const refPattern = /(`?)([a-zA-Z0-9_\-./\\]+\.(js|ts|py|go|rs|rb|java|c|cpp|h|hpp|md|json|yaml|yml)):(\d+)(?:-(\d+))?(`?)([^\S\n]*[\(\[—\-][^\S\n]*([^)\]\n]+))?/g; | ||
| // Cache file reads | ||
| const fileCache = {}; | ||
| function readFileCached(filePath) { | ||
| if (!fileCache[filePath]) { | ||
| const fullPath = path.join(cwd, filePath); | ||
| if (!fs.existsSync(fullPath)) return null; | ||
| try { | ||
| const content = fs.readFileSync(fullPath, 'utf8'); | ||
| fileCache[filePath] = { content, lines: content.split('\n') }; | ||
| } catch { return null; } | ||
| } | ||
| return fileCache[filePath]; | ||
| } | ||
| const replacements = []; | ||
@@ -186,56 +202,58 @@ let match; | ||
| while ((match = refPattern.exec(mapContent)) !== null) { | ||
| const fullMatch = match[0]; | ||
| const backtickBefore = match[1] || ''; | ||
| const filePath = match[2]; | ||
| const ext = match[3]; | ||
| const lineNum = parseInt(match[4], 10); | ||
| const backtickAfter = match[5] || ''; | ||
| const contextPart = match[7] || ''; | ||
| const startLine = parseInt(match[4], 10); | ||
| const endLine = match[5] ? parseInt(match[5], 10) : null; | ||
| const backtickAfter = match[6] || ''; | ||
| const contextPart = match[8] || ''; | ||
| const fullPath = path.join(cwd, filePath); | ||
| // Check if file exists | ||
| if (!fs.existsSync(fullPath)) { | ||
| unhealable.push({ file: filePath, line: lineNum, reason: 'File not found' }); | ||
| const file = readFileCached(filePath); | ||
| if (!file) { | ||
| unhealable.push({ file: filePath, line: startLine, reason: 'File not found' }); | ||
| continue; | ||
| } | ||
| // Read file and check line number | ||
| let fileContent; | ||
| try { | ||
| fileContent = fs.readFileSync(fullPath, 'utf8'); | ||
| } catch (err) { | ||
| unhealable.push({ file: filePath, line: lineNum, reason: `Cannot read: ${err.message}` }); | ||
| continue; | ||
| } | ||
| const symbol = extractSymbol(contextPart); | ||
| const lines = fileContent.split('\n'); | ||
| // Check if reference is still accurate | ||
| const outOfBounds = startLine > file.lines.length || (endLine && endLine > file.lines.length); | ||
| const drifted = symbol && startLine <= file.lines.length && !symbolAtLine(file.content, symbol, startLine); | ||
| // If line number is valid, skip | ||
| if (lineNum <= lines.length) { | ||
| continue; | ||
| } | ||
| if (!outOfBounds && !drifted) continue; | ||
| // Line number is broken - try to heal | ||
| const symbol = extractSymbol(contextPart); | ||
| if (!symbol) { | ||
| unhealable.push({ file: filePath, line: lineNum, reason: 'No symbol to search for' }); | ||
| unhealable.push({ file: filePath, line: startLine, reason: 'No symbol to search for' }); | ||
| continue; | ||
| } | ||
| // Search for the symbol in the file | ||
| const newLine = findSymbolLine(fileContent, symbol); | ||
| if (!newLine) { | ||
| unhealable.push({ file: filePath, line: lineNum, reason: `Symbol "${symbol}" not found` }); | ||
| // Find where the symbol actually is now | ||
| const newStart = findSymbolLine(file.content, symbol); | ||
| if (!newStart) { | ||
| unhealable.push({ file: filePath, line: startLine, reason: `Symbol "${symbol}" not found` }); | ||
| continue; | ||
| } | ||
| // Record the replacement | ||
| const oldRef = `${backtickBefore}${filePath}:${lineNum}${backtickAfter}`; | ||
| const newRef = `${backtickBefore}${filePath}:${newLine}${backtickAfter}`; | ||
| if (endLine) { | ||
| // Range ref: find new end by scanning for function end | ||
| const originalSpan = endLine - startLine; | ||
| const newEnd = findFunctionEnd(file.lines, newStart) || (newStart + originalSpan); | ||
| const clampedEnd = Math.min(newEnd, file.lines.length); | ||
| replacements.push({ old: oldRef, new: newRef, symbol }); | ||
| healed++; | ||
| const oldRef = `${backtickBefore}${filePath}:${startLine}-${endLine}${backtickAfter}`; | ||
| const newRef = `${backtickBefore}${filePath}:${newStart}-${clampedEnd}${backtickAfter}`; | ||
| if (oldRef !== newRef) { | ||
| replacements.push({ old: oldRef, new: newRef, symbol }); | ||
| healed++; | ||
| } | ||
| } else { | ||
| // Single-line ref | ||
| const oldRef = `${backtickBefore}${filePath}:${startLine}${backtickAfter}`; | ||
| const newRef = `${backtickBefore}${filePath}:${newStart}${backtickAfter}`; | ||
| if (oldRef !== newRef) { | ||
| replacements.push({ old: oldRef, new: newRef, symbol }); | ||
| healed++; | ||
| } | ||
| } | ||
| } | ||
@@ -251,6 +269,44 @@ | ||
| return { healed, unhealable }; | ||
| return { healed, unhealable, replacements: dryRun ? replacements : [] }; | ||
| } | ||
| /** | ||
| * Check if a symbol is actually defined at or near a given line | ||
| */ | ||
| function symbolAtLine(fileContent, symbol, lineNum) { | ||
| const lines = fileContent.split('\n'); | ||
| // Check a 5-line window around the referenced line | ||
| const start = Math.max(0, lineNum - 3); | ||
| const end = Math.min(lines.length, lineNum + 2); | ||
| const escaped = escapeRegExp(symbol); | ||
| const re = new RegExp(`\\b${escaped}\\b`); | ||
| for (let i = start; i < end; i++) { | ||
| if (re.test(lines[i])) return true; | ||
| } | ||
| return false; | ||
| } | ||
| /** | ||
| * Find the end line of a function starting at startLine (1-indexed) | ||
| * Tracks brace depth to find the matching closing brace | ||
| */ | ||
| function findFunctionEnd(lines, startLine) { | ||
| let depth = 0; | ||
| let foundOpen = false; | ||
| for (let i = startLine - 1; i < lines.length; i++) { | ||
| const line = lines[i]; | ||
| for (const ch of line) { | ||
| if (ch === '{') { depth++; foundOpen = true; } | ||
| if (ch === '}') { depth--; } | ||
| } | ||
| if (foundOpen && depth === 0) { | ||
| return i + 1; // 1-indexed | ||
| } | ||
| } | ||
| return null; | ||
| } | ||
| /** | ||
| * Extract a symbol name from context like "(atrisDevEntry function)" or "— Main entry" | ||
@@ -263,2 +319,3 @@ */ | ||
| const cleaned = context.trim() | ||
| .replace(/`/g, '') // Strip backticks | ||
| .replace(/^[\(\[—\-:]+\s*/, '') // Remove leading punctuation | ||
@@ -288,30 +345,23 @@ .replace(/[\)\]]+$/, '') // Remove trailing brackets | ||
| /** | ||
| * Find the line number where a symbol is defined | ||
| * Find the line number where a symbol is defined (strict patterns only) | ||
| * Returns null if only loose matches found — prevents healing to wrong locations | ||
| */ | ||
| function findSymbolLine(fileContent, symbol) { | ||
| const lines = fileContent.split('\n'); | ||
| const esc = escapeRegExp(symbol); | ||
| // Patterns to match symbol definitions | ||
| // Strict definition patterns only — no loose fallback | ||
| const patterns = [ | ||
| // function name( | ||
| new RegExp(`^\\s*(async\\s+)?function\\s+${escapeRegExp(symbol)}\\s*\\(`), | ||
| // const/let/var name = | ||
| new RegExp(`^\\s*(const|let|var)\\s+${escapeRegExp(symbol)}\\s*=`), | ||
| // class name | ||
| new RegExp(`^\\s*class\\s+${escapeRegExp(symbol)}\\b`), | ||
| // name: function or name() { | ||
| new RegExp(`^\\s*${escapeRegExp(symbol)}\\s*[:(]`), | ||
| // exports.name or module.exports.name | ||
| new RegExp(`exports\\.${escapeRegExp(symbol)}\\s*=`), | ||
| // def name( for Python | ||
| new RegExp(`^\\s*def\\s+${escapeRegExp(symbol)}\\s*\\(`), | ||
| // Just the symbol on a line (loose match) | ||
| new RegExp(`\\b${escapeRegExp(symbol)}\\b`) | ||
| new RegExp(`^\\s*(async\\s+)?function\\s+${esc}\\s*\\(`), // function name( | ||
| new RegExp(`^\\s*(const|let|var)\\s+${esc}\\s*=`), // const/let/var name = | ||
| new RegExp(`^\\s*class\\s+${esc}\\b`), // class name | ||
| new RegExp(`^\\s*${esc}\\s*[:(]`), // name: or name( | ||
| new RegExp(`exports\\.${esc}\\s*=`), // exports.name = | ||
| new RegExp(`^\\s*def\\s+${esc}\\s*\\(`), // def name( (Python) | ||
| ]; | ||
| // Try strict patterns first | ||
| for (let i = 0; i < patterns.length - 1; i++) { | ||
| for (const pattern of patterns) { | ||
| for (let lineIdx = 0; lineIdx < lines.length; lineIdx++) { | ||
| if (patterns[i].test(lines[lineIdx])) { | ||
| return lineIdx + 1; // 1-indexed | ||
| if (pattern.test(lines[lineIdx])) { | ||
| return lineIdx + 1; | ||
| } | ||
@@ -321,9 +371,2 @@ } | ||
| // Fallback: loose match (just contains the symbol) | ||
| for (let lineIdx = 0; lineIdx < lines.length; lineIdx++) { | ||
| if (patterns[patterns.length - 1].test(lines[lineIdx])) { | ||
| return lineIdx + 1; | ||
| } | ||
| } | ||
| return null; | ||
@@ -330,0 +373,0 @@ } |
+4
-0
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const { ensureExperimentsFramework } = require('./experiments'); | ||
@@ -477,3 +478,6 @@ /** | ||
| // Create experiments directory and packaged validation harness | ||
| ensureExperimentsFramework(process.cwd(), { silent: false }); | ||
| // Copy team members (MEMBER.md format — directory per member with skills/tools/context) | ||
@@ -480,0 +484,0 @@ const members = ['navigator', 'executor', 'validator', 'launcher', 'brainstormer', 'researcher']; |
+21
-1
@@ -15,6 +15,10 @@ const fs = require('fs'); | ||
| function statusAtris(isQuick = false) { | ||
| function statusAtris(isQuick = false, jsonMode = false) { | ||
| const targetDir = path.join(process.cwd(), 'atris'); | ||
| if (!fs.existsSync(targetDir)) { | ||
| if (jsonMode) { | ||
| console.log(JSON.stringify({ error: 'atris/ folder not found' })); | ||
| process.exit(1); | ||
| } | ||
| console.log('✗ atris/ folder not found. Run "atris init" first.'); | ||
@@ -92,2 +96,18 @@ process.exit(1); | ||
| // JSON mode — structured output for scripting | ||
| if (jsonMode) { | ||
| const output = { | ||
| date: dateFormatted, | ||
| backlog: todo.backlog, | ||
| inProgress: todo.inProgress, | ||
| completed: todo.completed, | ||
| inbox: inboxItems, | ||
| completions, | ||
| lessons: lessonsCount, | ||
| team: teamActivity, | ||
| }; | ||
| console.log(JSON.stringify(output, null, 2)); | ||
| return; | ||
| } | ||
| // Quick mode | ||
@@ -94,0 +114,0 @@ if (isQuick) { |
+2
-1
| { | ||
| "name": "atris", | ||
| "version": "2.5.3", | ||
| "version": "2.5.4", | ||
| "description": "atrisDev (atris dev) - CLI for AI coding agents. Works with Claude Code, Cursor, Windsurf. Make any codebase AI-navigable.", | ||
@@ -24,2 +24,3 @@ "main": "bin/atris.js", | ||
| "atris/team/", | ||
| "atris/experiments/", | ||
| "atris/features/_templates/", | ||
@@ -26,0 +27,0 @@ "atris/policies/", |
+10
-0
@@ -44,2 +44,12 @@ # atris | ||
| ## Experiments | ||
| Atris also supports Karpathy-style keep/revert loops inside `atris/experiments/`. | ||
| ```bash | ||
| atris experiments init self-heal | ||
| atris experiments validate | ||
| atris experiments benchmark | ||
| ``` | ||
| ## Update | ||
@@ -46,0 +56,0 @@ |
Network access
Supply chain riskThis module accesses the network.
Found 3 instances in 1 package
Shell access
Supply chain riskThis module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Dynamic require
Supply chain riskDynamic require can indicate the package is performing dangerous or unsafe dynamic code execution.
Found 1 instance in 1 package
Environment variable access
Supply chain riskPackage accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 8 instances in 1 package
Filesystem access
Supply chain riskAccesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Long strings
Supply chain riskContains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain riskPackage contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
Network access
Supply chain riskThis module accesses the network.
Found 3 instances in 1 package
Shell access
Supply chain riskThis module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Dynamic require
Supply chain riskDynamic require can indicate the package is performing dangerous or unsafe dynamic code execution.
Found 1 instance in 1 package
Environment variable access
Supply chain riskPackage accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 6 instances in 1 package
Filesystem access
Supply chain riskAccesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Long strings
Supply chain riskContains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain riskPackage contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
828738
12.25%128
42.22%12290
17.88%79
14.49%64
28%27
22.73%