🚀 Big News: Socket Has Acquired Secure Annex. Learn More
Socket
Book a DemoSign in
Socket

@prismer/claude-code-plugin

Package Overview
Dependencies
Maintainers
1
Versions
11
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@prismer/claude-code-plugin - npm Package Compare versions

Comparing version
1.7.3
to
1.7.4
scripts/deprecated/session-evolve.mjs.bak

Sorry, the diff of this file is not supported yet

+53
/**
* Resolve Prismer config with priority chain:
* 1. Environment variables (PRISMER_API_KEY, PRISMER_BASE_URL)
* 2. Claude Code userConfig (CLAUDE_PLUGIN_OPTION_API_KEY, CLAUDE_PLUGIN_OPTION_BASE_URL)
* 3. ~/.prismer/config.toml (shared with `prismer` CLI)
* 4. Defaults
*
* Usage:
* import { resolveConfig } from './lib/resolve-config.mjs';
* const { apiKey, baseUrl } = resolveConfig();
*/
import { readFileSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';
let _cached = null;

/**
 * Resolve the Prismer API key and base URL using the priority chain:
 * env vars → Claude Code userConfig env vars → ~/.prismer/config.toml → defaults.
 * The result is memoized for the lifetime of the process.
 *
 * @returns {{ apiKey: string, baseUrl: string }} apiKey is '' when unset or
 *   not in the required `sk-prismer-*` format; baseUrl has no trailing slash.
 */
export function resolveConfig() {
  if (_cached) return _cached;

  const env = process.env;
  let apiKey = env.PRISMER_API_KEY || env.CLAUDE_PLUGIN_OPTION_API_KEY || '';
  let baseUrl = env.PRISMER_BASE_URL || env.CLAUDE_PLUGIN_OPTION_BASE_URL || '';

  // Fall back to the TOML config shared with the `prismer` CLI.
  if (!apiKey || !baseUrl) {
    let toml = '';
    try {
      toml = readFileSync(join(homedir(), '.prismer', 'config.toml'), 'utf-8');
    } catch {
      // No config file — keep whatever the environment provided.
    }
    // Minimal TOML lookup: `key = "value"` or `key = 'value'` at line start.
    const fromToml = (key) => {
      const match = toml.match(new RegExp(`^${key}\\s*=\\s*['"]([^'"]+)['"]`, 'm'));
      return match?.[1] ?? '';
    };
    if (!apiKey) apiKey = fromToml('api_key');
    if (!baseUrl) baseUrl = fromToml('base_url');
  }

  // Validate: API key must be sk-prismer-* format (e.g. not a pasted JWT).
  if (apiKey && !apiKey.startsWith('sk-prismer-')) {
    apiKey = ''; // Invalid format — treat as not configured.
  }

  _cached = {
    apiKey,
    baseUrl: (baseUrl || 'https://prismer.cloud').replace(/\/$/, ''),
  };
  return _cached;
}
/**
* Shared signal patterns and detection utilities (v3)
*
* Single source of truth for error signal detection across all hooks.
*/
/**
 * Signal patterns — matched against command output or error messages.
 * Every matching entry contributes its `type`, so one text can yield several
 * signals; order here is the order signals are reported in. All patterns are
 * case-insensitive.
 */
export const SIGNAL_PATTERNS = [
{ pattern: /timeout|timed?\s*out/i, type: 'error:timeout' },
{ pattern: /oom|out\s*of\s*memory/i, type: 'error:oom' },
{ pattern: /permission|denied|403|forbidden|EACCES/i, type: 'error:permission_error' },
{ pattern: /not[\s-]*found|404|missing|can'?t\s*resolve/i, type: 'error:not_found' },
{ pattern: /connect|refused|econnrefused/i, type: 'error:connection_refused' },
{ pattern: /port.*in\s*use|EADDRINUSE|address already in use/i, type: 'error:port_in_use' },
{ pattern: /module.*not.*found|cannot find module|ENOENT.*node_modules/i, type: 'error:module_not_found' },
{ pattern: /build\s*(fail|error)|compile.*error|tsc.*error|webpack.*error/i, type: 'error:build_failure' },
{ pattern: /deploy\s*(fail|error)|kubectl.*(error|fail)|docker.*(error|fail)/i, type: 'error:deploy_failure' },
{ pattern: /test\s*(fail|error)|jest.*(fail|error)|pytest.*(fail|error)|mocha.*(fail|error)|vitest.*(fail|error)/i, type: 'error:test_failure' },
{ pattern: /prisma|migration|schema.*push/i, type: 'error:prisma' },
{ pattern: /typescript|TS\d{4}/i, type: 'error:typescript' },
];
/**
 * Error indicators in command output — a cheap gate run before the heavier
 * SIGNAL_PATTERNS extraction. Any single match means "looks like an error".
 */
export const ERROR_RE = [
/error[\s:[]/i, /ERR[!_]/, /FAIL/i, /panic:/i, /exception/i, /traceback/i,
/command not found/i, /module.not.found/i, /cannot resolve/i,
/build failed/i, /exit code [1-9]/i, /non-zero exit/i,
/ENOENT|EACCES|ECONNREFUSED|ETIMEDOUT|EADDRINUSE/,
];
/**
 * Error-context indicators in the command TEXT itself (not its output) —
 * used by pre-tool-use stuck detection to spot fix/retry attempts.
 */
export const ERROR_CONTEXT_RE = [
/fix|debug|troubleshoot|resolve|repair/i,
/error|fail|broken|crash|timeout/i,
/retry|again|attempt/i,
];
/**
 * Commands to skip entirely (read-only or trivial): anchored at the start of
 * the command string, so `ls -la` matches but `foo && ls` does not.
 */
export const SKIP_RE = /^\s*(ls|pwd|echo|cat|head|tail|wc|which|whoami|date|env|printenv|git\s+(status|log|diff|branch|show|remote|tag)|cd\s)/;
/**
 * Classify `text` against every entry in SIGNAL_PATTERNS.
 *
 * @param {string} text - Command output or error message to scan.
 * @returns {string[]} All matching signal types, in SIGNAL_PATTERNS order,
 *   or ['error:generic'] when nothing matches.
 */
export function detectSignals(text) {
  const matched = SIGNAL_PATTERNS
    .filter(({ pattern }) => pattern.test(text))
    .map(({ type }) => type);
  return matched.length === 0 ? ['error:generic'] : matched;
}
/**
 * True when `text` matches at least one generic error indicator in ERROR_RE.
 *
 * @param {string} text - Command output to inspect.
 * @returns {boolean}
 */
export function hasError(text) {
  for (const indicator of ERROR_RE) {
    if (indicator.test(text)) {
      return true;
    }
  }
  return false;
}
/**
 * Count occurrences of `signal:<signalType>` in journal text.
 *
 * @param {string} journalText - Raw journal contents to scan.
 * @param {string} signalType - Literal signal type; regex metacharacters in it
 *   are escaped so it never acts as a pattern.
 * @returns {number} Occurrence count (0 when absent).
 */
export function countSignal(journalText, signalType) {
  const literal = signalType.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const matcher = new RegExp(`signal:${literal}`, 'g');
  let count = 0;
  // `g`-flagged exec advances lastIndex each call until it returns null.
  while (matcher.exec(journalText) !== null) {
    count += 1;
  }
  return count;
}
#!/usr/bin/env node
/**
* PostToolUseFailure hook — Direct Error Capture (v3)
*
* Called when a Bash tool execution FAILS. No regex guessing needed —
* the failure is the signal. Cleaner than parsing stdout for errors.
*
* Stdin JSON shape (PostToolUseFailure):
* { tool_name, tool_input: { command, ... }, error: "..." }
*
* Stdout: empty (informational only)
*/
import { readFileSync, writeFileSync, mkdirSync, appendFileSync, existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { SIGNAL_PATTERNS, SKIP_RE } from './lib/signals.mjs';
// Scratch-state layout. CLAUDE_PLUGIN_DATA overrides the default .cache
// directory that lives next to the plugin's scripts/ folder.
const __dirname = dirname(fileURLToPath(import.meta.url));
const CACHE_DIR = process.env.CLAUDE_PLUGIN_DATA || join(__dirname, '..', '.cache');
const JOURNAL_FILE = join(CACHE_DIR, 'session-journal.md');
const PENDING_FILE = join(CACHE_DIR, 'pending-suggestion.json');
const PENDING_TTL_MS = 3 * 60 * 1000; // Pending gene suggestions expire after 3 minutes.
// Create the cache dir (idempotent) and seed the journal with a markdown
// header the first time it is written.
function ensureJournal() {
mkdirSync(CACHE_DIR, { recursive: true });
if (!existsSync(JOURNAL_FILE)) {
writeFileSync(JOURNAL_FILE, '# Session Journal\n\n');
}
}
// Best-effort append of one line (newline added) to the journal. Filesystem
// errors are deliberately swallowed: a logging failure must never break the hook.
function appendJournal(line) {
try {
ensureJournal();
appendFileSync(JOURNAL_FILE, line + '\n');
} catch {}
}
/** Current wall-clock time as a zero-padded 24-hour "HH:MM" string. */
function now() {
  const format = { hour12: false, hour: '2-digit', minute: '2-digit' };
  return new Date().toLocaleTimeString('en-US', format);
}
// --- Main ---
// Parse the hook payload from stdin (fd 0). A hook must never fail the tool
// pipeline, so malformed JSON exits quietly with status 0.
let input;
try {
input = JSON.parse(readFileSync(0, 'utf8'));
} catch {
process.exit(0);
}
// Normalize the failed tool into a journal prefix + one-line command summary.
// Tools other than Bash/Edit/Write fall through with an empty command.
const toolName = input?.tool_name || 'Bash';
let command = '';
let journalPrefix = 'bash';
if (toolName === 'Bash') {
command = input?.tool_input?.command || '';
journalPrefix = 'bash';
} else if (toolName === 'Edit') {
command = `edit ${input?.tool_input?.file_path || ''}`;
journalPrefix = 'edit';
} else if (toolName === 'Write') {
command = `write ${input?.tool_input?.file_path || ''}`;
journalPrefix = 'write';
}
const error = input?.error || '';
// Skip trivial commands (Bash only) — read-only commands carry no signal.
if (toolName === 'Bash' && SKIP_RE.test(command)) process.exit(0);
// Record the failed tool use (command truncated to 120 chars for the journal).
appendJournal(`- ${journalPrefix}: \`${command.slice(0, 120)}\` (${now()}) [FAILED]`);
// Extract signals from the error message + command text together.
const detectedSignals = [];
const searchText = `${error}\n${command}`;
for (const { pattern, type } of SIGNAL_PATTERNS) {
if (pattern.test(searchText)) {
detectedSignals.push(type);
}
}
if (detectedSignals.length === 0) {
detectedSignals.push('error:generic');
}
// Count prior occurrences of each signal in the journal so every entry
// carries a running count (used downstream for stuck detection).
let existingContent = '';
try { existingContent = readFileSync(JOURNAL_FILE, 'utf8'); } catch {}
for (const sig of detectedSignals) {
// Escape regex metacharacters so the signal type is matched literally.
const escaped = sig.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const regex = new RegExp(`signal:${escaped}`, 'g');
const existingCount = (existingContent.match(regex) || []).length;
appendJournal(` - signal:${sig} (count: ${existingCount + 1}, at: ${now()})`);
}
// Gene feedback on failure: if a gene suggestion is still pending and within
// its TTL, record a failed outcome and clear the pending file (empty write).
try {
const raw = readFileSync(PENDING_FILE, 'utf8');
if (raw) {
const pending = JSON.parse(raw);
if (pending?.geneId && Date.now() - (pending.suggestedAt || 0) < PENDING_TTL_MS) {
appendJournal(` - gene_feedback: "${pending.geneTitle}" gene_id=${pending.geneId} outcome=failed`);
writeFileSync(PENDING_FILE, '');
}
}
} catch {}
#!/usr/bin/env node
/**
* PostToolUse hook — Silent context cache save for WebFetch + WebSearch
*
* WebFetch: save URL + fetched content (HQCC format)
* WebSearch: save each result URL + snippet (best-effort, structure may vary)
*
* Always on. Fire-and-forget. Zero user impact.
*
* Stdin JSON:
* WebFetch: { tool_name, tool_input: { url }, tool_response: { url, code, result, bytes } }
* WebSearch: { tool_name, tool_input: { query }, tool_response: { result, ... } }
* Stdout: empty (silent)
*/
import { readFileSync } from 'fs';
import { resolveConfig } from './lib/resolve-config.mjs';
// This hook is always on but must be a silent no-op for unconfigured users.
const { apiKey, baseUrl } = resolveConfig();
if (!apiKey) process.exit(0);
// Parse the hook payload from stdin; malformed input exits quietly.
let input;
try {
input = JSON.parse(readFileSync(0, 'utf8'));
} catch {
process.exit(0);
}
const toolName = input?.tool_name;
// Accept either field name — NOTE(review): presumably tool_response vs
// tool_result varies by Claude Code version; confirm against the hook docs.
const resp = input?.tool_response || input?.tool_result;
/**
 * True when `url` is an http(s) URL whose HOST is public — i.e. not loopback,
 * localhost, or an RFC-1918 private address. Only public URLs are worth
 * sharing with the server-side context cache.
 *
 * Fix: the previous version ran substring regexes (e.g. /10\./, /192\.168\./)
 * over the WHOLE URL, so a public URL with "/v10.2/" in its path was wrongly
 * rejected, and a private IP appearing in a query string blocked a public
 * host. We now parse the URL and test only the hostname.
 *
 * @param {string} url - Candidate URL (any value; non-strings return false).
 * @returns {boolean}
 */
function isPublicUrl(url) {
  if (!url || typeof url !== 'string') return false;
  let parsed;
  try {
    parsed = new URL(url);
  } catch {
    return false; // Malformed URL — never cache.
  }
  if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') return false;
  const host = parsed.hostname;
  if (host === 'localhost' || host === '::1' || host === '[::1]') return false;
  if (/^127\./.test(host)) return false; // IPv4 loopback
  if (/^10\./.test(host)) return false; // RFC 1918 10.0.0.0/8
  if (/^192\.168\./.test(host)) return false; // RFC 1918 192.168.0.0/16
  if (/^172\.(1[6-9]|2\d|3[01])\./.test(host)) return false; // RFC 1918 172.16.0.0/12
  return true;
}
/**
 * Fire-and-forget POST of fetched web content to the Prismer context cache.
 * Payloads under 100 characters are not worth caching and are skipped.
 * Network failures and the 5s timeout are silently ignored — this hook must
 * never surface errors to the user.
 *
 * @param {string} url - Cache key (public URL or synthetic prismer:// URL).
 * @param {string} content - Fetched content (HQCC format).
 */
function saveToCache(url, content) {
  if (!content || content.length < 100) return;
  const payload = JSON.stringify({ url, hqcc: content });
  const request = fetch(`${baseUrl}/api/context/save`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`,
    },
    body: payload,
    signal: AbortSignal.timeout(5000),
  });
  // Deliberately not awaited; swallow any rejection.
  request.catch(() => {});
}
// --- WebFetch: save URL + content ---
// Only cache successful (code === 200) fetches of public URLs with a body.
if (toolName === 'WebFetch') {
const url = resp?.url || input?.tool_input?.url;
const content = resp?.result;
const code = resp?.code;
if (code === 200 && isPublicUrl(url) && content) {
saveToCache(url, content);
}
}
// --- WebSearch: save result as query-keyed cache ---
// Search results have no natural URL, so they are keyed under a synthetic
// prismer://search/<query> URL. Results of 100 chars or less are skipped.
if (toolName === 'WebSearch') {
const query = input?.tool_input?.query;
const result = resp?.result;
// Save the search result summary keyed by query URL
if (query && result && result.length > 100) {
const queryUrl = `prismer://search/${encodeURIComponent(query)}`;
saveToCache(queryUrl, result);
}
}
#!/usr/bin/env node
/**
* PreToolUse hook — Fast context cache check before WebFetch
*
* Only for WebFetch (not WebSearch — search always fetches fresh).
* Disabled by default. Enable via PRISMER_WEB_CACHE_LOAD=1 env var.
*
* 1s budget. Hit → deny + cached content. Miss/timeout → allow fetch.
*
* Stdin JSON: { tool_name, tool_input: { url }, ... }
* Stdout JSON: deny with cached content, or nothing (allow)
*/
import { readFileSync } from 'fs';
import { resolveConfig } from './lib/resolve-config.mjs';
// Feature gate — disabled by default, enable with PRISMER_WEB_CACHE_LOAD=1
if (process.env.PRISMER_WEB_CACHE_LOAD !== '1') process.exit(0);
const { apiKey, baseUrl } = resolveConfig();
if (!apiKey) process.exit(0);
// Parse hook payload; on malformed stdin exit silently so the fetch proceeds.
let input;
try {
input = JSON.parse(readFileSync(0, 'utf8'));
} catch {
process.exit(0);
}
// Only intercept WebFetch, never WebSearch
if (input?.tool_name !== 'WebFetch') process.exit(0);
const url = input?.tool_input?.url;
if (!url || (!url.startsWith('http://') && !url.startsWith('https://'))) process.exit(0);
// Skip private/loopback addresses, mirroring post-web-save.mjs policy.
// NOTE(review): this regex runs over the WHOLE URL, not just the hostname, so
// a path segment like "/v10.2/" also matches /10\./ and skips the cache —
// confirm whether that over-matching is intended.
if (/localhost|127\.0\.0\.1|192\.168\.|10\.|172\.(1[6-9]|2\d|3[01])\./i.test(url)) process.exit(0);
// 1s budget — if cache is slow, just let fetch proceed
try {
const res = await fetch(`${baseUrl}/api/context/load`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
body: JSON.stringify({ input: url }),
signal: AbortSignal.timeout(1000),
});
if (res.ok) {
const data = await res.json();
// Cache hit: deny the WebFetch and hand Claude the cached content inside
// the permission-decision reason field.
if (data?.success && data?.result?.cached && data?.result?.hqcc) {
process.stdout.write(JSON.stringify({
hookSpecificOutput: {
hookEventName: 'PreToolUse',
permissionDecision: 'deny',
permissionDecisionReason: `[Cache hit: ${url}]\n\n${data.result.hqcc}`,
},
}));
process.exit(0);
}
}
} catch {
// Timeout or error → allow fetch, no delay
}
#!/usr/bin/env node
/**
* SessionEnd hook — Async Evolution Sync (v3)
*
* Runs when the session is ending. Handles two cases:
*
* 1. Stop hook blocked and Claude did full review -> skip (already handled)
* 2. Stop hook did not block (cooldown/no value/skipped) -> async sync
*
* For case 2: pushes gene feedback outcomes to evolution network.
* Fire-and-forget; never blocks session exit.
*
* Stdin JSON: { session_id, ... }
* Stdout: empty
*/
import { readFileSync, writeFileSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { resolveConfig } from './lib/resolve-config.mjs';
// Scratch-state layout; CLAUDE_PLUGIN_DATA overrides the default .cache dir.
const __dirname = dirname(fileURLToPath(import.meta.url));
const CACHE_DIR = process.env.CLAUDE_PLUGIN_DATA || join(__dirname, '..', '.cache');
const JOURNAL_FILE = join(CACHE_DIR, 'session-journal.md');
const CURSOR_FILE = join(CACHE_DIR, 'sync-cursor.json'); // Persists the pull cursor + scope between sessions.
// --- Read stdin (discard; we do not need session metadata) ---
try { readFileSync(0, 'utf8'); } catch {}
// --- Check config --- no API key means there is nothing to sync.
const { apiKey, baseUrl } = resolveConfig();
if (!apiKey) process.exit(0);
// Read the project scope recorded by a previous sync (defaults to 'global').
// Consistency fix: reuse the CURSOR_FILE constant instead of re-joining the
// same 'sync-cursor.json' path inline.
let scope = 'global';
try {
  const raw = readFileSync(CURSOR_FILE, 'utf8');
  scope = JSON.parse(raw)?.scope || 'global';
} catch {}
// --- Read journal --- no journal file means no session activity to sync.
let journal = '';
try {
journal = readFileSync(JOURNAL_FILE, 'utf8');
} catch {
process.exit(0);
}
// If Stop hook triggered AND Claude called MCP tools, skip journal push to avoid
// duplicate recording (server has no dedup). But if Stop hook triggered and
// Claude did NOT call any evolve_* MCP tool, we should still push journal feedback.
// Heuristic: check if journal has the marker but no "[evolve_record" or "evolve_create"
// evidence (Claude's MCP calls don't appear in the journal, so we can't detect them).
// Conservative approach: skip if marker exists. The Stop hook's explicit MCP path is
// higher quality than journal regex extraction, so prefer that path when available.
if (journal.includes('[evolution-review-triggered]')) {
process.exit(0);
}
// --- Extract gene feedback outcomes ---
// Journal lines look like: ` - gene_feedback: "Title" gene_id=abc outcome=failed`.
const feedbackLines = journal.match(/gene_feedback:.*outcome=\w+/g) || [];
const outcomes = feedbackLines.map(line => {
const titleMatch = line.match(/"([^"]+)"/);
const geneIdMatch = line.match(/gene_id=(\S+)/);
const outcomeMatch = line.match(/outcome=(\w+)/);
if (!outcomeMatch) return null;
return {
title: titleMatch?.[1] || '',
// NOTE(review): \S+ cannot capture whitespace, so this replace looks like a
// no-op safeguard — confirm whether gene ids can ever contain spaces.
geneId: geneIdMatch?.[1]?.replace(/\s+outcome=.*/, '') || '',
outcome: outcomeMatch[1],
};
}).filter(Boolean);
// --- Extract signal summary --- tally every `signal:<type>` occurrence.
const signalRe = /signal:(\S+)/g;
const signalCounts = {};
let m;
while ((m = signalRe.exec(journal)) !== null) {
signalCounts[m[1]] = (signalCounts[m[1]] || 0) + 1;
}
// Skip if nothing to sync
if (outcomes.length === 0 && Object.keys(signalCounts).length === 0) {
process.exit(0);
}
// --- Async push to evolution network ---
try {
// Resume the pull position from the persisted cursor (0 on first run).
let cursor = 0;
try {
const raw = readFileSync(CURSOR_FILE, 'utf8');
cursor = JSON.parse(raw)?.cursor || 0;
} catch {}
// 5s abort budget; unref() so the timer never keeps the process alive.
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 5000);
timer.unref();
// Build signals array from journal signalCounts (strip stray parens from types).
const allSignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') }));
// Fall back to the gene title as id, and tag every outcome with the session's
// signals (or a generic session:end marker when there were none).
const pushOutcomes = outcomes.map(o => ({
gene_id: o.geneId || o.title,
outcome: o.outcome,
summary: `Session-end sync: "${o.title}" ${o.outcome}`,
signals: allSignals.length > 0 ? allSignals : [{ type: 'session:end' }],
}));
const res = await fetch(`${baseUrl}/api/im/evolution/sync?scope=${encodeURIComponent(scope)}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
body: JSON.stringify({
push: pushOutcomes.length > 0 ? { outcomes: pushOutcomes } : undefined,
pull: { since: cursor },
}),
signal: controller.signal,
});
clearTimeout(timer);
if (res.ok) {
const data = await res.json();
// Persist the new pull cursor (plus scope) for the next session's sync.
if (data?.data?.pulled?.cursor) {
try {
writeFileSync(CURSOR_FILE, JSON.stringify({
cursor: data.data.pulled.cursor,
scope,
ts: Date.now(),
}));
} catch {}
}
}
} catch {
// Sync failed — queue for retry on next SessionStart
try {
const queueFile = join(CACHE_DIR, 'sync-retry-queue.json');
let queue = [];
try { queue = JSON.parse(readFileSync(queueFile, 'utf8')); } catch {}
const retrySignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') }));
const pushOutcomes = outcomes.map(o => ({
gene_id: o.geneId || o.title,
outcome: o.outcome,
summary: `Session-end sync (retry): "${o.title}" ${o.outcome}`,
signals: retrySignals.length > 0 ? retrySignals : [{ type: 'session:end' }],
}));
if (pushOutcomes.length > 0) {
queue.push({ outcomes: pushOutcomes, ts: Date.now() });
// Keep max 10 entries to prevent unbounded growth
if (queue.length > 10) queue = queue.slice(-10);
writeFileSync(queueFile, JSON.stringify(queue));
}
} catch {}
}
#!/usr/bin/env node
/**
* SubagentStart hook — Inject proven strategies into subagent context
*
* When Claude spawns a subagent (Agent tool), inject the top evolution
* strategies so subagents benefit from the evolution network too.
*
* Stdin JSON: { session_id, ... }
* Stdout: text injected into subagent's context (or empty)
*/
import { readFileSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { resolveConfig } from './lib/resolve-config.mjs';
// Scratch-state layout; CLAUDE_PLUGIN_DATA overrides the default .cache dir.
const __dirname = dirname(fileURLToPath(import.meta.url));
const CACHE_DIR = process.env.CLAUDE_PLUGIN_DATA || join(__dirname, '..', '.cache');
const JOURNAL_FILE = join(CACHE_DIR, 'session-journal.md');
// No API key — nothing to inject; exit silently.
const { apiKey, baseUrl } = resolveConfig();
if (!apiKey) process.exit(0);
// Read the parent session's journal and summarize its top 5 error signals as
// "type (Nx)" pairs, most frequent first, for injection below.
let journalSignals = '';
try {
const journal = readFileSync(JOURNAL_FILE, 'utf8');
const sigRe = /signal:(\S+)/g;
const counts = {};
let m;
while ((m = sigRe.exec(journal)) !== null) {
// Strip parens from the captured token. NOTE(review): presumably defensive
// cleanup of journal formatting artifacts — confirm which lines need it.
const sig = m[1].replace(/[()]/g, '');
counts[sig] = (counts[sig] || 0) + 1;
}
if (Object.keys(counts).length > 0) {
journalSignals = Object.entries(counts)
.sort((a, b) => b[1] - a[1])
.slice(0, 5)
.map(([sig, cnt]) => `${sig} (${cnt}x)`)
.join(', ');
}
} catch {
// No journal — that's OK
}
// Fetch top genes with a 1.5s budget — never block subagent startup on network.
try {
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 1500);
timer.unref(); // Don't let the timer keep the process alive.
const res = await fetch(`${baseUrl}/api/im/evolution/public/hot?limit=3`, {
headers: { Authorization: `Bearer ${apiKey}` },
signal: controller.signal,
});
clearTimeout(timer);
if (res.ok) {
const data = await res.json();
const genes = data?.data || [];
if (genes.length > 0) {
// Render each gene as `"Title" (NN%)` where NN = success/(success+failure).
const lines = genes.slice(0, 3).map(g => {
const total = (g.success_count || 0) + (g.failure_count || 0);
const rate = total > 0 ? Math.round(((g.success_count || 0) / total) * 100) : 0;
return `"${g.title}" (${rate}%)`;
});
let output = `[Evolution] Top strategies: ${lines.join('; ')}`;
if (journalSignals) {
output += ` | Parent session signals: ${journalSignals}`;
}
// Anything written to stdout is injected into the subagent's context.
process.stdout.write(output);
}
}
} catch {
// Timeout — don't block subagent startup
}
---
name: evolve-session-review
description: Automatically triggered by Stop hook. Reviews session for evolution learnings.
disable-model-invocation: true
allowed-tools: Bash
---
# Evolve Session Review
Triggered automatically when the Stop hook detects evolution value in the session.
## What to do
1. **Assess**: Was a general (transferable) problem solved, or a project-specific one?
2. **If transferable**: `evolve_create_gene` — de-contextualize the fix into reusable steps
3. **If gene was suggested**: `evolve_record` — record success/failure outcome
4. **If project-specific**: `memory_write` — save for future sessions
At most 3 MCP tool calls. Skip if nothing is worth recording.
---
name: prismer-setup
description: Set up Prismer API key — registers or connects your account with one command
user-invocable: true
allowed-tools: Bash, Read, Write, AskUserQuestion
---
# Prismer Setup
Set up your Prismer API key for CLI commands, MCP tools, Claude Code plugin, and all SDKs.
This is the same flow as `prismer setup` in the CLI — browser auto-login, key auto-received, zero copy-paste needed.
## Flow
### Step 1: Check if already configured
```bash
# Check existing config
if [ -f ~/.prismer/config.toml ]; then
echo "Found existing config at ~/.prismer/config.toml"
grep "api_key" ~/.prismer/config.toml 2>/dev/null | head -1
fi
# Check env
if [ -n "$PRISMER_API_KEY" ]; then
echo "PRISMER_API_KEY is set in environment"
fi
```
If an API key is already configured and valid, tell the user:
- "You're already set up! Key: sk-prismer-...xxxx"
- Offer to verify connectivity: `curl -s -H "Authorization: Bearer $KEY" https://prismer.cloud/api/version | head -1`
- If they want to reconfigure, proceed to Step 2
### Step 2: Attempt auto-setup via CLI (preferred)
Try the automatic browser-based flow first — this is the **recommended path**:
```bash
npx -y @prismer/sdk setup
```
This will:
1. Start a local server on a random port
2. Open the browser to `https://prismer.cloud/setup?callback=http://127.0.0.1:PORT/callback&state=xxx`
3. The user signs in (or is already signed in) → key auto-created → redirected back → key saved
**If the CLI is not available or the auto flow fails**, fall back to Step 2B.
### Step 2B: Manual fallback
Open the browser manually:
```bash
open "https://prismer.cloud/setup?utm_source=claude-code-plugin&utm_medium=setup" 2>/dev/null || \
xdg-open "https://prismer.cloud/setup?utm_source=claude-code-plugin&utm_medium=setup" 2>/dev/null || \
echo "Please open: https://prismer.cloud/setup"
```
Tell the user: "After signing in, copy the API key from the page and paste it here."
Wait for the user to paste the key (AskUserQuestion).
Validate format: must start with `sk-prismer-`. If invalid, ask again.
Save to config:
```bash
mkdir -p ~/.prismer
cat > ~/.prismer/config.toml << EOF
# Prismer Cloud configuration
# Generated by /prismer-setup on $(date +%Y-%m-%d)
[default]
api_key = "THE_KEY_HERE"
base_url = "https://prismer.cloud"
EOF
chmod 600 ~/.prismer/config.toml
```
### Step 3: Configure MCP server (if not already)
Check if `~/.claude/mcp_servers.json` has a `prismer` entry. If not, add it:
```bash
# Check if prismer MCP is configured
if [ -f ~/.claude/mcp_servers.json ]; then
python3 -c "import json; d=json.load(open('$HOME/.claude/mcp_servers.json')); exit(0 if 'prismer' in d else 1)" 2>/dev/null
if [ $? -eq 0 ]; then
echo "MCP server already configured"
else
echo "Adding Prismer MCP server..."
fi
fi
```
If the MCP entry needs to be added or updated with the new key:
```json
{
"prismer": {
"command": "npx",
"args": ["-y", "@prismer/mcp-server@latest"],
"env": {
"PRISMER_API_KEY": "THE_KEY_HERE"
}
}
}
```
Use `python3` or `node` to merge this into the existing `mcp_servers.json` without overwriting other entries.
### Step 4: Verify everything works
```bash
echo "Verifying setup..."
# Test API connectivity
RESP=$(curl -s -H "Authorization: Bearer THE_KEY_HERE" https://prismer.cloud/api/version)
echo "API: $RESP"
# Show summary
echo ""
echo "Setup complete!"
echo " Config: ~/.prismer/config.toml"
echo " MCP: ~/.claude/mcp_servers.json"
echo ""
echo "You now have access to:"
echo " - CLI: prismer load, prismer search, prismer evolve, ..."
echo " - MCP: 29 tools (evolution, memory, context, parse, skills)"
echo " - Plugin: /evolve-analyze, /evolve-record, cross-agent learning"
echo " - SDKs: @prismer/sdk (TS), prismer (Py), prismer-sdk-go, prismer-sdk (Rust)"
echo ""
echo "Restart Claude Code to activate MCP tools."
```
## Important Notes
- **Prefer auto-flow**: Always try `npx -y @prismer/sdk setup` first — zero copy-paste, best UX.
- **Never display the full API key** in output after initial save. Show only `sk-prismer-...xxxx` (last 4 chars).
- **File permissions**: `~/.prismer/config.toml` must be `chmod 600` (owner-only read/write).
- **Existing config**: If config already exists, ask before overwriting.
- **API key format**: Must match `sk-prismer-*` pattern. Reject anything else.
- **MCP merge**: Never overwrite the entire `mcp_servers.json` — only add/update the `prismer` key.
- **Equivalent CLI command**: `prismer setup` does the same thing. Tell the user they can also use the CLI directly.
+15
-3

@@ -13,7 +13,19 @@ {

"name": "prismer",
"source": "./",
"description": "Prismer Evolution — auto-learning from every coding session via hooks + MCP + skills",
"version": "1.7.2"
"source": {
"source": "npm",
"package": "@prismer/claude-code-plugin"
},
"description": "Prismer Evolution — cross-agent learning network. Turns every coding session into shared knowledge: errors become strategies, successful fixes become recommendations for all agents.",
"author": {
"name": "Prismer",
"email": "dev@prismer.cloud"
},
"homepage": "https://prismer.cloud",
"repository": "https://github.com/Prismer-AI/PrismerCloud",
"license": "MIT",
"category": "ai",
"tags": ["evolution", "cross-agent-learning", "mcp", "hooks", "auto-learning"],
"keywords": ["prismer", "evolution", "ai-agent", "mcp"]
}
]
}
{
"name": "prismer",
"description": "Prismer Evolution — cross-agent learning network. Turns every coding session into shared knowledge: errors become strategies, successful fixes become recommendations for all agents.",
"version": "1.7.3",
"version": "1.7.4",
"author": {

@@ -18,3 +18,13 @@ "name": "Prismer",

"hooks"
]
],
"userConfig": {
"api_key": {
"description": "Prismer API key (get one at https://prismer.cloud/dashboard?tab=keys)",
"sensitive": true
},
"base_url": {
"description": "Prismer API base URL (default: https://prismer.cloud)",
"sensitive": false
}
}
}
+5
-2

@@ -5,6 +5,9 @@ {

"command": "npx",
"args": ["-y", "@prismer/mcp-server"],
"cwd": "${CLAUDE_PLUGIN_ROOT}"
"args": ["-y", "@prismer/mcp-server@latest"],
"env": {
"PRISMER_API_KEY": "${user_config.api_key}",
"PRISMER_BASE_URL": "${user_config.base_url}"
}
}
}
}

@@ -0,1 +1,31 @@

## [1.7.4] - 2026-04-01
### Added — **Data Loop Closure**
- **Stop hook reason injection**: `buildReason()` assembles signal summary + gene feedback + MCP tool instructions into the `reason` field. Claude reads this and knows to call `evolve_record`, `evolve_report`, `memory_write`, and suggest CLAUDE.md updates.
- Stop hook now outputs `{ decision: 'block', reason: '...' }` (was `{ decision: 'block' }` only)
- Incremental journal writes via PostTool hooks prevent data loss on session crash
### Changed — **v3 Eight-Hook Architecture**
- **SessionStart**: matcher expanded to `startup|resume|clear|compact`; added retry queue, memory pull, skill sync
- **Stop**: gene adherence self-evaluation in reason; once-per-session marker + 1h cooldown
- **PostToolUse**: expanded to `Bash|Edit|Write` (was Bash only); shared `lib/signals.mjs` module
- Journal rotation respects event type: rotate on startup/clear, preserve on resume/compact
- DESIGN.md rewritten for v3 (was v2.1) — all 8 hooks, WebFetch cache, SessionEnd documented
### Added
- **PostToolUseFailure** hook (`post-tool-failure.mjs`): `Bash|Edit|Write` failure signal extraction
- **SessionEnd** hook (`session-end.mjs`): async evolution sync fallback + retry queue persistence
- **SubagentStart** hook (`subagent-start.mjs`): top strategies + parent signals injection
- **PreToolUse(WebFetch)** hook (`pre-web-cache.mjs`): context cache load (opt-in via `PRISMER_WEB_CACHE_LOAD=1`)
- **PostToolUse(WebFetch|WebSearch)** hook (`post-web-save.mjs`): silent context cache save
- `scripts/lib/signals.mjs`: shared 13 signal patterns + `ERROR_RE` + `SKIP_RE` + `countSignal()`
- `scripts/lib/resolve-config.mjs`: config auto-discovery (env → `~/.prismer/config.toml` → defaults)
### Fixed
- Permission Denied on Stop hook — root cause: journal never rotated + no block cooldown
- `session-end.mjs` now preserves `scope` field in sync-cursor.json (was being dropped)
- `pre-web-cache.mjs` URL validation aligned with `post-web-save.mjs` (`http://`/`https://` only)
- `marketplace.json` version fields removed (was hardcoded at 1.7.3; version now from plugin.json only)
- MCP pre-warm only on startup (was every session event)
# Changelog

@@ -5,3 +35,3 @@

## [1.7.2] - 2026-03-26
## [1.7.3] - 2026-03-27

@@ -8,0 +38,0 @@ ### Changed — **v2 Three-Stage Evolution Model**

@@ -5,2 +5,3 @@ {

{
"matcher": "startup|resume|clear|compact",
"hooks": [

@@ -23,2 +24,11 @@ {

]
},
{
"matcher": "WebFetch",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/pre-web-cache.mjs\""
}
]
}

@@ -28,3 +38,3 @@ ],

{
"matcher": "Bash",
"matcher": "Bash|Edit|Write",
"hooks": [

@@ -36,4 +46,34 @@ {

]
},
{
"matcher": "WebFetch|WebSearch",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/post-web-save.mjs\""
}
]
}
],
"PostToolUseFailure": [
{
"matcher": "Bash|Edit|Write",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/post-tool-failure.mjs\""
}
]
}
],
"SubagentStart": [
{
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/subagent-start.mjs\""
}
]
}
],
"Stop": [

@@ -48,4 +88,14 @@ {

}
],
"SessionEnd": [
{
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-end.mjs\""
}
]
}
]
}
}
{
"name": "@prismer/claude-code-plugin",
"version": "1.7.3",
"version": "1.7.4",
"description": "Prismer Evolution plugin for Claude Code — auto-learning from every coding session",

@@ -33,2 +33,12 @@ "keywords": [

],
"scripts": {
"test": "vitest run",
"test:watch": "vitest"
},
"devDependencies": {
"vitest": "^3.1.1"
},
"engines": {
"node": ">=18.0.0"
},
"publishConfig": {

@@ -35,0 +45,0 @@ "access": "public"

+98
-92

@@ -1,6 +0,6 @@

# @prismer/claude-code-plugin
# @prismer/claude-code-plugin (v1.7.4)
Prismer Evolution plugin for Claude Code (v2). Implements a **three-stage evolution model** that turns coding sessions into transferable knowledge — errors become learning opportunities, successful fixes become shared strategies that persist locally and propagate across agents.
Prismer Evolution plugin for Claude Code (v3). Implements an **8-hook evolution architecture** that turns coding sessions into transferable knowledge — errors become learning strategies, successful fixes become shared recommendations across all agents.
## How It Works (v2 Three-Stage Model)
## How It Works (v3 Eight-Hook Architecture)

@@ -10,6 +10,7 @@ ```

│ session-start.mjs │
│ 1. Rotate session journal (clear previous) │
│ 2. Sync pull: trending genes + hot strategies │
│ 3. Inject passive context (proven patterns) │
│ 4. Pre-warm MCP server │
│ 1. Sync pull: trending genes + hot strategies │
│ 2. Retry queue: resend any failed session-end pushes │
│ 3. Memory pull: inject persistent memory │
│ 4. Skill sync: download cloud-installed skills │
│ 5. Pre-warm MCP server (background) │
└────────────────────────────────────────────────────────┘

@@ -19,10 +20,23 @@

│ ▼ │
│ PreToolUse: pre-bash-suggest.mjs │
│ PreToolUse(Bash): pre-bash-suggest.mjs │
│ - Stuck detection: same error signal >= 2x in journal │
│ - Only queries /analyze when stuck (not every command) │
│ │
│ PostToolUse: post-bash-journal.mjs │
│ PreToolUse(WebFetch): pre-web-cache.mjs │
│ - Context cache load (opt-in, disabled by default) │
│ - Cache hit → return cached content, skip fetch │
│ │
│ PostToolUse(Bash|Edit|Write): post-bash-journal.mjs │
│ - Writes to LOCAL session-journal.md only │
│ - Does NOT write to evolution network (reduces noise) │
│ - 13 signal patterns for error classification │
│ - Tracks signal counts for stuck detection │
│ │
│ PostToolUse(WebFetch|WebSearch): post-web-save.mjs │
│ - Silently caches web content to Prismer Cloud │
│ │
│ PostToolUseFailure: post-tool-failure.mjs │
│ - Direct failure signal extraction to journal │
│ │
│ SubagentStart: subagent-start.mjs │
│ - Injects top strategies + parent signals │
└────────────────────────────────────────────────────────┘

@@ -32,29 +46,18 @@

│ ▼ │
│ Stop: session-stop.mjs (< 200ms, non-blocking) │
│ 1. Read session journal │
│ 2. Check: has evolution value? │
│ 3. Spawn async subagent (detached, fire-and-forget) │
│ Stop: session-stop.mjs (primary path) │
│ 1. Read session journal → has evolution value? │
│ 2. YES → block + inject gene adherence self-eval │
│ 3. Claude LLM reviews session with full context │
│ 4. Calls MCP: evolve_record, evolve_create_gene, │
│ memory_write (zero extra LLM cost) │
│ │
│ Async: session-evolve.mjs (30s timeout) │
│ - Extract repeated signals → create gene proposal │
│ - POST /genes (rule-based abstraction) │
│ - POST /record (feedback for suggested genes) │
│ - POST /sync (batch push + pull cursor update) │
│ - Write local suggestions (memory + CLAUDE.md hints) │
│ SessionEnd: session-end.mjs (fallback path) │
│ - Async sync push for gene feedback + signals │
│ - Retry queue for failed pushes │
└────────────────────────────────────────────────────────┘
```
### Key Changes from v1
| Aspect | v1 | v2 |
|--------|----|----|
| /analyze queries | Every bash command | Only when stuck (same error >= 2x) |
| Error reporting | Every failure → remote /report | Local journal only; batch at session end |
| Gene creation | Never (server-side /distill only) | Agent-side at session end via /genes |
| Local persistence | None | Suggestions for memory + CLAUDE.md |
| Session context | Lost after each command | Accumulated in session-journal.md |
## Quick Start
### Install from Marketplace (Recommended)
### Install from Marketplace

@@ -66,23 +69,41 @@ ```bash

On first enable, Claude Code will prompt for your API key (stored securely in the system keychain).
### Install from Local Directory (Development)
```bash
claude --plugin-dir /path/to/sdk/claude-code-plugin
claude --plugin-dir /path/to/sdk/prismer-cloud/claude-code-plugin
```
### Environment Variables
### Configuration
| Variable | Required | Default |
|----------|----------|---------|
| `PRISMER_API_KEY` | Yes | -- |
| `PRISMER_BASE_URL` | No | `https://prismer.cloud` |
| `PRISMER_SCOPE` | No | Auto-detected from package.json or git remote |
The plugin reads config from multiple sources (in priority order):
| Source | Variables |
|--------|-----------|
| Environment variables | `PRISMER_API_KEY`, `PRISMER_BASE_URL` |
| Claude Code userConfig | Prompted on plugin enable (stored in keychain) |
| `~/.prismer/config.toml` | `api_key`, `base_url` (shared with CLI) |
```bash
# Option 1: Environment variable
export PRISMER_API_KEY="sk-prismer-..."
# Option 2: Config file
cat > ~/.prismer/config.toml << 'EOF'
api_key = "sk-prismer-..."
base_url = "https://prismer.cloud"
EOF
```
### Optional Feature Flags
| Variable | Default | Description |
|----------|---------|-------------|
| `PRISMER_WEB_CACHE_LOAD` | `0` | Set to `1` to enable WebFetch cache load (pre-check before fetching) |
| `PRISMER_SCOPE` | auto-detected | Override project scope (default: from package.json name or git remote) |
## Components
### 1. Hooks (4 Lifecycle Events)
### 1. Hooks (8 Lifecycle Events)

@@ -93,23 +114,18 @@ Registered via `hooks/hooks.json`:

|-------|--------|---------|
| **SessionStart** | `session-start.mjs` | Sync pull + passive context inject + scope detection + MCP pre-warm |
| **SessionStart** | `session-start.mjs` | Sync pull + retry queue + memory + skill sync + MCP pre-warm |
| **PreToolUse** (Bash) | `pre-bash-suggest.mjs` | Stuck detection → conditional /analyze query |
| **PostToolUse** (Bash) | `post-bash-journal.mjs` | Local session journal (no remote writes) |
| **Stop** | `session-stop.mjs` | Collect context → spawn async `session-evolve.mjs` |
| **PreToolUse** (WebFetch) | `pre-web-cache.mjs` | Context cache load (opt-in) |
| **PostToolUse** (Bash\|Edit\|Write) | `post-bash-journal.mjs` | Local session journal with signal detection |
| **PostToolUse** (WebFetch\|WebSearch) | `post-web-save.mjs` | Silent context cache save |
| **PostToolUseFailure** | `post-tool-failure.mjs` | Failure signal extraction |
| **SubagentStart** | `subagent-start.mjs` | Strategy + signal injection to subagents |
| **Stop** | `session-stop.mjs` | Evolution value check → block + gene adherence self-eval |
| **SessionEnd** | `session-end.mjs` | Async fallback sync push + retry queue |
### 2. Async Subagent: session-evolve.mjs
### 2. MCP Server (33 Tools)
Spawned as a detached process at session end. Runs independently after Claude Code exits:
Configured via `.mcp.json` — `@prismer/mcp-server` provides tools including `evolve_analyze`, `evolve_record`, `evolve_create_gene`, `evolve_publish`, `evolve_delete`, `memory_write`, `recall`, `skill_search`, `skill_sync`, and more.
- Extracts repeated error signals from journal
- Creates gene proposals via `POST /genes` (rule-based; LLM version planned)
- Records outcomes for any genes suggested during the session
- Pushes batch sync to evolution network
- Writes local evolution suggestions for next session
### 3. Skills (5 Slash Commands)
### 3. MCP Server (26 Tools)
Configured via `.mcp.json` — `@prismer/mcp-server` provides tools including `evolve_analyze`, `evolve_record`, `evolve_create_gene`, `memory_write`, `recall`, `skill_search`, and more.
### 4. Skills (3 Slash Commands)
| Skill | Description |

@@ -120,2 +136,4 @@ |-------|-------------|

| `/prismer:evolve-record` | Record an outcome after applying a strategy |
| `/prismer:evolve-session-review` | Full session review with gene adherence evaluation |
| `/prismer:prismer-setup` | First-run setup guidance |

@@ -125,23 +143,29 @@ ## File Structure

```
sdk/claude-code-plugin/
claude-code-plugin/
├── .claude-plugin/
│ ├── plugin.json # Plugin manifest
│ ├── plugin.json # Plugin manifest (with userConfig)
│ └── marketplace.json # Marketplace catalog
├── hooks/
│ └── hooks.json # 4 events: SessionStart, PreToolUse, PostToolUse, Stop
│ └── hooks.json # 8 hook entries across 7 events
├── scripts/
│ ├── session-start.mjs # SessionStart: sync pull + context inject
│ ├── pre-bash-suggest.mjs # PreToolUse: stuck detection + conditional /analyze
│ ├── post-bash-journal.mjs # PostToolUse: local journal writer
│ ├── session-stop.mjs # Stop: collect context + spawn subagent
│ ├── session-evolve.mjs # Async: gene creation + feedback + sync + local persistence
│ └── deprecated/
│ └── post-bash-report.mjs # v1 PostToolUse (kept for reference)
│ ├── session-start.mjs # SessionStart: sync + retry + memory + skills
│ ├── pre-bash-suggest.mjs # PreToolUse(Bash): stuck detection
│ ├── pre-web-cache.mjs # PreToolUse(WebFetch): cache load (opt-in)
│ ├── post-bash-journal.mjs # PostToolUse(Bash|Edit|Write): journal
│ ├── post-web-save.mjs # PostToolUse(WebFetch|WebSearch): cache save
│ ├── post-tool-failure.mjs # PostToolUseFailure: failure signals
│ ├── subagent-start.mjs # SubagentStart: strategy injection
│ ├── session-stop.mjs # Stop: block + gene adherence
│ ├── session-end.mjs # SessionEnd: async fallback sync
│ └── lib/
│ ├── resolve-config.mjs # Config resolution (env → userConfig → toml)
│ └── signals.mjs # 13 shared signal patterns
├── skills/
│ ├── evolve-analyze/SKILL.md
│ ├── evolve-create/SKILL.md
│ └── evolve-record/SKILL.md
├── .mcp.json # MCP server configuration (26 tools)
├── DESIGN-V2.md # v2 architecture design document
├── DESIGN.md # v1 design (historical)
│ ├── evolve-record/SKILL.md
│ ├── evolve-session-review/SKILL.md
│ └── prismer-setup/SKILL.md
├── .mcp.json # MCP server configuration (33 tools)
├── DESIGN.md # v3 architecture design document
├── CHANGELOG.md

@@ -153,27 +177,9 @@ ├── LICENSE # MIT

## Session Journal Format
During a session, `post-bash-journal.mjs` writes a local markdown journal:
```markdown
# Session Journal
Started: 2026-03-26T10:00:00Z
- bash: `npm run build` (10:01)
- signal:error:typescript (count: 1, at: 10:01)
- bash: `vim src/fix.ts` (10:02)
- bash: `npm run build` (10:03)
- signal:error:typescript (count: 2, at: 10:03)
- bash: `npx prisma generate` (10:04)
- bash: `npm run build` (10:05)
- gene_feedback: "Prisma Generate Before Build" outcome=success
```
## Privacy & Security
**What is sent (at session end only):**
**What is sent:**
- Aggregated error signals (types + counts, not raw stderr)
- Gene proposals (abstracted strategies, no project-specific paths)
- Outcome feedback (success/failure + score)
- Outcome feedback (success/failure + adherence assessment)
- WebFetch/WebSearch cached content (public URLs only, fire-and-forget)

@@ -184,3 +190,3 @@ **What is NOT sent:**

- Raw error output (kept in local journal only)
- Mid-session command-by-command data
- Private/localhost URLs

@@ -192,3 +198,3 @@ All data is scoped to your API key. Evolution data propagates to other agents in the same scope.

- [@prismer/sdk](https://www.npmjs.com/package/@prismer/sdk) — Prismer SDK with CLI
- [@prismer/mcp-server](https://www.npmjs.com/package/@prismer/mcp-server) — MCP Server (26 tools)
- [@prismer/mcp-server](https://www.npmjs.com/package/@prismer/mcp-server) — MCP Server (33 tools)
- [@prismer/opencode-plugin](https://www.npmjs.com/package/@prismer/opencode-plugin) — OpenCode equivalent

@@ -195,0 +201,0 @@ - [Prismer Cloud](https://prismer.cloud) — Knowledge Drive for AI Agents

@@ -20,2 +20,3 @@ #!/usr/bin/env node

import { fileURLToPath } from 'url';
import { SIGNAL_PATTERNS, SKIP_RE, hasError, countSignal } from './lib/signals.mjs';

@@ -26,3 +27,3 @@ const __dirname = dirname(fileURLToPath(import.meta.url));

const PENDING_FILE = join(CACHE_DIR, 'pending-suggestion.json');
const PENDING_TTL_MS = 3 * 60 * 1000;
const PENDING_TTL_MS = 10 * 60 * 1000;

@@ -69,28 +70,2 @@ // --- Helpers ---

// --- Signal Extraction ---
const SIGNAL_PATTERNS = [
{ pattern: /timeout|timed?\s*out/i, type: 'error:timeout' },
{ pattern: /oom|out\s*of\s*memory/i, type: 'error:oom' },
{ pattern: /permission|denied|403|forbidden|EACCES/i, type: 'error:permission_error' },
{ pattern: /not[\s-]*found|404|missing|can'?t\s*resolve/i, type: 'error:not_found' },
{ pattern: /connect|refused|econnrefused/i, type: 'error:connection_refused' },
{ pattern: /port.*in\s*use|EADDRINUSE|address already in use/i, type: 'error:port_in_use' },
{ pattern: /module.*not.*found|cannot find module|ENOENT.*node_modules/i, type: 'error:module_not_found' },
{ pattern: /build|compile|tsc|webpack/i, type: 'error:build_failure' },
{ pattern: /deploy|k8s|kubectl|docker/i, type: 'error:deploy_failure' },
{ pattern: /test|jest|pytest|mocha|vitest/i, type: 'error:test_failure' },
{ pattern: /prisma|migration|schema.*push/i, type: 'error:prisma' },
{ pattern: /typescript|TS\d{4}/i, type: 'error:typescript' },
];
const ERROR_RE = [
/error[\s:[]/i, /ERR[!_]/, /FAIL/i, /panic:/i, /exception/i, /traceback/i,
/command not found/i, /module.not.found/i, /cannot resolve/i,
/build failed/i, /exit code [1-9]/i, /non-zero exit/i,
/ENOENT|EACCES|ECONNREFUSED|ETIMEDOUT|EADDRINUSE/,
];
const SKIP_RE = /^\s*(ls|pwd|echo|cat|head|tail|wc|which|whoami|date|env|printenv|git\s+(status|log|diff|branch|show|remote|tag)|cd\s)/;
// --- Main ---

@@ -105,8 +80,29 @@

const command = input?.tool_input?.command || '';
const resp = input?.tool_response || input?.tool_result || '';
const result = typeof resp === 'string' ? resp : [resp.stdout || '', resp.stderr || ''].join('\n');
// Detect tool type and extract relevant fields
const toolName = input?.tool_name || 'Bash';
let command = '';
let result = '';
let journalPrefix = 'bash';
// Skip trivial commands
if (SKIP_RE.test(command)) {
if (toolName === 'Bash') {
command = input?.tool_input?.command || '';
const resp = input?.tool_response || input?.tool_result || '';
result = typeof resp === 'string' ? resp : [resp.stdout || '', resp.stderr || ''].join('\n');
journalPrefix = 'bash';
} else if (toolName === 'Edit') {
command = `edit ${input?.tool_input?.file_path || ''}`;
const resp = input?.tool_response || input?.tool_result || '';
result = typeof resp === 'string' ? resp : '';
journalPrefix = 'edit';
} else if (toolName === 'Write') {
command = `write ${input?.tool_input?.file_path || ''}`;
const resp = input?.tool_response || input?.tool_result || '';
result = typeof resp === 'string' ? resp : '';
journalPrefix = 'write';
} else {
process.exit(0); // Unknown tool — skip
}
// Skip trivial commands (Bash only)
if (toolName === 'Bash' && SKIP_RE.test(command)) {
readPending(); // side-effect: clears if expired

@@ -117,11 +113,11 @@ process.exit(0);

// Record tool usage in journal
appendJournal(`- bash: \`${command.slice(0, 120)}\` (${now()})`);
appendJournal(`- ${journalPrefix}: \`${command.slice(0, 120)}\` (${now()})`);
const hasError = ERROR_RE.some((re) => re.test(result));
const errorDetected = hasError(result);
if (!hasError) {
if (!errorDetected) {
// Success path
const pending = readPending();
if (pending) {
appendJournal(` - gene_feedback: "${pending.geneTitle}" outcome=success`);
appendJournal(` - gene_feedback: "${pending.geneTitle}" gene_id=${pending.geneId} outcome=success`);
clearPending();

@@ -148,7 +144,4 @@ }

for (const sig of detectedSignals) {
// Count existing occurrences
const regex = new RegExp(`signal:${sig.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}`, 'g');
const existingCount = (existingContent.match(regex) || []).length;
const newCount = existingCount + 1;
appendJournal(` - signal:${sig} (count: ${newCount}, at: ${now()})`);
const existingCount = countSignal(existingContent, sig);
appendJournal(` - signal:${sig} (count: ${existingCount + 1}, at: ${now()})`);
}

@@ -159,4 +152,4 @@

if (pending) {
appendJournal(` - gene_feedback: "${pending.geneTitle}" outcome=failed`);
appendJournal(` - gene_feedback: "${pending.geneTitle}" gene_id=${pending.geneId} outcome=failed`);
clearPending();
}

@@ -19,2 +19,4 @@ #!/usr/bin/env node

import { fileURLToPath } from 'url';
import { resolveConfig } from './lib/resolve-config.mjs';
import { SIGNAL_PATTERNS, ERROR_CONTEXT_RE, SKIP_RE, countSignal } from './lib/signals.mjs';

@@ -26,5 +28,12 @@ const __dirname = dirname(fileURLToPath(import.meta.url));

const API_KEY = process.env.PRISMER_API_KEY;
const BASE_URL = (process.env.PRISMER_BASE_URL || 'https://prismer.cloud').replace(/\/$/, '');
const { apiKey: API_KEY, baseUrl: BASE_URL } = resolveConfig();
/** Resolve the project scope recorded by session-start in sync-cursor.json; falls back to 'global'. */
function getScope() {
  const FALLBACK = 'global';
  try {
    const cursorPath = join(CACHE_DIR, 'sync-cursor.json');
    const parsed = JSON.parse(readFileSync(cursorPath, 'utf8'));
    return parsed?.scope || FALLBACK;
  } catch {
    // Missing or malformed cursor file — scope is unknown.
    return FALLBACK;
  }
}
/** Minimum same-signal occurrences before querying evolution (stuck detection) */

@@ -43,29 +52,4 @@ const STUCK_THRESHOLD = 2;

// Skip trivial commands
const SKIP_RE = /^\s*(ls|pwd|echo|cat|head|tail|wc|which|whoami|date|env|printenv|git\s+(status|log|diff|branch|show|remote|tag)|cd\s)/;
if (SKIP_RE.test(command)) process.exit(0);
// --- Extract signals from command text (same patterns as journal writer) ---
const SIGNAL_PATTERNS = [
{ pattern: /timeout|timed?\s*out/i, type: 'error:timeout' },
{ pattern: /oom|out\s*of\s*memory|kill/i, type: 'error:oom' },
{ pattern: /permission|denied|403|forbidden/i, type: 'error:permission_error' },
{ pattern: /not[\s-]*found|404|missing|can'?t\s*resolve/i, type: 'error:not_found' },
{ pattern: /connect|refused|econnrefused/i, type: 'error:connection_refused' },
{ pattern: /port.*in\s*use|EADDRINUSE|address already in use/i, type: 'error:port_in_use' },
{ pattern: /module.*not.*found|cannot find module/i, type: 'error:module_not_found' },
{ pattern: /build|compile|tsc|webpack/i, type: 'task:build' },
{ pattern: /deploy|k8s|kubectl|docker/i, type: 'task:deploy' },
{ pattern: /test|jest|pytest|mocha|vitest/i, type: 'task:test' },
{ pattern: /migrate|migration|schema/i, type: 'task:migrate' },
{ pattern: /prisma/i, type: 'error:prisma' },
{ pattern: /typescript|TS\d{4}/i, type: 'error:typescript' },
];
const ERROR_CONTEXT_RE = [
/fix|debug|troubleshoot|resolve|repair/i,
/error|fail|broken|crash|timeout/i,
/retry|again|attempt/i,
];
const signals = [];

@@ -93,5 +77,3 @@ const isErrorContext = ERROR_CONTEXT_RE.some(re => re.test(command));

for (const sig of signals) {
const escaped = sig.type.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const regex = new RegExp(`signal:${escaped}`, 'g');
const count = (journal.match(regex) || []).length;
const count = countSignal(journal, sig.type);
if (count > maxCount) maxCount = count;

@@ -118,3 +100,4 @@ }

const res = await fetch(`${BASE_URL}/api/im/evolution/analyze`, {
const scope = getScope();
const res = await fetch(`${BASE_URL}/api/im/evolution/analyze?scope=${encodeURIComponent(scope)}`, {
method: 'POST',

@@ -135,3 +118,5 @@ headers: {

if (advice?.action === 'apply_gene' && advice?.confidence >= 0.3) {
// Lower threshold for stuck detection — agent is already stuck,
// any matching gene is worth suggesting even with low confidence
if (advice?.action === 'apply_gene' && advice?.gene) {
const gene = advice.gene;

@@ -138,0 +123,0 @@ const strategy = gene?.strategy || [];

@@ -18,2 +18,3 @@ #!/usr/bin/env node

import { execFileSync, spawn } from 'child_process';
import { resolveConfig } from './lib/resolve-config.mjs';

@@ -26,13 +27,26 @@ const __dirname = dirname(fileURLToPath(import.meta.url));

const API_KEY = process.env.PRISMER_API_KEY;
const BASE_URL = (process.env.PRISMER_BASE_URL || 'https://prismer.cloud').replace(/\/$/, '');
const { apiKey: API_KEY, baseUrl: BASE_URL } = resolveConfig();
// --- Step 0: Read stdin to determine event type ---
let input = {};
try {
input = JSON.parse(readFileSync(0, 'utf8'));
} catch {}
const eventType = input?.type || input?.event || 'startup';
// --- Step 1: Rotate session journal ---
// Only rotate on startup/clear (new session). Skip on resume/compact (continuing session).
const shouldRotate = eventType === 'startup' || eventType === 'clear';
try {
mkdirSync(CACHE_DIR, { recursive: true });
if (existsSync(JOURNAL_FILE)) {
try { renameSync(JOURNAL_FILE, PREV_JOURNAL_FILE); } catch {}
if (shouldRotate) {
if (existsSync(JOURNAL_FILE)) {
try { renameSync(JOURNAL_FILE, PREV_JOURNAL_FILE); } catch {}
}
writeFileSync(JOURNAL_FILE, `# Session Journal\n\nStarted: ${new Date().toISOString()}\n\n`);
}
writeFileSync(JOURNAL_FILE, `# Session Journal\n\nStarted: ${new Date().toISOString()}\n\n`);
} catch {}

@@ -127,2 +141,10 @@

process.stdout.write(lines.join('\n'));
// Track injected genes for session-end feedback
try {
const injectedFile = join(CACHE_DIR, 'injected-genes.json');
writeFileSync(injectedFile, JSON.stringify(
topGenes.map(g => ({ id: g.id, title: g.title }))
));
} catch {}
}

@@ -136,10 +158,157 @@ }

// --- Step 4: Pre-warm MCP server (background, non-blocking) ---
// --- Step 3a-retry: Retry failed sync queue from previous sessions ---
try {
const child = spawn('npx', ['-y', '@prismer/mcp-server', '--version'], {
stdio: 'ignore',
detached: true,
});
child.unref();
} catch {}
// Retry queue: re-send outcome batches whose sync push failed in a previous
// session. The queue file holds a JSON array of entries shaped like
// { outcomes: [...] } — presumably written by session-end.mjs on push failure
// (TODO confirm against session-end.mjs, not visible here).
if (API_KEY) {
try {
const queueFile = join(CACHE_DIR, 'sync-retry-queue.json');
// readFileSync throws if the queue file doesn't exist — handled by the catch below.
const raw = readFileSync(queueFile, 'utf8');
const queue = JSON.parse(raw);
if (Array.isArray(queue) && queue.length > 0) {
// Batch all queued outcomes into one sync push
const allOutcomes = queue.flatMap(entry => entry.outcomes || []);
if (allOutcomes.length > 0) {
const retryRes = await fetch(`${BASE_URL}/api/im/evolution/sync?scope=${encodeURIComponent(scope)}`, {
method: 'POST',
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${API_KEY}` },
body: JSON.stringify({ push: { outcomes: allOutcomes } }),
signal: AbortSignal.timeout(3000),
});
if (retryRes.ok) {
writeFileSync(queueFile, '[]'); // Clear queue on success
}
// A non-OK response leaves the queue file intact for the next session.
}
}
} catch {
// Queue doesn't exist or retry failed — will try next time
}
}
// --- Step 3b: First-run guidance (only when no API key) ---
// NOTE(review): _PRISMER_SETUP_SHOWN is set on this hook process's own
// environment, which is discarded when the process exits — so the guard only
// dedupes within a single invocation (or when a parent pre-sets the variable).
// Confirm whether cross-session dedupe was intended here.
if (!API_KEY && !process.env._PRISMER_SETUP_SHOWN) {
process.env._PRISMER_SETUP_SHOWN = '1';
// Written to stderr — presumably to keep the hint out of the stdout
// context-injection stream used elsewhere in this script.
process.stderr.write('[Prismer] No API key. Run /prismer-setup or: npx prismer setup\n');
}
// --- Step 3b2: Memory recall (pull MEMORY.md + list available files) ---
// Injects persistent memory into the session context on startup only.
// Non-blocking: every request has a short timeout and all failures are swallowed.
if (API_KEY && eventType === 'startup') {
try {
// Pull MEMORY.md content. Encode the scope: it may be a package name such as
// "@org/pkg", whose "@" and "/" would otherwise corrupt the query string.
// (Also makes this consistent with the retry-queue and skill-content fetches.)
const memRes = await fetch(`${BASE_URL}/api/im/memory/load?scope=${encodeURIComponent(scope)}`, {
headers: { Authorization: `Bearer ${API_KEY}` },
signal: AbortSignal.timeout(2000),
});
if (memRes.ok) {
const mem = await memRes.json();
const content = mem?.data?.content;
if (content && content.trim().length > 10) {
// Inject truncated memory (max 2000 chars to not bloat context)
const truncated = content.length > 2000 ? content.slice(0, 2000) + '\n...(truncated)' : content;
process.stdout.write(`\n[Prismer Memory]\n${truncated}`);
}
}
// List other memory files (titles only, for Claude to recall on demand)
const listRes = await fetch(`${BASE_URL}/api/im/memory/files?scope=${encodeURIComponent(scope)}`, {
headers: { Authorization: `Bearer ${API_KEY}` },
signal: AbortSignal.timeout(1500),
});
if (listRes.ok) {
const list = await listRes.json();
const files = (list?.data || []).filter((f) => f.path !== 'MEMORY.md');
if (files.length > 0) {
// Cap at 10 file names to keep the injected hint short.
const names = files.slice(0, 10).map((f) => f.path).join(', ');
process.stdout.write(`\n[Memory files available: ${names}] Use memory_read to access.`);
}
}
} catch {
// Memory pull failed — non-blocking
}
}
// --- Step 3c: Skill sync (download cloud-installed skills to local) ---
// Mirrors skills installed in Prismer Cloud into ~/.claude/skills/<slug>/SKILL.md.
// Existing local skill files are never overwritten; all failures are swallowed.
if (API_KEY && eventType === 'startup') {
try {
const controller2 = new AbortController();
const timer2 = setTimeout(() => controller2.abort(), 3000);
timer2.unref();
const installedRes = await fetch(`${BASE_URL}/api/im/skills/installed`, {
headers: { Authorization: `Bearer ${API_KEY}`, 'Content-Type': 'application/json' },
signal: controller2.signal,
});
clearTimeout(timer2);
if (installedRes.ok) {
const installedData = await installedRes.json();
const skills = installedData?.data || [];
if (skills.length > 0) {
const { homedir } = await import('os');
const home = homedir();
const skillsDir = join(home, '.claude', 'skills');
let synced = 0;
for (const entry of skills) {
// Entries may be { skill: {...} } wrappers or bare skill objects.
const skill = entry?.skill || entry;
const slug = skill?.slug;
if (!slug || typeof slug !== 'string') continue;
// Sanitize slug (prevent directory traversal)
const safeSlug = slug.replace(/[^a-z0-9_-]/gi, '-');
const skillDir = join(skillsDir, safeSlug);
const skillFile = join(skillDir, 'SKILL.md');
// Skip if already exists locally
try {
readFileSync(skillFile, 'utf8');
continue; // File exists — skip
} catch {
// File doesn't exist — download and write
}
// Fetch content (note: the raw slug is used for the API path, the
// sanitized one only for the local directory name)
try {
const contentRes = await fetch(`${BASE_URL}/api/im/skills/${encodeURIComponent(slug)}/content`, {
headers: { Authorization: `Bearer ${API_KEY}` },
signal: AbortSignal.timeout(2000),
});
if (contentRes.ok) {
const contentData = await contentRes.json();
const content = contentData?.data?.content;
if (content) {
mkdirSync(skillDir, { recursive: true });
writeFileSync(skillFile, content, 'utf8');
synced++;
}
}
} catch {
// Skip this skill on error
}
}
if (synced > 0) {
process.stdout.write(`\n[Prismer Skills] Synced ${synced} skill(s) to ~/.claude/skills/`);
}
}
}
} catch {
// Skill sync failed — non-blocking
}
}
// --- Step 4: Pre-warm MCP server (background, non-blocking, startup only) ---
// Runs `npx -y @prismer/mcp-server --version` once — presumably so the first
// real MCP call later in the session does not pay the npx download cost.
if (eventType === 'startup') {
  try {
    const prewarm = spawn('npx', ['-y', '@prismer/mcp-server', '--version'], {
      detached: true,
      stdio: 'ignore',
    });
    // Detach so this hook process can exit without waiting for the child.
    prewarm.unref();
  } catch {
    // Pre-warm is purely opportunistic — ignore spawn failures.
  }
}
#!/usr/bin/env node
/**
* Stop hook — Session End Context Collection + Async Subagent Launch (v2)
* Stop hook — Evolution Session Review trigger (v3)
*
* When Claude Code session ends:
* 1. Read session-journal.md
* 2. Collect git diff stats
* 3. Determine if session has evolution value
* 4. Write session-context.json
* 5. Spawn async session-evolve.mjs (detached, fire-and-forget)
* v3 improvements over v2.1:
* - Only blocks ONCE per session (tracks marker in journal)
* - Cooldown: 1 hour between blocks across sessions
* - SessionEnd hook handles async fallback when block is skipped
*
* Must complete in < 200ms — all heavy work is delegated to the subagent.
* Stdin JSON: { stop_hook_active, session_id, cwd, ... }
* Stdout JSON: { decision: "block" } or nothing (exit 0 to allow stop)
*/
import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs';
import { readFileSync, writeFileSync, appendFileSync, mkdirSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { execFileSync, spawn } from 'child_process';
import { resolveConfig } from './lib/resolve-config.mjs';

@@ -23,6 +22,40 @@ const __dirname = dirname(fileURLToPath(import.meta.url));

const JOURNAL_FILE = join(CACHE_DIR, 'session-journal.md');
const CONTEXT_FILE = join(CACHE_DIR, 'session-context.json');
const BLOCK_MARKER_FILE = join(CACHE_DIR, 'last-block.json');
// --- Step 1: Read journal ---
const BLOCK_COOLDOWN_MS = 60 * 60 * 1000; // 1 hour
// --- Step 1: Read stdin ---
// Read the hook payload from stdin (fd 0); expected shape includes
// { stop_hook_active, session_id, cwd, ... } per the header comment.
let input = {};
try {
input = JSON.parse(readFileSync(0, 'utf8'));
} catch {
// No input — proceed with defaults
}
// --- Step 2: Prevent infinite loop ---
// stop_hook_active is presumably set by Claude Code while a stop-hook block is
// already being processed; exiting here stops this hook from re-blocking forever.
if (input.stop_hook_active === true) {
process.exit(0);
}
// --- Step 3: Check config ---
// Without an API key the evolution review can't record anything — allow the stop.
const { apiKey } = resolveConfig();
if (!apiKey) process.exit(0);
// --- Step 4: Check cooldown (1h between blocks) ---
try {
const raw = readFileSync(BLOCK_MARKER_FILE, 'utf8');
const marker = JSON.parse(raw);
// marker.ts is the epoch-ms time of the last block (written in Step 9 below).
if (Date.now() - (marker?.ts || 0) < BLOCK_COOLDOWN_MS) {
process.exit(0);
}
} catch {
// No marker — first block ever, proceed
}
// --- Step 5: Read journal ---
let journal = '';

@@ -35,8 +68,18 @@ try {

// --- Step 2: Determine evolution value ---
// --- Step 6: Check if already blocked this session ---
if (journal.includes('[evolution-review-triggered]')) {
process.exit(0);
}
// --- Step 7: Check evolution value ---
function hasEvolutionValue(text) {
const lines = text.split('\n').filter(l => l.trim() && !l.startsWith('#') && !l.startsWith('Started:'));
if (lines.length < 2) return false;
// Any error signal
if (/signal:error:/m.test(text)) return true;
// Repeated signals (>= 2x same type)
const signalCounts = {};

@@ -51,4 +94,9 @@ const re = /signal:(\S+)/g;

}
// Gene was used during session
if (/gene_feedback:/m.test(text)) return true;
if ((text.match(/^- bash:/gm) || []).length >= 5) return true;
// Enough activity (>= 5 tool entries — bash, edit, or write)
if ((text.match(/^- (bash|edit|write):/gm) || []).length >= 5) return true;
return false;

@@ -61,98 +109,65 @@ }

// --- Step 3: Collect git diff stats ---
// --- Step 8: Build concise context for Claude's self-evaluation ---
let gitDiffStat = '';
try {
gitDiffStat = execFileSync('git', ['diff', '--stat', 'HEAD'], {
encoding: 'utf8',
timeout: 2000,
}).trim();
} catch {}
function buildContext(text) {
const parts = [];
// --- Step 4: Detect scope ---
// Gene feedback: extract for Claude to self-evaluate adherence
const fbRe = /gene_feedback:\s*"([^"]+)"\s*gene_id=(\S+)\s*outcome=(\w+)/g;
const feedbacks = [];
let fbm;
while ((fbm = fbRe.exec(text)) !== null) {
feedbacks.push({ title: fbm[1], geneId: fbm[2], autoOutcome: fbm[3] });
}
function detectScope() {
if (process.env.PRISMER_SCOPE) return process.env.PRISMER_SCOPE;
try {
const pkg = JSON.parse(readFileSync(join(process.cwd(), 'package.json'), 'utf8'));
if (pkg.name) return pkg.name;
} catch {}
try {
const remote = execFileSync('git', ['remote', 'get-url', 'origin'], { encoding: 'utf8', timeout: 1000 }).trim();
if (remote) {
let hash = 0;
for (let i = 0; i < remote.length; i++) {
hash = ((hash << 5) - hash + remote.charCodeAt(i)) | 0;
}
return `git_${(hash >>> 0).toString(36)}`;
if (feedbacks.length > 0) {
parts.push('Gene suggestions were made this session. For each, self-evaluate:');
parts.push('Did you actually follow this strategy, or did you solve it independently?');
for (const fb of feedbacks) {
parts.push(` - "${fb.title}" (${fb.geneId}) auto-detected: ${fb.autoOutcome}`);
parts.push(` Call evolve_record with YOUR assessment of outcome + whether you used the strategy`);
}
} catch {}
return 'global';
}
}
// --- Step 5: Determine outcome ---
// Signal summary (concise)
const sigRe = /signal:(\S+)/g;
const sigs = {};
let sm;
while ((sm = sigRe.exec(text)) !== null) {
sigs[sm[1]] = (sigs[sm[1]] || 0) + 1;
}
const sigList = Object.entries(sigs).filter(([, c]) => c >= 2).map(([s, c]) => `${s}(${c}x)`);
if (sigList.length > 0) {
parts.push('');
parts.push(`Repeated signals: ${sigList.join(', ')}`);
}
function detectOutcome(text) {
const feedbackLines = text.split('\n').filter(l => /gene_feedback:/.test(l));
if (feedbackLines.length > 0) {
const last = feedbackLines[feedbackLines.length - 1];
if (/outcome=success/.test(last)) return 'success';
if (/outcome=failed/.test(last)) return 'failed';
// Concise instructions
if (parts.length === 0) {
parts.push('Session had evolution value. Review: evolve_record / evolve_create_gene / memory_write. Max 3 calls.');
} else {
parts.push('');
parts.push('Review: evolve_record (gene feedback) / evolve_create_gene (general pattern) / memory_write (project-specific). Max 3 calls.');
}
const lastSignalIdx = text.lastIndexOf('signal:error:');
const lastBashIdx = text.lastIndexOf('- bash:');
if (lastSignalIdx > lastBashIdx) return 'failed';
return 'unknown';
}
// --- Step 6: Parse journal into structured context for session-evolve.mjs ---
// Extract signal counts
const signalCounts = {};
const sigRe = /signal:(\S+)/g;
let sigMatch;
while ((sigMatch = sigRe.exec(journal)) !== null) {
const sig = sigMatch[1].replace(/[()]/g, '');
signalCounts[sig] = (signalCounts[sig] || 0) + 1;
return parts.join('\n');
}
// Extract gene feedback
const geneFeedback = [];
const fbRe = /gene_feedback:\s*"([^"]+)"\s*outcome=(\w+)/g;
let fbMatch;
while ((fbMatch = fbRe.exec(journal)) !== null) {
geneFeedback.push({ title: fbMatch[1], outcome: fbMatch[2] });
}
// --- Step 9: Mark journal + record block time ---
const context = {
signals: signalCounts,
geneFeedback,
outcome: detectOutcome(journal),
scope: detectScope(),
journalExcerpt: journal.slice(-4000),
gitDiffStat,
timestamp: new Date().toISOString(),
};
try {
mkdirSync(CACHE_DIR, { recursive: true });
writeFileSync(CONTEXT_FILE, JSON.stringify(context, null, 2));
appendFileSync(JOURNAL_FILE, '\n[evolution-review-triggered] (at: ' + new Date().toISOString() + ')\n');
writeFileSync(BLOCK_MARKER_FILE, JSON.stringify({ ts: Date.now() }));
} catch {
process.exit(0);
// Write failed — still block (best-effort marking)
}
// --- Step 7: Launch async subagent ---
// --- Step 10: Block with concise context ---
// Note: Claude Code displays reason as "Stop hook error:" text.
// We accept this UX tradeoff because the alternative (no reason) means
// Claude doesn't know which genes to evaluate. The self-evaluation context
// is essential for accurate Thompson Sampling feedback.
try {
const evolveScript = join(__dirname, 'session-evolve.mjs');
const child = spawn('node', [evolveScript], {
stdio: 'ignore',
detached: true,
env: {
...process.env,
PRISMER_SESSION_CONTEXT: CONTEXT_FILE,
PRISMER_CACHE_DIR: CACHE_DIR,
},
});
child.unref();
} catch {}
const context = buildContext(journal);
process.stdout.write(JSON.stringify({ decision: 'block', reason: context }));
---
name: evolve-analyze
description: Query the Prismer Evolution network for known fix strategies. Use when encountering build failures, runtime errors, test failures, deployment issues, dependency conflicts, or any recurring problem — before attempting your own fix.
description: Query the evolution network for known fix strategies when stuck on an error.
allowed-tools: Bash

---

Before attempting to fix an error, query the Prismer Evolution network for strategies proven effective by other agents.
Query the Prismer Evolution network for proven fix strategies.
## When to use
- Build or compilation errors
- Runtime exceptions or crashes
- Test failures
- Deployment issues
- Dependency conflicts
- Any error you suspect others have encountered
When you encounter a recurring error (same type 2+ times) and need guidance.
## How to query
## How
### Via MCP tools (preferred — if Prismer MCP server is configured)
Use MCP tool `evolve_analyze`:
Use the `evolve_analyze` tool with:
- `error`: the error message or description
- `provider`: `"claude-code"`
- `stage`: current step (e.g., `"build"`, `"test"`, `"deploy"`)
- `scope`: (optional) partition gene pool (e.g., project name or team)
### Via CLI (if `@prismer/sdk` is installed)
```bash
prismer evolve analyze --error "$ARGUMENTS" --provider claude-code --stage <step> [--scope <scope>]
```
### Via curl (fallback)
```bash
curl -s -X POST "${PRISMER_BASE_URL:-https://prismer.cloud}/api/im/evolution/analyze" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $PRISMER_API_KEY" \
-d '{"error":"<error_message>","task_status":"pending","provider":"claude-code","stage":"<step>"}' \
# Optional: append ?scope=<scope> to the URL to partition gene pools
evolve_analyze({
error: "the error message",
signals: ["error:build_failure", "error:typescript"],
provider: "claude-code",
stage: "build"
})
```
## Interpreting results
## After
If the response includes a gene with strategies:
1. Note the `gene_id` and `confidence` score
2. **Try the recommended strategy first** before inventing your own approach
3. After applying the strategy, record the outcome with `/prismer:evolve-record`
If no gene is found, fix the error yourself and consider creating a new gene with `/prismer:evolve-create`.
If a gene is recommended, follow its strategy steps. Then record the outcome with `evolve_record`.
---
name: evolve-create
description: Create a reusable gene from a novel fix pattern. Only invoke manually.
disable-model-invocation: true

allowed-tools: Bash
---

Create a new gene in the evolution network when you discover an effective pattern for solving a problem that others might encounter.
Create a new gene when you discover a reusable fix pattern.
## When to create a gene
## When to use
- You fixed a non-trivial error that others are likely to encounter
- The fix involves specific, reproducible steps
- The pattern is general enough to apply across projects
- No existing gene covers this pattern (check with `/prismer:evolve-analyze` first)
Only when explicitly asked, or when the Stop hook review identifies a transferable pattern.
## Arguments
## How
`/prismer:evolve-create <category> <signal_tags> <name>`
Use MCP tool `evolve_create_gene`:
## How to create
### Via MCP tools (preferred)
Use the `evolve_create_gene` tool with:
- `category`: `"repair"`, `"optimize"`, `"innovate"`, or `"diagnostic"`
- `signals_match`: array of signal patterns (e.g., `[{"type": "error:ECONNREFUSED"}, {"type": "stage:db_connect"}]`)
- `strategy`: array of step descriptions
- `title`: descriptive name for the pattern
- `scope`: (optional) evolution scope to partition gene pools
### Via CLI
```bash
prismer evolve create \
-c repair \
-s '["error:ECONNREFUSED","stage:db_connect"]' \
--strategy "Check if database is running" "Verify connection string" "Increase pool size" \
-n "Database Connection Recovery"
```
evolve_create_gene({
category: "repair", // repair | optimize | innovate | diagnostic
signals_match: ["error:typescript", "error:build_failure"],
strategy: [
"Step 1: concrete action",
"Step 2: concrete action",
"Step 3: verify fix"
],
title: "Short Pattern Name"
})
```
## Guidelines
## Rules
- **Be specific in signal tags**: Use `error:EXACT_ERROR_TEXT` rather than vague tags
- **Keep strategy steps actionable**: Each step should be something an agent can directly execute
- **Name descriptively**: The name should immediately convey what problem this solves
- **Don't duplicate**: Check `/prismer:evolve-analyze` first to ensure no existing gene covers this
- **De-contextualize**: no file paths, line numbers, project names
- **Keep**: error types, tool commands, methodology
- Strategy steps must be executable by ANY agent on ANY project
---
name: evolve-record
description: Record the outcome after applying an evolution strategy.
allowed-tools: Bash

---

After applying a strategy from the evolution network, record whether it worked so future agents benefit.
Record whether a suggested gene strategy worked.
## Arguments
## When to use
`/prismer:evolve-record <gene_id> <success|failed> <score> <summary>`
After `evolve_analyze` recommended a gene and you applied its strategy.
## How to record
## How
### Via MCP tools (preferred)
Use MCP tool `evolve_record`:
Use the `evolve_record` tool with:
- `gene_id`: ID of the gene that was executed
- `signals`: signal array that triggered execution
- `outcome`: `"success"` or `"failed"`
- `score`: quality score (0-1)
- `summary`: brief summary of what happened
- `scope`: (optional) evolution scope to partition gene pools
### Via CLI
```bash
# Success
prismer evolve record -g <gene_id> -o success --score 0.9 --summary "Brief description of what worked"
# Failure
prismer evolve record -g <gene_id> -o failed --score 0.2 --summary "Why the strategy failed"
```
### Via curl (fallback)
```bash
curl -s -X POST "${PRISMER_BASE_URL:-https://prismer.cloud}/api/im/evolution/record" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $PRISMER_API_KEY" \
-d '{
"gene_id": "<gene_id>",
"outcome": "success",
"score": 0.9,
"summary": "Brief description",
"signals": [{"type": "strategy_applied", "provider": "claude-code"}]
}'
# Optional: append ?scope=<scope> to the URL to partition gene pools
evolve_record({
gene_id: "the gene ID from analyze",
outcome: "success", // or "failed"
signals: ["error:build_failure"],
score: 0.9, // 0.0 to 1.0
summary: "Brief description of what happened"
})
```
## Scoring guide
| Score | Meaning |
|-------|---------|
| 1.0 | Strategy worked perfectly, no modifications needed |
| 0.7–0.9 | Strategy worked with minor adjustments |
| 0.4–0.6 | Partially helpful, needed significant changes |
| 0.1–0.3 | Mostly unhelpful, solved differently |
| 0.0 | Strategy completely failed or made things worse |
{
"hooks": {
"PostToolUseFailure": [
"SessionStart": [
{
"matcher": "startup|resume|clear|compact",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-start.mjs\""
}
]
}
],
"PreToolUse": [
{
"matcher": "Bash",

"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/pre-bash-suggest.mjs\""
}
]
},
{
"matcher": "WebFetch",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/pre-web-cache.mjs\""
}
]
}
],
"PostToolUse": [
{
"matcher": "Bash|Edit|Write",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/post-bash-journal.mjs\""
}
]
},
{
"matcher": "WebFetch|WebSearch",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/post-web-save.mjs\""
}
]
}
],
"PostToolUseFailure": [
{
"matcher": "Bash|Edit|Write",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/post-tool-failure.mjs\""
}
]
}
],
"SubagentStart": [
{
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/subagent-start.mjs\""
}
]
}
],
"Stop": [

{
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-stop.mjs\""
}

]
}

],
"SubagentStop": [
"SessionEnd": [
{

"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/session-end.mjs\""
}

]
}
]
}
}

#!/usr/bin/env node
/**
* Async Subagent — Session Evolution Processor (v2)
*
* Spawned as a detached process by session-stop.mjs. Runs fire-and-forget
* with a 30-second timeout. Does NOT block the Claude Code session exit.
*
* Workflow:
* 1. Read session-context.json
* 2. Extract repeated signals → build gene proposals (rule-based, LLM version later)
* 3. POST /api/im/evolution/genes (create gene, visibility='private')
* 4. POST /api/im/evolution/record (feedback for any suggested genes)
* 5. POST /api/im/evolution/sync (batch push + pull cursor update)
* 6. Write local evolution-suggestions.md (for next SessionStart)
* 7. Cleanup session-context.json + session-journal.md
*
* Env vars:
* PRISMER_SESSION_CONTEXT — path to session-context.json
* PRISMER_CACHE_DIR — cache directory
* PRISMER_API_KEY — API key
* PRISMER_BASE_URL — API base URL
*/
import { readFileSync, writeFileSync, unlinkSync, existsSync } from 'fs';
import { join } from 'path';
// --- Configuration (all from env vars; see header comment) ---
const CONTEXT_FILE = process.env.PRISMER_SESSION_CONTEXT;
const CACHE_DIR = process.env.PRISMER_CACHE_DIR;
const API_KEY = process.env.PRISMER_API_KEY;
// Strip a single trailing slash so endpoint paths concatenate cleanly.
const BASE_URL = (process.env.PRISMER_BASE_URL || 'https://prismer.cloud').replace(/\/$/, '');
// Hard ceiling for the whole subagent run (see killTimer below).
const TIMEOUT_MS = 30_000;
// Derived cache paths; null when PRISMER_CACHE_DIR is unset (writes guarded below).
const SUGGESTIONS_FILE = CACHE_DIR ? join(CACHE_DIR, 'evolution-suggestions.md') : null;
// NOTE(review): JOURNAL_FILE is not referenced in this file's visible code —
// confirm it is used by the "journal rotation" mentioned in main()'s cleanup.
const JOURNAL_FILE = CACHE_DIR ? join(CACHE_DIR, 'session-journal.md') : null;
const CURSOR_FILE = CACHE_DIR ? join(CACHE_DIR, 'sync-cursor.json') : null;
// --- Global timeout ---
// Force-exit 0 after TIMEOUT_MS; unref'd so the timer itself never keeps the
// process alive once main() finishes.
const killTimer = setTimeout(() => process.exit(0), TIMEOUT_MS);
killTimer.unref();
// --- Helpers ---
/**
 * POST a JSON payload to a Prismer API endpoint.
 *
 * @param {string} endpoint - Path appended to BASE_URL (e.g. '/api/im/evolution/sync').
 * @param {object} body - JSON-serializable request payload.
 * @returns {Promise<object|null>} Parsed JSON response, or null when the API key
 *   is missing, the request errors/aborts (10s timeout), or the status is non-2xx.
 */
async function apiCall(endpoint, body) {
  if (!API_KEY) return null;
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 10_000);
  timer.unref(); // don't let the abort timer keep the process alive
  try {
    const res = await fetch(`${BASE_URL}${endpoint}`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${API_KEY}`,
      },
      body: JSON.stringify(body),
      signal: controller.signal,
    });
    if (!res.ok) return null;
    return await res.json();
  } catch {
    // Network failure, abort, or invalid JSON — callers treat null as "no result".
    return null;
  } finally {
    // Fix: the original only cleared the timer on the success path; on an
    // error/abort the timer was left pending (harmless only because unref'd).
    clearTimeout(timer);
  }
}
// --- Main ---
/**
 * End-of-session evolution pipeline. Entirely best-effort: any missing input or
 * failed API call silently skips that step (apiCall returns null on failure).
 * Steps mirror the header comment: read context → propose genes → record
 * feedback → sync → write local suggestions → cleanup.
 */
async function main() {
// Step 1: Read context
if (!CONTEXT_FILE || !existsSync(CONTEXT_FILE)) return;
let ctx;
try {
ctx = JSON.parse(readFileSync(CONTEXT_FILE, 'utf8'));
} catch {
return;
}
if (!API_KEY) return;
// NOTE(review): gitDiffStat is destructured but not used below — confirm intended.
const { signals, geneFeedback, outcome, scope, journalExcerpt, gitDiffStat } = ctx;
// Step 2: Extract repeated signals for gene proposals
// Only signals seen 2+ times in the session qualify as recurring patterns.
const repeatedSignals = Object.entries(signals || {})
.filter(([, count]) => count >= 2)
.map(([type, count]) => ({ type, count }));
const suggestions = [];
// Step 3: Create gene proposals for repeated + resolved errors
// Only propose when the session ended in success (i.e. the errors got fixed).
if (outcome === 'success' && repeatedSignals.length > 0) {
const signalsMatch = repeatedSignals.map(s => ({ type: s.type }));
// Rule-based title generation (LLM version in future iteration)
// e.g. 'error:module_not_found' -> 'Module Not Found'; at most 2 parts.
const titleParts = repeatedSignals.slice(0, 2).map(s => {
const suffix = s.type.split(':').pop() || 'unknown';
return suffix
.split('_')
.map(w => w.charAt(0).toUpperCase() + w.slice(1))
.join(' ');
});
const title = `${titleParts.join(' + ')} Resolution`;
// Rule-based strategy extraction from journal
const strategy = extractStrategyHints(journalExcerpt || '', repeatedSignals);
if (strategy.length > 0) {
const result = await apiCall('/api/im/evolution/genes', {
category: 'repair',
signals_match: signalsMatch,
strategy,
title,
description: `Auto-extracted by claude-code-plugin v2. Session outcome: ${outcome}. Signals: ${repeatedSignals.map(s => `${s.type}(${s.count}x)`).join(', ')}.`,
});
if (result?.ok && result?.data?.id) {
suggestions.push({
geneId: result.data.id,
title,
signalsMatch,
strategy,
});
}
}
}
// Step 4: Record gene feedback
if (geneFeedback && geneFeedback.length > 0) {
for (const fb of geneFeedback) {
if (!fb.title) continue;
// Find gene by title via analyze (best-effort matching)
// NOTE(review): the signal sent here is a crude heuristic (title contains
// 'timeout' -> error:timeout, otherwise error:generic), so the gene matched
// by /analyze may not be the gene the feedback is about.
const analyzeResult = await apiCall('/api/im/evolution/analyze', {
signals: [{ type: fb.title.toLowerCase().includes('timeout') ? 'error:timeout' : 'error:generic' }],
});
if (analyzeResult?.data?.gene_id) {
await apiCall('/api/im/evolution/record', {
gene_id: analyzeResult.data.gene_id,
outcome: fb.outcome,
summary: `claude-code-plugin v2: gene "${fb.title}" ${fb.outcome} during session`,
scope: scope || 'global',
});
}
}
}
// Step 5: Sync push
const outcomes = (geneFeedback || [])
.filter(fb => fb.title && fb.outcome)
.map(fb => ({
gene_id: fb.title, // Best-effort — title as fallback
signals: Object.keys(signals || {}),
outcome: fb.outcome,
summary: `Gene "${fb.title}" ${fb.outcome}`,
}));
let cursor = 0;
// CURSOR_FILE may be null (no cache dir) or missing — either way, start at 0.
try {
const raw = readFileSync(CURSOR_FILE, 'utf8');
cursor = JSON.parse(raw)?.cursor || 0;
} catch {}
const syncResult = await apiCall('/api/im/evolution/sync', {
push: outcomes.length > 0 ? { outcomes } : undefined,
pull: { since: cursor, scope: scope || 'global' },
});
// Update cursor
if (syncResult?.data?.pulled?.cursor && CURSOR_FILE) {
try {
writeFileSync(CURSOR_FILE, JSON.stringify({
cursor: syncResult.data.pulled.cursor,
scope,
ts: Date.now(),
}));
} catch {}
}
// Step 6: Write local evolution suggestions for next session
if (SUGGESTIONS_FILE && suggestions.length > 0) {
try {
const lines = [
'# Evolution Suggestions',
'',
`Generated: ${new Date().toISOString()}`,
'',
...suggestions.map(s => [
`## ${s.title}`,
`Gene ID: ${s.geneId}`,
`Signals: ${s.signalsMatch.map(sm => sm.type).join(', ')}`,
`Strategy:`,
...s.strategy.map((st, i) => ` ${i + 1}. ${st}`),
'',
]).flat(),
];
writeFileSync(SUGGESTIONS_FILE, lines.join('\n'));
} catch {}
}
// Step 7: Cleanup
try { if (CONTEXT_FILE) unlinkSync(CONTEXT_FILE); } catch {}
// Keep journal for next session-start rotation (renamed to prev-session-journal.md)
}
// --- Strategy Extraction (rule-based) ---
/**
 * Derive up to five candidate fix steps from a session journal.
 *
 * A journal line containing `signal:<type>` for one of the repeated signals
 * opens an "error zone"; the next non-trivial backticked bash command inside
 * that zone is recorded as the likely fix ("Run: <cmd>"). A line containing
 * `outcome=success` closes the zone. Finally, a generic per-signal hint from
 * SIGNAL_STRATEGY_HINTS is appended if not already covered.
 *
 * @param {string} journal - Journal text (newline-separated entries).
 * @param {Array<{type: string, count?: number}>} repeatedSignals - Signals seen 2+ times.
 * @returns {string[]} At most 5 actionable strategy steps.
 */
function extractStrategyHints(journal, repeatedSignals) {
  const steps = [];
  const watched = new Set(repeatedSignals.map((entry) => entry.type));
  // Read-only / navigation commands are never "the fix".
  const TRIVIAL_CMD = /^(ls|cat|echo|pwd|cd|git (status|log|diff))/;
  let awaitingFix = false;
  let activeSignal = '';
  for (const row of journal.split('\n')) {
    if (row.includes('signal:')) {
      // Signal line: possibly open an error zone, then move on.
      for (const candidate of watched) {
        if (row.includes(`signal:${candidate}`)) {
          awaitingFix = true;
          activeSignal = candidate;
          break;
        }
      }
      continue;
    }
    // A clean bash command following an error is treated as the fix.
    if (awaitingFix && row.startsWith('- bash:') && !row.includes('signal:error')) {
      const backticked = row.match(/`([^`]+)`/);
      if (backticked) {
        const command = backticked[1].trim();
        if (!TRIVIAL_CMD.test(command)) {
          steps.push(`Run: ${command.slice(0, 200)}`);
          awaitingFix = false;
        }
      }
    }
    // Recorded gene success closes any open error zone.
    if (row.includes('outcome=success')) {
      awaitingFix = false;
    }
  }
  // Append a generic hint per signal type, skipping ones already covered.
  for (const { type } of repeatedSignals) {
    const generic = SIGNAL_STRATEGY_HINTS[type];
    if (generic && !steps.some((existing) => existing.includes(generic))) {
      steps.push(generic);
    }
  }
  return steps.slice(0, 5); // Cap at 5 strategy steps
}
// Generic, signal-type-keyed resolution hints (fallbacks when no concrete
// fix command was found in the journal).
const SIGNAL_STRATEGY_HINTS = {
  'error:timeout': 'Add retry with exponential backoff and jitter',
  'error:oom': 'Check memory allocation, increase Node.js heap size or add streaming',
  'error:permission_error': 'Check file/API permissions and authentication tokens',
  'error:not_found': 'Verify dependency installation and path resolution',
  'error:connection_refused': 'Check if the target service is running and ports are correct',
  'error:port_in_use': 'Kill the process using the port or switch to an available port',
  'error:module_not_found': 'Run npm install or check package.json dependencies',
  'error:build_failure': 'Check TypeScript errors, run type generation if needed',
  'error:deploy_failure': 'Verify deployment config, credentials, and target environment',
  'error:test_failure': 'Check test assertions, mocks, and environment setup',
  'error:prisma': 'Run npx prisma generate, then npx prisma db push if schema changed',
  'error:typescript': 'Fix type errors, check tsconfig paths and strict mode settings',
};
// Fire-and-forget entry point: swallow any error, cancel the global kill
// timer, and always exit 0 so the parent Stop hook is never blocked by this
// detached subagent.
main().catch(() => {}).finally(() => {
clearTimeout(killTimer);
process.exit(0);
});