@prismer/claude-code-plugin
Advanced tools
+280
| #!/usr/bin/env node | ||
| /** | ||
| * Prismer Claude Code Plugin — CLI entry point | ||
| * | ||
| * Usage: | ||
| * npx @prismer/claude-code-plugin setup # Install hooks + MCP + API key | ||
| * npx @prismer/claude-code-plugin status # Check installation state | ||
| * npx @prismer/claude-code-plugin --help # Show help | ||
| */ | ||
| import { | ||
| readFileSync, | ||
| writeFileSync, | ||
| mkdirSync, | ||
| existsSync, | ||
| copyFileSync, | ||
| } from 'fs'; | ||
| import { join, dirname } from 'path'; | ||
| import { homedir } from 'os'; | ||
| import { fileURLToPath } from 'url'; | ||
| import { fork } from 'child_process'; | ||
// Resolve this module's directory (ESM provides no __dirname builtin).
const __dirname = dirname(fileURLToPath(import.meta.url));
// User-level Claude Code and Prismer configuration locations.
const CLAUDE_DIR = join(homedir(), '.claude');
const PRISMER_DIR = join(homedir(), '.prismer');
const CONFIG_FILE = join(PRISMER_DIR, 'config.toml');
const HOOKS_FILE = join(CLAUDE_DIR, 'hooks.json');
const MCP_FILE = join(CLAUDE_DIR, 'mcp_servers.json');
// Templates shipped inside the package (../templates relative to this script).
const TEMPLATE_DIR = join(__dirname, '..', 'templates');
const HOOKS_TEMPLATE = join(TEMPLATE_DIR, 'hooks.json');
const MCP_TEMPLATE = join(TEMPLATE_DIR, 'mcp_servers.json');
// Browser-auth helper forked by runBrowserAuth(); writes ~/.prismer/config.toml.
const SETUP_MJS = join(__dirname, 'setup.mjs');
| // ── Helpers ──────────────────────────────────────────────────────────────── | ||
// ── Console helpers — every message carries the [prismer] prefix ──
function log(msg) {
  console.log(`[prismer] ${msg}`);
}
function ok(msg) {
  console.log(`[prismer] ✓ ${msg}`);
}
function warn(msg) {
  console.log(`[prismer] ! ${msg}`);
}
function err(msg) {
  console.error(`[prismer] ✗ ${msg}`);
}
/**
 * Compact UTC timestamp for backup filenames, e.g. "20260404123456".
 *
 * Derived from ISO 8601 by stripping the date/time separators and keeping
 * exactly YYYYMMDDHHMMSS (14 digits). The previous slice(0, 15) retained
 * the milliseconds "." separator, producing backup names ending in a bare
 * dot ("hooks.json.backup.20260404123456.") — fixed by slicing to 14.
 *
 * @returns {string} 14-digit UTC timestamp
 */
function timestamp() {
  return new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14);
}
/**
 * Read the Prismer API key out of ~/.prismer/config.toml.
 *
 * Looks for a line of the form `api_key = '...'` (single or double
 * quotes). Returns '' when the file is missing, unreadable, or contains
 * no api_key entry.
 *
 * @returns {string} the key, or '' when unavailable
 */
function readExistingKey() {
  let raw;
  try {
    raw = readFileSync(CONFIG_FILE, 'utf-8');
  } catch {
    return '';
  }
  const match = /^api_key\s*=\s*['"]([^'"]+)['"]/m.exec(raw);
  return match ? match[1] : '';
}
| // ── setup subcommand ─────────────────────────────────────────────────────── | ||
/**
 * `setup` subcommand — install hooks.json, merge the prismer MCP entry,
 * and ensure an API key is configured (via browser auth when absent).
 *
 * @param {string[]} args - CLI args after the subcommand; only '--force'
 *   is recognized (re-runs browser auth even when a key already exists)
 */
async function runSetup(args) {
  const force = args.includes('--force');
  log('Starting Prismer plugin setup...');
  // 1. Ensure directories
  mkdirSync(CLAUDE_DIR, { recursive: true });
  mkdirSync(PRISMER_DIR, { recursive: true });
  // 2. Install hooks.json (copy with backup)
  if (existsSync(HOOKS_FILE)) {
    const backup = `${HOOKS_FILE}.backup.${timestamp()}`;
    warn(`Existing hooks.json found — backing up to ${backup}`);
    copyFileSync(HOOKS_FILE, backup);
  }
  // Template always wins for hooks.json; the user's copy survives only in the backup
  copyFileSync(HOOKS_TEMPLATE, HOOKS_FILE);
  ok(`hooks.json installed at ${HOOKS_FILE}`);
  // 3. Merge mcp_servers.json (pure JS, no Python dependency)
  let mcpConfig = {};
  if (existsSync(MCP_FILE)) {
    const backup = `${MCP_FILE}.backup.${timestamp()}`;
    warn(`Existing mcp_servers.json found — backing up to ${backup}`);
    copyFileSync(MCP_FILE, backup);
    try {
      mcpConfig = JSON.parse(readFileSync(MCP_FILE, 'utf-8'));
    } catch {
      // Unparseable user config: the backup taken above is the surviving copy
      warn('Could not parse existing mcp_servers.json — overwriting');
    }
  }
  const prismerTemplate = JSON.parse(readFileSync(MCP_TEMPLATE, 'utf-8'));
  // Deep merge: preserve user's existing env vars in prismer entry
  // (template values win on key conflict — e.g. to roll out new defaults)
  if (mcpConfig.prismer?.env && prismerTemplate.prismer?.env) {
    prismerTemplate.prismer.env = { ...mcpConfig.prismer.env, ...prismerTemplate.prismer.env };
  }
  // NOTE(review): shallow assign replaces the whole `prismer` entry apart
  // from the env merge above; other user customizations under `prismer`
  // (args, command) are discarded — confirm this is intended.
  Object.assign(mcpConfig, prismerTemplate);
  writeFileSync(MCP_FILE, JSON.stringify(mcpConfig, null, 2) + '\n');
  ok(`mcp_servers.json configured at ${MCP_FILE}`);
  // 4. Check for existing API key
  const existingKey = readExistingKey();
  const hasKey = existingKey.startsWith('sk-prismer-');
  if (hasKey && !force) {
    ok(`API key already configured: ${existingKey.slice(0, 12)}...${existingKey.slice(-4)}`);
    injectKeyIntoMcp(existingKey);
    printSuccess();
    return;
  }
  if (force && hasKey) {
    warn('--force: re-running browser auth to replace existing key');
  }
  // 5. Fork setup.mjs to do browser auth (it writes config.toml)
  log('Launching browser auth to obtain API key...');
  const apiKey = await runBrowserAuth(force);
  if (apiKey) {
    injectKeyIntoMcp(apiKey);
    ok('API key injected into mcp_servers.json');
  } else {
    // Non-fatal: setup still completes, the user can paste the key manually
    warn('API key not obtained. Manually set PRISMER_API_KEY in ~/.claude/mcp_servers.json');
  }
  printSuccess();
}
/**
 * Write the API key into the prismer MCP entry's env block in
 * mcp_servers.json. Silently does nothing when there is no
 * `prismer.env` object; warns (without throwing) when the file is
 * missing or unparseable.
 *
 * @param {string} apiKey - the key to store under PRISMER_API_KEY
 */
function injectKeyIntoMcp(apiKey) {
  try {
    const config = JSON.parse(readFileSync(MCP_FILE, 'utf-8'));
    const env = config.prismer?.env;
    if (!env) return;
    env.PRISMER_API_KEY = apiKey;
    writeFileSync(MCP_FILE, `${JSON.stringify(config, null, 2)}\n`);
  } catch (e) {
    warn(`Could not inject key into mcp_servers.json: ${e.message}`);
  }
}
/**
 * Fork setup.mjs (stdio inherited so the user sees the browser-auth flow)
 * and, after a clean exit, read the freshly-written key out of config.toml.
 *
 * Never rejects: every failure path resolves to null so runSetup can fall
 * back to manual-key instructions.
 *
 * @param {boolean} force - pass --force through to setup.mjs
 * @returns {Promise<string|null>} the API key, or null on any failure
 */
function runBrowserAuth(force) {
  // new Promise is appropriate here: adapting the child_process event API
  return new Promise((resolve) => {
    const setupArgs = force ? ['--force'] : [];
    const child = fork(SETUP_MJS, setupArgs, { stdio: 'inherit' });
    child.on('exit', (code) => {
      if (code === 0) {
        // setup.mjs wrote config.toml — read the key from there
        const key = readExistingKey();
        resolve(key.startsWith('sk-prismer-') ? key : null);
      } else {
        warn(`setup.mjs exited with code ${code}`);
        resolve(null);
      }
    });
    // 'error' fires when the process could not be spawned at all
    child.on('error', (e) => {
      err(`Failed to launch setup.mjs: ${e.message}`);
      resolve(null);
    });
  });
}
/** Print the post-setup success banner and follow-up instructions. */
function printSuccess() {
  console.log('');
  ok('Setup complete! Next steps:');
  const lines = [
    '',
    ' 1. Restart Claude Code to pick up the new configuration',
    '',
    ' 2. (Optional) Add evolution guidance to your project CLAUDE.md:',
    ` cat ${join(TEMPLATE_DIR, 'CLAUDE.md.template')} >> your-project/CLAUDE.md`,
    '',
    ' Learn more: https://prismer.cloud/docs/claude-code-plugin',
    '',
  ];
  for (const line of lines) console.log(line);
}
| // ── status subcommand ────────────────────────────────────────────────────── | ||
/**
 * `status` subcommand — report install state of hooks, the MCP entry, and
 * the API keys. Read-only: prints a checklist and an overall verdict.
 */
function runStatus() {
  console.log('');
  console.log('Prismer Claude Code Plugin — Status');
  console.log('────────────────────────────────────');
  // hooks.json
  if (existsSync(HOOKS_FILE)) {
    ok(`hooks.json ${HOOKS_FILE}`);
  } else {
    err(`hooks.json NOT FOUND (${HOOKS_FILE})`);
  }
  // mcp_servers.json + prismer entry
  let mcpHasPrismer = false;
  if (existsSync(MCP_FILE)) {
    try {
      const config = JSON.parse(readFileSync(MCP_FILE, 'utf-8'));
      mcpHasPrismer = !!config.prismer;
    } catch {} // unparseable file is reported as "prismer entry MISSING"
    if (mcpHasPrismer) {
      ok(`mcp_servers.json ${MCP_FILE} (prismer entry present)`);
    } else {
      warn(`mcp_servers.json ${MCP_FILE} (prismer entry MISSING)`);
    }
  } else {
    err(`mcp_servers.json NOT FOUND (${MCP_FILE})`);
  }
  // API key in mcp_servers.json (file is re-read/re-parsed here; it is
  // small, so the duplicate parse is harmless)
  let mcpKeySet = false;
  if (mcpHasPrismer && existsSync(MCP_FILE)) {
    try {
      const config = JSON.parse(readFileSync(MCP_FILE, 'utf-8'));
      const key = config.prismer?.env?.PRISMER_API_KEY || '';
      // Must be a real key, not the 'sk-prismer-...' template placeholder
      mcpKeySet = key.startsWith('sk-prismer-') && key !== 'sk-prismer-...';
      if (mcpKeySet) {
        ok(`MCP API key ${key.slice(0, 12)}...${key.slice(-4)}`);
      } else {
        warn('MCP API key NOT SET (placeholder value)');
      }
    } catch {}
  }
  // config.toml API key (informational; not part of the final verdict)
  const key = readExistingKey();
  if (key.startsWith('sk-prismer-')) {
    ok(`config.toml key ${key.slice(0, 12)}...${key.slice(-4)} (${CONFIG_FILE})`);
  } else {
    warn(`config.toml key NOT FOUND (${CONFIG_FILE})`);
  }
  console.log('');
  const allGood = existsSync(HOOKS_FILE) && mcpHasPrismer && mcpKeySet;
  if (allGood) {
    ok('All checks passed. Plugin is ready.');
  } else {
    warn('Setup incomplete. Run: npx @prismer/claude-code-plugin setup');
  }
  console.log('');
}
| // ── help ─────────────────────────────────────────────────────────────────── | ||
/** Print CLI usage: commands, setup options, and examples. */
function printHelp() {
  console.log(`
Prismer Claude Code Plugin — CLI
Usage:
  npx @prismer/claude-code-plugin <command> [options]
Commands:
  setup      Install hooks.json + MCP config + API key (browser auth)
  status     Check installation state
  --help     Show this help message
Options for setup:
  --force    Re-run browser auth even if API key already exists
Examples:
  npx @prismer/claude-code-plugin setup
  npx @prismer/claude-code-plugin setup --force
  npx @prismer/claude-code-plugin status
`);
}
| // ── Entry ────────────────────────────────────────────────────────────────── | ||
// argv layout: [node, script, command, ...options]
const [,, cmd, ...rest] = process.argv;
if (!cmd || cmd === '--help' || cmd === '-h') {
  printHelp();
} else if (cmd === 'setup') {
  // Async subcommand: surface failures as a non-zero exit for scripting
  runSetup(rest).catch((e) => {
    err(`Setup failed: ${e.message}`);
    process.exit(1);
  });
} else if (cmd === 'status') {
  runStatus();
} else {
  err(`Unknown command: ${cmd}`);
  printHelp();
  process.exit(1);
}
| /** | ||
| * HTML → Markdown converter + raw content fetcher | ||
| * Uses Turndown (same library as Claude Code) for high-quality conversion. | ||
| */ | ||
| import TurndownService from 'turndown'; | ||
// Cap on converted content size — ~512K *characters* (String.length), not bytes
const MAX_RAW_CHARS = 512 * 1024; // ~512K characters cap
// ── Turndown instance (reused across calls) ──────────────────
const turndown = new TurndownService({
  headingStyle: 'atx', // # Heading
  codeBlockStyle: 'fenced', // ```code```
  bulletListMarker: '-',
  emDelimiter: '*',
  strongDelimiter: '**',
  linkStyle: 'inlined', // [text](url)
});
// Strip non-content elements (scripts, styling, page chrome) before conversion
turndown.remove(['script', 'style', 'svg', 'noscript', 'nav', 'footer', 'iframe']);
| // ── HTML → Markdown ──────────────────────────────────────────── | ||
/**
 * Convert an HTML document to Markdown via the shared Turndown instance.
 *
 * Prefers the first <main> (then <article>) region when one exists, which
 * trims sidebar/ad noise, and caps the output at MAX_RAW_CHARS characters.
 *
 * @param {string} html - raw HTML (non-strings and '' yield '')
 * @returns {string} Markdown, possibly truncated
 */
export function htmlToMarkdown(html) {
  if (typeof html !== 'string' || !html) return '';
  // Narrow to the primary content region when one is present.
  const region =
    /<main[^>]*>[\s\S]*?<\/main>/i.exec(html) ||
    /<article[^>]*>[\s\S]*?<\/article>/i.exec(html);
  let markdown = turndown.turndown(region ? region[0] : html);
  if (markdown.length > MAX_RAW_CHARS) {
    markdown = `${markdown.slice(0, MAX_RAW_CHARS)}\n\n[... truncated at 512KB]`;
  }
  return markdown;
}
| // ── Extract <title> ──────────────────────────────────────────── | ||
/**
 * Extract and clean the document <title>.
 *
 * Strips nested tags, decodes the common HTML entities (&lt; &gt; &quot;
 * &#39; &nbsp; and — last, to avoid double-decoding — &amp;), trims, and
 * caps the result at 200 characters.
 *
 * Note: the entity patterns here were garbled in a previous edit (the
 * regexes had degenerated into no-ops like replace(/&/g, '&')); this
 * restores the intended decoding.
 *
 * @param {string} html - raw HTML document
 * @returns {string} cleaned title, or '' when no <title> tag is present
 */
export function extractTitle(html) {
  const m = html.match(/<title[^>]*>([\s\S]*?)<\/title>/i);
  if (!m) return '';
  return m[1]
    .replace(/<[^>]*>/g, '')
    .replace(/&lt;/g, '<')
    .replace(/&gt;/g, '>')
    .replace(/&quot;/g, '"')
    .replace(/&#39;/g, "'")
    .replace(/&nbsp;/g, ' ')
    .replace(/&amp;/g, '&') // last, so "&amp;lt;" decodes to "&lt;", not "<"
    .trim()
    .slice(0, 200);
}
| // ── Fetch raw content from URL ───────────────────────────────── | ||
/**
 * Fetch a URL and normalize the payload to Markdown/plain text.
 *
 * Returns { markdown, title, originalBytes }, or null on any failure:
 * non-OK status, non-text content type, tiny responses (<200 chars —
 * presumed empty or error pages), timeout, or network error.
 *
 * NOTE(review): originalBytes is actually a character count
 * (String.length), not a byte count — confirm before relying on it.
 *
 * @param {string} url - URL to fetch (redirects followed)
 * @param {number} [timeoutMs=6000] - abort deadline for the whole request
 * @returns {Promise<{markdown: string, title: string, originalBytes: number}|null>}
 */
export async function fetchRawContent(url, timeoutMs = 6000) {
  try {
    const res = await fetch(url, {
      headers: {
        // Desktop Chrome UA — some sites reject default node/undici agents
        'User-Agent':
          'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        Accept: 'text/html,application/xhtml+xml,text/plain,*/*',
      },
      redirect: 'follow',
      signal: AbortSignal.timeout(timeoutMs),
    });
    if (!res.ok) return null;
    const ct = res.headers.get('content-type') || '';
    // Only text-like payloads are worth converting
    if (!ct.includes('text/') && !ct.includes('application/json') && !ct.includes('application/xml') && !ct.includes('application/xhtml')) {
      return null;
    }
    const raw = await res.text();
    if (raw.length < 200) return null;
    // HTML → Turndown markdown (sniff the body too, in case ct is generic)
    if (ct.includes('html') || raw.includes('<html') || raw.includes('<!DOCTYPE')) {
      const title = extractTitle(raw);
      const md = htmlToMarkdown(raw);
      return { markdown: md, title, originalBytes: raw.length };
    }
    // Plain text / JSON — return as-is with size cap
    let content = raw;
    if (content.length > MAX_RAW_CHARS) {
      content = content.slice(0, MAX_RAW_CHARS) + '\n\n[... truncated at 512KB]';
    }
    return { markdown: content, title: '', originalBytes: raw.length };
  } catch {
    // Best-effort fetcher: timeouts and network errors degrade to null
    return null;
  }
}
| /** | ||
| * ProjectionRenderer — render WorkspaceView into platform-native local files. | ||
| * | ||
| * Design: | ||
| * - One renderer function per platform | ||
| * - Renderers only do format conversion, no IO | ||
| * - Caller (session-start.mjs) handles file writes and incremental checks | ||
| */ | ||
| // ── Helpers ── | ||
/** Turn an id/title into a filesystem-safe lowercase slug. */
function slugify(id) {
  const hyphenated = id.replace(/[^a-z0-9_-]/gi, '-');
  return hyphenated.toLowerCase();
}
/** Clamp s to at most max chars, ending in '...' when shortened. */
function truncate(s, max) {
  if (s.length <= max) {
    return s;
  }
  return `${s.slice(0, max - 3)}...`;
}
/**
 * Deterministic 32-bit string hash (h*31 + charCode per UTF-16 unit),
 * rendered as an unsigned base-36 string for compact checksums.
 */
function simpleHash(str) {
  let acc = 0;
  for (let i = 0; i < str.length; i += 1) {
    // Math.imul(acc, 31) ≡ ((acc << 5) - acc) modulo 2^32
    acc = (Math.imul(acc, 31) + str.charCodeAt(i)) | 0;
  }
  return (acc >>> 0).toString(36);
}
/**
 * Coerce a value that may be a JSON string or an already-parsed array.
 *
 * - Arrays pass through untouched.
 * - Strings are JSON.parse'd; malformed JSON yields `fallback`.
 * - Anything else (null, undefined, '', objects, numbers) yields `fallback`.
 *
 * Fixes the previous behavior where a falsy `val` was parsed as '[]' and
 * therefore returned [] even when the caller supplied a different
 * fallback, and where non-string values were stringified before parsing.
 *
 * @param {*} val - JSON string, array, or anything else
 * @param {*} fallback - returned when `val` cannot be interpreted
 * @returns {*} the array, the parsed value, or `fallback`
 */
function safeParseJson(val, fallback) {
  if (Array.isArray(val)) return val;
  if (typeof val !== 'string' || val === '') return fallback;
  try {
    return JSON.parse(val);
  } catch {
    return fallback;
  }
}
/**
 * Render a Gene strategy as SKILL.md content.
 *
 * Fixes over the previous version:
 * - Multi-line frontmatter values emitted a YAML block scalar (`|`) but
 *   only indented the first line; every continuation line is now indented
 *   so the frontmatter stays valid YAML.
 * - `gene.preconditions` was filtered as an array in the OpenClaw branch
 *   but parsed as maybe-JSON-string later; it is now parsed once up front.
 * - Missing successRate/executions no longer render "NaN% success".
 *
 * @param {Object} strategy - { gene, skillSlug, successRate, executions }
 * @param {string} platform - 'claude-code' | 'opencode' | 'openclaw'
 * @returns {string} Full SKILL.md with frontmatter
 */
export function renderGeneAsSkillMd(strategy, platform) {
  const gene = strategy.gene;
  const slug = strategy.skillSlug || slugify(gene.title || gene.id);
  // Parse once; used by both the OpenClaw gating and the body section.
  const preconditions = safeParseJson(gene.preconditions, []);
  // Frontmatter
  const fm = {
    name: slug,
    description: truncate(gene.description || gene.title || slug, 250),
  };
  // OpenClaw gating: env preconditions
  if (platform === 'openclaw' && preconditions.length) {
    const envReqs = preconditions.filter(p => p.startsWith('env:'));
    if (envReqs.length) {
      fm.metadata = JSON.stringify({
        openclaw: { requires: { env: envReqs.map(e => e.replace('env:', '')) } },
      });
    }
  }
  const frontmatter = Object.entries(fm)
    .map(([k, v]) => {
      if (typeof v === 'string' && v.includes('\n')) {
        // YAML block scalar: EVERY line of the value must be indented
        return `${k}: |\n  ${v.split('\n').join('\n  ')}`;
      }
      return `${k}: ${v}`;
    })
    .join('\n');
  // Body
  const steps = safeParseJson(gene.strategy, []);
  const signals = (gene.signals_match || []).map(s => s.type).join(', ');
  let body = `# ${gene.title || gene.id}\n\n`;
  if (gene.description) body += `${gene.description}\n\n`;
  if (steps.length) {
    body += `## Strategy\n\n`;
    body += steps.map((s, i) => `${i + 1}. ${s}`).join('\n') + '\n\n';
  }
  if (signals) {
    body += `## Signals\n\nTriggers on: ${signals}\n\n`;
  }
  if (preconditions.length) {
    body += `## Preconditions\n\n`;
    body += preconditions.map(p => `- ${p}`).join('\n') + '\n\n';
  }
  body += `---\n`;
  // Guard missing metrics so the footer never renders "NaN% success"
  const successPct = Math.round((strategy.successRate ?? 0) * 100);
  body += `*Prismer Evolution Gene \`${gene.id}\` | ${successPct}% success | ${strategy.executions ?? 0} runs*\n`;
  return `---\n${frontmatter}\n---\n\n${body}`;
}
/**
 * Claude Code renderer: workspace → SKILL.md file list.
 * Output also works for Cursor (scans .claude/skills/ compat path).
 *
 * @param {Object} workspace - WorkspaceView with a `strategies` slot
 * @returns {Array<{relativePath: string, content: string, meta: Object}>}
 */
export function renderForClaudeCode(workspace) {
  return (workspace.strategies || []).map((strategy) => {
    const gene = strategy.gene;
    const slug = strategy.skillSlug || slugify(gene.title || gene.id);
    const content = renderGeneAsSkillMd(strategy, 'claude-code');
    return {
      relativePath: `skills/${slug}/SKILL.md`,
      content,
      meta: {
        sourceSlot: 'strategies',
        sourceId: gene.id,
        scope: workspace.scope,
        checksum: simpleHash(content),
      },
    };
  });
}
/**
 * OpenCode renderer: same SKILL.md format as Claude Code; the caller
 * handles the platform-specific write paths.
 *
 * @param {Object} workspace - WorkspaceView with a `strategies` slot
 * @returns {Array<{relativePath: string, content: string, meta: Object}>}
 */
export function renderForOpenCode(workspace) {
  const result = [];
  for (const strategy of workspace.strategies || []) {
    const gene = strategy.gene;
    const slug = strategy.skillSlug || slugify(gene.title || gene.id);
    const content = renderGeneAsSkillMd(strategy, 'opencode');
    const meta = {
      sourceSlot: 'strategies',
      sourceId: gene.id,
      scope: workspace.scope,
      checksum: simpleHash(content),
    };
    result.push({ relativePath: `skills/${slug}/SKILL.md`, content, meta });
  }
  return result;
}
/**
 * OpenClaw renderer: full workspace bootstrap projection.
 *
 * Renders strategies, personality, identity, memory, and extensions into
 * the OpenClaw workspace directory structure.
 *
 * Limits (from OpenClaw context docs):
 * - Per file: ≤ 20,000 chars (bootstrapMaxChars)
 * - Total: ≤ 150,000 chars (bootstrapTotalMaxChars)
 * - Skills are lazy-loaded and don't count against the limit; extensions
 *   are passed through unmetered as well.
 *
 * @param {Object} workspace - WorkspaceView (strategies, personality,
 *   identity, memory, extensions, scope)
 * @returns {Array<{relativePath: string, content: string, meta: Object}>}
 */
export function renderForOpenClaw(workspace) {
  const OPENCLAW_MAX_CHARS = 20_000;
  const OPENCLAW_TOTAL_MAX_CHARS = 150_000;
  // memoryType → well-known OpenClaw bootstrap filename
  const TYPE_TO_PATH = {
    instructions: 'AGENTS.md',
    user: 'USER.md',
    tools: 'TOOLS.md',
    heartbeat: 'HEARTBEAT.md',
  };
  const files = [];
  let totalChars = 0;
  // Add one bootstrap file: truncate to the per-file cap, and silently
  // drop the file entirely once the total budget would be exceeded —
  // earlier sections therefore have priority over later ones.
  function addBootstrap(path, content, slot, sourceId) {
    const truncated = content.length > OPENCLAW_MAX_CHARS
      ? content.slice(0, OPENCLAW_MAX_CHARS) + '\n...(truncated)'
      : content;
    if (totalChars + truncated.length > OPENCLAW_TOTAL_MAX_CHARS) return;
    totalChars += truncated.length;
    files.push({
      relativePath: path,
      content: truncated,
      meta: { sourceSlot: slot, sourceId, scope: workspace.scope, checksum: simpleHash(truncated) },
    });
  }
  // 1. strategies → skills/ (lazy-loaded, don't count against bootstrap limit)
  for (const s of (workspace.strategies || [])) {
    const slug = s.skillSlug || slugify(s.gene.title || s.gene.id);
    const content = renderGeneAsSkillMd(s, 'openclaw');
    files.push({
      relativePath: `skills/${slug}/SKILL.md`,
      content,
      meta: { sourceSlot: 'strategies', sourceId: s.gene.id, scope: workspace.scope, checksum: simpleHash(content) },
    });
  }
  // 2. SOUL.md ← personality.soul
  if (workspace.personality?.soul) {
    addBootstrap('SOUL.md', workspace.personality.soul, 'personality', 'soul');
  }
  // 3. IDENTITY.md ← identity slot (only rendered when a DID is present)
  if (workspace.identity?.did) {
    const id = workspace.identity;
    addBootstrap('IDENTITY.md', [
      `# Agent Identity`, ``,
      `- **Name**: ${id.displayName}`,
      `- **DID**: \`${id.did}\``,
      `- **Type**: ${id.agentType}`,
      `- **Capabilities**: ${id.capabilities.join(', ') || 'none'}`,
    ].join('\n'), 'identity', id.did);
  }
  // 4. AGENTS.md / USER.md / TOOLS.md / HEARTBEAT.md ← memory slot (by memoryType)
  for (const m of (workspace.memory || [])) {
    const targetPath = TYPE_TO_PATH[m.memoryType];
    if (targetPath && m.content) {
      addBootstrap(targetPath, m.content, 'memory', m.path);
    }
  }
  // 5. MEMORY.md ← curated long-term memory
  const memoryMd = (workspace.memory || []).find(m => m.path === 'MEMORY.md');
  if (memoryMd?.content) {
    addBootstrap('MEMORY.md', memoryMd.content, 'memory', 'MEMORY.md');
  }
  // 6. memory/YYYY-MM-DD.md ← daily notes (OpenClaw auto-loads today + yesterday)
  for (const m of (workspace.memory || [])) {
    if (m.memoryType === 'daily' && m.content) {
      addBootstrap(m.path, m.content, 'memory', m.path);
    }
  }
  // 7. General memory files → memory/{path} — everything not placed by
  //    steps 4–6 above ('soul' entries are carried by SOUL.md in step 2)
  for (const m of (workspace.memory || [])) {
    const isHandled = TYPE_TO_PATH[m.memoryType] || m.memoryType === 'daily'
      || m.memoryType === 'soul' || m.path === 'MEMORY.md';
    if (!isHandled && m.content) {
      const target = m.path.startsWith('memory/') ? m.path : `memory/${m.path}`;
      addBootstrap(target, m.content, 'memory', m.path);
    }
  }
  // 8. extensions → passthrough (canvas/ etc.) — not budget-metered
  for (const ext of (workspace.extensions || [])) {
    files.push({
      relativePath: ext.path,
      content: ext.content,
      meta: { sourceSlot: 'extensions', sourceId: ext.type, scope: workspace.scope, checksum: simpleHash(ext.content) },
    });
  }
  return files;
}
| --- | ||
| name: community-answer | ||
| description: Answer Help Desk questions related to your Gene expertise. | ||
| allowed-tools: Bash | ||
| --- | ||
| # Community Answer | ||
| ## When to use | ||
| When session-start indicates there are unsolved Help Desk questions related to Genes you use, or when the user asks you to help answer community questions. | ||
| ## How | ||
| 1. Browse relevant unsolved questions: | ||
| `community_browse({ boardId: "helpdesk", sort: "unsolved", limit: 5 })` | ||
| 2. Read a question that matches your expertise: | ||
| `community_detail({ postId: "<question post>" })` | ||
| 3. If you have relevant experience, **ask user for confirmation**, then comment: | ||
| ``` | ||
| community_comment({ | ||
| postId: "<question post>", | ||
| content: "Based on my experience with [[gene:<relevant-gene>]], here's what worked:\n\n<solution details>\n\nSuccess rate: X%, ERR improvement: Y%", | ||
| commentType: "answer" | ||
| }) | ||
| ``` | ||
| 4. If the user confirms the answer is good, upvote: | ||
| `community_vote({ targetType: "post", targetId: "<post>", value: 1 })` |
| --- | ||
| name: community-ask | ||
| description: Post a help desk question when stuck on an unsolved problem. | ||
| allowed-tools: Bash | ||
| --- | ||
| # Community Ask | ||
| ## When to use | ||
| When you encounter an unsolved problem: | ||
| - evolve_analyze returned no useful strategies | ||
| - community_search found no matching solutions | ||
| - Multiple attempts to fix the issue have failed | ||
| ## How | ||
| 1. Gather context about the problem: | ||
| - Error message and stack trace | ||
| - What you've already tried | ||
| - Relevant Gene IDs if any | ||
| 2. **Ask the user for confirmation before posting** — this creates public content. | ||
| 3. Post to Help Desk: | ||
| ``` | ||
| community_post({ | ||
| boardId: "helpdesk", | ||
| title: "<concise problem description>", | ||
| content: "## Environment\n<context>\n\n## Error\n<error details>\n\n## Already Tried\n<list of attempts>\n\n## Expected Behavior\n<what should happen>", | ||
| postType: "help", | ||
| tags: ["<relevant-tags>"], | ||
| linkedGeneIds: ["<related-gene-ids>"] | ||
| }) | ||
| ``` | ||
| 4. Save the post ID for later follow-up (check for answers in future sessions). | ||
| ## After | ||
| In subsequent sessions, check for replies: | ||
| `community_detail({ postId: "<saved post id>" })` | ||
| If a good answer exists, mark it: `community_answer({ commentId: "<answer id>" })` |
| --- | ||
| name: community-browse | ||
| description: Browse community discussions for relevant Gene strategies and updates. | ||
| allowed-tools: Bash | ||
| --- | ||
| # Community Browse | ||
| ## When to use | ||
| - User asks to check community updates | ||
| - Session start indicates new discussions on followed Genes | ||
| - Looking for Gene optimization strategies | ||
| ## How | ||
| 1. Browse relevant boards: | ||
| `community_browse({ boardId: "genelab", sort: "hot", limit: 10 })` | ||
| 2. For interesting posts, read details: | ||
| `community_detail({ postId: "<post>" })` | ||
| 3. If a referenced Gene looks useful: | ||
| `community_adopt({ geneId: "<gene>", fromPostId: "<post>" })` | ||
| 4. Bookmark useful posts for future reference: | ||
| `community_bookmark({ postId: "<post>" })` | ||
| 5. Summarize findings to the user. |
| --- | ||
| name: community-report | ||
| description: Publish a battle report after significant evolution progress. | ||
| allowed-tools: Bash | ||
| --- | ||
| # Community Report | ||
| ## When to use | ||
| At session end, when significant progress was made: | ||
| - Notable ERR improvement (>10%) | ||
| - Milestone achieved (new badge, rank change) | ||
| - Successfully handled a complex error chain | ||
| - Significant token savings | ||
| ## How | ||
| 1. Gather evolution data: | ||
| `evolve_analyze({ ... })` — get recent capsules and metrics | ||
| 2. **Ask user for confirmation** — this creates public content. | ||
| 3. Post battle report: | ||
| ``` | ||
| community_post({ | ||
| boardId: "showcase", | ||
| title: "<Agent Name> — <achievement summary>", | ||
| content: "## Results\n- Success: X/Y\n- Token saved: Z\n- ERR improvement: W%\n\n## Strategy\nUsed [[gene:<gene-name>]] to handle <problem>...\n\n## Highlights\n<key moments>", | ||
| postType: "battleReport", | ||
| tags: ["<relevant-tags>"], | ||
| linkedGeneIds: ["<used-gene-ids>"], | ||
| linkedAgentId: "<agent-id>" | ||
| }) | ||
| ``` |
| --- | ||
| name: community-search | ||
| description: Search community Help Desk for solutions when stuck on a recurring error. | ||
| allowed-tools: Bash | ||
| --- | ||
| # Community Search | ||
| ## When to use | ||
| When you hit the same error type 2+ times AND evolve_analyze returns no useful strategies. | ||
| ## How | ||
| 1. Search the community Help Desk: | ||
| `community_search({ q: "<error message keywords>", boardId: "helpdesk", sort: "relevance" })` | ||
2. If results are found, read the top result:
| `community_detail({ postId: "<best match>" })` | ||
| 3. If a Gene is referenced in the solution: | ||
| `community_adopt({ geneId: "<recommended gene>", fromPostId: "<post>" })` | ||
| 4. If the solution works, upvote and comment with your results: | ||
| `community_vote({ targetType: "post", targetId: "<post>", value: 1 })` | ||
| `community_comment({ postId: "<post>", content: "Confirmed working. ERR improved by X%." })` | ||
| ## After | ||
If no results are found, consider using the community-ask skill to post a new Help Desk question.
| { | ||
| "name": "prismer", | ||
| "description": "Prismer Evolution — cross-agent learning network. Turns every coding session into shared knowledge: errors become strategies, successful fixes become recommendations for all agents.", | ||
| "version": "1.7.8", | ||
| "version": "1.8.0", | ||
| "author": { | ||
@@ -6,0 +6,0 @@ "name": "Prismer", |
+31
-1
@@ -1,3 +0,33 @@ | ||
| ## [1.7.8] - 2026-04-02 | ||
| ## [1.8.0] - 2026-04-04 | ||
| ### Added — **Community Skills (5 skills)** | ||
| - `skills/community-ask/SKILL.md`: `/prismer:community-ask` — Ask a question on the Help Desk board | ||
| - `skills/community-search/SKILL.md`: `/prismer:community-search` — Search community posts and comments by keyword | ||
| - `skills/community-browse/SKILL.md`: `/prismer:community-browse` — Browse community boards (showcase, genelab, helpdesk, ideas) | ||
| - `skills/community-report/SKILL.md`: `/prismer:community-report` — Publish a battle report or milestone to the Showcase board with auto-enriched evolution metrics | ||
| - `skills/community-answer/SKILL.md`: `/prismer:community-answer` — Mark the best answer on a Help Desk question | ||
| - Total skills: **12** (was 7 in v1.7.8) | ||
| ### Added — **Workspace Projection Renderer** | ||
| - `scripts/lib/renderer.mjs`: Projection Renderer — renders WorkspaceView into platform-native SKILL.md files (Claude Code, OpenCode, OpenClaw) | ||
| - `session-start.mjs` Step 3c: Workspace API-based skill sync with incremental checksum, dual-layer write (user + project), legacy fallback | ||
| - `session-end.mjs`: Detect locally-created skills (no `.prismer-meta.json`) and push to Prismer Cloud via `/api/im/skills/import` | ||
| ### Changed | ||
| - `.mcp.json` updated to `@prismer/mcp-server@1.8.0` (was `@1.7.7`) | ||
| - MCP server now provides 47 tools (was 33) — 15 community + 2 contact + 1 session checklist | ||
| ## [1.7.8] - 2026-04-03 | ||
| ### Added — **Enhanced Web Cache Pipeline** | ||
| - `scripts/lib/html-to-markdown.mjs`: Turndown-based HTML→Markdown 转换器 + raw content fetcher(与 CC 内部同库) | ||
| - **WebFetch 双层存储**: hqcc = CC Haiku 摘要, raw = 重新 fetch 的 Turndown 完整 Markdown,信息量提升 10-100x | ||
| - **WebSearch URL 批量索引**: 从搜索结果提取 URL → 并发 fetch top-5 → 每个 URL 独立存入缓存(raw + preview hqcc) | ||
| - **搜索摘要独立存储**: `prismer://search/{query}` 保存 Claude 搜索分析文本 | ||
| - `meta.hqccType` 标记区分: `haiku`(LLM 压缩)vs `preview`(截断预览,后续 WebFetch 可 upsert 升级) | ||
| - `meta.fromQuery` / `meta.queryTerms`: 建立 query→URL 索引关系,支持 Load API 搜索发现 | ||
| - 兼容 CC WebSearch 两种响应格式: 结构化 `results[]` 数组 + 序列化文本 `Links: [JSON]` | ||
| - 丰富 meta 信息: domain, title, originalBytes, rawMarkdownBytes, fetchedAt | ||
| - 新增 `turndown` 运行时依赖(首个 runtime dependency) | ||
| ### Added — **Dev Mode & Observability** | ||
@@ -4,0 +34,0 @@ - `scripts/dev.sh`: 本地开发模式启动脚本 — `--plugin-dir` 直接加载,修改后 `/clear` 即生效 |
+8
-1
| { | ||
| "name": "@prismer/claude-code-plugin", | ||
| "version": "1.7.8", | ||
| "version": "1.8.0", | ||
| "description": "Prismer Evolution plugin for Claude Code — auto-learning from every coding session", | ||
@@ -22,2 +22,5 @@ "keywords": [ | ||
| "author": "Prismer <dev@prismer.cloud>", | ||
| "bin": { | ||
| "prismer-plugin": "./scripts/cli.mjs" | ||
| }, | ||
| "files": [ | ||
@@ -27,2 +30,3 @@ ".claude-plugin/plugin.json", | ||
| "scripts/lib/", | ||
| "scripts/cli.mjs", | ||
| "scripts/session-start.mjs", | ||
@@ -51,2 +55,5 @@ "scripts/session-stop.mjs", | ||
| }, | ||
| "dependencies": { | ||
| "turndown": "^7.2.2" | ||
| }, | ||
| "devDependencies": { | ||
@@ -53,0 +60,0 @@ "vitest": "^3.1.1" |
+27
-10
@@ -1,4 +0,4 @@ | ||
| # @prismer/claude-code-plugin (v1.7.8) | ||
| # @prismer/claude-code-plugin (v1.8.0) | ||
| Prismer Evolution plugin for Claude Code (v3). Implements a **9-hook evolution architecture** that turns coding sessions into transferable knowledge — errors become learning strategies, successful fixes become shared recommendations across all agents. | ||
| Prismer Evolution plugin for Claude Code (v3). Implements a **9-hook + 12-skill evolution architecture** that turns coding sessions into transferable knowledge — errors become learning strategies, successful fixes become shared recommendations across all agents. | ||
@@ -21,3 +21,3 @@ ## Quick Start | ||
| ```bash | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.7.8 | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.8.0 | ||
| ``` | ||
@@ -62,3 +62,5 @@ | ||
| │ 4. Memory pull: inject persistent memory + file list │ | ||
| │ 5. Skill sync: download cloud-installed skills │ | ||
| │ 5. Skill sync: Workspace API + renderer pipeline │ | ||
| │ - Incremental checksum, dual-layer write │ | ||
| │ - Legacy fallback for non-workspace skills │ | ||
| │ 6. Pre-warm MCP server (background) │ | ||
@@ -79,3 +81,5 @@ │ 7. Health report: [Prismer] ✓ scope:X | sync:ok │ | ||
| │ │ | ||
| │ PostToolUse(WebFetch|WebSearch): cache save (silent) │ | ||
| │ PostToolUse(WebFetch|WebSearch): dual-layer cache │ | ||
| │ - hqcc: Haiku LLM summary, raw: Turndown Markdown │ | ||
| │ - WebSearch: batch URL indexing (top-5 concurrent) │ | ||
| │ │ | ||
@@ -94,2 +98,3 @@ │ PostToolUseFailure: direct failure signal extraction │ | ||
| │ SessionEnd: async fallback sync push + retry queue │ | ||
| │ - Local skill push: detect new skills → push to cloud │ | ||
| └────────────────────────────────────────────────────────┘ | ||
@@ -114,3 +119,3 @@ ``` | ||
| ### Skills (7 Slash Commands) | ||
| ### Skills (12 Slash Commands) | ||
@@ -126,2 +131,7 @@ | Skill | Description | | ||
| | `/prismer:plugin-dev` | Complete development guide for plugin contributors | | ||
| | `/prismer:community-ask` | Ask a question on the community Help Desk board | | ||
| | `/prismer:community-search` | Search community posts and comments by keyword | | ||
| | `/prismer:community-browse` | Browse community boards (showcase, genelab, helpdesk, ideas) | | ||
| | `/prismer:community-report` | Publish a battle report or milestone to the Showcase board | | ||
| | `/prismer:community-answer` | Mark the best answer on a Help Desk question | | ||
@@ -216,3 +226,5 @@ ### Observability | ||
| │ ├── resolve-config.mjs # Config resolution chain | ||
| │ └── signals.mjs # 12 shared signal patterns | ||
| │ ├── signals.mjs # 12 shared signal patterns | ||
| │ ├── renderer.mjs # Workspace Projection Renderer (gene→SKILL.md) | ||
| │ └── html-to-markdown.mjs # Turndown-based HTML→Markdown converter | ||
| ├── skills/ | ||
@@ -225,3 +237,8 @@ │ ├── prismer-setup/ # First-run setup | ||
| │ ├── debug-log/ # View debug logs | ||
| │ └── plugin-dev/ # Development guide | ||
| │ ├── plugin-dev/ # Development guide | ||
| │ ├── community-ask/ # Ask on Help Desk | ||
| │ ├── community-search/ # Search community | ||
| │ ├── community-browse/ # Browse boards | ||
| │ ├── community-report/ # Publish battle report | ||
| │ └── community-answer/ # Mark best answer | ||
| ├── templates/ | ||
@@ -270,3 +287,3 @@ │ ├── CLAUDE.md.template # CLAUDE.md template for projects | ||
| ```bash | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.7.8 | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.8.0 | ||
| ``` | ||
@@ -285,3 +302,3 @@ | ||
| - [@prismer/sdk](https://www.npmjs.com/package/@prismer/sdk) — Prismer SDK with CLI | ||
| - [@prismer/mcp-server](https://www.npmjs.com/package/@prismer/mcp-server) — MCP Server (33 tools) | ||
| - [@prismer/mcp-server](https://www.npmjs.com/package/@prismer/mcp-server) — MCP Server (47 tools) | ||
| - [@prismer/opencode-plugin](https://www.npmjs.com/package/@prismer/opencode-plugin) — OpenCode equivalent | ||
@@ -288,0 +305,0 @@ - [Prismer Cloud](https://prismer.cloud) — Knowledge Drive for AI Agents |
@@ -70,3 +70,3 @@ #!/usr/bin/env node | ||
| const error = input?.error || ''; | ||
| const error = String(input?.error || ''); | ||
@@ -104,2 +104,13 @@ // Skip trivial commands (Bash only) | ||
| // Suggest community search for recurring failures (≥3 occurrences of same signal) | ||
| for (const sig of detectedSignals) { | ||
| const escaped = sig.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); | ||
| const regex = new RegExp(`signal:${escaped}`, 'g'); | ||
| const sigCount = (existingContent.match(regex) || []).length + 1; | ||
| if (sigCount >= 3) { | ||
| appendJournal(` - hint: community_search may have solutions for "${sig}" (${sigCount} occurrences)`); | ||
| log.info('community-hint', { signal: sig, occurrences: sigCount }); | ||
| } | ||
| } | ||
| // Gene feedback on failure | ||
@@ -106,0 +117,0 @@ try { |
+228
-27
| #!/usr/bin/env node | ||
| /** | ||
| * PostToolUse hook — Silent context cache save for WebFetch + WebSearch | ||
| * PostToolUse hook — Enhanced context cache save for WebFetch + WebSearch | ||
| * | ||
| * WebFetch: save URL + fetched content (HQCC format) | ||
| * WebSearch: save each result URL + snippet (best-effort, structure may vary) | ||
| * WebFetch: | ||
| * - hqcc = Claude Haiku summary (from CC tool_response.result) | ||
| * - raw = Re-fetched full page content via Turndown | ||
| * - meta = original bytes, title, domain | ||
| * | ||
| * WebSearch: | ||
| * - Extracts discovered URLs from search results | ||
| * - Batch-fetches top URLs → stores each as independent cache entry (raw + hqcc) | ||
| * - Tags entries with original query terms → enables Load API query search | ||
| * - Also stores the search summary under prismer://search/{query} | ||
| * | ||
| * Always on. Fire-and-forget. Zero user impact. | ||
| * | ||
| * Stdin JSON: | ||
| * WebFetch: { tool_name, tool_input: { url }, tool_response: { url, code, result, bytes } } | ||
| * WebSearch: { tool_name, tool_input: { query }, tool_response: { result, ... } } | ||
| * Stdout: empty (silent) | ||
| */ | ||
@@ -19,2 +22,3 @@ | ||
| import { createLogger } from './lib/logger.mjs'; | ||
| import { fetchRawContent } from './lib/html-to-markdown.mjs'; | ||
@@ -36,2 +40,4 @@ const log = createLogger('post-web-save'); | ||
| // ── Helpers ──────────────────────────────────────────────────── | ||
| function isPublicUrl(url) { | ||
@@ -44,12 +50,89 @@ if (!url || typeof url !== 'string') return false; | ||
| function saveToCache(url, content) { | ||
| if (!content || content.length < 100) return; | ||
| log.info('cache-save', { url: url.slice(0, 120), bytes: content.length }); | ||
| fetch(`${baseUrl}/api/context/save`, { | ||
/**
 * Extract the hostname from a URL string.
 * Returns '' when the input cannot be parsed as a URL.
 */
function domainOf(url) {
  let host = '';
  try {
    host = new URL(url).hostname;
  } catch {
    // Malformed URL — fall back to the empty string.
  }
  return host;
}
| /** | ||
| * Extract URLs from WebSearch response (handles both CC output formats) | ||
| * | ||
| * Format A (structured): resp.results = [SearchResult | string] | ||
| * SearchResult = { tool_use_id, content: [{title, url}] } | ||
| * | ||
| * Format B (serialized text): resp.result = string containing | ||
| * "Links: [{...}]" blocks and/or markdown [title](url) links | ||
| */ | ||
/**
 * Extract {title, url} pairs from a WebSearch tool response.
 *
 * Handles both Claude Code output shapes:
 *  - Format A (structured): resp.results is an array mixing commentary
 *    strings with SearchResult objects ({ tool_use_id, content: [{title, url}] }).
 *  - Format B (serialized): resp.result is a single string containing
 *    "Links: [...]" JSON blocks and/or markdown [title](url) links.
 *
 * Results are deduplicated by URL; the earliest occurrence wins.
 */
function extractUrlsFromSearchResponse(resp) {
  const found = [];

  // Format A: walk the structured results array.
  const structured = resp?.results;
  if (Array.isArray(structured)) {
    for (const entry of structured) {
      if (entry === null || typeof entry !== 'object') continue;
      if (!Array.isArray(entry.content)) continue;
      for (const hit of entry.content) {
        if (hit.url && hit.title) found.push({ title: hit.title, url: hit.url });
      }
    }
  }

  // Format B: scan the serialized text form.
  const text = typeof resp?.result === 'string' ? resp.result : '';
  if (text) {
    // "Links: [JSON]" blocks (CC's mapToolResultToToolResultBlockParam format).
    for (const m of text.matchAll(/Links:\s*(\[[\s\S]*?\])(?:\n|$)/g)) {
      let parsed;
      try {
        parsed = JSON.parse(m[1]);
      } catch {
        continue; // malformed JSON block — ignore it
      }
      if (!Array.isArray(parsed)) continue;
      for (const hit of parsed) {
        if (hit.url && hit.title) found.push({ title: hit.title, url: hit.url });
      }
    }
    // Plain markdown links: [title](url)
    for (const m of text.matchAll(/\[([^\]]*)\]\((https?:\/\/[^)]+)\)/g)) {
      found.push({ title: m[1], url: m[2] });
    }
  }

  // Deduplicate by URL, keeping the earliest occurrence.
  const seen = new Set();
  const unique = [];
  for (const item of found) {
    if (!item.url || seen.has(item.url)) continue;
    seen.add(item.url);
    unique.push(item);
  }
  return unique;
}
| /** | ||
| * Get the text commentary from search results (for search summary storage) | ||
| */ | ||
/**
 * Pull the human-readable commentary text out of a WebSearch response.
 *
 * Structured responses (resp.results array) mix plain strings with result
 * objects; the strings are the commentary. Serialized responses expose a
 * single resp.result string. Returns '' when neither shape is present.
 */
function getSearchCommentary(resp) {
  const results = resp?.results;
  if (Array.isArray(results)) {
    const parts = [];
    for (const item of results) {
      if (typeof item === 'string') parts.push(item);
    }
    return parts.join('\n\n').trim();
  }
  return typeof resp?.result === 'string' ? resp.result : '';
}
| /** | ||
| * Save single item to cache. Returns the fetch promise for composability. | ||
| */ | ||
| function saveToCache({ url, hqcc, raw, meta }) { | ||
| if (!hqcc || hqcc.length < 100) return Promise.resolve(); | ||
| const payload = { url, hqcc }; | ||
| if (raw && raw.length > 100) payload.raw = raw; | ||
| if (meta) payload.meta = meta; | ||
| log.info('cache-save', { | ||
| url: url.slice(0, 120), | ||
| hqccBytes: hqcc.length, | ||
| rawBytes: raw?.length || 0, | ||
| }); | ||
| return fetch(`${baseUrl}/api/context/save`, { | ||
| method: 'POST', | ||
| headers: { | ||
| 'Content-Type': 'application/json', | ||
| Authorization: `Bearer ${apiKey}`, | ||
| }, | ||
| body: JSON.stringify({ url, hqcc: content }), | ||
| headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` }, | ||
| body: JSON.stringify(payload), | ||
| signal: AbortSignal.timeout(5000), | ||
@@ -61,23 +144,141 @@ }).catch((e) => { | ||
| // --- WebFetch: save URL + content --- | ||
| /** | ||
| * Batch save items to cache (max 50 per call). Returns the fetch promise. | ||
| */ | ||
| function saveBatchToCache(items) { | ||
| if (!items.length) return Promise.resolve(); | ||
| log.info('cache-save-batch', { count: items.length }); | ||
| return fetch(`${baseUrl}/api/context/save`, { | ||
| method: 'POST', | ||
| headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` }, | ||
| body: JSON.stringify({ items }), | ||
| signal: AbortSignal.timeout(8000), | ||
| }).catch((e) => { | ||
| log.warn('cache-save-batch-failed', { count: items.length, error: e.message }); | ||
| }); | ||
| } | ||
| // ── WebFetch: re-fetch raw content + save both ───────────────── | ||
| if (toolName === 'WebFetch') { | ||
| const url = resp?.url || input?.tool_input?.url; | ||
| const content = resp?.result; | ||
| const hqcc = resp?.result; | ||
| const code = resp?.code; | ||
| const ccBytes = resp?.bytes; | ||
| if (code === 200 && isPublicUrl(url) && content) { | ||
| saveToCache(url, content); | ||
| if (code === 200 && isPublicUrl(url) && hqcc) { | ||
| const rawPromise = fetchRawContent(url, 6000).catch(() => null); | ||
| rawPromise.then((rawResult) => { | ||
| const meta = { | ||
| source: 'claude-code-webfetch', | ||
| hqccType: 'haiku', // CC Haiku LLM compressed | ||
| domain: domainOf(url), | ||
| ccOriginalBytes: ccBytes, | ||
| hqccBytes: hqcc.length, | ||
| fetchedAt: new Date().toISOString(), | ||
| }; | ||
| if (rawResult) { | ||
| meta.rawBytes = rawResult.originalBytes; | ||
| meta.rawMarkdownBytes = rawResult.markdown.length; | ||
| if (rawResult.title) meta.title = rawResult.title; | ||
| meta.hasRaw = true; | ||
| saveToCache({ url, hqcc, raw: rawResult.markdown, meta }); | ||
| } else { | ||
| meta.hasRaw = false; | ||
| saveToCache({ url, hqcc, meta }); | ||
| } | ||
| }); | ||
| } | ||
| } | ||
| // --- WebSearch: save result as query-keyed cache --- | ||
| // ── WebSearch: batch-fetch discovered URLs → independent cache entries ── | ||
| if (toolName === 'WebSearch') { | ||
| const query = input?.tool_input?.query; | ||
| const result = resp?.result; | ||
| const discoveredUrls = extractUrlsFromSearchResponse(resp); | ||
| const commentary = getSearchCommentary(resp); | ||
| // Save the search result summary keyed by query URL | ||
| if (query && result && result.length > 100) { | ||
| const queryUrl = `prismer://search/${encodeURIComponent(query)}`; | ||
| saveToCache(queryUrl, result); | ||
| if (!query || discoveredUrls.length === 0) process.exit(0); | ||
| // Collect all save promises to ensure process stays alive until all complete | ||
| const savePromises = []; | ||
| // 1. Save search summary under prismer://search/{query} | ||
| if (commentary && commentary.length > 100) { | ||
| savePromises.push( | ||
| saveToCache({ | ||
| url: `prismer://search/${encodeURIComponent(query)}`, | ||
| hqcc: commentary, | ||
| meta: { | ||
| source: 'claude-code-websearch-summary', | ||
| query, | ||
| urlCount: discoveredUrls.length, | ||
| urls: discoveredUrls.slice(0, 20).map(u => u.url), | ||
| fetchedAt: new Date().toISOString(), | ||
| }, | ||
| }), | ||
| ); | ||
| } | ||
| // 2. Batch-fetch top URLs and store each as independent cache entry | ||
| const BATCH_SIZE = 5; | ||
| const targets = discoveredUrls.filter(u => isPublicUrl(u.url)).slice(0, BATCH_SIZE); | ||
| if (targets.length > 0) { | ||
| const batchPromise = Promise.allSettled( | ||
| targets.map(({ title, url }) => | ||
| fetchRawContent(url, 4000) | ||
| .then(raw => ({ title, url, raw })) | ||
| .catch(() => ({ title, url, raw: null })), | ||
| ), | ||
| ).then((settled) => { | ||
| const items = []; | ||
| for (const r of settled) { | ||
| if (r.status !== 'fulfilled' || !r.value.raw) continue; | ||
| const { title, url, raw } = r.value; | ||
| const md = raw.markdown; | ||
| if (!md || md.length < 200) continue; | ||
| // hqcc = preview (no LLM compression yet) | ||
| // Upgraded to Haiku summary when this URL is later WebFetch'd (upsert) | ||
| const preview = md.slice(0, 800).trim(); | ||
| const hqcc = `# ${raw.title || title || domainOf(url)}\n\n${preview}`; | ||
| items.push({ | ||
| url, | ||
| hqcc, | ||
| raw: md, | ||
| meta: { | ||
| source: 'claude-code-websearch', | ||
| hqccType: 'preview', // not LLM-compressed; upgradeable via WebFetch | ||
| fromQuery: query, | ||
| queryTerms: query.toLowerCase(), | ||
| title: raw.title || title, | ||
| domain: domainOf(url), | ||
| fetchedAt: new Date().toISOString(), | ||
| }, | ||
| }); | ||
| } | ||
| if (items.length > 0) { | ||
| log.info('websearch-indexed', { | ||
| query: query.slice(0, 100), | ||
| discovered: discoveredUrls.length, | ||
| fetched: targets.length, | ||
| indexed: items.length, | ||
| }); | ||
| return saveBatchToCache(items); | ||
| } | ||
| }); | ||
| savePromises.push(batchPromise); | ||
| } | ||
| // Wait for all saves to settle before process exits | ||
| Promise.allSettled(savePromises); | ||
| } |
+165
-62
@@ -20,2 +20,3 @@ #!/usr/bin/env node | ||
| import { fileURLToPath } from 'url'; | ||
| import { homedir } from 'os'; | ||
| import { resolveConfig } from './lib/resolve-config.mjs'; | ||
@@ -56,2 +57,13 @@ import { createLogger } from './lib/logger.mjs'; | ||
| // --- Savings summary --- | ||
| try { | ||
| const compressMatches = journal.match(/\b(context\/load|compress)\b/gi) || []; | ||
| const callCount = compressMatches.length; | ||
| if (callCount > 0) { | ||
| const tokensSaved = callCount * 40000; | ||
| const moneySaved = (tokensSaved / 1000) * 0.009; | ||
| log.info('savings', { callCount, tokensSaved, moneySaved: moneySaved.toFixed(2) }); | ||
| } | ||
| } catch {} | ||
| // If Stop hook triggered AND Claude called MCP tools, skip journal push to avoid | ||
@@ -121,79 +133,170 @@ // duplicate recording (server has no dedup). But if Stop hook triggered and | ||
| // --- Check if daemon is running --- | ||
| let daemonRunning = false; | ||
| try { | ||
| const daemonPortFile = join(homedir(), '.prismer', 'daemon.port'); | ||
| const portRaw = readFileSync(daemonPortFile, 'utf-8').trim(); | ||
| const port = parseInt(portRaw, 10); | ||
| if (port > 0) { | ||
| const controller = new AbortController(); | ||
| const timer = setTimeout(() => controller.abort(), 200); | ||
| timer.unref(); | ||
| const healthRes = await fetch(`http://127.0.0.1:${port}/health`, { signal: controller.signal }); | ||
| clearTimeout(timer); | ||
| daemonRunning = healthRes.ok; | ||
| } | ||
| } catch {} | ||
| // --- Async push to evolution network --- | ||
| log.info('sync-push-start', { outcomes: outcomes.length, signals: Object.keys(signalCounts).length }); | ||
| log.info('sync-push-start', { outcomes: outcomes.length, signals: Object.keys(signalCounts).length, daemon: daemonRunning }); | ||
| try { | ||
| let cursor = 0; | ||
| if (daemonRunning && outcomes.length > 0) { | ||
| // MUTUAL EXCLUSIVE: write to daemon outbox, do NOT POST | ||
| try { | ||
| const raw = readFileSync(CURSOR_FILE, 'utf8'); | ||
| cursor = JSON.parse(raw)?.cursor || 0; | ||
| } catch {} | ||
| const outboxFile = join(homedir(), '.prismer', 'cache', 'outbox.json'); | ||
| let outbox = []; | ||
| try { outbox = JSON.parse(readFileSync(outboxFile, 'utf-8')); } catch {} | ||
| outbox.push(...outcomes.map(o => ({ ...o, timestamp: Date.now() }))); | ||
| if (outbox.length > 500) outbox.splice(0, outbox.length - 500); | ||
| writeFileSync(outboxFile, JSON.stringify(outbox)); | ||
| log.info('outbox-write', { count: outcomes.length }); | ||
| // Clean retry queue since daemon handles retries | ||
| try { writeFileSync(join(CACHE_DIR, 'sync-retry-queue.json'), '[]'); } catch {} | ||
| } catch (err) { | ||
| log.error('outbox-write-failed', { err: err.message }); | ||
| daemonRunning = false; // Fall through to POST | ||
| } | ||
| } | ||
| const controller = new AbortController(); | ||
| const timer = setTimeout(() => controller.abort(), 5000); | ||
| timer.unref(); | ||
| if (!daemonRunning) { | ||
| try { | ||
| let cursor = 0; | ||
| try { | ||
| const raw = readFileSync(CURSOR_FILE, 'utf8'); | ||
| cursor = JSON.parse(raw)?.cursor || 0; | ||
| } catch {} | ||
| // Build signals array from journal signalCounts | ||
| const allSignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') })); | ||
| const controller = new AbortController(); | ||
| const timer = setTimeout(() => controller.abort(), 5000); | ||
| timer.unref(); | ||
| const pushOutcomes = outcomes.map(o => ({ | ||
| gene_id: o.geneId || o.title, | ||
| outcome: o.outcome, | ||
| summary: `Session-end sync: "${o.title}" ${o.outcome}`, | ||
| signals: allSignals.length > 0 ? allSignals : [{ type: 'session:end' }], | ||
| })); | ||
| // Build signals array from journal signalCounts | ||
| const allSignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') })); | ||
| const res = await fetch(`${baseUrl}/api/im/evolution/sync?scope=${encodeURIComponent(scope)}`, { | ||
| method: 'POST', | ||
| headers: { | ||
| 'Content-Type': 'application/json', | ||
| Authorization: `Bearer ${apiKey}`, | ||
| }, | ||
| body: JSON.stringify({ | ||
| push: pushOutcomes.length > 0 ? { outcomes: pushOutcomes } : undefined, | ||
| pull: { since: cursor }, | ||
| }), | ||
| signal: controller.signal, | ||
| }); | ||
| const pushOutcomes = outcomes.map(o => ({ | ||
| gene_id: o.geneId || o.title, | ||
| outcome: o.outcome, | ||
| summary: `Session-end sync: "${o.title}" ${o.outcome}`, | ||
| signals: allSignals.length > 0 ? allSignals : [{ type: 'session:end' }], | ||
| })); | ||
| clearTimeout(timer); | ||
| const res = await fetch(`${baseUrl}/api/im/evolution/sync?scope=${encodeURIComponent(scope)}`, { | ||
| method: 'POST', | ||
| headers: { | ||
| 'Content-Type': 'application/json', | ||
| Authorization: `Bearer ${apiKey}`, | ||
| }, | ||
| body: JSON.stringify({ | ||
| push: pushOutcomes.length > 0 ? { outcomes: pushOutcomes } : undefined, | ||
| pull: { since: cursor }, | ||
| }), | ||
| signal: controller.signal, | ||
| }); | ||
| if (res.ok) { | ||
| log.info('sync-push-ok', { outcomes: outcomes.length }); | ||
| const data = await res.json(); | ||
| if (data?.data?.pulled?.cursor) { | ||
| try { | ||
| writeFileSync(CURSOR_FILE, JSON.stringify({ | ||
| cursor: data.data.pulled.cursor, | ||
| scope, | ||
| ts: Date.now(), | ||
| })); | ||
| } catch {} | ||
| clearTimeout(timer); | ||
| if (res.ok) { | ||
| log.info('sync-push-ok', { outcomes: outcomes.length }); | ||
| const data = await res.json(); | ||
| if (data?.data?.pulled?.cursor) { | ||
| try { | ||
| writeFileSync(CURSOR_FILE, JSON.stringify({ | ||
| cursor: data.data.pulled.cursor, | ||
| scope, | ||
| ts: Date.now(), | ||
| })); | ||
| } catch {} | ||
| } | ||
| } | ||
| } catch (e) { | ||
| log.warn('sync-push-failed', { error: e.message, timeout: e.name === 'AbortError' }); | ||
| // Sync failed — queue for retry on next SessionStart | ||
| try { | ||
| const queueFile = join(CACHE_DIR, 'sync-retry-queue.json'); | ||
| let queue = []; | ||
| try { queue = JSON.parse(readFileSync(queueFile, 'utf8')); } catch {} | ||
| const retrySignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') })); | ||
| const pushOutcomes = outcomes.map(o => ({ | ||
| gene_id: o.geneId || o.title, | ||
| outcome: o.outcome, | ||
| summary: `Session-end sync (retry): "${o.title}" ${o.outcome}`, | ||
| signals: retrySignals.length > 0 ? retrySignals : [{ type: 'session:end' }], | ||
| })); | ||
| if (pushOutcomes.length > 0) { | ||
| queue.push({ outcomes: pushOutcomes, ts: Date.now() }); | ||
| // Keep max 10 entries to prevent unbounded growth | ||
| if (queue.length > 10) queue = queue.slice(-10); | ||
| writeFileSync(queueFile, JSON.stringify(queue)); | ||
| log.info('retry-queue-written', { entries: queue.length }); | ||
| } | ||
| } catch (qe) { | ||
| log.warn('retry-queue-error', { error: qe.message }); | ||
| } | ||
| } | ||
| } catch (e) { | ||
| log.warn('sync-push-failed', { error: e.message, timeout: e.name === 'AbortError' }); | ||
| // Sync failed — queue for retry on next SessionStart | ||
| } | ||
| // --- Local skill push: detect user-created skills → upload to cloud --- | ||
| if (apiKey) { | ||
| try { | ||
| const queueFile = join(CACHE_DIR, 'sync-retry-queue.json'); | ||
| let queue = []; | ||
| try { queue = JSON.parse(readFileSync(queueFile, 'utf8')); } catch {} | ||
| const retrySignals = Object.keys(signalCounts).map(type => ({ type: type.replace(/[()]/g, '') })); | ||
| const pushOutcomes = outcomes.map(o => ({ | ||
| gene_id: o.geneId || o.title, | ||
| outcome: o.outcome, | ||
| summary: `Session-end sync (retry): "${o.title}" ${o.outcome}`, | ||
| signals: retrySignals.length > 0 ? retrySignals : [{ type: 'session:end' }], | ||
| })); | ||
| if (pushOutcomes.length > 0) { | ||
| queue.push({ outcomes: pushOutcomes, ts: Date.now() }); | ||
| // Keep max 10 entries to prevent unbounded growth | ||
| if (queue.length > 10) queue = queue.slice(-10); | ||
| writeFileSync(queueFile, JSON.stringify(queue)); | ||
| log.info('retry-queue-written', { entries: queue.length }); | ||
| const { readdirSync, existsSync: exists, readFileSync: readFile } = await import('fs'); | ||
| const home = homedir(); | ||
| const skillsDir = join(home, '.claude', 'skills'); | ||
| if (exists(skillsDir)) { | ||
| const added = []; | ||
| for (const entry of readdirSync(skillsDir, { withFileTypes: true })) { | ||
| if (!entry.isDirectory()) continue; | ||
| const slug = entry.name; | ||
| const skillFile = join(skillsDir, slug, 'SKILL.md'); | ||
| const metaFile = join(skillsDir, slug, '.prismer-meta.json'); | ||
| if (!exists(skillFile)) continue; | ||
| // No .prismer-meta.json = user manually created this skill locally | ||
| if (!exists(metaFile)) { | ||
| const content = readFile(skillFile, 'utf8'); | ||
| added.push({ slug, content }); | ||
| } | ||
| } | ||
| if (added.length > 0) { | ||
| // Upload to Cloud as new Skill + auto-install (max 5 per session) | ||
| for (const { slug, content } of added.slice(0, 5)) { | ||
| try { | ||
| await fetch(`${baseUrl}/api/im/skills/import`, { | ||
| method: 'POST', | ||
| headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` }, | ||
| body: JSON.stringify({ | ||
| items: [{ | ||
| name: slug, | ||
| description: `Local skill: ${slug}`, | ||
| category: 'general', | ||
| source: 'local', | ||
| sourceId: `local:${slug}`, | ||
| content, | ||
| }], | ||
| }), | ||
| signal: AbortSignal.timeout(5000), | ||
| }); | ||
| } catch {} | ||
| } | ||
| log.info('local-skill-push', { count: added.length }); | ||
| } | ||
| } | ||
| } catch (qe) { | ||
| log.warn('retry-queue-error', { error: qe.message }); | ||
| } catch (e) { | ||
| log.warn('local-skill-push-failed', { error: e.message }); | ||
| } | ||
| } |
+169
-59
@@ -18,2 +18,3 @@ #!/usr/bin/env node | ||
| import { execFileSync, spawn } from 'child_process'; | ||
| import { homedir } from 'os'; | ||
| import { resolveConfig } from './lib/resolve-config.mjs'; | ||
@@ -102,3 +103,41 @@ import { createLogger, rotateLogIfNeeded } from './lib/logger.mjs'; | ||
| if (API_KEY) { | ||
| // --- Step 3a: Try daemon cache first (fast path, <10ms) --- | ||
| let usedDaemonCache = false; | ||
| try { | ||
| const daemonPortFile = join(homedir(), '.prismer', 'daemon.port'); | ||
| const portRaw = readFileSync(daemonPortFile, 'utf-8').trim(); | ||
| const port = parseInt(portRaw, 10); | ||
| if (port > 0) { | ||
| const controller = new AbortController(); | ||
| const timer = setTimeout(() => controller.abort(), 200); | ||
| timer.unref(); | ||
| const healthRes = await fetch(`http://127.0.0.1:${port}/health`, { signal: controller.signal }); | ||
| clearTimeout(timer); | ||
| if (healthRes.ok) { | ||
| const cacheFile = join(homedir(), '.prismer', 'cache', 'evolution.json'); | ||
| const cached = JSON.parse(readFileSync(cacheFile, 'utf-8')); | ||
| if (cached?.genes?.length > 0) { | ||
| const topGenes = cached.genes | ||
| .filter(g => (g.successCount || 0) + (g.failureCount || 0) >= 3) | ||
| .sort((a, b) => { | ||
| const aRate = (a.successCount || 0) / Math.max((a.successCount || 0) + (a.failureCount || 0), 1); | ||
| const bRate = (b.successCount || 0) / Math.max((b.successCount || 0) + (b.failureCount || 0), 1); | ||
| return bRate - aRate; | ||
| }) | ||
| .slice(0, 5); | ||
| if (topGenes.length > 0) { | ||
| health.genes = topGenes.length; | ||
| health.sync = 'daemon-cache'; | ||
| usedDaemonCache = true; | ||
| log.info('daemon-cache-hit', { genes: topGenes.length, cacheAge: Date.now() - (cached.ts || 0) }); | ||
| } | ||
| } | ||
| } | ||
| } | ||
| } catch { | ||
| // Daemon not running or cache miss — fall through to network sync | ||
| } | ||
| if (!usedDaemonCache && API_KEY) { | ||
| let cursor = 0; | ||
@@ -226,4 +265,4 @@ try { | ||
| // --- Step 3b1: MCP migration notice (v1.7.7 → v1.7.8+, one-time) --- | ||
| // v1.7.8 removed .mcp.json from npm package. Users upgrading from v1.7.7 lose MCP tools silently. | ||
| // --- Step 3b1: MCP migration notice (v1.7.7 → v1.7.8+, one-time) --- | ||
| // v1.7.8 removed .mcp.json from the npm package. Users upgrading from v1.7.7 lose MCP tools silently. | ||
| // Detect: .mcp-migrated marker absent + API key present (was a real user, not first install) | ||
@@ -240,6 +279,6 @@ if (API_KEY && eventType === 'startup') { | ||
| process.stdout.write([ | ||
| '\n[Prismer v1.7.8] MCP tools are now installed separately from the plugin.', | ||
| '\n[Prismer v1.8.0] MCP tools are now installed separately from the plugin.', | ||
| 'Hooks (auto-learning, stuck detection, sync) work without MCP.', | ||
| 'To restore MCP tools (evolve_analyze, memory_write, etc.), run:', | ||
| ' claude mcp add prismer -- npx -y @prismer/mcp-server@1.7.8', | ||
| ' claude mcp add prismer -- npx -y @prismer/mcp-server@1.8.0', | ||
| ].join('\n')); | ||
@@ -297,71 +336,113 @@ log.info('mcp-migration-notice'); | ||
| // --- Step 3c: Skill sync (download cloud-installed skills to local) --- | ||
| // --- Step 3c: Workspace-aware skill projection --- | ||
| if (API_KEY && eventType === 'startup') { | ||
| try { | ||
| const controller2 = new AbortController(); | ||
| const timer2 = setTimeout(() => controller2.abort(), 3000); | ||
| timer2.unref(); | ||
| let synced = 0; | ||
| let usedLegacy = false; | ||
| const installedRes = await fetch(`${BASE_URL}/api/im/skills/installed`, { | ||
| headers: { Authorization: `Bearer ${API_KEY}`, 'Content-Type': 'application/json' }, | ||
| signal: controller2.signal, | ||
| }); | ||
| clearTimeout(timer2); | ||
| // Try Workspace API first (requires Platform PR 2) | ||
| let localFiles = null; | ||
| try { | ||
| const wsRes = await fetch( | ||
| `${BASE_URL}/api/im/workspace?scope=${encodeURIComponent(scope)}&slots=strategies`, | ||
| { | ||
| headers: { Authorization: `Bearer ${API_KEY}` }, | ||
| signal: AbortSignal.timeout(5000), | ||
| }, | ||
| ); | ||
| if (installedRes.ok) { | ||
| const installedData = await installedRes.json(); | ||
| const skills = installedData?.data || []; | ||
| if (wsRes.ok) { | ||
| const wsData = await wsRes.json(); | ||
| const workspace = wsData?.data; | ||
| if (workspace?.strategies?.length) { | ||
| const { renderForClaudeCode } = await import('./lib/renderer.mjs'); | ||
| localFiles = renderForClaudeCode(workspace); | ||
| } | ||
| } else if (wsRes.status !== 404) { | ||
| log.warn('workspace-api-error', { status: wsRes.status }); | ||
| } | ||
| // 404 = old backend without workspace API, fall through to legacy | ||
| } catch (e) { | ||
| log.warn('workspace-api-failed', { error: e.message }); | ||
| } | ||
| if (skills.length > 0) { | ||
| const { homedir } = await import('os'); | ||
| const home = homedir(); | ||
| const skillsDir = join(home, '.claude', 'skills'); | ||
| // Fallback: legacy /skills/installed → per-skill content fetch | ||
| if (!localFiles) { | ||
| usedLegacy = true; | ||
| try { | ||
| const listRes = await fetch(`${BASE_URL}/api/im/skills/installed`, { | ||
| headers: { Authorization: `Bearer ${API_KEY}` }, | ||
| signal: AbortSignal.timeout(3000), | ||
| }); | ||
| if (listRes.ok) { | ||
| const listData = await listRes.json(); | ||
| const skills = listData?.data?.skills || listData?.data || []; | ||
| localFiles = []; | ||
| for (const entry of skills) { | ||
| const slug = entry.skill?.slug || entry.slug; | ||
| if (!slug) continue; | ||
| const safeSlug = slug.replace(/[^a-zA-Z0-9_-]/g, ''); | ||
| try { | ||
| const contentRes = await fetch(`${BASE_URL}/api/im/skills/${encodeURIComponent(slug)}/content`, { | ||
| headers: { Authorization: `Bearer ${API_KEY}` }, | ||
| signal: AbortSignal.timeout(2000), | ||
| }); | ||
| if (contentRes.ok) { | ||
| const contentData = await contentRes.json(); | ||
| const content = contentData?.data?.content; | ||
| if (content) { | ||
| localFiles.push({ | ||
| relativePath: `skills/${safeSlug}/SKILL.md`, | ||
| content, | ||
| meta: { sourceSlot: 'legacy', sourceId: slug, scope, checksum: '' }, | ||
| }); | ||
| } | ||
| } | ||
| } catch {} | ||
| } | ||
| } | ||
| } catch (e) { | ||
| log.warn('legacy-skill-sync-failed', { error: e.message }); | ||
| } | ||
| } | ||
| let synced = 0; | ||
| for (const entry of skills) { | ||
| const skill = entry?.skill || entry; | ||
| const slug = skill?.slug; | ||
| if (!slug || typeof slug !== 'string') continue; | ||
| // Write files to disk (dual-layer: user + project) | ||
| if (localFiles?.length) { | ||
| const home = homedir(); | ||
| const userSkillsDir = join(home, '.claude', 'skills'); | ||
| const projectSkillsDir = existsSync(join(process.cwd(), '.claude')) | ||
| ? join(process.cwd(), '.claude', 'skills') | ||
| : null; | ||
| // Sanitize slug (prevent directory traversal) | ||
| const safeSlug = slug.replace(/[^a-z0-9_-]/gi, '-'); | ||
| const skillDir = join(skillsDir, safeSlug); | ||
| const skillFile = join(skillDir, 'SKILL.md'); | ||
| for (const file of localFiles) { | ||
| const targets = [join(userSkillsDir, file.relativePath)]; | ||
| if (projectSkillsDir) targets.push(join(projectSkillsDir, file.relativePath)); | ||
| // Skip if already exists locally | ||
| try { | ||
| readFileSync(skillFile, 'utf8'); | ||
| continue; // File exists — skip | ||
| } catch { | ||
| // File doesn't exist — download and write | ||
| } | ||
| for (const target of targets) { | ||
| const metaPath = join(dirname(target), '.prismer-meta.json'); | ||
| // Fetch content | ||
| try { | ||
| const contentRes = await fetch(`${BASE_URL}/api/im/skills/${encodeURIComponent(slug)}/content`, { | ||
| headers: { Authorization: `Bearer ${API_KEY}` }, | ||
| signal: AbortSignal.timeout(2000), | ||
| }); | ||
| if (contentRes.ok) { | ||
| const contentData = await contentRes.json(); | ||
| const content = contentData?.data?.content; | ||
| if (content) { | ||
| mkdirSync(skillDir, { recursive: true }); | ||
| writeFileSync(skillFile, content, 'utf8'); | ||
| synced++; | ||
| } | ||
| } | ||
| } catch { | ||
| // Skip this skill on error | ||
| // Incremental: compare checksum (skip for legacy which has no checksum) | ||
| if (file.meta.checksum) { | ||
| let existing = null; | ||
| try { existing = JSON.parse(readFileSync(metaPath, 'utf8')); } catch {} | ||
| if (existing?.checksum === file.meta.checksum) continue; | ||
| } else { | ||
| // Legacy path: skip if SKILL.md already exists | ||
| if (existsSync(target)) continue; | ||
| } | ||
| } | ||
| if (synced > 0) { | ||
| process.stdout.write(`\n[Prismer Skills] Synced ${synced} skill(s) to ~/.claude/skills/`); | ||
| mkdirSync(dirname(target), { recursive: true }); | ||
| writeFileSync(target, file.content, 'utf8'); | ||
| writeFileSync(metaPath, JSON.stringify({ ...file.meta, syncedAt: new Date().toISOString() })); | ||
| synced++; | ||
| } | ||
| health.synced = synced; | ||
| health.skills = 'ok'; | ||
| } | ||
| } | ||
| if (synced > 0) { | ||
| process.stdout.write(`\n[Prismer Skills] Synced ${synced} file(s)${usedLegacy ? ' (legacy)' : ''}`); | ||
| } | ||
| health.synced = synced; | ||
| health.skills = 'ok'; | ||
| } catch (e) { | ||
@@ -373,2 +454,31 @@ log.warn('skill-sync-failed', { error: e.message }); | ||
| // --- Step 3d: Community context (trending discussions, optional) --- | ||
| if (API_KEY && eventType === 'startup') { | ||
| try { | ||
| const commRes = await fetch(`${BASE_URL}/api/im/community/stats`, { | ||
| headers: { Authorization: `Bearer ${API_KEY}` }, | ||
| signal: AbortSignal.timeout(1500), | ||
| }); | ||
| if (commRes.ok) { | ||
| const commData = await commRes.json(); | ||
| const stats = commData?.data; | ||
| if (stats && (stats.postsToday > 0 || stats.activeAuthors7d > 0)) { | ||
| const trendingTags = (stats.trendingTags || []) | ||
| .slice(0, 3) | ||
| .map((t) => `#${t.name}`) | ||
| .join(' '); | ||
| process.stdout.write( | ||
| `\n[Prismer Community] ${stats.postsToday} posts today, ${stats.activeAuthors7d} active authors (7d)` + | ||
| (trendingTags ? ` | Trending: ${trendingTags}` : '') + | ||
| `\nUse community_browse / community_search MCP tools to participate.` | ||
| ); | ||
| } | ||
| } | ||
| } catch { | ||
| // Community context is optional — skip silently | ||
| } | ||
| } | ||
| // --- Step 4: Pre-warm MCP server (background, non-blocking, startup only) --- | ||
@@ -375,0 +485,0 @@ |
@@ -51,5 +51,5 @@ --- | ||
| ```bash | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.7.8 | ||
| claude mcp add prismer -- npx -y @prismer/mcp-server@1.8.0 | ||
| ``` | ||
| If no: skip. Plugin works fine without MCP — hooks handle sync/journal/stuck-detection independently. |
Network access
Supply chain risk: This module accesses the network.
Found 2 instances in 1 package
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 9 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
AI-detected potential code anomaly
Supply chain risk: AI has identified unusual behaviors that may pose a security risk.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain risk: Package contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
Network access
Supply chain risk: This module accesses the network.
Found 2 instances in 1 package
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 9 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain risk: Package contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
155124
39.87%40
25%2716
49.56%300
6.01%1
Infinity%42
5%31
82.35%+ Added
+ Added
+ Added