New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details
Socket
Book a DemoSign in
Socket

wmdev

Package Overview
Dependencies
Maintainers
1
Versions
11
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

wmdev - npm Package Compare versions

Comparing version
0.2.1
to
0.2.2
backend/dist/server.js

Sorry, the diff of this file is too big to display

+1
-1

@@ -129,3 +129,3 @@ #!/usr/bin/env bun

const backendEntry = join(PKG_ROOT, "backend", "src", "server.ts");
const backendEntry = join(PKG_ROOT, "backend", "dist", "server.js");
const staticDir = join(PKG_ROOT, "frontend", "dist");

@@ -132,0 +132,0 @@

{
"name": "wmdev",
"version": "0.2.1",
"version": "0.2.2",
"description": "Web dashboard for workmux — browser UI with embedded terminals, PR monitoring, and CI integration",

@@ -30,3 +30,4 @@ "type": "module",

"build": "cd frontend && bun run build",
"prepublishOnly": "bun run build",
"build:backend": "bun build backend/src/server.ts --target=bun --outfile=backend/dist/server.js",
"prepublishOnly": "bun run build && bun run build:backend",
"test": "bun run --cwd backend test && bun run --cwd frontend test",

@@ -37,5 +38,3 @@ "test:coverage": "bun run --cwd backend test --coverage && bun run --cwd frontend test:coverage"

"bin/",
"backend/src/*.ts",
"backend/src/lib/",
"backend/tsconfig.json",
"backend/dist/",
"frontend/dist/"

@@ -42,0 +41,0 @@ ],

@@ -175,16 +175,5 @@ # wmdev

```
Browser (localhost:5111)
├── REST API (/api/*) ──┐
└── WebSocket (/ws/*) ──┤
Backend (Bun HTTP server)
┌──────────────┼──────────────┐
│ │ │
workmux CLI tmux sessions Docker
(worktree (terminal (sandbox
lifecycle) access) containers)
```
<p align="center">
<img src="docs/architecture.svg" alt="Architecture diagram" />
</p>

@@ -200,5 +189,5 @@ **Backend** — Bun/TypeScript HTTP + WebSocket server (`backend/src/server.ts`):

```
Browser (xterm.js) ←— WebSocket —→ Backend ←— stdin/stdout pipes —→ script (PTY) ←— tmux attach —→ tmux grouped session
```
<p align="center">
<img src="docs/terminal-streaming.svg" alt="Terminal streaming diagram" />
</p>

@@ -205,0 +194,0 @@ When a worktree is selected, the frontend opens a WebSocket to `/ws/<worktree>`. The backend spawns a PTY via `script` and attaches to a **grouped tmux session** — a separate view into the same windows. This allows the dashboard and a real terminal to view the same worktree simultaneously.

import { join } from "node:path";
import { parse as parseYaml } from "yaml";
/** One dev service whose per-worktree port is exposed through the env var named by `portEnv`. */
export interface ServiceConfig {
  name: string;
  // Env var key (written to .env.local) that carries this service's port.
  portEnv: string;
  // Base port for slot 0 (the main worktree); when absent, no port is allocated (see allocatePorts).
  portStart?: number;
  // Port increment per worktree slot; defaults to 1 where consumed.
  portStep?: number;
}
/** Base agent profile settings shared by all profile kinds. */
export interface ProfileConfig {
  name: string;
  systemPrompt?: string;
  // Host env var names forwarded into the agent environment (validated in buildDockerRunArgs).
  envPassthrough?: string[];
}
/** Sandbox (Docker) profile: adds the container image and optional extra bind mounts. */
export interface SandboxProfileConfig extends ProfileConfig {
  image: string;
  // guestPath defaults to hostPath; mounts are read-only unless writable is true.
  extraMounts?: { hostPath: string; guestPath?: string; writable?: boolean }[];
}
/** A secondary repo monitored for PRs under a short alias. */
export interface LinkedRepoConfig {
  repo: string;
  alias: string;
}
/** Fully-resolved configuration (see loadConfig / DEFAULT_CONFIG). */
export interface WmdevConfig {
  services: ServiceConfig[];
  profiles: {
    default: ProfileConfig;
    sandbox?: SandboxProfileConfig;
  };
  // True when .workmux.yaml configures auto_name.model (see hasAutoName).
  autoName: boolean;
  linkedRepos: LinkedRepoConfig[];
}
// Fallback configuration used when .wmdev.yaml is absent, empty, or unreadable.
const DEFAULT_CONFIG: WmdevConfig = {
  services: [],
  profiles: { default: { name: "default" } },
  autoName: false,
  linkedRepos: [],
};
/**
 * Check whether the repo's .workmux.yaml configures auto_name.
 * Returns true only when `auto_name.model` is set; any read or parse
 * failure is treated as "not configured".
 */
function hasAutoName(dir: string): boolean {
  try {
    const yamlPath = join(gitRoot(dir), ".workmux.yaml");
    const proc = Bun.spawnSync(["cat", yamlPath], { stdout: "pipe", stderr: "pipe" });
    const raw = new TextDecoder().decode(proc.stdout).trim();
    if (raw.length === 0) return false;
    const doc = parseYaml(raw) as Record<string, unknown>;
    const autoName = doc.auto_name as Record<string, unknown> | undefined;
    return Boolean(autoName?.model);
  } catch {
    return false;
  }
}
/** Resolve the git repository root from a directory; falls back to `dir` when git yields nothing. */
export function gitRoot(dir: string): string {
  const proc = Bun.spawnSync(["git", "rev-parse", "--show-toplevel"], { stdout: "pipe", cwd: dir });
  const root = new TextDecoder().decode(proc.stdout).trim();
  return root.length > 0 ? root : dir;
}
/**
 * Load .wmdev.yaml from the git root, merging with defaults.
 *
 * Never throws: any read/parse/spawn failure falls back to DEFAULT_CONFIG.
 * `services`/`linkedRepos` are only accepted when they are arrays, and the
 * sandbox profile is only kept when it has both a name and an image.
 */
export function loadConfig(dir: string): WmdevConfig {
  try {
    const root = gitRoot(dir);
    const filePath = join(root, ".wmdev.yaml");
    // NOTE(review): reads via a `cat` subprocess rather than a filesystem
    // API — presumably for symmetry with the other spawn helpers; confirm intent.
    const result = Bun.spawnSync(["cat", filePath], { stdout: "pipe" });
    const text = new TextDecoder().decode(result.stdout).trim();
    if (!text) return DEFAULT_CONFIG;
    const parsed = parseYaml(text) as Record<string, unknown>;
    const profiles = parsed.profiles as Record<string, unknown> | undefined;
    const defaultProfile = profiles?.default as ProfileConfig | undefined;
    const sandboxProfile = profiles?.sandbox as SandboxProfileConfig | undefined;
    // auto_name lives in .workmux.yaml (not .wmdev.yaml), hence the separate probe.
    const autoName = hasAutoName(dir);
    // Accept only well-formed linked-repo entries; alias defaults to the repo's last path segment.
    const linkedRepos: LinkedRepoConfig[] = Array.isArray(parsed.linkedRepos)
      ? (parsed.linkedRepos as Array<Record<string, unknown>>)
          .filter((r) => typeof r === "object" && r !== null && typeof r.repo === "string")
          .map((r) => ({
            repo: r.repo as string,
            alias: typeof r.alias === "string" ? r.alias : (r.repo as string).split("/").pop()!,
          }))
      : [];
    return {
      services: Array.isArray(parsed.services) ? parsed.services as ServiceConfig[] : DEFAULT_CONFIG.services,
      profiles: {
        // A default profile without a name is considered malformed; keep the built-in.
        default: defaultProfile?.name ? defaultProfile : DEFAULT_CONFIG.profiles.default,
        ...(sandboxProfile?.name && sandboxProfile?.image ? { sandbox: sandboxProfile } : {}),
      },
      autoName,
      linkedRepos,
    };
  } catch {
    // Missing file, YAML error, or spawn failure → defaults.
    return DEFAULT_CONFIG;
  }
}
/** Expand ${VAR} placeholders in a template string using an env map; unknown vars become "". */
export function expandTemplate(template: string, env: Record<string, string>): string {
  const substitute = (_match: string, key: string): string => env[key] ?? "";
  return template.replace(/\$\{(\w+)\}/g, substitute);
}
/**
* Docker container lifecycle for sandbox worktrees.
*
* Replaces workmux's `-S` sandbox flag with direct `docker run -p` management.
* Containers run as root with published ports (no socat needed).
*/
import { access, constants, stat } from "node:fs/promises";
import { type SandboxProfileConfig, type ServiceConfig } from "./config";
import { log } from "./lib/log";
import { loadRpcSecret } from "./rpc-secret";
// Hard cap on how long `docker run` may take before being killed (slow pulls, hung daemon).
const DOCKER_RUN_TIMEOUT_MS = 60_000;
/** Check if a path (file or directory) exists on the host. */
async function pathExists(p: string): Promise<boolean> {
  try {
    await stat(p);
    return true;
  } catch {
    return false;
  }
}
/**
 * Sanitise a branch name into a Docker-safe segment.
 * Docker container names must match [a-zA-Z0-9][a-zA-Z0-9_.\-]*.
 * The "wm-" prefix (3) and "-<13-digit-ts>" suffix (14) consume 17 chars,
 * leaving 46 for the branch segment (total ≤ 63).
 */
function sanitiseBranchForName(branch: string): string {
  let segment = branch.replace(/[^a-zA-Z0-9_.-]/g, "-"); // map disallowed chars to "-"
  segment = segment.replace(/-{2,}/g, "-");              // collapse runs of dashes
  segment = segment.replace(/^[^a-zA-Z0-9]+/, "");       // name must start alphanumeric
  segment = segment.replace(/-+$/, "");                  // drop trailing dashes
  segment = segment.slice(0, 46);                        // cap length
  return segment.length > 0 ? segment : "x";             // never return an empty segment
}
/** Container naming: wm-{sanitised-branch}-{timestamp} */
function containerName(branch: string): string {
  const stamp = Date.now();
  const segment = sanitiseBranchForName(branch);
  return `wm-${segment}-${stamp}`;
}
/**
 * Return true if s is a valid port number string (integer 1–65535).
 *
 * Requires plain decimal digits: `Number()` alone would also accept
 * forms like " 80", "0x50", "+80" or "8e1", which are numerically in
 * range but are not valid literals in a `docker run -p` publish spec.
 */
function isValidPort(s: string): boolean {
  if (!/^\d{1,5}$/.test(s)) return false;
  const n = Number(s);
  return n >= 1 && n <= 65535;
}
/** Return true if s is a valid environment variable key. */
function isValidEnvKey(s: string): boolean {
  const envKeyPattern = /^[A-Za-z_][A-Za-z0-9_]*$/;
  return envKeyPattern.test(s);
}
/** Inputs required to launch a sandbox container for one worktree. */
export interface LaunchContainerOpts {
  // Git branch backing the worktree; used for container naming and RPC identity.
  branch: string;
  // Absolute path of the worktree checkout (bind-mounted read-write).
  wtDir: string;
  // Absolute path of the main repository (mounted read-only, plus its .git).
  mainRepoDir: string;
  sandboxConfig: SandboxProfileConfig;
  services: ServiceConfig[];
  // Key→value pairs from the worktree's .env.local (service ports, etc.).
  env: Record<string, string>;
}
/**
 * Return the Python source of the in-container `workmux` stub.
 *
 * The stub forwards `workmux <cmd> [args…]` invocations to the host's
 * /rpc/workmux endpoint (see the WORKMUX_RPC_* env vars injected by
 * buildDockerRunArgs) and mirrors the host result to stdout/stderr with
 * a matching exit code.
 *
 * The embedded Python is indentation-sensitive; this version carries
 * conventional 4-space indentation so the stub parses — a flush-left
 * body would raise IndentationError inside the container.
 */
function buildWorkmuxStub(): string {
  return `#!/usr/bin/env python3
import sys, json, os, urllib.request
cmd = sys.argv[1] if len(sys.argv) > 1 else ""
args = sys.argv[2:]
host = os.environ.get("WORKMUX_RPC_HOST", "host.docker.internal")
port = os.environ.get("WORKMUX_RPC_PORT", "5111")
token = os.environ.get("WORKMUX_RPC_TOKEN", "")
branch = os.environ.get("WORKMUX_BRANCH", "")
payload = {"command": cmd, "args": args, "branch": branch}
data = json.dumps(payload).encode()
req = urllib.request.Request(
    f"http://{host}:{port}/rpc/workmux",
    data=data,
    headers={"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
)
try:
    with urllib.request.urlopen(req, timeout=30) as resp:
        result = json.loads(resp.read())
    if result.get("ok"):
        print(result.get("output", ""))
    else:
        print(result.get("error", "RPC failed"), file=sys.stderr)
        sys.exit(1)
except Exception as e:
    print(f"workmux rpc error: {e}", file=sys.stderr)
    sys.exit(1)
`;
}
/**
 * Build the `docker run` argument list from the given options.
 *
 * This is a pure function — all I/O (path existence checks, env reads) must
 * be resolved by the caller and passed in as parameters.
 *
 * @param opts - Launch options (branch, dirs, config, env).
 * @param existingPaths - Set of host paths confirmed to exist; used to decide
 * which credential mounts to include.
 * @param home - Resolved home directory (e.g. Bun.env.HOME ?? "/root").
 * @param name - Pre-generated container name.
 * @param rpcSecret - Bearer token the in-container workmux stub presents to the host.
 * @param rpcPort - Host port of the dashboard RPC endpoint.
 * @param sshAuthSock - Host SSH agent socket path, or undefined to skip forwarding.
 * @param hostUid - UID the container runs as.
 * @param hostGid - GID the container runs as.
 * @returns The full argv, starting with ["docker", "run", …].
 */
export function buildDockerRunArgs(
  opts: LaunchContainerOpts,
  existingPaths: Set<string>,
  home: string,
  name: string,
  rpcSecret: string,
  rpcPort: string,
  sshAuthSock: string | undefined,
  hostUid: number,
  hostGid: number,
): string[] {
  const { wtDir, mainRepoDir, sandboxConfig, services, env } = opts;
  const args: string[] = [
    "docker", "run", "-d",
    "--name", name,
    "-w", wtDir,
    "--add-host", "host.docker.internal:host-gateway",
    // Run as the host user so files created in mounted dirs (.git, worktree)
    // are owned by the right UID/GID instead of root.
    "--user", `${hostUid}:${hostGid}`,
  ];
  // Publish service ports bound to loopback only to avoid exposing dev services
  // on external interfaces. Skip invalid or duplicate port values.
  const seenPorts = new Set<string>();
  for (const svc of services) {
    const port = env[svc.portEnv];
    if (!port) continue;
    if (!isValidPort(port)) {
      log.warn(`[docker] skipping invalid port for ${svc.portEnv}: ${JSON.stringify(port)}`);
      continue;
    }
    if (seenPorts.has(port)) continue;
    seenPorts.add(port);
    args.push("-p", `127.0.0.1:${port}:${port}`);
  }
  // Core env vars — defined first so passthrough cannot override them.
  const reservedKeys = new Set([
    "HOME", "TERM", "IS_SANDBOX", "SSH_AUTH_SOCK",
    "GIT_CONFIG_COUNT", "GIT_CONFIG_KEY_0", "GIT_CONFIG_VALUE_0",
    "GIT_CONFIG_KEY_1", "GIT_CONFIG_VALUE_1",
  ]);
  // HOME stays /root even with a non-root --user: the credential/config
  // mounts below all target /root paths.
  args.push("-e", "HOME=/root");
  args.push("-e", "TERM=xterm-256color");
  args.push("-e", "IS_SANDBOX=1");
  // Git safe.directory config so git works in mounted worktrees.
  args.push("-e", "GIT_CONFIG_COUNT=2");
  args.push("-e", `GIT_CONFIG_KEY_0=safe.directory`);
  args.push("-e", `GIT_CONFIG_VALUE_0=${wtDir}`);
  args.push("-e", `GIT_CONFIG_KEY_1=safe.directory`);
  args.push("-e", `GIT_CONFIG_VALUE_1=${mainRepoDir}`);
  // Pass through host env vars listed in sandboxConfig.
  // NOTE(review): this reads Bun.env directly, despite the header's "pure
  // function" claim — callers relying on purity should pass these values in.
  if (sandboxConfig.envPassthrough) {
    for (const key of sandboxConfig.envPassthrough) {
      if (!isValidEnvKey(key)) {
        log.warn(`[docker] skipping invalid envPassthrough key: ${JSON.stringify(key)}`);
        continue;
      }
      if (reservedKeys.has(key)) continue;
      const val = Bun.env[key];
      if (val !== undefined) {
        args.push("-e", `${key}=${val}`);
      }
    }
  }
  // Pass through .env.local vars; skip reserved keys and invalid key names.
  for (const [key, val] of Object.entries(env)) {
    if (!isValidEnvKey(key)) {
      log.warn(`[docker] skipping invalid .env.local key: ${JSON.stringify(key)}`);
      continue;
    }
    if (reservedKeys.has(key)) continue;
    args.push("-e", `${key}=${val}`);
  }
  // Core mounts: worktree read-write; main repo read-only with its .git
  // also mounted — presumably so .git stays writable under the :ro repo
  // mount (TODO confirm Docker nested-mount semantics).
  args.push("-v", `${wtDir}:${wtDir}`);
  args.push("-v", `${mainRepoDir}/.git:${mainRepoDir}/.git`);
  args.push("-v", `${mainRepoDir}:${mainRepoDir}:ro`);
  // Claude config mounts.
  args.push("-v", `${home}/.claude:/root/.claude`);
  args.push("-v", `${home}/.claude.json:/root/.claude.json`);
  // Compute which guest paths are already covered by extraMounts so credential
  // mounts for the same path can be skipped (extraMounts win).
  const extraMountGuestPaths = new Set<string>();
  if (sandboxConfig.extraMounts) {
    for (const mount of sandboxConfig.extraMounts) {
      const hostPath = mount.hostPath.replace(/^~/, home);
      if (!hostPath.startsWith("/")) continue;
      extraMountGuestPaths.add(mount.guestPath ?? hostPath);
    }
  }
  // Git/GitHub credential mounts (read-only, only if they exist on host and
  // are not overridden by an extraMount for the same guest path).
  const credentialMounts = [
    { hostPath: `${home}/.gitconfig`, guestPath: "/root/.gitconfig" },
    { hostPath: `${home}/.ssh`, guestPath: "/root/.ssh" },
    { hostPath: `${home}/.config/gh`, guestPath: "/root/.config/gh" },
  ];
  for (const { hostPath, guestPath } of credentialMounts) {
    if (extraMountGuestPaths.has(guestPath)) continue;
    if (existingPaths.has(hostPath)) {
      args.push("-v", `${hostPath}:${guestPath}:ro`);
    }
  }
  // SSH agent forwarding — mount the socket so git+ssh works with
  // passphrase-protected keys and hardware tokens. Use --mount instead
  // of -v because Docker's -v tries to mkdir socket paths and fails.
  if (sshAuthSock && existingPaths.has(sshAuthSock)) {
    args.push("--mount", `type=bind,source=${sshAuthSock},target=${sshAuthSock}`);
    args.push("-e", `SSH_AUTH_SOCK=${sshAuthSock}`);
  }
  // Extra mounts from config; require absolute host paths after ~ expansion.
  if (sandboxConfig.extraMounts) {
    for (const mount of sandboxConfig.extraMounts) {
      const hostPath = mount.hostPath.replace(/^~/, home);
      if (!hostPath.startsWith("/")) {
        log.warn(`[docker] skipping extra mount with non-absolute host path: ${JSON.stringify(hostPath)}`);
        continue;
      }
      const guestPath = mount.guestPath ?? hostPath;
      const suffix = mount.writable ? "" : ":ro";
      args.push("-v", `${hostPath}:${guestPath}${suffix}`);
    }
  }
  // RPC env vars so workmux stub inside the container can reach the host.
  args.push("-e", `WORKMUX_RPC_HOST=host.docker.internal`);
  args.push("-e", `WORKMUX_RPC_PORT=${rpcPort}`);
  args.push("-e", `WORKMUX_RPC_TOKEN=${rpcSecret}`);
  args.push("-e", `WORKMUX_BRANCH=${opts.branch}`);
  // Image + command: keep the container alive; actual work happens via docker exec.
  args.push(sandboxConfig.image, "sleep", "infinity");
  return args;
}
/**
 * Launch a sandbox container for a worktree. Returns the container name.
 * If a container for this branch is already running, returns its name without launching a second one.
 *
 * Side effects: spawns `docker run`, injects the workmux RPC stub via
 * `docker exec`, and force-removes the container on failure or timeout.
 *
 * @throws Error when sandboxConfig.image is empty, `docker run` exits
 *   non-zero, or the DOCKER_RUN_TIMEOUT_MS deadline is exceeded.
 */
export async function launchContainer(opts: LaunchContainerOpts): Promise<string> {
  const { branch } = opts;
  // Idempotency: reuse an already-running container for this branch.
  const existing = await findContainer(branch);
  if (existing) {
    log.info(`[docker] reusing existing container ${existing} for branch ${branch}`);
    return existing;
  }
  if (!opts.sandboxConfig.image) {
    throw new Error("sandboxConfig.image is required but was empty");
  }
  const name = containerName(branch);
  const home = Bun.env.HOME ?? "/root";
  const rpcSecret = await loadRpcSecret();
  const rpcPort = Bun.env.DASHBOARD_PORT ?? "5111";
  // Resolve which credential paths exist on the host before building args.
  // Only forward SSH_AUTH_SOCK if the socket is world-accessible so the
  // Docker daemon (separate process) can bind-mount it.
  let sshAuthSock = Bun.env.SSH_AUTH_SOCK;
  if (sshAuthSock) {
    try {
      const st = await stat(sshAuthSock);
      // eslint-disable-next-line no-bitwise
      if (!st.isSocket() || (st.mode & 0o007) === 0) {
        log.debug(`[docker] skipping SSH_AUTH_SOCK (not world-accessible): ${sshAuthSock}`);
        sshAuthSock = undefined;
      }
    } catch {
      // Socket path vanished or is unreadable — proceed without agent forwarding.
      sshAuthSock = undefined;
    }
  }
  const credentialHostPaths = [
    `${home}/.gitconfig`,
    `${home}/.ssh`,
    `${home}/.config/gh`,
    ...(sshAuthSock ? [sshAuthSock] : []),
  ];
  const existingPaths = new Set<string>();
  await Promise.all(credentialHostPaths.map(async (p) => {
    if (await pathExists(p)) existingPaths.add(p);
  }));
  const args = buildDockerRunArgs(opts, existingPaths, home, name, rpcSecret, rpcPort, sshAuthSock, process.getuid!(), process.getgid!());
  log.info(`[docker] launching container: ${name}`);
  const proc = Bun.spawn(args, { stdout: "pipe", stderr: "pipe" });
  // Race process exit against a hard timeout so a hung daemon or slow image
  // pull does not block the server indefinitely.
  const timeout = Bun.sleep(DOCKER_RUN_TIMEOUT_MS).then(() => {
    proc.kill();
    return "timeout" as const;
  });
  // Drain stdout/stderr concurrently with the exit race so pipe backpressure
  // cannot stall the docker client.
  const [exitResult, stderr, containerId] = await Promise.all([
    Promise.race([proc.exited, timeout]),
    new Response(proc.stderr).text(),
    new Response(proc.stdout).text(),
  ]);
  if (exitResult === "timeout") {
    await Bun.spawn(["docker", "rm", "-f", name], { stdout: "ignore", stderr: "ignore" }).exited;
    throw new Error(`docker run timed out after ${DOCKER_RUN_TIMEOUT_MS / 1000}s`);
  }
  if (exitResult !== 0) {
    // Clean up any stopped container docker may have left behind.
    await Bun.spawn(["docker", "rm", "-f", name], { stdout: "ignore", stderr: "ignore" }).exited;
    throw new Error(`docker run failed (exit ${exitResult}): ${stderr}`);
  }
  log.info(`[docker] container ${name} ready (id=${containerId.trim().slice(0, 12)})`);
  // Inject workmux stub so agents inside the container can call host-side workmux.
  // Runs as root inside the container — presumably because /usr/local/bin is
  // not writable by the --user UID; confirm against the sandbox images in use.
  const stub = buildWorkmuxStub();
  const injectProc = Bun.spawn(
    ["docker", "exec", "-u", "root", "-i", name, "sh", "-c",
      "cat > /usr/local/bin/workmux && chmod +x /usr/local/bin/workmux"],
    { stdin: "pipe", stdout: "pipe", stderr: "pipe" },
  );
  const { stdin } = injectProc;
  if (stdin) {
    stdin.write(stub);
    stdin.end();
  }
  const injectExit = await injectProc.exited;
  // Stub injection failure is non-fatal: the container still works, but
  // in-container `workmux` calls will be unavailable.
  if (injectExit !== 0) {
    const injectStderr = await new Response(injectProc.stderr).text();
    log.warn(`[docker] workmux stub injection failed for ${name}: ${injectStderr}`);
  } else {
    log.debug(`[docker] workmux stub injected into ${name}`);
  }
  return name;
}
/**
 * Find the most-recently-started running container for a branch.
 * Returns the container name, or null if none is running.
 * Throws if the Docker daemon cannot be reached.
 */
export async function findContainer(branch: string): Promise<string | null> {
  const sanitised = sanitiseBranchForName(branch);
  const prefix = `wm-${sanitised}-`;
  const proc = Bun.spawn(
    ["docker", "ps", "--filter", `name=${prefix}`, "--format", "{{.Names}}"],
    { stdout: "pipe", stderr: "pipe" },
  );
  const [exitCode, stdout, stderr] = await Promise.all([
    proc.exited,
    new Response(proc.stdout).text(),
    new Response(proc.stderr).text(),
  ]);
  if (exitCode !== 0) {
    throw new Error(`docker ps failed (exit ${exitCode}): ${stderr}`);
  }
  // `--filter name=` is a substring match, so narrow to exact-prefix hits
  // whose remainder is purely the numeric launch timestamp; this stops
  // "main" from matching "main-v2" containers.
  const isExactMatch = (n: string): boolean =>
    n.startsWith(prefix) && /^\d+$/.test(n.slice(prefix.length));
  const candidates = stdout
    .trim()
    .split("\n")
    .filter(Boolean)
    .filter(isExactMatch);
  // docker ps lists containers newest-first, so the head is the latest launch.
  return candidates.length > 0 ? candidates[0] : null;
}
/**
 * Remove all containers (running or stopped) for a branch.
 * Individual removal errors are logged but do not abort remaining removals.
 *
 * Never throws: a failed `docker ps` is logged and treated as nothing to remove.
 */
export async function removeContainer(branch: string): Promise<void> {
  const sanitised = sanitiseBranchForName(branch);
  const prefix = `wm-${sanitised}-`;
  // -a includes stopped containers, unlike findContainer's running-only listing.
  const listProc = Bun.spawn(
    ["docker", "ps", "-a", "--filter", `name=${prefix}`, "--format", "{{.Names}}"],
    { stdout: "pipe", stderr: "pipe" },
  );
  const [listExit, listOut, listErr] = await Promise.all([
    listProc.exited,
    new Response(listProc.stdout).text(),
    new Response(listProc.stderr).text(),
  ]);
  if (listExit !== 0) {
    log.error(`[docker] removeContainer: docker ps failed for ${branch}: ${listErr}`);
    return;
  }
  // Exact-prefix matches only: the suffix must be the numeric launch timestamp
  // (the `name=` filter above is a substring match).
  const names = listOut
    .trim()
    .split("\n")
    .filter(Boolean)
    .filter(n => n.startsWith(prefix) && /^\d+$/.test(n.slice(prefix.length)));
  // Remove all matches in parallel; each failure is logged independently.
  await Promise.all(
    names.map(async (cname) => {
      log.info(`[docker] removing container: ${cname}`);
      const rmProc = Bun.spawn(["docker", "rm", "-f", cname], { stdout: "ignore", stderr: "pipe" });
      const [rmExit, rmErr] = await Promise.all([
        rmProc.exited,
        new Response(rmProc.stderr).text(),
      ]);
      if (rmExit !== 0) {
        log.error(`[docker] failed to remove container ${cname}: ${rmErr}`);
      }
    }),
  );
}
import type { ServiceConfig } from "./config";
/** Read key=value pairs from a worktree's .env.local file; missing/unreadable file → {}. */
export async function readEnvLocal(wtDir: string): Promise<Record<string, string>> {
  const env: Record<string, string> = {};
  try {
    const raw = (await Bun.file(`${wtDir}/.env.local`).text()).trim();
    for (const line of raw.split("\n")) {
      const match = line.match(/^(\w+)=(.*)$/);
      if (match) {
        env[match[1]] = match[2];
      }
    }
    return env;
  } catch {
    return {};
  }
}
/**
 * Batch-write multiple key=value pairs to a worktree's .env.local (upsert each key).
 *
 * Uses a plain prefix comparison rather than `new RegExp(`^${key}=`)`:
 * interpolating a key into a regex mis-matches (or throws at RegExp
 * construction) when the key contains regex metacharacters.
 */
export async function writeEnvLocal(wtDir: string, entries: Record<string, string>): Promise<void> {
  const filePath = `${wtDir}/.env.local`;
  let lines: string[] = [];
  try {
    const content = (await Bun.file(filePath).text()).trim();
    if (content) lines = content.split("\n");
  } catch {
    // File doesn't exist yet
  }
  for (const [key, value] of Object.entries(entries)) {
    const prefix = `${key}=`;
    const idx = lines.findIndex((l) => l.startsWith(prefix));
    if (idx >= 0) {
      lines[idx] = `${key}=${value}`;
    } else {
      lines.push(`${key}=${value}`);
    }
  }
  await Bun.write(filePath, lines.join("\n") + "\n");
}
/**
 * Read .env.local from all worktree paths, optionally excluding one directory.
 *
 * The reads are independent, so they run in parallel; results keep the
 * order of `worktreePaths` (minus the excluded entry).
 */
export async function readAllWorktreeEnvs(
  worktreePaths: string[],
  excludeDir?: string,
): Promise<Record<string, string>[]> {
  const dirs = worktreePaths.filter((p) => !(excludeDir && p === excludeDir));
  return Promise.all(dirs.map((p) => readEnvLocal(p)));
}
/**
 * Pure: compute port assignments for a new worktree.
 * Uses the first allocatable service as a reference to reverse-compute
 * occupied slot indices. Index 0 is reserved for main. Returns a map
 * of portEnv → port value for all services that have portStart set.
 */
export function allocatePorts(
  existingEnvs: Record<string, string>[],
  services: ServiceConfig[],
): Record<string, string> {
  const allocatable = services.filter((s) => s.portStart != null);
  if (!allocatable.length) return {};
  // The first allocatable service acts as the reference for slot discovery.
  const [ref] = allocatable;
  const refStart = ref.portStart!;
  const refStep = ref.portStep ?? 1;
  const occupied = new Set<number>();
  for (const env of existingEnvs) {
    const raw = env[ref.portEnv];
    if (raw == null) continue;
    const port = Number(raw);
    if (!Number.isInteger(port) || port < refStart) continue;
    const offset = port - refStart;
    if (offset % refStep === 0) occupied.add(offset / refStep);
  }
  // Slot 0 belongs to main; scan upward for the first free index.
  let slot = 1;
  while (occupied.has(slot)) slot += 1;
  const assignments: Record<string, string> = {};
  for (const svc of allocatable) {
    const base = svc.portStart!;
    const step = svc.portStep ?? 1;
    assignments[svc.portEnv] = String(base + slot * step);
  }
  return assignments;
}
/** Serialize `data` as a JSON Response with the given status (default 200). */
export function jsonResponse(data: unknown, status = 200): Response {
  const body = JSON.stringify(data);
  const headers = { "Content-Type": "application/json" };
  return new Response(body, { status, headers });
}
/** Build a JSON error Response with body { error: message } (default status 500). */
export function errorResponse(message: string, status = 500): Response {
  const payload = { error: message };
  return jsonResponse(payload, status);
}
// Debug logging is opt-in via WMDEV_DEBUG=1.
const DEBUG = Bun.env.WMDEV_DEBUG === "1";
/** HH:MM:SS.mmm timestamp used as the log-line prefix. */
function ts(): string {
  return new Date().toISOString().slice(11, 23);
}
/** Minimal timestamped console logger. */
export const log = {
  info(msg: string): void {
    console.log(`[${ts()}] ${msg}`);
  },
  debug(msg: string): void {
    if (DEBUG) {
      console.log(`[${ts()}] ${msg}`);
    }
  },
  warn(msg: string): void {
    console.warn(`[${ts()}] ${msg}`);
  },
  error(msg: string, err?: unknown): void {
    if (err !== undefined) {
      console.error(`[${ts()}] ${msg}`, err);
    } else {
      console.error(`[${ts()}] ${msg}`);
    }
  },
};
import { readEnvLocal, writeEnvLocal } from "./env";
import type { LinkedRepoConfig } from "./config";
import { log } from "./lib/log";
// Maximum PRs fetched per repo in one `gh pr list` call.
const PR_FETCH_LIMIT = 50;
// Hard timeout for any `gh` subprocess so a hung CLI never stalls a sync cycle.
const GH_TIMEOUT_MS = 15_000;
// ── Internal GH API shapes ────────────────────────────────────────────────────
// Raw check status values as returned by `gh pr list --json statusCheckRollup`.
type GhCheckStatus =
  | "QUEUED"
  | "IN_PROGRESS"
  | "COMPLETED"
  | "WAITING"
  | "REQUESTED"
  | "PENDING";
// Raw check conclusion values (only meaningful once status is COMPLETED).
type GhCheckConclusion =
  | "SUCCESS"
  | "FAILURE"
  | "NEUTRAL"
  | "CANCELLED"
  | "SKIPPED"
  | "TIMED_OUT"
  | "ACTION_REQUIRED";
/** A PR comment as surfaced to dashboard consumers. */
export interface PrComment {
  author: string;
  body: string;
  createdAt: string;
}
// Comment shape as emitted by the gh CLI JSON output.
interface GhComment {
  author: { login: string };
  body: string;
  createdAt: string;
}
// One entry of statusCheckRollup from gh.
interface GhCheckEntry {
  conclusion: GhCheckConclusion | null;
  status: GhCheckStatus;
  name: string;
  detailsUrl: string;
}
// One PR row from `gh pr list --json …`.
interface GhPrEntry {
  number: number;
  headRefName: string;
  state: string;
  statusCheckRollup: GhCheckEntry[] | null;
  url: string;
  comments: GhComment[];
}
// ── Public types ──────────────────────────────────────────────────────────────
/** A single CI check, normalized from the GH rollup entry. */
export interface CiCheck {
  name: string;
  status: "pending" | "success" | "failed" | "skipped";
  url: string;
  // GitHub Actions run ID parsed from detailsUrl; null for non-Actions checks.
  runId: number | null;
}
/** A PR as stored in a worktree's PR_DATA env entry. */
export interface PrEntry {
  repo: string;
  number: number;
  state: "open" | "closed" | "merged";
  url: string;
  ciStatus: "none" | "pending" | "success" | "failed";
  ciChecks: CiCheck[];
  comments: PrComment[];
}
/** Result of fetching PRs for one repo: a branch→PR map, or an error string. */
type FetchPrsResult =
  | { ok: true; data: Map<string, PrEntry> }
  | { ok: false; error: string };
// ── Pure helper functions (exported for unit testing) ─────────────────────────
/** Summarize CI check status from a statusCheckRollup array. */
export function summarizeChecks(
  checks: GhCheckEntry[] | null,
): PrEntry["ciStatus"] {
  if (checks == null || checks.length === 0) return "none";
  // Any check still running means the rollup as a whole is pending.
  const stillRunning = checks.find((c) => c.status !== "COMPLETED");
  if (stillRunning !== undefined) return "pending";
  // A null conclusion counts as not-passing, matching the previous allPass logic.
  const passing = new Set(["SUCCESS", "NEUTRAL", "SKIPPED"]);
  const anyFailing = checks.some((c) => !passing.has(c.conclusion ?? ""));
  return anyFailing ? "failed" : "success";
}
/** Parse a GitHub Actions run ID from a details URL. Returns null when not found. */
export function parseRunId(detailsUrl: string): number | null {
  const m = /\/actions\/runs\/(\d+)/.exec(detailsUrl);
  if (m === null) return null;
  return parseInt(m[1], 10);
}
/** Derive a typed check status from GH conclusion/status fields. */
export function deriveCheckStatus(check: GhCheckEntry): CiCheck["status"] {
  if (check.status !== "COMPLETED") return "pending";
  switch (check.conclusion) {
    case "SUCCESS":
    case "NEUTRAL":
      return "success";
    case "SKIPPED":
      return "skipped";
    default:
      // FAILURE, CANCELLED, TIMED_OUT, ACTION_REQUIRED, or null.
      return "failed";
  }
}
/** Map raw GH check entries to typed CiCheck array. */
export function mapChecks(checks: GhCheckEntry[] | null): CiCheck[] {
  if (checks == null) return [];
  const mapped: CiCheck[] = [];
  for (const check of checks) {
    mapped.push({
      name: check.name,
      status: deriveCheckStatus(check),
      url: check.detailsUrl,
      runId: parseRunId(check.detailsUrl),
    });
  }
  return mapped;
}
/** Parse raw `gh pr list --json` output into a branch → PrEntry map. Throws on invalid JSON. */
export function parsePrResponse(
json: string,
repoLabel?: string,
): Map<string, PrEntry> {
const prs = new Map<string, PrEntry>();
const entries = JSON.parse(json) as GhPrEntry[];
for (const entry of entries) {
// If multiple PRs share the same branch in one repo, the first (most recent) wins.
if (prs.has(entry.headRefName)) continue;
prs.set(entry.headRefName, {
repo: repoLabel ?? "",
number: entry.number,
state: entry.state.toLowerCase() as PrEntry["state"],
url: entry.url,
ciStatus: summarizeChecks(entry.statusCheckRollup),
ciChecks: mapChecks(entry.statusCheckRollup),
comments: (entry.comments ?? []).map((c) => ({
author: c.author?.login ?? "unknown",
body: c.body ?? "",
createdAt: c.createdAt ?? "",
})),
});
}
return prs;
}
// ── I/O functions ─────────────────────────────────────────────────────────────
/**
 * Fetch all open PRs from a repo via `gh` CLI.
 * Returns a Result: on success, a map of branch name → PrEntry; on failure, an error string.
 * Applies a hard timeout so a hung `gh` process never stalls the caller.
 *
 * @param repoSlug - "owner/name" to query; omitted → the repo of `cwd`.
 * @param repoLabel - Label stored as PrEntry.repo (e.g. a linked-repo alias).
 * @param cwd - Working directory for the `gh` invocation.
 */
export async function fetchAllPrs(
  repoSlug?: string,
  repoLabel?: string,
  cwd?: string,
): Promise<FetchPrsResult> {
  const label = repoSlug ?? "current";
  const args = [
    "gh",
    "pr",
    "list",
    "--state",
    "open",
    "--json",
    "number,headRefName,state,statusCheckRollup,url,comments",
    "--limit",
    String(PR_FETCH_LIMIT),
  ];
  if (repoSlug) args.push("--repo", repoSlug);
  const proc = Bun.spawn(args, {
    stdout: "pipe",
    stderr: "pipe",
    ...(cwd ? { cwd } : {}),
  });
  // Hard deadline: on expiry kill gh and report a timeout Result.
  const timeout = Bun.sleep(GH_TIMEOUT_MS).then(() => {
    proc.kill();
    return "timeout" as const;
  });
  // NOTE(review): stdout is only drained after the exit race; this assumes the
  // runtime buffers the pipe enough that gh never blocks on a full pipe —
  // confirm for very large PR payloads.
  const raceResult = await Promise.race([proc.exited, timeout]);
  if (raceResult === "timeout") {
    return { ok: false, error: `gh pr list timed out for ${label}` };
  }
  if (raceResult !== 0) {
    const stderr = (await new Response(proc.stderr).text()).trim();
    return {
      ok: false,
      error: `gh pr list failed for ${label} (exit ${raceResult}): ${stderr}`,
    };
  }
  try {
    const json = await new Response(proc.stdout).text();
    return { ok: true, data: parsePrResponse(json, repoLabel) };
  } catch (err) {
    // parsePrResponse throws on invalid JSON; surface it as a Result error.
    return { ok: false, error: `failed to parse gh output for ${label}: ${err}` };
  }
}
/** Fetch the current state of a PR by its URL. Returns null on error or timeout. */
async function fetchPrState(url: string): Promise<PrEntry["state"] | null> {
  const proc = Bun.spawn(["gh", "pr", "view", url, "--json", "state"], {
    stdout: "pipe",
    stderr: "pipe",
  });
  // Same hard-timeout pattern as fetchAllPrs: kill gh once the deadline passes.
  const timeout = Bun.sleep(GH_TIMEOUT_MS).then(() => {
    proc.kill();
    return "timeout" as const;
  });
  const raceResult = await Promise.race([proc.exited, timeout]);
  if (raceResult === "timeout" || raceResult !== 0) return null;
  try {
    const data = JSON.parse(await new Response(proc.stdout).text()) as { state: string };
    // gh reports upper-case states; PrEntry uses the lowercase variants.
    return data.state.toLowerCase() as PrEntry["state"];
  } catch {
    // Invalid/missing JSON — treat as unknown.
    return null;
  }
}
/** Update PR_DATA for a worktree whose PR is no longer in the open PR list.
 * Fetches the actual current state for any entry still marked "open".
 * Silently returns when PR_DATA is absent or unparseable. */
async function refreshStalePrData(wtDir: string): Promise<void> {
  const env = await readEnvLocal(wtDir);
  if (!env.PR_DATA) return;
  let entries: PrEntry[];
  try {
    entries = JSON.parse(env.PR_DATA) as PrEntry[];
  } catch {
    // Corrupt PR_DATA — leave it untouched rather than guessing.
    return;
  }
  // Nothing marked open means nothing can have gone stale; skip the gh calls.
  if (!entries.some((e) => e.state === "open")) return;
  const updated = await Promise.all(
    entries.map(async (entry) => {
      if (entry.state !== "open") return entry;
      const state = await fetchPrState(entry.url);
      // On fetch failure, keep the previous entry unchanged.
      return state ? { ...entry, state } : entry;
    }),
  );
  await writeEnvLocal(wtDir, { PR_DATA: JSON.stringify(updated) });
}
/** Sync PR status to .env.local for all worktrees that have open PRs.
 *
 * @param getWorktreePaths - Async source of branch → worktree-dir mappings.
 * @param linkedRepos - Additional repos to query alongside the current one.
 * @param projectDir - Working directory for the current repo's `gh` call.
 */
export async function syncPrStatus(
  getWorktreePaths: () => Promise<Map<string, string>>,
  linkedRepos: LinkedRepoConfig[],
  projectDir?: string,
): Promise<void> {
  // Fetch current repo + all linked repos in parallel.
  const allRepoResults = await Promise.all([
    fetchAllPrs(undefined, undefined, projectDir),
    ...linkedRepos.map(({ repo, alias }) => fetchAllPrs(repo, alias, projectDir)),
  ]);
  // Log fetch errors; aggregate successes into branch → PrEntry[].
  const branchPrs = new Map<string, PrEntry[]>();
  for (const result of allRepoResults) {
    if (!result.ok) {
      log.error(`[pr] ${result.error}`);
      continue;
    }
    for (const [branch, entry] of result.data) {
      const existing = branchPrs.get(branch) ?? [];
      existing.push(entry);
      branchPrs.set(branch, existing);
    }
  }
  const wtPaths = await getWorktreePaths();
  // Track written dirs so each worktree is updated at most once, even when
  // multiple branches resolve to the same directory.
  const seen = new Set<string>();
  for (const [branch, entries] of branchPrs) {
    const wtDir = wtPaths.get(branch);
    if (!wtDir || seen.has(wtDir)) continue;
    seen.add(wtDir);
    await writeEnvLocal(wtDir, { PR_DATA: JSON.stringify(entries) });
  }
  if (seen.size > 0) {
    log.debug(
      `[pr] synced ${seen.size} worktree(s) with PR data from ${allRepoResults.length} repo(s)`,
    );
  }
  // For worktrees not matched by the open-PR sync, refresh any stale "open"
  // entries so merged/closed PRs are reflected in PR_DATA.
  const uniqueDirs = new Set(wtPaths.values());
  const staleRefreshes: Promise<void>[] = [];
  for (const wtDir of uniqueDirs) {
    if (seen.has(wtDir)) continue;
    staleRefreshes.push(refreshStalePrData(wtDir));
  }
  await Promise.all(staleRefreshes);
}
/** Start periodic PR status sync. Returns a cleanup function that stops the monitor. */
export function startPrMonitor(
  getWorktreePaths: () => Promise<Map<string, string>>,
  linkedRepos: LinkedRepoConfig[],
  projectDir?: string,
  intervalMs: number = 20_000,
): () => void {
  const tick = (): void => {
    // Fire-and-forget: failures are logged, never thrown into the timer.
    void syncPrStatus(getWorktreePaths, linkedRepos, projectDir).catch((err: unknown) => {
      log.error(`[pr] sync error: ${err}`);
    });
  };
  // Kick off an immediate (non-blocking) sync, then repeat on the interval.
  tick();
  const handle = setInterval(tick, intervalMs);
  return (): void => {
    clearInterval(handle);
  };
}
import { chmod, mkdir } from "node:fs/promises";
import { dirname } from "node:path";
// Path where the shared RPC secret is persisted (created on first use).
const SECRET_PATH = `${Bun.env.HOME ?? "/root"}/.config/workmux/rpc-secret`;
// In-process cache so the secret file is read at most once per process.
let cached: string | null = null;
/**
 * Load the RPC secret, minting and persisting one on first use.
 * The value is cached in-process after the first successful call.
 */
export async function loadRpcSecret(): Promise<string> {
  if (cached) return cached;
  const existing = Bun.file(SECRET_PATH);
  if (await existing.exists()) {
    const value = (await existing.text()).trim();
    cached = value;
    return value;
  }
  // No secret on disk yet — generate one and store it with owner-only perms.
  const fresh = crypto.randomUUID();
  await mkdir(dirname(SECRET_PATH), { recursive: true });
  await Bun.write(SECRET_PATH, fresh);
  await chmod(SECRET_PATH, 0o600);
  cached = fresh;
  return fresh;
}
import { loadRpcSecret } from "./rpc-secret";
import { jsonResponse } from "./http";
/** Shape of an incoming /rpc/workmux request body. */
interface RpcRequest {
  // workmux subcommand to run
  command: string;
  // extra CLI arguments appended after the subcommand
  args?: string[];
  // worktree branch, used to resolve a tmux pane for set-window-status
  branch?: string;
}
/** JSON reply: command output on success, error text otherwise. */
// Fixed: type alias statement was missing its terminating semicolon,
// inconsistent with the rest of the file.
type RpcResponse = { ok: true; output: string } | { ok: false; error: string };
/** Build env with TMUX set so workmux can resolve agent states outside tmux. */
function tmuxEnv(): Record<string, string | undefined> {
  const env = Bun.env;
  if (env.TMUX) return env;
  // Point TMUX at the default server socket path for this user.
  const socketDir = env.TMUX_TMPDIR || "/tmp";
  const userId = process.getuid?.() ?? 1000;
  return { ...env, TMUX: `${socketDir}/tmux-${userId}/default,0,0` };
}
/**
 * Resolve the tmux pane ID for a worktree window (wm-{branch}).
 * Returns the first pane ID, or null if the window doesn't exist.
 */
async function resolvePaneId(branch: string): Promise<string | null> {
  const proc = Bun.spawn(
    ["tmux", "list-panes", "-a", "-F", "#{window_name}\t#{pane_id}"],
    { stdout: "pipe", stderr: "pipe", env: tmuxEnv() },
  );
  // Drain both pipes while waiting for exit so the child can't block.
  const [out, , code] = await Promise.all([
    new Response(proc.stdout).text(),
    new Response(proc.stderr).text(),
    proc.exited,
  ]);
  if (code !== 0) return null;
  const wanted = `wm-${branch}`;
  for (const row of out.trim().split("\n")) {
    const tab = row.indexOf("\t");
    if (tab === -1) continue;
    const windowName = row.slice(0, tab);
    const paneId = row.slice(tab + 1);
    if (windowName === wanted && paneId) return paneId;
  }
  return null;
}
/**
 * Handle POST /rpc/workmux: run a workmux CLI command on behalf of a caller
 * (e.g. a sandbox container) and return its output as JSON.
 *
 * Auth: requires `Authorization: Bearer <secret>` matching the persisted RPC
 * secret; otherwise 401. Body: RpcRequest JSON. Responses: 400 on bad input,
 * 422 when workmux exits non-zero, 500 if spawning fails.
 *
 * NOTE(review): `command`/`args` are forwarded to the workmux binary as an
 * argv array (no shell, so no shell injection), but they are otherwise
 * unrestricted — access control rests entirely on the bearer secret.
 */
export async function handleWorkmuxRpc(req: Request): Promise<Response> {
  const secret = await loadRpcSecret();
  const authHeader = req.headers.get("Authorization");
  const token = authHeader?.startsWith("Bearer ") ? authHeader.slice(7) : null;
  if (token !== secret) {
    return new Response("Unauthorized", { status: 401 });
  }
  let raw: RpcRequest;
  try {
    raw = await req.json() as RpcRequest;
  } catch {
    return jsonResponse({ ok: false, error: "Invalid JSON" } satisfies RpcResponse, 400);
  }
  const { command, args = [], branch } = raw;
  if (!command) {
    return jsonResponse({ ok: false, error: "Missing command" } satisfies RpcResponse, 400);
  }
  try {
    // Build spawn environment. For set-window-status from a container,
    // resolve the tmux pane ID so the workmux binary can target the right window.
    const env = tmuxEnv();
    if (command === "set-window-status" && branch) {
      const paneId = await resolvePaneId(branch);
      if (paneId) {
        env.TMUX_PANE = paneId;
      }
    }
    const proc = Bun.spawn(["workmux", command, ...args], {
      stdout: "pipe",
      stderr: "pipe",
      env,
    });
    // Drain both pipes while awaiting exit to avoid pipe-buffer deadlock.
    const [stdout, stderr, exitCode] = await Promise.all([
      new Response(proc.stdout).text(),
      new Response(proc.stderr).text(),
      proc.exited,
    ]);
    if (exitCode !== 0) {
      return jsonResponse({ ok: false, error: stderr.trim() || `exit code ${exitCode}` } satisfies RpcResponse, 422);
    }
    return jsonResponse({ ok: true, output: stdout.trim() } satisfies RpcResponse);
  } catch (err: unknown) {
    const error = err instanceof Error ? err.message : String(err);
    return jsonResponse({ ok: false, error } satisfies RpcResponse, 500);
  }
}
export type { RpcRequest, RpcResponse };
import { join, resolve } from "node:path";
import { networkInterfaces } from "node:os";
import { log } from "./lib/log";
import {
listWorktrees,
getStatus,
addWorktree,
removeWorktree,
openWorktree,
mergeWorktree,
sendPrompt,
readEnvLocal,
parseWorktreePorcelain,
} from "./workmux";
import {
attach,
detach,
write,
resize,
selectPane,
getScrollback,
setCallbacks,
clearCallbacks,
cleanupStaleSessions,
} from "./terminal";
import { loadConfig, gitRoot, type WmdevConfig } from "./config";
import { startPrMonitor, type PrEntry } from "./pr";
import { handleWorkmuxRpc } from "./rpc";
import { jsonResponse, errorResponse } from "./http";
// Dashboard HTTP port (override with DASHBOARD_PORT).
const PORT = parseInt(Bun.env.DASHBOARD_PORT || "5111", 10);
// Directory of built frontend assets; empty string disables static serving.
const STATIC_DIR = Bun.env.WMDEV_STATIC_DIR || "";
// Project root: explicit override, else the enclosing git repository root.
const PROJECT_DIR = Bun.env.WMDEV_PROJECT_DIR || gitRoot(process.cwd());
// Dashboard configuration loaded from the project directory.
const config: WmdevConfig = loadConfig(PROJECT_DIR);
// --- WebSocket protocol types ---
/** Per-connection state carried on each terminal WebSocket. */
interface WsData {
  // worktree the socket is bound to (from the /ws/:worktree URL)
  worktree: string;
  // true once the first "resize" message triggered a tmux attach
  attached: boolean;
}
/** Messages the browser sends to the server. */
type WsInboundMessage =
  | { type: "input"; data: string }
  | { type: "selectPane"; pane: number }
  | { type: "resize"; cols: number; rows: number; initialPane?: number };
/** Messages the server sends to the browser. */
type WsOutboundMessage =
  | { type: "output"; data: string }
  | { type: "exit"; exitCode: number }
  | { type: "error"; message: string }
  | { type: "scrollback"; data: string };
/**
 * Parse a raw WebSocket frame into a validated inbound message.
 * Returns null for malformed JSON and for unknown or ill-typed messages.
 */
function parseWsMessage(raw: string | Buffer): WsInboundMessage | null {
  let parsed: unknown;
  try {
    const text = typeof raw === "string" ? raw : new TextDecoder().decode(raw);
    parsed = JSON.parse(text);
  } catch {
    return null;
  }
  if (!parsed || typeof parsed !== "object") return null;
  const candidate = parsed as Record<string, unknown>;
  if (candidate.type === "input" && typeof candidate.data === "string") {
    return { type: "input", data: candidate.data };
  }
  if (candidate.type === "selectPane" && typeof candidate.pane === "number") {
    return { type: "selectPane", pane: candidate.pane };
  }
  if (
    candidate.type === "resize" &&
    typeof candidate.cols === "number" &&
    typeof candidate.rows === "number"
  ) {
    return {
      type: "resize",
      cols: candidate.cols,
      rows: candidate.rows,
      initialPane:
        typeof candidate.initialPane === "number" ? candidate.initialPane : undefined,
    };
  }
  return null;
}
// --- HTTP helpers ---
/** Serialize an outbound message and push it down the socket. */
function sendWs(ws: { send: (data: string) => void }, msg: WsOutboundMessage): void {
  const payload = JSON.stringify(msg);
  ws.send(payload);
}
/** Validate a user-supplied worktree name: lowercase-alnum start, safe charset, no "..". */
function isValidWorktreeName(name: string): boolean {
  if (name.length === 0 || name.includes("..")) return false;
  return /^[a-z0-9][a-z0-9\-_./]*$/.test(name);
}
/** Wrap an async API handler to catch and log unhandled errors. */
async function catching(label: string, fn: () => Promise<Response>): Promise<Response> {
  try {
    return await fn();
  } catch (err: unknown) {
    const text = err instanceof Error ? err.message : String(err);
    log.error(`[api:error] ${label}: ${text}`);
    return errorResponse(text);
  }
}
/** Parse JSON, returning null instead of throwing on invalid input. */
function safeJsonParse<T>(str: string): T | null {
  let value: T | null = null;
  try {
    value = JSON.parse(str) as T;
  } catch {
    // fall through with null
  }
  return value;
}
// --- Process helpers ---
/** Map branch name → worktree directory using git worktree list.
* Skips the main working tree (always the first entry). */
async function getWorktreePaths(): Promise<Map<string, string>> {
const proc = Bun.spawn(["git", "worktree", "list", "--porcelain"], { stdout: "pipe" });
await proc.exited;
const output = await new Response(proc.stdout).text();
const all = parseWorktreePorcelain(output);
const paths = new Map<string, string>();
let isFirst = true;
for (const [branch, path] of all) {
// Skip the main working tree (first entry in porcelain output)
if (isFirst) { isFirst = false; continue; }
paths.set(branch, path);
// Also map by directory basename (workmux uses basename as branch key)
const basename = path.split("/").pop() ?? "";
if (basename !== branch) paths.set(basename, path);
}
return paths;
}
/** Count tmux panes for a worktree window. */
async function getTmuxPaneCount(branch: string): Promise<number> {
  const proc = Bun.spawn(
    ["tmux", "list-panes", "-t", `wm-${branch}`, "-F", "#{pane_index}"],
    { stdout: "pipe", stderr: "pipe" }
  );
  // Window missing (or no tmux server) → zero panes.
  if (await proc.exited !== 0) return 0;
  const text = await new Response(proc.stdout).text();
  let count = 0;
  for (const line of text.trim().split("\n")) {
    if (line) count += 1;
  }
  return count;
}
/**
 * Check if a port has a service responding (not just a TCP handshake).
 * Resolves true on any HTTP response (even 4xx/5xx), false on connection
 * error or after a 1s timeout.
 */
async function isPortListening(port: number): Promise<boolean> {
  try {
    // AbortSignal.timeout bounds the whole request; the previous extra
    // manual setTimeout duplicated this 1s cap and is removed.
    await fetch(`http://127.0.0.1:${port}/`, { signal: AbortSignal.timeout(1000) });
    return true;
  } catch {
    return false;
  }
}
/**
 * Build terminal callbacks that forward output/exit events to a WebSocket,
 * dropping events once the socket is no longer open (readyState > 1).
 */
function makeCallbacks(ws: { send: (data: string) => void; readyState: number }): {
  onData: (data: string) => void;
  onExit: (exitCode: number) => void;
} {
  const socketLive = (): boolean => ws.readyState <= 1;
  const onData = (data: string): void => {
    if (socketLive()) sendWs(ws, { type: "output", data });
  };
  const onExit = (exitCode: number): void => {
    if (socketLive()) sendWs(ws, { type: "exit", exitCode });
  };
  return { onData, onExit };
}
// --- API handler functions (thin I/O layer, testable by injecting deps) ---
/**
 * GET /api/worktrees — list worktrees merged with their status row, env
 * metadata from .env.local, per-service port liveness, tmux pane count, and
 * cached PR data.
 */
async function apiGetWorktrees(): Promise<Response> {
  // The three data sources are independent; fetch them in parallel.
  const [worktrees, status, wtPaths] = await Promise.all([
    listWorktrees(),
    getStatus(),
    getWorktreePaths(),
  ]);
  const merged = await Promise.all(worktrees.map(async (wt) => {
    // Match a status row loosely — status rows may not equal the branch name exactly.
    const st = status.find(s =>
      s.worktree.includes(wt.branch) || s.worktree.startsWith(wt.branch)
    );
    const wtDir = wtPaths.get(wt.branch);
    const env = wtDir ? await readEnvLocal(wtDir) : {};
    // Probe each configured service's port (read from .env.local) for liveness.
    const services = await Promise.all(
      config.services.map(async (svc) => {
        const port = env[svc.portEnv] ? parseInt(env[svc.portEnv], 10) : null;
        const running = port !== null && port >= 1 && port <= 65535
          ? await isPortListening(port)
          : false;
        return { name: svc.name, port, running };
      })
    );
    return {
      ...wt,
      dir: wtDir ?? null,
      status: st?.status ?? "",
      elapsed: st?.elapsed ?? "",
      title: st?.title ?? "",
      profile: env.PROFILE || null,
      agentName: env.AGENT || null,
      services,
      // Only count panes when a tmux window exists (mux column shows "✓").
      paneCount: wt.mux === "✓" ? await getTmuxPaneCount(wt.branch) : 0,
      // PR_DATA is JSON written by the PR monitor; tolerate corruption and
      // default each PR's comments array.
      prs: env.PR_DATA ? (safeJsonParse<PrEntry[]>(env.PR_DATA) ?? []).map(pr => ({ ...pr, comments: pr.comments ?? [] })) : [],
    };
  }));
  return jsonResponse(merged);
}
/**
 * POST /api/worktrees — create a worktree and launch an agent in it.
 * Body fields (all optional): branch, prompt, profile, agent.
 * Responds 201 with the branch name, 400 on a bad body, 422 on failure.
 */
async function apiCreateWorktree(req: Request): Promise<Response> {
  const raw: unknown = await req.json();
  if (!raw || typeof raw !== "object" || Array.isArray(raw)) {
    return errorResponse("Invalid request body", 400);
  }
  const body = raw as Record<string, unknown>;
  const branch = typeof body.branch === "string" ? body.branch : undefined;
  const prompt = typeof body.prompt === "string" ? body.prompt : undefined;
  // Fall back to the default profile and the "claude" agent when unspecified.
  const profileName = typeof body.profile === "string" ? body.profile : config.profiles.default.name;
  const agent = typeof body.agent === "string" ? body.agent : "claude";
  // Sandbox mode only when a sandbox profile is configured AND was requested.
  const isSandbox = config.profiles.sandbox !== undefined && profileName === config.profiles.sandbox.name;
  const profileConfig = isSandbox ? config.profiles.sandbox! : config.profiles.default;
  log.info(`[worktree:add] agent=${agent} profile=${profileName}${branch ? ` branch=${branch}` : ""}${prompt ? ` prompt="${prompt.slice(0, 80)}"` : ""}`);
  const result = await addWorktree(branch, {
    prompt,
    profile: profileName,
    agent,
    autoName: config.autoName,
    profileConfig,
    isSandbox,
    sandboxConfig: isSandbox ? config.profiles.sandbox : undefined,
    services: config.services,
    mainRepoDir: PROJECT_DIR,
  });
  if (!result.ok) return errorResponse(result.error, 422);
  log.debug(`[worktree:add] done branch=${result.branch}: ${result.output}`);
  return jsonResponse({ branch: result.branch }, 201);
}
/** DELETE /api/worktrees/:name — remove a worktree via workmux. */
async function apiDeleteWorktree(name: string): Promise<Response> {
  log.info(`[worktree:rm] name=${name}`);
  const result = await removeWorktree(name);
  if (result.ok) {
    log.debug(`[worktree:rm] done name=${name}: ${result.output}`);
    return jsonResponse({ message: result.output });
  }
  return errorResponse(result.error, 422);
}
/** POST /api/worktrees/:name/open — open the worktree via workmux. */
async function apiOpenWorktree(name: string): Promise<Response> {
  log.info(`[worktree:open] name=${name}`);
  const result = await openWorktree(name);
  return result.ok
    ? jsonResponse({ message: result.output })
    : errorResponse(result.error, 422);
}
/** POST /api/worktrees/:name/send — forward a text prompt to the worktree's agent. */
async function apiSendPrompt(name: string, req: Request): Promise<Response> {
  const raw: unknown = await req.json();
  if (!raw || typeof raw !== "object" || Array.isArray(raw)) {
    return errorResponse("Invalid request body", 400);
  }
  const body = raw as Record<string, unknown>;
  const text = typeof body.text === "string" ? body.text : "";
  if (!text) return errorResponse("Missing 'text' field", 400);
  const preamble = typeof body.preamble === "string" ? body.preamble : undefined;
  log.info(`[worktree:send] name=${name} text="${text.slice(0, 80)}"`);
  const result = await sendPrompt(name, text, 0, preamble);
  // 503 signals the agent wasn't reachable to accept the prompt.
  return result.ok ? jsonResponse({ ok: true }) : errorResponse(result.error, 503);
}
/** POST /api/worktrees/:name/merge — merge the worktree branch via workmux. */
async function apiMergeWorktree(name: string): Promise<Response> {
  log.info(`[worktree:merge] name=${name}`);
  const result = await mergeWorktree(name);
  if (result.ok) {
    log.debug(`[worktree:merge] done name=${name}: ${result.output}`);
    return jsonResponse({ message: result.output });
  }
  return errorResponse(result.error, 422);
}
/** GET /api/worktrees/:name/status — status row for one worktree, 404 if absent. */
async function apiWorktreeStatus(name: string): Promise<Response> {
  const statuses = await getStatus();
  for (const st of statuses) {
    if (st.worktree.includes(name)) return jsonResponse(st);
  }
  return errorResponse("Worktree status not found", 404);
}
/** GET /api/ci-logs/:runId — failed-step logs for a GitHub Actions run via gh. */
async function apiCiLogs(runId: string): Promise<Response> {
  // runId goes onto a command line — digits only.
  if (!/^\d+$/.test(runId)) return errorResponse("Invalid run ID", 400);
  const proc = Bun.spawn(["gh", "run", "view", runId, "--log-failed"], {
    stdout: "pipe",
    stderr: "pipe",
  });
  const exitCode = await proc.exited;
  if (exitCode !== 0) {
    const stderr = (await new Response(proc.stderr).text()).trim();
    return errorResponse(`Failed to fetch logs: ${stderr || "unknown error"}`, 502);
  }
  const logs = await new Response(proc.stdout).text();
  return jsonResponse({ logs });
}
// --- Server ---
// HTTP + WebSocket server for the dashboard. REST routes are declared in
// `routes`; anything unmatched falls through to `fetch`, which serves the
// built frontend (when STATIC_DIR is set) with an SPA fallback.
Bun.serve({
  port: PORT,
  idleTimeout: 255, // seconds; worktree removal can take >10s
  routes: {
    // Terminal WebSocket: upgrade immediately; the tmux attach is deferred
    // until the client reports its real dimensions (first "resize" message).
    "/ws/:worktree": (req, server) => {
      const worktree = decodeURIComponent(req.params.worktree);
      return server.upgrade(req, { data: { worktree, attached: false } })
        ? undefined
        : new Response("WebSocket upgrade failed", { status: 400 });
    },
    // Bearer-authenticated bridge letting containers invoke workmux on the host.
    "/rpc/workmux": {
      POST: (req) => handleWorkmuxRpc(req),
    },
    "/api/config": {
      GET: () => jsonResponse(config),
    },
    "/api/worktrees": {
      GET: () => catching("GET /api/worktrees", apiGetWorktrees),
      POST: (req) => catching("POST /api/worktrees", () => apiCreateWorktree(req)),
    },
    // Each :name route validates the worktree name before dispatching.
    "/api/worktrees/:name": {
      DELETE: (req) => {
        const name = decodeURIComponent(req.params.name);
        if (!isValidWorktreeName(name)) return errorResponse("Invalid worktree name", 400);
        return catching(`DELETE /api/worktrees/${name}`, () => apiDeleteWorktree(name));
      },
    },
    "/api/worktrees/:name/open": {
      POST: (req) => {
        const name = decodeURIComponent(req.params.name);
        if (!isValidWorktreeName(name)) return errorResponse("Invalid worktree name", 400);
        return catching(`POST /api/worktrees/${name}/open`, () => apiOpenWorktree(name));
      },
    },
    "/api/worktrees/:name/send": {
      POST: (req) => {
        const name = decodeURIComponent(req.params.name);
        if (!isValidWorktreeName(name)) return errorResponse("Invalid worktree name", 400);
        return catching(`POST /api/worktrees/${name}/send`, () => apiSendPrompt(name, req));
      },
    },
    "/api/worktrees/:name/merge": {
      POST: (req) => {
        const name = decodeURIComponent(req.params.name);
        if (!isValidWorktreeName(name)) return errorResponse("Invalid worktree name", 400);
        return catching(`POST /api/worktrees/${name}/merge`, () => apiMergeWorktree(name));
      },
    },
    "/api/worktrees/:name/status": {
      GET: (req) => {
        const name = decodeURIComponent(req.params.name);
        if (!isValidWorktreeName(name)) return errorResponse("Invalid worktree name", 400);
        return catching(`GET /api/worktrees/${name}/status`, () => apiWorktreeStatus(name));
      },
    },
    "/api/ci-logs/:runId": {
      // runId is validated (digits only) inside apiCiLogs.
      GET: (req) => catching(`GET /api/ci-logs/${req.params.runId}`, () => apiCiLogs(req.params.runId)),
    },
  },
  async fetch(req) {
    // Static frontend files in production mode (fallback for unmatched routes)
    if (STATIC_DIR) {
      const url = new URL(req.url);
      const rawPath = url.pathname === "/" ? "index.html" : url.pathname;
      const filePath = join(STATIC_DIR, rawPath);
      const staticRoot = resolve(STATIC_DIR);
      // Path traversal protection: resolved path must stay within STATIC_DIR
      if (!resolve(filePath).startsWith(staticRoot + "/")) {
        return new Response("Forbidden", { status: 403 });
      }
      const file = Bun.file(filePath);
      if (await file.exists()) {
        return new Response(file);
      }
      // SPA fallback: serve index.html for unmatched routes
      return new Response(Bun.file(join(STATIC_DIR, "index.html")));
    }
    return new Response("Not Found", { status: 404 });
  },
  websocket: {
    // Type ws.data via the data property (Bun.serve<T> generic is deprecated)
    data: {} as WsData,
    open(ws) {
      log.debug(`[ws] open worktree=${ws.data.worktree}`);
    },
    async message(ws, message) {
      const msg = parseWsMessage(message);
      if (!msg) {
        sendWs(ws, { type: "error", message: "malformed message" });
        return;
      }
      const { worktree } = ws.data;
      switch (msg.type) {
        case "input":
          // Keystrokes → the attached PTY's stdin.
          write(worktree, msg.data);
          break;
        case "selectPane":
          // Pane selection is only meaningful once the terminal is attached.
          if (ws.data.attached) {
            log.debug(`[ws] selectPane pane=${msg.pane} worktree=${worktree}`);
            await selectPane(worktree, msg.pane);
          }
          break;
        case "resize":
          if (!ws.data.attached) {
            // First resize = client reporting actual dimensions. Attach now.
            ws.data.attached = true;
            log.debug(`[ws] first resize (attaching) worktree=${worktree} cols=${msg.cols} rows=${msg.rows}`);
            try {
              if (msg.initialPane !== undefined) {
                log.debug(`[ws] initialPane=${msg.initialPane} worktree=${worktree}`);
              }
              await attach(worktree, msg.cols, msg.rows, msg.initialPane);
              const { onData, onExit } = makeCallbacks(ws);
              setCallbacks(worktree, onData, onExit);
              // Replay any output buffered before this client connected.
              const scrollback = getScrollback(worktree);
              log.debug(`[ws] attached worktree=${worktree} scrollback=${scrollback.length} bytes`);
              if (scrollback.length > 0) {
                sendWs(ws, { type: "scrollback", data: scrollback });
              }
            } catch (err: unknown) {
              const errMsg = err instanceof Error ? err.message : String(err);
              log.error(`[ws] attach failed worktree=${worktree}: ${errMsg}`);
              sendWs(ws, { type: "error", message: errMsg });
              ws.close(1011, errMsg.slice(0, 123)); // 1011 = Internal Error
            }
          } else {
            await resize(worktree, msg.cols, msg.rows);
          }
          break;
      }
    },
    async close(ws) {
      log.debug(`[ws] close worktree=${ws.data.worktree} attached=${ws.data.attached}`);
      clearCallbacks(ws.data.worktree);
      await detach(ws.data.worktree);
    },
  },
});
// Ensure tmux server is running (needs at least one session to persist)
const tmuxCheck = Bun.spawnSync(["tmux", "list-sessions"], { stdout: "pipe", stderr: "pipe" });
if (tmuxCheck.exitCode !== 0) {
  Bun.spawnSync(["tmux", "new-session", "-d", "-s", "0"]);
  log.info("Started tmux session");
}
// Remove viewer sessions left over from a previous server run.
cleanupStaleSessions();
// Begin polling PR status into each worktree's .env.local.
startPrMonitor(getWorktreePaths, config.linkedRepos, PROJECT_DIR);
log.info(`Dev Dashboard API running at http://localhost:${PORT}`);
// Also print LAN-reachable IPv4 addresses for convenience.
const nets = networkInterfaces();
for (const addrs of Object.values(nets)) {
  for (const a of addrs ?? []) {
    if (a.family === "IPv4" && !a.internal) {
      log.info(`  Network: http://${a.address}:${PORT}`);
    }
  }
}
import { log } from "./lib/log";
/** State for one live `script`-wrapped tmux attach process. */
interface TerminalSession {
  // the spawned `script` child wrapping tmux attach (all stdio piped)
  proc: Bun.Subprocess<"pipe", "pipe", "pipe">;
  // name of the wm-dash-* grouped (viewer) tmux session
  groupedSessionName: string;
  // buffered output chunks, capped by scrollbackBytes
  scrollback: string[];
  // running byte total of the scrollback buffer
  scrollbackBytes: number;
  // forwards terminal output to the current consumer (set via setCallbacks)
  onData: ((data: string) => void) | null;
  // notifies the current consumer of process exit
  onExit: ((exitCode: number) => void) | null;
  // set by detach() so the stdout reader loop stops cleanly
  cancelled: boolean;
}
/** Inputs for building the shell command that attaches to a worktree window. */
interface AttachCmdOptions {
  // grouped (viewer) session name to create
  gName: string;
  // worktree name; the tmux window is wm-{worktreeName}
  worktreeName: string;
  // owning tmux session to group with
  tmuxSession: string;
  cols: number;
  rows: number;
  // pane to select (and zoom) on attach, when provided
  initialPane?: number;
}
// Scope session names per backend instance using the dashboard port so multiple
// dashboards sharing the same tmux server don't collide or kill each other's sessions.
const DASH_PORT = Bun.env.DASHBOARD_PORT || "5111";
const SESSION_PREFIX = `wm-dash-${DASH_PORT}-`;
const MAX_SCROLLBACK_BYTES = 1 * 1024 * 1024; // 1 MB
// Active terminal sessions keyed by worktree name.
const sessions = new Map<string, TerminalSession>();
// Monotonic counter used to mint unique viewer-session names.
let sessionCounter = 0;
/** Mint the next unique viewer-session name for this backend instance. */
function groupedName(): string {
  sessionCounter += 1;
  return SESSION_PREFIX + String(sessionCounter);
}
/**
 * Build the shell command that creates a grouped tmux session, selects the
 * worktree window/pane, sizes the TTY, and attaches.
 */
function buildAttachCmd(opts: AttachCmdOptions): string {
  const windowTarget = `wm-${opts.worktreeName}`;
  const paneTarget = `${opts.gName}:${windowTarget}.${opts.initialPane ?? 0}`;
  const steps: string[] = [
    `tmux new-session -d -s "${opts.gName}" -t "${opts.tmuxSession}"`,
    `tmux set-option -t "${opts.tmuxSession}" window-size latest`,
    `tmux set-option -t "${opts.gName}" mouse on`,
    `tmux set-option -t "${opts.gName}" set-clipboard on`,
    `tmux select-window -t "${opts.gName}:${windowTarget}"`,
    // Unzoom if a previous session left a pane zoomed (zoom state is shared across grouped sessions)
    `if [ "$(tmux display-message -t '${opts.gName}:${windowTarget}' -p '#{window_zoomed_flag}')" = "1" ]; then tmux resize-pane -Z -t '${opts.gName}:${windowTarget}'; fi`,
    `tmux select-pane -t "${paneTarget}"`,
  ];
  // On mobile, zoom the selected pane to fill the window
  if (opts.initialPane !== undefined) {
    steps.push(`tmux resize-pane -Z -t "${paneTarget}"`);
  }
  steps.push(`stty rows ${opts.rows} cols ${opts.cols}`);
  steps.push(`exec tmux attach-session -t "${opts.gName}"`);
  return steps.join(" && ");
}
/** Run a command asynchronously, capturing exit code and trimmed stderr. */
async function asyncTmux(args: string[]): Promise<{ exitCode: number; stderr: string }> {
  const child = Bun.spawn(args, { stdin: "ignore", stdout: "ignore", stderr: "pipe" });
  const code = await child.exited;
  const errText = (await new Response(child.stderr).text()).trim();
  return { exitCode: code, stderr: errText };
}
/** Kill any orphaned wm-dash-* tmux sessions left from previous server runs. */
export function cleanupStaleSessions(): void {
  try {
    const listed = Bun.spawnSync(
      ["tmux", "list-sessions", "-F", "#{session_name}"],
      { stdout: "pipe", stderr: "pipe" }
    );
    if (listed.exitCode !== 0) return;
    const names = new TextDecoder().decode(listed.stdout).trim().split("\n");
    for (const sessionName of names) {
      // Only touch this dashboard's own viewer sessions.
      if (!sessionName.startsWith(SESSION_PREFIX)) continue;
      Bun.spawnSync(["tmux", "kill-session", "-t", sessionName]);
    }
  } catch {
    // No tmux server running
  }
}
/** Kill a tmux session by name, logging unexpected failures. */
function killTmuxSession(name: string): void {
  const res = Bun.spawnSync(["tmux", "kill-session", "-t", name], { stderr: "pipe" });
  if (res.exitCode === 0) return;
  const errText = new TextDecoder().decode(res.stderr).trim();
  // "can't find session" just means it was already gone — not worth logging.
  if (errText.includes("can't find session")) return;
  log.warn(`[term] killTmuxSession(${name}) exit=${res.exitCode} ${errText}`);
}
/**
 * Pure: parse `tmux list-windows -a` output to find the session owning
 * a worktree window. Skips wm-dash-* viewer sessions.
 * Returns the session name, or null if not found.
 */
export function parseTmuxSessionForWorktree(
  tmuxOutput: string,
  worktreeName: string,
): string | null {
  const wanted = `wm-${worktreeName}`;
  // Split each "session:window" line once at the first colon.
  const rows: Array<[string, string]> = [];
  for (const line of tmuxOutput.trim().split("\n")) {
    if (!line) continue;
    const sep = line.indexOf(":");
    if (sep === -1) continue;
    rows.push([line.slice(0, sep), line.slice(sep + 1)]);
  }
  // First pass: exact window match in a non-viewer session.
  for (const [session, window] of rows) {
    if (window === wanted && !session.startsWith("wm-dash-")) return session;
  }
  // Fallback: any non-viewer session that has some wm-* window.
  for (const [session, window] of rows) {
    if (window.startsWith("wm-") && !session.startsWith("wm-dash-")) return session;
  }
  return null;
}
/** Find the tmux session that owns the window for a given worktree.
 * Skips wm-dash-* grouped/viewer sessions to find the real workmux session.
 * Falls back to session "0" when nothing matches or tmux isn't running. */
async function findTmuxSessionForWorktree(worktreeName: string): Promise<string> {
  const FALLBACK = "0";
  try {
    const child = Bun.spawn(
      ["tmux", "list-windows", "-a", "-F", "#{session_name}:#{window_name}"],
      { stdout: "pipe", stderr: "pipe" },
    );
    const code = await child.exited;
    if (code !== 0) return FALLBACK;
    const listing = await new Response(child.stdout).text();
    return parseTmuxSessionForWorktree(listing, worktreeName) ?? FALLBACK;
  } catch {
    // No tmux server running
    return FALLBACK;
  }
}
/**
 * Attach a terminal to the worktree's tmux window.
 *
 * Spawns `script` wrapping a grouped tmux session so the dashboard gets a
 * PTY-backed byte stream of the window. Output is buffered in a byte-capped
 * scrollback and forwarded to onData once a callback is registered.
 * Replaces any existing session for the same worktree.
 *
 * Returns the worktree name (the session key).
 */
export async function attach(
  worktreeName: string,
  cols: number,
  rows: number,
  initialPane?: number
): Promise<string> {
  log.debug(`[term] attach(${worktreeName}) cols=${cols} rows=${rows} existing=${sessions.has(worktreeName)}`);
  if (sessions.has(worktreeName)) {
    await detach(worktreeName);
  }
  const tmuxSession = await findTmuxSessionForWorktree(worktreeName);
  const gName = groupedName();
  log.debug(`[term] attach(${worktreeName}) tmuxSession=${tmuxSession} gName=${gName} window=wm-${worktreeName}`);
  // Kill stale session with same name if it exists (leftover from previous server run)
  killTmuxSession(gName);
  const cmd = buildAttachCmd({ gName, worktreeName, tmuxSession, cols, rows, initialPane });
  const proc = Bun.spawn(["script", "-q", "-c", cmd, "/dev/null"], {
    stdin: "pipe",
    stdout: "pipe",
    stderr: "pipe",
    env: { ...Bun.env, TERM: "xterm-256color" },
  });
  const session: TerminalSession = {
    proc,
    groupedSessionName: gName,
    scrollback: [],
    scrollbackBytes: 0,
    onData: null,
    onExit: null,
    cancelled: false,
  };
  sessions.set(worktreeName, session);
  log.debug(`[term] attach(${worktreeName}) spawned pid=${proc.pid}`);
  // Read stdout → push to scrollback + callback
  (async () => {
    const reader = proc.stdout.getReader();
    // Fixed: use one persistent decoder with { stream: true } instead of a
    // fresh TextDecoder per chunk — a per-chunk decoder corrupts multi-byte
    // UTF-8 sequences that happen to be split across read() boundaries.
    const decoder = new TextDecoder();
    const encoder = new TextEncoder();
    try {
      while (true) {
        if (session.cancelled) break;
        const { done, value } = await reader.read();
        if (done) break;
        const str = decoder.decode(value, { stream: true });
        // Chunk may end mid-codepoint; the decoder holds the partial bytes.
        if (str.length === 0) continue;
        session.scrollbackBytes += encoder.encode(str).byteLength;
        session.scrollback.push(str);
        // Evict oldest chunks once the byte cap is exceeded.
        while (session.scrollbackBytes > MAX_SCROLLBACK_BYTES && session.scrollback.length > 0) {
          const removed = session.scrollback.shift()!;
          session.scrollbackBytes -= encoder.encode(removed).byteLength;
        }
        session.onData?.(str);
      }
    } catch (err) {
      // Stream closed normally — no action needed.
      // Log anything unexpected so it surfaces during debugging.
      if (!session.cancelled) {
        log.error(`[term] stdout reader error(${worktreeName})`, err);
      }
    }
  })();
  // Read stderr → log for diagnostics
  (async () => {
    const reader = proc.stderr.getReader();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        log.debug(`[term] stderr(${worktreeName}): ${new TextDecoder().decode(value).trimEnd()}`);
      }
    } catch { /* stream closed */ }
  })();
  proc.exited.then((exitCode) => {
    log.debug(`[term] proc exited(${worktreeName}) pid=${proc.pid} code=${exitCode}`);
    // Only clean up if this session is still the active one (not replaced by a new attach)
    if (sessions.get(worktreeName) === session) {
      session.onExit?.(exitCode);
      sessions.delete(worktreeName);
    } else {
      log.debug(`[term] proc exited(${worktreeName}) stale session, skipping cleanup`);
    }
    killTmuxSession(gName);
  });
  return worktreeName;
}
/** Tear down the terminal for a worktree: stop readers, kill the PTY process and viewer session. */
export async function detach(worktreeName: string): Promise<void> {
  const session = sessions.get(worktreeName);
  if (session === undefined) {
    log.debug(`[term] detach(${worktreeName}) no session found`);
    return;
  }
  log.debug(`[term] detach(${worktreeName}) killing pid=${session.proc.pid} tmux=${session.groupedSessionName}`);
  // Flag first so the stdout reader loop exits cleanly.
  session.cancelled = true;
  session.proc.kill();
  sessions.delete(worktreeName);
  killTmuxSession(session.groupedSessionName);
}
/** Forward input to the terminal's stdin; drops the input if no session exists. */
export function write(worktreeName: string, data: string): void {
  const session = sessions.get(worktreeName);
  if (session === undefined) {
    log.warn(`[term] write(${worktreeName}) NO SESSION - input dropped (${data.length} bytes)`);
    return;
  }
  const bytes = new TextEncoder().encode(data);
  try {
    session.proc.stdin.write(bytes);
    session.proc.stdin.flush();
  } catch (err) {
    log.error(`[term] write(${worktreeName}) stdin closed`, err);
  }
}
/** Resize the tmux window backing a worktree terminal to the client dimensions. */
export async function resize(worktreeName: string, cols: number, rows: number): Promise<void> {
  const session = sessions.get(worktreeName);
  if (session === undefined) return;
  const target = `${session.groupedSessionName}:wm-${worktreeName}`;
  const { exitCode, stderr } = await asyncTmux(
    ["tmux", "resize-window", "-t", target, "-x", String(cols), "-y", String(rows)],
  );
  if (exitCode !== 0) log.warn(`[term] resize failed: ${stderr}`);
}
export function getScrollback(worktreeName: string): string {
return sessions.get(worktreeName)?.scrollback.join("") ?? "";
}
/** Register output/exit callbacks for a worktree's terminal (no-op if absent). */
export function setCallbacks(
  worktreeName: string,
  onData: (data: string) => void,
  onExit: (exitCode: number) => void
): void {
  const session = sessions.get(worktreeName);
  if (session === undefined) return;
  session.onData = onData;
  session.onExit = onExit;
}
/** Select (and zoom) a pane in the worktree's window via the viewer session. */
export async function selectPane(worktreeName: string, paneIndex: number): Promise<void> {
  const session = sessions.get(worktreeName);
  if (session === undefined) {
    log.debug(`[term] selectPane(${worktreeName}) no session found`);
    return;
  }
  const windowTarget = `wm-${worktreeName}`;
  const target = `${session.groupedSessionName}:${windowTarget}.${paneIndex}`;
  log.debug(`[term] selectPane(${worktreeName}) pane=${paneIndex} target=${target}`);
  // Select and zoom concurrently; both are best-effort.
  const [selectRes, zoomRes] = await Promise.all([
    asyncTmux(["tmux", "select-pane", "-t", target]),
    asyncTmux(["tmux", "resize-pane", "-Z", "-t", target]),
  ]);
  log.debug(`[term] selectPane(${worktreeName}) select=${selectRes.exitCode} zoom=${zoomRes.exitCode}`);
}
/** Drop the output/exit callbacks for a worktree's terminal session. */
export function clearCallbacks(worktreeName: string): void {
  const session = sessions.get(worktreeName);
  if (session === undefined) return;
  session.onData = null;
  session.onExit = null;
}
import { $ } from "bun";
import { readEnvLocal, writeEnvLocal, readAllWorktreeEnvs, allocatePorts } from "./env";
import { expandTemplate, type ProfileConfig, type SandboxProfileConfig, type ServiceConfig } from "./config";
import { launchContainer, removeContainer } from "./docker";
import { log } from "./lib/log";
/** One row of `workmux list` output. */
export interface Worktree {
  branch: string;
  agent: string;
  // "✓" when a tmux window exists for the worktree
  mux: string;
  unmerged: string;
  path: string;
}
/** One row of `workmux status` output. */
export interface WorktreeStatus {
  worktree: string;
  status: string;
  elapsed: string;
  title: string;
}
// Expected header rows for the two workmux tables (used for sanity warnings).
const WORKTREE_HEADERS = ["BRANCH", "AGENT", "MUX", "UNMERGED", "PATH"] as const;
const STATUS_HEADERS = ["WORKTREE", "STATUS", "ELAPSED", "TITLE"] as const;
/**
 * Parse fixed-width table output into objects.
 * Column boundaries are inferred from the header line's spacing; each data
 * row is sliced at those positions, trimmed, and passed to `mapper`.
 * Optionally warns when the header row doesn't match `expectedHeaders`.
 */
function parseTable<T>(
  output: string,
  mapper: (cols: string[]) => T,
  expectedHeaders?: readonly string[],
): T[] {
  const rows = output.trim().split("\n").filter(Boolean);
  if (rows.length < 2) return [];
  const header = rows[0];
  if (expectedHeaders) {
    const actual = header.trim().split(/\s+/).map(h => h.toUpperCase());
    const allMatch = expectedHeaders.every((h, i) => actual[i] === h.toUpperCase());
    if (!allMatch) {
      log.warn(`[parseTable] unexpected headers: got [${actual.join(", ")}], expected [${expectedHeaders.join(", ")}]`);
    }
  }
  // A column starts wherever a non-space character follows a space (or at 0).
  const starts: number[] = [];
  let prevWasSpace = true;
  for (let i = 0; i < header.length; i++) {
    const isSpace = header[i] === " ";
    if (!isSpace && prevWasSpace) starts.push(i);
    prevWasSpace = isSpace;
  }
  const parseRow = (row: string): T => {
    const cols = starts.map((begin, idx) => {
      const end = idx + 1 < starts.length ? starts[idx + 1] : row.length;
      return row.slice(begin, end).trim();
    });
    return mapper(cols);
  };
  return rows.slice(1).map(parseRow);
}
/** Build env with TMUX set so workmux can resolve agent states outside tmux. */
function workmuxEnv(): Record<string, string | undefined> {
  const base = process.env;
  if (base.TMUX) return base;
  // Synthesize the default tmux server socket path for this user.
  const dir = base.TMUX_TMPDIR || "/tmp";
  const id = process.getuid?.() ?? 1000;
  return { ...base, TMUX: `${dir}/tmux-${id}/default,0,0` };
}
/** Run `workmux list` and parse its table into Worktree rows. */
export async function listWorktrees(): Promise<Worktree[]> {
  const output = await $`workmux list`.env(workmuxEnv()).text();
  const toWorktree = (cols: string[]): Worktree => ({
    branch: cols[0] ?? "",
    agent: cols[1] ?? "",
    mux: cols[2] ?? "",
    unmerged: cols[3] ?? "",
    path: cols[4] ?? "",
  });
  return parseTable(output, toWorktree, WORKTREE_HEADERS);
}
/** Run `workmux status` and parse its table into WorktreeStatus rows. */
export async function getStatus(): Promise<WorktreeStatus[]> {
  const output = await $`workmux status`.env(workmuxEnv()).text();
  const toStatus = (cols: string[]): WorktreeStatus => ({
    worktree: cols[0] ?? "",
    status: cols[1] ?? "",
    elapsed: cols[2] ?? "",
    title: cols[3] ?? "",
  });
  return parseTable(output, toStatus, STATUS_HEADERS);
}
/** Spawn a command, returning trimmed stdout on success or an error message on failure. */
async function tryExec(args: string[]): Promise<{ ok: true; stdout: string } | { ok: false; error: string }> {
  const child = Bun.spawn(args, { stdout: "pipe", stderr: "pipe" });
  // Drain both pipes before waiting so the child can't block on a full buffer.
  const outText = await new Response(child.stdout).text();
  const errText = await new Response(child.stderr).text();
  const code = await child.exited;
  if (code === 0) {
    return { ok: true, stdout: outText.trim() };
  }
  const msg = `${args.join(" ")} failed (exit ${code}): ${errText || outText}`;
  log.error(`[workmux:exec] ${msg}`);
  return { ok: false, error: msg };
}
export { readEnvLocal } from "./env";
/**
 * Build the single shell command line that launches the agent in a tmux pane.
 *
 * @param env           worktree env vars (from .env.local) used to expand the system-prompt template
 * @param agent         agent id; "codex" gets codex-specific flags, anything else is treated as claude
 * @param profileConfig profile supplying the optional systemPrompt template and envPassthrough list
 * @param isSandbox     true when the agent runs inside a Docker container; env then arrives via
 *                      Docker -e flags and claude additionally skips permission prompts
 * @param prompt        optional initial prompt appended as a double-quoted trailing argument
 * @returns a shell command string suitable for `tmux send-keys`
 */
function buildAgentCmd(env: Record<string, string>, agent: string, profileConfig: ProfileConfig, isSandbox: boolean, prompt?: string): string {
  const systemPrompt = profileConfig.systemPrompt
    ? expandTemplate(profileConfig.systemPrompt, env)
    : "";
  // Escape for double-quoted shell context: backslash, double-quote, dollar, backtick.
  const innerEscaped = systemPrompt.replace(/["\\$`]/g, "\\$&");
  const promptEscaped = prompt ? prompt.replace(/["\\$`]/g, "\\$&") : "";
  // For sandbox, env is passed via Docker -e flags, no inline prefix needed.
  // For non-sandbox, build inline env prefix for passthrough vars.
  // Merge host env with worktree env; worktree env takes precedence.
  const envPrefix = !isSandbox && profileConfig.envPassthrough?.length
    ? buildEnvPrefix(profileConfig.envPassthrough, { ...process.env, ...env })
    : "";
  // Empty prompt (or none) produces no trailing argument at all.
  const promptSuffix = promptEscaped ? ` "${promptEscaped}"` : "";
  if (agent === "codex") {
    // codex path: system prompt is delivered via -c developer_instructions.
    return systemPrompt
      ? `${envPrefix}codex --yolo -c "developer_instructions=${innerEscaped}"${promptSuffix}`
      : `${envPrefix}codex --yolo${promptSuffix}`;
  }
  // claude path: permission prompts are only skipped when isolated in the sandbox.
  const skipPerms = isSandbox ? " --dangerously-skip-permissions" : "";
  return systemPrompt
    ? `${envPrefix}claude${skipPerms} --append-system-prompt "${innerEscaped}"${promptSuffix}`
    : `${envPrefix}claude${skipPerms}${promptSuffix}`;
}
/** Render vars from `keys` present in `env` as an inline shell prefix, e.g. "KEY='val' KEY2='v2' ". */
function buildEnvPrefix(keys: string[], env: Record<string, string | undefined>): string {
  const assignments = keys.flatMap((key) => {
    const value = env[key];
    if (!value) return []; // unset and empty vars are both skipped
    // Single-quote the value; embedded quotes become the '\'' close-reopen sequence.
    return [`${key}='${value.replace(/'/g, "'\\''")}'`];
  });
  return assignments.length ? `${assignments.join(" ")} ` : "";
}
/**
 * Pure: parse `git worktree list --porcelain` output into a branch→path map.
 * Detached HEAD entries (line === "detached") carry no branch name to key on
 * and are therefore dropped.
 */
export function parseWorktreePorcelain(output: string): Map<string, string> {
  const WORKTREE_PREFIX = "worktree ";
  const BRANCH_PREFIX = "branch ";
  const result = new Map<string, string>();
  let pendingPath = "";
  for (const record of output.split("\n")) {
    if (record.startsWith(WORKTREE_PREFIX)) {
      // A "worktree <path>" line opens a new entry; remember its path.
      pendingPath = record.slice(WORKTREE_PREFIX.length);
      continue;
    }
    if (!record.startsWith(BRANCH_PREFIX)) continue;
    const branchName = record.slice(BRANCH_PREFIX.length).replace("refs/heads/", "");
    if (pendingPath) result.set(branchName, pendingPath);
  }
  return result;
}
/** Make sure a tmux server is reachable; start a detached session "0" when it is not. */
function ensureTmux(): void {
  const probe = Bun.spawnSync(["tmux", "list-sessions"], { stdout: "pipe", stderr: "pipe" });
  if (probe.exitCode === 0) return; // server already up
  const spawned = Bun.spawnSync(["tmux", "new-session", "-d", "-s", "0"]);
  if (spawned.exitCode === 0) {
    log.debug("[workmux] restarted tmux session");
  } else {
    // Another caller may have raced us to create the session.
    log.debug("[workmux] tmux session already exists (concurrent start)");
  }
}
/**
 * Sanitize user input into a valid git branch name.
 *
 * Lowercases, converts whitespace runs to "-", strips characters git forbids
 * (~ ^ : ? * [ ] \ and the "@{" sequence), collapses repeated "." "/" "-",
 * trims leading/trailing separators, and drops a trailing ".lock" suffix
 * (reserved by git). May return "" — callers must reject an empty result.
 */
function sanitizeBranchName(raw: string): string {
  return raw
    .toLowerCase()
    .replace(/\s+/g, "-")
    .replace(/[~^:?*\[\]\\]+/g, "")
    .replace(/@\{/g, "")
    .replace(/\.{2,}/g, ".")
    .replace(/\/{2,}/g, "/")
    .replace(/-{2,}/g, "-")
    .replace(/^[.\-/]+|[.\-/]+$/g, "")
    .replace(/\.lock$/i, "")
    // Removing ".lock" can expose a new trailing separator (e.g. "x-.lock" → "x-"),
    // so trim the tail once more.
    .replace(/[.\-/]+$/g, "");
}
/** Generate a random lowercase alphanumeric identifier of the given length. */
function randomName(len: number): string {
  const ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789";
  return Array.from(
    { length: len },
    () => ALPHABET[Math.floor(Math.random() * ALPHABET.length)]
  ).join("");
}
/** Extract the branch name from workmux add output (a line like "Branch: my-feature"). */
function parseBranchFromOutput(output: string): string | null {
  return /branch:\s*(\S+)/i.exec(output)?.[1] ?? null;
}
/** Options for addWorktree. All fields are optional. */
export interface AddWorktreeOpts {
  // Initial prompt forwarded to the agent (via `workmux add -p`, or sent directly in sandbox mode).
  prompt?: string;
  // Profile name recorded in the worktree's .env.local (defaults to "default").
  profile?: string;
  // Agent id recorded in .env.local and used to build the launch command (defaults to "claude").
  agent?: string;
  // When no branch name is given but a prompt is, let workmux auto-generate the name via -A.
  autoName?: boolean;
  // Profile settings: system-prompt template and env passthrough list.
  profileConfig?: ProfileConfig;
  // Run the agent inside a Docker sandbox container.
  isSandbox?: boolean;
  // Sandbox container settings; the container only launches when this is set and isSandbox is true.
  sandboxConfig?: SandboxProfileConfig;
  // Services needing port allocations written into the worktree's .env.local.
  services?: ServiceConfig[];
  // Host path of the main repo for the sandbox container (defaults to process.cwd()).
  mainRepoDir?: string;
}
/**
 * Create a workmux worktree and start the configured agent in its tmux window.
 *
 * Flow:
 *  1. ensure a tmux server exists;
 *  2. assemble `workmux add` args (branch naming, prompt, pane-command flags);
 *  3. run it and, for -A, parse the generated branch name from the output;
 *  4. locate the worktree dir via `git worktree list --porcelain`;
 *  5. write allocated service ports plus PROFILE/AGENT into .env.local;
 *  6. for profiles with a system prompt: rebuild panes ourselves — kill extras,
 *     optionally launch a sandbox container, send the agent command, and open
 *     a shell pane.
 *
 * @param rawBranch desired branch name (sanitized); when absent a random or
 *                  workmux-generated (-A) name is used
 * @param opts      see AddWorktreeOpts
 * @returns ok:true with the final branch and raw workmux output, else ok:false
 */
export async function addWorktree(
  rawBranch: string | undefined,
  opts?: AddWorktreeOpts
): Promise<{ ok: true; branch: string; output: string } | { ok: false; error: string }> {
  ensureTmux();
  const profile = opts?.profile ?? "default";
  const agent = opts?.agent ?? "claude";
  const profileConfig = opts?.profileConfig;
  const isSandbox = opts?.isSandbox === true;
  const hasSystemPrompt = !!profileConfig?.systemPrompt;
  const args: string[] = ["workmux", "add", "-b"]; // -b = background (don't switch tmux)
  let branch = "";
  let useAutoName = false;
  if (isSandbox) {
    // Sandbox: we manage panes ourselves, don't pass -p (we pass prompt to claude directly)
    args.push("-C"); // --no-pane-cmds
    // No -p: workmux can't use it with -C
    // No -A: auto-name needs -p which we can't pass
    if (rawBranch) {
      branch = sanitizeBranchName(rawBranch);
      if (!branch) {
        return { ok: false, error: `"${rawBranch}" is not a valid branch name after sanitization` };
      }
    } else {
      branch = randomName(8);
    }
    args.push(branch);
  } else {
    // Non-sandbox: skip default pane commands for profiles with a system prompt (custom pane setup)
    if (hasSystemPrompt) {
      args.push("-C"); // --no-pane-cmds
    }
    if (opts?.prompt) args.push("-p", opts.prompt);
    // Branch name resolution:
    // 1. User provided a name → sanitize and use it
    // 2. No name + prompt + autoName → let workmux generate via -A
    // 3. No name + (no prompt or no autoName) → random
    useAutoName = !rawBranch && !!opts?.prompt && !!opts?.autoName;
    if (rawBranch) {
      branch = sanitizeBranchName(rawBranch);
      if (!branch) {
        return { ok: false, error: `"${rawBranch}" is not a valid branch name after sanitization` };
      }
      args.push(branch);
    } else if (useAutoName) {
      args.push("-A");
    } else {
      branch = randomName(8);
      args.push(branch);
    }
  }
  log.debug(`[workmux:add] running: ${args.join(" ")}`);
  const execResult = await tryExec(args);
  if (!execResult.ok) return { ok: false, error: execResult.error };
  const result = execResult.stdout;
  // When using -A, extract the branch name from workmux output
  if (useAutoName) {
    const parsed = parseBranchFromOutput(result);
    if (!parsed) {
      return { ok: false, error: `Failed to parse branch name from workmux output: ${JSON.stringify(result)}` };
    }
    branch = parsed;
  }
  // tmux window name convention used throughout this module: "wm-<branch>".
  const windowTarget = `wm-${branch}`;
  // Parse worktree list once — used for both dir lookup and port allocation
  const porcelainResult = Bun.spawnSync(["git", "worktree", "list", "--porcelain"], { stdout: "pipe", stderr: "pipe" });
  const worktreeMap = parseWorktreePorcelain(new TextDecoder().decode(porcelainResult.stdout));
  const wtDir = worktreeMap.get(branch) ?? null;
  // Allocate ports + write PROFILE/AGENT to .env.local
  if (wtDir) {
    const allPaths = [...worktreeMap.values()];
    // Sibling worktrees' envs are read so allocatePorts can avoid ports already taken.
    const existingEnvs = await readAllWorktreeEnvs(allPaths, wtDir);
    const portAssignments = opts?.services ? allocatePorts(existingEnvs, opts.services) : {};
    await writeEnvLocal(wtDir, { ...portAssignments, PROFILE: profile, AGENT: agent });
  }
  // Read .env.local back — it feeds the agent command construction below.
  const env = wtDir ? await readEnvLocal(wtDir) : {};
  log.debug(`[workmux:add] branch=${branch} dir=${wtDir ?? "(not found)"} env=${JSON.stringify(env)}`);
  // For profiles with a system prompt, kill extra panes and send commands
  if (hasSystemPrompt && profileConfig) {
    // Kill extra panes (highest index first to avoid shifting)
    const paneCountResult = Bun.spawnSync(
      ["tmux", "list-panes", "-t", windowTarget, "-F", "#{pane_index}"],
      { stdout: "pipe", stderr: "pipe" }
    );
    if (paneCountResult.exitCode === 0) {
      const paneIds = new TextDecoder().decode(paneCountResult.stdout).trim().split("\n");
      // Kill all panes except pane 0
      for (let i = paneIds.length - 1; i >= 1; i--) {
        Bun.spawnSync(["tmux", "kill-pane", "-t", `${windowTarget}.${paneIds[i]}`]);
      }
    }
    // Launch Docker container for sandbox worktrees
    let containerName: string | undefined;
    if (isSandbox && opts?.sandboxConfig && wtDir) {
      const mainRepoDir = opts.mainRepoDir ?? process.cwd();
      containerName = await launchContainer({
        branch,
        wtDir,
        mainRepoDir,
        sandboxConfig: opts.sandboxConfig,
        services: opts.services ?? [],
        env,
      });
    }
    // Build and send agent command (pass prompt for sandbox — we handle it directly)
    const agentCmd = buildAgentCmd(env, agent, profileConfig, isSandbox, isSandbox ? opts?.prompt : undefined);
    if (containerName) {
      // Sandbox: enter container, run entrypoint visibly, then start agent
      const dockerExec = `docker exec -it -w ${wtDir} ${containerName} bash`;
      Bun.spawnSync(["tmux", "send-keys", "-t", `${windowTarget}.0`, dockerExec, "Enter"]);
      // Wait for shell to be ready, then chain entrypoint → agent
      // NOTE(review): fixed 500ms delay assumes the container shell is up by then — racy on slow hosts.
      await Bun.sleep(500);
      const entrypointThenAgent = `/usr/local/bin/entrypoint.sh && ${agentCmd}`;
      log.debug(`[workmux] sending to ${windowTarget}.0:\n${entrypointThenAgent}`);
      Bun.spawnSync(["tmux", "send-keys", "-t", `${windowTarget}.0`, entrypointThenAgent, "Enter"]);
      // Shell pane: host shell in worktree dir
      Bun.spawnSync(["tmux", "split-window", "-h", "-t", `${windowTarget}.0`, "-l", "25%", "-c", wtDir ?? process.cwd()]);
    } else {
      // Non-sandbox: run agent directly in pane 0
      log.debug(`[workmux] sending command to ${windowTarget}.0:\n${agentCmd}`);
      Bun.spawnSync(["tmux", "send-keys", "-t", `${windowTarget}.0`, agentCmd, "Enter"]);
      // Open a shell pane on the right (1/3 width) in the worktree dir
      Bun.spawnSync(["tmux", "split-window", "-h", "-t", `${windowTarget}.0`, "-l", "25%", "-c", wtDir ?? process.cwd()]);
    }
    // Keep focus on the agent pane (left)
    Bun.spawnSync(["tmux", "select-pane", "-t", `${windowTarget}.0`]);
  }
  return { ok: true, branch, output: result };
}
/** Tear down a worktree: remove its sandbox container, then force-remove it via workmux. */
export async function removeWorktree(name: string): Promise<{ ok: true; output: string } | { ok: false; error: string }> {
  log.debug(`[workmux:rm] running: workmux rm --force ${name}`);
  // Container first — workmux knows nothing about our sandbox Docker containers.
  await removeContainer(name);
  const exec = await tryExec(["workmux", "rm", "--force", name]);
  return exec.ok ? { ok: true, output: exec.stdout } : exec;
}
const TMUX_TIMEOUT_MS = 5_000;
/**
 * Run a tmux subprocess and await exit with a timeout; kills the process on
 * timeout so a wedged tmux client cannot hang the server.
 *
 * @param args argv array (args[0] is "tmux")
 * @param opts optional stdin payload piped to the process
 * @returns exitCode (-1 on timeout) and trimmed stderr text
 */
async function tmuxExec(args: string[], opts: { stdin?: Uint8Array } = {}): Promise<{ exitCode: number; stderr: string }> {
  const proc = Bun.spawn(args, {
    stdin: opts.stdin ?? "ignore",
    stdout: "ignore",
    stderr: "pipe",
  });
  const timeout = Bun.sleep(TMUX_TIMEOUT_MS).then(() => {
    proc.kill();
    return "timeout" as const;
  });
  const result = await Promise.race([proc.exited, timeout]);
  if (result === "timeout") {
    // Derive the message from the constant so it can't drift if the timeout changes.
    return { exitCode: -1, stderr: `timed out after ${TMUX_TIMEOUT_MS / 1000}s (agent may be busy)` };
  }
  const stderr = (await new Response(proc.stderr).text()).trim();
  return { exitCode: result, stderr };
}
/**
 * Deliver a prompt to a pane of a worktree's tmux window (default: pane 0).
 *
 * Delivery is two-phase: the optional preamble is typed as literal keystrokes
 * (so it appears inline in the agent's input), then the bulk text is loaded
 * into a uniquely-named tmux buffer and pasted (shows as [pasted text]),
 * avoiding send-keys escaping and size limits.
 *
 * @param branch   worktree branch; resolved to the tmux window "wm-<branch>"
 * @param text     bulk prompt payload (NUL bytes are stripped before buffering)
 * @param pane     target pane index within the window (default 0)
 * @param preamble short text typed before the paste
 * @returns ok:true on success, or ok:false with a stage-specific error message
 */
export async function sendPrompt(
  branch: string,
  text: string,
  pane = 0,
  preamble?: string,
): Promise<{ ok: true } | { ok: false; error: string }> {
  const windowName = `wm-${branch}`;
  // The window may live in any session — search all sessions for it.
  const session = await findWorktreeSession(windowName);
  if (!session) {
    return { ok: false, error: `tmux window "${windowName}" not found` };
  }
  const target = `${session}:${windowName}.${pane}`;
  log.debug(`[send:${branch}] target=${target} textBytes=${text.length}${preamble ? ` preamble=${preamble.length}b` : ""}`);
  // Type the preamble as regular keystrokes so it shows inline in the agent,
  // then paste the bulk payload via a tmux buffer (appears as [pasted text]).
  if (preamble) {
    // -l sends literal keys; "--" guards against a preamble starting with "-".
    const { exitCode, stderr } = await tmuxExec(["tmux", "send-keys", "-t", target, "-l", "--", preamble]);
    if (exitCode !== 0) {
      return { ok: false, error: `send-keys preamble failed${stderr ? `: ${stderr}` : ""}` };
    }
  }
  // Remove NUL bytes from the payload before handing it to tmux.
  const cleaned = text.replace(/\0/g, "");
  // Use a unique buffer name per invocation to avoid races when concurrent
  // sendPrompt calls overlap (e.g. two worktrees sending at the same time).
  const bufName = `wm-prompt-${Date.now()}-${Math.random().toString(36).slice(2, 7)}`;
  // Load text into a named tmux buffer via stdin — avoids all send-keys
  // escaping/chunking issues and handles any text size in a single operation.
  const load = await tmuxExec(["tmux", "load-buffer", "-b", bufName, "-"], { stdin: new TextEncoder().encode(cleaned) });
  if (load.exitCode !== 0) {
    return { ok: false, error: `load-buffer failed${load.stderr ? `: ${load.stderr}` : ""}` };
  }
  // Paste buffer into target pane; -d deletes the buffer after pasting.
  const paste = await tmuxExec(["tmux", "paste-buffer", "-b", bufName, "-t", target, "-d"]);
  if (paste.exitCode !== 0) {
    return { ok: false, error: `paste-buffer failed${paste.stderr ? `: ${paste.stderr}` : ""}` };
  }
  return { ok: true };
}
/** Find which tmux session (if any) contains a window with the given name. */
async function findWorktreeSession(windowName: string): Promise<string | null> {
  const proc = Bun.spawn(
    ["tmux", "list-windows", "-a", "-F", "#{session_name}:#{window_name}"],
    { stdout: "pipe", stderr: "pipe" }
  );
  const exitCode = await proc.exited;
  if (exitCode !== 0) return null; // no tmux server, or command failed
  const listing = (await new Response(proc.stdout).text()).trim();
  if (!listing) return null;
  for (const entry of listing.split("\n")) {
    // Split only on the first ":" — window names may themselves contain colons.
    const sep = entry.indexOf(":");
    if (sep === -1) continue;
    if (entry.slice(sep + 1) === windowName) return entry.slice(0, sep);
  }
  return null;
}
/** Switch the attached tmux client to the named worktree via `workmux open`. */
export async function openWorktree(name: string): Promise<{ ok: true; output: string } | { ok: false; error: string }> {
  const exec = await tryExec(["workmux", "open", name]);
  return exec.ok ? { ok: true, output: exec.stdout } : exec;
}
/** Merge a worktree's branch back via `workmux merge`, removing its sandbox container first. */
export async function mergeWorktree(name: string): Promise<{ ok: true; output: string } | { ok: false; error: string }> {
  log.debug(`[workmux:merge] running: workmux merge ${name}`);
  // Container first — workmux knows nothing about our sandbox Docker containers.
  await removeContainer(name);
  const exec = await tryExec(["workmux", "merge", name]);
  return exec.ok ? { ok: true, output: exec.stdout } : exec;
}
{
"compilerOptions": {
"target": "ESNext",
"module": "ESNext",
"moduleResolution": "bundler",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"outDir": "dist",
"rootDir": "src",
"typeRoots": ["../node_modules/@types"],
"types": ["bun"]
},
"include": ["src/**/*.ts"]
}