/**
* All prompt templates for Hive commands.
* These capture the behavioral specs from the original Claude Code commands.
*/
// ─── Shared ────────────────────────────────────────────────────────────
/**
 * Canonical shape of features.md, embedded verbatim in every prompt that asks
 * the model to generate or validate that file.
 */
export const FEATURES_MD_FORMAT = `\`\`\`markdown
# Features
## Feature N:
- Description:
- Dependencies:
- Status: pending
\`\`\`
Format rules:
- File starts with \`# Features\`
- Each feature: \`## Feature N: \` (N sequential from 1)
- Exactly 3 fields per feature: Description, Dependencies, Status
- Status: pending | in_progress | done
- Dependencies: \`Feature N\` references or \`none\``;
/**
 * Sizing/grouping/naming rules for features, shared by every
 * feature-planning prompt (spec chain and to-features-md).
 */
export const FEATURE_GENERATION_RULES = `Feature generation rules:
1. Each feature must be independent and implementable (unless deps listed)
2. Granularity: 1-4 hours of work. If bigger, split it.
3. Group related features together
4. Infrastructure/refactoring before new features in same module
5. Clear, action-oriented titles (e.g., "User Authentication with JWT", not "Auth")
6. Description explains WHAT and WHY, not HOW`;
// ─── /hive:spec chain prompts ──────────────────────────────────────────
/**
 * First step of the /hive:spec chain: codebase reconnaissance. Its report is
 * consumed by the planner step (presumably injected where later prompts use
 * the {previous} placeholder — see specPlannerTask).
 */
export const SPEC_SCOUT_TASK = `Explore the codebase thoroughly. Identify:
- Languages, frameworks, and tech stack
- Project structure (directories, key files, entry points)
- Components, services, utilities, configs, tests
- File counts and patterns
- Dependencies between modules
- "Hub" files imported by many others
- Missing functionality, refactoring opportunities, cleanup tasks
Report findings in a structured format. Be thorough but concise.`;
/**
 * Second step of the /hive:spec chain: turn the scout's analysis (spliced in
 * at the literal `{previous}` placeholder) into a features.md plan.
 *
 * @param userContext Optional user-supplied feature wishlist; when present it
 *   is prioritized ahead of findings derived from the analysis.
 */
export function specPlannerTask(userContext?: string): string {
  const base = `Based on this codebase analysis, generate a complete features.md.
${FEATURES_MD_FORMAT}
${FEATURE_GENERATION_RULES}
Extract features from ALL categories:
- New functionality: missing CRUD ops, missing endpoints, missing pages
- Refactoring: high complexity, files >300 lines, god classes >10 methods
- Cleanup: missing tests, missing docs, unused exports
Generate ALL features found. Number sequentially. Set correct dependencies.`;
  // Every variant ends with the scout's analysis appended after the rules.
  const withAnalysis = `${base}\n\nCodebase analysis:\n\n{previous}`;
  if (!userContext) {
    return withAnalysis;
  }
  // User-described features lead the prompt so they take priority.
  return `The user has described these specific features they want:\n\n${userContext}\n\nPrioritize the user's features. Complement with analysis findings.\n\n${withAnalysis}`;
}
/**
 * Final step of the /hive:spec chain: audit the planner's features.md and
 * either emit a corrected version or approve it unchanged. The `{previous}`
 * token is a placeholder for the planner's output (presumably substituted by
 * the chain runner — same convention as specPlannerTask).
 */
export const SPEC_REVIEWER_TASK = `Review this features.md plan. Check for:
- Missing features that should be extracted from the analysis
- Incorrect or missing dependencies
- Features that are too large (>4 hours) and should be split
- Features that are too small and should be merged
- Format compliance with the features.md spec
- Sequential numbering correctness
If issues found, output the CORRECTED features.md.
If plan is solid, output it unchanged with a brief "Plan approved" header.
Plan to review:
{previous}`;
// ─── /hive:map-codebase ────────────────────────────────────────────────
/**
 * Prompt for /hive:map-codebase: scan all sources and emit two artifacts —
 * .hive/codebase-map.json (machine-readable, schema inlined below) and
 * .hive/summary.md (human-readable).
 */
export const MAP_CODEBASE_TASK = `Analyze the entire codebase and create a comprehensive map.
Scan all source files (exclude node_modules, dist, build, .git, .hive, __pycache__, .next, .venv).
For each file, identify:
- Type (component, service, utility, config, test)
- Exports and imports
- Functions and signatures
- Classes and methods
- Complexity metrics (estimate)
- Whether it has tests
- Whether it has documentation
Map dependencies between modules. Identify "hub" files (imported by 3+ others).
Output TWO things:
1. Write \`.hive/codebase-map.json\` with this schema:
{
"project": { "name": "", "language": "", "framework": "", "root": "" },
"stats": { "totalFiles": 0, "components": 0, "services": 0, "utilities": 0, "configs": 0, "tests": 0 },
"files": [{ "path": "...", "type": "...", "exports": [], "imports": [], "functions": [], "classes": [], "complexity": 0, "lines": 0, "hasTests": false, "hasDocs": false }],
"dependencies": [{ "from": "...", "to": "...", "type": "import" }],
"hubFiles": ["..."]
}
2. Write \`.hive/summary.md\` with a human-readable summary.
Create \`.hive/\` directory if it doesn't exist.`;
// ─── /hive:to-features-md ──────────────────────────────────────────────
/**
 * Prompt for /hive:to-features-md: turn the artifacts produced by
 * /hive:map-codebase into a features.md, previewing per-category counts
 * before writing the file.
 */
export const TO_FEATURES_MD_TASK = `Read .hive/codebase-map.json and .hive/summary.md.
Generate a complete features.md from the analysis.
${FEATURES_MD_FORMAT}
${FEATURE_GENERATION_RULES}
Extract ALL feature categories:
- New functionality: missing CRUD, missing endpoints, missing pages
- Refactoring: high complexity, large files, god classes
- Cleanup: missing tests, unused exports
Show a preview summary before writing:
New functionality: X features
Refactoring: Y features
Cleanup: Z features
Total: N features
Then write the file to ./features.md.`;
// ─── /hive:run ─────────────────────────────────────────────────────────
/**
 * Prompt for /hive:run: a sequential implementation loop over features.md,
 * driving each feature through pending → in_progress → done and reporting
 * progress after every feature.
 */
export const RUN_PROMPT = `You are running Hive in sequential mode — implement features one at a time from features.md.
Read features.md and parse all features.
IMPLEMENTATION LOOP — repeat until all features are done:
1. CHECK PROGRESS: Count features by status (pending, in_progress, done).
If all done, congratulate and stop.
2. FIND NEXT: Find the first pending feature whose dependencies are all done.
If none ready but some blocked, report blocked features and stop.
3. MARK IN_PROGRESS: Edit features.md — change that feature's status from "pending" to "in_progress".
4. IMPLEMENT: Read the description carefully. Analyze the codebase. Write the code changes. Run tests if available.
5. MARK DONE: If successful, edit features.md — change status to "done".
If issues you can't resolve, change back to "pending" and move on.
6. REPORT PROGRESS:
Feature N: — done
Progress: [##########----------] 5/10 (50%)
Then continue to the next feature.
RULES:
- When updating status, use the Edit tool to change ONLY the status line
- If a feature fails repeatedly, mark it pending and skip
- Always leave features.md clean (no features stuck as in_progress)
- If you stop mid-run, mark in-progress features back to pending`;
// ─── /hive:q ───────────────────────────────────────────────────────────
/**
 * Prompt for /hive:q: read-only codebase Q&A.
 *
 * @param question The user's question, appended verbatim after the rules.
 */
export function qPrompt(question: string): string {
  // Fixed preamble (exploration rules), then the caller's question.
  const preamble = `You are in Hive exploration mode. Answer questions about the codebase.
RULES:
- READ ONLY — do NOT create or modify any files
- Be specific — include file paths and line numbers
- Be concise — answer directly
- If .hive/codebase-map.json exists, use it
- If the answer suggests a feature gap, mention /hive:spec
Quick-scan the project structure first, then answer:
`;
  return preamble + question;
}
// ─── /hive:worktree-split ──────────────────────────────────────────────
/**
 * Prompt for /hive:worktree-split: plan `count` conflict-free parallel
 * worktrees from the current features.md.
 *
 * @param count           Number of worktrees to create.
 * @param featuresContent Raw features.md contents, embedded verbatim.
 * @param featureGroups   Optional explicit feature grouping; when given it is
 *   surfaced before STEP 1 and replaces the automatic grouping algorithm.
 */
export function worktreeSplitPrompt(
  count: number,
  featuresContent: string,
  featureGroups?: string,
): string {
  // Pre-compute the two sections that vary with featureGroups.
  const groupingHeader = featureGroups ? `EXPLICIT GROUPING: ${featureGroups}\n\n` : "";
  const step4Strategy = featureGroups
    ? "Use the explicit grouping provided."
    : `Algorithm:
- Remove append_only/per_feature_files from overlap analysis
- Group features that share willModify files together
- Balance workload (no group >60% of features)
- Cross-group dependencies create integration contracts, not forced grouping`;
  return `You are running Hive worktree split. Create ${count} zero-conflict parallel worktrees.
FEATURES.MD:
${featuresContent}
${groupingHeader}STEP 1: Analyze the project structure.
- Read package.json / pyproject.toml / go.mod to identify language + framework
- Use find/ls/grep to discover source files
- Identify "hub" files imported by many others
STEP 2: For each pending feature, predict which files it will touch:
- willCreate: new files the feature will create
- willModify: existing files the feature will edit
- willImport: files the feature reads but should NOT modify
STEP 3: Identify shared files (appear in willModify for multiple features).
Assign each a strategy:
- append_only: package.json, requirements.txt, .env — each wt only ADDS entries
- per_feature_files: type aggregators — each wt creates its own file
- single_owner: entry points — one wt owns it, others create nested files
- deferred: route registries — no wt modifies, each creates ROUTES_TO_REGISTER.md
STEP 4: Group features into ${count} worktrees.
${step4Strategy}
STEP 5: Build territory map for each worktree:
- ownedFiles: willCreate + willModify exclusive to this group
- ownedDirs: directories where all files belong to this worktree
- readOnlyFiles: willImport files owned by other worktrees
- sharedFiles: files with special strategies
STEP 6: Determine merge order based on dependency flow.
STEP 7: For features that depend on another worktree's code, define integration contracts:
- Provider: file path + exported functions/types (signatures)
- Consumer: stub code to use until merge
Show me the territory assignment, then call the hive_execute_split tool with your decisions.`;
}
// ─── /hive:worktree-merge ──────────────────────────────────────────────
/**
 * Prompt for /hive:worktree-merge.
 *
 * @param worktreeArg Optional worktree selector (directory name, branch, or
 *   index). When omitted, the prompt asks the user to pick interactively.
 */
export function worktreeMergePrompt(worktreeArg?: string): string {
  // No target supplied: list worktrees and let the user choose.
  if (!worktreeArg) {
    return `You are running Hive worktree merge.
1. List all git worktrees (git worktree list)
2. Show me the available worktrees and ask which one to merge
3. Once I choose, check for uncommitted changes and feature status
4. Call the hive_execute_merge tool with the worktree path and branch`;
  }
  // Target supplied: resolve it, run safety checks, then merge.
  return `You are running Hive worktree merge for: ${worktreeArg}
1. List all git worktrees (git worktree list)
2. Find the worktree matching "${worktreeArg}" (by directory name, branch, or index)
3. Check for uncommitted changes — if found, ask me what to do
4. Check if assigned features are done in features.md (warn if not, but don't block)
5. Call the hive_execute_merge tool with the worktree path and branch`;
}
// ─── PROMPT.md template for worktrees ──────────────────────────────────
/**
 * Inputs for generatePromptMd: everything one worktree agent needs —
 * assigned features, file-ownership territory, integration contracts,
 * and project context.
 */
export interface WorktreePromptData {
// Identifier rendered in the generated document's title.
worktreeId: string;
// Git branch this worktree works on (rendered under "## Your Branch").
branchName: string;
// Features assigned to this worktree, rendered as "### Feature N" blocks.
features: Array<{
number: number;
title: string;
description: string;
dependencies: string;
}>;
// Territory the agent may freely create/edit.
ownedFiles: string[];
// Owned directories (rendered with a `**` glob suffix).
ownedDirs: string[];
// Files the agent may import but must not modify.
readOnlyFiles: string[];
// Other worktrees' territory — must not be touched at all.
forbiddenDirs: string[];
// Files needing a special conflict-avoidance strategy, with per-file instructions.
sharedFiles: Array<{
path: string;
strategy: string;
instructions: string;
}>;
// Contracts this worktree provides: exports/types whose signatures must not change.
provides: Array<{
file: string;
exports: string[];
types: string[];
}>;
// Contracts this worktree consumes: stub code to use until the source worktree merges.
consumes: Array<{
sourceWt: string;
file: string;
stubCode: string;
}>;
// Free-text project context rendered in the "Project Context" section.
techStack: string;
testingInfo: string;
conventions: string;
}
/**
 * Render the PROMPT.md placed in each parallel worktree: assigned features,
 * file-ownership rules, integration contracts, and project context.
 * Empty sections fall back to explicit "(none)"-style placeholders.
 */
export function generatePromptMd(data: WorktreePromptData): string {
  // Markdown bullet for a single path, e.g. "- `src/x.ts`".
  const bullet = (path: string): string => `- \`${path}\``;

  // One "### Feature N" block per assigned feature, in order.
  const featureBlocks = data.features
    .map((f) => `### Feature ${f.number}: ${f.title}\n${f.description}\nDependencies: ${f.dependencies}`)
    .join("\n\n");

  // Owned territory: directories (glob-suffixed) first, then individual files.
  const ownedEntries = [
    ...data.ownedDirs.map((d) => `- \`${d}**\``),
    ...data.ownedFiles.map(bullet),
  ];
  const ownedList =
    ownedEntries.join("\n") || "- (none specified — create new files in your feature directories)";

  const readOnlyList = data.readOnlyFiles.map(bullet).join("\n") || "- (none)";
  const forbiddenList = data.forbiddenDirs.map(bullet).join("\n") || "- (none)";

  const sharedList =
    data.sharedFiles.length === 0
      ? "(none)"
      : data.sharedFiles
          .map((s) => `#### \`${s.path}\` (Strategy: ${s.strategy})\n${s.instructions}`)
          .join("\n\n");

  // Integration contracts: what this worktree must keep stable for others,
  // and the stubs it should use for not-yet-merged code it depends on.
  const contractParts: string[] = [];
  if (data.provides.length > 0) {
    contractParts.push(`### You Provide (other agents depend on your code)\n`);
    for (const p of data.provides) {
      contractParts.push(
        `#### \`${p.file}\`\nExport these — do NOT change signatures:\n- ${[...p.exports, ...p.types].join(", ")}\n\n`,
      );
    }
  }
  if (data.consumes.length > 0) {
    contractParts.push(`### You Consume (code that does not exist yet)\n`);
    for (const c of data.consumes) {
      contractParts.push(
        `#### From ${c.sourceWt}: \`${c.file}\`\nCreate a local stub at \`__stubs__/\` with this interface:\n\`\`\`\n${c.stubCode}\n\`\`\`\nImport from the stub. After merge, imports will be rewired.\n\n`,
      );
    }
  }
  const contracts = contractParts.join("");

  return `# Worktree ${data.worktreeId} - Agent Instructions
You are an autonomous coding agent working in a **parallel worktree**. Other
agents are simultaneously working on other features in separate worktrees.
To prevent merge conflicts, you MUST follow the file ownership rules below.
## Your Branch
\`${data.branchName}\`
## Assigned Features
${featureBlocks}
Implement features in the order listed above.
---
## FILE OWNERSHIP RULES (CRITICAL - DO NOT VIOLATE)
### Owned Files & Directories (you CAN freely create/edit)
${ownedList}
You may also create NEW files within your owned directories.
### Read-Only Files (you can IMPORT but MUST NOT modify)
${readOnlyList}
### Forbidden (DO NOT touch - belongs to other worktrees)
${forbiddenList}
### Shared Files (special handling required)
${sharedList}
---
## INTEGRATION CONTRACTS
${contracts || "(none — all features are independent)"}
---
## Project Context
- **Tech Stack**: ${data.techStack}
- **Testing**: ${data.testingInfo}
- **Conventions**: ${data.conventions}
---
## Instructions
1. Implement features in the listed order.
2. BEFORE creating/editing any file, verify it is in your owned territory.
3. For shared files, follow the specific strategy listed above.
4. For consumed contracts, create stubs and import from them.
5. After each feature, commit with a descriptive message.
6. Report progress after completing each feature.
`;
}
// ─── Warp Launch Configuration ─────────────────────────────────────────
/** A worktree to launch: its directory and the command to run inside it. */
export interface WarpWorktree {
// Worktree directory, used as the pane's cwd.
path: string;
// Shell command executed in the pane (e.g. the agent CLI invocation).
command: string;
}
/**
 * Escape a string for embedding in a double-quoted YAML scalar.
 * In YAML double-quoted style, backslash is the escape character, so
 * backslashes must be doubled BEFORE quotes are escaped — otherwise the
 * backslash we insert for a quote would itself be re-escaped.
 */
function yamlEscape(s: string): string {
  return s.replace(/\\/g, "\\\\").replace(/"/g, '\\"');
}
/**
 * Build a Warp launch-configuration YAML document that opens one pane per
 * worktree and runs its agent command. Three layouts: a single pane (N=1),
 * two vertical panes (N=2), or a two-row grid (N>=3) with ceil(N/2) panes
 * on top.
 *
 * NOTE(review): wt.path is interpolated without yamlEscape — a path
 * containing a double quote or backslash would corrupt the YAML; confirm
 * callers guarantee safe paths.
 */
export function generateWarpYaml(worktrees: WarpWorktree[]): string {
// Single worktree: one tab, no split_direction needed.
if (worktrees.length === 1) {
const wt = worktrees[0];
return `---
name: Hive Agents
windows:
- tabs:
- title: Hive - 1 Agent
layout:
cwd: "${wt.path}"
commands:
- exec: "${yamlEscape(wt.command)}"
`;
}
// Two worktrees: one vertical split, a pane per worktree.
if (worktrees.length === 2) {
return `---
name: Hive Agents
windows:
- tabs:
- title: Hive - 2 Agents
layout:
split_direction: vertical
panes:
${worktrees.map((wt) => ` - cwd: "${wt.path}"\n commands:\n - exec: "${yamlEscape(wt.command)}"`).join("\n")}
`;
}
// 3+: top row = ceil(N/2), bottom row = floor(N/2)
const topCount = Math.ceil(worktrees.length / 2);
const topRow = worktrees.slice(0, topCount);
const bottomRow = worktrees.slice(topCount);
// Renders one pane entry at the nesting depth of a horizontal-row child.
const formatPane = (wt: WarpWorktree) =>
` - cwd: "${wt.path}"\n commands:\n - exec: "${yamlEscape(wt.command)}"`;
let yaml = `---
name: Hive Agents
windows:
- tabs:
- title: Hive - ${worktrees.length} Agents
layout:
split_direction: vertical
panes:
- split_direction: horizontal
panes:
${topRow.map(formatPane).join("\n")}`;
if (bottomRow.length === 1) {
// A lone bottom pane sits directly under the vertical split, one level
// shallower than a row child, so its indentation is reduced.
// NOTE(review): this assumes formatPane output begins with exactly 16
// spaces — verify against the pane template's actual indentation.
yaml += `\n${formatPane(bottomRow[0]).replace(/^ {16}/, " ")}`;
} else if (bottomRow.length > 1) {
// Multiple bottom panes get their own horizontal row, mirroring the top.
yaml += `\n - split_direction: horizontal\n panes:\n${bottomRow.map(formatPane).join("\n")}`;
}
return yaml + "\n";
}
// ─── tmux Launch ───────────────────────────────────────────────────────
/**
 * Generate tmux commands to create a session with N panes in a tiled layout.
 * Returns an array of shell commands to execute sequentially; returns an
 * empty array when there are no worktrees (previously crashed dereferencing
 * `worktrees[0]`).
 *
 * @param worktrees   One entry per pane; each entry's command is sent to its pane.
 * @param sessionName tmux session name (default "hive").
 */
export function generateTmuxCommands(worktrees: WarpWorktree[], sessionName = "hive"): string[] {
  if (worktrees.length === 0) {
    return [];
  }
  // Escape for a single-quoted shell context: close the quote, emit an
  // escaped quote, reopen — the standard '\'' idiom. Without this, a
  // command containing an apostrophe would break the send-keys line.
  const shQuote = (cmd: string): string => cmd.replace(/'/g, "'\\''");
  const cmds: string[] = [];
  // Create detached session with first pane
  cmds.push(`tmux new-session -d -s ${sessionName} -x 200 -y 50`);
  cmds.push(`tmux send-keys -t ${sessionName}:0.0 '${shQuote(worktrees[0].command)}' Enter`);
  for (let i = 1; i < worktrees.length; i++) {
    // Alternate horizontal/vertical splits for a grid-like layout
    const splitFrom = i < 2 ? 0 : Math.floor((i - 1) / 2);
    const direction = i % 2 === 1 ? "-h" : "-v";
    cmds.push(`tmux split-window -t ${sessionName}:0.${splitFrom} ${direction}`);
    cmds.push(`tmux send-keys -t ${sessionName}:0.${i} '${shQuote(worktrees[i].command)}' Enter`);
  }
  // Even out the layout and attach (select-layout tiled normalizes whatever
  // shape the alternating splits produced).
  cmds.push(`tmux select-layout -t ${sessionName} tiled`);
  cmds.push(`tmux attach -t ${sessionName}`);
  return cmds;
}