From 1a1ca389f464612c4d1fa25b979ed4178efc8ea9 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Thu, 26 Feb 2026 16:32:17 +0800 Subject: [PATCH] Add role and skill router templates for v3 style execution - Introduced a comprehensive role template for generating per-role execution detail files, including purpose, style rules, and structured phases. - Added a skill router template to facilitate role-based routing in SKILL.md, detailing input parsing, role registry, orchestration mode, and shared infrastructure. - Both templates adhere to v3 conventions, emphasizing clarity and structured decision-making through markdown tables and diagrams. --- .../skills/workflow-lite-plan copy/SKILL.md | 177 -- .../phases/01-lite-plan.md | 770 -------- .../phases/02-lite-execute.md | 776 -------- .../team-skill-designer-v2/SKILL.md | 274 +++ .../phases/01-requirements-collection.md | 200 +++ .../phases/02-pattern-analysis.md | 180 ++ .../phases/03-skill-generation.md | 239 +++ .../phases/04-integration-verification.md | 183 ++ .../phases/05-validation.md | 209 +++ .../specs/collaboration-patterns.md | 1555 +++++++++++++++++ .../specs/quality-standards.md | 242 +++ .../specs/team-design-patterns.md | 590 +++++++ .../templates/role-command-template.md | 820 +++++++++ .../templates/role-template.md | 586 +++++++ .../templates/skill-router-template.md | 360 ++++ 15 files changed, 5438 insertions(+), 1723 deletions(-) delete mode 100644 .claude/skills/workflow-lite-plan copy/SKILL.md delete mode 100644 .claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md delete mode 100644 .claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/SKILL.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/phases/01-requirements-collection.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/phases/02-pattern-analysis.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/phases/03-skill-generation.md 
create mode 100644 .claude/skills_lib/team-skill-designer-v2/phases/04-integration-verification.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/phases/05-validation.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/specs/collaboration-patterns.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/specs/quality-standards.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/specs/team-design-patterns.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/templates/role-command-template.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/templates/role-template.md create mode 100644 .claude/skills_lib/team-skill-designer-v2/templates/skill-router-template.md diff --git a/.claude/skills/workflow-lite-plan copy/SKILL.md b/.claude/skills/workflow-lite-plan copy/SKILL.md deleted file mode 100644 index 3bb5cf0a..00000000 --- a/.claude/skills/workflow-lite-plan copy/SKILL.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -name: workflow-lite-plan -description: Lightweight planning and execution skill - route to lite-plan or lite-execute with prompt enhancement. Triggers on "workflow:lite-plan", "workflow:lite-execute". -allowed-tools: Skill, Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep ---- - -# Workflow Lite-Plan - -Unified lightweight planning and execution skill. Routes to lite-plan (planning pipeline) or lite-execute (execution engine) based on trigger, with prompt enhancement for both modes. 
- -## Architecture Overview - -``` -┌─────────────────────────────────────────────────────┐ -│ SKILL.md (Router + Prompt Enhancement) │ -│ → Detect mode → Enhance prompt → Dispatch to phase │ -└──────────────────────┬──────────────────────────────┘ - │ - ┌───────────┼───────────┐ - ↓ ↓ - ┌───────────┐ ┌───────────┐ - │ lite-plan │ │lite-execute│ - │ Phase 1 │ │ Phase 2 │ - │ Plan+Exec │─direct──→│ Standalone │ - └───────────┘ └───────────┘ -``` - -## Mode Detection & Routing - -```javascript -const args = $ARGUMENTS -const mode = detectMode() - -function detectMode() { - if (skillName === 'workflow:lite-execute') return 'execute' - return 'plan' // default: workflow:lite-plan -} -``` - -**Routing Table**: - -| Trigger | Mode | Phase Document | Description | -|---------|------|----------------|-------------| -| `workflow:lite-plan` | plan | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Full planning pipeline (explore → plan → confirm → execute) | -| `workflow:lite-execute` | execute | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Standalone execution (in-memory / prompt / file) | - -## Interactive Preference Collection - -Before dispatching, collect workflow preferences via AskUserQuestion: - -```javascript -// ★ 统一 auto mode 检测:-y/--yes 从 $ARGUMENTS 或 ccw 传播 -const autoYes = /\b(-y|--yes)\b/.test($ARGUMENTS) - -if (autoYes) { - // 自动模式:跳过所有询问,使用默认值 - workflowPreferences = { autoYes: true, forceExplore: false } -} else if (mode === 'plan') { - const prefResponse = AskUserQuestion({ - questions: [ - { - question: "是否跳过所有确认步骤(自动模式)?", - header: "Auto Mode", - multiSelect: false, - options: [ - { label: "Interactive (Recommended)", description: "交互模式,包含确认步骤" }, - { label: "Auto", description: "跳过所有确认,自动执行" } - ] - }, - { - question: "是否强制执行代码探索阶段?", - header: "Exploration", - multiSelect: false, - options: [ - { label: "Auto-detect (Recommended)", description: "智能判断是否需要探索" }, - { label: "Force explore", description: "强制执行代码探索" } - ] - } - ] - }) - 
workflowPreferences = { - autoYes: prefResponse.autoMode === 'Auto', - forceExplore: prefResponse.exploration === 'Force explore' - } -} else if (mode !== 'plan') { - // Execute mode (standalone, not in-memory) - const prefResponse = AskUserQuestion({ - questions: [ - { - question: "是否跳过所有确认步骤(自动模式)?", - header: "Auto Mode", - multiSelect: false, - options: [ - { label: "Interactive (Recommended)", description: "交互模式,包含确认步骤" }, - { label: "Auto", description: "跳过所有确认,自动执行" } - ] - } - ] - }) - workflowPreferences = { - autoYes: prefResponse.autoMode === 'Auto', - forceExplore: false - } -} -``` - -**workflowPreferences** is passed to phase execution as context variable, referenced as `workflowPreferences.autoYes` and `workflowPreferences.forceExplore` within phases. - -## Prompt Enhancement - -After collecting preferences, enhance context and dispatch: - -```javascript -// Step 0: Parse --from-analysis handoff (from analyze-with-file) -const fromAnalysisMatch = args.match(/--from-analysis\s+(\S+)/) -if (fromAnalysisMatch) { - const handoffPath = fromAnalysisMatch[1] - workflowPreferences.analysisHandoff = JSON.parse(Read(handoffPath)) - workflowPreferences.forceExplore = false - // Strip flag from args, keep task description - args = args.replace(/--from-analysis\s+\S+\s*/, '').trim() -} - -// Step 1: Check for project context files -const hasProjectTech = fileExists('.workflow/project-tech.json') -const hasProjectGuidelines = fileExists('.workflow/project-guidelines.json') - -// Step 2: Log available context -if (hasProjectTech) { - console.log('Project tech context available: .workflow/project-tech.json') -} -if (hasProjectGuidelines) { - console.log('Project guidelines available: .workflow/project-guidelines.json') -} - -// Step 3: Dispatch to phase (workflowPreferences available as context) -if (mode === 'plan') { - // Read phases/01-lite-plan.md and execute -} else { - // Read phases/02-lite-execute.md and execute -} -``` - -## Execution Flow - -### Plan Mode 
- -``` -1. Collect preferences via AskUserQuestion (autoYes, forceExplore) -2. Enhance prompt with project context availability -3. Read phases/01-lite-plan.md -4. Execute lite-plan pipeline (Phase 1-5 within the phase doc) -5. lite-plan Phase 5 directly reads and executes Phase 2 (lite-execute) with executionContext -``` - -### Execute Mode - -``` -1. Collect preferences via AskUserQuestion (autoYes) -2. Enhance prompt with project context availability -3. Read phases/02-lite-execute.md -4. Execute lite-execute pipeline (input detection → execution → review) -``` - -## Usage - -Plan mode and execute mode are triggered by skill name routing (see Mode Detection). Workflow preferences (auto mode, force explore) are collected interactively via AskUserQuestion before dispatching to phases. - -**Plan mode**: Task description provided as arguments → interactive preference collection → planning pipeline -**Execute mode**: Task description, file path, or in-memory context → interactive preference collection → execution pipeline - -## Phase Reference Documents - -| Phase | Document | Purpose | -|-------|----------|---------| -| 1 | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Complete planning pipeline: exploration, clarification, planning, confirmation, handoff | -| 2 | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Complete execution engine: input modes, task grouping, batch execution, code review | diff --git a/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md b/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md deleted file mode 100644 index 1469bfba..00000000 --- a/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md +++ /dev/null @@ -1,770 +0,0 @@ -# Phase 1: Lite-Plan - -Complete planning pipeline: task analysis, multi-angle exploration, clarification, adaptive planning, confirmation, and execution handoff. 
- ---- - -## Overview - -Intelligent lightweight planning command with dynamic workflow adaptation based on task complexity. Focuses on planning phases (exploration, clarification, planning, confirmation) and delegates execution to Phase 2 (lite-execute). - -**Core capabilities:** -- Intelligent task analysis with automatic exploration detection -- Dynamic code exploration (cli-explore-agent) when codebase understanding needed -- Interactive clarification after exploration to gather missing information -- Adaptive planning: Low complexity → Direct Claude; Medium/High → cli-lite-planning-agent -- Two-step confirmation: plan display → multi-dimensional input collection -- Execution handoff with complete context to lite-execute - -## Input - -``` - Task description or path to .md file (required) -``` - -Workflow preferences (`autoYes`, `forceExplore`) are collected by SKILL.md via AskUserQuestion and passed as `workflowPreferences` context variable. - -## Output Artifacts - -| Artifact | Description | -|----------|-------------| -| `exploration-{angle}.json` | Per-angle exploration results (1-4 files based on complexity) | -| `explorations-manifest.json` | Index of all exploration files | -| `planning-context.md` | Evidence paths + synthesized understanding | -| `plan.json` | Plan overview with task_ids[] (plan-overview-base-schema.json) | -| `.task/TASK-*.json` | Independent task files (one per task) | - -**Output Directory**: `.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/` - -**Agent Usage**: -- Low complexity → Direct Claude planning (no agent) -- Medium/High complexity → `cli-lite-planning-agent` generates `plan.json` - -**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json` - -## Auto Mode Defaults - -When `workflowPreferences.autoYes === true`: -- **Clarification Questions**: Skipped (no clarification phase) -- **Plan Confirmation**: Auto-selected "Allow" -- **Execution Method**: Auto-selected "Auto" -- **Code Review**: 
Auto-selected "Skip" - -## Execution Process - -``` -Phase 1: Task Analysis & Exploration - ├─ Parse input (description or .md file) - ├─ intelligent complexity assessment (Low/Medium/High) - ├─ Exploration decision (auto-detect or workflowPreferences.forceExplore) - ├─ Context protection: If file reading ≥50k chars → force cli-explore-agent - └─ Decision: - ├─ needsExploration=true → Launch parallel cli-explore-agents (1-4 based on complexity) - └─ needsExploration=false → Skip to Phase 2/3 - -Phase 2: Clarification (optional, multi-round) - ├─ Aggregate clarification_needs from all exploration angles - ├─ Deduplicate similar questions - └─ Decision: - ├─ Has clarifications → AskUserQuestion (max 4 questions per round, multiple rounds allowed) - └─ No clarifications → Skip to Phase 3 - -Phase 3: Planning (NO CODE EXECUTION - planning only) - └─ Decision (based on Phase 1 complexity): - ├─ Low → Load schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json → Direct Claude planning (following schema) → plan.json - └─ Medium/High → cli-lite-planning-agent → plan.json (agent internally executes quality check) - -Phase 4: Confirmation & Selection - ├─ Display plan summary (tasks, complexity, estimated time) - └─ AskUserQuestion: - ├─ Confirm: Allow / Modify / Cancel - ├─ Execution: Agent / Codex / Auto - └─ Review: Gemini / Agent / Skip - -Phase 5: Execute - ├─ Build executionContext (plan + explorations + clarifications + selections) - └─ Direct handoff: Read phases/02-lite-execute.md → Execute with executionContext (Mode 1) -``` - -## Implementation - -### Phase 1: Intelligent Multi-Angle Exploration - -**Session Setup** (MANDATORY - follow exactly): -```javascript -// Helper: Get UTC+8 (China Standard Time) ISO string -const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() - -const taskSlug = task_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40) -const dateStr = 
getUtc8ISOString().substring(0, 10) // Format: 2025-11-29 - -const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-11-29" -const sessionFolder = `.workflow/.lite-plan/${sessionId}` - -bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`) -``` - -**Exploration Decision Logic**: -```javascript -// Analysis handoff: reconstruct exploration from upstream analysis artifacts -if (workflowPreferences.analysisHandoff) { - const handoff = workflowPreferences.analysisHandoff - Write(`${sessionFolder}/exploration-from-analysis.json`, JSON.stringify({ - relevant_files: handoff.exploration_digest.relevant_files || [], - patterns: handoff.exploration_digest.patterns || [], - key_findings: handoff.exploration_digest.key_findings || [], - clarification_needs: [], // analysis already did multi-round discussion - _metadata: { exploration_angle: "from-analysis", source_session: handoff.source_session, reconstructed: true } - }, null, 2)) - Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify({ - session_id: sessionId, task_description: task_description, timestamp: getUtc8ISOString(), - complexity: complexity, exploration_count: 1, from_analysis: handoff.source_session, - explorations: [{ angle: "from-analysis", file: "exploration-from-analysis.json", - path: `${sessionFolder}/exploration-from-analysis.json`, index: 1 }] - }, null, 2)) - needsExploration = false - // clarification_needs=[] → Phase 2 naturally skipped → proceed to Phase 3 -} - -needsExploration = needsExploration ?? 
( - workflowPreferences.forceExplore || - task.mentions_specific_files || - task.requires_codebase_context || - task.needs_architecture_understanding || - task.modifies_existing_code -) - -if (!needsExploration) { - // Skip to Phase 2 (Clarification) or Phase 3 (Planning) - proceed_to_next_phase() -} -``` - -**⚠️ Context Protection**: File reading ≥50k chars → force `needsExploration=true` (delegate to cli-explore-agent) - -**Complexity Assessment** (Intelligent Analysis): -```javascript -// analyzes task complexity based on: -// - Scope: How many systems/modules are affected? -// - Depth: Surface change vs architectural impact? -// - Risk: Potential for breaking existing functionality? -// - Dependencies: How interconnected is the change? - -const complexity = analyzeTaskComplexity(task_description) -// Returns: 'Low' | 'Medium' | 'High' -// Low: ONLY truly trivial — single file, single function, zero cross-module impact, no new patterns -// Examples: fix typo, rename variable, add log line, adjust constant value -// Medium: Multiple files OR any integration point OR new pattern introduction OR moderate risk -// Examples: add endpoint, implement feature, refactor module, fix bug spanning files -// High: Cross-module, architectural, or systemic change -// Examples: new subsystem, migration, security overhaul, API redesign -// ⚠️ Default bias: When uncertain between Low and Medium, choose Medium - -// Angle assignment based on task type (orchestrator decides, not agent) -const ANGLE_PRESETS = { - architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'], - security: ['security', 'auth-patterns', 'dataflow', 'validation'], - performance: ['performance', 'bottlenecks', 'caching', 'data-access'], - bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], - feature: ['patterns', 'integration-points', 'testing', 'dependencies'] -} - -function selectAngles(taskDescription, count) { - const text = taskDescription.toLowerCase() - 
let preset = 'feature' // default - - if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture' - else if (/security|auth|permission|access/.test(text)) preset = 'security' - else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance' - else if (/fix|bug|error|issue|broken/.test(text)) preset = 'bugfix' - - return ANGLE_PRESETS[preset].slice(0, count) -} - -const selectedAngles = selectAngles(task_description, complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1)) - -// Planning strategy determination -// Agent trigger: anything beyond trivial single-file change -// - analysisHandoff → always agent (analysis validated non-trivial task) -// - multi-angle exploration → agent (complexity warranted multiple angles) -// - Medium/High complexity → agent -// Direct Claude planning ONLY for truly trivial Low + no analysis + single angle -const planningStrategy = ( - complexity === 'Low' && !workflowPreferences.analysisHandoff && selectedAngles.length <= 1 -) ? 'Direct Claude Planning' - : 'cli-lite-planning-agent' - -console.log(` -## Exploration Plan - -Task Complexity: ${complexity} -Selected Angles: ${selectedAngles.join(', ')} -Planning Strategy: ${planningStrategy} - -Launching ${selectedAngles.length} parallel explorations... -`) -``` - -**Launch Parallel Explorations** - Orchestrator assigns angle to each agent: - -**⚠️ CRITICAL - NO BACKGROUND EXECUTION**: -- **MUST NOT use `run_in_background: true`** - exploration results are REQUIRED before planning - - -```javascript -// Launch agents with pre-assigned angles -const explorationTasks = selectedAngles.map((angle, index) => - Task( - subagent_type="cli-explore-agent", - run_in_background=false, // ⚠️ MANDATORY: Must wait for results - description=`Explore: ${angle}`, - prompt=` -## Task Objective -Execute **${angle}** exploration for task planning context. Analyze codebase from this specific angle to discover relevant structure, patterns, and constraints. 
- -## Output Location - -**Session Folder**: ${sessionFolder} -**Output File**: ${sessionFolder}/exploration-${angle}.json - -## Assigned Context -- **Exploration Angle**: ${angle} -- **Task Description**: ${task_description} -- **Exploration Index**: ${index + 1} of ${selectedAngles.length} - -## Agent Initialization -cli-explore-agent autonomously handles: project structure discovery, schema loading, project context loading (project-tech.json, project-guidelines.json), and keyword search. These steps execute automatically. - -## Exploration Strategy (${angle} focus) - -**Step 1: Structural Scan** (Bash) -- get_modules_by_depth.sh → identify modules related to ${angle} -- find/rg → locate files relevant to ${angle} aspect -- Analyze imports/dependencies from ${angle} perspective - -**Step 2: Semantic Analysis** (Gemini CLI) -- How does existing code handle ${angle} concerns? -- What patterns are used for ${angle}? -- Where would new code integrate from ${angle} viewpoint? - -**Step 3: Write Output** -- Consolidate ${angle} findings into JSON -- Identify ${angle}-specific clarification needs - -## Expected Output - -**Schema Reference**: explore-json-schema.json (auto-loaded by agent during initialization) - -**Required Fields** (all ${angle} focused): -- Follow explore-json-schema.json exactly (auto-loaded by agent) -- All fields scoped to ${angle} perspective -- Ensure rationale is specific and >10 chars (not generic) -- Include file:line locations in integration_points -- _metadata.exploration_angle: "${angle}" - -## Success Criteria -- [ ] get_modules_by_depth.sh executed -- [ ] At least 3 relevant files identified with specific rationale + role -- [ ] Every file has rationale >10 chars (not generic like "Related to ${angle}") -- [ ] Every file has role classification (modify_target/dependency/etc.) 
-- [ ] Patterns are actionable (code examples, not generic advice) -- [ ] Integration points include file:line locations -- [ ] Constraints are project-specific to ${angle} -- [ ] JSON output follows schema exactly -- [ ] clarification_needs includes options + recommended - -## Execution -**Write**: \`${sessionFolder}/exploration-${angle}.json\` -**Return**: 2-3 sentence summary of ${angle} findings -` - ) -) - -// Execute all exploration tasks in parallel -``` - -**Auto-discover Generated Exploration Files**: -```javascript -// After explorations complete, auto-discover all exploration-*.json files -const explorationFiles = bash(`find ${sessionFolder} -name "exploration-*.json" -type f`) - .split('\n') - .filter(f => f.trim()) - -// Read metadata to build manifest -const explorationManifest = { - session_id: sessionId, - task_description: task_description, - timestamp: getUtc8ISOString(), - complexity: complexity, - exploration_count: explorationCount, - explorations: explorationFiles.map(file => { - const data = JSON.parse(Read(file)) - const filename = path.basename(file) - return { - angle: data._metadata.exploration_angle, - file: filename, - path: file, - index: data._metadata.exploration_index - } - }) -} - -Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2)) - -console.log(` -## Exploration Complete - -Generated exploration files in ${sessionFolder}: -${explorationManifest.explorations.map(e => `- exploration-${e.angle}.json (angle: ${e.angle})`).join('\n')} - -Manifest: explorations-manifest.json -Angles explored: ${explorationManifest.explorations.map(e => e.angle).join(', ')} -`) -``` - -**Output**: -- `${sessionFolder}/exploration-{angle1}.json` -- `${sessionFolder}/exploration-{angle2}.json` -- ... 
(1-4 files based on complexity) -- `${sessionFolder}/explorations-manifest.json` - ---- - -### Phase 2: Clarification (Optional, Multi-Round) - -**Skip if**: No exploration or `clarification_needs` is empty across all explorations - -**⚠️ CRITICAL**: AskUserQuestion tool limits max 4 questions per call. **MUST execute multiple rounds** to exhaust all clarification needs - do NOT stop at round 1. - -**Aggregate clarification needs from all exploration angles**: -```javascript -// Load manifest and all exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -const explorations = manifest.explorations.map(exp => ({ - angle: exp.angle, - data: JSON.parse(Read(exp.path)) -})) - -// Aggregate clarification needs from all explorations -const allClarifications = [] -explorations.forEach(exp => { - if (exp.data.clarification_needs?.length > 0) { - exp.data.clarification_needs.forEach(need => { - allClarifications.push({ - ...need, - source_angle: exp.angle - }) - }) - } -}) - -// Intelligent deduplication: analyze allClarifications by intent -// - Identify questions with similar intent across different angles -// - Merge similar questions: combine options, consolidate context -// - Produce dedupedClarifications with unique intents only -const dedupedClarifications = intelligentMerge(allClarifications) - -const autoYes = workflowPreferences.autoYes - -if (autoYes) { - // Auto mode: Skip clarification phase - console.log(`[Auto] Skipping ${dedupedClarifications.length} clarification questions`) - console.log(`Proceeding to planning with exploration results...`) - // Continue to Phase 3 -} else if (dedupedClarifications.length > 0) { - // Interactive mode: Multi-round clarification - const BATCH_SIZE = 4 - const totalRounds = Math.ceil(dedupedClarifications.length / BATCH_SIZE) - - for (let i = 0; i < dedupedClarifications.length; i += BATCH_SIZE) { - const batch = dedupedClarifications.slice(i, i + BATCH_SIZE) - const currentRound = 
Math.floor(i / BATCH_SIZE) + 1 - - console.log(`### Clarification Round ${currentRound}/${totalRounds}`) - - AskUserQuestion({ - questions: batch.map(need => ({ - question: `[${need.source_angle}] ${need.question}\n\nContext: ${need.context}`, - header: need.source_angle.substring(0, 12), - multiSelect: false, - options: need.options.map((opt, index) => ({ - label: need.recommended === index ? `${opt} ★` : opt, - description: need.recommended === index ? `Recommended` : `Use ${opt}` - })) - })) - }) - - // Store batch responses in clarificationContext before next round - } -} -``` - -**Output**: `clarificationContext` (in-memory) - ---- - -### Phase 3: Planning - -**Planning Strategy Selection** (based on Phase 1 complexity): - -**IMPORTANT**: Phase 3 is **planning only** - NO code execution. All execution happens in Phase 5 via lite-execute. - -**Executor Assignment** (Claude 智能分配,plan 生成后执行): - -```javascript -// 分配规则(优先级从高到低): -// 1. 用户明确指定:"用 gemini 分析..." → gemini, "codex 实现..." → codex -// 2. 默认 → agent - -const executorAssignments = {} // { taskId: { executor: 'gemini'|'codex'|'agent', reason: string } } - -// Load tasks from .task/ directory for executor assignment -const taskFiles = Glob(`${sessionFolder}/.task/TASK-*.json`) -taskFiles.forEach(taskPath => { - const task = JSON.parse(Read(taskPath)) - // Claude 根据上述规则语义分析,为每个 task 分配 executor - executorAssignments[task.id] = { executor: '...', reason: '...' 
} -}) -``` - -**Low Complexity** - Direct planning by Claude: -```javascript -// Step 1: Read schema -const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`) - -// Step 2: ⚠️ MANDATORY - Read and review ALL exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -manifest.explorations.forEach(exp => { - const explorationData = Read(exp.path) - console.log(`\n### Exploration: ${exp.angle}\n${explorationData}`) -}) - -// Step 3: Generate task objects (Claude directly, no agent) -// ⚠️ Tasks MUST incorporate insights from exploration files read in Step 2 -// Task fields use NEW names: convergence.criteria (not acceptance), files[].change (not modification_points), test (not verification) -const tasks = [ - { - id: "TASK-001", - title: "...", - description: "...", - depends_on: [], - convergence: { criteria: ["..."] }, - files: [{ path: "...", change: "..." }], - implementation: ["..."], - test: "..." - }, - // ... 
more tasks -] - -// Step 4: Write task files to .task/ directory -const taskDir = `${sessionFolder}/.task` -Bash(`mkdir -p "${taskDir}"`) -tasks.forEach(task => { - Write(`${taskDir}/${task.id}.json`, JSON.stringify(task, null, 2)) -}) - -// Step 5: Generate plan overview (NO embedded tasks[]) -const plan = { - summary: "...", - approach: "...", - task_ids: tasks.map(t => t.id), - task_count: tasks.length, - complexity: "Low", - estimated_time: "...", - recommended_execution: "Agent", - _metadata: { - timestamp: getUtc8ISOString(), - source: "direct-planning", - planning_mode: "direct", - plan_type: "feature" - } -} - -// Step 6: Write plan overview to session folder -Write(`${sessionFolder}/plan.json`, JSON.stringify(plan, null, 2)) - -// Step 7: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here -``` - -**Medium/High Complexity** - Invoke cli-lite-planning-agent: - -```javascript -Task( - subagent_type="cli-lite-planning-agent", - run_in_background=false, - description="Generate detailed implementation plan", - prompt=` -Generate implementation plan and write plan.json. - -## Output Location - -**Session Folder**: ${sessionFolder} -**Output Files**: -- ${sessionFolder}/planning-context.md (evidence + understanding) -- ${sessionFolder}/plan.json (plan overview -- NO embedded tasks[]) -- ${sessionFolder}/.task/TASK-*.json (independent task files, one per task) - -## Output Schema Reference -Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json (get schema reference before generating plan) - -## Project Context (MANDATORY - Read Both Files) -1. Read: .workflow/project-tech.json (technology stack, architecture, key components) -2. 
Read: .workflow/project-guidelines.json (user-defined constraints and conventions) - -**CRITICAL**: All generated tasks MUST comply with constraints in project-guidelines.json - -## Task Description -${task_description} - -## Multi-Angle Exploration Context - -${manifest.explorations.map(exp => `### Exploration: ${exp.angle} (${exp.file}) -Path: ${exp.path} - -Read this file for detailed ${exp.angle} analysis.`).join('\n\n')} - -Total explorations: ${manifest.exploration_count} -Angles covered: ${manifest.explorations.map(e => e.angle).join(', ')} - -Manifest: ${sessionFolder}/explorations-manifest.json - -## User Clarifications -${JSON.stringify(clarificationContext) || "None"} - -## Complexity Level -${complexity} - -## Requirements -Generate plan.json and .task/*.json following the schema obtained above. Key constraints: -- _metadata.exploration_angles: ${JSON.stringify(manifest.explorations.map(e => e.angle))} - -**Output Format**: Two-layer structure: -- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array) -- .task/TASK-*.json: Independent task files following task-schema.json - -Follow plan-overview-base-schema.json (loaded via cat command above) for plan.json structure. -Follow task-schema.json for .task/TASK-*.json structure. -Note: Use files[].change (not modification_points), convergence.criteria (not acceptance). - -## Task Grouping Rules -1. **Group by feature**: All changes for one feature = one task (even if 3-5 files) -2. **Group by context**: Tasks with similar context or related functional changes can be grouped together -3. **Minimize agent count**: Simple, unrelated tasks can also be grouped to reduce agent execution overhead -4. **Avoid file-per-task**: Do NOT create separate tasks for each file -5. **Substantial tasks**: Each task should represent 15-60 minutes of work -6. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output -7. 
**Prefer parallel**: Most tasks should be independent (no depends_on) - -## Execution -1. Read schema file (cat command above) -2. Execute CLI planning using Gemini (Qwen fallback) -3. Read ALL exploration files for comprehensive context -4. Synthesize findings and generate tasks + plan overview -5. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding) -6. **Create**: \`${sessionFolder}/.task/\` directory (mkdir -p) -7. **Write**: \`${sessionFolder}/.task/TASK-001.json\`, \`TASK-002.json\`, etc. (one per task) -8. **Write**: \`${sessionFolder}/plan.json\` (overview with task_ids[], NO tasks[]) -9. Return brief completion summary -` -) -``` - -**Output**: `${sessionFolder}/plan.json` - ---- - -### Phase 4: Task Confirmation & Execution Selection - -**Step 4.1: Display Plan** -```javascript -const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) - -// Load tasks from .task/ directory -const tasks = (plan.task_ids || []).map(id => { - const taskPath = `${sessionFolder}/.task/${id}.json` - return JSON.parse(Read(taskPath)) -}) -const taskList = tasks - -console.log(` -## Implementation Plan - -**Summary**: ${plan.summary} -**Approach**: ${plan.approach} - -**Tasks** (${taskList.length}): -${taskList.map((t, i) => `${i+1}. 
${t.title} (${t.scope || t.files?.[0]?.path || ''})`).join('\n')} - -**Complexity**: ${plan.complexity} -**Estimated Time**: ${plan.estimated_time} -**Recommended**: ${plan.recommended_execution} -`) -``` - -**Step 4.2: Collect Confirmation** -```javascript -const autoYes = workflowPreferences.autoYes - -let userSelection - -if (autoYes) { - // Auto mode: Use defaults - console.log(`[Auto] Auto-confirming plan:`) - console.log(` - Confirmation: Allow`) - console.log(` - Execution: Auto`) - console.log(` - Review: Skip`) - - userSelection = { - confirmation: "Allow", - execution_method: "Auto", - code_review_tool: "Skip" - } -} else { - // Interactive mode: Ask user - // Note: Execution "Other" option allows specifying CLI tools from ~/.claude/cli-tools.json - userSelection = AskUserQuestion({ - questions: [ - { - question: `Confirm plan? (${taskList.length} tasks, ${plan.complexity})`, - header: "Confirm", - multiSelect: false, - options: [ - { label: "Allow", description: "Proceed as-is" }, - { label: "Modify", description: "Adjust before execution" }, - { label: "Cancel", description: "Abort workflow" } - ] - }, - { - question: "Execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: `Auto: ${plan.complexity === 'Low' ? 'Agent' : 'Codex'}` } - ] - }, - { - question: "Code review after execution?", - header: "Review", - multiSelect: false, - options: [ - { label: "Gemini Review", description: "Gemini CLI review" }, - { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, - { label: "Agent Review", description: "@code-reviewer agent" }, - { label: "Skip", description: "No review" } - ] - } - ] - }) -} -``` - ---- - -### Phase 5: Handoff to Execution - -**CRITICAL**: lite-plan NEVER executes code directly. ALL execution MUST go through lite-execute. 
- -**Step 5.1: Build executionContext** - -```javascript -// Load manifest and all exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -const explorations = {} - -manifest.explorations.forEach(exp => { - if (file_exists(exp.path)) { - explorations[exp.angle] = JSON.parse(Read(exp.path)) - } -}) - -const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) - -executionContext = { - planObject: plan, // plan overview (no tasks[]) - taskFiles: (plan.task_ids || []).map(id => ({ - id, - path: `${sessionFolder}/.task/${id}.json` - })), - explorationsContext: explorations, - explorationAngles: manifest.explorations.map(e => e.angle), - explorationManifest: manifest, - clarificationContext: clarificationContext || null, - executionMethod: userSelection.execution_method, // 全局默认,可被 executorAssignments 覆盖 - codeReviewTool: userSelection.code_review_tool, - originalUserInput: task_description, - - // 任务级 executor 分配(优先于全局 executionMethod) - executorAssignments: executorAssignments, // { taskId: { executor, reason } } - - session: { - id: sessionId, - folder: sessionFolder, - artifacts: { - explorations: manifest.explorations.map(exp => ({ - angle: exp.angle, - path: exp.path - })), - explorations_manifest: `${sessionFolder}/explorations-manifest.json`, - plan: `${sessionFolder}/plan.json`, - task_dir: `${sessionFolder}/.task` - } - } -} -``` - -**Step 5.2: Handoff** - -```javascript -// Direct phase handoff: Read and execute Phase 2 (lite-execute) with in-memory context -// No Skill routing needed - executionContext is already set in Step 5.1 -Read("phases/02-lite-execute.md") -// Execute Phase 2 with executionContext (Mode 1: In-Memory Plan) -``` - -## Session Folder Structure - -``` -.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/ -├── exploration-{angle1}.json # Exploration angle 1 -├── exploration-{angle2}.json # Exploration angle 2 -├── exploration-{angle3}.json # Exploration angle 3 (if applicable) -├── 
exploration-{angle4}.json # Exploration angle 4 (if applicable) -├── explorations-manifest.json # Exploration index -├── planning-context.md # Evidence paths + understanding -├── plan.json # Plan overview (task_ids[]) -└── .task/ # Task files directory - ├── TASK-001.json - ├── TASK-002.json - └── ... -``` - -**Example**: -``` -.workflow/.lite-plan/implement-jwt-refresh-2025-11-25-14-30-25/ -├── exploration-architecture.json -├── exploration-auth-patterns.json -├── exploration-security.json -├── explorations-manifest.json -├── planning-context.md -├── plan.json -└── .task/ - ├── TASK-001.json - ├── TASK-002.json - └── TASK-003.json -``` - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Exploration agent failure | Skip exploration, continue with task description only | -| Planning agent failure | Fallback to direct planning by Claude | -| Clarification timeout | Use exploration findings as-is | -| Confirmation timeout | Save context, display resume instructions | -| Modify loop > 3 times | Suggest breaking task or using /workflow:plan | - -## Next Phase - -After Phase 5 handoff, execution continues in [Phase 2: Lite-Execute](02-lite-execute.md). diff --git a/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md b/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md deleted file mode 100644 index 5550dbc1..00000000 --- a/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md +++ /dev/null @@ -1,776 +0,0 @@ -# Phase 2: Lite-Execute - -Complete execution engine: multi-mode input, task grouping, batch execution, code review, and development index update. - ---- - -## Overview - -Flexible task execution command supporting three input modes: in-memory plan (from lite-plan), direct prompt description, or file content. Handles execution orchestration, progress tracking, and optional code review. 
- -**Core capabilities:** -- Multi-mode input (in-memory plan, prompt description, or file path) -- Execution orchestration (Agent or Codex) with full context -- Live progress tracking via TodoWrite at execution call level -- Optional code review with selected tool (Gemini, Agent, or custom) -- Context continuity across multiple executions -- Intelligent format detection (Enhanced Task JSON vs plain text) - -## Usage - -### Input -``` - Task description string, or path to file (required) -``` - -Mode 1 (In-Memory) is triggered by lite-plan direct handoff when `executionContext` is available. -Workflow preferences (`autoYes`) are passed from SKILL.md via `workflowPreferences` context variable. - -## Input Modes - -### Mode 1: In-Memory Plan - -**Trigger**: Called by lite-plan direct handoff after Phase 4 approval (executionContext available) - -**Input Source**: `executionContext` global variable set by lite-plan - -**Content**: Complete execution context (see Data Structures section) - -**Behavior**: -- Skip execution method selection (already set by lite-plan) -- Directly proceed to execution with full context -- All planning artifacts available (exploration, clarifications, plan) - -### Mode 2: Prompt Description - -**Trigger**: User calls with task description string - -**Input**: Simple task description (e.g., "Add unit tests for auth module") - -**Behavior**: -- Store prompt as `originalUserInput` -- Create simple execution plan from prompt -- AskUserQuestion: Select execution method (Agent/Codex/Auto) -- AskUserQuestion: Select code review tool (Skip/Gemini/Agent/Other) -- Proceed to execution with `originalUserInput` included - -**User Interaction**: -```javascript -const autoYes = workflowPreferences.autoYes - -let userSelection - -if (autoYes) { - // Auto mode: Use defaults - console.log(`[Auto] Auto-confirming execution:`) - console.log(` - Execution method: Auto`) - console.log(` - Code review: Skip`) - - userSelection = { - execution_method: "Auto", - 
code_review_tool: "Skip" - } -} else { - // Interactive mode: Ask user - userSelection = AskUserQuestion({ - questions: [ - { - question: "Select execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: "Auto-select based on complexity" } - ] - }, - { - question: "Enable code review after execution?", - header: "Code Review", - multiSelect: false, - options: [ - { label: "Skip", description: "No review" }, - { label: "Gemini Review", description: "Gemini CLI tool" }, - { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, - { label: "Agent Review", description: "Current agent review" } - ] - } - ] - }) -} -``` - -### Mode 3: File Content - -**Trigger**: User calls with file path - -**Input**: Path to file containing task description or plan.json - -**Step 1: Read and Detect Format** - -```javascript -fileContent = Read(filePath) - -// Attempt JSON parsing -try { - jsonData = JSON.parse(fileContent) - - // Check if plan.json from lite-plan session (two-layer format: task_ids[]) - if (jsonData.summary && jsonData.approach && jsonData.task_ids) { - planObject = jsonData - originalUserInput = jsonData.summary - isPlanJson = true - - // Load tasks from .task/*.json files - const planDir = filePath.replace(/[/\\][^/\\]+$/, '') // parent directory - planObject._loadedTasks = loadTaskFiles(planDir, jsonData.task_ids) - } else { - // Valid JSON but not plan.json - treat as plain text - originalUserInput = fileContent - isPlanJson = false - } -} catch { - // Not valid JSON - treat as plain text prompt - originalUserInput = fileContent - isPlanJson = false -} -``` - -**Step 2: Create Execution Plan** - -If `isPlanJson === true`: -- Use `planObject` directly -- User selects execution method and code review - -If `isPlanJson === false`: -- Treat file content as prompt (same behavior 
as Mode 2) -- Create simple execution plan from content - -**Step 3: User Interaction** - -- AskUserQuestion: Select execution method (Agent/Codex/Auto) -- AskUserQuestion: Select code review tool -- Proceed to execution with full context - -## Helper Functions - -```javascript -// Load task files from .task/ directory (two-layer format) -function loadTaskFiles(planDir, taskIds) { - return taskIds.map(id => { - const taskPath = `${planDir}/.task/${id}.json` - return JSON.parse(Read(taskPath)) - }) -} - -// Get tasks array from loaded .task/*.json files -function getTasks(planObject) { - return planObject._loadedTasks || [] -} -``` - -## Execution Process - -``` -Input Parsing: - └─ Decision (mode detection): - ├─ executionContext exists → Mode 1: Load executionContext → Skip user selection - ├─ Ends with .md/.json/.txt → Mode 3: Read file → Detect format - │ ├─ Valid plan.json → Use planObject → User selects method + review - │ └─ Not plan.json → Treat as prompt → User selects method + review - └─ Other → Mode 2: Prompt description → User selects method + review - -Execution: - ├─ Step 1: Initialize result tracking (previousExecutionResults = []) - ├─ Step 2: Task grouping & batch creation - │ ├─ Extract explicit depends_on (no file/keyword inference) - │ ├─ Group: independent tasks → per-executor parallel batches (one CLI per batch) - │ ├─ Group: dependent tasks → sequential phases (respect dependencies) - │ └─ Create TodoWrite list for batches - ├─ Step 3: Launch execution - │ ├─ Phase 1: Independent tasks (⚡ per-executor batches, multi-CLI concurrent) - │ └─ Phase 2+: Dependent tasks by dependency order - ├─ Step 4: Track progress (TodoWrite updates per batch) - └─ Step 5: Code review (if codeReviewTool ≠ "Skip") - -Output: - └─ Execution complete with results in previousExecutionResults[] -``` - -## Detailed Execution Steps - -### Step 1: Initialize Execution Tracking - -**Operations**: -- Initialize result tracking for multi-execution scenarios -- Set up 
`previousExecutionResults` array for context continuity -- **In-Memory Mode**: Echo execution strategy from lite-plan for transparency - -```javascript -// Initialize result tracking -previousExecutionResults = [] - -// In-Memory Mode: Echo execution strategy (transparency before execution) -if (executionContext) { - console.log(` -📋 Execution Strategy (from lite-plan): - Method: ${executionContext.executionMethod} - Review: ${executionContext.codeReviewTool} - Tasks: ${getTasks(executionContext.planObject).length} - Complexity: ${executionContext.planObject.complexity} -${executionContext.executorAssignments ? ` Assignments: ${JSON.stringify(executionContext.executorAssignments)}` : ''} - `) -} -``` - -### Step 2: Task Grouping & Batch Creation - -**Dependency Analysis & Grouping Algorithm**: -```javascript -// Use explicit depends_on from plan.json (no inference from file/keywords) -function extractDependencies(tasks) { - const taskIdToIndex = {} - tasks.forEach((t, i) => { taskIdToIndex[t.id] = i }) - - return tasks.map((task, i) => { - // Only use explicit depends_on from plan.json - const deps = (task.depends_on || []) - .map(depId => taskIdToIndex[depId]) - .filter(idx => idx !== undefined && idx < i) - return { ...task, taskIndex: i, dependencies: deps } - }) -} - -// Executor Resolution (used by task grouping below) -// 获取任务的 executor(优先使用 executorAssignments,fallback 到全局 executionMethod) -function getTaskExecutor(task) { - const assignments = executionContext?.executorAssignments || {} - if (assignments[task.id]) { - return assignments[task.id].executor // 'gemini' | 'codex' | 'agent' - } - // Fallback: 全局 executionMethod 映射 - const method = executionContext?.executionMethod || 'Auto' - if (method === 'Agent') return 'agent' - if (method === 'Codex') return 'codex' - // Auto: 根据复杂度 - return planObject.complexity === 'Low' ? 
'agent' : 'codex' -} - -// 按 executor 分组任务(核心分组组件) -function groupTasksByExecutor(tasks) { - const groups = { gemini: [], codex: [], agent: [] } - tasks.forEach(task => { - const executor = getTaskExecutor(task) - groups[executor].push(task) - }) - return groups -} - -// Group into batches: per-executor parallel batches (one CLI per batch) -function createExecutionCalls(tasks, executionMethod) { - const tasksWithDeps = extractDependencies(tasks) - const processed = new Set() - const calls = [] - - // Phase 1: Independent tasks → per-executor batches (multi-CLI concurrent) - const independentTasks = tasksWithDeps.filter(t => t.dependencies.length === 0) - if (independentTasks.length > 0) { - const executorGroups = groupTasksByExecutor(independentTasks) - let parallelIndex = 1 - - for (const [executor, tasks] of Object.entries(executorGroups)) { - if (tasks.length === 0) continue - tasks.forEach(t => processed.add(t.taskIndex)) - calls.push({ - method: executionMethod, - executor: executor, // 明确指定 executor - executionType: "parallel", - groupId: `P${parallelIndex++}`, - taskSummary: tasks.map(t => t.title).join(' | '), - tasks: tasks - }) - } - } - - // Phase 2: Dependent tasks → sequential/parallel batches (respect dependencies) - let sequentialIndex = 1 - let remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex)) - - while (remaining.length > 0) { - // Find tasks whose dependencies are all satisfied - const ready = remaining.filter(t => - t.dependencies.every(d => processed.has(d)) - ) - - if (ready.length === 0) { - console.warn('Circular dependency detected, forcing remaining tasks') - ready.push(...remaining) - } - - if (ready.length > 1) { - // Multiple ready tasks → per-executor batches (parallel within this phase) - const executorGroups = groupTasksByExecutor(ready) - for (const [executor, tasks] of Object.entries(executorGroups)) { - if (tasks.length === 0) continue - tasks.forEach(t => processed.add(t.taskIndex)) - calls.push({ - method: 
executionMethod, - executor: executor, - executionType: "parallel", - groupId: `P${calls.length + 1}`, - taskSummary: tasks.map(t => t.title).join(' | '), - tasks: tasks - }) - } - } else { - // Single ready task → sequential batch - ready.forEach(t => processed.add(t.taskIndex)) - calls.push({ - method: executionMethod, - executor: getTaskExecutor(ready[0]), - executionType: "sequential", - groupId: `S${sequentialIndex++}`, - taskSummary: ready[0].title, - tasks: ready - }) - } - - remaining = remaining.filter(t => !processed.has(t.taskIndex)) - } - - return calls -} - -executionCalls = createExecutionCalls(getTasks(planObject), executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` })) - -TodoWrite({ - todos: executionCalls.map(c => ({ - content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} (${c.tasks.length} tasks)`, - status: "pending", - activeForm: `Executing ${c.id}` - })) -}) -``` - -### Step 3: Launch Execution - -**Executor Resolution**: `getTaskExecutor()` and `groupTasksByExecutor()` defined in Step 2 (Task Grouping). 
- -**Batch Execution Routing** (根据 batch.executor 字段路由): -```javascript -// executeBatch 根据 batch 自身的 executor 字段决定调用哪个 CLI -function executeBatch(batch) { - const executor = batch.executor || getTaskExecutor(batch.tasks[0]) - const sessionId = executionContext?.session?.id || 'standalone' - const fixedId = `${sessionId}-${batch.groupId}` - - if (executor === 'agent') { - // Agent execution (synchronous) - return Task({ - subagent_type: "code-developer", - run_in_background: false, - description: batch.taskSummary, - prompt: buildExecutionPrompt(batch) - }) - } else if (executor === 'codex') { - // Codex CLI (background) - return Bash(`ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedId}`, { run_in_background: true }) - } else if (executor === 'gemini') { - // Gemini CLI (background) - return Bash(`ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode write --id ${fixedId}`, { run_in_background: true }) - } -} -``` - -**并行执行原则**: -- 每个 batch 对应一个独立的 CLI 实例或 Agent 调用 -- 并行 = 多个 Bash(run_in_background=true) 或多个 Task() 同时发出 -- 绝不将多个独立任务合并到同一个 CLI prompt 中 -- Agent 任务不可后台执行(run_in_background=false),但多个 Agent 任务可通过单条消息中的多个 Task() 调用并发 - -**Execution Flow**: Parallel batches concurrently → Sequential batches in order -```javascript -const parallel = executionCalls.filter(c => c.executionType === "parallel") -const sequential = executionCalls.filter(c => c.executionType === "sequential") - -// Phase 1: Launch all parallel batches (single message with multiple tool calls) -if (parallel.length > 0) { - TodoWrite({ todos: executionCalls.map(c => ({ status: c.executionType === "parallel" ? "in_progress" : "pending" })) }) - parallelResults = await Promise.all(parallel.map(c => executeBatch(c))) - previousExecutionResults.push(...parallelResults) - TodoWrite({ todos: executionCalls.map(c => ({ status: parallel.includes(c) ? 
"completed" : "pending" })) }) -} - -// Phase 2: Execute sequential batches one by one -for (const call of sequential) { - TodoWrite({ todos: executionCalls.map(c => ({ status: c === call ? "in_progress" : "..." })) }) - result = await executeBatch(call) - previousExecutionResults.push(result) - TodoWrite({ todos: executionCalls.map(c => ({ status: "completed" or "pending" })) }) -} -``` - -### Unified Task Prompt Builder - -**Task Formatting Principle**: Each task is a self-contained checklist. The executor only needs to know what THIS task requires. Same template for Agent and CLI. - -```javascript -function buildExecutionPrompt(batch) { - // Task template (6 parts: Files → Why → How → Reference → Risks → Done) - const formatTask = (t) => ` -## ${t.title} - -**Scope**: \`${t.scope}\` | **Action**: ${t.action} - -### Files -${(t.files || []).map(f => `- **${f.path}** → \`${f.target || ''}\`: ${f.change || (f.changes || []).join(', ') || ''}`).join('\n')} - -${t.rationale ? ` -### Why this approach (Medium/High) -${t.rationale.chosen_approach} -${t.rationale.decision_factors?.length > 0 ? `\nKey factors: ${t.rationale.decision_factors.join(', ')}` : ''} -${t.rationale.tradeoffs ? `\nTradeoffs: ${t.rationale.tradeoffs}` : ''} -` : ''} - -### How to do it -${t.description} - -${t.implementation.map(step => `- ${step}`).join('\n')} - -${t.code_skeleton ? ` -### Code skeleton (High) -${t.code_skeleton.interfaces?.length > 0 ? `**Interfaces**: ${t.code_skeleton.interfaces.map(i => `\`${i.name}\` - ${i.purpose}`).join(', ')}` : ''} -${t.code_skeleton.key_functions?.length > 0 ? `\n**Functions**: ${t.code_skeleton.key_functions.map(f => `\`${f.signature}\` - ${f.purpose}`).join(', ')}` : ''} -${t.code_skeleton.classes?.length > 0 ? 
`\n**Classes**: ${t.code_skeleton.classes.map(c => `\`${c.name}\` - ${c.purpose}`).join(', ')}` : ''} -` : ''} - -### Reference -- Pattern: ${t.reference?.pattern || 'N/A'} -- Files: ${t.reference?.files?.join(', ') || 'N/A'} -${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''} - -${t.risks?.length > 0 ? ` -### Risk mitigations (High) -${t.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')} -` : ''} - -### Done when -${(t.convergence?.criteria || []).map(c => `- [ ] ${c}`).join('\n')} -${(t.test?.success_metrics || []).length > 0 ? `\n**Success metrics**: ${t.test.success_metrics.join(', ')}` : ''}` - - // Build prompt - const sections = [] - - if (originalUserInput) sections.push(`## Goal\n${originalUserInput}`) - - sections.push(`## Tasks\n${batch.tasks.map(formatTask).join('\n\n---\n')}`) - - // Context (reference only) - const context = [] - if (previousExecutionResults.length > 0) { - context.push(`### Previous Work\n${previousExecutionResults.map(r => `- ${r.tasksSummary}: ${r.status}`).join('\n')}`) - } - if (clarificationContext) { - context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`) - } - if (executionContext?.planObject?.data_flow?.diagram) { - context.push(`### Data Flow\n${executionContext.planObject.data_flow.diagram}`) - } - if (executionContext?.session?.artifacts?.plan) { - context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`) - } - // Project guidelines (user-defined constraints from /workflow:session:solidify) - context.push(`### Project Guidelines\n@.workflow/project-guidelines.json`) - if (context.length > 0) sections.push(`## Context\n${context.join('\n\n')}`) - - sections.push(`Complete each task according to its "Done when" checklist.`) - - return sections.join('\n\n') -} -``` - -**Option A: Agent Execution** - -When to use: -- `getTaskExecutor(task) === "agent"` -- 或 `executionMethod = "Agent"` (全局 fallback) -- 或 
`executionMethod = "Auto" AND complexity = "Low"` (全局 fallback) - -```javascript -Task( - subagent_type="code-developer", - run_in_background=false, - description=batch.taskSummary, - prompt=buildExecutionPrompt(batch) -) -``` - -**Result Collection**: After completion, collect result following `executionResult` structure (see Data Structures section) - -**Option B: CLI Execution (Codex)** - -When to use: -- `getTaskExecutor(task) === "codex"` -- 或 `executionMethod = "Codex"` (全局 fallback) -- 或 `executionMethod = "Auto" AND complexity = "Medium/High"` (全局 fallback) - -```bash -ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write -``` - -**Execution with fixed IDs** (predictable ID pattern): -```javascript -// Launch CLI in background, wait for task hook callback -// Generate fixed execution ID: ${sessionId}-${groupId} -const sessionId = executionContext?.session?.id || 'standalone' -const fixedExecutionId = `${sessionId}-${batch.groupId}` // e.g., "implement-auth-2025-12-13-P1" - -// Check if resuming from previous failed execution -const previousCliId = batch.resumeFromCliId || null - -// Build command with fixed ID (and optional resume for continuation) -const cli_command = previousCliId - ? `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId} --resume ${previousCliId}` - : `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId}` - -// Execute in background, stop output and wait for task hook callback -Bash( - command=cli_command, - run_in_background=true -) -// STOP HERE - CLI executes in background, task hook will notify on completion -``` - -**Resume on Failure** (with fixed ID): -```javascript -// If execution failed or timed out, offer resume option -if (bash_result.status === 'failed' || bash_result.status === 'timeout') { - console.log(` -⚠️ Execution incomplete. 
Resume available: - Fixed ID: ${fixedExecutionId} - Lookup: ccw cli detail ${fixedExecutionId} - Resume: ccw cli -p "Continue tasks" --resume ${fixedExecutionId} --tool codex --mode write --id ${fixedExecutionId}-retry -`) - - // Store for potential retry in same session - batch.resumeFromCliId = fixedExecutionId -} -``` - -**Result Collection**: After completion, analyze output and collect result following `executionResult` structure (include `cliExecutionId` for resume capability) - -**Option C: CLI Execution (Gemini)** - -When to use: `getTaskExecutor(task) === "gemini"` (分析类任务) - -```bash -# 使用统一的 buildExecutionPrompt,切换 tool 和 mode -ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode analysis --id ${sessionId}-${batch.groupId} -``` - -### Step 4: Progress Tracking - -Progress tracked at batch level (not individual task level). Icons: ⚡ (parallel, concurrent), → (sequential, one-by-one) - -### Step 5: Code Review (Optional) - -**Skip Condition**: Only run if `codeReviewTool ≠ "Skip"` - -**Review Focus**: Verify implementation against plan convergence criteria and test requirements -- Read plan.json + .task/*.json for task convergence criteria and test checklist -- Check each convergence criterion is fulfilled -- Verify success metrics from test field (Medium/High complexity) -- Run unit/integration tests specified in test field -- Validate code quality and identify issues -- Ensure alignment with planned approach and risk mitigations - -**Operations**: -- Agent Review: Current agent performs direct review -- Gemini Review: Execute gemini CLI with review prompt -- Codex Review: Two options - (A) with prompt for complex reviews, (B) `--uncommitted` flag only for quick reviews -- Custom tool: Execute specified CLI tool (qwen, etc.) 
- -**Unified Review Template** (All tools use same standard): - -**Review Criteria**: -- **Convergence Criteria**: Verify each criterion from task convergence.criteria -- **Test Checklist** (Medium/High): Check unit, integration, success_metrics from task test -- **Code Quality**: Analyze quality, identify issues, suggest improvements -- **Plan Alignment**: Validate implementation matches planned approach and risk mitigations - -**Shared Prompt Template** (used by all CLI tools): -``` -PURPOSE: Code review for implemented changes against plan convergence criteria and test requirements -TASK: • Verify plan convergence criteria fulfillment • Check test requirements (unit, integration, success_metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations -MODE: analysis -CONTEXT: @**/* @{plan.json} @{.task/*.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including test checklist -EXPECTED: Quality assessment with: - - Convergence criteria verification (all tasks from .task/*.json) - - Test checklist validation (Medium/High: unit, integration, success_metrics) - - Issue identification - - Recommendations - Explicitly check each convergence criterion and test item from .task/*.json files. 
-CONSTRAINTS: Focus on plan convergence criteria, test requirements, and plan adherence | analysis=READ-ONLY -``` - -**Tool-Specific Execution** (Apply shared prompt template above): - -```bash -# Method 1: Agent Review (current agent) -# - Read plan.json: ${executionContext.session.artifacts.plan} -# - Apply unified review criteria (see Shared Prompt Template) -# - Report findings directly - -# Method 2: Gemini Review (recommended) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool gemini --mode analysis -# CONTEXT includes: @**/* @${plan.json} [@${exploration.json}] - -# Method 3: Qwen Review (alternative) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool qwen --mode analysis -# Same prompt as Gemini, different execution engine - -# Method 4: Codex Review (git-aware) - Two mutually exclusive options: - -# Option A: With custom prompt (reviews uncommitted by default) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool codex --mode review -# Use for complex reviews with specific focus areas - -# Option B: Target flag only (no prompt allowed) -ccw cli --tool codex --mode review --uncommitted -# Quick review of uncommitted changes without custom instructions - -# ⚠️ IMPORTANT: -p prompt and target flags (--uncommitted/--base/--commit) are MUTUALLY EXCLUSIVE -``` - -**Multi-Round Review with Fixed IDs**: -```javascript -// Generate fixed review ID -const reviewId = `${sessionId}-review` - -// First review pass with fixed ID -const reviewResult = Bash(`ccw cli -p "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`) - -// If issues found, continue review dialog with fixed ID chain -if (hasUnresolvedIssues(reviewResult)) { - // Resume with follow-up questions - Bash(`ccw cli -p "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`) -} -``` - -**Implementation Note**: Replace `[Shared Prompt Template with artifacts]` placeholder with actual template content, 
substituting: -- `@{plan.json}` → `@${executionContext.session.artifacts.plan}` -- `[@{exploration.json}]` → exploration files from artifacts (if exists) - -### Step 6: Auto-Sync Project State - -**Trigger**: After all executions complete (regardless of code review) - -**Operation**: Execute `/workflow:session:sync -y "{summary}"` to update both `project-guidelines.json` and `project-tech.json` in one shot. - -Summary 取值优先级:`originalUserInput` → `planObject.summary` → git log 自动推断。 - -## Best Practices - -**Input Modes**: In-memory (lite-plan), prompt (standalone), file (JSON/text) -**Task Grouping**: Based on explicit depends_on only; independent tasks split by executor, each batch runs as separate CLI instance -**Execution**: Independent task batches launch concurrently via single Claude message with multiple tool calls (one tool call per batch) - -## Error Handling - -| Error | Cause | Resolution | -|-------|-------|------------| -| Missing executionContext | In-memory mode without context | Error: "No execution context found. Only available when called by lite-plan." | -| File not found | File path doesn't exist | Error: "File not found: {path}. Check file path." | -| Empty file | File exists but no content | Error: "File is empty: {path}. Provide task description." | -| Invalid Enhanced Task JSON | JSON missing required fields | Warning: "Missing required fields. Treating as plain text." 
| -| Malformed JSON | JSON parsing fails | Treat as plain text (expected for non-JSON files) | -| Execution failure | Agent/Codex crashes | Display error, use fixed ID `${sessionId}-${groupId}` for resume: `ccw cli -p "Continue" --resume --id -retry` | -| Execution timeout | CLI exceeded timeout | Use fixed ID for resume with extended timeout | -| Codex unavailable | Codex not installed | Show installation instructions, offer Agent execution | -| Fixed ID not found | Custom ID lookup failed | Check `ccw cli history`, verify date directories | - -## Data Structures - -### executionContext (Input - Mode 1) - -Passed from lite-plan via global variable: - -```javascript -{ - planObject: { - summary: string, - approach: string, - task_ids: string[], // Task IDs referencing .task/*.json files - task_count: number, // Number of tasks - _loadedTasks: [...], // Populated at runtime from .task/*.json files - estimated_time: string, - recommended_execution: string, - complexity: string - }, - // Task file paths (populated for two-layer format) - taskFiles: [{id: string, path: string}] | null, - explorationsContext: {...} | null, // Multi-angle explorations - explorationAngles: string[], // List of exploration angles - explorationManifest: {...} | null, // Exploration manifest - clarificationContext: {...} | null, - executionMethod: "Agent" | "Codex" | "Auto", // 全局默认 - codeReviewTool: "Skip" | "Gemini Review" | "Agent Review" | string, - originalUserInput: string, - - // 任务级 executor 分配(优先于 executionMethod) - executorAssignments: { - [taskId]: { executor: "gemini" | "codex" | "agent", reason: string } - }, - - // Session artifacts location (saved by lite-plan) - session: { - id: string, // Session identifier: {taskSlug}-{shortTimestamp} - folder: string, // Session folder path: .workflow/.lite-plan/{session-id} - artifacts: { - explorations: [{angle, path}], // exploration-{angle}.json paths - explorations_manifest: string, // explorations-manifest.json path - plan: string // 
plan.json path (always present) - } - } -} -``` - -**Artifact Usage**: -- Artifact files contain detailed planning context -- Pass artifact paths to CLI tools and agents for enhanced context -- See execution options below for usage examples - -### executionResult (Output) - -Collected after each execution call completes: - -```javascript -{ - executionId: string, // e.g., "[Agent-1]", "[Codex-1]" - status: "completed" | "partial" | "failed", - tasksSummary: string, // Brief description of tasks handled - completionSummary: string, // What was completed - keyOutputs: string, // Files created/modified, key changes - notes: string, // Important context for next execution - fixedCliId: string | null // Fixed CLI execution ID (e.g., "implement-auth-2025-12-13-P1") -} -``` - -Appended to `previousExecutionResults` array for context continuity in multi-execution scenarios. - -## Post-Completion Expansion - -**Auto-sync**: 执行 `/workflow:session:sync -y "{summary}"` 更新 project-guidelines + project-tech(Step 6 已触发,此处不重复)。 - -完成后询问用户是否扩展为issue(test/enhance/refactor/doc),选中项调用 `/issue:new "{summary} - {dimension}"` - -**Fixed ID Pattern**: `${sessionId}-${groupId}` enables predictable lookup without auto-generated timestamps. - -**Resume Usage**: If `status` is "partial" or "failed", use `fixedCliId` to resume: -```bash -# Lookup previous execution -ccw cli detail ${fixedCliId} - -# Resume with new fixed ID for retry -ccw cli -p "Continue from where we left off" --resume ${fixedCliId} --tool codex --mode write --id ${fixedCliId}-retry -``` diff --git a/.claude/skills_lib/team-skill-designer-v2/SKILL.md b/.claude/skills_lib/team-skill-designer-v2/SKILL.md new file mode 100644 index 00000000..350fa775 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/SKILL.md @@ -0,0 +1,274 @@ +--- +name: team-skill-designer +description: Design and generate unified team skills with role-based routing. 
All team members invoke ONE skill, SKILL.md routes to role-specific execution via --role arg. Triggers on "design team skill", "create team skill", "team skill designer". +allowed-tools: Task, AskUserQuestion, Read, Write, Bash, Glob, Grep +--- + +# Team Skill Designer v2 + +Meta-skill for creating unified team skills where all team members invoke ONE skill with role-based routing. Generates a complete skill package with SKILL.md as role router and `roles/` folder for per-role execution detail. + +**v2 Style**: 生成的技能包遵循 v3 撰写规范 — text + decision tables + flow symbols, 无伪代码, `` 占位符, 显式节拍控制。 + +## Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Team Skill Designer (this meta-skill) │ +│ → Collect requirements → Analyze patterns → Generate skill pkg │ +└───────────────┬─────────────────────────────────────────────────┘ + │ + ┌───────────┼───────────┬───────────┬───────────┐ + ↓ ↓ ↓ ↓ ↓ +┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ +│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │ Phase 4 │ │ Phase 5 │ +│ Require │ │ Pattern │ │ Skill │ │ Integ │ │ Valid │ +│ Collect │ │ Analyze │ │ Gen │ │ Verify │ │ │ +└─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘ + ↓ ↓ ↓ ↓ ↓ + team- pattern- preview/ integ- validated + config.json analysis SKILL.md + report skill pkg + .json roles/*.md .json → delivery +``` + +## Key Innovation: Unified Skill + Role Router + +**Before** (command approach): 5 separate command files → 5 separate skill paths + +**After** (unified skill approach): + +``` +.claude/skills/team-/ +├── SKILL.md → Skill(skill="team-", args="--role=xxx") +├── roles/ +│ ├── coordinator/ +│ │ ├── role.md +│ │ └── commands/ +│ ├── / +│ │ ├── role.md +│ │ └── commands/ +│ └── / +│ ├── role.md +│ └── commands/ +└── specs/ + └── team-config.json +``` + +→ 1 skill entry point, `--role` arg routes to per-role execution + +## Core Design Patterns + +### Pattern 1: Role Router (Unified Entry Point) + +SKILL.md 
parses `$ARGUMENTS` to extract `--role`: + +``` +Input: Skill(skill="team-", args="--role=planner") + → Parse --role=planner + → Read roles/planner/role.md + → Execute planner-specific phases +``` + +No `--role` → Orchestration Mode (auto route to coordinator). + +### Pattern 2: SKILL.md = Orchestration-Level Only + +SKILL.md 仅包含编排级内容: + +| 包含 | 不包含 | +|------|--------| +| Role Router (parse → dispatch) | Message bus 代码 | +| Architecture 图 | Task lifecycle 代码 | +| Role Registry 表 (含 markdown links) | 工具声明/使用示例 | +| Pipeline 定义 + Cadence Control | 角色特定检测逻辑 | +| Coordinator Spawn Template | 实现级代码块 | +| Shared Infrastructure (Phase 1/5 模板) | | +| Compact Protection | | + +### Pattern 3: Role Files = Self-Contained Execution + +每个 `roles//role.md` 包含角色执行所需的一切: +- Identity, Boundaries (MUST/MUST NOT) +- Toolbox 表 (command links) +- Phase 2-4 核心逻辑 (text + decision tables, 无伪代码) +- Error Handling 表 + +**关键原则**: subagent 加载 role.md 后无需回读 SKILL.md 即可执行。 + +### Pattern 4: v3 Style Output + +生成的技能包遵循以下风格规则: + +| 规则 | 说明 | +|------|------| +| 无伪代码 | 流程用 text + decision tables + flow symbols (→ ├─ └─) | +| 代码块仅限工具调用 | 只有 Task(), TaskCreate(), Bash(), Read() 等实际执行的调用 | +| `` 占位符 | 不使用 `${variable}` 或 `{{handlebars}}` | +| Decision tables | 所有分支逻辑用 `\| Condition \| Action \|` 表格 | +| Phase 1/5 共享 | SKILL.md 定义 Shared Infrastructure, role.md 只写 Phase 2-4 | +| Cadence Control | SKILL.md 包含节拍图和检查点定义 | +| Compact Protection | Phase Reference 表含 Compact 列, 关键 phase 标记重读 | + +### Pattern 5: Batch Role Generation + +Phase 1 一次性收集所有角色 (非逐个): +- Team name + all role definitions in one pass +- Coordinator always generated +- Worker roles collected as batch + +### Pattern 6: Coordinator Commands Alignment + +**dispatch.md 约束**: owner 值匹配 Role Registry | 任务 ID 匹配 Pipeline 图 | 无幽灵角色名 + +**monitor.md 约束**: spawn prompt 含完整 `Skill(skill="...", args="--role=...")` | Task() 含 description + team_name + name | Message Routing 角色名匹配 Registry + +**验证时机**: Phase 4 自动检查。 + +--- + +## Mandatory 
Prerequisites + +> **Do NOT skip**: Read these before any execution. + +### Specification Documents + +| Document | Purpose | When | +|----------|---------|------| +| [specs/team-design-patterns.md](specs/team-design-patterns.md) | Infrastructure patterns (9) + collaboration index | Phase 0 必读 | +| [specs/collaboration-patterns.md](specs/collaboration-patterns.md) | 11 collaboration patterns with convergence control | Phase 0 必读 | +| [specs/quality-standards.md](specs/quality-standards.md) | Quality criteria (4 dimensions + command standards) | Phase 3 前必读 | + +### Template Files + +| Document | Purpose | +|----------|---------| +| [templates/skill-router-template.md](templates/skill-router-template.md) | 生成 SKILL.md 模板 (v3 style) | +| [templates/role-template.md](templates/role-template.md) | 生成 role.md 模板 (v3 style) | +| [templates/role-command-template.md](templates/role-command-template.md) | 生成 command 文件模板 (v3 style) | + +--- + +## Cadence Control + +**节拍模型**: 串行 5-Phase, 每个 Phase 产出一个 artifact 作为下一 Phase 的输入。 + +``` +Phase Cadence (设计生成节拍) +═══════════════════════════════════════════════════════════════════ +Phase 0 1 2 3 4 5 + │ │ │ │ │ │ + 读规格 → 收集需求 → 模式分析 → 技能生成 → 集成验证 → 质量交付 + │ │ │ │ │ │ + [memory] config analysis preview/ report delivery + .json .json SKILL.md .json → skills/ + roles/ + commands/ +═══════════════════════════════════════════════════════════════════ + +Phase 产物链: + Phase 0 → [in-memory] specs + templates 内化 + Phase 1 → team-config.json (角色定义 + pipeline) + Phase 2 → pattern-analysis.json (模式映射 + 协作模式) + Phase 3 → preview/ 完整技能包 + Phase 4 → integration-report.json (一致性报告) + Phase 5 → validation-report.json + delivery → .claude/skills/team-/ + +检查点: + Phase 3 完成 → ⏸ 向用户展示 preview 结构,确认后进入 Phase 4 + Phase 5 评分 → ⏸ 评分 < 60% 则回退 Phase 3 重新生成 +``` + +**Phase 间衔接**: + +| 当前 Phase | 完成条件 | 产物 | 下一步 | +|------------|----------|------|--------| +| Phase 0 | 3 个 spec + 3 个 template 已读取 | in-memory | → Phase 1 | +| Phase 1 | team-config.json 写入成功 | 
team-config.json | → Phase 2 | +| Phase 2 | pattern-analysis.json 写入成功 | pattern-analysis.json | → Phase 3 | +| Phase 3 | preview/ 目录下所有文件生成 | preview/* | → ⏸ 用户确认 → Phase 4 | +| Phase 4 | integration-report.json 无 FAIL 项 | integration-report.json | → Phase 5 | +| Phase 5 | score ≥ 80% | delivery to skills/ | → 完成 | + +**回退机制**: + +| 条件 | 回退到 | 动作 | +|------|--------|------| +| Phase 4 发现 FAIL | Phase 3 | 修复后重新生成 | +| Phase 5 score < 60% | Phase 3 | 重大返工 | +| Phase 5 score 60-79% | Phase 4 | 修复建议后重验 | + +--- + +## Execution Flow + +### Phase Reference Documents + +| Phase | Document | Purpose | Compact | +|-------|----------|---------|---------| +| 0 | (inline) | Read specs + templates | N/A | +| 1 | [phases/01-requirements-collection.md](phases/01-requirements-collection.md) | Batch collect team + all role definitions | 完成后可压缩 | +| 2 | [phases/02-pattern-analysis.md](phases/02-pattern-analysis.md) | Per-role pattern matching and phase mapping | 完成后可压缩 | +| 3 | [phases/03-skill-generation.md](phases/03-skill-generation.md) | Generate unified skill package | **⚠️ 压缩后必须重读** | +| 4 | [phases/04-integration-verification.md](phases/04-integration-verification.md) | Verify internal consistency | 压缩后必须重读 | +| 5 | [phases/05-validation.md](phases/05-validation.md) | Quality gate and delivery | 压缩后必须重读 | + +> **⚠️ COMPACT PROTECTION**: Phase 文件是执行文档。当 context compression 发生后,Phase 指令仅剩摘要时,**必须立即 `Read` 对应 phase 文件重新加载后再继续执行**。不得基于摘要执行任何 Step。 + +### Phase 0: Specification Study (Inline) + +**必须在生成前读取以下文件**: + +1. Read `specs/team-design-patterns.md` → 9 基础设施模式 +2. Read `specs/collaboration-patterns.md` → 11 协作模式 +3. Read `specs/quality-standards.md` → 质量标准 +4. Read `templates/skill-router-template.md` → SKILL.md 生成模板 +5. Read `templates/role-template.md` → role.md 生成模板 +6. Read `templates/role-command-template.md` → command 文件模板 + +### Phase 1-5: Delegated + +各 Phase 读取对应 phase 文件执行。See Phase Reference Documents table above. 
+ +--- + +## Directory Setup + +工作目录: `.workflow/.scratchpad/team-skill-/` + +```bash +Bash("mkdir -p .workflow/.scratchpad/team-skill-$(date +%Y%m%d%H%M%S)") +``` + +## Output Structure + +``` +.workflow/.scratchpad/team-skill-/ +├── team-config.json # Phase 1 output +├── pattern-analysis.json # Phase 2 output +├── integration-report.json # Phase 4 output +├── validation-report.json # Phase 5 output +└── preview/ # Phase 3 output (preview before delivery) + ├── SKILL.md + ├── roles/ + │ ├── coordinator/ + │ │ ├── role.md + │ │ └── commands/ + │ └── / + │ ├── role.md + │ └── commands/ + └── specs/ + └── team-config.json + +Final delivery → .claude/skills/team-/ +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Specs not found | Fall back to inline pattern knowledge | +| Role name conflicts | AskUserQuestion for rename | +| Task prefix conflicts | Suggest alternative prefix | +| Template variable unresolved | FAIL with specific variable name | +| Quality score < 60% | Re-run Phase 3 with additional context | +| Phase file compressed | Re-read phase file before continuing | diff --git a/.claude/skills_lib/team-skill-designer-v2/phases/01-requirements-collection.md b/.claude/skills_lib/team-skill-designer-v2/phases/01-requirements-collection.md new file mode 100644 index 00000000..78463cb0 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/phases/01-requirements-collection.md @@ -0,0 +1,200 @@ +# Phase 1: Requirements Collection (Task-Driven Inference) + +Analyze task requirements, infer appropriate roles, and generate team configuration. 
+ +## Objective + +- Determine team name and display name +- Analyze task description to infer needed roles (coordinator always included) +- For each role: name, responsibility type, task prefix, capabilities +- Build pipeline from inferred roles +- Generate `team-config.json` + +## Input + +| Source | Description | +|--------|-------------| +| User request | `$ARGUMENTS` or interactive input | +| Specification | `specs/team-design-patterns.md` (read in Phase 0) | + +## Execution Steps + +### Step 1: Team Name + Task Description + +Prompt the user for team name and core task description. + +``` +AskUserQuestion({ + questions: [ + { question: "Team name (lowercase, used as .claude/skills/team-{name}/)", + header: "Team Name", + options: ["custom", "dev", "spec", "security"] }, + { question: "Core task of this team? (system will infer roles automatically)", + header: "Task Desc", + options: ["custom", "fullstack dev", "code review + refactor", "doc writing"] } + ] +}) +``` + +### Step 2: Role Inference (Task-Driven) + +Coordinator is **always** included. Scan the task description for intent signals to infer worker roles. 
+ +#### Role Signal Detection Table + +| Role | Keywords (CN/EN) | Responsibility Type | Task Prefix | +|------|-------------------|---------------------|-------------| +| planner | plan, design, architect, explore, analyze requirements | Orchestration | PLAN | +| executor | implement, develop, build, code, create, refactor, migrate | Code generation | IMPL | +| tester | test, verify, validate, QA, regression, fix, bug | Validation | TEST | +| reviewer | review, audit, inspect, code quality | Read-only analysis | REVIEW | +| analyst | research, analyze, investigate, diagnose | Orchestration | RESEARCH | +| writer | document, write doc, generate report | Code generation | DRAFT | +| debugger | debug, troubleshoot, root cause | Orchestration | DEBUG | +| security | security, vulnerability, OWASP, compliance | Read-only analysis | SEC | + +**Inference**: For each role, check if task description matches any keyword. Add matched roles to the inferred list. + +#### Implicit Role Completion Table + +| Condition | Add Role | Reason | +|-----------|----------|--------| +| Has executor, missing planner | Add planner (before executor) | Code needs planning first | +| Has executor, missing tester | Add tester (after executor) | Code needs validation | +| Has debugger, missing tester | Add tester | Bug fixes need verification | +| Has writer, missing reviewer | Add reviewer | Documents need review | + +**Minimum guarantee**: If fewer than 2 worker roles inferred, fall back to standard set: planner + executor + tester + reviewer. + +**Pipeline type tag** (for Step 5): + +| Condition | Pipeline Type | +|-----------|---------------| +| Has writer role | Document | +| Has debugger role | Debug | +| Default | Standard | + +### Step 3: Role Confirmation (Interactive) + +Present the inferred roles to the user for confirmation. + +``` +AskUserQuestion({ + questions: [ + { question: "Inferred roles: . 
Adjust?", + header: "Confirm", + options: ["Confirm (Recommended)", "Add role", "Remove role", "Re-describe"] } + ] +}) +``` + +| User Choice | Action | +|-------------|--------| +| Confirm | Proceed with inferred roles | +| Add role | AskUserQuestion for new role name + responsibility type | +| Remove role | AskUserQuestion for which role to remove | +| Re-describe | Return to Step 1, re-enter task description | + +### Step 4: Capability Assignment (Per Role) + +For each worker role, assign capabilities based on responsibility type. + +#### Tool Assignment Table + +| Responsibility Type | Extra Tools (beyond base set) | Adaptive Routing | +|---------------------|-------------------------------|------------------| +| Read-only analysis | Task(*) | No | +| Code generation | Write(*), Edit(*), Task(*) | Yes | +| Orchestration | Write(*), Task(*) | Yes | +| Validation | Write(*), Edit(*), Task(*) | No | + +> **Base tools** (all roles): SendMessage, TaskUpdate, TaskList, TaskGet, TodoWrite, Read, Bash, Glob, Grep + +#### Message Type Assignment Table + +| Responsibility Type | Message Types | +|---------------------|---------------| +| Read-only analysis | `_result` (analysis complete), `error` | +| Code generation | `_complete`, `_progress`, `error` | +| Orchestration | `_ready`, `_progress`, `error` | +| Validation | `_result`, `fix_required`, `error` | + +**Coordinator** gets special tools: TeamCreate, TeamDelete, AskUserQuestion, TaskCreate + all base tools. +**Coordinator** message types: `plan_approved`, `plan_revision`, `task_unblocked`, `shutdown`, `error`. 
+ +#### Toolbox Assignment Table + +| Responsibility Type | Commands | Subagents | CLI Tools | +|---------------------|----------|-----------|-----------| +| Read-only analysis | review, analyze | (none) | gemini (analysis), codex (review) | +| Code generation | implement, validate | code-developer | (none) | +| Orchestration | explore, plan | cli-explore-agent, cli-lite-planning-agent | gemini (analysis) | +| Validation | validate | code-developer | (none) | + +**Coordinator** always gets: commands=[dispatch, monitor], no subagents, no CLI tools. + +### Step 5: Pipeline Definition + +Sort roles into execution stages by weight, then build dependency chain. + +#### Stage Weight Table + +| Role | Weight | Stage Position | +|------|--------|----------------| +| analyst, debugger, security | 1 | Analysis/Exploration | +| planner | 2 | Planning | +| executor, writer | 3 | Implementation | +| tester, reviewer | 4 | Validation/Review | + +**Pipeline construction flow**: + +1. Group worker roles by weight +2. Sort groups by weight ascending (1 → 2 → 3 → 4) +3. Within same weight → parallel (same stage) +4. Each stage `blockedBy` all roles in previous stage +5. Generate diagram: `Requirements → [Stage1] → [Stage2] → ... → Report` + +### Step 6: Generate Configuration + +Assemble all collected data into `team-config.json`. + +#### Config Schema + +| Field | Source | Example | +|-------|--------|---------| +| `team_name` | Step 1 | `"lifecycle"` | +| `team_display_name` | Capitalized team_name | `"Lifecycle"` | +| `skill_name` | `team-` | `"team-lifecycle"` | +| `skill_path` | `.claude/skills/team-/` | | +| `pipeline_type` | Step 2 tag | `"Standard"` | +| `pipeline` | Step 5 output | `{ stages: [...], diagram: "..." 
}` |
+| `roles` | All roles with full metadata | Array |
+| `worker_roles` | Roles excluding coordinator | Array |
+| `all_roles_tools_union` | Union of all roles' allowed_tools | Comma-separated string |
+| `role_list` | All role names | Comma-separated string |
+
+```
+Write("<workdir>/team-config.json", <team-config JSON>)
+```
+
+## Output
+
+| Item | Value |
+|------|-------|
+| File | `team-config.json` |
+| Format | JSON |
+| Location | `<workdir>/team-config.json` |
+
+## Quality Checklist
+
+- [ ] Team name is lowercase, valid as folder/skill name
+- [ ] Coordinator always included
+- [ ] At least 2 worker roles defined
+- [ ] Task prefixes are UPPERCASE and unique across roles
+- [ ] Pipeline stages reference valid roles
+- [ ] All roles have message types defined
+- [ ] Allowed tools include minimum set per responsibility type
+
+## Next Phase
+
+-> [Phase 2: Pattern Analysis](02-pattern-analysis.md)
diff --git a/.claude/skills_lib/team-skill-designer-v2/phases/02-pattern-analysis.md b/.claude/skills_lib/team-skill-designer-v2/phases/02-pattern-analysis.md
new file mode 100644
index 00000000..ac852049
--- /dev/null
+++ b/.claude/skills_lib/team-skill-designer-v2/phases/02-pattern-analysis.md
@@ -0,0 +1,180 @@
+# Phase 2: Pattern Analysis
+
+Analyze applicable patterns for each role in the team. 
+ +## Objective + +- Per-role: find most similar existing command reference +- Per-role: select infrastructure + collaboration patterns +- Per-role: map 5-phase structure to role responsibilities +- Generate `pattern-analysis.json` + +## Input + +| Source | Description | +|--------|-------------| +| `team-config.json` | Phase 1 output | +| `specs/team-design-patterns.md` | Infrastructure patterns (read in Phase 0) | +| `specs/collaboration-patterns.md` | Collaboration patterns (read in Phase 0) | + +## Execution Steps + +### Step 1: Load Configuration + +``` +Read("/team-config.json") +``` + +### Step 2: Per-Role Similarity Mapping + +For each worker role, find the most similar existing command based on responsibility type. + +#### Similarity Mapping Table + +| Responsibility Type | Primary Reference | Secondary Reference | Reason | +|---------------------|-------------------|---------------------|--------| +| Read-only analysis | review | plan | Both analyze code and report findings with severity classification | +| Code generation | execute | test | Both write/modify code and self-validate | +| Orchestration | plan | coordinate | Both coordinate sub-tasks and produce structured output | +| Validation | test | review | Both validate quality with structured criteria | + +For each worker role: +1. Look up responsibility type in table above +2. Record `similar_to.primary` and `similar_to.secondary` +3. Set `reference_command` = `.claude/commands/team/.md` + +### Step 3: Per-Role Phase Mapping + +Map the generic 5-phase structure to role-specific phase names. 
+ +#### Phase Structure Mapping Table + +| Responsibility Type | Phase 2 | Phase 3 | Phase 4 | +|---------------------|---------|---------|---------| +| Read-only analysis | Context Loading | Analysis Execution | Finding Summary | +| Code generation | Task & Plan Loading | Code Implementation | Self-Validation | +| Orchestration | Context & Complexity Assessment | Orchestrated Execution | Result Aggregation | +| Validation | Environment Detection | Execution & Fix Cycle | Result Analysis | + +> Phase 1 is always "Task Discovery" and Phase 5 is always "Report to Coordinator" for all roles. + +### Step 4: Per-Role Infrastructure Patterns + +#### Core Patterns (mandatory for all roles) + +| Pattern | Name | +|---------|------| +| pattern-1 | Message Bus | +| pattern-2 | YAML Front Matter (adapted: no YAML in skill role files) | +| pattern-3 | Task Lifecycle | +| pattern-4 | Five Phase | +| pattern-6 | Coordinator Spawn | +| pattern-7 | Error Handling | + +#### Conditional Pattern Selection Table + +| Condition | Add Pattern | +|-----------|-------------| +| Role has `adaptive_routing = true` | pattern-5 (Complexity Adaptive) | +| Responsibility type is Code generation or Orchestration | pattern-8 (Session Files) | + +#### Pattern 9 Selection + +| Condition | Uses Pattern 9 | +|-----------|----------------| +| Role has subagents defined (length > 0) | Yes | +| Role has CLI tools defined (length > 0) | Yes | +| Neither | No | + +### Step 5: Command-to-Phase Mapping + +For each worker role, map commands to phases and determine extraction reasons. + +**Per-command extraction reasons**: + +| Condition | Extraction Reason | +|-----------|-------------------| +| Role has subagents | `subagent-delegation` | +| Role has CLI tools | `cli-fan-out` | +| Role has adaptive routing | `complexity-adaptive` | + +Record `phase_commands` mapping (from config): which command runs in which phase. 
+ +### Step 6: Collaboration Pattern Selection + +Select team-level collaboration patterns based on team composition. + +#### Collaboration Pattern Selection Decision Table + +| Condition | Pattern | Name | +|-----------|---------|------| +| Always | CP-1 | Linear Pipeline (base) | +| Any role has Validation or Read-only analysis type | CP-2 | Review-Fix Cycle | +| Any role has Orchestration type | CP-3 | Fan-out/Fan-in | +| Worker roles >= 4 | CP-6 | Incremental Delivery | +| Always | CP-5 | Escalation Chain | +| Always | CP-10 | Post-Mortem | + +#### Convergence Defaults Table + +| Pattern | Max Iterations | Success Gate | +|---------|----------------|--------------| +| CP-1 | 1 | all_stages_completed | +| CP-2 | 5 | verdict_approve_or_conditional | +| CP-3 | 1 | quorum_100_percent | +| CP-5 | null | issue_resolved_at_any_level | +| CP-6 | 3 | all_increments_validated | +| CP-10 | 1 | report_generated | + +### Step 7: Read Reference Commands + +For each unique `similar_to.primary` across all roles: + +``` +Read(".claude/commands/team/.md") +``` + +Store content for Phase 3 reference. Skip silently if file not found. + +### Step 8: Generate Analysis Document + +Assemble all analysis results into `pattern-analysis.json`. 
+
+#### Output Schema
+
+| Field | Source |
+|-------|--------|
+| `team_name` | config |
+| `role_count` / `worker_count` | config |
+| `role_analysis[]` | Steps 2-5 (per-role: similarity, phases, patterns, commands) |
+| `collaboration_patterns[]` | Step 6 |
+| `convergence_config[]` | Step 6 |
+| `referenced_commands[]` | Step 7 |
+| `pipeline` | config |
+| `skill_patterns` | Fixed: role_router, shared_infrastructure, progressive_loading |
+| `command_architecture` | Per-role command mapping + pattern-9 flag |
+
+```
+Write("<workdir>/pattern-analysis.json", <pattern-analysis JSON>)
+```
+
+## Output
+
+| Item | Value |
+|------|-------|
+| File | `pattern-analysis.json` |
+| Format | JSON |
+| Location | `<workdir>/pattern-analysis.json` |
+
+## Quality Checklist
+
+- [ ] Every worker role has similarity mapping
+- [ ] Every worker role has 5-phase structure
+- [ ] Infrastructure patterns include all mandatory patterns
+- [ ] Collaboration patterns selected at team level
+- [ ] Referenced commands are readable
+- [ ] Skill-specific patterns documented
+
+## Next Phase
+
+-> [Phase 3: Skill Package Generation](03-skill-generation.md)
diff --git a/.claude/skills_lib/team-skill-designer-v2/phases/03-skill-generation.md b/.claude/skills_lib/team-skill-designer-v2/phases/03-skill-generation.md
new file mode 100644
index 00000000..e38322be
--- /dev/null
+++ b/.claude/skills_lib/team-skill-designer-v2/phases/03-skill-generation.md
@@ -0,0 +1,239 @@
+# Phase 3: Skill Package Generation
+
+> **COMPACT PROTECTION**: This is the core generation phase. If context compression has occurred and this file is only a summary, **MUST `Read` this file again before executing any Step**. Do not generate from memory.
+
+Generate the unified team skill package: SKILL.md (role router) + per-role `role.md` + per-role `commands/*.md`. 
+ +## Objective + +- Generate `SKILL.md` with role router and shared infrastructure +- Generate `roles/coordinator/role.md` + `commands/` +- Generate `roles//role.md` + `commands/` for each worker role +- Generate `specs/team-config.json` +- All files written to `preview/` directory first + +## Input + +| Source | Description | +|--------|-------------| +| `team-config.json` | Phase 1 output (roles, pipeline, capabilities) | +| `pattern-analysis.json` | Phase 2 output (patterns, phase mapping, similarity) | +| `templates/skill-router-template.md` | SKILL.md generation template | +| `templates/role-template.md` | role.md generation template | +| `templates/role-command-template.md` | command file generation template | + +## Execution Steps + +### Step 1: Load Inputs + Create Preview Directory + +1. Read `/team-config.json` and `/pattern-analysis.json` +2. Read all 3 template files from `templates/` +3. Create directory structure: + +``` +Bash("mkdir -p /roles//commands /specs") +``` + +Repeat for every role in config. + +### Step 2: Generate SKILL.md (Role Router) + +Use `templates/skill-router-template.md` as the base. Fill the template using config values. 
+ +#### Template Variable Mapping + +| Template Variable | Config Source | +|-------------------|--------------| +| `` | `config.skill_name` | +| `` | `config.team_name` | +| `` | `config.team_display_name` | +| `` | `config.all_roles_tools_union` | +| `` | Generated from `config.roles[]` (see Roles Table below) | +| `` | Per-role: `"": { file, prefix }` | +| `` | Per worker role: name + message types | +| `` | Per worker role: Task() spawn block | +| `` | `config.pipeline.diagram` | +| `` | Generated from role names | + +#### Roles Table Generation + +For each role in `config.roles[]`, produce one row: + +``` +| | | | [roles//role.md](roles//role.md) | +``` + +#### Spawn Block Generation + +For each worker role, generate a `Task()` spawn block containing: +- `subagent_type: "general-purpose"` +- `team_name: ` +- `name: ""` +- Prompt with: primary directive (MUST call Skill), role constraints, message bus requirement, workflow steps + +**Spawn prompt must include**: +1. Primary directive: `Skill(skill="", args="--role=")` +2. Task prefix constraint: only handle `-*` tasks +3. Output tag: all messages tagged `[]` +4. Communication rule: only talk to coordinator +5. 
Message bus: call `team_msg` before every `SendMessage` + +``` +Write("/SKILL.md", ) +``` + +### Step 3: Generate Coordinator Role File + +Build coordinator `role.md` with these sections: + +| Section | Content Source | +|---------|---------------| +| Role Identity | Fixed: name=coordinator, prefix=N/A, type=Orchestration | +| Message Types | Fixed 5 types: plan_approved, plan_revision, task_unblocked, shutdown, error | +| Execution Phase 1 | Requirement clarification via AskUserQuestion | +| Execution Phase 2 | TeamCreate + spawn blocks (same as SKILL.md Step 2) | +| Execution Phase 3 | Task chain creation from `config.pipeline.stages` | +| Execution Phase 4 | Message-driven coordination loop | +| Execution Phase 5 | Report + next steps (new requirement or shutdown) | +| Error Handling | Fixed table: unresponsive, rejected plan, stuck tests, critical review | + +#### Task Chain Generation + +For each stage in `config.pipeline.stages[]`: +1. Create task: `TaskCreate({ subject: "-001: work" })` +2. Set owner: `TaskUpdate({ owner: "" })` +3. Set dependencies: `addBlockedBy` = all prefixes from the previous stage + +#### Coordination Handler Table + +For each worker role, generate one row: + +``` +| : | team_msg log -> TaskUpdate completed -> check next | +``` + +``` +Write("/roles/coordinator/role.md", ) +``` + +Generate coordinator command files (dispatch.md, monitor.md) using `templates/role-command-template.md`. + +### Step 4: Generate Worker Role Files + +**For each worker role**, generate `role.md` using `templates/role-template.md`. 
+ +#### Per-Role Template Variable Mapping + +| Template Variable | Source | +|-------------------|--------| +| `` | `role.name` | +| `` | `role.task_prefix` | +| `` | `role.responsibility_type` | +| `` | `role.description` | +| `` | From `role.message_types[]` | +| `` | First non-error, non-progress message type | +| `` | From `pattern-analysis.phase_structure.phase2` | +| `` | From `pattern-analysis.phase_structure.phase3` | +| `` | From `pattern-analysis.phase_structure.phase4` | +| `` | From `role.commands[]` with phase mapping | +| `` | From `role.subagents[]` | +| `` | From `role.cli_tools[]` | + +#### Role File Sections (v3 style) + +Each generated `role.md` must contain: + +1. **Role Identity**: name, prefix, output tag, responsibility, communication rule +2. **Role Boundaries**: MUST / MUST NOT lists +3. **Message Types**: table with type, direction, trigger +4. **Message Bus**: `team_msg` call pattern + CLI fallback +5. **Toolbox**: commands table, subagents table, CLI tools table +6. **Execution (5-Phase)**: + - Phase 1: Task Discovery (TaskList -> filter by prefix -> TaskGet -> TaskUpdate in_progress) + - Phase 2-4: Content varies by responsibility type (see Phase Content Table below) + - Phase 5: Report to Coordinator (team_msg + SendMessage + TaskUpdate completed + check next) +7. 
**Error Handling**: table with scenario/resolution + +#### Phase Content by Responsibility Type + +| Type | Phase 2 | Phase 3 | Phase 4 | +|------|---------|---------|---------| +| Read-only analysis | Load plan + get changed files + read contents | Domain-specific analysis per file | Classify findings by severity | +| Code generation | Extract plan path + load plan tasks | Implement tasks (adaptive: direct edit or delegate to code-developer) | Self-validation (syntax check + auto-fix) | +| Orchestration | Assess complexity (Low/Medium/High) | Execute (adaptive: direct search or delegate to sub-agent) | Aggregate results | +| Validation | Detect changed files for scope | Iterative test-fix cycle (max 5 iterations) | Analyze results (iterations, pass rate) | + +``` +Write("/roles//role.md", ) +``` + +### Step 5: Generate Command Files + +#### Command Extraction Decision Table + +| Condition | Extract to command file? | +|-----------|--------------------------| +| Role has subagents (delegation needed) | Yes | +| Role has CLI tools (fan-out needed) | Yes | +| Role has adaptive routing (complexity branching) | Yes | +| None of the above | No (all phases execute inline in role.md) | + +For each role that needs command files, generate from `templates/role-command-template.md`. 
+ +#### Pre-built Command Patterns + +| Command | Description | Delegation Mode | Used By Phase | +|---------|-------------|-----------------|---------------| +| explore | Multi-angle codebase exploration | Subagent Fan-out | Phase 2 | +| analyze | Multi-perspective code analysis | CLI Fan-out | Phase 3 | +| implement | Code implementation via delegation | Sequential Delegation | Phase 3 | +| validate | Iterative test-fix cycle | Sequential Delegation | Phase 3 | +| review | 4-dimensional code review | CLI Fan-out | Phase 3 | +| dispatch | Task chain creation (coordinator) | Direct | Phase 3 | +| monitor | Message-driven coordination (coordinator) | Message-Driven | Phase 4 | + +If command name not in pre-built list, generate a skeleton with TODO placeholders. + +``` +Write("/roles//commands/.md", ) +``` + +### Step 6: Copy Team Config + +``` +Write("/specs/team-config.json", ) +``` + +### Step 7: Preview Checkpoint + +**PAUSE**: Present the generated preview structure to the user for confirmation before proceeding to Phase 4. + +Display: +- File tree of `/` +- Role count and names +- Pipeline diagram +- Total file count + +Wait for user confirmation before advancing. 
+ +## Output + +| Item | Value | +|------|-------| +| Directory | `/preview/` | +| Files | SKILL.md + roles/*/role.md + roles/*/commands/*.md + specs/team-config.json | + +## Quality Checklist + +- [ ] SKILL.md has frontmatter, architecture diagram, role router, role dispatch, shared infrastructure +- [ ] Every role has `role.md` with all 7 required sections +- [ ] Coordinator has dispatch.md and monitor.md command files +- [ ] Worker roles with delegation have command files +- [ ] All generated files use v3 style: text + decision tables + flow symbols, no pseudocode +- [ ] Spawn blocks include complete prompt with primary directive + constraints +- [ ] All `` variables resolved (no `${variable}` syntax in output) +- [ ] Pipeline diagram matches actual role stages + +## Next Phase + +-> [Phase 4: Integration Verification](04-integration-verification.md) diff --git a/.claude/skills_lib/team-skill-designer-v2/phases/04-integration-verification.md b/.claude/skills_lib/team-skill-designer-v2/phases/04-integration-verification.md new file mode 100644 index 00000000..8b9a155e --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/phases/04-integration-verification.md @@ -0,0 +1,183 @@ +# Phase 4: Integration Verification + +Verify the generated skill package is internally consistent. + +## Objective + +- Verify SKILL.md role router references match actual role files +- Verify task prefixes are unique across all roles +- Verify message types are consistent between config and generated files +- Verify coordinator spawn template uses correct skill invocation +- Verify role file structural compliance +- Verify coordinator commands alignment +- Generate `integration-report.json` + +## Input + +| Source | Description | +|--------|-------------| +| `/preview/` | Phase 3 generated skill package | +| `team-config.json` | Phase 1 configuration | + +## Execution Steps + +### Step 1: Load Generated Files + +1. Read `/team-config.json` +2. Read `/SKILL.md` +3. 
Read each `/roles//role.md` +4. Read each `/roles//commands/*.md` + +### Step 2: Run 6 Integration Checks + +#### Check 1: Router Consistency + +For each role in config, verify 3 conditions in SKILL.md: + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Router entry | SKILL.md contains `""` | Found | +| Role file exists | `roles//role.md` is readable | File exists | +| Role link valid | SKILL.md contains `roles//role.md` | Found | + +**Status**: PASS if all 3 conditions met for every role, FAIL otherwise. + +#### Check 2: Prefix Uniqueness + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| All task prefixes | Collect `task_prefix` from each worker role | No duplicates | + +**Status**: PASS if all prefixes unique, FAIL if any duplicate found. + +#### Check 3: Message Type Consistency + +For each worker role: + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Config message types | List types from `role.message_types[]` | Baseline | +| Types in role file | Search role.md for each type string | All present | + +**Status**: PASS if all configured types found in role file, WARN if any missing. + +#### Check 4: Spawn Template Verification + +For each worker role, verify in SKILL.md: + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Spawn present | SKILL.md contains `name: ""` | Found | +| Skill call correct | Contains `Skill(skill="", args="--role=")` | Found | +| Prefix in prompt | Contains `-*` | Found | + +**Status**: PASS if all 3 conditions met, FAIL otherwise. 
+ +#### Check 5: Role File Pattern Compliance + +For each role file, check structural elements: + +| Item | Search Pattern | Required | +|------|---------------|----------| +| Role Identity section | `## Role Identity` | Yes | +| 5-Phase structure | `Phase 1` and `Phase 5` both present | Yes | +| Task lifecycle | `TaskList`, `TaskGet`, `TaskUpdate` all present | Yes | +| Message bus | `team_msg` present | Yes | +| SendMessage | `SendMessage` present | Yes | +| Error Handling | `## Error Handling` | Yes | + +**Status**: PASS if all 6 items found, PARTIAL if some missing, MISSING if file not found. + +#### Check 5b: Command File Verification + +For each role's command files: + +| Item | Search Pattern | Required | +|------|---------------|----------| +| Strategy section | `## Strategy` | Yes | +| Execution Steps | `## Execution Steps` | Yes | +| Error Handling | `## Error Handling` | Yes | +| When to Use | `## When to Use` | Yes | +| Self-contained | No `Read("../` cross-command references | Yes | + +**Status**: PASS if all items found, PARTIAL if some missing, MISSING if file not found. + +#### Check 6: Coordinator Commands Alignment + +> **Critical**: dispatch.md and monitor.md are the most common source of integration failures. 
+ +**6a: dispatch.md role names** + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Owner values | Extract all `owner: ""` from dispatch.md | Every name exists in config roles | +| No ghost roles | Compare dispatch roles vs config roles | No invalid role names | + +**6b: monitor.md spawn completeness** + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Has `description:` | Search for `description:` | Found | +| Has `team_name:` | Search for `team_name:` | Found | +| Has `name:` param | Search for `name:` | Found | +| Has Skill callback | Search for `Skill(skill=` | Found | +| Has role boundaries | Search for role constraint / MUST keywords | Found | +| Not minimal prompt | No `prompt: \`Execute task` anti-pattern | Confirmed | + +**6c: Pipeline alignment** + +| Item | Method | Pass Criteria | +|------|--------|---------------| +| Pipeline task IDs | From `config.pipeline_tasks` (if defined) | Baseline | +| Dispatch task IDs | Extract `subject: ""` from dispatch.md | Match pipeline | + +**Status**: PASS if no mismatches, WARN if pipeline_tasks not defined, FAIL if mismatches found. + +### Step 3: Generate Report + +Compute overall status: PASS if all checks pass (excluding SKIP), NEEDS_ATTENTION otherwise. 
+ +#### Report Schema + +| Field | Content | +|-------|---------| +| `team_name` | Config team name | +| `skill_name` | Config skill name | +| `checks.router_consistency` | Check 1 results per role | +| `checks.prefix_uniqueness` | Check 2 result | +| `checks.message_types` | Check 3 results per role | +| `checks.spawn_template` | Check 4 results per role | +| `checks.pattern_compliance` | Check 5 results per role | +| `checks.command_files` | Check 5b results per role | +| `checks.coordinator_commands` | Check 6a/6b/6c results | +| `overall` | PASS or NEEDS_ATTENTION | +| `file_count` | skill_md: 1, role_files: N, total: N+2 | + +``` +Write("/integration-report.json", ) +``` + +## Output + +| Item | Value | +|------|-------| +| File | `integration-report.json` | +| Format | JSON | +| Location | `/integration-report.json` | + +## Quality Checklist + +- [ ] Every role in config has a router entry in SKILL.md +- [ ] Every role has a file in `roles/` +- [ ] Task prefixes are unique +- [ ] Spawn template uses correct `Skill(skill="...", args="--role=...")` +- [ ] Spawn template includes `description`, `team_name`, `name` parameters +- [ ] All role files have 5-phase structure +- [ ] All role files have message bus integration +- [ ] dispatch.md `owner` values all exist in config roles (no ghost roles) +- [ ] monitor.md spawn prompt contains full Skill callback (not minimal) +- [ ] Task IDs in dispatch.md match pipeline diagram in SKILL.md + +## Next Phase + +-> [Phase 5: Validation](05-validation.md) diff --git a/.claude/skills_lib/team-skill-designer-v2/phases/05-validation.md b/.claude/skills_lib/team-skill-designer-v2/phases/05-validation.md new file mode 100644 index 00000000..58873673 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/phases/05-validation.md @@ -0,0 +1,209 @@ +# Phase 5: Validation + +Verify quality and deliver the final skill package. 
+ +## Objective + +- SKILL.md structural completeness check +- Per-role structural completeness check +- Per-role command file quality check +- Quality scoring across 5 dimensions +- Deliver final skill package to `.claude/skills/team-/` + +## Input + +| Source | Description | +|--------|-------------| +| `/preview/` | Phase 3 generated skill package | +| `integration-report.json` | Phase 4 integration check results | +| `specs/quality-standards.md` | Quality criteria (read in Phase 0) | + +## Execution Steps + +### Step 1: Load Files + +1. Read `/team-config.json` +2. Read `/integration-report.json` +3. Read `/SKILL.md` +4. Read each `/roles//role.md` +5. Read each `/roles//commands/*.md` + +### Step 2: SKILL.md Structural Check + +#### SKILL.md Structure Checklist + +| # | Check Item | Search For | +|---|------------|------------| +| 1 | Frontmatter | `---` block at file start | +| 2 | Architecture Overview | `## Architecture Overview` | +| 3 | Role Router | `## Role Router` | +| 4 | Role Dispatch Code | `VALID_ROLES` | +| 5 | Orchestration Mode | `Orchestration Mode` | +| 6 | Available Roles Table | `| Role | Task Prefix` | +| 7 | Shared Infrastructure | `## Shared Infrastructure` | +| 8 | Role Isolation Rules | `Role Isolation` | +| 9 | Pipeline Diagram | `## Pipeline` | +| 10 | Coordinator Spawn Template | `Coordinator Spawn` | +| 11 | Spawn Skill Directive | `MUST` + primary directive | +| 12 | Spawn Description Param | `description:` in spawn block | +| 13 | Error Handling | `## Error Handling` | + +**SKILL.md score** = (passed items / 13) * 100 + +### Step 3: Per-Role Structural Check + +#### Role Structure Checklist + +| # | Check Item | Search For | +|---|------------|------------| +| 1 | Role Identity | `## Role Identity` | +| 2 | Role Boundaries | `## Role Boundaries` | +| 3 | Output Tag | `Output Tag` | +| 4 | Message Types Table | `## Message Types` | +| 5 | Message Bus | `## Message Bus` | +| 6 | CLI Fallback | `CLI` fallback section | +| 7 | Toolbox 
Section | `## Toolbox` | +| 8 | 5-Phase Execution | `## Execution` | +| 9 | Phase 1 Task Discovery | `Phase 1` + `Task Discovery` | +| 10 | TaskList Usage | `TaskList` | +| 11 | TaskGet Usage | `TaskGet` | +| 12 | TaskUpdate Usage | `TaskUpdate` | +| 13 | team_msg Before SendMessage | `team_msg` | +| 14 | SendMessage to Coordinator | `SendMessage` | +| 15 | Error Handling | `## Error Handling` | + +**Per-role score** = (passed items / 15) * 100 + +| Score | Status | +|-------|--------| +| >= 80% | PASS | +| < 80% | PARTIAL | +| File missing | MISSING (score = 0) | + +### Step 3b: Command File Quality Check + +For each role's command files: + +#### Command Quality Checklist + +| # | Check Item | Search For | +|---|------------|------------| +| 1 | When to Use section | `## When to Use` | +| 2 | Strategy section | `## Strategy` | +| 3 | Delegation mode declared | `Delegation Mode` | +| 4 | Execution Steps section | `## Execution Steps` | +| 5 | Error Handling section | `## Error Handling` | +| 6 | Output Format section | `## Output Format` | +| 7 | Self-contained (no cross-ref) | No `Read("../` patterns | + +**Per-command score** = (passed items / 7) * 100. Role command score = average of all commands. + +### Step 4: Quality Scoring + +#### Quality Scoring Table + +| Dimension | Weight | Source | Calculation | +|-----------|--------|--------|-------------| +| `skill_md` | Equal | Step 2 | SKILL.md checklist score | +| `roles_avg` | Equal | Step 3 | Average of all role scores | +| `integration` | Equal | Phase 4 report | PASS=100, otherwise=50 | +| `consistency` | Equal | Cross-check | Start at 100, -20 per mismatch (see below) | +| `command_quality` | Equal | Step 3b | Average of all command scores | + +**Consistency deductions**: + +| Mismatch | Deduction | +|----------|-----------| +| Skill name not in SKILL.md | -20 | +| Team name not in SKILL.md | -20 | +| Any role name not in SKILL.md | -10 per role | + +**Overall score** = average of all 5 dimension scores. 
+ +#### Delivery Decision Table + +| Score Range | Gate | Action | +|-------------|------|--------| +| >= 80% | PASS | Deliver to `.claude/skills/team-/` | +| 60-79% | REVIEW | Deliver with warnings, suggest fixes | +| < 60% | FAIL | Do not deliver, return to Phase 3 for rework | + +### Step 5: Generate Validation Report + +#### Report Schema + +| Field | Content | +|-------|---------| +| `team_name` | Config team name | +| `skill_name` | Config skill name | +| `timestamp` | ISO timestamp | +| `scores` | All 5 dimension scores | +| `overall_score` | Average score | +| `quality_gate` | PASS / REVIEW / FAIL | +| `skill_md_checks` | Step 2 results | +| `role_results` | Step 3 results per role | +| `integration_status` | Phase 4 overall status | +| `delivery.source` | Preview directory | +| `delivery.destination` | `.claude/skills//` | +| `delivery.ready` | true if gate is not FAIL | + +``` +Write("/validation-report.json", ) +``` + +### Step 6: Deliver Final Package + +**Only execute if `quality_gate` is not FAIL.** + +1. Create destination directory structure: + +``` +Bash("mkdir -p .claude/skills//roles//commands .claude/skills//specs") +``` + +2. Copy files from preview to destination: + +| Source | Destination | +|--------|-------------| +| `/SKILL.md` | `.claude/skills//SKILL.md` | +| `/roles//role.md` | `.claude/skills//roles//role.md` | +| `/roles//commands/*.md` | `.claude/skills//roles//commands/*.md` | +| `/specs/team-config.json` | `.claude/skills//specs/team-config.json` | + +3. Report delivery summary: + - Destination path + - Skill name + - Quality score and gate + - Role list + - Usage examples: `Skill(skill="", args="--role=")` + +4. List delivered files: + +``` +Bash("find .claude/skills/ -type f | sort") +``` + +**If gate is FAIL**: Report failure with score, suggest returning to Phase 3 for rework. 
+ +## Output + +| Item | Value | +|------|-------| +| File | `validation-report.json` | +| Format | JSON | +| Location | `/validation-report.json` | +| Delivery | `.claude/skills/team-/` (if gate passes) | + +## Quality Checklist + +- [ ] SKILL.md passes all 13 routing-level structural checks +- [ ] All role files pass structural checks (>= 80%) +- [ ] All command files pass quality checks (>= 80%) +- [ ] Integration report is PASS +- [ ] Overall score >= 80% +- [ ] Final package delivered to `.claude/skills/team-/` +- [ ] Usage instructions provided to user + +## Completion + +This is the final phase. The unified team skill is ready for use. diff --git a/.claude/skills_lib/team-skill-designer-v2/specs/collaboration-patterns.md b/.claude/skills_lib/team-skill-designer-v2/specs/collaboration-patterns.md new file mode 100644 index 00000000..700165b0 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/specs/collaboration-patterns.md @@ -0,0 +1,1555 @@ +# Collaboration Patterns Specification + +> 11 种团队协作模式,每种具备:收敛性、完整流程、反馈控制 + +--- + +## Pattern Standard Structure + +每种协作模式遵循统一规范: + +``` +┌─────────────────────────────────────────┐ +│ Entry Condition (何时启用) │ +├─────────────────────────────────────────┤ +│ Roles Required (需要哪些角色) │ +├─────────────────────────────────────────┤ +│ Workflow (完整执行流程) │ +├─────────────────────────────────────────┤ +│ Convergence (收敛条件) │ +│ ├─ Success Gate (成功判定) │ +│ ├─ Max Iterations (最大迭代) │ +│ └─ Timeout (超时处理) │ +├─────────────────────────────────────────┤ +│ Feedback Loop (反馈控制) │ +│ ├─ Signal (反馈信号) │ +│ ├─ Handler (处理逻辑) │ +│ └─ Correction (纠正动作) │ +├─────────────────────────────────────────┤ +│ Fallback (降级策略) │ +└─────────────────────────────────────────┘ +``` + +--- + +## CP-1: Linear Pipeline (线性流水线) + +### Description + +最基础的协作模式。任务沿固定顺序在角色间传递,每个阶段有明确的入口和出口条件。上一阶段的输出是下一阶段的输入。 + +### Entry Condition + +- 任务具有清晰的阶段划分(规划 → 实现 → 验证) +- 各阶段之间有天然的依赖关系 +- 适用于大多数标准特性开发 + +### Roles Required + +`coordinator` → `planner` → 
`executor` → `tester` + +### Workflow + +``` + ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌──────────┐ +需求 ──→ │ PLAN │──→ │ IMPL │──→ │ TEST │──→ │ REPORT │ + │ planner │ │ executor │ │ + REVIEW │ │ coord. │ + └────┬─────┘ └────┬─────┘ └─────┬─────┘ └──────────┘ + │ │ │ + ▼ ▼ ▼ + plan.json code changes test results + review findings +``` + +```javascript +// Coordinator creates task chain +TaskCreate({ subject: "PLAN-001", owner: "planner" }) +TaskCreate({ subject: "IMPL-001", owner: "executor", addBlockedBy: [planId] }) +TaskCreate({ subject: "TEST-001", owner: "tester", addBlockedBy: [implId] }) +TaskCreate({ subject: "REVIEW-001", owner: "tester", addBlockedBy: [implId] }) +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 所有阶段任务状态 = `completed` | +| **Max Iterations** | 每阶段 1 次(若失败触发 Review-Fix Cycle) | +| **Timeout** | 无显式超时,每阶段内部有各自收敛机制 | + +### Feedback Loop + +``` +┌─ Stage Transition Feedback ─────────────────────────┐ +│ │ +│ Plan rejected? → planner revises → resubmit │ +│ Impl has errors? → executor self-validates → fix │ +│ Tests fail? → tester fix cycle → retry │ +│ Review blocks? 
→ create IMPL-fix → executor fixes │ +│ │ +└──────────────────────────────────────────────────────┘ +``` + +**Signal**: 下一阶段的 reject/fail message +**Handler**: Coordinator 路由消息回上一阶段 +**Correction**: 上一阶段 agent 修订并重新提交 + +### Fallback + +- Plan 拒绝 3+ 次 → Coordinator 自行规划 +- Test 达不到 95% 超过 5 次迭代 → 上报用户 +- Review 发现 critical → 创建 IMPL-fix 任务 + +### Implementation Reference + +当前 `coordinate.md` 即采用此模式。 + +--- + +## CP-2: Review-Fix Cycle (审查修复循环) + +### Description + +两个角色之间的迭代改进循环。一个角色产出工作成果,另一个角色审查,发现问题后退回修复,直到达到质量门控。这是软件开发中 code review 的自然映射。 + +### Entry Condition + +- 工作产出需要质量验证(代码实现、计划、文档) +- 存在明确的质量标准(pass rate、severity threshold、acceptance criteria) +- 需要多轮迭代才能达到质量要求 + +### Roles Required + +`producer` (executor/planner) ↔ `reviewer` (tester/reviewer) + +### Workflow + +``` + ┌─────────┐ ┌──────────┐ + │Producer │ │Reviewer │ + │ │──(1)产出───→│ │ + │ │ │ │ + │ │←─(2)反馈────│ │ + │ │ │ │ + │ │──(3)修订───→│ │ + │ │ │ │ + │ ... │ ...循环 │ ... │ + │ │ │ │ + └─────────┘ └──────────┘ + │ │ + ▼ ▼ + final artifact APPROVE verdict +``` + +```javascript +// Coordinator orchestrates review-fix cycle +function reviewFixCycle(producerRole, reviewerRole, maxIterations) { + let iteration = 0 + let verdict = 'PENDING' + + while (iteration < maxIterations && verdict !== 'APPROVE') { + iteration++ + + // Step 1: Producer delivers (or revises) + if (iteration === 1) { + // Wait for initial delivery + // msg type: impl_complete / plan_ready + } else { + // Wait for revision + // msg type: impl_complete (revision) + } + + // Step 2: Reviewer examines + // Creates REVIEW task, waits for review_result + + // Step 3: Check verdict + verdict = reviewResult.data.verdict // APPROVE | CONDITIONAL | BLOCK + + if (verdict === 'BLOCK') { + // Step 4: Create fix task for producer + TaskCreate({ + subject: `IMPL-fix-${iteration}`, + description: `Fix issues: ${reviewResult.data.findings}`, + owner: producerRole + }) + // Send feedback to producer + team_msg({ type: "fix_required", data: { 
iteration, findings: reviewResult.data.findings } }) + } + } + + return { verdict, iterations: iteration } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | Reviewer verdict = `APPROVE` 或 `CONDITIONAL`(无 critical findings) | +| **Max Iterations** | 5 轮(可配置)。Producer 修复 → Reviewer 再审 = 1 轮 | +| **Timeout** | 单轮超时 = 阶段超时(由各角色内部控制) | + +### Feedback Loop + +``` +Signal: review_result { verdict: "BLOCK", findings: [...] } + ↓ +Handler: Coordinator 解析 findings,按 severity 分类 + ↓ +Correction: 创建 IMPL-fix 任务,附带 findings 明细 + Producer 收到任务 → 读取 findings → 修复 → 重新提交 + ↓ +Loop: Reviewer 再次审查修订后的产出 +``` + +**反馈信号结构**: +```javascript +{ + verdict: "APPROVE" | "CONDITIONAL" | "BLOCK", + findings: { + critical: [{ file, line, description, suggestion }], + high: [...], + medium: [...], + low: [...] + }, + iteration: 2, + delta: "+3 fixed, -1 new issue" // 对比上一轮的变化 +} +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 达到 max iterations 仍 BLOCK | 上报用户,附带全部 findings 历史 | +| Reviewer 发现 Producer 无法修复的设计问题 | 升级到 CP-5 Escalation,或回退到 CP-1 重新规划 | +| 连续 2 轮 findings 不减少(无改善) | 中断循环,上报用户请求介入 | + +--- + +## CP-3: Parallel Fan-out/Fan-in (并行扇出扇入) + +### Description + +Coordinator 将同一任务或同一类任务广播给多个 agent 并行执行,收集所有结果后聚合。适用于需要多角度分析、分片处理、或冗余验证的场景。 + +### Entry Condition + +- 任务可分解为独立的并行子任务 +- 需要多角度/多维度分析(如安全 + 性能 + 架构审查) +- 大型任务需要分片并行处理 + +### Roles Required + +`coordinator` → `worker-1, worker-2, ... 
worker-N` → `coordinator` (aggregation) + +### Workflow + +``` + ┌─ worker-1 ─┐ + │ 角度 A │───┐ + broadcast ├─ worker-2 ─┤ │ aggregate +coord ──────────────┤ 角度 B │───┼──── coord + ├─ worker-3 ─┤ │ + │ 角度 C │───┘ + └────────────┘ +``` + +```javascript +// Phase 1: Fan-out - broadcast tasks +const workerTasks = angles.map((angle, i) => { + const taskId = TaskCreate({ + subject: `ANALYZE-${i+1}: ${angle} analysis`, + description: `Analyze from ${angle} perspective: ${requirement}`, + owner: `worker-${i+1}`, + activeForm: `Analyzing ${angle}` + }) + return taskId +}) + +// Phase 2: Wait for all workers (Stop-Wait with parallel Task calls) +// 同步阻塞 Task() 调用即等待机制。若 worker 在 Phase 1 已用 Task(run_in_background: false) +// 同步 spawn,则 Phase 1 返回时所有 worker 已完成,无需额外等待。 +// 若使用 run_in_background: true 并行 spawn,则逐个 TaskOutput(block: true) 等待完成。 +function waitForCompletion(taskIds) { + let completedCount = 0 + for (const id of taskIds) { + const task = TaskGet({ taskId: id }) + if (task.status === 'completed') completedCount++ + } + return { completed: completedCount, total: taskIds.length, timedOut: false } +} + +// Phase 3: Fan-in - aggregate results +function aggregateResults(taskIds) { + const results = taskIds.map(id => { + const task = TaskGet({ taskId: id }) + return { angle: task.subject, result: task.metadata?.result } + }) + + // Conflict detection + const conflicts = detectConflicts(results) + + return { + results, + conflicts, + consensus: conflicts.length === 0, + summary: synthesize(results) + } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 所有 worker 完成(或达到 quorum 比例,默认 100%) | +| **Max Iterations** | 1 轮扇出(若聚合后有冲突,可触发 CP-4 Consensus) | +| **Timeout** | 可配置,默认 5 分钟。超时后用已完成的结果聚合 | + +### Feedback Loop + +``` +Signal: 每个 worker 的 {角度}_result message + ↓ +Handler: Coordinator 收集所有结果 + ↓ +Aggregate: 合并结果 + 冲突检测 + ├─ 无冲突 → 直接合成最终结果 + └─ 有冲突 → 升级到 CP-4 Consensus Gate +``` + +**聚合策略**: +- **Union(并集)**: 合并所有发现(适用于安全审查 - 不遗漏任何 
finding) +- **Intersection(交集)**: 只保留多个 worker 共同发现的问题(适用于降噪) +- **Weighted(加权)**: 按 worker 的专业权重合并(适用于多专家评审) + +### Fallback + +| Condition | Action | +|-----------|--------| +| Worker 超时未完成 | 用已完成的 worker 结果聚合,标注缺失角度 | +| Worker 返回错误 | 跳过该 worker,用 N-1 结果聚合 | +| 聚合结果有冲突 | 触发 CP-4 Consensus Gate 解决分歧 | + +--- + +## CP-4: Consensus Gate (共识门控) + +### Description + +在做出重要决策前,要求多个 agent 投票表达意见,只有达到 quorum 才能通过。模拟软件开发中的 Design Review、Architecture Decision Record (ADR) 的决策流程。 + +### Entry Condition + +- 架构决策(选择技术方案 A vs B vs C) +- 安全决策(是否可接受某个风险) +- 影响面大的重构(需要多方确认) +- CP-3 扇出结果有冲突需要裁决 + +### Roles Required + +`proposer` → `voter-1, voter-2, ... voter-N` → `coordinator` (tally) + +### Workflow + +``` + ┌─ voter-1 ─────────────┐ + │ APPROVE + rationale │ + proposal ├─ voter-2 ─────────────┤ tally +proposer ─────────→ │ REJECT + rationale │ ────→ coordinator + ├─ voter-3 ─────────────┤ │ + │ APPROVE + conditions │ ▼ + └───────────────────────┘ decision +``` + +```javascript +// Phase 1: Proposal +const proposal = { + id: `PROPOSAL-${Date.now()}`, + title: "Adopt Strategy Pattern for payment gateway", + options: [ + { id: "A", description: "Strategy Pattern with factory", pros: [...], cons: [...] }, + { id: "B", description: "Plugin architecture", pros: [...], cons: [...] 
} + ], + context: "Payment module needs multi-gateway support", + deadline: Date.now() + 300000 // 5 min +} + +// Phase 2: Broadcast proposal to voters +voters.forEach(voter => { + SendMessage({ + recipient: voter, + content: `## Proposal: ${proposal.title}\n${JSON.stringify(proposal)}`, + summary: "Vote requested" + }) +}) + +// Phase 3: Collect votes +function collectVotes(proposalId, voterCount, quorum, deadline) { + const votes = [] + while (votes.length < voterCount && Date.now() < deadline) { + // Listen for vote messages + const msgs = team_msg({ operation: "list", type: "vote" }) + const newVotes = msgs.filter(m => + m.data.proposalId === proposalId && !votes.find(v => v.from === m.from) + ) + votes.push(...newVotes) + + // Check quorum + if (votes.length >= quorum) break + } + return votes +} + +// Phase 4: Tally and decide +function tallyVotes(votes, quorumRatio = 0.67) { + const approvals = votes.filter(v => v.data.vote === 'APPROVE') + const rejections = votes.filter(v => v.data.vote === 'REJECT') + const conditions = votes.flatMap(v => v.data.conditions || []) + + const approvalRatio = approvals.length / votes.length + const passed = approvalRatio >= quorumRatio + + return { + passed, + approvalRatio, + approvals: approvals.length, + rejections: rejections.length, + conditions: [...new Set(conditions)], + rationales: votes.map(v => ({ from: v.from, vote: v.data.vote, rationale: v.data.rationale })) + } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 赞成率 ≥ quorum(默认 2/3)且无 BLOCK 级别反对 | +| **Max Iterations** | 2 轮。第 1 轮未达 quorum → 修订提案 → 第 2 轮重投 | +| **Timeout** | 5 分钟。超时后以已收到的票数计算(≥ quorum 则通过) | + +### Feedback Loop + +``` +Signal: vote { vote: "REJECT", rationale: "...", conditions: [...] 
} + ↓ +Handler: Coordinator 聚合所有反对意见和附加条件 + ↓ +Correction: ├─ 未达 quorum → Proposer 修订提案,融合反对意见 → 重投 + ├─ 达到 quorum 但有 conditions → 记录 conditions 作为实施约束 + └─ 明确通过 → 执行决策 +``` + +**投票格式**: +```javascript +team_msg({ + type: "vote", + data: { + proposalId: "PROPOSAL-xxx", + vote: "APPROVE" | "REJECT" | "ABSTAIN", + rationale: "选择方案A因为...", // 必须提供理由 + conditions: ["需要增加向后兼容层"], // 可选附加条件 + confidence: 0.85 // 置信度 0-1 + } +}) +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 2 轮都未达 quorum | 上报用户裁决 | +| 投票截止但票数不足(< N/2) | 延长截止时间 1 轮 | +| 全员 ABSTAIN | 由 Coordinator 做默认决策并记录 | + +--- + +## CP-5: Escalation Chain (逐级升级) + +### Description + +当 agent 遇到无法自行解决的问题时,逐级升级到更高层级的处理能力。模拟软件开发中的 on-call escalation / tiered support。 + +### Entry Condition + +- Agent 自修复失败(尝试 N 次后仍无法解决) +- 问题超出当前角色能力范围 +- 需要更高权限或更广视角的决策 + +### Roles Required + +`agent` → `specialist` → `coordinator` → `user` + +### Workflow + +``` +Level 0 Level 1 Level 2 Level 3 +┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ +│ Agent │────→│Specialist│────→│Coordinator│───→│ User │ +│ 自修复 │ │ 专家诊断 │ │ 全局视角 │ │ 人工裁决 │ +└──────────┘ └──────────┘ └──────────┘ └──────────┘ + │ │ │ │ + ▼ ▼ ▼ ▼ + retry 2x CLI analysis cross-team fix manual fix +``` + +```javascript +// Escalation state machine +const ESCALATION_LEVELS = [ + { + level: 0, + name: "Self-repair", + handler: "agent", + maxAttempts: 2, + actions: ["Retry with different approach", "Read more context", "Simplify approach"] + }, + { + level: 1, + name: "Specialist diagnosis", + handler: "specialist", + maxAttempts: 1, + actions: ["CLI analysis (gemini/qwen)", "Cross-file dependency trace", "Pattern matching"] + }, + { + level: 2, + name: "Coordinator intervention", + handler: "coordinator", + maxAttempts: 1, + actions: ["Reassign to different agent", "Modify task scope", "Create support task"] + }, + { + level: 3, + name: "User escalation", + handler: "user", + maxAttempts: 1, + actions: ["Present diagnosis chain", "Request manual 
guidance", "Offer options"] + } +] + +function escalate(issue, currentLevel) { + const nextLevel = ESCALATION_LEVELS[currentLevel + 1] + if (!nextLevel) { + // Already at highest level, wait for user + return { action: "wait", level: currentLevel } + } + + team_msg({ + type: "escalate", + data: { + issue: issue.description, + from_level: currentLevel, + to_level: nextLevel.level, + attempts_at_current: issue.attempts, + diagnosis_chain: issue.diagnosisHistory // 所有层级的诊断记录 + } + }) + + return { action: "escalate", nextHandler: nextLevel.handler, level: nextLevel.level } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 问题在某个层级被解决(agent 报告 issue resolved) | +| **Max Iterations** | 每个层级有独立的 maxAttempts(L0: 2, L1: 1, L2: 1, L3: 1) | +| **Timeout** | L0-L2 无显式超时;L3 用户层需等待用户响应 | + +### Feedback Loop + +``` +Signal: escalate { issue, from_level, diagnosis_chain } + ↓ +Handler: 上级接收问题 + 下级的全部诊断历史 + ↓ +Diagnosis: 上级基于更广视角/更强能力做出诊断 + ↓ +Response: ├─ 解决方案 → 传回原 agent 执行 + ├─ 重新定义问题 → 修改任务描述,agent 重试 + └─ 无法解决 → 继续升级到下一层 +``` + +**诊断链结构** (每层追加): +```javascript +{ + diagnosisHistory: [ + { level: 0, agent: "executor", attempts: 2, diagnosis: "TypeScript类型不匹配", tried: ["修改类型定义", "添加类型断言"] }, + { level: 1, agent: "specialist", attempts: 1, diagnosis: "循环依赖导致类型无法推断", recommendation: "重构模块边界" }, + // ... 
每层追加自己的诊断 + ] +} +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| L3 用户无响应 | Agent 尝试最保守的方案继续,标记为 WORKAROUND | +| 诊断链显示根本问题(如架构缺陷) | 回退到 CP-1 重新规划 | +| 升级到 L1 后解决 | 将解决方案记录为 pattern,下次 L0 可直接处理 | + +--- + +## CP-6: Incremental Delivery (增量交付) + +### Description + +将大型任务分解为小的增量,每个增量独立交付并验证后再进行下一个。模拟 CI/CD 中的小批量交付和渐进式部署。 + +### Entry Condition + +- 大型特性(影响 > 10 个文件) +- 高风险变更(需要逐步验证) +- 用户要求渐进可见的进度 + +### Roles Required + +`coordinator` → `executor` (increment) → `validator` (per-increment) → `coordinator` (gate) + +### Workflow + +``` + ┌─────────────────────────────────────────────────┐ + │ Increment 1 Increment 2 Increment N │ + │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ + │ │ Implement│ │ Implement│ │ Implement│ │ + │ └────┬─────┘ └────┬─────┘ └────┬─────┘ │ + │ ▼ ▼ ▼ │ + │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ + │ │ Validate │ │ Validate │ │ Validate │ │ + │ └────┬─────┘ └────┬─────┘ └────┬─────┘ │ + │ ▼ ▼ ▼ │ + │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ + │ │ Gate ✓ │ │ Gate ✓ │ │ Gate ✓ │ │ + │ └──────────┘ └──────────┘ └──────────┘ │ + └─────────────────────────────────────────────────┘ + │ + ▼ + Full validation +``` + +```javascript +// Coordinator splits plan into increments +function createIncrements(plan) { + // Group tasks by dependency layers + const layers = topologicalSort(plan.tasks) + + return layers.map((layer, i) => ({ + id: `INCREMENT-${i+1}`, + tasks: layer, + gate: { + syntax_clean: true, + no_regression: true, // 已有测试不破坏 + increment_tests_pass: true // 增量相关测试通过 + } + })) +} + +// Execute increment cycle +for (const increment of increments) { + // Step 1: Executor implements increment + TaskCreate({ + subject: `IMPL-inc-${increment.id}`, + description: `Implement increment: ${increment.tasks.map(t => t.title).join(', ')}`, + owner: "executor" + }) + + // Step 2: Wait for implementation + // msg: increment_ready + + // Step 3: Validator checks increment gate + const gateResult = validateIncrement(increment) + + // Step 
4: Gate decision + if (gateResult.passed) { + team_msg({ type: "increment_ready", data: { increment: increment.id, status: "PASS" } }) + // Continue to next increment + } else { + // Feedback: which gate criteria failed + team_msg({ type: "fix_required", data: { + increment: increment.id, + failed_gates: gateResult.failures, + suggestion: gateResult.fix_suggestion + }}) + // Executor fixes → re-validate (max 3 retries per increment) + } +} + +// Final: full validation after all increments +TaskCreate({ subject: "TEST-final", description: "Full test suite after all increments" }) +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 所有增量通过各自的 gate + 最终全量验证通过 | +| **Max Iterations** | 每个增量最多 3 次重试,总增量数由 plan 决定 | +| **Timeout** | 单个增量超时 = 增量任务数 × 单任务超时 | + +### Feedback Loop + +``` +Signal: increment gate failure { failed_gates, affected_files } + ↓ +Handler: Coordinator 定位失败的 gate 条件 + ↓ +Correction: ├─ syntax_clean 失败 → Executor 修复语法 + ├─ no_regression 失败 → Executor 修复回归 + 回滚该增量 + └─ increment_tests 失败 → 触发 CP-2 Review-Fix Cycle +``` + +**增量进度追踪**: +```javascript +{ + total_increments: 4, + completed: 2, + current: 3, + progress_percent: 50, + gate_history: [ + { id: "INCREMENT-1", attempts: 1, status: "PASS" }, + { id: "INCREMENT-2", attempts: 2, status: "PASS" }, + { id: "INCREMENT-3", attempts: 1, status: "IN_PROGRESS" } + ] +} +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 增量重试 3 次仍失败 | 回滚该增量代码,标记为 blocked,继续下一增量 | +| 超过半数增量被 blocked | 停止交付,上报用户评估是否重新规划 | +| 最终全量验证失败 | 识别失败增量组合,逐个回退定位 | + +--- + +## CP-7: Swarming (群策攻关) + +### Description + +当流水线被一个问题阻塞时,暂停正常工作流,所有可用 agent 集中力量解决该问题。模拟敏捷开发中的 swarming / mob debugging。 + +### Entry Condition + +- 关键任务被阻塞超过阈值时间 +- Agent 自修复和 L1 升级都失败 +- 问题影响多个下游任务 + +### Roles Required + +`coordinator` (发起) → `all available agents` (协同) → `coordinator` (裁决) + +### Workflow + +``` + ┌───────────────────────────────────┐ + │ SWARM MODE ACTIVE │ + │ │ + │ ┌────────┐ 
┌────────┐ │ + │ │Agent A │ │Agent B │ │ + │ │诊断视角1│ │诊断视角2│ │ + │ └───┬────┘ └───┬────┘ │ + │ │ │ │ + │ ▼ ▼ │ + │ ┌─────────────────────┐ │ + │ │ Coordinator 汇总 │ │ + │ │ 选择最佳诊断 │ │ + │ └─────────┬───────────┘ │ + │ ▼ │ + │ ┌─────────────────────┐ │ + │ │ 指定 Agent 执行修复 │ │ + │ └─────────────────────┘ │ + │ │ + └───────────────────────────────────┘ + │ + ▼ + Resume normal pipeline +``` + +```javascript +// Coordinator initiates swarm +function initiateSwarm(blockingIssue) { + // Step 1: Pause all non-critical tasks + const activeTasks = TaskList().filter(t => t.status === 'in_progress') + activeTasks.forEach(t => { + TaskUpdate({ taskId: t.id, metadata: { paused_for_swarm: true } }) + }) + + // Step 2: Broadcast swarm request + team_msg({ + type: "swarm_join", + data: { + issue: blockingIssue.description, + affected_tasks: blockingIssue.blockedTasks, + diagnosis_so_far: blockingIssue.diagnosisHistory, + assignment: "All agents: diagnose from your expertise angle" + } + }) + + // Step 3: Each agent analyzes from their perspective + // planner: 架构视角诊断 + // executor: 实现细节诊断 + // tester: 测试/环境视角诊断 + + // Step 4: Collect diagnoses (fan-in) + // Uses CP-3 fan-in aggregation + + // Step 5: Coordinator selects best diagnosis + assigns fix + const bestDiagnosis = selectBestDiagnosis(diagnoses) + const fixer = selectBestFixer(bestDiagnosis, availableAgents) + + TaskCreate({ + subject: `SWARM-FIX: ${blockingIssue.summary}`, + description: `Fix based on swarm diagnosis:\n${bestDiagnosis.detail}`, + owner: fixer + }) + + // Step 6: Verify fix, resume pipeline + // ... wait for fix completion ... 
+ + // Step 7: Resume paused tasks + activeTasks.forEach(t => { + TaskUpdate({ taskId: t.id, metadata: { paused_for_swarm: null } }) + }) +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 阻塞问题解决(相关测试通过 / 错误消失) | +| **Max Iterations** | 2 轮诊断。第 1 轮全员诊断 → 修复 → 若失败 → 第 2 轮聚焦诊断 | +| **Timeout** | 10 分钟。超时后以最佳可用诊断尝试修复 | + +### Feedback Loop + +``` +Signal: 每个 agent 的 swarm_diagnosis { perspective, root_cause, confidence, fix_suggestion } + ↓ +Handler: Coordinator 按 confidence 排序,合并互补的诊断 + ↓ +Selection: 选择 confidence 最高且 fix_suggestion 最具体的方案 + ↓ +Execution: 指定最合适的 agent 执行修复 + ↓ +Verify: 修复后重新运行触发阻塞的场景 + ├─ 通过 → 恢复流水线 + └─ 失败 → 第 2 轮,排除已尝试方案 +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 2 轮 swarm 未解决 | 升级到用户(CP-5 L3) | +| 修复引入新问题 | 回滚修复,尝试次优方案 | +| Agent 在 swarm 期间无响应 | 跳过该 agent,用其余 agent 的诊断 | + +--- + +## CP-8: Consulting/Advisory (咨询顾问) + +### Description + +工作中的 agent 暂停当前任务,向拥有特定领域知识的 specialist 请求建议,获得建议后继续工作。不同于升级(CP-5),consulting 不转移问题所有权。 + +### Entry Condition + +- Agent 遇到不熟悉的领域(如安全、性能优化、特定框架用法) +- 需要验证方案的正确性但不需要他人实现 +- 需要领域最佳实践参考 + +### Roles Required + +`requester` (任何工作中的 agent) → `consultant` (specialist agent) → `requester` (继续工作) + +### Workflow + +``` + requester consultant + ┌──────────┐ ┌──────────┐ + │ Working │ │ Idle │ + │ │──(1)请求────→│ │ + │ Paused │ │ 分析中 │ + │ │←─(2)建议────│ │ + │ Resume │ │ Idle │ + │ (应用建议)│ │ │ + └──────────┘ └──────────┘ +``` + +```javascript +// Requester sends consultation request +function requestConsultation(topic, context, urgency = 'normal') { + team_msg({ + type: "consult_request", + from: currentRole, + to: "coordinator", // coordinator routes to appropriate specialist + data: { + topic: topic, // "security", "performance", "database" + question: context.question, // 具体问题 + context: context.codeSnippet, // 相关代码片段 + options: context.options, // 可选方案(如果有) + urgency: urgency // "blocking" | "normal" | "low" + } + }) + // Pause current work, wait 
for response +} + +// Coordinator routes to specialist +function routeConsultation(request) { + const specialist = selectSpecialist(request.data.topic) + // If no specialist agent exists, use CLI tool as specialist + if (!specialist) { + // Fallback: invoke CLI analysis + Bash(`ccw cli -p "PURPOSE: Expert consultation on ${request.data.topic} +TASK: ${request.data.question} +CONTEXT: ${request.data.context} +EXPECTED: Actionable recommendation with confidence level +" --tool gemini --mode analysis`, { run_in_background: true }) + } +} + +// Consultant provides advice +function provideAdvice(request) { + team_msg({ + type: "consult_response", + from: "consultant", + to: request.from, + data: { + recommendation: "使用 bcrypt 而非 SHA-256 进行密码哈希", + rationale: "bcrypt 内置 salt 和自适应计算成本...", + confidence: 0.95, + references: ["OWASP Password Storage Cheat Sheet"], + caveats: ["需要增加 ~100ms 延迟"], + alternative: "如果延迟敏感,可考虑 Argon2id" + } + }) +} + +// Requester applies advice +function applyAdvice(advice) { + if (advice.data.confidence >= 0.8) { + // Apply recommendation directly + return { action: "apply", recommendation: advice.data.recommendation } + } else { + // Low confidence: request second opinion or escalate + return { action: "second_opinion" } + } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 咨询回复 confidence ≥ 0.8 且 requester 成功应用建议 | +| **Max Iterations** | 1 次主咨询 + 1 次追问(如果首次回复不够清晰) | +| **Timeout** | blocking: 2 分钟, normal: 5 分钟, low: 无超时 | + +### Feedback Loop + +``` +Signal: consult_response { recommendation, confidence, caveats } + ↓ +Handler: Requester 评估建议的适用性 + ├─ confidence ≥ 0.8 → 直接应用 + ├─ confidence < 0.8 → 追问一轮或请求 second opinion + └─ 与当前方案冲突 → 上报 coordinator 裁决 + ↓ +Correction: Requester 基于建议调整实现方案,继续原任务 +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 无可用 specialist | 使用 CLI tool (gemini/qwen) 替代 | +| 咨询超时 | Requester 用自己的最佳判断继续,标注 unverified | +| 建议与当前设计冲突 | 上报 coordinator,触发 
CP-4 Consensus | + +--- + +## CP-9: Dual-Track (双轨并行) + +### Description + +两条工作轨道并行推进,在预定义的同步点(checkpoint)对齐。典型应用:设计与实现并行、前端与后端并行、功能开发与测试开发并行。 + +### Entry Condition + +- 任务可分解为两条相对独立的工作流 +- 两条轨道有明确的同步点(interface contract、API spec、schema) +- 并行执行能提升整体效率 + +### Roles Required + +`coordinator` → `track-A` agent + `track-B` agent → `coordinator` (sync) + +### Workflow + +``` + Track A (Design/API) Track B (Implementation) + ┌────────────┐ ┌────────────┐ + │ Phase A-1 │ │ Phase B-1 │ + │ API 设计 │ │ 脚手架搭建 │ + └─────┬──────┘ └─────┬──────┘ + │ │ + ▼ ┌──────────┐ ▼ + ╔═══════════════╗│ SYNC-1 │╔═══════════════╗ + ║ checkpoint 1 ║│ 对齐接口 │║ checkpoint 1 ║ + ╚═══════════════╝└──────────┘╚═══════════════╝ + │ │ + ▼ ▼ + ┌────────────┐ ┌────────────┐ + │ Phase A-2 │ │ Phase B-2 │ + │ 详细设计 │ │ 核心实现 │ + └─────┬──────┘ └─────┬──────┘ + │ │ + ▼ ┌──────────┐ ▼ + ╔═══════════════╗│ SYNC-2 │╔═══════════════╗ + ║ checkpoint 2 ║│ 集成验证 │║ checkpoint 2 ║ + ╚═══════════════╝└──────────┘╚═══════════════╝ +``` + +```javascript +// Coordinator defines sync points +const syncPoints = [ + { + id: "SYNC-1", + name: "Interface Contract", + trackA_deliverable: "API schema / interface definitions", + trackB_deliverable: "Scaffold with interface stubs", + alignment_check: "Both tracks agree on interface signatures" + }, + { + id: "SYNC-2", + name: "Integration Verification", + trackA_deliverable: "Complete design + test specs", + trackB_deliverable: "Core implementation", + alignment_check: "Implementation passes design test specs" + } +] + +// Phase execution with sync barriers +for (const sync of syncPoints) { + // Launch both tracks in parallel + const trackATask = TaskCreate({ + subject: `TRACK-A-${sync.id}: ${sync.trackA_deliverable}`, + owner: "track-a-agent" + }) + const trackBTask = TaskCreate({ + subject: `TRACK-B-${sync.id}: ${sync.trackB_deliverable}`, + owner: "track-b-agent" + }) + + // Wait for both tracks to reach sync point + waitForBoth(trackATask, trackBTask) + + // Sync point: 
alignment check + const aligned = checkAlignment(sync.alignment_check, trackAResult, trackBResult) + + if (!aligned) { + // Misalignment detected → correction + team_msg({ type: "sync_checkpoint", data: { + sync_id: sync.id, + status: "MISALIGNED", + trackA_state: trackAResult.summary, + trackB_state: trackBResult.summary, + conflicts: aligned.conflicts + }}) + // Coordinator mediates: which track adjusts? + resolveAlignment(aligned.conflicts, trackATask, trackBTask) + } else { + team_msg({ type: "sync_checkpoint", data: { sync_id: sync.id, status: "ALIGNED" } }) + } +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 所有 sync point 对齐 + 最终集成验证通过 | +| **Max Iterations** | 每个 sync point 最多 2 次对齐尝试 | +| **Timeout** | 先到达 sync point 的 track 等待另一个 track,超时 5 分钟 | + +### Feedback Loop + +``` +Signal: sync_checkpoint { status: "MISALIGNED", conflicts } + ↓ +Handler: Coordinator 分析冲突来源 + ├─ Track A 的设计不合理 → 要求 Track A 调整设计 + ├─ Track B 偏离了接口约定 → 要求 Track B 重新对齐 + └─ 双方都有偏差 → 协商折中方案 + ↓ +Correction: 被要求调整的 track agent 收到 fix 描述 + → 调整产出 → 重新到达 sync point +``` + +### Fallback + +| Condition | Action | +|-----------|--------| +| 一条 track 完全阻塞 | 另一条 track 暂停等待,触发 CP-7 Swarming 解决阻塞 | +| Sync 对齐 2 次失败 | 降级为 CP-1 Linear Pipeline(顺序执行) | +| 两条 track 差距过大(一快一慢) | 快 track 预做下一阶段,慢 track 加速 | + +--- + +## CP-10: Post-Mortem (复盘回顾) + +### Description + +任务完成后,对整个执行过程进行结构化回顾,提取经验教训和改进建议。这些知识可以反馈到项目的 CLAUDE.md 或 core memory 中,提升未来任务的执行质量。 + +### Entry Condition + +- 团队完成一个完整任务周期后(所有 task completed) +- 任务执行过程中出现显著问题(多次失败、多次升级) +- 用户主动请求回顾 + +### Roles Required + +`coordinator` (发起 + 汇总) → `all agents` (提供各自视角) → `coordinator` (文档化) + +### Workflow + +``` + ┌──────────────────────────────────────────┐ + │ POST-MORTEM PHASE │ + │ │ + │ 1. Coordinator 收集执行数据 │ + │ ├─ 任务链完成情况 │ + │ ├─ 消息总线历史 │ + │ └─ 迭代/升级/失败记录 │ + │ │ + │ 2. 每个 Agent 提交回顾 │ + │ ├─ Planner: 规划准确度 │ + │ ├─ Executor: 实现障碍 │ + │ └─ Tester: 质量发现 │ + │ │ + │ 3. 
Coordinator 汇总 │ + │ ├─ 成功因素 │ + │ ├─ 改进点 │ + │ └─ 行动建议 │ + │ │ + │ 4. 输出到 memory / CLAUDE.md │ + └──────────────────────────────────────────┘ +``` + +```javascript +// Coordinator initiates post-mortem +function conductPostMortem(teamName) { + // Step 1: Collect execution data + const messages = team_msg({ operation: "list", team: teamName }) + const tasks = TaskList() + const completedTasks = tasks.filter(t => t.status === 'completed') + const failedTasks = tasks.filter(t => t.status === 'in_progress' && t.metadata?.stuck) + + const executionData = { + total_tasks: tasks.length, + completed: completedTasks.length, + total_messages: messages.length, + escalations: messages.filter(m => m.type === 'escalate').length, + fix_cycles: messages.filter(m => m.type === 'fix_required').length, + errors: messages.filter(m => m.type === 'error').length + } + + // Step 2: Request agent retrospectives (Fan-out) + // Each agent answers: What went well? What was difficult? What should change? + agents.forEach(agent => { + SendMessage({ + recipient: agent.name, + content: `## Post-Mortem Request +请回顾本次任务,回答: +1. **顺利**: 哪些方面执行顺利? +2. **困难**: 遇到了什么障碍? +3. **建议**: 下次如何改进? +4. 
**模式**: 发现了什么可复用的模式?`, + summary: "Post-mortem request" + }) + }) + + // Step 3: Aggregate findings + // Collect retro_finding messages from all agents + const findings = collectFindings(agents) + + // Step 4: Generate structured post-mortem report + const report = { + team: teamName, + timestamp: new Date().toISOString(), + execution_summary: executionData, + what_went_well: findings.flatMap(f => f.went_well), + what_was_difficult: findings.flatMap(f => f.difficult), + improvement_actions: findings.flatMap(f => f.suggestions), + reusable_patterns: findings.flatMap(f => f.patterns), + recommendations: generateRecommendations(executionData, findings) + } + + // Step 5: Persist learnings + // Option A: Write to session artifacts + Write(`${sessionFolder}/post-mortem.json`, JSON.stringify(report, null, 2)) + + // Option B: Import to core memory (if significant) + if (report.reusable_patterns.length > 0) { + mcp__ccw-tools__core_memory({ + operation: "import", + text: `Team ${teamName} post-mortem: ${report.reusable_patterns.join('; ')}` + }) + } + + return report +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | 所有 agent 提交回顾 + 报告生成完毕 | +| **Max Iterations** | 1 轮(回顾是单次活动) | +| **Timeout** | Agent 回顾提交超时 3 分钟,超时则仅用已收到的回顾 | + +### Feedback Loop + +``` +Signal: retro_finding { went_well, difficult, suggestions, patterns } + ↓ +Handler: Coordinator 分类、去重、优先级排序 + ↓ +Output: ├─ 高价值 patterns → 写入 core memory / CLAUDE.md + ├─ 改进建议 → 记录到项目 guidelines + └─ 问题根因 → 作为下次任务的预防措施 +``` + +**学习闭环**: Post-mortem 的输出反馈到未来任务的 Phase 2 Context Loading,实现持续改进。 + +### Fallback + +| Condition | Action | +|-----------|--------| +| Agent 无法提供回顾(已关闭) | 从消息总线历史中提取该 agent 的执行数据 | +| 无显著 findings | 生成最小报告,不写入 core memory | +| 执行数据缺失 | 基于现有数据生成部分报告 | + +--- + +## CP-11: Beat Pipeline (节拍流水线) + +### Description + +消除阶段间批处理同步屏障的流水线模式。上游角色每完成一个工作单元(如一个 issue 的 solution)立即输出信号,下游角色即时开始处理,两者并行重叠执行。核心区别于 CP-1(整阶段串行)和 CP-6(阶段内增量但阶段间仍串行):CP-11 
将"阶段边界"从同步屏障降级为轻量信号,实现真正的上下游并行。 + +三大支撑机制: +1. **逐条派发 (Per-item Dispatch)** — 每完成一个单元即输出 `item_ready` 信号,不等同批其他单元 +2. **中间产物 (Intermediate Artifacts)** — 上游将结果写入文件,下游从文件加载,解耦上下游上下文 +3. **内联轻量检查 (Inline Lightweight Check)** — 用 ~20 行内联逻辑替代重量级子 agent 调用(如冲突检测) + +### Entry Condition + +- 上游(规划)和下游(执行)存在天然流水线关系 +- 上游产出可以逐条独立消费(各单元间无强顺序依赖,或依赖可通过轻量检查标记) +- 下游处理耗时 ≥ 上游单元产出间隔(否则批处理反而更简单) +- 传统批处理导致下游大量空闲等待 + +### Roles Required + +`coordinator` → `producer`(上游) + `consumer`(下游, 可多实例) + +### Workflow + +``` + Producer (planner) Coordinator Consumer (executor) + ───────────────── ─────────── ────────────────── + │ │ │ + ┌────┴────┐ │ │ + │ Plan │ │ │ + │ Item 1 │ │ │ + └────┬────┘ │ │ + │ item_ready(1) │ │ + │─────────────────────────────→│ │ + │ │──spawn/dispatch────────→│ + ┌────┴────┐ │ ┌────┴────┐ + │ Plan │ │ │ Execute │ + │ Item 2 │ (parallel overlap) │ │ Item 1 │ + └────┬────┘ │ └────┬────┘ + │ item_ready(2) │ │ + │─────────────────────────────→│ │ + │ │──spawn/dispatch────────→│ (new instance) + ┌────┴────┐ │ │ + │ Plan │ │ ┌────┴────┐ + │ Item 3 │ │ │ Execute │ + └────┬────┘ │ │ Item 2 │ + │ all_planned │ └────┬────┘ + │─────────────────────────────→│ │ + │ │──wait all consumers────→│ + │ │ │ impl_complete + │ │←────────────────────────│ + │ │ │ + [aggregate results] +``` + +```javascript +// ── Coordinator: Per-item dispatch loop ── +const producer = spawnProducer(requirements) +const consumers = [] + +while (true) { + const output = waitForProducer(producer) + const itemData = parseItemReady(output) + + if (itemData) { + // Spawn consumer immediately — don't wait for other items + const consumer = spawnConsumer({ + item: itemData, + artifactFile: itemData.artifact_file // File-based handoff + }) + consumers.push(consumer) + } + + if (output.includes('all_planned')) break + + // Tell producer to continue next item + continueProducer(producer) +} + +// Wait for all consumers to finish +waitForAllConsumers(consumers) +aggregateResults(consumers) +``` + +#### 中间产物协议 
+ +上游将每个工作单元的完整数据写入文件,仅通过信号传递文件路径: + +```javascript +// Producer: 写中间产物 +const artifactFile = `${sessionDir}/artifacts/${itemId}.json` +writeFile(artifactFile, JSON.stringify({ + item_id: itemId, + data: solutionData, + config: executionConfig, + timestamp: new Date().toISOString() +}, null, 2)) + +// Signal: 仅传路径 +output({ status: 'item_ready', item_id: itemId, artifact_file: artifactFile }) +``` + +```javascript +// Consumer: 从文件加载(兼容无文件 fallback) +let data +if (artifactFile) { + try { + data = JSON.parse(readFile(artifactFile)) + } catch { + data = loadFromCli(itemId) // Fallback + } +} else { + data = loadFromCli(itemId) // Legacy path +} +``` + +#### 内联轻量检查 + +替代重量级子 agent 的冲突/依赖检测: + +```javascript +function inlineConflictCheck(itemId, itemData, dispatched) { + const currentFiles = itemData.files_touched || [] + const blockedBy = [] + + // 1. 文件冲突:当前 item 触及的文件与已派发 item 有重叠 + for (const prev of dispatched) { + const prevFiles = prev.data.files_touched || [] + if (currentFiles.some(f => prevFiles.includes(f))) { + blockedBy.push(prev.itemId) + } + } + + // 2. 
显式依赖 + const explicitDeps = itemData.dependencies || [] + for (const dep of explicitDeps) { + if (!blockedBy.includes(dep)) blockedBy.push(dep) + } + + return blockedBy // Empty = can execute immediately +} +``` + +### Convergence + +| Element | Value | +|---------|-------| +| **Success Gate** | Producer 输出 `all_planned` + 所有 Consumer 输出 `impl_complete` | +| **Max Iterations** | Producer: N items (有限); Consumer: 每个 item 内部最多 2 次 test-fix retry | +| **Timeout** | Producer per-item: 10 min (超时则催促收敛); Consumer per-item: 15 min | + +### Feedback Loop + +``` +Signal: item_ready { item_id, artifact_file, depends_on } + ↓ +Handler: Coordinator 检查 depends_on,spawn Consumer(或标记 blocked) + ↓ +Signal: impl_complete { item_id, status, commit_hash } + ↓ +Handler: Coordinator 累积结果 + ├─ status=success → 记录 commit,解锁被阻塞的 items + ├─ status=failed → 记录错误,跳过(不阻塞其他独立 items) + └─ all consumers done → 汇总报告 +``` + +**流水线重叠效果**: Producer 规划 Item N+1 的同时,Consumer 执行 Item N。当 Producer 输出速度 < Consumer 执行速度时,Consumer 始终有工作可做,消除空闲等待。 + +### Fallback + +| Condition | Action | +|-----------|--------| +| Producer 长时间无输出 | send_input 催促收敛,retry wait 120s | +| Consumer 执行失败 | 记录 failed,继续其他 items(不级联失败) | +| 中间产物文件读取失败 | Consumer fallback 到 CLI 命令加载 | +| 内联冲突检查误判 | Consumer 执行时自然发现冲突(git merge conflict),报告 failed | +| Producer 异常退出 | 视为 all_planned,等待已派发 Consumers 完成 | +| 依赖链过长导致 Consumer 全部 blocked | 升级到 CP-1 串行模式 | + +### 与其他模式的区别 + +| 维度 | CP-1 Linear | CP-6 Incremental | CP-9 Dual-Track | **CP-11 Beat** | +|------|-------------|-------------------|------------------|----------------| +| 阶段边界 | 硬同步屏障 | 阶段内增量,阶段间串行 | 两条并行 track | 无边界,逐条流转 | +| 上下游关系 | 串行 | 串行(增量粒度) | 并行但独立 | 并行且流水线重叠 | +| 调度粒度 | 整阶段 | 增量批次 | Track 级 | 单个工作单元 | +| 空闲等待 | 下游等整阶段 | 下游等增量批次 | 两 track 独立 | 近零(流水线填充) | +| 中间产物 | 内存传递 | 内存传递 | 各 track 独立 | 文件传递(解耦上下文) | +| 冲突检测 | 无需 | 无需 | Sync checkpoint | 内联轻量检查 | + +### 适用实例 + +- **PlanEx Pipeline**: Planner 逐 issue 输出 solution,Executor 即时实现 +- **CI/CD Pipeline**: Builder 
逐模块构建,Deployer 即时部署 +- **Document Pipeline**: Analyzer 逐章节分析,Writer 即时撰写 + +### 平台实现差异 + +| 维度 | Claude (TaskCreate/SendMessage) | Codex (spawn_agent/wait/send_input) | +|------|--------------------------------|--------------------------------------| +| 派发机制 | `TaskCreate` + `SendMessage` 通知 | `spawn_agent` 每个 Consumer | +| 信号传递 | `mcp__ccw-tools__team_msg` | Producer 文本输出 marker(ISSUE_READY/ALL_PLANNED) | +| 等待机制 | TaskList 轮询 + SendMessage 回调 | `wait({ ids: [...] })` | +| 催促收敛 | SendMessage 提醒 | `send_input` 催促 | +| 中间产物 | Write tool → Read tool | `write_file` → `read_file` | +| 依赖标记 | `TaskUpdate({ addBlockedBy })` | Producer 输出 `depends_on` 数组,Coordinator 控制 spawn 顺序 | + +--- + +## Pattern Composition (模式组合) + +### 常见组合 + +协作模式可以组合使用,形成更复杂的工作流: + +``` +1. Standard Development (标准开发) + CP-1 (Pipeline) + CP-2 (Review-Fix) + CP-10 (Post-Mortem) + +2. High-Risk Feature (高风险特性) + CP-4 (Consensus on design) → CP-6 (Incremental Delivery) → CP-2 (Review-Fix each increment) + +3. Complex Investigation (复杂问题调查) + CP-3 (Fan-out analysis) → CP-4 (Consensus on diagnosis) → CP-7 (Swarm if needed) + +4. Parallel Development (并行开发) + CP-9 (Dual-Track) + CP-2 (Review-Fix per track) + CP-10 (Post-Mortem) + +5. Expert-Guided Development (专家指导开发) + CP-8 (Consulting for design) → CP-1 (Pipeline implementation) → CP-2 (Review-Fix) + +6. Plan-and-Execute Pipeline (边规划边执行) + CP-11 (Beat Pipeline) + CP-2 (Review-Fix per item) + CP-10 (Post-Mortem) + +7. High-Throughput Batch Processing (高吞吐批处理) + CP-8 (Consulting for architecture) → CP-11 (Beat Pipeline) → CP-3 (Fan-out for testing) +``` + +### 组合规则 + +1. **不可重入**: 同一时间只能有一个 CP-7 (Swarming) 实例 +2. **可嵌套**: CP-6 的每个增量可以内部使用 CP-2 +3. **可升级**: CP-2 内发现无法修复的问题可升级到 CP-5 或 CP-7 +4. **可降级**: CP-9 对齐失败可降级到 CP-1; CP-11 依赖链过长可降级到 CP-1 +5. **CP-10 始终在最后**: Post-mortem 只在团队任务完成后执行 +6. 
**CP-11 可替代 CP-1**: 当上下游可流水线重叠时,CP-11 是 CP-1 的升级版 + +### State Machine (模式状态机) + +``` + ┌──────────────────────────────────────────┐ + │ COORDINATOR FSM │ + │ │ + 新需求 ────→ [PLAN] ──→ [EXECUTE] ──→ [VALIDATE] ──→ [DONE] │ + │ │ │ │ │ + │ │ │ │ │ + ▼ ▼ ▼ ▼ │ + CP-4? CP-6? CP-2? CP-10 │ + Consensus Incremental Review-Fix Post- │ + Gate Delivery Cycle Mortem │ + │ │ │ │ + ▼ ▼ ▼ │ + [blocked?] [blocked?] [blocked?] │ + │ │ │ │ + ▼ ▼ ▼ │ + CP-5/CP-7 CP-5/CP-7 CP-5/CP-7 │ + Escalate Escalate Escalate │ + /Swarm /Swarm /Swarm │ + └──────────────────────────────────────────┘ +``` + +--- + +## Coordinator Integration Guide + +### Coordinator 如何选择协作模式 + +```javascript +// In coordinate.md Phase 1 (需求澄清) or Phase 4 (协调主循环) + +function selectCollaborationPattern(context) { + const { taskType, complexity, riskLevel, teamSize, hasExpert } = context + + // Rule-based selection + const patterns = ['CP-1'] // CP-1 is always the base + + if (riskLevel === 'high') patterns.push('CP-4') // Consensus for risky decisions + if (complexity === 'High' && taskType === 'feature') patterns.push('CP-6') // Incremental for large features + if (taskType === 'review' || taskType === 'test') patterns.push('CP-2') // Review-Fix for quality + if (hasExpert && complexity !== 'Low') patterns.push('CP-8') // Consulting for expertise + if (taskType === 'batch' || taskType === 'multi-issue') patterns.push('CP-11') // Beat for pipeline overlap + patterns.push('CP-10') // Post-mortem always included + + return patterns +} +``` + +### Coordinator 消息路由表(含协作模式) + +| 消息类型 | 触发模式 | Coordinator 动作 | +|---------|---------|-----------------| +| `plan_ready` | CP-1 | 审批 plan → 通知 executor | +| `impl_complete` | CP-1/CP-6/CP-11 | 解锁 TEST/REVIEW 或下一增量/累积结果 | +| `review_result` (BLOCK) | CP-2 | 创建 fix 任务 → 启动 Review-Fix Cycle | +| `vote` | CP-4 | 收集投票 → 达到 quorum 则执行决策 | +| `escalate` | CP-5 | 路由到上一级处理者 | +| `increment_ready` | CP-6 | 验证增量 gate → 允许或拒绝 | +| `swarm_join` | CP-7 | 暂停其他任务 → 聚合诊断 | +| `consult_request` | 
CP-8 | 路由到 specialist 或 CLI tool | +| `sync_checkpoint` | CP-9 | 检查两条 track 对齐状态 | +| `retro_finding` | CP-10 | 收集回顾 → 生成报告 | +| `item_ready` | CP-11 | Spawn consumer → 传递 artifact_file | +| `all_planned` | CP-11 | 停止等待 producer → wait all consumers | diff --git a/.claude/skills_lib/team-skill-designer-v2/specs/quality-standards.md b/.claude/skills_lib/team-skill-designer-v2/specs/quality-standards.md new file mode 100644 index 00000000..c7e5fe97 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/specs/quality-standards.md @@ -0,0 +1,242 @@ +# Quality Standards for Team Skills (v2) + +Quality assessment criteria for generated team skill packages (v3 style: text + decision tables, no pseudocode). + +## When to Use + +| Phase | Usage | Section | +|-------|-------|---------| +| Phase 5 | Score generated command | All dimensions | +| Phase 3 | Guide generation quality | Checklist | + +--- + +## Quality Dimensions + +### 1. Completeness (25%) + +| Score | Criteria | +|-------|----------| +| 100% | All 15 required sections present with substantive content | +| 80% | 12+ sections present, minor gaps in non-critical areas | +| 60% | Core sections present (front matter, message bus, 5 phases, error handling) | +| 40% | Missing critical sections | +| 0% | Skeleton only | + +**Required Sections Checklist (role.md files):** +- [ ] Role Identity (name, task prefix, output tag, responsibility) +- [ ] Role Boundaries (MUST / MUST NOT) +- [ ] Toolbox section (Available Commands with markdown links) +- [ ] Phase 2: Context Loading (decision tables, no pseudocode) +- [ ] Phase 3: Core Work (decision tables + tool call templates) +- [ ] Phase 4: Validation/Summary (checklist tables) +- [ ] Error Handling table +- [ ] Phase 1/5: Reference to SKILL.md Shared Infrastructure (not inline) +- [ ] No JavaScript pseudocode in any phase +- [ ] All branching logic expressed as decision tables + +**Required Sections Checklist (SKILL.md):** +- [ ] Frontmatter (name, description, 
allowed-tools) +- [ ] Architecture Overview (role routing diagram with flow symbols) +- [ ] Role Router (Input Parsing + Role Registry table with markdown links) +- [ ] Shared Infrastructure (Worker Phase 1 Task Discovery + Phase 5 Report templates) +- [ ] Pipeline Definitions with Cadence Control (beat diagram + checkpoints) +- [ ] Compact Protection (Phase Reference table with Compact column) +- [ ] Coordinator Spawn Template +- [ ] Role Isolation Rules table +- [ ] Error Handling table + +**SKILL.md MUST NOT contain:** +- [ ] ❌ No JavaScript pseudocode (VALID_ROLES object, routing functions, etc.) +- [ ] ❌ No role-specific implementation logic (belongs in role.md or commands/*.md) +- [ ] ❌ No `${variable}` notation (use `` instead) + +> **Note**: For `commands/*.md` file quality criteria, see [Command File Quality Standards](#command-file-quality-standards) below. + +### 2. Pattern Compliance (25%) + +| Score | Criteria | +|-------|----------| +| 100% | All 9 infrastructure patterns + selected collaboration patterns fully implemented | +| 80% | 7 core infra patterns + at least 1 collaboration pattern with convergence | +| 60% | Minimum 6 infra patterns, collaboration patterns present but incomplete | +| 40% | Missing critical patterns (message bus or task lifecycle) | +| 0% | No pattern compliance | + +**Infrastructure Pattern Checklist:** +- [ ] Pattern 1: Message bus - team_msg before every SendMessage +- [ ] Pattern 1b: CLI fallback section +- [ ] Pattern 2: YAML front matter - all fields present +- [ ] Pattern 3: Task lifecycle - TaskList/Get/Update flow +- [ ] Pattern 4: Five-phase structure (Phase 1/5 shared in SKILL.md, Phase 2-4 in role.md) +- [ ] Pattern 5: Complexity-adaptive (if applicable) +- [ ] Pattern 6: Coordinator spawn compatible +- [ ] Pattern 7: Error handling table +- [ ] Pattern 8: Session files (if applicable) +- [ ] Pattern 9: Compact Protection (Phase Reference table + re-read directives) + +**Collaboration Pattern Checklist:** +- [ ] At 
least one CP selected (CP-1 minimum) +- [ ] Each selected CP has convergence criteria defined +- [ ] Each selected CP has feedback loop mechanism +- [ ] Each selected CP has timeout/fallback behavior +- [ ] CP-specific message types registered in message bus section +- [ ] Escalation path defined (CP-5) for error scenarios + +### 3. Integration (25%) + +| Score | Criteria | +|-------|----------| +| 100% | All integration checks pass, spawn snippet ready | +| 80% | Minor integration notes, no blocking issues | +| 60% | Some checks need attention but functional | +| 40% | Task prefix conflict or missing critical tools | +| 0% | Incompatible with team system | + +### 4. Consistency (25%) + +| Score | Criteria | +|-------|----------| +| 100% | Role name, task prefix, message types consistent throughout | +| 80% | Minor inconsistencies in non-critical areas | +| 60% | Some mixed terminology but intent clear | +| 40% | Confusing or contradictory content | +| 0% | Internally inconsistent | + +--- + +## Quality Gates + +| Gate | Threshold | Action | +|------|-----------|--------| +| PASS | >= 80% | Deliver to `.claude/skills/team-{name}/` | +| REVIEW | 60-79% | Fix recommendations, re-validate | +| FAIL | < 60% | Major rework needed, re-run from Phase 3 | + +--- + +## Issue Classification + +### Errors (Must Fix) + +- Missing YAML front matter +- Missing `group: team` +- No message bus section +- No task lifecycle (TaskList/Get/Update) +- No SendMessage to coordinator +- Task prefix conflicts with existing +- **Coordinator dispatch `owner` values not in Role Registry** — all task owners must match a role in SKILL.md Role Registry table +- **Monitor spawn prompt missing Skill callback** — spawn prompt must contain `Skill(skill="team-xxx", args="--role=yyy")` +- **Spawn template missing `description` parameter** — Task() requires `description` as a mandatory field +- **Spawn template missing `team_name` or `name` parameter** — agent will not join the team or have identity + 
+### Warnings (Should Fix) + +- Missing error handling table +- Incomplete Phase implementation (skeleton only) +- Missing team_msg before some SendMessage calls +- Missing CLI fallback section (`### CLI 回退` with `ccw team` examples) +- No complexity-adaptive routing when role is complex +- **Dispatch task IDs not aligned with pipeline diagram** — task IDs (e.g., RESEARCH-001, DRAFT-001) must match the pipeline defined in SKILL.md +- **Coordinator commands reference roles not in Message Routing Tables** — all roles in dispatch/monitor must appear in SKILL.md Available Roles table + +### Info (Nice to Have) + +- Decision tables could cover more edge cases +- Additional tool call examples +- Session file structure documentation + +--- + +## Coordinator Commands Consistency Standards + +Quality assessment for coordinator's `dispatch.md` and `monitor.md` command files. These files are the most common source of integration failures. + +### 6. Coordinator-SKILL Alignment (Applies to coordinator commands) + +| Score | Criteria | +|-------|----------| +| 100% | All 5 alignment checks pass | +| 80% | 4/5 pass, one minor mismatch | +| 60% | 3/5 pass, cosmetic role naming issues | +| 40% | Critical mismatch: roles not in VALID_ROLES or missing Skill callback | +| 0% | dispatch/monitor written independently of SKILL.md | + +#### Check 1: Role Name Alignment + +- [ ] Every `owner` value in dispatch.md TaskCreate calls exists in SKILL.md Role Registry table +- [ ] No invented role names (e.g., "spec-writer" when Role Registry has "writer") +- [ ] No typos or case mismatches in role names + +#### Check 2: Task ID-Pipeline Alignment + +- [ ] Task IDs in dispatch.md match the pipeline diagram in SKILL.md +- [ ] Task prefix mapping is consistent (e.g., RESEARCH-* → analyst, DRAFT-* → writer) +- [ ] Dependency chain in dispatch.md matches pipeline flow arrows + +#### Check 3: Spawn Template Completeness + +- [ ] monitor.md Task() calls include ALL required parameters: `description`, 
`team_name`, `name`, `prompt` +- [ ] Spawn prompt contains `Skill(skill="team-xxx", args="--role=yyy")` callback +- [ ] Spawn prompt includes role boundaries (task prefix constraint, output tag, communication rules) +- [ ] Spawn prompt is NOT a minimal generic instruction (e.g., "Execute task X") + +#### Check 4: Message Routing Table Alignment + +- [ ] All roles in dispatch.md appear in monitor.md's Message Routing Tables +- [ ] All message types used by roles are listed in the routing tables +- [ ] Sender roles in routing tables match Role Registry entries + +#### Check 5: v3 Style Compliance + +- [ ] No JavaScript pseudocode in any generated file +- [ ] All branching logic expressed as decision tables +- [ ] Code blocks contain only actual tool calls +- [ ] `` notation used (not `${variable}`) +- [ ] Phase 1/5 reference SKILL.md Shared Infrastructure (not inline) + +--- + +## Command File Quality Standards + +Quality assessment criteria for generated command `.md` files in `roles/{name}/commands/`. + +### 5. 
Command File Quality (Applies to folder-based roles) + +| Score | Criteria | +|-------|----------| +| 100% | All 4 dimensions pass, all command files self-contained | +| 80% | 3/4 dimensions pass, minor gaps in one area | +| 60% | 2/4 dimensions pass, some cross-references or missing sections | +| 40% | Missing required sections or broken references | +| 0% | No command files or non-functional | + +#### Dimension 1: Structural Completeness + +Each command file MUST contain: +- [ ] `## When to Use` - Trigger conditions +- [ ] `## Strategy` with delegation mode (Subagent / CLI / Sequential / Direct) +- [ ] `## Execution Steps` with decision tables and tool call templates +- [ ] `## Error Handling` table with Scenario/Resolution +- [ ] `## Output Format` section + +#### Dimension 2: Self-Containment + +- [ ] No `Ref:` or cross-references to other command files +- [ ] No imports or dependencies on sibling commands +- [ ] All context loaded within the command (task, plan, files) +- [ ] Any subagent can `Read()` the command and execute independently + +#### Dimension 3: Toolbox Consistency + +- [ ] Every command listed in role.md Toolbox has a corresponding file in `commands/` +- [ ] Every file in `commands/` is listed in role.md Toolbox +- [ ] Phase mapping in Toolbox matches command's `## When to Use` phase reference +- [ ] Delegation mode in command matches role's subagent/CLI capabilities + +#### Dimension 4: Pattern Compliance + +- [ ] Pre-built command patterns (explore, analyze, implement, validate, review, dispatch, monitor) follow templates/role-command-template.md +- [ ] Custom commands follow the template skeleton structure +- [ ] Delegation mode is appropriate for the command's complexity +- [ ] Output format is structured and parseable by the calling role.md diff --git a/.claude/skills_lib/team-skill-designer-v2/specs/team-design-patterns.md b/.claude/skills_lib/team-skill-designer-v2/specs/team-design-patterns.md new file mode 100644 index 
00000000..67e4d0bb
--- /dev/null
+++ b/.claude/skills_lib/team-skill-designer-v2/specs/team-design-patterns.md
@@ -0,0 +1,592 @@
+# Team Command Design Patterns
+
+> Extracted from 5 production team commands: coordinate, plan, execute, test, review
+> Extended with 11 collaboration patterns for diverse team interaction models
+
+---
+
+## Pattern Architecture
+
+```
+Team Design Patterns
+├── Section A: Infrastructure Patterns (9)  ← HOW to build a team command
+│   ├── Pattern 1: Message Bus Integration
+│   ├── Pattern 2: YAML Front Matter
+│   ├── Pattern 3: Task Lifecycle
+│   ├── Pattern 4: Five-Phase Execution
+│   ├── Pattern 5: Complexity-Adaptive Routing
+│   ├── Pattern 6: Coordinator Spawn Integration
+│   ├── Pattern 7: Error Handling Table
+│   ├── Pattern 8: Session File Structure
+│   └── Pattern 9: Compact Protection
+│
+└── Section B: Collaboration Patterns (11)  ← HOW agents interact
+    ├── CP-1: Linear Pipeline (线性流水线)
+    ├── CP-2: Review-Fix Cycle (审查修复循环)
+    ├── CP-3: Parallel Fan-out/Fan-in (并行扇出扇入)
+    ├── CP-4: Consensus Gate (共识门控)
+    ├── CP-5: Escalation Chain (逐级升级)
+    ├── CP-6: Incremental Delivery (增量交付)
+    ├── CP-7: Swarming (群策攻关)
+    ├── CP-8: Consulting/Advisory (咨询顾问)
+    ├── CP-9: Dual-Track (双轨并行)
+    ├── CP-10: Post-Mortem (复盘回顾)
+    └── CP-11: Beat Pipeline (节拍流水线)
+```
+
+**Section B** collaboration patterns are documented in: [collaboration-patterns.md](collaboration-patterns.md)
+
+---
+
+## When to Use
+
+| Phase | Usage | Section |
+|-------|-------|---------|
+| Phase 0 | Understand all patterns before design | All sections |
+| Phase 2 | Select applicable infrastructure + collaboration patterns | Pattern catalog |
+| Phase 3 | Apply patterns during generation | Implementation details |
+| Phase 4 | Verify compliance | Checklists |
+
+---
+
+# Section A: Infrastructure Patterns
+
+## Pattern 1: Message Bus Integration
+
+Every teammate must use `mcp__ccw-tools__team_msg` for persistent logging before every `SendMessage`.
+ +### Structure + +```javascript +// BEFORE every SendMessage, call: +mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: "<role>", // planner | executor | tester | <new-role> + to: "coordinator", + type: "<message-type>", + summary: "<concise summary>", + ref: "<optional artifact path or id>", + data: { /* optional structured payload */ } +}) +``` + +### Standard Message Types + +| Type | Direction | Trigger | Payload | +|------|-----------|---------|---------| +| `plan_ready` | planner -> coordinator | Plan generation complete | `{ taskCount, complexity }` | +| `plan_approved` | coordinator -> planner | Plan reviewed | `{ approved: true }` | +| `plan_revision` | planner -> coordinator | Plan modified per feedback | `{ changes }` | +| `task_unblocked` | coordinator -> any | Dependency resolved | `{ taskId }` | +| `impl_complete` | executor -> coordinator | Implementation done | `{ changedFiles, syntaxClean }` | +| `impl_progress` | any -> coordinator | Progress update | `{ batch, total }` | +| `test_result` | tester -> coordinator | Test cycle end | `{ passRate, iterations }` | +| `review_result` | tester -> coordinator | Review done | `{ verdict, findings }` | +| `fix_required` | any -> coordinator | Critical issues | `{ details[] }` | +| `error` | any -> coordinator | Blocking error | `{ message }` | +| `shutdown` | coordinator -> all | Team dissolved | `{}` | + +### Collaboration Pattern Message Types + +| Type | Used By | Direction | Trigger | +|------|---------|-----------|---------| +| `vote` | CP-4 Consensus | any -> coordinator | Agent casts vote on proposal | +| `escalate` | CP-5 Escalation | any -> coordinator | Agent escalates unresolved issue | +| `increment_ready` | CP-6 Incremental | executor -> coordinator | Increment delivered for validation | +| `swarm_join` | CP-7 Swarming | any -> coordinator | Agent joins swarm on blocker | +| `consult_request` | CP-8 Consulting | any -> specialist | Agent requests expert advice | +| `consult_response` | CP-8 Consulting | specialist -> requester | Expert provides
advice | +| `sync_checkpoint` | CP-9 Dual-Track | any -> coordinator | Track reaches sync point | +| `retro_finding` | CP-10 Post-Mortem | any -> coordinator | Retrospective insight | + +### Adding New Message Types + +When designing a new role, define role-specific message types following the convention: +- `{action}_ready` - Work product ready for review +- `{action}_complete` - Work phase finished +- `{action}_progress` - Intermediate progress update + +### CLI Fallback + +When `mcp__ccw-tools__team_msg` MCP is unavailable, use `ccw team` CLI as equivalent fallback: + +```javascript +// Fallback: Replace MCP call with Bash CLI (parameters map 1:1) +Bash(`ccw team log --team "${teamName}" --from "<role>" --to "coordinator" --type "<type>" --summary "<summary>" [--ref <ref>] [--data '<json>'] --json`) +``` + +**Parameter mapping**: `team_msg(params)` → `ccw team <operation> --team <team> [--from/--to/--type/--summary/--ref/--data/--id/--last] [--json]` + +**Coordinator** uses all 4 operations: `log`, `list`, `status`, `read` +**Teammates** primarily use: `log` + +### Message Bus Section Template + +```markdown +## 消息总线 + +每次 SendMessage **前**,必须调用 `mcp__ccw-tools__team_msg` 记录消息: + +\`\`\`javascript +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "<role>", to: "coordinator", type: "<type>", summary: "<summary>" }) +\`\`\` + +### 支持的 Message Types + +| Type | 方向 | 触发时机 | 说明 | +|------|------|----------|------| +| `<type>` | → coordinator | <触发时机> | <说明> | + +### CLI 回退 + +当 `mcp__ccw-tools__team_msg` MCP 不可用时,使用 `ccw team` CLI 作为等效回退: + +\`\`\`javascript +// 回退: 将 MCP 调用替换为 Bash CLI(参数一一对应) +Bash(\`ccw team log --team "${teamName}" --from "<role>" --to "coordinator" --type "<type>" --summary "<summary>" --json\`) +\`\`\` + +**参数映射**: `team_msg(params)` → `ccw team log --team <team> --from <role> --to coordinator --type <type> --summary "<summary>" [--ref <ref>] [--data '<json>'] [--json]` +``` + +--- + +## Pattern 2: YAML Front Matter + +Every team command file must start with standardized YAML front matter.
+ +### Structure + +```yaml +--- +name: +description: Team - +argument-hint: "" +allowed-tools: SendMessage(*), TaskUpdate(*), TaskList(*), TaskGet(*), TodoWrite(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*), Task(*) +group: team +--- +``` + +### Field Rules + +| Field | Rule | Example | +|-------|------|---------| +| `name` | Lowercase, matches filename | `plan`, `execute`, `test` | +| `description` | `Team -` prefix + Chinese capability list | `Team planner - 多角度代码探索、结构化实现规划` | +| `argument-hint` | Empty string for teammates, has hint for coordinator | `""` | +| `allowed-tools` | Start with `SendMessage(*), TaskUpdate(*), TaskList(*), TaskGet(*)` | See each role | +| `group` | Always `team` | `team` | + +### Minimum Tool Set (All Teammates) + +``` +SendMessage(*), TaskUpdate(*), TaskList(*), TaskGet(*), TodoWrite(*), Read(*), Bash(*), Glob(*), Grep(*) +``` + +### Role-Specific Additional Tools + +| Role Type | Additional Tools | +|-----------|-----------------| +| Read-only (reviewer, analyzer) | None extra | +| Write-capable (executor, fixer) | `Write(*), Edit(*)` | +| Agent-delegating (planner, executor) | `Task(*)` | + +--- + +## Pattern 3: Task Lifecycle + +All teammates follow the same task discovery and lifecycle pattern. + +### Standard Flow + +```javascript +// Step 1: Find my tasks +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('-') && // PLAN-*, IMPL-*, TEST-*, REVIEW-* + t.owner === '' && + t.status === 'pending' && + t.blockedBy.length === 0 // Not blocked +) + +// Step 2: No tasks -> idle +if (myTasks.length === 0) return + +// Step 3: Claim task (lowest ID first) +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) + +// Step 4: Execute work +// ... role-specific logic ... 
+ +// Step 5: Complete and loop +TaskUpdate({ taskId: task.id, status: 'completed' }) + +// Step 6: Check for next task +const nextTasks = TaskList().filter(t => + t.subject.startsWith('-') && + t.owner === '' && + t.status === 'pending' && + t.blockedBy.length === 0 +) +if (nextTasks.length > 0) { + // Continue with next task -> back to Step 3 +} +``` + +### Task Prefix Convention + +| Prefix | Role | Example | +|--------|------|---------| +| `PLAN-` | planner | `PLAN-001: Explore and plan implementation` | +| `IMPL-` | executor | `IMPL-001: Implement approved plan` | +| `TEST-` | tester | `TEST-001: Test-fix cycle` | +| `REVIEW-` | tester | `REVIEW-001: Code review and requirement verification` | +| `-` | new role | Must be unique, uppercase, hyphen-suffixed | + +### Task Chain (defined in coordinate.md) + +``` +PLAN-001 → IMPL-001 → TEST-001 + REVIEW-001 + ↑ blockedBy ↑ blockedBy +``` + +--- + +## Pattern 4: Five-Phase Execution Structure + +Every team command follows a consistent 5-phase internal structure. + +### Standard Phases + +| Phase | Purpose | Common Actions | +|-------|---------|----------------| +| Phase 1: Task Discovery | Find and claim assigned tasks | TaskList, TaskGet, TaskUpdate | +| Phase 2: Context Loading | Load necessary context for work | Read plan/config, detect framework | +| Phase 3: Core Work | Execute primary responsibility | Role-specific logic | +| Phase 4: Validation/Summary | Verify work quality | Syntax check, criteria verification | +| Phase 5: Report + Loop | Report to coordinator, check next | SendMessage, TaskUpdate, TaskList | + +### Phase Structure Template + +```markdown +### Phase N: + +\`\`\`javascript +// Implementation code +\`\`\` +``` + +--- + +## Pattern 5: Complexity-Adaptive Routing + +All roles that process varying-difficulty tasks should implement adaptive routing. 
+ +### Decision Logic + +```javascript +function assessComplexity(description) { + let score = 0 + if (/refactor|architect|restructure|module|system/.test(description)) score += 2 + if (/multiple|across|cross/.test(description)) score += 2 + if (/integrate|api|database/.test(description)) score += 1 + if (/security|performance/.test(description)) score += 1 + return score >= 4 ? 'High' : score >= 2 ? 'Medium' : 'Low' +} +``` + +### Routing Table + +| Complexity | Direct Claude | CLI Agent | Sub-agent | +|------------|---------------|-----------|-----------| +| Low | Direct execution | - | - | +| Medium | - | `cli-explore-agent` / `cli-lite-planning-agent` | - | +| High | - | CLI agent | `code-developer` / `universal-executor` | + +### Sub-agent Delegation Pattern + +```javascript +Task({ + subagent_type: "", + run_in_background: false, + description: "", + prompt: ` +## Task Objective +${taskDescription} + +## Output Location +${sessionFolder}/${outputFile} + +## MANDATORY FIRST STEPS +1. Read: .workflow/project-tech.json (if exists) +2. Read: .workflow/project-guidelines.json (if exists) + +## Expected Output +${expectedFormat} +` +}) +``` + +--- + +## Pattern 6: Coordinator Spawn Integration + +New teammates must be spawnable from coordinate.md using standard pattern. 
+ +### Skill Path Format (Folder-Based) + +Team commands use folder-based organization with colon-separated skill paths: + +``` +File location: .claude/commands/team/{team-name}/{role-name}.md +Skill path: team:{team-name}:{role-name} + +Example: + .claude/commands/team/spec/analyst.md → team:spec:analyst + .claude/commands/team/security/scanner.md → team:security:scanner +``` + +### Spawn Template + +> **⚠️ CRITICAL**: Spawn prompt 必须包含完整的 Skill 回调指令。如果 prompt 过于简化(如仅 "Execute task X"),agent 会自行发挥而非通过 Skill → role.md 加载角色定义。 + +```javascript +Task({ + subagent_type: "general-purpose", + description: `Spawn ${roleName} worker`, // ← 必填参数 + team_name: teamName, + name: "", + prompt: `You are team "${teamName}" . + +## ⚠️ 首要指令(MUST) +你的所有工作必须通过调用 Skill 获取角色定义后执行,禁止自行发挥: +Skill(skill="team-${teamName}", args="--role=") + +When you receive -* tasks, execute via the Skill callback above. + +Current requirement: ${taskDescription} +Constraints: ${constraints} + +## Message Bus (Required) +Before each SendMessage, call mcp__ccw-tools__team_msg: +mcp__ccw-tools__team_msg({ operation: "log", team: "${teamName}", from: "", to: "coordinator", type: "", summary: "" }) + +Workflow: +1. 调用 Skill(skill="team-${teamName}", args="--role=") 获取角色定义 +2. 按 role.md 中的 5-Phase 流程执行(TaskList → 找到 -* 任务 → 执行 → 汇报) +3. team_msg log + SendMessage results to coordinator +4. TaskUpdate completed -> check next task` +}) +``` + +### Spawn Anti-Patterns(必须避免) + +| Anti-Pattern | 后果 | 正确做法 | +|-------------|------|---------| +| prompt 中缺少 `Skill(...)` 回调 | agent 自行发挥,不加载 role.md | 必须包含完整 Skill 回调指令 | +| 缺少 `description` 参数 | Task() 调用失败(必填参数) | 始终提供 `description` | +| 缺少 `team_name` 参数 | agent 不属于团队,无法收发消息 | 始终提供 `team_name` | +| 缺少 `name` 参数 | agent 无角色标识 | 始终提供 `name` | +| dispatch/monitor 中 `owner` 值不在 VALID_ROLES | Skill 路由失败 | owner 必须精确匹配 VALID_ROLES key | + +--- + +## Pattern 7: Error Handling Table + +Every command ends with a standardized error handling table. 
+ +### Template + +```markdown +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No tasks available | Idle, wait for coordinator assignment | +| Plan/Context file not found | Notify coordinator, request location | +| Sub-agent failure | Retry once, then fallback to direct execution | +| Max iterations exceeded | Report to coordinator, suggest intervention | +| Critical issue beyond scope | SendMessage fix_required to coordinator | +``` + +--- + +## Pattern 8: Session File Structure + +Roles that produce artifacts follow standard session directory patterns. + +### Convention + +``` +.workflow/.team-/{identifier}-{YYYY-MM-DD}/ +├── +├── manifest.json (if multiple outputs) +└── .task/ (if generating task files) + ├── TASK-001.json + └── TASK-002.json +``` + +--- + +# Section B: Collaboration Patterns + +> Complete specification: [collaboration-patterns.md](collaboration-patterns.md) + +## Collaboration Pattern Quick Reference + +Every collaboration pattern has these standard elements: + +| Element | Description | +|---------|-------------| +| **Entry Condition** | When to activate this pattern | +| **Workflow** | Step-by-step execution flow | +| **Convergence Criteria** | How the pattern terminates successfully | +| **Feedback Loop** | How information flows back to enable correction | +| **Timeout/Fallback** | What happens when the pattern doesn't converge | +| **Max Iterations** | Hard limit on cycles (where applicable) | + +### Pattern Selection Guide + +| Scenario | Recommended Pattern | Why | +|----------|-------------------|-----| +| Standard feature development | CP-1: Linear Pipeline | Well-defined sequential stages | +| Code review with fixes needed | CP-2: Review-Fix Cycle | Iterative improvement until quality met | +| Multi-angle analysis needed | CP-3: Fan-out/Fan-in | Parallel exploration, aggregated results | +| Critical decision (architecture, security) | CP-4: Consensus Gate | Multiple perspectives before committing | +| 
Agent stuck / self-repair failed | CP-5: Escalation Chain | Progressive expertise levels | +| Large feature (many files) | CP-6: Incremental Delivery | Validated increments reduce risk | +| Blocking issue stalls pipeline | CP-7: Swarming | All resources on one problem | +| Domain-specific expertise needed | CP-8: Consulting | Expert advice without role change | +| Design + Implementation parallel | CP-9: Dual-Track | Faster delivery with sync checkpoints | +| Post-completion learning | CP-10: Post-Mortem | Capture insights for future improvement | +| Multi-issue plan + execute overlap | CP-11: Beat Pipeline | Per-item dispatch eliminates stage idle time | + +--- + +## Pattern Summary Checklist + +When designing a new team command, verify: + +### Infrastructure Patterns +- [ ] YAML front matter with `group: team` +- [ ] Message bus section with `team_msg` logging +- [ ] CLI fallback section with `ccw team` CLI examples and parameter mapping +- [ ] Role-specific message types defined +- [ ] Task lifecycle: TaskList -> TaskGet -> TaskUpdate flow +- [ ] Unique task prefix (no collision with existing PLAN/IMPL/TEST/REVIEW, scan `team/**/*.md`) +- [ ] 5-phase execution structure +- [ ] Complexity-adaptive routing (if applicable) +- [ ] Coordinator spawn template integration +- [ ] Error handling table +- [ ] SendMessage communication to coordinator only +- [ ] Session file structure (if producing artifacts) + +### Collaboration Patterns +- [ ] At least one collaboration pattern selected +- [ ] Convergence criteria defined (max iterations / quality gate / timeout) +- [ ] Feedback loop implemented (how results flow back) +- [ ] Timeout/fallback behavior specified +- [ ] Pattern-specific message types registered +- [ ] Coordinator aware of pattern (can route messages accordingly) +- [ ] If using CP-11: intermediate artifact protocol defined (file path + format) +- [ ] If using CP-11: inline conflict check implemented (no heavy subagent for dependency detection) + +--- + +## 
Pattern 9: Parallel Subagent Orchestration + +Roles that need to perform complex, multi-perspective work can delegate to subagents or CLI tools rather than executing everything inline. This pattern defines three delegation modes and context management rules. + +### Delegation Modes + +#### Mode A: Subagent Fan-out + +Launch multiple Task agents in parallel for independent work streams. + +```javascript +// Launch 2-4 parallel agents for different perspectives +const agents = [ + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: "Explore angle 1", + prompt: `Analyze from perspective 1: ${taskDescription}` + }), + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: "Explore angle 2", + prompt: `Analyze from perspective 2: ${taskDescription}` + }) +] +// Aggregate results after all complete +``` + +**When to use**: Multi-angle exploration, parallel code analysis, independent subtask execution. + +#### Mode B: CLI Fan-out + +Launch multiple `ccw cli` calls for multi-perspective analysis. + +```javascript +// Parallel CLI calls for different analysis angles +Bash(`ccw cli -p "PURPOSE: Analyze from security angle..." --tool gemini --mode analysis`, { run_in_background: true }) +Bash(`ccw cli -p "PURPOSE: Analyze from performance angle..." --tool gemini --mode analysis`, { run_in_background: true }) +// Wait for all CLI results, then synthesize +``` + +**When to use**: Multi-dimensional code review, architecture analysis, security + performance audits. + +#### Mode C: Sequential Delegation + +Delegate a single heavy task to a specialized agent. + +```javascript +Task({ + subagent_type: "code-developer", + run_in_background: false, + description: "Implement complex feature", + prompt: `## Goal\n${plan.summary}\n\n## Tasks\n${taskDetails}` +}) +``` + +**When to use**: Complex implementation, test-fix cycles, large-scope refactoring. 
+ +### Context Management Hierarchy + +| Level | Location | Context Size | Use Case | +|-------|----------|-------------|----------| +| Small | role.md inline | < 200 lines | Simple logic, direct execution | +| Medium | commands/*.md | 200-500 lines | Structured delegation with strategy | +| Large | Subagent prompt | Unlimited | Full autonomous execution | + +**Rule**: role.md Phase 1/5 are always inline (standardized). Phases 2-4 either inline (small) or delegate to commands (medium/large). + +### Command File Extraction Criteria + +Extract a phase into a command file when ANY of these conditions are met: + +1. **Subagent delegation**: Phase launches Task() agents +2. **CLI fan-out**: Phase runs parallel `ccw cli` calls +3. **Complex strategy**: Phase has >3 conditional branches +4. **Reusable logic**: Same logic used by multiple roles + +If none apply, keep the phase inline in role.md. + +### Relationship to Other Patterns + +- **Pattern 5 (Complexity-Adaptive)**: Pattern 9 provides the delegation mechanisms that Pattern 5 routes to. Low complexity → inline, Medium → CLI agent, High → Subagent fan-out. +- **CP-3 (Parallel Fan-out)**: Pattern 9 Mode A/B are the implementation mechanisms for CP-3 at the role level. +- **Pattern 4 (Five-Phase)**: Pattern 9 does NOT replace the 5-phase structure. It provides delegation options WITHIN phases 2-4. 
+ +### Checklist + +- [ ] Delegation mode selected based on task characteristics +- [ ] Context management level appropriate (small/medium/large) +- [ ] Command files extracted only when criteria met +- [ ] Subagent prompts include mandatory first steps (read project config) +- [ ] CLI fan-out uses `--mode analysis` by default +- [ ] Results aggregated after parallel completion +- [ ] Error handling covers agent/CLI failure with fallback diff --git a/.claude/skills_lib/team-skill-designer-v2/templates/role-command-template.md b/.claude/skills_lib/team-skill-designer-v2/templates/role-command-template.md new file mode 100644 index 00000000..6dcba3b9 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/templates/role-command-template.md @@ -0,0 +1,820 @@ +# Role Command Template + +Template for generating command files in `roles/<role>/commands/<command>.md` (v3 style). + +## Purpose + +| Phase | Usage | +|-------|-------| +| Phase 0 | Read to understand command file structure | +| Phase 3 | Apply with role-specific content | + +## Style Rules + +Generated output follows v3 conventions: + +| Rule | Description | +|------|-------------| +| No JS pseudocode | All logic uses text + decision tables + flow symbols | +| Code blocks = tool calls only | Only Task(), Bash(), Read(), Grep() etc. | +| `<placeholder>` in output | Not `${variable}` in generated content | +| Decision tables | Strategy selection, error routing all use tables | +| Self-contained | Each command executable independently | + +> **Note**: The template itself uses `{{handlebars}}` for variable substitution during Phase 3 generation. The **generated output** must not contain `{{handlebars}}` or JS pseudocode.
+ +--- + +## Template + +```markdown +# Command: {{command_name}} + +## Purpose + +{{command_description}} + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +{{#each context_inputs}} +| {{this.name}} | {{this.source}} | {{this.required}} | +{{/each}} + +## Phase 3: Core Work + +{{core_work_content}} + +## Phase 4: Validation + +{{validation_content}} + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +{{#each error_handlers}} +| {{this.scenario}} | {{this.resolution}} | +{{/each}} +``` + +--- + +## 7 Pre-built Command Patterns + +Each pattern below provides the complete v3-style structure. During Phase 3 generation, select the matching pattern and customize with team-specific content. + +### 1. explore.md (Multi-angle Exploration) + +**Maps to**: Orchestration roles, Phase 2 +**Delegation**: Subagent Fan-out + +```markdown +# Command: explore + +## Purpose + +Multi-angle codebase exploration using parallel exploration agents. Discovers patterns, dependencies, and architecture before planning. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Task description | TaskGet result | Yes | +| Project root | `git rev-parse --show-toplevel` | Yes | +| Existing explorations | /explorations/ | No | +| Wisdom | /wisdom/ | No | + +## Phase 3: Core Work + +### Angle Selection + +Determine exploration angles from task description: + +| Signal in Description | Angle | +|-----------------------|-------| +| architect, structure, design | architecture | +| pattern, convention, style | patterns | +| depend, import, module | dependencies | +| test, spec, coverage | testing | +| No signals matched | general + patterns (default) | + +### Execution Strategy + +| Angle Count | Strategy | +|-------------|----------| +| 1 angle | Single agent exploration | +| 2-4 angles | Parallel agents, one per angle | + +**Per-angle agent spawn**: + +\`\`\` +Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: "Explore: ", + prompt: "Explore the codebase from the perspective of . +Focus on: +Project root: + +Report findings as structured markdown with file references." +}) +\`\`\` + +### Result Aggregation + +After all agents complete: + +1. Merge key findings across all angles (deduplicate) +2. Collect relevant file paths (deduplicate) +3. Extract discovered patterns +4. 
Write aggregated results to `/explorations/.md` + +### Output Format + +\`\`\` +## Exploration Results + +### Angles Explored: [list] + +### Key Findings +- [finding with file:line reference] + +### Relevant Files +- [file path with relevance note] + +### Patterns Found +- [pattern name: description] +\`\`\` + +## Phase 4: Validation + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| All angles covered | Compare planned vs completed | All planned angles explored | +| Findings non-empty | Check result count | At least 1 finding per angle | +| File references valid | Verify referenced files exist | >= 80% files exist | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Agent returns no results | Retry with broader search scope | +| Agent timeout | Use partial results, note incomplete angles | +| Project root not found | Fall back to current directory | +| Exploration cache exists | Load cached, skip re-exploration | +``` + +### 2. analyze.md (Multi-perspective Analysis) + +**Maps to**: Read-only analysis roles, Phase 3 +**Delegation**: CLI Fan-out + +```markdown +# Command: analyze + +## Purpose + +Multi-perspective code analysis using parallel CLI calls. Each perspective produces severity-ranked findings with file:line references. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Target files | `git diff --name-only HEAD~1` or `--cached` | Yes | +| Plan file | /plan/plan.json | No | +| Wisdom | /wisdom/ | No | + +**File discovery**: + +\`\`\` +Bash("git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached") +\`\`\` + +## Phase 3: Core Work + +### Perspective Selection + +Determine analysis perspectives from task description: + +| Signal in Description | Perspective | +|-----------------------|-------------| +| security, auth, inject, xss | security | +| performance, speed, optimize, memory | performance | +| quality, clean, maintain, debt | code-quality | +| architect, pattern, structure | architecture | +| No signals matched | code-quality + architecture (default) | + +### Execution Strategy + +| Perspective Count | Strategy | +|-------------------|----------| +| 1 perspective | Single CLI call | +| 2-4 perspectives | Parallel CLI calls, one per perspective | + +**Per-perspective CLI call**: + +\`\`\` +Bash("ccw cli -p \"PURPOSE: Analyze code from perspective +TASK: Review changes in: +MODE: analysis +CONTEXT: @ +EXPECTED: Findings with severity, file:line references, remediation +CONSTRAINTS: Focus on \" --tool gemini --mode analysis", { run_in_background: true }) +\`\`\` + +### Finding Aggregation + +After all perspectives complete: + +1. Parse findings from each CLI response +2. Classify by severity: Critical / High / Medium / Low +3. Deduplicate across perspectives +4. 
Sort by severity then by file location + +### Output Format + +\`\`\` +## Analysis Results + +### Perspectives Analyzed: [list] + +### Findings by Severity +#### Critical +- [finding with file:line] +#### High +- [finding] +#### Medium +- [finding] +#### Low +- [finding] +\`\`\` + +## Phase 4: Validation + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| All perspectives covered | Compare planned vs completed | All perspectives analyzed | +| Findings have file refs | Check file:line format | >= 90% findings have references | +| No duplicate findings | Dedup check | No identical findings | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| CLI tool unavailable | Fall back to secondary tool | +| CLI returns empty | Retry with broader scope | +| Too many findings | Prioritize critical/high, summarize medium/low | +| Target files empty | Report no changes to analyze | +``` + +### 3. implement.md (Code Implementation) + +**Maps to**: Code generation roles, Phase 3 +**Delegation**: Sequential Delegation + +```markdown +# Command: implement + +## Purpose + +Code implementation via subagent delegation with batch routing. Reads plan tasks and executes code changes, grouping by module for efficiency. + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Plan file | /plan/plan.json | Yes | +| Task files | /plan/.task/.json | Yes | +| Wisdom conventions | /wisdom/conventions.md | No | + +**Loading steps**: + +1. Extract plan path from task description +2. Read plan.json -> get task list +3. Read each task file for detailed specs +4. 
Load coding conventions from wisdom + +## Phase 3: Core Work + +### Strategy Selection + +| Task Count | Strategy | Description | +|------------|----------|-------------| +| <= 2 | Direct | Inline Edit/Write by this role | +| 3-5 | Single agent | One code-developer subagent for all tasks | +| > 5 | Batch agent | Group by module, one agent per batch | + +### Direct Strategy (1-2 tasks) + +For each task, for each file in task spec: +1. Read existing file (if modifying) +2. Apply changes via Edit or Write +3. Verify file saved + +### Agent Strategy (3+ tasks) + +**Single agent spawn**: + +\`\`\` +Task({ + subagent_type: "code-developer", + run_in_background: false, + description: "Implement tasks", + prompt: "## Goal + + +## Tasks + + +Complete each task according to its convergence criteria." +}) +\`\`\` + +**Batch agent** (> 5 tasks): Group tasks by module/directory, spawn one agent per batch using the template above. + +### Output Tracking + +After implementation: +1. Get list of changed files: `Bash("git diff --name-only")` +2. Count completed vs total tasks +3. Record changed file paths for validation phase + +## Phase 4: Validation + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| Syntax clean | Language-specific check (tsc, python -c, etc.) 
| No syntax errors | +| All files created | Verify plan-specified files exist | All files present | +| Import resolution | Check for broken imports | All imports resolve | + +**Auto-fix on failure** (max 2 attempts): + +| Attempt | Action | +|---------|--------| +| 1 | Parse error, apply targeted fix | +| 2 | Delegate fix to code-developer subagent | +| Failed | Report remaining issues to coordinator | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Plan file not found | Notify coordinator, request plan path | +| Agent fails on task | Retry once, then mark task as blocked | +| Syntax errors after impl | Attempt auto-fix, report if unresolved | +| File conflict | Check git status, resolve or report | +``` + +### 4. validate.md (Test-Fix Cycle) + +**Maps to**: Validation roles, Phase 3 +**Delegation**: Sequential Delegation + +```markdown +# Command: validate + +## Purpose + +Iterative test-fix cycle with max iteration control. Runs tests, identifies failures, delegates fixes, and re-validates until passing or max iterations reached. + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Test command | Auto-detect from project config | Yes | +| Changed files | `git diff --name-only` | Yes | +| Plan file | /plan/plan.json | No | + +**Test command detection**: + +| Detection Signal | Test Command | +|-----------------|--------------| +| package.json has "test" script | `npm test` | +| pytest.ini or conftest.py exists | `pytest` | +| Makefile has "test" target | `make test` | +| go.mod exists | `go test ./...` | +| No signal detected | Notify coordinator | + +## Phase 3: Core Work + +### Test-Fix Cycle + +| Step | Action | Exit Condition | +|------|--------|----------------| +| 1. Run tests | `Bash(" 2>&1 || true")` | - | +| 2. Parse results | Extract pass/fail counts | - | +| 3. Check pass rate | Compare against threshold | Pass rate >= 95% -> exit SUCCESS | +| 4. 
Extract failures | Parse failing test names and errors | - | +| 5. Delegate fix | Spawn code-developer subagent | - | +| 6. Increment counter | iteration++ | iteration >= 5 -> exit MAX_REACHED | +| 7. Loop | Go to Step 1 | - | + +**Fix delegation**: + +\`\`\` +Task({ + subagent_type: "code-developer", + run_in_background: false, + description: "Fix test failures (iteration )", + prompt: "Test failures: + + +Fix the failing tests. Changed files: " +}) +\`\`\` + +### Outcome Routing + +| Outcome | Action | +|---------|--------| +| SUCCESS (pass rate >= 95%) | Proceed to Phase 4 | +| MAX_REACHED (5 iterations) | Report failures, mark for manual intervention | +| ENV_ERROR (test env broken) | Report environment issue to coordinator | + +## Phase 4: Validation + +| Metric | Source | Threshold | +|--------|--------|-----------| +| Pass rate | Final test run | >= 95% | +| Iterations used | Counter | Report count | +| Remaining failures | Last test output | List details | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No test command found | Notify coordinator | +| Max iterations exceeded | Report failures, suggest manual intervention | +| Test environment broken | Report environment issue | +| Flaky tests detected | Re-run once to confirm, exclude if consistently flaky | +``` + +### 5. review.md (Multi-dimensional Review) + +**Maps to**: Read-only analysis roles (reviewer type), Phase 3 +**Delegation**: CLI Fan-out + +```markdown +# Command: review + +## Purpose + +Multi-dimensional code review producing a verdict (BLOCK/CONDITIONAL/APPROVE) with categorized findings across quality, security, architecture, and requirements dimensions. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Plan file | /plan/plan.json | Yes | +| Git diff | `git diff HEAD~1` or `git diff --cached` | Yes | +| Modified files | From git diff --name-only | Yes | +| Test results | Tester output (if available) | No | +| Wisdom | /wisdom/ | No | + +## Phase 3: Core Work + +### Dimension Overview + +| Dimension | Focus | What to Detect | +|-----------|-------|----------------| +| Quality | Code correctness, type safety, clean code | Empty catch, ts-ignore, any type, console.log | +| Security | Vulnerability patterns, secret exposure | Hardcoded secrets, SQL injection, eval, XSS | +| Architecture | Module structure, coupling, file size | Circular deps, deep imports, large files | +| Requirements | Acceptance criteria coverage | Unmet criteria, missing error handling, missing tests | + +### Per-Dimension Detection + +For each dimension, scan modified files using pattern detection: + +**Example: Quality scan for console statements**: + +\`\`\` +Grep(pattern="console\\.(log|debug|info)", path="", output_mode="content", "-n"=true) +\`\`\` + +**Example: Architecture scan for deep imports**: + +\`\`\` +Grep(pattern="from\\s+['\"](\\.\\./){3,}", path="", output_mode="content", "-n"=true) +\`\`\` + +### Requirements Verification + +1. Read plan file -> extract acceptance criteria section +2. For each criterion -> extract keywords (4+ char meaningful words) +3. Search modified files for keyword matches +4. 
Score coverage: + +| Match Rate | Status | +|------------|--------| +| >= 70% | Met | +| 40-69% | Partial | +| < 40% | Unmet | + +### Verdict Routing + +| Verdict | Criteria | Action | +|---------|----------|--------| +| BLOCK | Any critical-severity issues found | Must fix before merge | +| CONDITIONAL | High or medium issues, no critical | Should address, can merge with tracking | +| APPROVE | Only low issues or none | Ready to merge | + +### Report Format + +\`\`\` +# Code Review Report + +**Verdict**: + +## Blocking Issues (if BLOCK) +- **** (:): + +## Review Dimensions + +### Quality Issues +**CRITICAL** () +- (:) + +### Security Issues +(same format per severity) + +### Architecture Issues +(same format per severity) + +### Requirements Issues +(same format per severity) + +## Recommendations +1. +\`\`\` + +## Phase 4: Validation + +| Field | Description | +|-------|-------------| +| Total issues | Sum across all dimensions and severities | +| Critical count | Must be 0 for APPROVE | +| Blocking issues | Listed explicitly in report header | +| Dimensions covered | Must be 4/4 | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Plan file not found | Skip requirements dimension, note in report | +| Git diff empty | Report no changes to review | +| File read fails | Skip file, note in report | +| No modified files | Report empty review | +| Codex unavailable | Skip codex review dimension, report 3-dimension review | +``` + +### 6. dispatch.md (Task Distribution) + +**Maps to**: Coordinator role, Phase 3 +**Delegation**: Direct (coordinator acts directly) + +```markdown +# Command: dispatch + +## Purpose + +Task chain creation with dependency management. Creates all pipeline tasks with correct blockedBy relationships and role-based ownership. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Pipeline definition | SKILL.md Pipeline Definitions | Yes | +| Task metadata | SKILL.md Task Metadata Registry | Yes | +| Session folder | From Phase 2 initialization | Yes | +| Mode | From Phase 1 requirements | Yes | + +## Phase 3: Core Work + +### Pipeline Selection + +Select pipeline based on mode: + +| Mode | Pipeline | Task Count | +|------|----------|------------| +{{#each pipeline_modes}} +| {{this.mode}} | {{this.pipeline}} | {{this.task_count}} | +{{/each}} + +### Task Creation Flow + +For each task in the selected pipeline (in dependency order): + +1. **Create task**: + +\`\`\` +TaskCreate({ + subject: ": ", + description: "\n\nSession: ", + activeForm: " in progress" +}) +\`\`\` + +2. **Set owner and dependencies**: + +\`\`\` +TaskUpdate({ + taskId: , + owner: "", + addBlockedBy: [] +}) +\`\`\` + +3. Record created task ID for downstream dependency references + +### Dependency Mapping + +Follow SKILL.md Task Metadata Registry for: +- Task ID naming convention +- Role assignment (owner field) +- Dependencies (blockedBy relationships) +- Task description with session folder reference + +### Parallel Task Handling + +| Condition | Action | +|-----------|--------| +| Tasks share same blockedBy and no mutual dependency | Create both, they run in parallel | +| N parallel tasks for same role | Use instance-specific owner: `-1`, `-2` | + +## Phase 4: Validation + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| All tasks created | Compare pipeline spec vs TaskList | Count matches | +| Dependencies correct | Verify blockedBy for each task | All deps point to valid tasks | +| Owners assigned | Check owner field | Every task has valid role owner | +| No orphan tasks | Verify all tasks reachable from pipeline start | No disconnected tasks | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Task creation fails | 
Retry, then report to user | +| Dependency cycle detected | Flatten dependencies, warn | +| Role not spawned yet | Queue task, spawn role first | +| Task prefix conflict | Log warning, proceed | +``` + +### 7. monitor.md (Message-Driven Coordination) + +**Maps to**: Coordinator role, Phase 4 +**Delegation**: Message-Driven (no polling) + +```markdown +# Command: monitor + +## Purpose + +Message-driven coordination. Team members (spawned in Phase 2) execute tasks autonomously and report via SendMessage. Coordinator receives messages and routes next actions. + +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Pipeline state | TaskList() | Yes | +| Session file | /team-session.json | Yes | +| Team config | Team member list | Yes | + +## Phase 3: Core Work + +### Design Principles + +| Principle | Description | +|-----------|-------------| +| No re-spawning | Team members already spawned in Phase 2 -- do NOT spawn again here | +| No polling loops | No `while` + `sleep` + status check (wastes API turns) | +| Event-driven | Worker SendMessage is the trigger signal | +| One beat per wake | Coordinator processes one event then STOPs | + +### Entry Handlers + +When coordinator wakes, route based on Entry Router detection: + +| Handler | Trigger | Actions | +|---------|---------|---------| +| handleCallback | Worker `[role-name]` message received | 1. Log received message 2. Check task status 3. Route to next action | +| handleCheck | User says "check"/"status" | 1. Load TaskList 2. Output status graph 3. STOP (no advancement) | +| handleResume | User says "resume"/"continue" | 1. Load TaskList 2. Find ready tasks 3. Spawn/notify workers 4. STOP | + +### handleCallback Flow + +1. Identify sender role from message tag `[role-name]` +2. Log received message via team_msg +3. Load TaskList for current state +4. 
Route based on message content: + +| Message Content | Action | +|-----------------|--------| +| Contains "fix_required" or "error" | Assess severity -> escalate to user if critical | +| Normal completion | Check pipeline progress (see below) | + +5. Check pipeline progress: + +| State | Condition | Action | +|-------|-----------|--------| +| All done | completed count >= total pipeline tasks | -> Phase 5 | +| Tasks unblocked | pending tasks with empty blockedBy | Notify/spawn workers for unblocked tasks | +| Checkpoint | Pipeline at spec->impl transition | Pause, ask user to `resume` | +| Stalled | No ready + no running + has pending | Report blocking point | + +6. Output status summary -> STOP + +### handleCheck Flow (Status Only) + +1. Load all tasks via TaskList +2. Build status overview: + +\`\`\` +Pipeline Status: + Completed: / + In Progress: + Pending: + Blocked: +\`\`\` + +3. STOP (no pipeline advancement) + +### handleResume Flow + +1. Load TaskList +2. Find tasks with: status=pending, blockedBy all resolved +3. For each ready task: + +| Condition | Action | +|-----------|--------| +| Worker already spawned and idle | SendMessage to worker: "Task unblocked, please proceed" | +| Worker not spawned | Spawn worker using SKILL.md Spawn Template | + +4. Output status summary -> STOP + +### Status Graph Format + +\`\`\` +Pipeline Progress: / + + [DONE] TASK-001 (role) - description + [DONE] TASK-002 (role) - description + [>> ] TASK-003 (role) - description <- in_progress + [ ] TASK-004 (role) - description <- blocked by TASK-003 + ... 
+\`\`\` + +## Phase 4: Validation + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| Message routed | Verify handler executed | Handler completed without error | +| State consistent | TaskList reflects actions taken | Tasks updated correctly | +| No orphan workers | All spawned workers have assigned tasks | No idle workers without tasks | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Teammate reports error | Assess severity -> retry SendMessage or escalate to user | +| Task stuck (no callback) | Send follow-up to teammate, 2x -> suggest respawn | +| Critical issue beyond scope | AskUserQuestion: retry/skip/terminate | +| Session file corrupted | Rebuild state from TaskList | +``` + +--- + +## Variable Reference + +| Variable | Source | Description | +|----------|--------|-------------| +| `{{command_name}}` | Command identifier | e.g., "explore", "analyze" | +| `{{command_description}}` | One-line description | What this command does | +| `{{context_inputs}}` | Array of {name, source, required} | Context loading table rows | +| `{{core_work_content}}` | Generated from pattern | Phase 3 content | +| `{{validation_content}}` | Generated from pattern | Phase 4 content | +| `{{error_handlers}}` | Array of {scenario, resolution} | Error handling table rows | +| `{{pipeline_modes}}` | config.pipeline_modes | Array of {mode, pipeline, task_count} for dispatch | + +## Self-Containment Rules + +1. **No cross-command references**: Each command.md must be executable independently +2. **Include all context inputs**: List all required context (files, configs) in Phase 2 +3. **Complete error handling**: Every command handles its own failures +4. **Explicit output format**: Define what the command produces +5. 
**Strategy in decision tables**: All routing logic in tables, not code + +## Key Differences from v1 + +| Aspect | v1 (old) | v2 (this template) | +|--------|----------|---------------------| +| Strategy logic | JS `if/else` + regex matching | Decision tables | +| Execution steps | JS code blocks (pseudocode) | Step lists + actual tool call templates | +| Result processing | JS object construction | Text aggregation description | +| Output format | Embedded in JS template literals | Standalone markdown format block | +| Error handling | JS try/catch with fallbacks | Decision table with clear routing | +| Context prep | JS variable assignments | Phase 2 table + loading steps | +| Monitor design | JS while loop + polling | Event-driven handlers + STOP pattern | diff --git a/.claude/skills_lib/team-skill-designer-v2/templates/role-template.md b/.claude/skills_lib/team-skill-designer-v2/templates/role-template.md new file mode 100644 index 00000000..ad353272 --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/templates/role-template.md @@ -0,0 +1,586 @@ +# Role File Template + +Template for generating per-role execution detail files in `roles//role.md` (v3 style). + +## Purpose + +| Phase | Usage | +|-------|-------| +| Phase 0 | Read to understand role file structure | +| Phase 3 | Apply with role-specific content | + +## Style Rules + +Generated output follows v3 conventions: + +| Rule | Description | +|------|-------------| +| Phase 1/5 shared | Reference "See SKILL.md Shared Infrastructure" instead of inline code | +| No JS pseudocode | Message Bus, Task Lifecycle all use text + tool call templates | +| Decision tables | All branching logic uses `| Condition | Action |` tables | +| Code blocks = tool calls only | Only actual executable calls (Read(), TaskList(), SendMessage(), etc.) 
| +| `` in output | Not `${variable}` in generated content | +| Phase 2-4 only | Role files define Phase 2-4 role-specific logic | + +> **Note**: The template itself uses `{{handlebars}}` for variable substitution during Phase 3 generation. The **generated output** must not contain `{{handlebars}}` or JS pseudocode. + +--- + +## Template + +### Worker Role Template + +```markdown +# {{display_name}} Role + +{{role_description}} + +## Identity + +- **Name**: `{{role_name}}` | **Tag**: `[{{role_name}}]` +- **Task Prefix**: `{{task_prefix}}-*` +- **Responsibility**: {{responsibility_type}} + +## Boundaries + +### MUST +- Only process `{{task_prefix}}-*` prefixed tasks +- All output (SendMessage, team_msg, logs) must carry `[{{role_name}}]` identifier +- Only communicate with coordinator via SendMessage +- Work strictly within {{responsibility_type}} responsibility scope +{{#each must_rules}} +- {{this}} +{{/each}} + +### MUST NOT +- Execute work outside this role's responsibility scope +- Communicate directly with other worker roles (must go through coordinator) +- Create tasks for other roles (TaskCreate is coordinator-exclusive) +- Modify files or resources outside this role's responsibility +- Omit `[{{role_name}}]` identifier in any output +{{#each must_not_rules}} +- {{this}} +{{/each}} + +--- + +## Toolbox + +### Available Commands + +| Command | File | Phase | Description | +|---------|------|-------|-------------| +{{#each commands}} +| `{{this.name}}` | [commands/{{this.name}}.md](commands/{{this.name}}.md) | Phase {{this.phase}} | {{this.description}} | +{{/each}} + +{{#if has_no_commands}} +> No command files -- all phases execute inline. 
+{{/if}} + +### Tool Capabilities + +| Tool | Type | Used By | Purpose | +|------|------|---------|---------| +{{#each tool_capabilities}} +| `{{this.tool}}` | {{this.type}} | {{this.used_by}} | {{this.purpose}} | +{{/each}} + +--- + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +{{#each message_types}} +| `{{this.type}}` | {{this.direction}} | {{this.trigger}} | {{this.description}} | +{{/each}} + +## Message Bus + +Before every SendMessage, log via `mcp__ccw-tools__team_msg`: + +\`\`\` +mcp__ccw-tools__team_msg({ + operation: "log", + team: , + from: "{{role_name}}", + to: "coordinator", + type: , + summary: "[{{role_name}}] complete: ", + ref: +}) +\`\`\` + +**CLI fallback** (when MCP unavailable): + +\`\`\` +Bash("ccw team log --team --from {{role_name}} --to coordinator --type --summary \"[{{role_name}}] complete\" --ref --json") +\`\`\` + +--- + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +> See SKILL.md Shared Infrastructure -> Worker Phase 1: Task Discovery + +Standard task discovery flow: TaskList -> filter by prefix `{{task_prefix}}-*` + owner match + pending + unblocked -> TaskGet -> TaskUpdate in_progress. + +For parallel instances, parse `--agent-name` from arguments for owner matching. Falls back to `{{role_name}}` for single-instance roles. + +### Phase 2: {{phase2_name}} + +{{phase2_content}} + +### Phase 3: {{phase3_name}} + +{{phase3_content}} + +### Phase 4: {{phase4_name}} + +{{phase4_content}} + +### Phase 5: Report to Coordinator + +> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report + +Standard report flow: team_msg log -> SendMessage with `[{{role_name}}]` prefix -> TaskUpdate completed -> Loop to Phase 1 for next task. 
+ +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No {{task_prefix}}-* tasks available | Idle, wait for coordinator assignment | +| Context/Plan file not found | Notify coordinator, request location | +{{#if has_commands}} +| Command file not found | Fall back to inline execution | +{{/if}} +{{#each additional_error_handlers}} +| {{this.scenario}} | {{this.resolution}} | +{{/each}} +| Critical issue beyond scope | SendMessage fix_required to coordinator | +| Unexpected error | Log error via team_msg, report to coordinator | +``` + +--- + +### Coordinator Role Template + +The coordinator role is special and always generated. Its template differs from worker roles: + +```markdown +# Coordinator Role + +Orchestrate the {{team_display_name}} workflow: team creation, task dispatching, progress monitoring, session state. + +## Identity + +- **Name**: `coordinator` | **Tag**: `[coordinator]` +- **Responsibility**: Parse requirements -> Create team -> Dispatch tasks -> Monitor progress -> Report results + +## Boundaries + +### MUST +- Parse user requirements and clarify ambiguous inputs via AskUserQuestion +- Create team and spawn worker subagents in background +- Dispatch tasks with proper dependency chains (see SKILL.md Task Metadata Registry) +- Monitor progress via worker callbacks and route messages +- Maintain session state persistence +{{#each coordinator_must_rules}} +- {{this}} +{{/each}} + +### MUST NOT +- Execute {{team_purpose}} work directly (delegate to workers) +- Modify task outputs (workers own their deliverables) +- Call implementation subagents directly +- Skip dependency validation when creating task chains +{{#each coordinator_must_not_rules}} +- {{this}} +{{/each}} + +> **Core principle**: coordinator is the orchestrator, not the executor. All actual work must be delegated to worker roles via TaskCreate. 
+ +--- + +## Entry Router + +When coordinator is invoked, first detect the invocation type: + +| Detection | Condition | Handler | +|-----------|-----------|---------| +| Worker callback | Message contains `[role-name]` tag from a known worker role | -> handleCallback: auto-advance pipeline | +| Status check | Arguments contain "check" or "status" | -> handleCheck: output execution graph, no advancement | +| Manual resume | Arguments contain "resume" or "continue" | -> handleResume: check worker states, advance pipeline | +| New session | None of the above | -> Phase 0 (Session Resume Check) | + +For callback/check/resume: load `commands/monitor.md` and execute the appropriate handler, then STOP. + +--- + +## Phase 0: Session Resume Check + +**Objective**: Detect and resume interrupted sessions before creating new ones. + +**Workflow**: +1. Scan session directory for sessions with status "active" or "paused" +2. No sessions found -> proceed to Phase 1 +3. Single session found -> resume it (-> Session Reconciliation) +4. Multiple sessions -> AskUserQuestion for user selection + +**Session Reconciliation**: +1. Audit TaskList -> get real status of all tasks +2. Reconcile: session state <-> TaskList status (bidirectional sync) +3. Reset any in_progress tasks -> pending (they were interrupted) +4. Determine remaining pipeline from reconciled state +5. Rebuild team if disbanded (TeamCreate + spawn needed workers only) +6. Create missing tasks with correct blockedBy dependencies +7. Verify dependency chain integrity +8. Update session file with reconciled state +9. Kick first executable task's worker -> Phase 4 + +--- + +## Phase 1: Requirement Clarification + +**Objective**: Parse user input and gather execution parameters. + +**Workflow**: + +1. **Parse arguments** for explicit settings: mode, scope, focus areas +2. **Ask for missing parameters** via AskUserQuestion: + +{{phase1_questions}} + +3. 
**Store requirements**: mode, scope, focus, constraints + +**Success**: All parameters captured, mode finalized. + +--- + +## Phase 2: Create Team + Initialize Session + +**Objective**: Initialize team, session file, and wisdom directory. + +**Workflow**: +1. Generate session ID +2. Create session folder +3. Call TeamCreate with team name +4. Initialize wisdom directory (learnings.md, decisions.md, conventions.md, issues.md) +5. Write session file with: session_id, mode, scope, status="active" +6. Spawn worker roles (see SKILL.md Coordinator Spawn Template) + +**Success**: Team created, session file written, wisdom initialized, workers spawned. + +--- + +## Phase 3: Create Task Chain + +**Objective**: Dispatch tasks based on mode with proper dependencies. + +{{#if has_dispatch_command}} +Delegate to `commands/dispatch.md` which creates the full task chain: +1. Reads SKILL.md Task Metadata Registry for task definitions +2. Creates tasks via TaskCreate with correct blockedBy +3. Assigns owner based on role mapping +4. Includes `Session: ` in every task description +{{else}} +{{phase3_dispatch_content}} +{{/if}} + +--- + +## Phase 4: Spawn-and-Stop + +**Objective**: Spawn first batch of ready workers in background, then STOP. + +**Design**: Spawn-and-Stop + Callback pattern. +- Spawn workers with `Task(run_in_background: true)` -> immediately return +- Worker completes -> SendMessage callback -> auto-advance +- User can use "check" / "resume" to manually advance +- Coordinator does one operation per invocation, then STOPS + +**Workflow**: +{{#if has_monitor_command}} +1. Load `commands/monitor.md` +{{/if}} +2. Find tasks with: status=pending, blockedBy all resolved, owner assigned +3. For each ready task -> spawn worker (see SKILL.md Spawn Template) +4. Output status summary +5. 
STOP + +**Pipeline advancement** driven by three wake sources: +- Worker callback (automatic) -> Entry Router -> handleCallback +- User "check" -> handleCheck (status only) +- User "resume" -> handleResume (advance) + +--- + +## Phase 5: Report + Next Steps + +**Objective**: Completion report and follow-up options. + +**Workflow**: +1. Load session state -> count completed tasks, duration +2. List deliverables with output paths +3. Update session status -> "completed" +4. Offer next steps to user + +--- + +## Error Handling + +| Error | Resolution | +|-------|------------| +| Task timeout | Log, mark failed, ask user to retry or skip | +| Worker crash | Respawn worker, reassign task | +| Dependency cycle | Detect, report to user, halt | +| Invalid mode | Reject with error, ask to clarify | +| Session corruption | Attempt recovery, fallback to manual reconciliation | +{{#each coordinator_error_handlers}} +| {{this.error}} | {{this.resolution}} | +{{/each}} +``` + +--- + +## Phase 2-4 Content by Responsibility Type + +The following sections provide Phase 2-4 content templates based on `responsibility_type`. During Phase 3 generation, select the matching section and fill into `{{phase2_content}}`, `{{phase3_content}}`, `{{phase4_content}}`. + +### Read-only Analysis + +**Phase 2: Context Loading** + +``` +| Input | Source | Required | +|-------|--------|----------| +| Plan file | /plan/plan.json | Yes | +| Git diff | `git diff HEAD~1` or `git diff --cached` | Yes | +| Modified files | From git diff --name-only | Yes | +| Wisdom | /wisdom/ | No | + +**Loading steps**: + +1. Extract session path from task description +2. Read plan file for criteria reference +3. Get changed files list + +\`\`\` +Bash("git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached") +\`\`\` + +4. Read file contents for analysis (limit to 20 files) +5. 
Load wisdom files if available +``` + +**Phase 3: Analysis Execution** + +``` +Delegate to `commands/.md` if available, otherwise execute inline. + +Analysis strategy selection: + +| Condition | Strategy | +|-----------|----------| +| Single dimension analysis | Direct inline scan | +| Multi-dimension analysis | Per-dimension sequential scan | +| Deep analysis needed | CLI Fan-out to external tool | + +For each dimension, scan modified files for patterns. Record findings with severity levels. +``` + +**Phase 4: Finding Summary** + +``` +Classify findings by severity: + +| Severity | Criteria | +|----------|----------| +| Critical | Must fix before merge | +| High | Should fix, may merge with tracking | +| Medium | Recommended improvement | +| Low | Informational, optional | + +Generate structured report with file:line references and remediation suggestions. +``` + +### Code Generation + +**Phase 2: Task & Plan Loading** + +``` +**Loading steps**: + +1. Extract session path from task description +2. Read plan file -> extract task list and acceptance criteria +3. Read individual task files from `.task/` directory +4. Load wisdom files for conventions and patterns + +Fail-safe: If plan file not found -> SendMessage to coordinator requesting location. +``` + +**Phase 3: Code Implementation** + +``` +Implementation strategy selection: + +| Task Count | Complexity | Strategy | +|------------|------------|----------| +| <= 2 tasks | Low | Direct: inline Edit/Write | +| 3-5 tasks | Medium | Single agent: one code-developer for all | +| > 5 tasks | High | Batch agent: group by module, one agent per batch | + +{{#if phase3_command}} +Delegate to `commands/{{phase3_command}}.md`. +{{else}} +Execute inline based on strategy selection above. 
+{{/if}} +``` + +**Phase 4: Self-Validation** + +``` +Validation checks: + +| Check | Method | Pass Criteria | +|-------|--------|---------------| +| Syntax | `Bash("tsc --noEmit 2>&1 || true")` or equivalent | No errors | +| File existence | Verify all planned files exist | All files present | +| Import resolution | Check no broken imports | All imports resolve | + +If validation fails -> attempt auto-fix (max 2 attempts) -> report remaining issues. +``` + +### Orchestration + +**Phase 2: Context & Complexity Assessment** + +``` +Complexity assessment: + +| Signal | Weight | Keywords | +|--------|--------|----------| +| Structural change | +2 | refactor, architect, restructure, module, system | +| Cross-cutting | +2 | multiple, across, cross | +| Integration | +1 | integrate, api, database | +| Non-functional | +1 | security, performance | + +| Score | Complexity | Approach | +|-------|------------|----------| +| >= 4 | High | Multi-stage with sub-orchestration | +| 2-3 | Medium | Standard pipeline | +| 0-1 | Low | Simplified flow | +``` + +**Phase 3: Orchestrated Execution** + +``` +Launch execution based on complexity: + +| Complexity | Execution Pattern | +|------------|-------------------| +| High | Parallel sub-agents + synchronization barriers | +| Medium | Sequential stages with dependency tracking | +| Low | Direct delegation to single worker | +``` + +**Phase 4: Result Aggregation** + +``` +Merge and summarize sub-agent results: + +1. Collect all sub-agent outputs +2. Deduplicate findings across agents +3. Prioritize by severity/importance +4. Generate consolidated summary +``` + +### Validation + +**Phase 2: Environment Detection** + +``` +**Detection steps**: + +1. Get changed files from git diff +2. 
Detect test framework from project files + +| Detection | Method | +|-----------|--------| +| Changed files | `Bash("git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached")` | +| Test command | Check package.json scripts, pytest.ini, Makefile | +| Coverage tool | Check for nyc, coverage.py, jest --coverage config | +``` + +**Phase 3: Execution & Fix Cycle** + +``` +Iterative test-fix cycle: + +| Step | Action | +|------|--------| +| 1 | Run test command | +| 2 | Parse results -> check pass rate | +| 3 | Pass rate >= 95% -> exit loop (success) | +| 4 | Extract failing test details | +| 5 | Delegate fix to code-developer subagent | +| 6 | Increment iteration counter | +| 7 | iteration >= MAX (5) -> exit loop (report failures) | +| 8 | Go to Step 1 | +``` + +**Phase 4: Result Analysis** + +``` +Analyze test outcomes: + +| Metric | Source | Threshold | +|--------|--------|-----------| +| Pass rate | Test output parser | >= 95% | +| Coverage | Coverage tool output | >= 80% | +| Flaky tests | Compare runs | 0 flaky | + +Generate test report with: pass/fail counts, coverage data, failure details, fix attempts made. 
+``` + +--- + +## Variable Reference + +| Variable | Source | Description | +|----------|--------|-------------| +| `{{role_name}}` | config.role_name | Role identifier | +| `{{display_name}}` | config.display_name | Human-readable role name | +| `{{task_prefix}}` | config.task_prefix | UPPERCASE task prefix | +| `{{responsibility_type}}` | config.responsibility_type | Role type (read-only analysis, code generation, orchestration, validation) | +| `{{role_description}}` | config.role_description | One-line role description | +| `{{phase2_name}}` | patterns.phase_structure.phase2 | Phase 2 label | +| `{{phase3_name}}` | patterns.phase_structure.phase3 | Phase 3 label | +| `{{phase4_name}}` | patterns.phase_structure.phase4 | Phase 4 label | +| `{{phase2_content}}` | Generated from responsibility template | Phase 2 text content | +| `{{phase3_content}}` | Generated from responsibility template | Phase 3 text content | +| `{{phase4_content}}` | Generated from responsibility template | Phase 4 text content | +| `{{message_types}}` | config.message_types | Array of message type definitions | +| `{{commands}}` | config.commands | Array of command definitions | +| `{{has_commands}}` | config.commands.length > 0 | Boolean: has extracted commands | +| `{{has_no_commands}}` | config.commands.length === 0 | Boolean: all phases inline | +| `{{tool_capabilities}}` | config.tool_capabilities | Array of tool/subagent/CLI capabilities | +| `{{must_rules}}` | config.must_rules | Additional MUST rules | +| `{{must_not_rules}}` | config.must_not_rules | Additional MUST NOT rules | +| `{{additional_error_handlers}}` | config.additional_error_handlers | Array of {scenario, resolution} | + +## Key Differences from v1 + +| Aspect | v1 (old) | v2 (this template) | +|--------|----------|---------------------| +| Phase 1/5 | Inline JS code | Reference SKILL.md Shared Infrastructure | +| Message Bus | JS function call pseudocode | Text description + actual tool call template | +| Task 
Lifecycle | JS filter/map code | Step list description | +| Phase 2-4 | JS code per responsibility_type | Text + decision tables per responsibility_type | +| Command delegation | JS try/catch block | Text "Delegate to commands/xxx.md" | +| Coordinator template | JS spawn loops | Text phases with decision tables | diff --git a/.claude/skills_lib/team-skill-designer-v2/templates/skill-router-template.md b/.claude/skills_lib/team-skill-designer-v2/templates/skill-router-template.md new file mode 100644 index 00000000..279f8e1f --- /dev/null +++ b/.claude/skills_lib/team-skill-designer-v2/templates/skill-router-template.md @@ -0,0 +1,360 @@ +# Skill Router Template + +Template for the generated SKILL.md with role-based routing (v3 style). + +## Purpose + +| Phase | Usage | +|-------|-------| +| Phase 0 | Read to understand generated SKILL.md structure | +| Phase 3 | Apply with team-specific content | + +## Style Rules + +Generated output follows v3 conventions: + +| Rule | Description | +|------|-------------| +| No pseudocode | Flow uses text + decision tables + flow symbols | +| Code blocks = tool calls only | Only Task(), TaskCreate(), Bash(), Read() etc. | +| `` in output | Not `${variable}` or `{{handlebars}}` in generated content | +| Decision tables | All branching logic uses `| Condition | Action |` tables | +| Cadence Control | Beat diagram + checkpoint definitions | +| Compact Protection | Phase Reference with Compact column | + +> **Note**: The template itself uses `{{handlebars}}` for variable substitution during Phase 3 generation. The **generated output** must not contain `{{handlebars}}` or JS pseudocode. + +--- + +## Template + +```markdown +--- +name: team-{{team_name}} +description: Unified team skill for {{team_name}}. All roles invoke this skill with --role arg for role-specific execution. Triggers on "team {{team_name}}". +allowed-tools: {{all_roles_tools_union}} +--- + +# Team {{team_display_name}} + +Unified team skill: {{team_purpose}}. 
All team members invoke with `--role=xxx` to route to role-specific execution. + +## Architecture + +\`\`\` +{{architecture_diagram}} +\`\`\` + +## Role Router + +### Input Parsing + +Parse `$ARGUMENTS` to extract `--role`. If absent -> Orchestration Mode (auto route to coordinator). + +### Role Registry + +| Role | File | Task Prefix | Type | Compact | +|------|------|-------------|------|---------| +{{#each roles}} +| {{this.name}} | [roles/{{this.name}}/role.md](roles/{{this.name}}/role.md) | {{this.task_prefix}}-* | {{this.type}} | Compress after must re-read | +{{/each}} + +> **COMPACT PROTECTION**: Role files are execution documents, not reference material. When context compression occurs and role instructions are reduced to summaries, **you MUST immediately `Read` the corresponding role.md to reload before continuing execution**. Do not execute any Phase based on summaries. + +### Dispatch + +1. Extract `--role` from arguments +2. If no `--role` -> route to coordinator (Orchestration Mode) +3. Look up role in registry -> Read the role file -> Execute its phases + +### Orchestration Mode + +When invoked without `--role`, coordinator auto-starts. User just provides task description. + +**Invocation**: `Skill(skill="team-{{team_name}}", args="")` + +**Lifecycle**: +\`\`\` +User provides task description + -> coordinator Phase 1-3: Requirement clarification -> TeamCreate -> Create task chain + -> coordinator Phase 4: spawn first batch workers (background) -> STOP + -> Worker executes -> SendMessage callback -> coordinator advances next step + -> Loop until pipeline complete -> Phase 5 report +\`\`\` + +**User Commands** (wake paused coordinator): + +| Command | Action | +|---------|--------| +| `check` / `status` | Output execution status graph, no advancement | +| `resume` / `continue` | Check worker states, advance next step | + +--- + +## Shared Infrastructure + +The following templates apply to all worker roles. 
Each role.md only needs to write **Phase 2-4** role-specific logic.
+
+### Worker Phase 1: Task Discovery (shared by all workers)
+
+Every worker executes the same task discovery flow on startup:
+
+1. Call `TaskList()` to get all tasks
+2. Filter: subject matches this role's prefix + owner is this role + status is pending + blockedBy is empty
+3. No tasks -> idle wait
+4. Has tasks -> `TaskGet` for details -> `TaskUpdate` mark in_progress
+
+**Resume Artifact Check** (prevent duplicate output after resume):
+- Check whether this task's output artifact already exists
+- Artifact complete -> skip to Phase 5 report completion
+- Artifact incomplete or missing -> normal Phase 2-4 execution
+
+### Worker Phase 5: Report (shared by all workers)
+
+Standard reporting flow after task completion:
+
+1. **Message Bus**: Call `mcp__ccw-tools__team_msg` to log message
+   - Parameters: operation="log", team=<team>, from=<role>, to="coordinator", type=<type>, summary="[<role>] <summary>", ref=<artifact path>
+   - **CLI fallback**: When MCP unavailable -> `ccw team log --team <team> --from <role> --to coordinator --type <type> --summary "[<role>] ..." --json`
+2. **SendMessage**: Send result to coordinator (content and summary both prefixed with `[<role>]`)
+3. **TaskUpdate**: Mark task completed
+4. **Loop**: Return to Phase 1 to check next task
+
+### Wisdom Accumulation (all roles)
+
+Cross-task knowledge accumulation. Coordinator creates `wisdom/` directory at session initialization.
+
+**Directory**:
+\`\`\`
+<session_dir>/wisdom/
++-- learnings.md     # Patterns and insights
++-- decisions.md     # Architecture and design decisions
++-- conventions.md   # Codebase conventions
++-- issues.md        # Known risks and issues
+\`\`\`
+
+**Worker Load** (Phase 2): Extract `Session: <session_id>` from task description, read wisdom directory files.
+**Worker Contribute** (Phase 4/5): Write this task's discoveries to corresponding wisdom files.
+ +### Role Isolation Rules + +| Allowed | Forbidden | +|---------|-----------| +| Process tasks with own prefix | Process tasks with other role prefixes | +| SendMessage to coordinator | Communicate directly with other workers | +| Use tools declared in Toolbox | Create tasks for other roles | +| Delegate to commands/ files | Modify resources outside own responsibility | + +Coordinator additional restrictions: Do not write/modify code directly, do not call implementation subagents, do not execute analysis/test/review directly. + +--- + +## Pipeline Definitions + +### Pipeline Diagram + +\`\`\` +{{pipeline_diagram}} +\`\`\` + +### Cadence Control + +**Beat model**: Event-driven, each beat = coordinator wake -> process -> spawn -> STOP. + +\`\`\` +Beat Cycle (single beat) +======================================================== + Event Coordinator Workers +-------------------------------------------------------- + callback/resume --> +- handleCallback -+ + | mark completed | + | check pipeline | + +- handleSpawnNext -+ + | find ready tasks | + | spawn workers ---+--> [Worker A] Phase 1-5 + | (parallel OK) --+--> [Worker B] Phase 1-5 + +- STOP (idle) -----+ | + | + callback <-----------------------------------------+ + (next beat) SendMessage + TaskUpdate(completed) +======================================================== +\`\`\` + +{{cadence_beat_view}} + +**Checkpoints**: + +{{checkpoint_table}} + +**Stall Detection** (coordinator `handleCheck` executes): + +| Check | Condition | Resolution | +|-------|-----------|------------| +| Worker no response | in_progress task no callback | Report waiting task list, suggest user `resume` | +| Pipeline deadlock | no ready + no running + has pending | Check blockedBy dependency chain, report blocking point | +{{#if has_gc_loop}} +| GC loop exceeded | iteration > max_rounds | Terminate loop, output latest report | +{{/if}} + +### Task Metadata Registry + +| Task ID | Role | Phase | Dependencies | Description | 
+|---------|------|-------|-------------|-------------| +{{#each task_metadata}} +| {{this.task_id}} | {{this.role}} | {{this.phase}} | {{this.dependencies}} | {{this.description}} | +{{/each}} + +## Coordinator Spawn Template + +When coordinator spawns workers, use background mode (Spawn-and-Stop): + +\`\`\` +Task({ + subagent_type: "general-purpose", + description: "Spawn worker", + team_name: , + name: "", + run_in_background: true, + prompt: `You are team "" . + +## Primary Directive +All your work must be executed through Skill to load role definition: +Skill(skill="team-{{team_name}}", args="--role=") + +Current requirement: +Session: + +## Role Guidelines +- Only process -* tasks, do not execute other role work +- All output prefixed with [] identifier +- Only communicate with coordinator +- Do not use TaskCreate for other roles +- Call mcp__ccw-tools__team_msg before every SendMessage + +## Workflow +1. Call Skill -> load role definition and execution logic +2. Follow role.md 5-Phase flow +3. team_msg + SendMessage results to coordinator +4. TaskUpdate completed -> check next task` +}) +\`\`\` + +{{#if has_parallel_spawn}} +### Parallel Spawn (N agents for same role) + +> When pipeline has parallel tasks assigned to the same role, spawn N distinct agents with unique names. A single agent can only process tasks serially. + +**Parallel detection**: + +| Condition | Action | +|-----------|--------| +| N parallel tasks for same role prefix | Spawn N agents named `-1`, `-2` ... | +| Single task for role | Standard spawn (single agent) | + +**Parallel spawn template**: + +\`\`\` +Task({ + subagent_type: "general-purpose", + description: "Spawn - worker", + team_name: , + name: "-", + run_in_background: true, + prompt: `You are team "" (-). +Your agent name is "-", use this name for task discovery owner matching. 
+ +## Primary Directive +Skill(skill="team-{{team_name}}", args="--role= --agent-name=-") + +## Role Guidelines +- Only process tasks where owner === "-" with -* prefix +- All output prefixed with [] identifier + +## Workflow +1. TaskList -> find tasks where owner === "-" with -* prefix +2. Skill -> execute role definition +3. team_msg + SendMessage results to coordinator +4. TaskUpdate completed -> check next task` +}) +\`\`\` + +**Dispatch must match agent names**: In dispatch, parallel tasks use instance-specific owner: `-`. In role.md, task discovery uses --agent-name for owner matching. +{{/if}} + +## Session Directory + +\`\`\` +{{session_directory_tree}} +\`\`\` + +{{#if has_session_resume}} +## Session Resume + +Coordinator supports `--resume` / `--continue` for interrupted sessions: + +1. Scan session directory for sessions with status "active" or "paused" +2. Multiple matches -> AskUserQuestion for selection +3. Audit TaskList -> reconcile session state <-> task status +4. Reset in_progress -> pending (interrupted tasks) +5. Rebuild team and spawn needed workers only +6. Create missing tasks with correct blockedBy +7. 
Kick first executable task -> Phase 4 coordination loop
+{{/if}}
+
+{{#if shared_resources}}
+## Shared Resources
+
+| Resource | Path | Usage |
+|----------|------|-------|
+{{#each shared_resources}}
+| {{this.name}} | [{{this.path}}]({{this.path}}) | {{this.usage}} |
+{{/each}}
+{{/if}}
+
+## Error Handling
+
+| Scenario | Resolution |
+|----------|------------|
+| Unknown --role value | Error with available role list |
+| Missing --role arg | Orchestration Mode -> auto route to coordinator |
+| Role file not found | Error with expected path (roles/<role>/role.md) |
+| Command file not found | Fallback to inline execution in role.md |
+{{#each additional_error_handlers}}
+| {{this.scenario}} | {{this.resolution}} |
+{{/each}}
+```
+
+---
+
+## Variable Reference
+
+| Variable | Source | Description |
+|----------|--------|-------------|
+| `{{team_name}}` | config.team_name | Team identifier (lowercase) |
+| `{{team_display_name}}` | config.team_display_name | Human-readable team name |
+| `{{team_purpose}}` | config.team_purpose | One-line team purpose |
+| `{{all_roles_tools_union}}` | Union of all roles' allowed-tools | Combined tool list |
+| `{{roles}}` | config.roles[] | Array of role definitions |
+| `{{architecture_diagram}}` | Generated from role structure | ASCII architecture diagram |
+| `{{pipeline_diagram}}` | Generated from task chain | ASCII pipeline diagram |
+| `{{cadence_beat_view}}` | Generated from pipeline | Pipeline beat view diagram |
+| `{{checkpoint_table}}` | Generated from pipeline | Checkpoint trigger/location/behavior table |
+| `{{task_metadata}}` | Generated from pipeline | Task metadata registry entries |
+| `{{session_directory_tree}}` | Generated from session structure | Session directory tree |
+| `{{has_parallel_spawn}}` | config.has_parallel_spawn | Boolean: pipeline has parallel same-role tasks |
+| `{{has_session_resume}}` | config.has_session_resume | Boolean: supports session resume |
+| `{{has_gc_loop}}` | config.has_gc_loop | 
Boolean: has guard-and-correct loops |
+| `{{shared_resources}}` | config.shared_resources | Array of shared resource definitions |
+| `{{additional_error_handlers}}` | config.additional_error_handlers | Array of {scenario, resolution} |
+
+## Key Differences from v1
+
+| Aspect | v1 (old) | v2 (this template) |
+|--------|----------|---------------------|
+| Role lookup | `VALID_ROLES` JS object | Role Registry decision table with markdown links |
+| Routing | JS regex + if/else | Text dispatch flow (3 steps) |
+| Spawn template | JS code with `${variable}` | Text template with `<angle-bracket>` placeholders |
+| Infrastructure | Inline JS per role | Shared Infrastructure section (Phase 1/5 templates) |
+| Pipeline | ASCII only | Cadence Control + beat view + checkpoints |
+| Compact safety | None | Compact Protection with re-read mandate |
+| Orchestration Mode | JS if/else block | Decision table + lifecycle flow diagram |