From 4ad05f82176cfd30cc0364c4b4404c0219880995 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Thu, 26 Feb 2026 13:59:47 +0800 Subject: [PATCH] feat: add templates for architecture documents, epics, product briefs, and requirements PRD - Introduced architecture document template for Phase 4, including structure and individual ADR records. - Added epics & stories template for Phase 5, detailing epic breakdown and dependencies. - Created product brief template for Phase 2, summarizing product vision, problem statement, and target users. - Developed requirements PRD template for Phase 3, outlining functional and non-functional requirements with traceability. - Implemented spec command for project spec management with subcommands for loading, listing, rebuilding, and initializing specs. --- .../commands/workflow/analyze-with-file.md | 40 +- .claude/skills/team-lifecycle-v3/SKILL.md | 299 +++++++ .../team-lifecycle-v3/roles/analyst/role.md | 108 +++ .../roles/architect/commands/assess.md | 193 ++++ .../team-lifecycle-v3/roles/architect/role.md | 99 ++ .../roles/coordinator/role.md | 174 ++++ .../roles/discussant/role.md | 117 +++ .../team-lifecycle-v3/roles/executor/role.md | 103 +++ .../team-lifecycle-v3/roles/explorer/role.md | 91 ++ .../roles/fe-developer/role.md | 111 +++ .../fe-qa/commands/pre-delivery-checklist.md | 152 ++++ .../team-lifecycle-v3/roles/fe-qa/role.md | 113 +++ .../team-lifecycle-v3/roles/planner/role.md | 120 +++ .../roles/reviewer/commands/code-review.md | 163 ++++ .../roles/reviewer/commands/spec-quality.md | 201 +++++ .../team-lifecycle-v3/roles/reviewer/role.md | 104 +++ .../roles/tester/commands/validate.md | 152 ++++ .../team-lifecycle-v3/roles/tester/role.md | 108 +++ .../team-lifecycle-v3/roles/writer/role.md | 96 ++ .../specs/document-standards.md | 192 ++++ .../team-lifecycle-v3/specs/quality-gates.md | 207 +++++ .../team-lifecycle-v3/specs/team-config.json | 156 ++++ .../templates/architecture-doc.md | 254 ++++++ 
.../templates/epics-template.md | 196 ++++ .../templates/product-brief.md | 133 +++ .../templates/requirements-prd.md | 224 +++++ .../skills/workflow-lite-plan copy/SKILL.md | 177 ++++ .../phases/01-lite-plan.md | 770 ++++++++++++++++ .../phases/02-lite-execute.md | 776 ++++++++++++++++ .../workflow-lite-plan/phases/01-lite-plan.md | 52 +- .claude/skills_lib/team-lifecycle-v2/SKILL.md | 574 ++++++++++++ .../team-lifecycle-v2/roles/analyst/role.md | 271 ++++++ .../roles/architect/commands/assess.md | 271 ++++++ .../team-lifecycle-v2/roles/architect/role.md | 368 ++++++++ .../roles/coordinator/commands/dispatch.md | 523 +++++++++++ .../roles/coordinator/commands/monitor.md | 368 ++++++++ .../roles/coordinator/role.md | 695 ++++++++++++++ .../roles/discussant/commands/critique.md | 396 ++++++++ .../roles/discussant/role.md | 265 ++++++ .../roles/executor/commands/implement.md | 356 ++++++++ .../team-lifecycle-v2/roles/executor/role.md | 324 +++++++ .../team-lifecycle-v2/roles/explorer/role.md | 301 +++++++ .../roles/fe-developer/role.md | 410 +++++++++ .../fe-qa/commands/pre-delivery-checklist.md | 116 +++ .../team-lifecycle-v2/roles/fe-qa/role.md | 510 +++++++++++ .../roles/planner/commands/explore.md | 466 ++++++++++ .../team-lifecycle-v2/roles/planner/role.md | 253 ++++++ .../roles/reviewer/commands/code-review.md | 689 ++++++++++++++ .../roles/reviewer/commands/spec-quality.md | 845 ++++++++++++++++++ .../team-lifecycle-v2/roles/reviewer/role.md | 429 +++++++++ .../roles/tester/commands/validate.md | 538 +++++++++++ .../team-lifecycle-v2/roles/tester/role.md | 385 ++++++++ .../roles/writer/commands/generate-doc.md | 698 +++++++++++++++ .../team-lifecycle-v2/roles/writer/role.md | 257 ++++++ .../specs/document-standards.md | 192 ++++ .../team-lifecycle-v2/specs/quality-gates.md | 207 +++++ .../team-lifecycle-v2/specs/team-config.json | 156 ++++ .../templates/architecture-doc.md | 254 ++++++ .../templates/epics-template.md | 196 ++++ .../templates/product-brief.md | 
133 +++ .../templates/requirements-prd.md | 224 +++++ .codex/skills/analyze-with-file/SKILL.md | 43 + ccw/src/cli.ts | 11 + ccw/src/commands/spec.ts | 439 +++++++++ ccw/src/tools/spec-index-builder.ts | 4 + 65 files changed, 17841 insertions(+), 7 deletions(-) create mode 100644 .claude/skills/team-lifecycle-v3/SKILL.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/analyst/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/architect/commands/assess.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/architect/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/coordinator/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/discussant/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/executor/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/explorer/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/fe-developer/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/fe-qa/commands/pre-delivery-checklist.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/fe-qa/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/planner/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/reviewer/commands/code-review.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/reviewer/commands/spec-quality.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/reviewer/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/tester/commands/validate.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/tester/role.md create mode 100644 .claude/skills/team-lifecycle-v3/roles/writer/role.md create mode 100644 .claude/skills/team-lifecycle-v3/specs/document-standards.md create mode 100644 .claude/skills/team-lifecycle-v3/specs/quality-gates.md create mode 100644 .claude/skills/team-lifecycle-v3/specs/team-config.json create mode 100644 .claude/skills/team-lifecycle-v3/templates/architecture-doc.md create 
mode 100644 .claude/skills/team-lifecycle-v3/templates/epics-template.md create mode 100644 .claude/skills/team-lifecycle-v3/templates/product-brief.md create mode 100644 .claude/skills/team-lifecycle-v3/templates/requirements-prd.md create mode 100644 .claude/skills/workflow-lite-plan copy/SKILL.md create mode 100644 .claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md create mode 100644 .claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/SKILL.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/analyst/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/architect/commands/assess.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/architect/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/dispatch.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/monitor.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/coordinator/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/discussant/commands/critique.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/discussant/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/executor/commands/implement.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/executor/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/explorer/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/fe-developer/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/fe-qa/commands/pre-delivery-checklist.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/fe-qa/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/planner/commands/explore.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/planner/role.md create mode 100644 
.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/code-review.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/spec-quality.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/reviewer/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/tester/commands/validate.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/tester/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/writer/commands/generate-doc.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/roles/writer/role.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/specs/document-standards.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/specs/quality-gates.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/specs/team-config.json create mode 100644 .claude/skills_lib/team-lifecycle-v2/templates/architecture-doc.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/templates/epics-template.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/templates/product-brief.md create mode 100644 .claude/skills_lib/team-lifecycle-v2/templates/requirements-prd.md create mode 100644 ccw/src/commands/spec.ts diff --git a/.claude/commands/workflow/analyze-with-file.md b/.claude/commands/workflow/analyze-with-file.md index 1fbe49e3..587bb273 100644 --- a/.claude/commands/workflow/analyze-with-file.md +++ b/.claude/commands/workflow/analyze-with-file.md @@ -451,7 +451,20 @@ CONSTRAINTS: ${perspective.constraints} - Corrected assumptions - New insights -5. **Repeat or Converge** +5. 
**๐Ÿ“Œ Intent Drift Check** (every round โ‰ฅ 2) + - Re-read "User Intent" from discussion.md header + - For each original intent item, check: addressed / in-progress / not yet discussed / implicitly absorbed + - If any item is "implicitly absorbed" (addressed by a different solution than originally envisioned), explicitly note this in discussion.md: + ```markdown + #### Intent Coverage Check + - โœ… Intent 1: [addressed in Round N] + - ๐Ÿ”„ Intent 2: [in-progress, current focus] + - โš ๏ธ Intent 3: [implicitly absorbed by X โ€” needs explicit confirmation] + - โŒ Intent 4: [not yet discussed] + ``` + - If any item is โŒ or โš ๏ธ after 3+ rounds, surface it to the user in the next round's presentation + +6. **Repeat or Converge** - Continue loop (max 5 rounds) or exit to Phase 4 **Discussion Actions**: @@ -482,7 +495,28 @@ CONSTRAINTS: ${perspective.constraints} **Workflow Steps**: -1. **Consolidate Insights** +1. **๐Ÿ“Œ Intent Coverage Verification** (MANDATORY before synthesis) + - Re-read all original "User Intent" items from discussion.md header + - For EACH intent item, determine coverage status: + - **โœ… Addressed**: Explicitly discussed and concluded with clear design/recommendation + - **๐Ÿ”€ Transformed**: Original intent evolved into a different solution โ€” document the transformation chain + - **โš ๏ธ Absorbed**: Implicitly covered by a broader solution โ€” flag for explicit confirmation + - **โŒ Missed**: Not discussed โ€” MUST be either addressed now or explicitly listed as out-of-scope with reason + - Write "Intent Coverage Matrix" to discussion.md: + ```markdown + ### Intent Coverage Matrix + | # | Original Intent | Status | Where Addressed | Notes | + |---|----------------|--------|-----------------|-------| + | 1 | [intent text] | โœ… Addressed | Round N, Conclusion #M | | + | 2 | [intent text] | ๐Ÿ”€ Transformed | Round N โ†’ Round M | Original: X โ†’ Final: Y | + | 3 | [intent text] | โŒ Missed | โ€” | Reason for omission | + ``` + - 
**Gate**: If any item is โŒ Missed, MUST either: + - (a) Add a dedicated discussion round to address it before continuing, OR + - (b) Explicitly confirm with user that it is intentionally deferred + - Add `intent_coverage[]` to conclusions.json + +2. **Consolidate Insights** - Extract all findings from discussion timeline - **๐Ÿ“Œ Compile Decision Trail**: Aggregate all Decision Records from Phases 1-3 into a consolidated decision log - **Key conclusions**: Main points with evidence and confidence levels (high/medium/low) @@ -572,10 +606,12 @@ CONSTRAINTS: ${perspective.constraints} - `open_questions[]`: Unresolved questions - `follow_up_suggestions[]`: {type, summary} - `decision_trail[]`: {round, decision, context, options_considered, chosen, reason, impact} +- `intent_coverage[]`: {intent, status, where_addressed, notes} **Success Criteria**: - conclusions.json created with final synthesis - discussion.md finalized with conclusions and decision trail +- **๐Ÿ“Œ Intent Coverage Matrix** verified โ€” all original intents accounted for (no โŒ Missed without explicit user deferral) - User offered next step options - Session complete - **๐Ÿ“Œ Complete decision trail** documented and traceable from initial scoping to final conclusions diff --git a/.claude/skills/team-lifecycle-v3/SKILL.md b/.claude/skills/team-lifecycle-v3/SKILL.md new file mode 100644 index 00000000..ab1f065f --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/SKILL.md @@ -0,0 +1,299 @@ +--- +name: team-lifecycle-v3 +description: Unified team skill for full lifecycle - spec/impl/test. All roles invoke this skill with --role arg for role-specific execution. Triggers on "team lifecycle". 
+allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), TodoWrite(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*) +--- + +# Team Lifecycle v3 + +Unified team skill: specification โ†’ implementation โ†’ testing โ†’ review. All team members invoke with `--role=xxx` to route to role-specific execution. + +## Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Skill(skill="team-lifecycle-v3") โ”‚ +โ”‚ args="ไปปๅŠกๆ่ฟฐ" ๆˆ– args="--role=xxx" โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Role Router + โ”Œโ”€โ”€โ”€โ”€ --role present? โ”€โ”€โ”€โ”€โ” + โ”‚ NO โ”‚ YES + โ†“ โ†“ + Orchestration Mode Role Dispatch + (auto โ†’ coordinator) (route to role.md) + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ + coordinator analyst writer discussant planner executor tester reviewer + โ†‘ โ†‘ + on-demand by coordinator + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ explorer โ”‚ โ”‚architectโ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ fe-developer โ”‚ โ”‚fe-qa โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Role Router + +### Input Parsing + +Parse `$ARGUMENTS` to extract `--role`. If absent โ†’ Orchestration Mode (auto route to coordinator). 
+ +### Role Registry + +| Role | File | Task Prefix | Type | +|------|------|-------------|------| +| coordinator | roles/coordinator/role.md | (none) | orchestrator | +| analyst | roles/analyst/role.md | RESEARCH-* | pipeline | +| writer | roles/writer/role.md | DRAFT-* | pipeline | +| discussant | roles/discussant/role.md | DISCUSS-* | pipeline | +| planner | roles/planner/role.md | PLAN-* | pipeline | +| executor | roles/executor/role.md | IMPL-* | pipeline | +| tester | roles/tester/role.md | TEST-* | pipeline | +| reviewer | roles/reviewer/role.md | REVIEW-* + QUALITY-* | pipeline | +| explorer | roles/explorer/role.md | EXPLORE-* | service (on-demand) | +| architect | roles/architect/role.md | ARCH-* | consulting (on-demand) | +| fe-developer | roles/fe-developer/role.md | DEV-FE-* | frontend pipeline | +| fe-qa | roles/fe-qa/role.md | QA-FE-* | frontend pipeline | + +### Dispatch + +1. Extract `--role` from arguments +2. If no `--role` โ†’ route to coordinator (Orchestration Mode) +3. Look up role in registry โ†’ Read the role file โ†’ Execute its phases + +### Orchestration Mode + +When invoked without `--role`, coordinator auto-starts. User just provides task description. 
+ +**Invocation**: `Skill(skill="team-lifecycle-v3", args="ไปปๅŠกๆ่ฟฐ")` + +**Lifecycle**: +``` +็”จๆˆทๆไพ›ไปปๅŠกๆ่ฟฐ + โ†’ coordinator Phase 1-3: ้œ€ๆฑ‚ๆพ„ๆธ… โ†’ TeamCreate โ†’ ๅˆ›ๅปบไปปๅŠก้“พ + โ†’ coordinator Phase 4: spawn ้ฆ–ๆ‰น worker (ๅŽๅฐ) โ†’ STOP + โ†’ Worker ๆ‰ง่กŒ โ†’ SendMessage ๅ›ž่ฐƒ โ†’ coordinator ๆŽจ่ฟ›ไธ‹ไธ€ๆญฅ + โ†’ ๅพช็Žฏ็›ดๅˆฐ pipeline ๅฎŒๆˆ โ†’ Phase 5 ๆฑ‡ๆŠฅ +``` + +**User Commands** (ๅ”ค้†’ๅทฒๆš‚ๅœ็š„ coordinator): + +| Command | Action | +|---------|--------| +| `check` / `status` | ่พ“ๅ‡บๆ‰ง่กŒ็Šถๆ€ๅ›พ๏ผŒไธๆŽจ่ฟ› | +| `resume` / `continue` | ๆฃ€ๆŸฅ worker ็Šถๆ€๏ผŒๆŽจ่ฟ›ไธ‹ไธ€ๆญฅ | + +--- + +## Shared Infrastructure + +ไปฅไธ‹ๆจกๆฟ้€‚็”จไบŽๆ‰€ๆœ‰ worker ่ง’่‰ฒใ€‚ๆฏไธช role.md ๅช้œ€ๅ†™ **Phase 2-4** ็š„่ง’่‰ฒ็‰นๆœ‰้€ป่พ‘ใ€‚ + +### Worker Phase 1: Task Discovery (ๆ‰€ๆœ‰ worker ๅ…ฑไบซ) + +ๆฏไธช worker ๅฏๅŠจๅŽๆ‰ง่กŒ็›ธๅŒ็š„ไปปๅŠกๅ‘็Žฐๆต็จ‹๏ผš + +1. ่ฐƒ็”จ `TaskList()` ่Žทๅ–ๆ‰€ๆœ‰ไปปๅŠก +2. ็ญ›้€‰: subject ๅŒน้…ๆœฌ่ง’่‰ฒๅ‰็ผ€ + owner ๆ˜ฏๆœฌ่ง’่‰ฒ + status ไธบ pending + blockedBy ไธบ็ฉบ +3. ๆ— ไปปๅŠก โ†’ idle ็ญ‰ๅพ… +4. ๆœ‰ไปปๅŠก โ†’ `TaskGet` ่Žทๅ–่ฏฆๆƒ… โ†’ `TaskUpdate` ๆ ‡่ฎฐ in_progress + +**Resume Artifact Check** (้˜ฒๆญขๆขๅคๅŽ้‡ๅคไบงๅ‡บ): +- ๆฃ€ๆŸฅๆœฌไปปๅŠก็š„่พ“ๅ‡บไบง็‰ฉๆ˜ฏๅฆๅทฒๅญ˜ๅœจ +- ไบง็‰ฉๅฎŒๆ•ด โ†’ ่ทณๅˆฐ Phase 5 ๆŠฅๅ‘ŠๅฎŒๆˆ +- ไบง็‰ฉไธๅฎŒๆ•ดๆˆ–ไธๅญ˜ๅœจ โ†’ ๆญฃๅธธๆ‰ง่กŒ Phase 2-4 + +### Worker Phase 5: Report (ๆ‰€ๆœ‰ worker ๅ…ฑไบซ) + +ไปปๅŠกๅฎŒๆˆๅŽ็š„ๆ ‡ๅ‡†ๆŠฅๅ‘Šๆต็จ‹: + +1. **Message Bus**: ่ฐƒ็”จ `mcp__ccw-tools__team_msg` ่ฎฐๅฝ•ๆถˆๆฏ + - ๅ‚ๆ•ฐ: operation="log", team=, from=, to="coordinator", type=<ๆถˆๆฏ็ฑปๅž‹>, summary="[] <ๆ‘˜่ฆ>", ref=<ไบง็‰ฉ่ทฏๅพ„> + - **CLI fallback**: ๅฝ“ MCP ไธๅฏ็”จๆ—ถ โ†’ `ccw team log --team --from --to coordinator --type --summary "[] ..." --json` +2. **SendMessage**: ๅ‘้€็ป“ๆžœ็ป™ coordinator (content ๅ’Œ summary ้ƒฝๅธฆ `[]` ๅ‰็ผ€) +3. **TaskUpdate**: ๆ ‡่ฎฐไปปๅŠก completed +4. 
**Loop**: ๅ›žๅˆฐ Phase 1 ๆฃ€ๆŸฅไธ‹ไธ€ไธชไปปๅŠก + +### Wisdom Accumulation (ๆ‰€ๆœ‰่ง’่‰ฒ) + +่ทจไปปๅŠก็Ÿฅ่ฏ†็งฏ็ดฏใ€‚Coordinator ๅœจ session ๅˆๅง‹ๅŒ–ๆ—ถๅˆ›ๅปบ `wisdom/` ็›ฎๅฝ•ใ€‚ + +**็›ฎๅฝ•**: +``` +/wisdom/ +โ”œโ”€โ”€ learnings.md # ๆจกๅผๅ’ŒๆดžๅฏŸ +โ”œโ”€โ”€ decisions.md # ๆžถๆž„ๅ’Œ่ฎพ่ฎกๅ†ณ็ญ– +โ”œโ”€โ”€ conventions.md # ไปฃ็ ๅบ“็บฆๅฎš +โ””โ”€โ”€ issues.md # ๅทฒ็Ÿฅ้ฃŽ้™ฉๅ’Œ้—ฎ้ข˜ +``` + +**Worker ๅŠ ่ฝฝ** (Phase 2): ไปŽ task description ๆๅ– `Session: `, ่ฏปๅ– wisdom ็›ฎๅฝ•ไธ‹ๅ„ๆ–‡ไปถใ€‚ +**Worker ่ดก็Œฎ** (Phase 4/5): ๅฐ†ๆœฌไปปๅŠกๅ‘็Žฐๅ†™ๅ…ฅๅฏนๅบ” wisdom ๆ–‡ไปถใ€‚ + +### Role Isolation Rules + +| ๅ…่ฎธ | ็ฆๆญข | +|------|------| +| ๅค„็†่‡ชๅทฑๅ‰็ผ€็š„ไปปๅŠก | ๅค„็†ๅ…ถไป–่ง’่‰ฒๅ‰็ผ€็š„ไปปๅŠก | +| SendMessage ็ป™ coordinator | ็›ดๆŽฅไธŽๅ…ถไป– worker ้€šไฟก | +| ไฝฟ็”จ Toolbox ไธญๅฃฐๆ˜Ž็š„ๅทฅๅ…ท | ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก | +| ๅง”ๆดพ็ป™ commands/ ไธญ็š„ๅ‘ฝไปค | ไฟฎๆ”นไธๅฑžไบŽๆœฌ่Œ่ดฃ็š„่ต„ๆบ | + +Coordinator ้ขๅค–็ฆๆญข: ็›ดๆŽฅ็ผ–ๅ†™/ไฟฎๆ”นไปฃ็ ใ€่ฐƒ็”จๅฎž็Žฐ็ฑป subagentใ€็›ดๆŽฅๆ‰ง่กŒๅˆ†ๆž/ๆต‹่ฏ•/ๅฎกๆŸฅใ€‚ + +--- + +## Pipeline Definitions + +### Spec-only (12 tasks) + +``` +RESEARCH-001 โ†’ DISCUSS-001 โ†’ DRAFT-001 โ†’ DISCUSS-002 +โ†’ DRAFT-002 โ†’ DISCUSS-003 โ†’ DRAFT-003 โ†’ DISCUSS-004 +โ†’ DRAFT-004 โ†’ DISCUSS-005 โ†’ QUALITY-001 โ†’ DISCUSS-006 +``` + +### Impl-only / Backend (4 tasks) + +``` +PLAN-001 โ†’ IMPL-001 โ†’ TEST-001 + REVIEW-001 +``` + +### Full-lifecycle (16 tasks) + +``` +[Spec pipeline] โ†’ PLAN-001(blockedBy: DISCUSS-006) โ†’ IMPL-001 โ†’ TEST-001 + REVIEW-001 +``` + +### Frontend Pipelines + +``` +FE-only: PLAN-001 โ†’ DEV-FE-001 โ†’ QA-FE-001 + (GC loop: QA-FE verdict=NEEDS_FIX โ†’ DEV-FE-002 โ†’ QA-FE-002, max 2 rounds) + +Fullstack: PLAN-001 โ†’ IMPL-001 โˆฅ DEV-FE-001 โ†’ TEST-001 โˆฅ QA-FE-001 โ†’ REVIEW-001 + +Full + FE: [Spec pipeline] โ†’ PLAN-001 โ†’ IMPL-001 โˆฅ DEV-FE-001 โ†’ TEST-001 โˆฅ QA-FE-001 โ†’ REVIEW-001 +``` + +### Task Metadata Registry + +| Task ID | Role | Phase | Dependencies | Description 
| +|---------|------|-------|-------------|-------------| +| RESEARCH-001 | analyst | spec | (none) | Seed analysis and context gathering | +| DISCUSS-001 | discussant | spec | RESEARCH-001 | Critique research findings | +| DRAFT-001 | writer | spec | DISCUSS-001 | Generate Product Brief | +| DISCUSS-002 | discussant | spec | DRAFT-001 | Critique Product Brief | +| DRAFT-002 | writer | spec | DISCUSS-002 | Generate Requirements/PRD | +| DISCUSS-003 | discussant | spec | DRAFT-002 | Critique Requirements/PRD | +| DRAFT-003 | writer | spec | DISCUSS-003 | Generate Architecture Document | +| DISCUSS-004 | discussant | spec | DRAFT-003 | Critique Architecture Document | +| DRAFT-004 | writer | spec | DISCUSS-004 | Generate Epics & Stories | +| DISCUSS-005 | discussant | spec | DRAFT-004 | Critique Epics | +| QUALITY-001 | reviewer | spec | DISCUSS-005 | 5-dimension spec quality validation | +| DISCUSS-006 | discussant | spec | QUALITY-001 | Final review discussion and sign-off | +| PLAN-001 | planner | impl | (none or DISCUSS-006) | Multi-angle exploration and planning | +| IMPL-001 | executor | impl | PLAN-001 | Code implementation | +| TEST-001 | tester | impl | IMPL-001 | Test-fix cycles | +| REVIEW-001 | reviewer | impl | IMPL-001 | 4-dimension code review | +| DEV-FE-001 | fe-developer | impl | PLAN-001 | Frontend implementation | +| QA-FE-001 | fe-qa | impl | DEV-FE-001 | 5-dimension frontend QA | + +## Coordinator Spawn Template + +When coordinator spawns workers, use background mode (Spawn-and-Stop): + +``` +Task({ + subagent_type: "general-purpose", + description: "Spawn worker", + team_name: , + name: "", + run_in_background: true, + prompt: `ไฝ ๆ˜ฏ team "" ็š„ . 
+ +## ้ฆ–่ฆๆŒ‡ไปค +ไฝ ็š„ๆ‰€ๆœ‰ๅทฅไฝœๅฟ…้กป้€š่ฟ‡่ฐƒ็”จ Skill ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅŽๆ‰ง่กŒ๏ผš +Skill(skill="team-lifecycle-v3", args="--role=") + +ๅฝ“ๅ‰้œ€ๆฑ‚: +Session: + +## ่ง’่‰ฒๅ‡†ๅˆ™ +- ๅชๅค„็† -* ไปปๅŠก๏ผŒไธๆ‰ง่กŒๅ…ถไป–่ง’่‰ฒๅทฅไฝœ +- ๆ‰€ๆœ‰่พ“ๅ‡บๅธฆ [] ๆ ‡่ฏ†ๅ‰็ผ€ +- ไป…ไธŽ coordinator ้€šไฟก +- ไธไฝฟ็”จ TaskCreate ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก +- ๆฏๆฌก SendMessage ๅ‰ๅ…ˆ่ฐƒ็”จ mcp__ccw-tools__team_msg ่ฎฐๅฝ• + +## ๅทฅไฝœๆต็จ‹ +1. ่ฐƒ็”จ Skill โ†’ ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅ’Œๆ‰ง่กŒ้€ป่พ‘ +2. ๆŒ‰ role.md 5-Phase ๆต็จ‹ๆ‰ง่กŒ +3. team_msg + SendMessage ็ป“ๆžœ็ป™ coordinator +4. TaskUpdate completed โ†’ ๆฃ€ๆŸฅไธ‹ไธ€ไธชไปปๅŠก` +}) +``` + +## Session Directory + +``` +.workflow/.team/TLS--/ +โ”œโ”€โ”€ team-session.json # Session state +โ”œโ”€โ”€ spec/ # Spec artifacts +โ”‚ โ”œโ”€โ”€ spec-config.json +โ”‚ โ”œโ”€โ”€ discovery-context.json +โ”‚ โ”œโ”€โ”€ product-brief.md +โ”‚ โ”œโ”€โ”€ requirements/ +โ”‚ โ”œโ”€โ”€ architecture/ +โ”‚ โ”œโ”€โ”€ epics/ +โ”‚ โ”œโ”€โ”€ readiness-report.md +โ”‚ โ””โ”€โ”€ spec-summary.md +โ”œโ”€โ”€ discussions/ # Discussion records +โ”œโ”€โ”€ plan/ # Plan artifacts +โ”‚ โ”œโ”€โ”€ plan.json +โ”‚ โ””โ”€โ”€ .task/TASK-*.json +โ”œโ”€โ”€ explorations/ # Explorer output (cached) +โ”œโ”€โ”€ architecture/ # Architect assessments +โ”œโ”€โ”€ qa/ # QA audit reports +โ”œโ”€โ”€ wisdom/ # Cross-task knowledge +โ”‚ โ”œโ”€โ”€ learnings.md +โ”‚ โ”œโ”€โ”€ decisions.md +โ”‚ โ”œโ”€โ”€ conventions.md +โ”‚ โ””โ”€โ”€ issues.md +โ””โ”€โ”€ shared-memory.json # Cross-role state +``` + +## Session Resume + +Coordinator supports `--resume` / `--continue` for interrupted sessions: + +1. Scan `.workflow/.team/TLS-*/team-session.json` for active/paused sessions +2. Multiple matches โ†’ AskUserQuestion for selection +3. Audit TaskList โ†’ reconcile session state โ†” task status +4. Reset in_progress โ†’ pending (interrupted tasks) +5. Rebuild team and spawn needed workers only +6. Create missing tasks with correct blockedBy +7. 
Kick first executable task โ†’ Phase 4 coordination loop + +## Shared Spec Resources + +| Resource | Path | Usage | +|----------|------|-------| +| Document Standards | specs/document-standards.md | YAML frontmatter, naming, structure | +| Quality Gates | specs/quality-gates.md | Per-phase quality gates | +| Product Brief Template | templates/product-brief.md | DRAFT-001 | +| Requirements Template | templates/requirements-prd.md | DRAFT-002 | +| Architecture Template | templates/architecture-doc.md | DRAFT-003 | +| Epics Template | templates/epics-template.md | DRAFT-004 | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Unknown --role value | Error with available role list | +| Missing --role arg | Orchestration Mode โ†’ coordinator | +| Role file not found | Error with expected path | +| Command file not found | Fallback to inline execution | diff --git a/.claude/skills/team-lifecycle-v3/roles/analyst/role.md b/.claude/skills/team-lifecycle-v3/roles/analyst/role.md new file mode 100644 index 00000000..52a3fa80 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/analyst/role.md @@ -0,0 +1,108 @@ +# Role: analyst + +Seed analysis, codebase exploration, and multi-dimensional context gathering. Maps to spec-generator Phase 1 (Discovery). 
+ +## Identity + +- **Name**: `analyst` | **Prefix**: `RESEARCH-*` | **Tag**: `[analyst]` +- **Responsibility**: Seed Analysis โ†’ Codebase Exploration โ†’ Context Packaging โ†’ Report + +## Boundaries + +### MUST +- Only process RESEARCH-* tasks +- Communicate only with coordinator +- Generate discovery-context.json and spec-config.json +- Support file reference input (@ prefix or .md/.txt extension) + +### MUST NOT +- Create tasks for other roles +- Directly contact other workers +- Modify spec documents (only create discovery artifacts) +- Skip seed analysis step + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| research_ready | โ†’ coordinator | Research complete | +| research_progress | โ†’ coordinator | Long research progress update | +| error | โ†’ coordinator | Unrecoverable error | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| ccw cli --tool gemini --mode analysis | Seed analysis | +| mcp__ace-tool__search_context | Codebase semantic search | + +--- + +## Phase 2: Seed Analysis + +**Objective**: Extract structured seed information from the topic/idea. + +**Workflow**: +1. Extract session folder from task description (`Session: `) +2. Parse topic from task description (first non-metadata line) +3. If topic starts with `@` or ends with `.md`/`.txt` โ†’ Read the referenced file as topic content +4. Run Gemini CLI seed analysis: + +``` +Bash({ + command: `ccw cli -p "PURPOSE: Analyze topic and extract structured seed information. +TASK: โ€ข Extract problem statement โ€ข Identify target users โ€ข Determine domain context +โ€ข List constraints and assumptions โ€ข Identify 3-5 exploration dimensions โ€ข Assess complexity +TOPIC: +MODE: analysis +EXPECTED: JSON with: problem_statement, target_users[], domain, constraints[], exploration_dimensions[], complexity_assessment" --tool gemini --mode analysis`, + run_in_background: true +}) +``` + +5. 
Wait for CLI result, parse seed analysis JSON + +**Success**: Seed analysis parsed with problem statement, dimensions, complexity. + +--- + +## Phase 3: Codebase Exploration (conditional) + +**Objective**: Gather codebase context if an existing project is detected. + +| Condition | Action | +|-----------|--------| +| package.json / Cargo.toml / pyproject.toml / go.mod exists | Explore codebase | +| No project files | Skip โ†’ codebase context = null | + +**When project detected**: +1. Report progress: "็งๅญๅˆ†ๆžๅฎŒๆˆ, ๅผ€ๅง‹ไปฃ็ ๅบ“ๆŽข็ดข" +2. ACE semantic search for architecture patterns related to topic +3. Detect tech stack from package files +4. Build codebase context: tech_stack, architecture_patterns, conventions, integration_points + +--- + +## Phase 4: Context Packaging + +**Objective**: Generate spec-config.json and discovery-context.json. + +**spec-config.json** โ†’ `/spec/spec-config.json`: +- session_id, topic, status="research_complete", complexity, depth, focus_areas, mode="interactive" + +**discovery-context.json** โ†’ `/spec/discovery-context.json`: +- session_id, phase=1, seed_analysis (all fields), codebase_context (or null), recommendations + +**Report**: complexity, codebase presence, problem statement, exploration dimensions, output paths. + +**Success**: Both JSON files created, report sent. + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Gemini CLI failure | Fallback to direct Claude analysis | +| Codebase detection failed | Continue as new project | +| Topic too vague | Report with clarification questions | diff --git a/.claude/skills/team-lifecycle-v3/roles/architect/commands/assess.md b/.claude/skills/team-lifecycle-v3/roles/architect/commands/assess.md new file mode 100644 index 00000000..8c6bc967 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/architect/commands/assess.md @@ -0,0 +1,193 @@ +# Command: assess + +## Purpose + +Multi-mode architecture assessment. 
Auto-detects consultation mode from task subject prefix, runs mode-specific analysis, and produces a verdict with concerns and recommendations. + +## Phase 2: Context Loading + +### Common Context (all modes) + +| Input | Source | Required | +|-------|--------|----------| +| Session folder | Task description `Session:` field | Yes | +| Wisdom | `/wisdom/` (all files) | No | +| Project tech | `.workflow/project-tech.json` | No | +| Explorations | `/explorations/` | No | + +### Mode-Specific Context + +| Mode | Task Pattern | Additional Context | +|------|-------------|-------------------| +| spec-review | ARCH-SPEC-* | `spec/architecture/_index.md`, `spec/architecture/ADR-*.md` | +| plan-review | ARCH-PLAN-* | `plan/plan.json`, `plan/.task/TASK-*.json` | +| code-review | ARCH-CODE-* | `git diff --name-only`, changed file contents | +| consult | ARCH-CONSULT-* | Question extracted from task description | +| feasibility | ARCH-FEASIBILITY-* | Proposal from task description, codebase search results | + +## Phase 3: Mode-Specific Assessment + +### Mode: spec-review + +Review architecture documents for technical soundness across 4 dimensions. + +**Assessment dimensions**: + +| Dimension | Weight | Focus | +|-----------|--------|-------| +| Consistency | 25% | ADR decisions align with each other and with architecture index | +| Scalability | 25% | Design supports growth, no single-point bottlenecks | +| Security | 25% | Auth model, data protection, API security addressed | +| Tech fitness | 25% | Technology choices match project-tech.json and problem domain | + +**Checks**: +- Read architecture index and all ADR files +- Cross-reference ADR decisions for contradictions +- Verify tech choices align with project-tech.json +- Score each dimension 0-100 + +--- + +### Mode: plan-review + +Review implementation plan for architectural soundness. 
+ +**Checks**: + +| Check | What | Severity if Failed | +|-------|------|-------------------| +| Dependency cycles | Build task graph, detect cycles via DFS | High | +| Task granularity | Flag tasks touching >8 files | Medium | +| Convention compliance | Verify plan follows wisdom/conventions.md | Medium | +| Architecture alignment | Verify plan doesn't contradict wisdom/decisions.md | High | + +**Dependency cycle detection flow**: +1. Parse all TASK-*.json files -> extract id and depends_on +2. Build directed graph +3. DFS traversal -> flag any node visited twice in same stack +4. Report cycle path if found + +--- + +### Mode: code-review + +Assess architectural impact of code changes. + +**Checks**: + +| Check | Method | Severity if Found | +|-------|--------|-------------------| +| Layer violations | Detect upward imports (deeper layer importing shallower) | High | +| New dependencies | Parse package.json diff for added deps | Medium | +| Module boundary changes | Flag index.ts/index.js modifications | Medium | +| Architectural impact | Score based on file count and boundary changes | Info | + +**Impact scoring**: + +| Condition | Impact Level | +|-----------|-------------| +| Changed files > 10 | High | +| index.ts/index.js or package.json modified | Medium | +| All other cases | Low | + +**Detection example** (find changed files): + +```bash +Bash(command="git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached") +``` + +--- + +### Mode: consult + +Answer architecture decision questions. Route by question complexity. 
+ +**Complexity detection**: + +| Condition | Classification | +|-----------|---------------| +| Question > 200 chars OR matches: architect, design, pattern, refactor, migrate, scalab | Complex | +| All other questions | Simple | + +**Complex questions** -> delegate to CLI exploration: + +```bash +Bash(command="ccw cli -p \"PURPOSE: Architecture consultation for: +TASK: Search codebase for relevant patterns, analyze architectural implications, provide options with trade-offs +MODE: analysis +CONTEXT: @**/* +EXPECTED: Options with trade-offs, file references, architectural implications +CONSTRAINTS: Advisory only, provide options not decisions\" --tool gemini --mode analysis --rule analysis-review-architecture", timeout=300000) +``` + +**Simple questions** -> direct analysis using available context (wisdom, project-tech, codebase search). + +--- + +### Mode: feasibility + +Evaluate technical feasibility of a proposal. + +**Assessment areas**: + +| Area | Method | Output | +|------|--------|--------| +| Tech stack compatibility | Compare proposal needs against project-tech.json | Compatible / Requires additions | +| Codebase readiness | Search for integration points using Grep/Glob | Touch-point count | +| Effort estimation | Based on touch-point count (see table below) | Low / Medium / High | +| Risk assessment | Based on effort + tech compatibility | Risks + mitigations | + +**Effort estimation**: + +| Touch Points | Effort | Implication | +|-------------|--------|-------------| +| <= 5 | Low | Straightforward implementation | +| 6 - 20 | Medium | Moderate refactoring needed | +| > 20 | High | Significant refactoring, consider phasing | + +**Verdict for feasibility**: + +| Condition | Verdict | +|-----------|---------| +| Low/medium effort, compatible stack | FEASIBLE | +| High touch-points OR new tech required | RISKY | +| Fundamental incompatibility or unreasonable effort | INFEASIBLE | + +--- + +### Verdict Routing (all modes except feasibility) + +| Verdict | 
Criteria | +|---------|----------| +| BLOCK | >= 2 high-severity concerns | +| CONCERN | >= 1 high-severity OR >= 3 medium-severity concerns | +| APPROVE | All other cases | + +## Phase 4: Validation + +### Output Format + +Write assessment to `/architecture/arch-.json`. + +**Report content sent to coordinator**: + +| Field | Description | +|-------|-------------| +| mode | Consultation mode used | +| verdict | APPROVE / CONCERN / BLOCK (or FEASIBLE / RISKY / INFEASIBLE) | +| concern_count | Number of concerns by severity | +| recommendations | Actionable suggestions with trade-offs | +| output_path | Path to full assessment file | + +**Wisdom contribution**: Append significant decisions to `/wisdom/decisions.md`. + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Architecture docs not found | Assess from available context, note limitation in report | +| Plan file missing | Report to coordinator via arch_concern | +| Git diff fails (no commits) | Use staged changes or skip code-review mode | +| CLI exploration timeout | Provide partial assessment, flag as incomplete | +| Exploration results unparseable | Fall back to direct analysis without exploration | +| Insufficient context | Request explorer assistance via coordinator | diff --git a/.claude/skills/team-lifecycle-v3/roles/architect/role.md b/.claude/skills/team-lifecycle-v3/roles/architect/role.md new file mode 100644 index 00000000..26970159 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/architect/role.md @@ -0,0 +1,99 @@ +# Role: architect + +Architecture consultant. Advice on decisions, feasibility, design patterns. 
+ +## Identity + +- **Name**: `architect` | **Prefix**: `ARCH-*` | **Tag**: `[architect]` +- **Type**: Consulting (on-demand, advisory only) +- **Responsibility**: Context loading โ†’ Mode detection โ†’ Analysis โ†’ Report + +## Boundaries + +### MUST +- Only process ARCH-* tasks +- Auto-detect mode from task subject prefix +- Provide options with trade-offs (not final decisions) + +### MUST NOT +- Modify source code +- Make final decisions (advisory only) +- Execute implementation or testing + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| arch_ready | โ†’ coordinator | Assessment complete | +| arch_concern | โ†’ coordinator | Significant risk found | +| error | โ†’ coordinator | Analysis failure | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/assess.md | Multi-mode assessment | +| cli-explore-agent | Deep architecture exploration | +| ccw cli --tool gemini --mode analysis | Architecture analysis | + +--- + +## Consultation Modes + +| Task Pattern | Mode | Focus | +|-------------|------|-------| +| ARCH-SPEC-* | spec-review | Review architecture docs | +| ARCH-PLAN-* | plan-review | Review plan soundness | +| ARCH-CODE-* | code-review | Assess code change impact | +| ARCH-CONSULT-* | consult | Answer architecture questions | +| ARCH-FEASIBILITY-* | feasibility | Technical feasibility | + +--- + +## Phase 2: Context Loading + +**Common**: session folder, wisdom, project-tech.json, explorations + +**Mode-specific**: + +| Mode | Additional Context | +|------|-------------------| +| spec-review | architecture/_index.md, ADR-*.md | +| plan-review | plan/plan.json | +| code-review | git diff, changed files | +| consult | Question from task description | +| feasibility | Requirements + codebase | + +--- + +## Phase 3: Assessment + +Delegate to `commands/assess.md`. Output: mode, verdict (APPROVE/CONCERN/BLOCK), dimensions[], concerns[], recommendations[]. 
+ +For complex questions → Gemini CLI with architecture review rule. + +--- + +## Phase 4: Report + +Output to `<session>/architecture/arch-<task-id>.json`. Contribute decisions to wisdom/decisions.md. + +**Report**: mode, verdict, concern count, recommendations, output path. + +--- + +## Coordinator Integration + +| Timing | Task | +|--------|------| +| After DRAFT-003 | ARCH-SPEC-001: 架构文档评审 | +| After PLAN-001 | ARCH-PLAN-001: 计划架构审查 | +| On-demand | ARCH-CONSULT-001: 架构咨询 | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Docs not found | Assess from available context | +| CLI timeout | Partial assessment | +| Insufficient context | Request explorer via coordinator | diff --git a/.claude/skills/team-lifecycle-v3/roles/coordinator/role.md b/.claude/skills/team-lifecycle-v3/roles/coordinator/role.md new file mode 100644 index 00000000..a69de4f7 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/coordinator/role.md @@ -0,0 +1,174 @@ +# Coordinator Role + +Orchestrate the team-lifecycle workflow: team creation, task dispatching, progress monitoring, session state. + +## Identity + +- **Name**: `coordinator` | **Tag**: `[coordinator]` +- **Responsibility**: Parse requirements → Create team → Dispatch tasks → Monitor progress → Report results + +## Boundaries + +### MUST +- Parse user requirements and clarify ambiguous inputs via AskUserQuestion +- Create team and spawn worker subagents in background +- Dispatch tasks with proper dependency chains (see SKILL.md Task Metadata Registry) +- Monitor progress via worker callbacks and route messages +- Maintain session state persistence (team-session.json) + +### MUST NOT +- Execute spec/impl/research work directly (delegate to workers) +- Modify task outputs (workers own their deliverables) +- Call implementation subagents (code-developer, etc.) 
directly +- Skip dependency validation when creating task chains + +--- + +## Entry Router + +When coordinator is invoked, first detect the invocation type: + +| Detection | Condition | Handler | +|-----------|-----------|---------| +| Worker callback | Message contains `[role-name]` tag from a known worker role | โ†’ handleCallback: auto-advance pipeline | +| Status check | Arguments contain "check" or "status" | โ†’ handleCheck: output execution graph, no advancement | +| Manual resume | Arguments contain "resume" or "continue" | โ†’ handleResume: check worker states, advance pipeline | +| New session | None of the above | โ†’ Phase 0 (Session Resume Check) | + +For callback/check/resume: load `commands/monitor.md` and execute the appropriate handler, then STOP. + +--- + +## Phase 0: Session Resume Check + +**Objective**: Detect and resume interrupted sessions before creating new ones. + +**Workflow**: +1. Scan `.workflow/.team/TLS-*/team-session.json` for sessions with status "active" or "paused" +2. No sessions found โ†’ proceed to Phase 1 +3. Single session found โ†’ resume it (โ†’ Session Reconciliation) +4. Multiple sessions โ†’ AskUserQuestion for user selection + +**Session Reconciliation**: +1. Audit TaskList โ†’ get real status of all tasks +2. Reconcile: session.completed_tasks โ†” TaskList status (bidirectional sync) +3. Reset any in_progress tasks โ†’ pending (they were interrupted) +4. Determine remaining pipeline from reconciled state +5. Rebuild team if disbanded (TeamCreate + spawn needed workers only) +6. Create missing tasks with correct blockedBy dependencies +7. Verify dependency chain integrity +8. Update session file with reconciled state +9. Kick first executable task's worker โ†’ Phase 4 + +--- + +## Phase 1: Requirement Clarification + +**Objective**: Parse user input and gather execution parameters. + +**Workflow**: + +1. **Parse arguments** for explicit settings: mode, scope, focus areas, depth + +2. 
**Ask for missing parameters** via AskUserQuestion: + - Mode: spec-only / impl-only / full-lifecycle / fe-only / fullstack / full-lifecycle-fe + - Scope: project description + - Execution method: sequential / parallel + +3. **Frontend auto-detection** (for impl-only and full-lifecycle modes): + + | Signal | Detection | Pipeline Upgrade | + |--------|----------|-----------------| + | FE keywords (component, page, UI, React, Vue, CSS...) | Keyword match in description | impl-only โ†’ fe-only or fullstack | + | BE keywords also present (API, database, server...) | Both FE + BE keywords | impl-only โ†’ fullstack | + | FE framework in package.json | grep react/vue/svelte/next | full-lifecycle โ†’ full-lifecycle-fe | + +4. **Store requirements**: mode, scope, focus, depth, executionMethod + +**Success**: All parameters captured, mode finalized. + +--- + +## Phase 2: Create Team + Initialize Session + +**Objective**: Initialize team, session file, and wisdom directory. + +**Workflow**: +1. Generate session ID: `TLS--` +2. Create session folder: `.workflow/.team//` +3. Call TeamCreate with team name +4. Initialize wisdom directory (learnings.md, decisions.md, conventions.md, issues.md) +5. Write team-session.json with: session_id, mode, scope, status="active", tasks_total, tasks_completed=0 + +**Task counts by mode**: + +| Mode | Tasks | +|------|-------| +| spec-only | 12 | +| impl-only | 4 | +| fe-only | 3 | +| fullstack | 6 | +| full-lifecycle | 16 | +| full-lifecycle-fe | 18 | + +**Success**: Team created, session file written, wisdom initialized. + +--- + +## Phase 3: Create Task Chain + +**Objective**: Dispatch tasks based on mode with proper dependencies. + +Delegate to `commands/dispatch.md` which creates the full task chain: +1. Reads SKILL.md Task Metadata Registry for task definitions +2. Creates tasks via TaskCreate with correct blockedBy +3. Assigns owner based on role mapping +4. 
Includes `Session: ` in every task description + +--- + +## Phase 4: Spawn-and-Stop + +**Objective**: Spawn first batch of ready workers in background, then STOP. + +**Design**: Spawn-and-Stop + Callback pattern. +- Spawn workers with `Task(run_in_background: true)` โ†’ immediately return +- Worker completes โ†’ SendMessage callback โ†’ auto-advance +- User can use "check" / "resume" to manually advance +- Coordinator does one operation per invocation, then STOPS + +**Workflow**: +1. Load `commands/monitor.md` +2. Find tasks with: status=pending, blockedBy all resolved, owner assigned +3. For each ready task โ†’ spawn worker (see SKILL.md Spawn Template) +4. Output status summary +5. STOP + +**Pipeline advancement** driven by three wake sources: +- Worker callback (automatic) โ†’ Entry Router โ†’ handleCallback +- User "check" โ†’ handleCheck (status only) +- User "resume" โ†’ handleResume (advance) + +--- + +## Phase 5: Report + Next Steps + +**Objective**: Completion report and follow-up options. + +**Workflow**: +1. Load session state โ†’ count completed tasks, duration +2. List deliverables with output paths +3. Update session status โ†’ "completed" +4. 
Offer next steps: 退出 / 查看产物 / 扩展任务 / 生成 lite-plan / 创建 Issue + +--- + +## Error Handling + +| Error | Resolution | +|-------|------------| +| Task timeout | Log, mark failed, ask user to retry or skip | +| Worker crash | Respawn worker, reassign task | +| Dependency cycle | Detect, report to user, halt | +| Invalid mode | Reject with error, ask to clarify | +| Session corruption | Attempt recovery, fallback to manual reconciliation | diff --git a/.claude/skills/team-lifecycle-v3/roles/discussant/role.md b/.claude/skills/team-lifecycle-v3/roles/discussant/role.md new file mode 100644 index 00000000..65720c62 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/discussant/role.md @@ -0,0 +1,117 @@ +# Role: discussant + +Multi-perspective critique, consensus building, and conflict escalation. Ensures quality feedback between each phase transition. + +## Identity + +- **Name**: `discussant` | **Prefix**: `DISCUSS-*` | **Tag**: `[discussant]` +- **Responsibility**: Load Artifact → Multi-Perspective Critique → Synthesize Consensus → Report + +## Boundaries + +### MUST +- Only process DISCUSS-* tasks +- Execute multi-perspective critique via CLI tools +- Detect coverage gaps from coverage perspective +- Synthesize consensus with convergent/divergent analysis +- Write discussion records to `discussions/` folder + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify spec documents directly +- Skip perspectives defined in round config +- Ignore critical divergences + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| discussion_ready | → coordinator | Consensus reached | +| discussion_blocked | → coordinator | Cannot reach consensus | +| error | → coordinator | Input artifact missing | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/critique.md | Multi-perspective CLI critique | +| gemini CLI | Product, Risk, Coverage perspectives | +| codex CLI | 
Technical perspective | +| claude CLI | Quality perspective | + +--- + +## Perspective Model + +| Perspective | Focus | CLI Tool | +|-------------|-------|----------| +| Product | Market fit, user value, business viability | gemini | +| Technical | Feasibility, tech debt, performance, security | codex | +| Quality | Completeness, testability, consistency | claude | +| Risk | Risk identification, dependency analysis, failure modes | gemini | +| Coverage | Requirement completeness vs original intent, gap detection | gemini | + +## Round Configuration + +| Round | Artifact | Perspectives | Focus | +|-------|----------|-------------|-------| +| DISCUSS-001 | spec/discovery-context.json | product, risk, coverage | Scope confirmation | +| DISCUSS-002 | spec/product-brief.md | product, technical, quality, coverage | Positioning, feasibility | +| DISCUSS-003 | spec/requirements/_index.md | quality, product, coverage | Completeness, priority | +| DISCUSS-004 | spec/architecture/_index.md | technical, risk | Tech choices, security | +| DISCUSS-005 | spec/epics/_index.md | product, technical, quality, coverage | MVP scope, estimation | +| DISCUSS-006 | spec/readiness-report.md | all 5 | Final sign-off | + +--- + +## Phase 2: Artifact Loading + +**Objective**: Load target artifact and determine discussion parameters. + +**Workflow**: +1. Extract session folder and round number from task subject (`DISCUSS-`) +2. Look up round config from table above +3. Load target artifact from `/` +4. Create `/discussions/` directory +5. Load prior discussion records for continuity + +--- + +## Phase 3: Multi-Perspective Critique + +**Objective**: Run parallel CLI analyses from each required perspective. + +Delegate to `commands/critique.md` -- launches parallel CLI calls per perspective with focused prompts and designated tools. + +--- + +## Phase 4: Consensus Synthesis + +**Objective**: Synthesize into consensus with actionable outcomes. + +**Synthesis process**: +1. 
Extract convergent themes (agreed by 2+ perspectives) +2. Extract divergent views (conflicting perspectives, with severity) +3. Check coverage gaps from coverage perspective +4. Compile action items and open questions + +**Consensus routing**: + +| Condition | Status | Report | +|-----------|--------|--------| +| No high-severity divergences | consensus_reached | Action items, open questions, record path | +| Any high-severity divergences | consensus_blocked | Escalate divergence points to coordinator | + +Write discussion record to `/discussions/`. + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Target artifact not found | Notify coordinator | +| CLI perspective failure | Fallback to direct Claude analysis | +| All CLI analyses fail | Generate basic discussion from direct reading | +| All perspectives diverge | Escalate as discussion_blocked | diff --git a/.claude/skills/team-lifecycle-v3/roles/executor/role.md b/.claude/skills/team-lifecycle-v3/roles/executor/role.md new file mode 100644 index 00000000..0cd3296f --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/executor/role.md @@ -0,0 +1,103 @@ +# Role: executor + +Code implementation following approved plans. Multi-backend execution with self-validation. 
+ +## Identity + +- **Name**: `executor` | **Prefix**: `IMPL-*` | **Tag**: `[executor]` +- **Responsibility**: Load plan โ†’ Route to backend โ†’ Implement โ†’ Self-validate โ†’ Report + +## Boundaries + +### MUST +- Only process IMPL-* tasks +- Follow approved plan exactly +- Use declared execution backends +- Self-validate all implementations + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify plan files +- Skip self-validation + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| impl_complete | โ†’ coordinator | Implementation success | +| impl_progress | โ†’ coordinator | Batch progress | +| error | โ†’ coordinator | Implementation failure | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/implement.md | Multi-backend implementation | +| code-developer agent | Simple tasks (synchronous) | +| ccw cli --tool codex --mode write | Complex tasks | +| ccw cli --tool gemini --mode write | Alternative backend | + +--- + +## Phase 2: Task & Plan Loading + +**Objective**: Load plan and determine execution strategy. + +1. Load plan.json and .task/TASK-*.json from `/plan/` + +**Backend selection** (priority order): + +| Priority | Source | Method | +|----------|--------|--------| +| 1 | Task metadata | task.metadata.executor field | +| 2 | Plan default | "Execution Backend:" in plan | +| 3 | Auto-select | Simple (< 200 chars, no refactor) โ†’ agent; Complex โ†’ codex | + +**Code review selection**: + +| Priority | Source | Method | +|----------|--------|--------| +| 1 | Task metadata | task.metadata.code_review field | +| 2 | Plan default | "Code Review:" in plan | +| 3 | Auto-select | Critical keywords (auth, security, payment) โ†’ enabled | + +--- + +## Phase 3: Code Implementation + +**Objective**: Execute implementation across batches. + +**Batching**: Topological sort by IMPL task dependencies โ†’ sequential batches. 
+ +Delegate to `commands/implement.md` for prompt building and backend routing: + +| Backend | Invocation | Use Case | +|---------|-----------|----------| +| agent | Task({ subagent_type: "code-developer", run_in_background: false }) | Simple, direct edits | +| codex | ccw cli --tool codex --mode write (background) | Complex, architecture | +| gemini | ccw cli --tool gemini --mode write (background) | Analysis-heavy | + +--- + +## Phase 4: Self-Validation + +| Step | Method | Pass Criteria | +|------|--------|--------------| +| Syntax check | `tsc --noEmit` (30s) | Exit code 0 | +| Acceptance criteria | Match criteria keywords vs implementation | All addressed | +| Test detection | Find .test.ts/.spec.ts for modified files | Tests identified | +| Code review (optional) | gemini analysis or codex review | No blocking issues | + +**Report**: task ID, status, files modified, validation results, backend used. + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Syntax errors | Retry with error context (max 3) | +| Missing dependencies | Request from coordinator | +| Backend unavailable | Fallback to agent | +| Circular dependencies | Abort, report graph | diff --git a/.claude/skills/team-lifecycle-v3/roles/explorer/role.md b/.claude/skills/team-lifecycle-v3/roles/explorer/role.md new file mode 100644 index 00000000..ab069ad8 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/explorer/role.md @@ -0,0 +1,91 @@ +# Role: explorer + +Code search, pattern discovery, dependency tracing. Service role, on-demand. 
+ +## Identity + +- **Name**: `explorer` | **Prefix**: `EXPLORE-*` | **Tag**: `[explorer]` +- **Type**: Service (on-demand, not on main pipeline) +- **Responsibility**: Parse request โ†’ Multi-strategy search โ†’ Package results + +## Boundaries + +### MUST +- Only process EXPLORE-* tasks +- Output structured JSON +- Cache results in `/explorations/` + +### MUST NOT +- Create tasks or modify source code +- Execute analysis, planning, or implementation +- Make architectural decisions (only discover patterns) + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| explore_ready | โ†’ coordinator | Search complete | +| task_failed | โ†’ coordinator | Search failure | + +## Search Tools (priority order) + +| Tool | Priority | Use Case | +|------|----------|----------| +| mcp__ace-tool__search_context | P0 | Semantic search | +| Grep / Glob | P1 | Pattern matching | +| cli-explore-agent | Deep | Multi-angle exploration | +| WebSearch | P3 | External docs | + +--- + +## Phase 2: Request Parsing + +Parse from task description: + +| Field | Pattern | Default | +|-------|---------|---------| +| Session | `Session: ` | .workflow/.tmp | +| Mode | `Mode: codebase\|external\|hybrid` | codebase | +| Angles | `Angles: ` | general | +| Keywords | `Keywords: ` | from subject | +| Requester | `Requester: ` | coordinator | + +--- + +## Phase 3: Multi-Strategy Search + +Execute strategies in priority order, accumulating findings: + +1. **ACE (P0)**: Per keyword โ†’ semantic search โ†’ relevant_files +2. **Grep (P1)**: Per keyword โ†’ class/function/export definitions โ†’ relevant_files +3. **Dependency trace**: Top 10 files โ†’ Read imports โ†’ dependencies +4. **Deep exploration** (multi-angle): Per angle โ†’ cli-explore-agent โ†’ merge +5. **External (P3)** (external/hybrid mode): Top 3 keywords โ†’ WebSearch + +Deduplicate by path. 
+ +--- + +## Phase 4: Package Results + +Write JSON to `<session>/explorations/explore-<topic>.json`: +- relevant_files[], patterns[], dependencies[], external_refs[], _metadata + +**Report**: file count, pattern count, top files, output path. + +--- + +## Coordinator Integration + +| Trigger | Example Task | +|---------|-------------| +| RESEARCH needs context | EXPLORE-001: 代码库搜索 | +| PLAN needs exploration | EXPLORE-002: 实现代码探索 | +| DISCUSS needs practices | EXPLORE-003: 外部文档 | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| ACE unavailable | Fallback to Grep | +| No results | Report empty, suggest broader keywords | diff --git a/.claude/skills/team-lifecycle-v3/roles/fe-developer/role.md b/.claude/skills/team-lifecycle-v3/roles/fe-developer/role.md new file mode 100644 index 00000000..b23f4f1d --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/fe-developer/role.md @@ -0,0 +1,111 @@ +# Role: fe-developer + +Frontend development. Consumes plan/architecture output, implements components, pages, styles. 
+ +## Identity + +- **Name**: `fe-developer` | **Prefix**: `DEV-FE-*` | **Tag**: `[fe-developer]` +- **Type**: Frontend pipeline worker +- **Responsibility**: Context loading โ†’ Design token consumption โ†’ Component implementation โ†’ Report + +## Boundaries + +### MUST +- Only process DEV-FE-* tasks +- Follow existing design tokens and component specs (if available) +- Generate accessible frontend code (semantic HTML, ARIA, keyboard nav) +- Follow project's frontend tech stack + +### MUST NOT +- Modify backend code or API interfaces +- Contact other workers directly +- Introduce new frontend dependencies without architecture review + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| dev_fe_complete | โ†’ coordinator | Implementation done | +| dev_fe_progress | โ†’ coordinator | Long task progress | +| error | โ†’ coordinator | Implementation failure | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| code-developer agent | Component implementation | +| ccw cli --tool gemini --mode write | Complex frontend generation | + +--- + +## Phase 2: Context Loading + +**Inputs to load**: +- Plan: `/plan/plan.json` +- Design tokens: `/architecture/design-tokens.json` (optional) +- Design intelligence: `/analysis/design-intelligence.json` (optional) +- Component specs: `/architecture/component-specs/*.md` (optional) +- Shared memory, wisdom + +**Tech stack detection**: + +| Signal | Framework | Styling | +|--------|-----------|---------| +| react/react-dom in deps | react | - | +| vue in deps | vue | - | +| next in deps | nextjs | - | +| tailwindcss in deps | - | tailwind | +| @shadcn/ui in deps | - | shadcn | + +--- + +## Phase 3: Frontend Implementation + +**Step 1**: Generate design token CSS (if tokens available) +- Convert design-tokens.json โ†’ CSS custom properties (`:root { --color-*, --space-*, --text-* }`) +- Include dark mode overrides via `@media (prefers-color-scheme: dark)` +- Write to `src/styles/tokens.css` + 
+**Step 2**: Implement components + +| Task Size | Strategy | +|-----------|----------| +| Simple (โ‰ค 3 files, single component) | code-developer agent (synchronous) | +| Complex (system, multi-component) | ccw cli --tool gemini --mode write (background) | + +**Coding standards** (include in agent/CLI prompt): +- Use design token CSS variables, never hardcode colors/spacing +- Interactive elements: cursor: pointer +- Transitions: 150-300ms +- Text contrast: minimum 4.5:1 +- Include focus-visible styles +- Support prefers-reduced-motion +- Responsive: mobile-first +- No emoji as functional icons + +--- + +## Phase 4: Self-Validation + +| Check | What | +|-------|------| +| hardcoded-color | No #hex outside tokens.css | +| cursor-pointer | Interactive elements have cursor: pointer | +| focus-styles | Interactive elements have focus styles | +| responsive | Has responsive breakpoints | +| reduced-motion | Animations respect prefers-reduced-motion | +| emoji-icon | No emoji as functional icons | + +Contribute to wisdom/conventions.md. Update shared-memory.json with component inventory. + +**Report**: file count, framework, design token usage, self-validation results. + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Design tokens not found | Use project defaults | +| Tech stack undetected | Default HTML + CSS | +| Subagent failure | Fallback to CLI write mode | diff --git a/.claude/skills/team-lifecycle-v3/roles/fe-qa/commands/pre-delivery-checklist.md b/.claude/skills/team-lifecycle-v3/roles/fe-qa/commands/pre-delivery-checklist.md new file mode 100644 index 00000000..c742380d --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/fe-qa/commands/pre-delivery-checklist.md @@ -0,0 +1,152 @@ +# Command: pre-delivery-checklist + +## Purpose + +CSS-level pre-delivery checks for frontend files. Validates accessibility, interaction, design compliance, and layout patterns before final delivery. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Changed frontend files | git diff --name-only (filtered to .tsx, .jsx, .css, .scss) | Yes | +| File contents | Read each changed file | Yes | +| Design tokens path | `src/styles/tokens.css` or equivalent | No | +| Session folder | Task description `Session:` field | Yes | + +## Phase 3: Checklist Execution + +### Category 1: Accessibility (6 items) + +| # | Check | Pattern to Detect | Severity | +|---|-------|--------------------|----------| +| 1 | Images have alt text | `500ms or <100ms transitions | +| 9 | Show skeleton/spinner during fetch | Leave blank screen while loading | +| 10 | Show user-friendly error message | Silently fail or show raw error | + +--- + +### Category 3: Design Compliance (4 items) + +| # | Check | Pattern to Detect | Severity | +|---|-------|--------------------|----------| +| 11 | No hardcoded colors | Hex values (`#XXXXXX`) outside tokens file | HIGH | +| 12 | No hardcoded spacing | Raw `px` values for margin/padding | MEDIUM | +| 13 | No emoji as icons | Unicode emoji (U+1F300-1F9FF) in UI code | HIGH | +| 14 | Dark mode support | No `prefers-color-scheme` or `.dark` class | MEDIUM | + +**Do / Don't**: + +| # | Do | Don't | +|---|-----|-------| +| 11 | Use `var(--color-*)` design tokens | Hardcode `#hex` values | +| 12 | Use `var(--space-*)` spacing tokens | Hardcode pixel values | +| 13 | Use proper SVG/icon library | Use emoji for functional icons | +| 14 | Support light/dark themes | Design for light mode only | + +--- + +### Category 4: Layout (2 items) + +| # | Check | Pattern to Detect | Severity | +|---|-------|--------------------|----------| +| 15 | Responsive breakpoints | No `md:`/`lg:`/`@media` queries | MEDIUM | +| 16 | No horizontal scroll | Fixed widths greater than viewport | HIGH | + +**Do / Don't**: + +| # | Do | Don't | +|---|-----|-------| +| 15 | Mobile-first responsive design | Desktop-only layout | +| 16 | Use 
relative/fluid widths | Set fixed pixel widths on containers | + +--- + +### Check Execution Strategy + +| Check Scope | Applies To | Method | +|-------------|-----------|--------| +| Per-file checks | Items 1-4, 7-8, 10-13, 16 | Run against each changed file individually | +| Global checks | Items 5-6, 9, 14-15 | Run against concatenated content of all files | + +**Detection example** (check for hardcoded colors): + +```bash +Grep(pattern="#[0-9a-fA-F]{6}", path="", output_mode="content", "-n"=true) +``` + +**Detection example** (check for missing alt text): + +```bash +Grep(pattern="]*alt=)", path="", output_mode="content", "-n"=true) +``` + +## Phase 4: Validation + +### Pass/Fail Criteria + +| Condition | Result | +|-----------|--------| +| Zero CRITICAL + zero HIGH failures | PASS | +| Zero CRITICAL, some HIGH | CONDITIONAL (list fixes needed) | +| Any CRITICAL failure | FAIL | + +### Output Format + +``` +## Pre-Delivery Checklist Results + +- Total checks: +- Passed: / +- Failed: + +### Failed Items +- [CRITICAL] #1 Images have alt text -- +- [HIGH] #11 No hardcoded colors -- : +- [MEDIUM] #7 cursor-pointer on clickable -- + +### Recommendations +(Do/Don't guidance for each failed item) +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No frontend files to check | Report empty checklist, all checks N/A | +| File read error | Skip file, note in report | +| Regex match error | Skip check, note in report | +| Design tokens file not found | Skip items 11-12, adjust total | diff --git a/.claude/skills/team-lifecycle-v3/roles/fe-qa/role.md b/.claude/skills/team-lifecycle-v3/roles/fe-qa/role.md new file mode 100644 index 00000000..8e91131c --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/fe-qa/role.md @@ -0,0 +1,113 @@ +# Role: fe-qa + +Frontend quality assurance. 5-dimension review + Generator-Critic loop. 
+ +## Identity + +- **Name**: `fe-qa` | **Prefix**: `QA-FE-*` | **Tag**: `[fe-qa]` +- **Type**: Frontend pipeline worker +- **Responsibility**: Context loading โ†’ 5-dimension review โ†’ GC feedback โ†’ Report + +## Boundaries + +### MUST +- Only process QA-FE-* tasks +- Execute 5-dimension review +- Support Generator-Critic loop (max 2 rounds) +- Provide actionable fix suggestions (Do/Don't format) + +### MUST NOT +- Modify source code directly (review only) +- Contact other workers directly +- Mark pass when score below threshold + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| qa_fe_passed | โ†’ coordinator | All dimensions pass | +| qa_fe_result | โ†’ coordinator | Review complete (may have issues) | +| fix_required | โ†’ coordinator | Critical issues found | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/pre-delivery-checklist.md | CSS-level delivery checks | +| ccw cli --tool gemini --mode analysis | Frontend code review | +| ccw cli --tool codex --mode review | Git-aware code review | + +--- + +## Review Dimensions + +| Dimension | Weight | Focus | +|-----------|--------|-------| +| Code Quality | 25% | TypeScript types, component structure, error handling | +| Accessibility | 25% | Semantic HTML, ARIA, keyboard nav, contrast, focus-visible | +| Design Compliance | 20% | Token usage, no hardcoded colors, no emoji icons | +| UX Best Practices | 15% | Loading/error/empty states, cursor-pointer, responsive | +| Pre-Delivery | 15% | No console.log, dark mode, i18n readiness | + +--- + +## Phase 2: Context Loading + +**Inputs**: design tokens, design intelligence, shared memory, previous QA results (for GC round tracking), changed frontend files via git diff. + +Determine GC round from previous QA results count. Max 2 rounds. + +--- + +## Phase 3: 5-Dimension Review + +For each changed frontend file, check against all 5 dimensions. Score each dimension 0-10, deducting for issues found. 
+ +**Scoring deductions**: + +| Severity | Deduction | +|----------|-----------| +| High | -2 to -3 | +| Medium | -1 to -1.5 | +| Low | -0.5 | + +**Overall score** = weighted sum of dimension scores. + +**Verdict routing**: + +| Condition | Verdict | +|-----------|---------| +| Score โ‰ฅ 8 AND no critical issues | PASS | +| GC round โ‰ฅ max AND score โ‰ฅ 6 | PASS_WITH_WARNINGS | +| GC round โ‰ฅ max AND score < 6 | FAIL | +| Otherwise | NEEDS_FIX | + +--- + +## Phase 4: Report + +Write audit to `/qa/audit-fe--r.json`. Update wisdom and shared memory. + +**Report**: round, verdict, overall score, dimension scores, critical issues with Do/Don't format, action required (if NEEDS_FIX). + +--- + +## Generator-Critic Loop + +Orchestrated by coordinator: +``` +Round 1: DEV-FE-001 โ†’ QA-FE-001 + if NEEDS_FIX โ†’ coordinator creates DEV-FE-002 + QA-FE-002 +Round 2: DEV-FE-002 โ†’ QA-FE-002 + if still NEEDS_FIX โ†’ PASS_WITH_WARNINGS or FAIL (max 2) +``` + +**Convergence**: score โ‰ฅ 8 AND critical_count = 0 + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No changed files | Report empty, score N/A | +| Design tokens not found | Skip design compliance, adjust weights | +| Max GC rounds exceeded | Force verdict | diff --git a/.claude/skills/team-lifecycle-v3/roles/planner/role.md b/.claude/skills/team-lifecycle-v3/roles/planner/role.md new file mode 100644 index 00000000..7d42a980 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/planner/role.md @@ -0,0 +1,120 @@ +# Role: planner + +Multi-angle code exploration and structured implementation planning. Submits plans to coordinator for approval. 
+ +## Identity + +- **Name**: `planner` | **Prefix**: `PLAN-*` | **Tag**: `[planner]` +- **Responsibility**: Complexity assessment โ†’ Code exploration โ†’ Plan generation โ†’ Approval + +## Boundaries + +### MUST +- Only process PLAN-* tasks +- Assess complexity before planning +- Execute multi-angle exploration for Medium/High complexity +- Generate plan.json + .task/TASK-*.json +- Load spec context in full-lifecycle mode +- Submit plan for coordinator approval + +### MUST NOT +- Create tasks for other roles +- Implement code +- Modify spec documents +- Skip complexity assessment + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| plan_ready | โ†’ coordinator | Plan complete | +| plan_revision | โ†’ coordinator | Plan revised per feedback | +| error | โ†’ coordinator | Exploration or planning failure | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/explore.md | Multi-angle codebase exploration | +| cli-explore-agent | Per-angle exploration | +| cli-lite-planning-agent | Plan generation | + +--- + +## Phase 1.5: Load Spec Context (Full-Lifecycle) + +If `/spec/` exists โ†’ load requirements/_index.md, architecture/_index.md, epics/_index.md, spec-config.json. Otherwise โ†’ impl-only mode. + +--- + +## Phase 2: Multi-Angle Exploration + +**Objective**: Explore codebase to inform planning. + +**Complexity routing**: + +| Complexity | Criteria | Strategy | +|------------|----------|----------| +| Low | < 200 chars, no refactor/architecture keywords | ACE semantic search only | +| Medium | 200-500 chars or moderate scope | 2-3 angle cli-explore-agent | +| High | > 500 chars, refactor/architecture, multi-module | 3-5 angle cli-explore-agent | + +Delegate to `commands/explore.md` for angle selection and parallel execution. + +--- + +## Phase 3: Plan Generation + +**Objective**: Generate structured implementation plan. 
+ +| Complexity | Strategy | +|------------|----------| +| Low | Direct planning โ†’ single TASK-001 with plan.json | +| Medium/High | cli-lite-planning-agent with exploration results | + +**Agent call** (Medium/High): + +``` +Task({ + subagent_type: "cli-lite-planning-agent", + run_in_background: false, + description: "Generate implementation plan", + prompt: "Generate plan. +Output: /plan.json + /.task/TASK-*.json +Schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json +Task: +Explorations: +Complexity: +Requirements: 2-7 tasks with id, title, files[].change, convergence.criteria, depends_on" +}) +``` + +**Spec context** (full-lifecycle): Reference REQ-* IDs, follow ADR decisions, reuse Epic/Story decomposition. + +--- + +## Phase 4: Submit for Approval + +1. Read plan.json and TASK-*.json +2. Report to coordinator: complexity, task count, task list, approach, plan location +3. Wait for response: approved โ†’ complete; revision โ†’ update and resubmit + +**Session files**: +``` +/plan/ +โ”œโ”€โ”€ exploration-.json +โ”œโ”€โ”€ explorations-manifest.json +โ”œโ”€โ”€ plan.json +โ””โ”€โ”€ .task/TASK-*.json +``` + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Exploration agent failure | Plan from description only | +| Planning agent failure | Fallback to direct planning | +| Plan rejected 3+ times | Notify coordinator, suggest alternative | +| Schema not found | Use basic structure | diff --git a/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/code-review.md b/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/code-review.md new file mode 100644 index 00000000..874c4f98 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/code-review.md @@ -0,0 +1,163 @@ +# Command: code-review + +## Purpose + +4-dimension code review analyzing quality, security, architecture, and requirements compliance. Produces a verdict (BLOCK/CONDITIONAL/APPROVE) with categorized findings. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Plan file | `/plan/plan.json` | Yes | +| Git diff | `git diff HEAD~1` or `git diff --cached` | Yes | +| Modified files | From git diff --name-only | Yes | +| Test results | Tester output (if available) | No | +| Wisdom | `/wisdom/` | No | + +## Phase 3: 4-Dimension Review + +### Dimension Overview + +| Dimension | Focus | Weight | +|-----------|-------|--------| +| Quality | Code correctness, type safety, clean code | Equal | +| Security | Vulnerability patterns, secret exposure | Equal | +| Architecture | Module structure, coupling, file size | Equal | +| Requirements | Acceptance criteria coverage, completeness | Equal | + +--- + +### Dimension 1: Quality + +Scan each modified file for quality anti-patterns. + +| Severity | Pattern | What to Detect | +|----------|---------|----------------| +| Critical | Empty catch blocks | `catch(e) {}` with no handling | +| High | @ts-ignore without justification | Suppression comment < 10 chars explanation | +| High | `any` type in public APIs | `any` outside comments and generic definitions | +| High | console.log in production | `console.(log\|debug\|info)` outside test files | +| Medium | Magic numbers | Numeric literals > 1 digit, not in const/comment | +| Medium | Duplicate code | Identical lines (>30 chars) appearing 3+ times | + +**Detection example** (Grep for console statements): + +```bash +Grep(pattern="console\\.(log|debug|info)", path="", output_mode="content", "-n"=true) +``` + +--- + +### Dimension 2: Security + +Scan for vulnerability patterns across all modified files. 
+ +| Severity | Pattern | What to Detect | +|----------|---------|----------------| +| Critical | Hardcoded secrets | `api_key=`, `password=`, `secret=`, `token=` with string values (20+ chars) | +| Critical | SQL injection | String concatenation in `query()`/`execute()` calls | +| High | eval/exec usage | `eval()`, `new Function()`, `setTimeout(string)` | +| High | XSS vectors | `innerHTML`, `dangerouslySetInnerHTML` | +| Medium | Insecure random | `Math.random()` in security context (token/key/password/session) | +| Low | Missing input validation | Functions with parameters but no validation in first 5 lines | + +--- + +### Dimension 3: Architecture + +Assess structural health of modified files. + +| Severity | Pattern | What to Detect | +|----------|---------|----------------| +| Critical | Circular dependencies | File A imports B, B imports A | +| High | Excessive parent imports | Import traverses >2 parent directories (`../../../`) | +| Medium | Large files | Files exceeding 500 lines | +| Medium | Tight coupling | >5 imports from same base module | +| Medium | Long functions | Functions exceeding 50 lines | +| Medium | Module boundary changes | Modifications to index.ts/index.js files | + +**Detection example** (check for deep parent imports): + +```bash +Grep(pattern="from\\s+['\"](\\.\\./){3,}", path="", output_mode="content", "-n"=true) +``` + +--- + +### Dimension 4: Requirements + +Verify implementation against plan acceptance criteria. + +| Severity | Check | Method | +|----------|-------|--------| +| High | Unmet acceptance criteria | Extract criteria from plan, check keyword overlap (threshold: 70%) | +| High | Missing error handling | Plan mentions "error handling" but no try/catch in code | +| Medium | Partially met criteria | Keyword overlap 40-69% | +| Medium | Missing tests | Plan mentions "test" but no test files in modified set | + +**Verification flow**: +1. Read plan file โ†’ extract acceptance criteria section +2. 
For each criterion โ†’ extract keywords (4+ char meaningful words) +3. Search modified files for keyword matches +4. Score: >= 70% match = met, 40-69% = partial, < 40% = unmet + +--- + +### Verdict Routing + +| Verdict | Criteria | Action | +|---------|----------|--------| +| BLOCK | Any critical-severity issues found | Must fix before merge | +| CONDITIONAL | High or medium issues, no critical | Should address, can merge with tracking | +| APPROVE | Only low issues or none | Ready to merge | + +## Phase 4: Validation + +### Report Format + +The review report follows this structure: + +``` +# Code Review Report + +**Verdict**: + +## Blocking Issues (if BLOCK) +- **** (:): + +## Review Dimensions + +### Quality Issues +**CRITICAL** () +- (:) + +### Security Issues +(same format per severity) + +### Architecture Issues +(same format per severity) + +### Requirements Issues +(same format per severity) + +## Recommendations +1. +``` + +### Summary Counts + +| Field | Description | +|-------|-------------| +| Total issues | Sum across all dimensions and severities | +| Critical count | Must be 0 for APPROVE | +| Blocking issues | Listed explicitly in report header | +| Dimensions covered | Must be 4/4 | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Plan file not found | Skip requirements dimension, note in report | +| Git diff empty | Report no changes to review | +| File read fails | Skip file, note in report | +| No modified files | Report empty review | diff --git a/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/spec-quality.md b/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/spec-quality.md new file mode 100644 index 00000000..5172de88 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/reviewer/commands/spec-quality.md @@ -0,0 +1,201 @@ +# Command: spec-quality + +## Purpose + +5-dimension spec quality check with weighted scoring, quality gate determination, and readiness report generation. 
+ +## Phase 2: Context Loading + +| Input | Source | Required | +|-------|--------|----------| +| Spec documents | `/spec/` (all .md files) | Yes | +| Original requirements | Product brief objectives section | Yes | +| Quality gate config | specs/quality-gates.md | No | +| Session folder | Task description `Session:` field | Yes | + +**Spec document phases** (matched by filename/directory): + +| Phase | Expected Path | +|-------|--------------| +| product-brief | spec/product-brief.md | +| prd | spec/requirements/*.md | +| architecture | spec/architecture/_index.md + ADR-*.md | +| user-stories | spec/epics/*.md | +| implementation-plan | plan/plan.json | +| test-strategy | spec/test-strategy.md | + +## Phase 3: 5-Dimension Scoring + +### Dimension Weights + +| Dimension | Weight | Focus | +|-----------|--------|-------| +| Completeness | 25% | All required sections present with substance | +| Consistency | 20% | Terminology, format, references, naming | +| Traceability | 25% | Goals -> Reqs -> Components -> Stories chain | +| Depth | 20% | AC testable, ADRs justified, stories estimable | +| Coverage | 10% | Original requirements mapped to spec | + +--- + +### Dimension 1: Completeness (25%) + +Check each spec document for required sections. 
+ +**Required sections by phase**: + +| Phase | Required Sections | +|-------|------------------| +| product-brief | Vision Statement, Problem Statement, Target Audience, Success Metrics, Constraints | +| prd | Goals, Requirements, User Stories, Acceptance Criteria, Non-Functional Requirements | +| architecture | System Overview, Component Design, Data Models, API Specifications, Technology Stack | +| user-stories | Story List, Acceptance Criteria, Priority, Estimation | +| implementation-plan | Task Breakdown, Dependencies, Timeline, Resource Allocation | +| test-strategy | Test Scope, Test Cases, Coverage Goals, Test Environment | + +**Scoring formula**: +- Section present: 50% credit +- Section has substantial content (>100 chars beyond header): additional 50% credit +- Per-document score = (present_ratio * 50) + (substantial_ratio * 50) +- Overall = average across all documents + +--- + +### Dimension 2: Consistency (20%) + +Check cross-document consistency across four areas. + +| Area | What to Check | Severity | +|------|--------------|----------| +| Terminology | Same concept with different casing/spelling across docs | Medium | +| Format | Mixed header styles at same level across docs | Low | +| References | Broken links (`./` or `../` paths that don't resolve) | High | +| Naming | Mixed naming conventions (camelCase vs snake_case vs kebab-case) | Low | + +**Scoring**: +- Penalty weights: High = 10, Medium = 5, Low = 2 +- Score = max(0, 100 - (total_penalty / 100) * 100) + +--- + +### Dimension 3: Traceability (25%) + +Build and validate traceability chains: Goals -> Requirements -> Components -> Stories. + +**Chain building flow**: +1. Extract goals from product-brief (pattern: `- Goal: `) +2. Extract requirements from PRD (pattern: `- REQ-NNN: `) +3. Extract components from architecture (pattern: `- Component: `) +4. Extract stories from user-stories (pattern: `- US-NNN: `) +5. 
Link by keyword overlap (threshold: 30% keyword match) + +**Chain completeness**: A chain is complete when a goal links to at least one requirement, one component, and one story. + +**Scoring**: (complete chains / total chains) * 100 + +**Weak link identification**: For each incomplete chain, report which link is missing (no requirements, no components, or no stories). + +--- + +### Dimension 4: Depth (20%) + +Assess the analytical depth of spec content across four sub-dimensions. + +| Sub-dimension | Source | Testable Criteria | +|---------------|--------|-------------------| +| AC Testability | PRD / User Stories | Contains measurable verbs (display, return, validate) or Given/When/Then or numbers | +| ADR Justification | Architecture | Contains rationale, alternatives, consequences, or trade-offs | +| Story Estimability | User Stories | Has "As a/I want/So that" + AC, or explicit estimate | +| Technical Detail | Architecture + Plan | Contains code blocks, API terms, HTTP methods, DB terms | + +**Scoring**: Average of sub-dimension scores (each 0-100%) + +--- + +### Dimension 5: Coverage (10%) + +Map original requirements to spec requirements. + +**Flow**: +1. Extract original requirements from product-brief objectives section +2. Extract spec requirements from all documents (pattern: `- REQ-NNN:` or `- Requirement:` or `- Feature:`) +3. For each original requirement, check keyword overlap with any spec requirement (threshold: 40%) +4. 
Score = (covered_count / total_original) * 100 + +--- + +### Quality Gate Decision Table + +| Gate | Criteria | Message | +|------|----------|---------| +| PASS | Overall score >= 80% AND coverage >= 70% | Ready for implementation | +| FAIL | Overall score < 60% OR coverage < 50% | Major revisions required | +| REVIEW | All other cases | Improvements needed, may proceed with caution | + +## Phase 4: Validation + +### Readiness Report Format + +Write to `/spec/readiness-report.md`: + +``` +# Specification Readiness Report + +**Generated**: +**Overall Score**: % +**Quality Gate**: - +**Recommended Action**: + +## Dimension Scores + +| Dimension | Score | Weight | Weighted Score | +|-----------|-------|--------|----------------| +| Completeness | % | 25% | % | +| Consistency | % | 20% | % | +| Traceability | % | 25% | % | +| Depth | % | 20% | % | +| Coverage | % | 10% | % | + +## Completeness Analysis +(per-phase breakdown: sections present/expected, missing sections) + +## Consistency Analysis +(issues by area: terminology, format, references, naming) + +## Traceability Analysis +(complete chains / total, weak links) + +## Depth Analysis +(per sub-dimension scores) + +## Requirement Coverage +(covered / total, uncovered requirements list) +``` + +### Spec Summary Format + +Write to `/spec/spec-summary.md`: + +``` +# Specification Summary + +**Overall Quality Score**: % +**Quality Gate**: + +## Documents Reviewed +(per document: phase, path, size, section list) + +## Key Findings +### Strengths (dimensions scoring >= 80%) +### Areas for Improvement (dimensions scoring < 70%) +### Recommendations +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Spec folder empty | FAIL gate, report no documents found | +| Missing phase document | Score 0 for that phase in completeness, note in report | +| No original requirements found | Score coverage at 100% (nothing to cover) | +| Broken references | Flag in consistency, do not fail entire review 
| diff --git a/.claude/skills/team-lifecycle-v3/roles/reviewer/role.md b/.claude/skills/team-lifecycle-v3/roles/reviewer/role.md new file mode 100644 index 00000000..3f7456f9 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/reviewer/role.md @@ -0,0 +1,104 @@ +# Role: reviewer + +Dual-mode review: code review (REVIEW-*) and spec quality validation (QUALITY-*). Auto-switches by task prefix. + +## Identity + +- **Name**: `reviewer` | **Prefix**: `REVIEW-*` + `QUALITY-*` | **Tag**: `[reviewer]` +- **Responsibility**: Branch by Prefix โ†’ Review/Score โ†’ Report + +## Boundaries + +### MUST +- Process REVIEW-* and QUALITY-* tasks +- Generate readiness-report.md for QUALITY tasks +- Cover all required dimensions per mode + +### MUST NOT +- Create tasks +- Modify source code +- Skip quality dimensions +- Approve without verification + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| review_result | โ†’ coordinator | Code review complete | +| quality_result | โ†’ coordinator | Spec quality complete | +| fix_required | โ†’ coordinator | Critical issues found | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/code-review.md | 4-dimension code review | +| commands/spec-quality.md | 5-dimension spec quality | + +--- + +## Mode Detection + +| Task Prefix | Mode | Dimensions | +|-------------|------|-----------| +| REVIEW-* | Code Review | quality, security, architecture, requirements | +| QUALITY-* | Spec Quality | completeness, consistency, traceability, depth, coverage | + +--- + +## Code Review (REVIEW-*) + +**Inputs**: Plan file, git diff, modified files, test results (if available) + +**4 dimensions** (delegate to commands/code-review.md): + +| Dimension | Critical Issues | +|-----------|----------------| +| Quality | Empty catch, any in public APIs, @ts-ignore, console.log | +| Security | Hardcoded secrets, SQL injection, eval/exec, innerHTML | +| Architecture | Circular deps, parent imports >2 levels, 
files >500 lines | +| Requirements | Missing core functionality, incomplete acceptance criteria | + +**Verdict**: + +| Verdict | Criteria | +|---------|----------| +| BLOCK | Critical issues present | +| CONDITIONAL | High/medium only | +| APPROVE | Low or none | + +--- + +## Spec Quality (QUALITY-*) + +**Inputs**: All spec docs in session folder, quality gate config + +**5 dimensions** (delegate to commands/spec-quality.md): + +| Dimension | Weight | Focus | +|-----------|--------|-------| +| Completeness | 25% | All sections present with substance | +| Consistency | 20% | Terminology, format, references | +| Traceability | 25% | Goals โ†’ Reqs โ†’ Arch โ†’ Stories chain | +| Depth | 20% | AC testable, ADRs justified, stories estimable | +| Coverage | 10% | Original requirements mapped | + +**Quality gate**: + +| Gate | Criteria | +|------|----------| +| PASS | Score โ‰ฅ 80% AND coverage โ‰ฅ 70% | +| REVIEW | Score 60-79% OR coverage 50-69% | +| FAIL | Score < 60% OR coverage < 50% | + +**Artifacts**: readiness-report.md + spec-summary.md + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Missing context | Request from coordinator | +| Invalid mode | Abort with error | +| Analysis failure | Retry, then fallback template | diff --git a/.claude/skills/team-lifecycle-v3/roles/tester/commands/validate.md b/.claude/skills/team-lifecycle-v3/roles/tester/commands/validate.md new file mode 100644 index 00000000..8016d78a --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/tester/commands/validate.md @@ -0,0 +1,152 @@ +# Command: validate + +## Purpose + +Test-fix cycle with strategy engine: detect framework, run tests, classify failures, select fix strategy, iterate until pass rate target is met or max iterations exhausted. 
+ +## Constants + +| Constant | Value | Description | +|----------|-------|-------------| +| MAX_ITERATIONS | 10 | Maximum test-fix cycle attempts | +| PASS_RATE_TARGET | 95% | Minimum pass rate to succeed | +| AFFECTED_TESTS_FIRST | true | Run affected tests before full suite | + +## Phase 2: Context Loading + +Load from task description and executor output: + +| Input | Source | Required | +|-------|--------|----------| +| Framework | Auto-detected (see below) | Yes | +| Modified files | Executor task output / git diff | Yes | +| Affected tests | Derived from modified files | No | +| Session folder | Task description `Session:` field | Yes | +| Wisdom | `/wisdom/` | No | + +**Framework detection** (priority order): + +| Priority | Method | Check | +|----------|--------|-------| +| 1 | package.json devDependencies | vitest, jest, mocha, pytest | +| 2 | package.json scripts.test | Command contains framework name | +| 3 | Config file existence | vitest.config.*, jest.config.*, pytest.ini | + +**Affected test discovery** from modified files: +- For each modified file `.`, search: + `.test.ts`, `.spec.ts`, `tests/.test.ts`, `__tests__/.test.ts` + +## Phase 3: Test-Fix Cycle + +### Test Command Table + +| Framework | Affected Tests | Full Suite | +|-----------|---------------|------------| +| vitest | `vitest run --reporter=verbose` | `vitest run --reporter=verbose` | +| jest | `jest --no-coverage --verbose` | `jest --no-coverage --verbose` | +| mocha | `mocha --reporter spec` | `mocha --reporter spec` | +| pytest | `pytest -v --tb=short` | `pytest -v --tb=short` | + +### Iteration Flow + +``` +Iteration 1 + โ”œโ”€ Run affected tests (or full suite if none) + โ”œโ”€ Parse results โ†’ pass rate + โ”œโ”€ Pass rate >= 95%? 
+ โ”‚ โ”œโ”€ YES + affected-only โ†’ run full suite to confirm + โ”‚ โ”‚ โ”œโ”€ Full suite passes โ†’ SUCCESS + โ”‚ โ”‚ โ””โ”€ Full suite fails โ†’ continue with full results + โ”‚ โ””โ”€ YES + full suite โ†’ SUCCESS + โ””โ”€ NO โ†’ classify failures โ†’ select strategy โ†’ apply fixes + +Iteration 2..10 + โ”œโ”€ Re-run tests + โ”œโ”€ Track best pass rate across iterations + โ”œโ”€ Pass rate >= 95% โ†’ SUCCESS + โ”œโ”€ No failures to fix โ†’ STOP (anomaly) + โ””โ”€ Failures remain โ†’ classify โ†’ select strategy โ†’ apply fixes + +After iteration 10 + โ””โ”€ FAIL: max iterations reached, report best pass rate +``` + +**Progress update**: When iteration > 5, send progress to coordinator with current pass rate and iteration count. + +### Strategy Selection Matrix + +| Condition | Strategy | Behavior | +|-----------|----------|----------| +| Iteration <= 3 OR pass rate >= 80% | Conservative | Fix one failure at a time, highest severity first | +| Critical failures exist AND count < 5 | Surgical | Identify common error pattern, fix all matching occurrences | +| Pass rate < 50% OR iteration > 7 | Aggressive | Fix all critical + high failures in batch | +| Default (no other match) | Conservative | Safe fallback | + +### Failure Classification Table + +| Severity | Error Patterns | +|----------|---------------| +| Critical | SyntaxError, cannot find module, is not defined | +| High | Assertion mismatch (expected/received), toBe/toEqual failures | +| Medium | Timeout, async errors | +| Low | Warnings, deprecation notices | + +### Fix Approach by Error Type + +| Error Type | Pattern | Fix Approach | +|------------|---------|-------------| +| missing_import | "Cannot find module ''" | Add import statement, resolve relative path from modified files | +| undefined_variable | " is not defined" | Check source for renamed/moved exports, update reference | +| assertion_mismatch | "Expected: X, Received: Y" | Read test file at failure line, update expected value if behavior change 
is intentional | +| timeout | "Timeout" | Increase timeout or add async/await | +| syntax_error | "SyntaxError" | Read source at error line, fix syntax | + +### Tool Call Example + +Run tests with framework-appropriate command: + +```bash +Bash(command="vitest run src/utils/__tests__/parser.test.ts --reporter=verbose", timeout=120000) +``` + +Read test file to analyze failure: + +```bash +Read(file_path="") +``` + +Apply fix via Edit: + +```bash +Edit(file_path="", old_string="", new_string="") +``` + +## Phase 4: Validation + +### Success Criteria + +| Check | Criteria | Required | +|-------|----------|----------| +| Pass rate | >= 95% | Yes | +| Full suite run | At least one full suite pass | Yes | +| No critical failures | Zero critical-severity failures remaining | Yes | +| Best pass rate tracked | Reported in final result | Yes | + +### Result Routing + +| Outcome | Message Type | Content | +|---------|-------------|---------| +| Pass rate >= target | test_result | Success, iterations count, full suite confirmed | +| Max iterations, pass rate < target | fix_required | Best pass rate, remaining failures, iteration count | +| No tests found | error | Framework detected but no test files | +| Framework not detected | error | Detection methods exhausted | + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Framework not detected | Report error to coordinator, list detection attempts | +| No test files found | Report to coordinator, suggest manual test path | +| Test command fails (exit code != 0/1) | Check stderr for environment issues, retry once | +| Fix application fails | Skip fix, try next iteration with different strategy | +| Infinite loop (same failures repeat) | Abort after 3 identical result sets | diff --git a/.claude/skills/team-lifecycle-v3/roles/tester/role.md b/.claude/skills/team-lifecycle-v3/roles/tester/role.md new file mode 100644 index 00000000..afdfd887 --- /dev/null +++ 
b/.claude/skills/team-lifecycle-v3/roles/tester/role.md @@ -0,0 +1,108 @@ +# Role: tester + +Adaptive test execution with fix cycles and quality gates. + +## Identity + +- **Name**: `tester` | **Prefix**: `TEST-*` | **Tag**: `[tester]` +- **Responsibility**: Detect Framework โ†’ Run Tests โ†’ Fix Cycle โ†’ Report + +## Boundaries + +### MUST +- Only process TEST-* tasks +- Detect test framework before running +- Run affected tests before full suite +- Use strategy engine for fix cycles + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify production code beyond test fixes +- Skip framework detection + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| test_result | โ†’ coordinator | Tests pass or final result | +| fix_required | โ†’ coordinator | Failures after max iterations | +| error | โ†’ coordinator | Framework not detected | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/validate.md | Test-fix cycle with strategy engine | + +--- + +## Phase 2: Framework Detection & Test Discovery + +**Framework detection** (priority order): + +| Priority | Method | Frameworks | +|----------|--------|-----------| +| 1 | package.json devDependencies | vitest, jest, mocha, pytest | +| 2 | package.json scripts.test | vitest, jest, mocha, pytest | +| 3 | Config files | vitest.config.*, jest.config.*, pytest.ini | + +**Affected test discovery** from executor's modified files: +- Search variants: `.test.ts`, `.spec.ts`, `tests/.test.ts`, `__tests__/.test.ts` + +--- + +## Phase 3: Test Execution & Fix Cycle + +**Config**: MAX_ITERATIONS=10, PASS_RATE_TARGET=95%, AFFECTED_TESTS_FIRST=true + +Delegate to `commands/validate.md`: +1. Run affected tests โ†’ parse results +2. Pass rate met โ†’ run full suite +3. 
Failures โ†’ select strategy โ†’ fix โ†’ re-run โ†’ repeat + +**Strategy selection**: + +| Condition | Strategy | Behavior | +|-----------|----------|----------| +| Iteration โ‰ค 3 or pass โ‰ฅ 80% | Conservative | Fix one critical failure at a time | +| Critical failures < 5 | Surgical | Fix specific pattern everywhere | +| Pass < 50% or iteration > 7 | Aggressive | Fix all failures in batch | + +**Test commands**: + +| Framework | Affected | Full Suite | +|-----------|---------|------------| +| vitest | `vitest run ` | `vitest run` | +| jest | `jest --no-coverage` | `jest --no-coverage` | +| pytest | `pytest -v` | `pytest -v` | + +--- + +## Phase 4: Result Analysis + +**Failure classification**: + +| Severity | Patterns | +|----------|----------| +| Critical | SyntaxError, cannot find module, undefined | +| High | Assertion failures, toBe/toEqual | +| Medium | Timeout, async errors | +| Low | Warnings, deprecations | + +**Report routing**: + +| Condition | Type | +|-----------|------| +| Pass rate โ‰ฅ target | test_result (success) | +| Pass rate < target after max iterations | fix_required | + +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Framework not detected | Prompt user | +| No tests found | Report to coordinator | +| Infinite fix loop | Abort after MAX_ITERATIONS | diff --git a/.claude/skills/team-lifecycle-v3/roles/writer/role.md b/.claude/skills/team-lifecycle-v3/roles/writer/role.md new file mode 100644 index 00000000..ef5b157c --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/roles/writer/role.md @@ -0,0 +1,96 @@ +# Role: writer + +Product Brief, Requirements/PRD, Architecture, and Epics & Stories document generation. Maps to spec-generator Phases 2-5. 
+ +## Identity + +- **Name**: `writer` | **Prefix**: `DRAFT-*` | **Tag**: `[writer]` +- **Responsibility**: Load Context โ†’ Generate Document โ†’ Incorporate Feedback โ†’ Report + +## Boundaries + +### MUST +- Only process DRAFT-* tasks +- Read templates before generating (from `../../templates/`) +- Follow document-standards.md (from `../../specs/`) +- Integrate discussion feedback when available +- Generate proper YAML frontmatter + +### MUST NOT +- Create tasks for other roles +- Skip template loading +- Modify discussion records + +## Message Types + +| Type | Direction | Trigger | +|------|-----------|---------| +| draft_ready | โ†’ coordinator | Document writing complete | +| draft_revision | โ†’ coordinator | Document revised per feedback | +| error | โ†’ coordinator | Template missing, insufficient context | + +## Toolbox + +| Tool | Purpose | +|------|---------| +| commands/generate-doc.md | Multi-CLI document generation | +| gemini, codex, claude CLI | Multi-perspective content generation | + +--- + +## Phase 2: Context & Discussion Loading + +**Objective**: Load all required inputs for document generation. 
+ +**Document type routing**: + +| Task Subject Contains | Doc Type | Template | Discussion Input | +|----------------------|----------|----------|-----------------| +| Product Brief | product-brief | templates/product-brief.md | discuss-001-scope.md | +| Requirements / PRD | requirements | templates/requirements-prd.md | discuss-002-brief.md | +| Architecture | architecture | templates/architecture-doc.md | discuss-003-requirements.md | +| Epics | epics | templates/epics-template.md | discuss-004-architecture.md | + +**Progressive dependency loading**: + +| Doc Type | Requires | +|----------|----------| +| product-brief | discovery-context.json | +| requirements | + product-brief.md | +| architecture | + requirements/_index.md | +| epics | + architecture/_index.md | + +**Success**: Template loaded, discussion feedback loaded (if exists), prior docs loaded. + +--- + +## Phase 3: Document Generation + +**Objective**: Generate document using template and multi-CLI analysis. + +Delegate to `commands/generate-doc.md` with: doc type, session folder, spec config, discussion feedback, prior docs. + +--- + +## Phase 4: Self-Validation + +**Objective**: Verify document meets standards. + +| Check | What to Verify | +|-------|---------------| +| has_frontmatter | Starts with YAML frontmatter | +| sections_complete | All template sections present | +| cross_references | session_id included | +| discussion_integrated | Reflects feedback (if exists) | + +**Report**: doc type, validation status, summary, output path. 
+ +--- + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Prior doc not found | Notify coordinator, request prerequisite | +| CLI failure | Retry with fallback tool | +| Discussion contradicts prior docs | Note conflict, flag for next discussion | diff --git a/.claude/skills/team-lifecycle-v3/specs/document-standards.md b/.claude/skills/team-lifecycle-v3/specs/document-standards.md new file mode 100644 index 00000000..2820cd98 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/specs/document-standards.md @@ -0,0 +1,192 @@ +# Document Standards + +Defines format conventions, YAML frontmatter schema, naming rules, and content structure for all spec-generator outputs. + +## When to Use + +| Phase | Usage | Section | +|-------|-------|---------| +| All Phases | Frontmatter format | YAML Frontmatter Schema | +| All Phases | File naming | Naming Conventions | +| Phase 2-5 | Document structure | Content Structure | +| Phase 6 | Validation reference | All sections | + +--- + +## YAML Frontmatter Schema + +Every generated document MUST begin with YAML frontmatter: + +```yaml +--- +session_id: SPEC-{slug}-{YYYY-MM-DD} +phase: {1-6} +document_type: {product-brief|requirements|architecture|epics|readiness-report|spec-summary} +status: draft|review|complete +generated_at: {ISO8601 timestamp} +stepsCompleted: [] +version: 1 +dependencies: + - {list of input documents used} +--- +``` + +### Field Definitions + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `session_id` | string | Yes | Session identifier matching spec-config.json | +| `phase` | number | Yes | Phase number that generated this document (1-6) | +| `document_type` | string | Yes | One of: product-brief, requirements, architecture, epics, readiness-report, spec-summary | +| `status` | enum | Yes | draft (initial), review (user reviewed), complete (finalized) | +| `generated_at` | string | Yes | ISO8601 timestamp of generation | +| 
`stepsCompleted` | array | Yes | List of step IDs completed during generation | +| `version` | number | Yes | Document version, incremented on re-generation | +| `dependencies` | array | No | List of input files this document depends on | + +### Status Transitions + +``` +draft -> review -> complete + | ^ + +-------------------+ (direct promotion in auto mode) +``` + +- **draft**: Initial generation, not yet user-reviewed +- **review**: User has reviewed and provided feedback +- **complete**: Finalized, ready for downstream consumption + +In auto mode (`-y`), documents are promoted directly from `draft` to `complete`. + +--- + +## Naming Conventions + +### Session ID Format + +``` +SPEC-{slug}-{YYYY-MM-DD} +``` + +- **slug**: Lowercase, alphanumeric + Chinese characters, hyphens as separators, max 40 chars +- **date**: UTC+8 date in YYYY-MM-DD format + +Examples: +- `SPEC-task-management-system-2026-02-11` +- `SPEC-user-auth-oauth-2026-02-11` + +### Output Files + +| File | Phase | Description | +|------|-------|-------------| +| `spec-config.json` | 1 | Session configuration and state | +| `discovery-context.json` | 1 | Codebase exploration results (optional) | +| `product-brief.md` | 2 | Product brief document | +| `requirements.md` | 3 | PRD document | +| `architecture.md` | 4 | Architecture decisions document | +| `epics.md` | 5 | Epic/Story breakdown document | +| `readiness-report.md` | 6 | Quality validation report | +| `spec-summary.md` | 6 | One-page executive summary | + +### Output Directory + +``` +.workflow/.spec/{session-id}/ +``` + +--- + +## Content Structure + +### Heading Hierarchy + +- `#` (H1): Document title only (one per document) +- `##` (H2): Major sections +- `###` (H3): Subsections +- `####` (H4): Detail items (use sparingly) + +Maximum depth: 4 levels. Prefer flat structures. + +### Section Ordering + +Every document follows this general pattern: + +1. **YAML Frontmatter** (mandatory) +2. **Title** (H1) +3. 
**Executive Summary** (2-3 sentences) +4. **Core Content Sections** (H2, document-specific) +5. **Open Questions / Risks** (if applicable) +6. **References / Traceability** (links to upstream/downstream docs) + +### Formatting Rules + +| Element | Format | Example | +|---------|--------|---------| +| Requirements | `REQ-{NNN}` prefix | REQ-001: User login | +| Acceptance criteria | Checkbox list | `- [ ] User can log in with email` | +| Architecture decisions | `ADR-{NNN}` prefix | ADR-001: Use PostgreSQL | +| Epics | `EPIC-{NNN}` prefix | EPIC-001: Authentication | +| Stories | `STORY-{EPIC}-{NNN}` prefix | STORY-001-001: Login form | +| Priority tags | MoSCoW labels | `[Must]`, `[Should]`, `[Could]`, `[Won't]` | +| Mermaid diagrams | Fenced code blocks | ````mermaid ... ``` `` | +| Code examples | Language-tagged blocks | ````typescript ... ``` `` | + +### Cross-Reference Format + +Use relative references between documents: + +```markdown +See [Product Brief](product-brief.md#section-name) for details. +Derived from [REQ-001](requirements.md#req-001). 
+``` + +### Language + +- Document body: Follow user's input language (Chinese or English) +- Technical identifiers: Always English (REQ-001, ADR-001, EPIC-001) +- YAML frontmatter keys: Always English + +--- + +## spec-config.json Schema + +```json +{ + "session_id": "string (required)", + "seed_input": "string (required) - original user input", + "input_type": "text|file (required)", + "timestamp": "ISO8601 (required)", + "mode": "interactive|auto (required)", + "complexity": "simple|moderate|complex (required)", + "depth": "light|standard|comprehensive (required)", + "focus_areas": ["string array"], + "seed_analysis": { + "problem_statement": "string", + "target_users": ["string array"], + "domain": "string", + "constraints": ["string array"], + "dimensions": ["string array - 3-5 exploration dimensions"] + }, + "has_codebase": "boolean", + "phasesCompleted": [ + { + "phase": "number (1-6)", + "name": "string (phase name)", + "output_file": "string (primary output file)", + "completed_at": "ISO8601" + } + ] +} +``` + +--- + +## Validation Checklist + +- [ ] Every document starts with valid YAML frontmatter +- [ ] `session_id` matches across all documents in a session +- [ ] `status` field reflects current document state +- [ ] All cross-references resolve to valid targets +- [ ] Heading hierarchy is correct (no skipped levels) +- [ ] Technical identifiers use correct prefixes +- [ ] Output files are in the correct directory diff --git a/.claude/skills/team-lifecycle-v3/specs/quality-gates.md b/.claude/skills/team-lifecycle-v3/specs/quality-gates.md new file mode 100644 index 00000000..ae968436 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/specs/quality-gates.md @@ -0,0 +1,207 @@ +# Quality Gates + +Per-phase quality gate criteria and scoring dimensions for spec-generator outputs. 
+ +## When to Use + +| Phase | Usage | Section | +|-------|-------|---------| +| Phase 2-5 | Post-generation self-check | Per-Phase Gates | +| Phase 6 | Cross-document validation | Cross-Document Validation | +| Phase 6 | Final scoring | Scoring Dimensions | + +--- + +## Quality Thresholds + +| Gate | Score | Action | +|------|-------|--------| +| **Pass** | >= 80% | Continue to next phase | +| **Review** | 60-79% | Log warnings, continue with caveats | +| **Fail** | < 60% | Must address issues before continuing | + +In auto mode (`-y`), Review-level issues are logged but do not block progress. + +--- + +## Scoring Dimensions + +### 1. Completeness (25%) + +All required sections present with substantive content. + +| Score | Criteria | +|-------|----------| +| 100% | All template sections filled with detailed content | +| 75% | All sections present, some lack detail | +| 50% | Major sections present but minor sections missing | +| 25% | Multiple major sections missing or empty | +| 0% | Document is a skeleton only | + +### 2. Consistency (25%) + +Terminology, formatting, and references are uniform across documents. + +| Score | Criteria | +|-------|----------| +| 100% | All terms consistent, all references valid, formatting uniform | +| 75% | Minor terminology variations, all references valid | +| 50% | Some inconsistent terms, 1-2 broken references | +| 25% | Frequent inconsistencies, multiple broken references | +| 0% | Documents contradict each other | + +### 3. Traceability (25%) + +Requirements, architecture decisions, and stories trace back to goals. + +| Score | Criteria | +|-------|----------| +| 100% | Every story traces to a requirement, every requirement traces to a goal | +| 75% | Most items traceable, few orphans | +| 50% | Partial traceability, some disconnected items | +| 25% | Weak traceability, many orphan items | +| 0% | No traceability between documents | + +### 4. Depth (25%) + +Content provides sufficient detail for execution teams. 
+ +| Score | Criteria | +|-------|----------| +| 100% | Acceptance criteria specific and testable, architecture decisions justified, stories estimable | +| 75% | Most items detailed enough, few vague areas | +| 50% | Mix of detailed and vague content | +| 25% | Mostly high-level, lacking actionable detail | +| 0% | Too abstract for execution | + +--- + +## Per-Phase Quality Gates + +### Phase 1: Discovery + +| Check | Criteria | Severity | +|-------|----------|----------| +| Session ID valid | Matches `SPEC-{slug}-{date}` format | Error | +| Problem statement exists | Non-empty, >= 20 characters | Error | +| Target users identified | >= 1 user group | Error | +| Dimensions generated | 3-5 exploration dimensions | Warning | +| Constraints listed | >= 0 (can be empty with justification) | Info | + +### Phase 2: Product Brief + +| Check | Criteria | Severity | +|-------|----------|----------| +| Vision statement | Clear, 1-3 sentences | Error | +| Problem statement | Specific and measurable | Error | +| Target users | >= 1 persona with needs described | Error | +| Goals defined | >= 2 measurable goals | Error | +| Success metrics | >= 2 quantifiable metrics | Warning | +| Scope boundaries | In-scope and out-of-scope listed | Warning | +| Multi-perspective | >= 2 CLI perspectives synthesized | Info | + +### Phase 3: Requirements (PRD) + +| Check | Criteria | Severity | +|-------|----------|----------| +| Functional requirements | >= 3 with REQ-NNN IDs | Error | +| Acceptance criteria | Every requirement has >= 1 criterion | Error | +| MoSCoW priority | Every requirement tagged | Error | +| Non-functional requirements | >= 1 (performance, security, etc.) 
| Warning | +| User stories | >= 1 per Must-have requirement | Warning | +| Traceability | Requirements trace to product brief goals | Warning | + +### Phase 4: Architecture + +| Check | Criteria | Severity | +|-------|----------|----------| +| Component diagram | Present (Mermaid or ASCII) | Error | +| Tech stack specified | Languages, frameworks, key libraries | Error | +| ADR present | >= 1 Architecture Decision Record | Error | +| ADR has alternatives | Each ADR lists >= 2 options considered | Warning | +| Integration points | External systems/APIs identified | Warning | +| Data model | Key entities and relationships described | Warning | +| Codebase mapping | Mapped to existing code (if has_codebase) | Info | + +### Phase 5: Epics & Stories + +| Check | Criteria | Severity | +|-------|----------|----------| +| Epics defined | 3-7 epics with EPIC-NNN IDs | Error | +| MVP subset | >= 1 epic tagged as MVP | Error | +| Stories per epic | 2-5 stories per epic | Error | +| Story format | "As a...I want...So that..." 
pattern | Warning | +| Dependency map | Cross-epic dependencies documented | Warning | +| Estimation hints | Relative sizing (S/M/L/XL) per story | Info | +| Traceability | Stories trace to requirements | Warning | + +### Phase 6: Readiness Check + +| Check | Criteria | Severity | +|-------|----------|----------| +| All documents exist | product-brief, requirements, architecture, epics | Error | +| Frontmatter valid | All YAML frontmatter parseable and correct | Error | +| Cross-references valid | All document links resolve | Error | +| Overall score >= 60% | Weighted average across 4 dimensions | Error | +| No unresolved Errors | All Error-severity issues addressed | Error | +| Summary generated | spec-summary.md created | Warning | + +--- + +## Cross-Document Validation + +Checks performed during Phase 6 across all documents: + +### Completeness Matrix + +``` +Product Brief goals -> Requirements (each goal has >= 1 requirement) +Requirements -> Architecture (each Must requirement has design coverage) +Requirements -> Epics (each Must requirement appears in >= 1 story) +Architecture ADRs -> Epics (tech choices reflected in implementation stories) +``` + +### Consistency Checks + +| Check | Documents | Rule | +|-------|-----------|------| +| Terminology | All | Same term used consistently (no synonyms for same concept) | +| User personas | Brief + PRD + Epics | Same user names/roles throughout | +| Scope | Brief + PRD | PRD scope does not exceed brief scope | +| Tech stack | Architecture + Epics | Stories reference correct technologies | + +### Traceability Matrix Format + +```markdown +| Goal | Requirements | Architecture | Epics | +|------|-------------|--------------|-------| +| G-001: ... | REQ-001, REQ-002 | ADR-001 | EPIC-001 | +| G-002: ... 
| REQ-003 | ADR-002 | EPIC-002, EPIC-003 | +``` + +--- + +## Issue Classification + +### Error (Must Fix) + +- Missing required document or section +- Broken cross-references +- Contradictory information between documents +- Empty acceptance criteria on Must-have requirements +- No MVP subset defined in epics + +### Warning (Should Fix) + +- Vague acceptance criteria +- Missing non-functional requirements +- No success metrics defined +- Incomplete traceability +- Missing architecture review notes + +### Info (Nice to Have) + +- Could add more detailed personas +- Consider additional ADR alternatives +- Story estimation hints missing +- Mermaid diagrams could be more detailed diff --git a/.claude/skills/team-lifecycle-v3/specs/team-config.json b/.claude/skills/team-lifecycle-v3/specs/team-config.json new file mode 100644 index 00000000..306db7c0 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/specs/team-config.json @@ -0,0 +1,156 @@ +{ + "team_name": "team-lifecycle", + "team_display_name": "Team Lifecycle", + "description": "Unified team skill covering spec-to-dev-to-test full lifecycle", + "version": "2.0.0", + "architecture": "folder-based", + "role_structure": "roles/{name}/role.md + roles/{name}/commands/*.md", + + "roles": { + "coordinator": { + "task_prefix": null, + "responsibility": "Pipeline orchestration, requirement clarification, task chain creation, message dispatch", + "message_types": ["plan_approved", "plan_revision", "task_unblocked", "fix_required", "error", "shutdown"] + }, + "analyst": { + "task_prefix": "RESEARCH", + "responsibility": "Seed analysis, codebase exploration, multi-dimensional context gathering", + "message_types": ["research_ready", "research_progress", "error"] + }, + "writer": { + "task_prefix": "DRAFT", + "responsibility": "Product Brief / PRD / Architecture / Epics document generation", + "message_types": ["draft_ready", "draft_revision", "impl_progress", "error"] + }, + "discussant": { + "task_prefix": "DISCUSS", + 
"responsibility": "Multi-perspective critique, consensus building, conflict escalation", + "message_types": ["discussion_ready", "discussion_blocked", "impl_progress", "error"] + }, + "planner": { + "task_prefix": "PLAN", + "responsibility": "Multi-angle code exploration, structured implementation planning", + "message_types": ["plan_ready", "plan_revision", "impl_progress", "error"] + }, + "executor": { + "task_prefix": "IMPL", + "responsibility": "Code implementation following approved plans", + "message_types": ["impl_complete", "impl_progress", "error"] + }, + "tester": { + "task_prefix": "TEST", + "responsibility": "Adaptive test-fix cycles, progressive testing, quality gates", + "message_types": ["test_result", "impl_progress", "fix_required", "error"] + }, + "reviewer": { + "task_prefix": "REVIEW", + "additional_prefixes": ["QUALITY"], + "responsibility": "Code review (REVIEW-*) + Spec quality validation (QUALITY-*)", + "message_types": ["review_result", "quality_result", "fix_required", "error"] + }, + "explorer": { + "task_prefix": "EXPLORE", + "responsibility": "Code search, pattern discovery, dependency tracing. Service role โ€” on-demand by coordinator", + "role_type": "service", + "message_types": ["explore_ready", "explore_progress", "task_failed"] + }, + "architect": { + "task_prefix": "ARCH", + "responsibility": "Architecture assessment, tech feasibility, design pattern review. 
Consulting role โ€” on-demand by coordinator", + "role_type": "consulting", + "consultation_modes": ["spec-review", "plan-review", "code-review", "consult", "feasibility"], + "message_types": ["arch_ready", "arch_concern", "arch_progress", "error"] + }, + "fe-developer": { + "task_prefix": "DEV-FE", + "responsibility": "Frontend component/page implementation, design token consumption, responsive UI", + "role_type": "frontend-pipeline", + "message_types": ["dev_fe_complete", "dev_fe_progress", "error"] + }, + "fe-qa": { + "task_prefix": "QA-FE", + "responsibility": "5-dimension frontend review (quality, a11y, design compliance, UX, pre-delivery), GC loop", + "role_type": "frontend-pipeline", + "message_types": ["qa_fe_passed", "qa_fe_result", "fix_required", "error"] + } + }, + + "pipelines": { + "spec-only": { + "description": "Specification pipeline: research โ†’ discuss โ†’ draft โ†’ quality", + "task_chain": [ + "RESEARCH-001", + "DISCUSS-001", "DRAFT-001", "DISCUSS-002", + "DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004", + "DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006" + ] + }, + "impl-only": { + "description": "Implementation pipeline: plan โ†’ implement โ†’ test + review", + "task_chain": ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"] + }, + "full-lifecycle": { + "description": "Full lifecycle: spec pipeline โ†’ implementation pipeline", + "task_chain": "spec-only + impl-only (PLAN-001 blockedBy DISCUSS-006)" + }, + "fe-only": { + "description": "Frontend-only pipeline: plan โ†’ frontend dev โ†’ frontend QA", + "task_chain": ["PLAN-001", "DEV-FE-001", "QA-FE-001"], + "gc_loop": { "max_rounds": 2, "convergence": "score >= 8 && critical === 0" } + }, + "fullstack": { + "description": "Fullstack pipeline: plan โ†’ backend + frontend parallel โ†’ test + QA", + "task_chain": ["PLAN-001", "IMPL-001||DEV-FE-001", "TEST-001||QA-FE-001", "REVIEW-001"], + "sync_points": ["REVIEW-001"] + }, + "full-lifecycle-fe": { + "description": "Full lifecycle 
with frontend: spec โ†’ plan โ†’ backend + frontend โ†’ test + QA", + "task_chain": "spec-only + fullstack (PLAN-001 blockedBy DISCUSS-006)" + } + }, + + "frontend_detection": { + "keywords": ["component", "page", "UI", "ๅ‰็ซฏ", "frontend", "CSS", "HTML", "React", "Vue", "Tailwind", "็ป„ไปถ", "้กต้ข", "ๆ ทๅผ", "layout", "responsive", "Svelte", "Next.js", "Nuxt", "shadcn", "่ฎพ่ฎก็ณป็ปŸ", "design system"], + "file_patterns": ["*.tsx", "*.jsx", "*.vue", "*.svelte", "*.css", "*.scss", "*.html"], + "routing_rules": { + "frontend_only": "All tasks match frontend keywords, no backend/API mentions", + "fullstack": "Mix of frontend and backend tasks", + "backend_only": "No frontend keywords detected (default impl-only)" + } + }, + + "ui_ux_pro_max": { + "skill_name": "ui-ux-pro-max", + "install_command": "/plugin install ui-ux-pro-max@ui-ux-pro-max-skill", + "invocation": "Skill(skill=\"ui-ux-pro-max\", args=\"...\")", + "domains": ["product", "style", "typography", "color", "landing", "chart", "ux", "web"], + "stacks": ["html-tailwind", "react", "nextjs", "vue", "svelte", "shadcn", "swiftui", "react-native", "flutter"], + "fallback": "llm-general-knowledge", + "design_intelligence_chain": ["analyst โ†’ design-intelligence.json", "architect โ†’ design-tokens.json", "fe-developer โ†’ tokens.css", "fe-qa โ†’ anti-pattern audit"] + }, + + "shared_memory": { + "file": "shared-memory.json", + "schema": { + "design_intelligence": "From analyst via ui-ux-pro-max", + "design_token_registry": "From architect, consumed by fe-developer/fe-qa", + "component_inventory": "From fe-developer, list of implemented components", + "style_decisions": "Accumulated design decisions", + "qa_history": "From fe-qa, audit trail", + "industry_context": "Industry + strictness config" + } + }, + + "collaboration_patterns": ["CP-1", "CP-2", "CP-4", "CP-5", "CP-6", "CP-10"], + + "session_dirs": { + "base": ".workflow/.team/TLS-{slug}-{YYYY-MM-DD}/", + "spec": "spec/", + "discussions": "discussions/", 
+ "plan": "plan/", + "explorations": "explorations/", + "architecture": "architecture/", + "wisdom": "wisdom/", + "messages": ".workflow/.team-msg/{team-name}/" + } +} diff --git a/.claude/skills/team-lifecycle-v3/templates/architecture-doc.md b/.claude/skills/team-lifecycle-v3/templates/architecture-doc.md new file mode 100644 index 00000000..5106de03 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/templates/architecture-doc.md @@ -0,0 +1,254 @@ +# Architecture Document Template (Directory Structure) + +Template for generating architecture decision documents as a directory of individual ADR files in Phase 4. + +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 4 (Architecture) | Generate `architecture/` directory from requirements analysis | +| Output Location | `{workDir}/architecture/` | + +## Output Structure + +``` +{workDir}/architecture/ +โ”œโ”€โ”€ _index.md # Overview, components, tech stack, data model, security +โ”œโ”€โ”€ ADR-001-{slug}.md # Individual Architecture Decision Record +โ”œโ”€โ”€ ADR-002-{slug}.md +โ””โ”€โ”€ ... 
+``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 4 +document_type: architecture-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md + - ../requirements/_index.md +--- + +# Architecture: {product_name} + +{executive_summary - high-level architecture approach and key decisions} + +## System Overview + +### Architecture Style +{description of chosen architecture style: microservices, monolith, serverless, etc.} + +### System Context Diagram + +```mermaid +C4Context + title System Context Diagram + Person(user, "User", "Primary user") + System(system, "{product_name}", "Core system") + System_Ext(ext1, "{external_system}", "{description}") + Rel(user, system, "Uses") + Rel(system, ext1, "Integrates with") +``` + +## Component Architecture + +### Component Diagram + +```mermaid +graph TD + subgraph "{product_name}" + A[Component A] --> B[Component B] + B --> C[Component C] + A --> D[Component D] + end + B --> E[External Service] +``` + +### Component Descriptions + +| Component | Responsibility | Technology | Dependencies | +|-----------|---------------|------------|--------------| +| {component_name} | {what it does} | {tech stack} | {depends on} | + +## Technology Stack + +### Core Technologies + +| Layer | Technology | Version | Rationale | +|-------|-----------|---------|-----------| +| Frontend | {technology} | {version} | {why chosen} | +| Backend | {technology} | {version} | {why chosen} | +| Database | {technology} | {version} | {why chosen} | +| Infrastructure | {technology} | {version} | {why chosen} | + +### Key Libraries & Frameworks + +| Library | Purpose | License | +|---------|---------|---------| +| {library_name} | {purpose} | {license} | + +## Architecture Decision Records + +| ADR | Title | Status | Key Choice | +|-----|-------|--------|------------| +| [ADR-001](ADR-001-{slug}.md) | {title} | Accepted | {one-line summary} | +| 
[ADR-002](ADR-002-{slug}.md) | {title} | Accepted | {one-line summary} | +| [ADR-003](ADR-003-{slug}.md) | {title} | Proposed | {one-line summary} | + +## Data Architecture + +### Data Model + +```mermaid +erDiagram + ENTITY_A ||--o{ ENTITY_B : "has many" + ENTITY_A { + string id PK + string name + datetime created_at + } + ENTITY_B { + string id PK + string entity_a_id FK + string value + } +``` + +### Data Storage Strategy + +| Data Type | Storage | Retention | Backup | +|-----------|---------|-----------|--------| +| {type} | {storage solution} | {retention policy} | {backup strategy} | + +## API Design + +### API Overview + +| Endpoint | Method | Purpose | Auth | +|----------|--------|---------|------| +| {/api/resource} | {GET/POST/etc} | {purpose} | {auth type} | + +## Security Architecture + +### Security Controls + +| Control | Implementation | Requirement | +|---------|---------------|-------------| +| Authentication | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | +| Authorization | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | +| Data Protection | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | + +## Infrastructure & Deployment + +### Deployment Architecture + +{description of deployment model: containers, serverless, VMs, etc.} + +### Environment Strategy + +| Environment | Purpose | Configuration | +|-------------|---------|---------------| +| Development | Local development | {config} | +| Staging | Pre-production testing | {config} | +| Production | Live system | {config} | + +## Codebase Integration + +{if has_codebase is true:} + +### Existing Code Mapping + +| New Component | Existing Module | Integration Type | Notes | +|--------------|----------------|------------------|-------| +| {component} | {existing module path} | Extend/Replace/New | {notes} | + +### Migration Notes +{any migration considerations for existing code} + +## Quality Attributes + +| Attribute | Target | Measurement 
| ADR Reference | +|-----------|--------|-------------|---------------| +| Performance | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | +| Scalability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | +| Reliability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | + +## Risks & Mitigations + +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| {risk} | High/Medium/Low | High/Medium/Low | {mitigation approach} | + +## Open Questions + +- [ ] {architectural question 1} +- [ ] {architectural question 2} + +## References + +- Derived from: [Requirements](../requirements/_index.md), [Product Brief](../product-brief.md) +- Next: [Epics & Stories](../epics/_index.md) +``` + +--- + +## Template: ADR-NNN-{slug}.md (Individual Architecture Decision Record) + +```markdown +--- +id: ADR-{NNN} +status: Accepted +traces_to: [{REQ-NNN}, {NFR-X-NNN}] +date: {timestamp} +--- + +# ADR-{NNN}: {decision_title} + +## Context + +{what is the situation that motivates this decision} + +## Decision + +{what is the chosen approach} + +## Alternatives Considered + +| Option | Pros | Cons | +|--------|------|------| +| {option_1 - chosen} | {pros} | {cons} | +| {option_2} | {pros} | {cons} | +| {option_3} | {pros} | {cons} | + +## Consequences + +- **Positive**: {positive outcomes} +- **Negative**: {tradeoffs accepted} +- **Risks**: {risks to monitor} + +## Traces + +- **Requirements**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md), [NFR-X-{NNN}](../requirements/NFR-X-{NNN}-{slug}.md) +- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{NNN}` | Auto-increment | 
ADR/requirement number | +| `{slug}` | Auto-generated | Kebab-case from decision title | +| `{has_codebase}` | spec-config.json | Whether existing codebase exists | diff --git a/.claude/skills/team-lifecycle-v3/templates/epics-template.md b/.claude/skills/team-lifecycle-v3/templates/epics-template.md new file mode 100644 index 00000000..939d933c --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/templates/epics-template.md @@ -0,0 +1,196 @@ +# Epics & Stories Template (Directory Structure) + +Template for generating epic/story breakdown as a directory of individual Epic files in Phase 5. + +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 5 (Epics & Stories) | Generate `epics/` directory from requirements decomposition | +| Output Location | `{workDir}/epics/` | + +## Output Structure + +``` +{workDir}/epics/ +โ”œโ”€โ”€ _index.md # Overview table + dependency map + MVP scope + execution order +โ”œโ”€โ”€ EPIC-001-{slug}.md # Individual Epic with its Stories +โ”œโ”€โ”€ EPIC-002-{slug}.md +โ””โ”€โ”€ ... +``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 5 +document_type: epics-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md + - ../requirements/_index.md + - ../architecture/_index.md +--- + +# Epics & Stories: {product_name} + +{executive_summary - overview of epic structure and MVP scope} + +## Epic Overview + +| Epic ID | Title | Priority | MVP | Stories | Est. 
Size | +|---------|-------|----------|-----|---------|-----------| +| [EPIC-001](EPIC-001-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} | +| [EPIC-002](EPIC-002-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} | +| [EPIC-003](EPIC-003-{slug}.md) | {title} | Should | No | {n} | {S/M/L/XL} | + +## Dependency Map + +```mermaid +graph LR + EPIC-001 --> EPIC-002 + EPIC-001 --> EPIC-003 + EPIC-002 --> EPIC-004 + EPIC-003 --> EPIC-005 +``` + +### Dependency Notes +{explanation of why these dependencies exist and suggested execution order} + +### Recommended Execution Order +1. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - foundational} +2. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - depends on #1} +3. ... + +## MVP Scope + +### MVP Epics +{list of epics included in MVP with justification, linking to each} + +### MVP Definition of Done +- [ ] {MVP completion criterion 1} +- [ ] {MVP completion criterion 2} +- [ ] {MVP completion criterion 3} + +## Traceability Matrix + +| Requirement | Epic | Stories | Architecture | +|-------------|------|---------|--------------| +| [REQ-001](../requirements/REQ-001-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-001, STORY-001-002 | [ADR-001](../architecture/ADR-001-{slug}.md) | +| [REQ-002](../requirements/REQ-002-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-003 | Component B | +| [REQ-003](../requirements/REQ-003-{slug}.md) | [EPIC-002](EPIC-002-{slug}.md) | STORY-002-001 | [ADR-002](../architecture/ADR-002-{slug}.md) | + +## Estimation Summary + +| Size | Meaning | Count | +|------|---------|-------| +| S | Small - well-understood, minimal risk | {n} | +| M | Medium - some complexity, moderate risk | {n} | +| L | Large - significant complexity, should consider splitting | {n} | +| XL | Extra Large - high complexity, must split before implementation | {n} | + +## Risks & Considerations + +| Risk | Affected Epics | Mitigation | +|------|---------------|------------| +| {risk description} | 
[EPIC-{NNN}](EPIC-{NNN}-{slug}.md) | {mitigation} | + +## Open Questions + +- [ ] {question about scope or implementation 1} +- [ ] {question about scope or implementation 2} + +## References + +- Derived from: [Requirements](../requirements/_index.md), [Architecture](../architecture/_index.md) +- Handoff to: execution workflows (lite-plan, plan, req-plan) +``` + +--- + +## Template: EPIC-NNN-{slug}.md (Individual Epic) + +```markdown +--- +id: EPIC-{NNN} +priority: {Must|Should|Could} +mvp: {true|false} +size: {S|M|L|XL} +requirements: [REQ-{NNN}] +architecture: [ADR-{NNN}] +dependencies: [EPIC-{NNN}] +status: draft +--- + +# EPIC-{NNN}: {epic_title} + +**Priority**: {Must|Should|Could} +**MVP**: {Yes|No} +**Estimated Size**: {S|M|L|XL} + +## Description + +{detailed epic description} + +## Requirements + +- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title} +- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title} + +## Architecture + +- [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md): {title} +- Component: {component_name} + +## Dependencies + +- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (blocking): {reason} +- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (soft): {reason} + +## Stories + +### STORY-{EPIC}-001: {story_title} + +**User Story**: As a {persona}, I want to {action} so that {benefit}. + +**Acceptance Criteria**: +- [ ] {criterion 1} +- [ ] {criterion 2} +- [ ] {criterion 3} + +**Size**: {S|M|L|XL} +**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md) + +--- + +### STORY-{EPIC}-002: {story_title} + +**User Story**: As a {persona}, I want to {action} so that {benefit}. 
+ +**Acceptance Criteria**: +- [ ] {criterion 1} +- [ ] {criterion 2} + +**Size**: {S|M|L|XL} +**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{EPIC}` | Auto-increment | Epic number (3 digits) | +| `{NNN}` | Auto-increment | Story/requirement number | +| `{slug}` | Auto-generated | Kebab-case from epic/story title | +| `{S\|M\|L\|XL}` | CLI analysis | Relative size estimate | diff --git a/.claude/skills/team-lifecycle-v3/templates/product-brief.md b/.claude/skills/team-lifecycle-v3/templates/product-brief.md new file mode 100644 index 00000000..ffbdf437 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/templates/product-brief.md @@ -0,0 +1,133 @@ +# Product Brief Template + +Template for generating product brief documents in Phase 2. 
+ +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 2 (Product Brief) | Generate product-brief.md from multi-CLI analysis | +| Output Location | `{workDir}/product-brief.md` | + +--- + +## Template + +```markdown +--- +session_id: {session_id} +phase: 2 +document_type: product-brief +status: draft +generated_at: {timestamp} +stepsCompleted: [] +version: 1 +dependencies: + - spec-config.json +--- + +# Product Brief: {product_name} + +{executive_summary - 2-3 sentences capturing the essence of the product/feature} + +## Vision + +{vision_statement - clear, aspirational 1-3 sentence statement of what success looks like} + +## Problem Statement + +### Current Situation +{description of the current state and pain points} + +### Impact +{quantified impact of the problem - who is affected, how much, how often} + +## Target Users + +{for each user persona:} + +### {Persona Name} +- **Role**: {user's role/context} +- **Needs**: {primary needs related to this product} +- **Pain Points**: {current frustrations} +- **Success Criteria**: {what success looks like for this user} + +## Goals & Success Metrics + +| Goal ID | Goal | Success Metric | Target | +|---------|------|----------------|--------| +| G-001 | {goal description} | {measurable metric} | {specific target} | +| G-002 | {goal description} | {measurable metric} | {specific target} | + +## Scope + +### In Scope +- {feature/capability 1} +- {feature/capability 2} +- {feature/capability 3} + +### Out of Scope +- {explicitly excluded item 1} +- {explicitly excluded item 2} + +### Assumptions +- {key assumption 1} +- {key assumption 2} + +## Competitive Landscape + +| Aspect | Current State | Proposed Solution | Advantage | +|--------|--------------|-------------------|-----------| +| {aspect} | {how it's done now} | {our approach} | {differentiator} | + +## Constraints & Dependencies + +### Technical Constraints +- {constraint 1} +- {constraint 2} + +### Business Constraints +- {constraint 1} + +### 
Dependencies +- {external dependency 1} +- {external dependency 2} + +## Multi-Perspective Synthesis + +### Product Perspective +{summary of product/market analysis findings} + +### Technical Perspective +{summary of technical feasibility and constraints} + +### User Perspective +{summary of user journey and UX considerations} + +### Convergent Themes +{themes where all perspectives agree} + +### Conflicting Views +{areas where perspectives differ, with notes on resolution approach} + +## Open Questions + +- [ ] {unresolved question 1} +- [ ] {unresolved question 2} + +## References + +- Derived from: [spec-config.json](spec-config.json) +- Next: [Requirements PRD](requirements/_index.md) +``` + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | Seed analysis | Product/feature name | +| `{executive_summary}` | CLI synthesis | 2-3 sentence summary | +| `{vision_statement}` | CLI product perspective | Aspirational vision | +| All `{...}` fields | CLI analysis outputs | Filled from multi-perspective analysis | diff --git a/.claude/skills/team-lifecycle-v3/templates/requirements-prd.md b/.claude/skills/team-lifecycle-v3/templates/requirements-prd.md new file mode 100644 index 00000000..0b1dbf28 --- /dev/null +++ b/.claude/skills/team-lifecycle-v3/templates/requirements-prd.md @@ -0,0 +1,224 @@ +# Requirements PRD Template (Directory Structure) + +Template for generating Product Requirements Document as a directory of individual requirement files in Phase 3. 
+ +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 3 (Requirements) | Generate `requirements/` directory from product brief expansion | +| Output Location | `{workDir}/requirements/` | + +## Output Structure + +``` +{workDir}/requirements/ +โ”œโ”€โ”€ _index.md # Summary + MoSCoW table + traceability matrix + links +โ”œโ”€โ”€ REQ-001-{slug}.md # Individual functional requirement +โ”œโ”€โ”€ REQ-002-{slug}.md +โ”œโ”€โ”€ NFR-P-001-{slug}.md # Non-functional: Performance +โ”œโ”€โ”€ NFR-S-001-{slug}.md # Non-functional: Security +โ”œโ”€โ”€ NFR-SC-001-{slug}.md # Non-functional: Scalability +โ”œโ”€โ”€ NFR-U-001-{slug}.md # Non-functional: Usability +โ””โ”€โ”€ ... +``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 3 +document_type: requirements-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md +--- + +# Requirements: {product_name} + +{executive_summary - brief overview of what this PRD covers and key decisions} + +## Requirement Summary + +| Priority | Count | Coverage | +|----------|-------|----------| +| Must Have | {n} | {description of must-have scope} | +| Should Have | {n} | {description of should-have scope} | +| Could Have | {n} | {description of could-have scope} | +| Won't Have | {n} | {description of explicitly excluded} | + +## Functional Requirements + +| ID | Title | Priority | Traces To | +|----|-------|----------|-----------| +| [REQ-001](REQ-001-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) | +| [REQ-002](REQ-002-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) | +| [REQ-003](REQ-003-{slug}.md) | {title} | Should | [G-002](../product-brief.md#goals--success-metrics) | + +## Non-Functional Requirements + +### Performance + +| ID | Title | Target | +|----|-------|--------| +| [NFR-P-001](NFR-P-001-{slug}.md) | {title} | {target value} | + +### Security + +| ID 
| Title | Standard | +|----|-------|----------| +| [NFR-S-001](NFR-S-001-{slug}.md) | {title} | {standard/framework} | + +### Scalability + +| ID | Title | Target | +|----|-------|--------| +| [NFR-SC-001](NFR-SC-001-{slug}.md) | {title} | {target value} | + +### Usability + +| ID | Title | Target | +|----|-------|--------| +| [NFR-U-001](NFR-U-001-{slug}.md) | {title} | {target value} | + +## Data Requirements + +### Data Entities + +| Entity | Description | Key Attributes | +|--------|-------------|----------------| +| {entity_name} | {description} | {attr1, attr2, attr3} | + +### Data Flows + +{description of key data flows, optionally with Mermaid diagram} + +## Integration Requirements + +| System | Direction | Protocol | Data Format | Notes | +|--------|-----------|----------|-------------|-------| +| {system_name} | Inbound/Outbound/Both | {REST/gRPC/etc} | {JSON/XML/etc} | {notes} | + +## Constraints & Assumptions + +### Constraints +- {technical or business constraint 1} +- {technical or business constraint 2} + +### Assumptions +- {assumption 1 - must be validated} +- {assumption 2 - must be validated} + +## Priority Rationale + +{explanation of MoSCoW prioritization decisions, especially for Should/Could boundaries} + +## Traceability Matrix + +| Goal | Requirements | +|------|-------------| +| G-001 | [REQ-001](REQ-001-{slug}.md), [REQ-002](REQ-002-{slug}.md), [NFR-P-001](NFR-P-001-{slug}.md) | +| G-002 | [REQ-003](REQ-003-{slug}.md), [NFR-S-001](NFR-S-001-{slug}.md) | + +## Open Questions + +- [ ] {unresolved question 1} +- [ ] {unresolved question 2} + +## References + +- Derived from: [Product Brief](../product-brief.md) +- Next: [Architecture](../architecture/_index.md) +``` + +--- + +## Template: REQ-NNN-{slug}.md (Individual Functional Requirement) + +```markdown +--- +id: REQ-{NNN} +type: functional +priority: {Must|Should|Could|Won't} +traces_to: [G-{NNN}] +status: draft +--- + +# REQ-{NNN}: {requirement_title} + +**Priority**: 
{Must|Should|Could|Won't} + +## Description + +{detailed requirement description} + +## User Story + +As a {persona}, I want to {action} so that {benefit}. + +## Acceptance Criteria + +- [ ] {specific, testable criterion 1} +- [ ] {specific, testable criterion 2} +- [ ] {specific, testable criterion 3} + +## Traces + +- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics) +- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable) +- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5) +``` + +--- + +## Template: NFR-{type}-NNN-{slug}.md (Individual Non-Functional Requirement) + +```markdown +--- +id: NFR-{type}-{NNN} +type: non-functional +category: {Performance|Security|Scalability|Usability} +priority: {Must|Should|Could} +status: draft +--- + +# NFR-{type}-{NNN}: {requirement_title} + +**Category**: {Performance|Security|Scalability|Usability} +**Priority**: {Must|Should|Could} + +## Requirement + +{detailed requirement description} + +## Metric & Target + +| Metric | Target | Measurement Method | +|--------|--------|--------------------| +| {metric} | {target value} | {how measured} | + +## Traces + +- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics) +- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{NNN}` | Auto-increment | Requirement number (zero-padded 3 digits) | +| `{slug}` | Auto-generated | Kebab-case from requirement title | +| `{type}` | Category | P (Performance), S (Security), SC (Scalability), U (Usability) | +| `{Must\|Should\|Could\|Won't}` | User input / auto | MoSCoW priority tag | diff --git a/.claude/skills/workflow-lite-plan 
copy/SKILL.md b/.claude/skills/workflow-lite-plan copy/SKILL.md new file mode 100644 index 00000000..3bb5cf0a --- /dev/null +++ b/.claude/skills/workflow-lite-plan copy/SKILL.md @@ -0,0 +1,177 @@ +--- +name: workflow-lite-plan +description: Lightweight planning and execution skill - route to lite-plan or lite-execute with prompt enhancement. Triggers on "workflow:lite-plan", "workflow:lite-execute". +allowed-tools: Skill, Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep +--- + +# Workflow Lite-Plan + +Unified lightweight planning and execution skill. Routes to lite-plan (planning pipeline) or lite-execute (execution engine) based on trigger, with prompt enhancement for both modes. + +## Architecture Overview + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ SKILL.md (Router + Prompt Enhancement) โ”‚ +โ”‚ โ†’ Detect mode โ†’ Enhance prompt โ†’ Dispatch to phase โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ lite-plan โ”‚ โ”‚lite-executeโ”‚ + โ”‚ Phase 1 โ”‚ โ”‚ Phase 2 โ”‚ + โ”‚ Plan+Exec โ”‚โ”€directโ”€โ”€โ†’โ”‚ Standalone โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Mode Detection & Routing + +```javascript +const args = $ARGUMENTS +const mode = detectMode() + +function detectMode() { + if (skillName === 'workflow:lite-execute') return 'execute' + return 'plan' // default: workflow:lite-plan +} +``` + +**Routing Table**: + +| Trigger | Mode | Phase Document | Description | +|---------|------|----------------|-------------| +| `workflow:lite-plan` | plan | 
[phases/01-lite-plan.md](phases/01-lite-plan.md) | Full planning pipeline (explore โ†’ plan โ†’ confirm โ†’ execute) | +| `workflow:lite-execute` | execute | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Standalone execution (in-memory / prompt / file) | + +## Interactive Preference Collection + +Before dispatching, collect workflow preferences via AskUserQuestion: + +```javascript +// โ˜… ็ปŸไธ€ auto mode ๆฃ€ๆต‹๏ผš-y/--yes ไปŽ $ARGUMENTS ๆˆ– ccw ไผ ๆ’ญ +const autoYes = /\b(-y|--yes)\b/.test($ARGUMENTS) + +if (autoYes) { + // ่‡ชๅŠจๆจกๅผ๏ผš่ทณ่ฟ‡ๆ‰€ๆœ‰่ฏข้—ฎ๏ผŒไฝฟ็”จ้ป˜่ฎคๅ€ผ + workflowPreferences = { autoYes: true, forceExplore: false } +} else if (mode === 'plan') { + const prefResponse = AskUserQuestion({ + questions: [ + { + question: "ๆ˜ฏๅฆ่ทณ่ฟ‡ๆ‰€ๆœ‰็กฎ่ฎคๆญฅ้ชค๏ผˆ่‡ชๅŠจๆจกๅผ๏ผ‰๏ผŸ", + header: "Auto Mode", + multiSelect: false, + options: [ + { label: "Interactive (Recommended)", description: "ไบคไบ’ๆจกๅผ๏ผŒๅŒ…ๅซ็กฎ่ฎคๆญฅ้ชค" }, + { label: "Auto", description: "่ทณ่ฟ‡ๆ‰€ๆœ‰็กฎ่ฎค๏ผŒ่‡ชๅŠจๆ‰ง่กŒ" } + ] + }, + { + question: "ๆ˜ฏๅฆๅผบๅˆถๆ‰ง่กŒไปฃ็ ๆŽข็ดข้˜ถๆฎต๏ผŸ", + header: "Exploration", + multiSelect: false, + options: [ + { label: "Auto-detect (Recommended)", description: "ๆ™บ่ƒฝๅˆคๆ–ญๆ˜ฏๅฆ้œ€่ฆๆŽข็ดข" }, + { label: "Force explore", description: "ๅผบๅˆถๆ‰ง่กŒไปฃ็ ๆŽข็ดข" } + ] + } + ] + }) + workflowPreferences = { + autoYes: prefResponse.autoMode === 'Auto', + forceExplore: prefResponse.exploration === 'Force explore' + } +} else if (mode !== 'plan') { + // Execute mode (standalone, not in-memory) + const prefResponse = AskUserQuestion({ + questions: [ + { + question: "ๆ˜ฏๅฆ่ทณ่ฟ‡ๆ‰€ๆœ‰็กฎ่ฎคๆญฅ้ชค๏ผˆ่‡ชๅŠจๆจกๅผ๏ผ‰๏ผŸ", + header: "Auto Mode", + multiSelect: false, + options: [ + { label: "Interactive (Recommended)", description: "ไบคไบ’ๆจกๅผ๏ผŒๅŒ…ๅซ็กฎ่ฎคๆญฅ้ชค" }, + { label: "Auto", description: "่ทณ่ฟ‡ๆ‰€ๆœ‰็กฎ่ฎค๏ผŒ่‡ชๅŠจๆ‰ง่กŒ" } + ] + } + ] + }) + workflowPreferences = { + autoYes: prefResponse.autoMode === 'Auto', + 
forceExplore: false + } +} +``` + +**workflowPreferences** is passed to phase execution as context variable, referenced as `workflowPreferences.autoYes` and `workflowPreferences.forceExplore` within phases. + +## Prompt Enhancement + +After collecting preferences, enhance context and dispatch: + +```javascript +// Step 0: Parse --from-analysis handoff (from analyze-with-file) +const fromAnalysisMatch = args.match(/--from-analysis\s+(\S+)/) +if (fromAnalysisMatch) { + const handoffPath = fromAnalysisMatch[1] + workflowPreferences.analysisHandoff = JSON.parse(Read(handoffPath)) + workflowPreferences.forceExplore = false + // Strip flag from args, keep task description + args = args.replace(/--from-analysis\s+\S+\s*/, '').trim() +} + +// Step 1: Check for project context files +const hasProjectTech = fileExists('.workflow/project-tech.json') +const hasProjectGuidelines = fileExists('.workflow/project-guidelines.json') + +// Step 2: Log available context +if (hasProjectTech) { + console.log('Project tech context available: .workflow/project-tech.json') +} +if (hasProjectGuidelines) { + console.log('Project guidelines available: .workflow/project-guidelines.json') +} + +// Step 3: Dispatch to phase (workflowPreferences available as context) +if (mode === 'plan') { + // Read phases/01-lite-plan.md and execute +} else { + // Read phases/02-lite-execute.md and execute +} +``` + +## Execution Flow + +### Plan Mode + +``` +1. Collect preferences via AskUserQuestion (autoYes, forceExplore) +2. Enhance prompt with project context availability +3. Read phases/01-lite-plan.md +4. Execute lite-plan pipeline (Phase 1-5 within the phase doc) +5. lite-plan Phase 5 directly reads and executes Phase 2 (lite-execute) with executionContext +``` + +### Execute Mode + +``` +1. Collect preferences via AskUserQuestion (autoYes) +2. Enhance prompt with project context availability +3. Read phases/02-lite-execute.md +4. 
Execute lite-execute pipeline (input detection โ†’ execution โ†’ review) +``` + +## Usage + +Plan mode and execute mode are triggered by skill name routing (see Mode Detection). Workflow preferences (auto mode, force explore) are collected interactively via AskUserQuestion before dispatching to phases. + +**Plan mode**: Task description provided as arguments โ†’ interactive preference collection โ†’ planning pipeline +**Execute mode**: Task description, file path, or in-memory context โ†’ interactive preference collection โ†’ execution pipeline + +## Phase Reference Documents + +| Phase | Document | Purpose | +|-------|----------|---------| +| 1 | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Complete planning pipeline: exploration, clarification, planning, confirmation, handoff | +| 2 | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Complete execution engine: input modes, task grouping, batch execution, code review | diff --git a/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md b/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md new file mode 100644 index 00000000..1469bfba --- /dev/null +++ b/.claude/skills/workflow-lite-plan copy/phases/01-lite-plan.md @@ -0,0 +1,770 @@ +# Phase 1: Lite-Plan + +Complete planning pipeline: task analysis, multi-angle exploration, clarification, adaptive planning, confirmation, and execution handoff. + +--- + +## Overview + +Intelligent lightweight planning command with dynamic workflow adaptation based on task complexity. Focuses on planning phases (exploration, clarification, planning, confirmation) and delegates execution to Phase 2 (lite-execute). 
+ +**Core capabilities:** +- Intelligent task analysis with automatic exploration detection +- Dynamic code exploration (cli-explore-agent) when codebase understanding needed +- Interactive clarification after exploration to gather missing information +- Adaptive planning: Low complexity โ†’ Direct Claude; Medium/High โ†’ cli-lite-planning-agent +- Two-step confirmation: plan display โ†’ multi-dimensional input collection +- Execution handoff with complete context to lite-execute + +## Input + +``` + Task description or path to .md file (required) +``` + +Workflow preferences (`autoYes`, `forceExplore`) are collected by SKILL.md via AskUserQuestion and passed as `workflowPreferences` context variable. + +## Output Artifacts + +| Artifact | Description | +|----------|-------------| +| `exploration-{angle}.json` | Per-angle exploration results (1-4 files based on complexity) | +| `explorations-manifest.json` | Index of all exploration files | +| `planning-context.md` | Evidence paths + synthesized understanding | +| `plan.json` | Plan overview with task_ids[] (plan-overview-base-schema.json) | +| `.task/TASK-*.json` | Independent task files (one per task) | + +**Output Directory**: `.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/` + +**Agent Usage**: +- Low complexity โ†’ Direct Claude planning (no agent) +- Medium/High complexity โ†’ `cli-lite-planning-agent` generates `plan.json` + +**Schema Reference**: `~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json` + +## Auto Mode Defaults + +When `workflowPreferences.autoYes === true`: +- **Clarification Questions**: Skipped (no clarification phase) +- **Plan Confirmation**: Auto-selected "Allow" +- **Execution Method**: Auto-selected "Auto" +- **Code Review**: Auto-selected "Skip" + +## Execution Process + +``` +Phase 1: Task Analysis & Exploration + โ”œโ”€ Parse input (description or .md file) + โ”œโ”€ intelligent complexity assessment (Low/Medium/High) + โ”œโ”€ Exploration decision (auto-detect or 
workflowPreferences.forceExplore) + โ”œโ”€ Context protection: If file reading โ‰ฅ50k chars โ†’ force cli-explore-agent + โ””โ”€ Decision: + โ”œโ”€ needsExploration=true โ†’ Launch parallel cli-explore-agents (1-4 based on complexity) + โ””โ”€ needsExploration=false โ†’ Skip to Phase 2/3 + +Phase 2: Clarification (optional, multi-round) + โ”œโ”€ Aggregate clarification_needs from all exploration angles + โ”œโ”€ Deduplicate similar questions + โ””โ”€ Decision: + โ”œโ”€ Has clarifications โ†’ AskUserQuestion (max 4 questions per round, multiple rounds allowed) + โ””โ”€ No clarifications โ†’ Skip to Phase 3 + +Phase 3: Planning (NO CODE EXECUTION - planning only) + โ””โ”€ Decision (based on Phase 1 complexity): + โ”œโ”€ Low โ†’ Load schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json โ†’ Direct Claude planning (following schema) โ†’ plan.json + โ””โ”€ Medium/High โ†’ cli-lite-planning-agent โ†’ plan.json (agent internally executes quality check) + +Phase 4: Confirmation & Selection + โ”œโ”€ Display plan summary (tasks, complexity, estimated time) + โ””โ”€ AskUserQuestion: + โ”œโ”€ Confirm: Allow / Modify / Cancel + โ”œโ”€ Execution: Agent / Codex / Auto + โ””โ”€ Review: Gemini / Agent / Skip + +Phase 5: Execute + โ”œโ”€ Build executionContext (plan + explorations + clarifications + selections) + โ””โ”€ Direct handoff: Read phases/02-lite-execute.md โ†’ Execute with executionContext (Mode 1) +``` + +## Implementation + +### Phase 1: Intelligent Multi-Angle Exploration + +**Session Setup** (MANDATORY - follow exactly): +```javascript +// Helper: Get UTC+8 (China Standard Time) ISO string +const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() + +const taskSlug = task_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40) +const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29 + +const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-11-29" +const 
sessionFolder = `.workflow/.lite-plan/${sessionId}` + +bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`) +``` + +**Exploration Decision Logic**: +```javascript +// Analysis handoff: reconstruct exploration from upstream analysis artifacts +if (workflowPreferences.analysisHandoff) { + const handoff = workflowPreferences.analysisHandoff + Write(`${sessionFolder}/exploration-from-analysis.json`, JSON.stringify({ + relevant_files: handoff.exploration_digest.relevant_files || [], + patterns: handoff.exploration_digest.patterns || [], + key_findings: handoff.exploration_digest.key_findings || [], + clarification_needs: [], // analysis already did multi-round discussion + _metadata: { exploration_angle: "from-analysis", source_session: handoff.source_session, reconstructed: true } + }, null, 2)) + Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify({ + session_id: sessionId, task_description: task_description, timestamp: getUtc8ISOString(), + complexity: complexity, exploration_count: 1, from_analysis: handoff.source_session, + explorations: [{ angle: "from-analysis", file: "exploration-from-analysis.json", + path: `${sessionFolder}/exploration-from-analysis.json`, index: 1 }] + }, null, 2)) + needsExploration = false + // clarification_needs=[] โ†’ Phase 2 naturally skipped โ†’ proceed to Phase 3 +} + +needsExploration = needsExploration ?? 
( + workflowPreferences.forceExplore || + task.mentions_specific_files || + task.requires_codebase_context || + task.needs_architecture_understanding || + task.modifies_existing_code +) + +if (!needsExploration) { + // Skip to Phase 2 (Clarification) or Phase 3 (Planning) + proceed_to_next_phase() +} +``` + +**โš ๏ธ Context Protection**: File reading โ‰ฅ50k chars โ†’ force `needsExploration=true` (delegate to cli-explore-agent) + +**Complexity Assessment** (Intelligent Analysis): +```javascript +// analyzes task complexity based on: +// - Scope: How many systems/modules are affected? +// - Depth: Surface change vs architectural impact? +// - Risk: Potential for breaking existing functionality? +// - Dependencies: How interconnected is the change? + +const complexity = analyzeTaskComplexity(task_description) +// Returns: 'Low' | 'Medium' | 'High' +// Low: ONLY truly trivial โ€” single file, single function, zero cross-module impact, no new patterns +// Examples: fix typo, rename variable, add log line, adjust constant value +// Medium: Multiple files OR any integration point OR new pattern introduction OR moderate risk +// Examples: add endpoint, implement feature, refactor module, fix bug spanning files +// High: Cross-module, architectural, or systemic change +// Examples: new subsystem, migration, security overhaul, API redesign +// โš ๏ธ Default bias: When uncertain between Low and Medium, choose Medium + +// Angle assignment based on task type (orchestrator decides, not agent) +const ANGLE_PRESETS = { + architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'], + security: ['security', 'auth-patterns', 'dataflow', 'validation'], + performance: ['performance', 'bottlenecks', 'caching', 'data-access'], + bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], + feature: ['patterns', 'integration-points', 'testing', 'dependencies'] +} + +function selectAngles(taskDescription, count) { + const text = 
taskDescription.toLowerCase() + let preset = 'feature' // default + + if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture' + else if (/security|auth|permission|access/.test(text)) preset = 'security' + else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance' + else if (/fix|bug|error|issue|broken/.test(text)) preset = 'bugfix' + + return ANGLE_PRESETS[preset].slice(0, count) +} + +const selectedAngles = selectAngles(task_description, complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1)) + +// Planning strategy determination +// Agent trigger: anything beyond trivial single-file change +// - analysisHandoff โ†’ always agent (analysis validated non-trivial task) +// - multi-angle exploration โ†’ agent (complexity warranted multiple angles) +// - Medium/High complexity โ†’ agent +// Direct Claude planning ONLY for truly trivial Low + no analysis + single angle +const planningStrategy = ( + complexity === 'Low' && !workflowPreferences.analysisHandoff && selectedAngles.length <= 1 +) ? 'Direct Claude Planning' + : 'cli-lite-planning-agent' + +console.log(` +## Exploration Plan + +Task Complexity: ${complexity} +Selected Angles: ${selectedAngles.join(', ')} +Planning Strategy: ${planningStrategy} + +Launching ${selectedAngles.length} parallel explorations... +`) +``` + +**Launch Parallel Explorations** - Orchestrator assigns angle to each agent: + +**โš ๏ธ CRITICAL - NO BACKGROUND EXECUTION**: +- **MUST NOT use `run_in_background: true`** - exploration results are REQUIRED before planning + + +```javascript +// Launch agents with pre-assigned angles +const explorationTasks = selectedAngles.map((angle, index) => + Task( + subagent_type="cli-explore-agent", + run_in_background=false, // โš ๏ธ MANDATORY: Must wait for results + description=`Explore: ${angle}`, + prompt=` +## Task Objective +Execute **${angle}** exploration for task planning context. 
Analyze codebase from this specific angle to discover relevant structure, patterns, and constraints. + +## Output Location + +**Session Folder**: ${sessionFolder} +**Output File**: ${sessionFolder}/exploration-${angle}.json + +## Assigned Context +- **Exploration Angle**: ${angle} +- **Task Description**: ${task_description} +- **Exploration Index**: ${index + 1} of ${selectedAngles.length} + +## Agent Initialization +cli-explore-agent autonomously handles: project structure discovery, schema loading, project context loading (project-tech.json, project-guidelines.json), and keyword search. These steps execute automatically. + +## Exploration Strategy (${angle} focus) + +**Step 1: Structural Scan** (Bash) +- get_modules_by_depth.sh โ†’ identify modules related to ${angle} +- find/rg โ†’ locate files relevant to ${angle} aspect +- Analyze imports/dependencies from ${angle} perspective + +**Step 2: Semantic Analysis** (Gemini CLI) +- How does existing code handle ${angle} concerns? +- What patterns are used for ${angle}? +- Where would new code integrate from ${angle} viewpoint? + +**Step 3: Write Output** +- Consolidate ${angle} findings into JSON +- Identify ${angle}-specific clarification needs + +## Expected Output + +**Schema Reference**: explore-json-schema.json (auto-loaded by agent during initialization) + +**Required Fields** (all ${angle} focused): +- Follow explore-json-schema.json exactly (auto-loaded by agent) +- All fields scoped to ${angle} perspective +- Ensure rationale is specific and >10 chars (not generic) +- Include file:line locations in integration_points +- _metadata.exploration_angle: "${angle}" + +## Success Criteria +- [ ] get_modules_by_depth.sh executed +- [ ] At least 3 relevant files identified with specific rationale + role +- [ ] Every file has rationale >10 chars (not generic like "Related to ${angle}") +- [ ] Every file has role classification (modify_target/dependency/etc.) 
+- [ ] Patterns are actionable (code examples, not generic advice) +- [ ] Integration points include file:line locations +- [ ] Constraints are project-specific to ${angle} +- [ ] JSON output follows schema exactly +- [ ] clarification_needs includes options + recommended + +## Execution +**Write**: \`${sessionFolder}/exploration-${angle}.json\` +**Return**: 2-3 sentence summary of ${angle} findings +` + ) +) + +// Execute all exploration tasks in parallel +``` + +**Auto-discover Generated Exploration Files**: +```javascript +// After explorations complete, auto-discover all exploration-*.json files +const explorationFiles = bash(`find ${sessionFolder} -name "exploration-*.json" -type f`) + .split('\n') + .filter(f => f.trim()) + +// Read metadata to build manifest +const explorationManifest = { + session_id: sessionId, + task_description: task_description, + timestamp: getUtc8ISOString(), + complexity: complexity, + exploration_count: explorationFiles.length, + explorations: explorationFiles.map(file => { + const data = JSON.parse(Read(file)) + const filename = path.basename(file) + return { + angle: data._metadata.exploration_angle, + file: filename, + path: file, + index: data._metadata.exploration_index + } + }) +} + +Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2)) + +console.log(` +## Exploration Complete + +Generated exploration files in ${sessionFolder}: +${explorationManifest.explorations.map(e => `- exploration-${e.angle}.json (angle: ${e.angle})`).join('\n')} + +Manifest: explorations-manifest.json +Angles explored: ${explorationManifest.explorations.map(e => e.angle).join(', ')} +`) +``` + +**Output**: +- `${sessionFolder}/exploration-{angle1}.json` +- `${sessionFolder}/exploration-{angle2}.json` +- ... 
(1-4 files based on complexity) +- `${sessionFolder}/explorations-manifest.json` + +--- + +### Phase 2: Clarification (Optional, Multi-Round) + +**Skip if**: No exploration or `clarification_needs` is empty across all explorations + +**โš ๏ธ CRITICAL**: AskUserQuestion tool limits max 4 questions per call. **MUST execute multiple rounds** to exhaust all clarification needs - do NOT stop at round 1. + +**Aggregate clarification needs from all exploration angles**: +```javascript +// Load manifest and all exploration files +const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) +const explorations = manifest.explorations.map(exp => ({ + angle: exp.angle, + data: JSON.parse(Read(exp.path)) +})) + +// Aggregate clarification needs from all explorations +const allClarifications = [] +explorations.forEach(exp => { + if (exp.data.clarification_needs?.length > 0) { + exp.data.clarification_needs.forEach(need => { + allClarifications.push({ + ...need, + source_angle: exp.angle + }) + }) + } +}) + +// Intelligent deduplication: analyze allClarifications by intent +// - Identify questions with similar intent across different angles +// - Merge similar questions: combine options, consolidate context +// - Produce dedupedClarifications with unique intents only +const dedupedClarifications = intelligentMerge(allClarifications) + +const autoYes = workflowPreferences.autoYes + +if (autoYes) { + // Auto mode: Skip clarification phase + console.log(`[Auto] Skipping ${dedupedClarifications.length} clarification questions`) + console.log(`Proceeding to planning with exploration results...`) + // Continue to Phase 3 +} else if (dedupedClarifications.length > 0) { + // Interactive mode: Multi-round clarification + const BATCH_SIZE = 4 + const totalRounds = Math.ceil(dedupedClarifications.length / BATCH_SIZE) + + for (let i = 0; i < dedupedClarifications.length; i += BATCH_SIZE) { + const batch = dedupedClarifications.slice(i, i + BATCH_SIZE) + const 
currentRound = Math.floor(i / BATCH_SIZE) + 1
+
+    console.log(`### Clarification Round ${currentRound}/${totalRounds}`)
+
+    AskUserQuestion({
+      questions: batch.map(need => ({
+        question: `[${need.source_angle}] ${need.question}\n\nContext: ${need.context}`,
+        header: need.source_angle.substring(0, 12),
+        multiSelect: false,
+        options: need.options.map((opt, index) => ({
+          label: need.recommended === index ? `${opt} ★` : opt,
+          description: need.recommended === index ? `Recommended` : `Use ${opt}`
+        }))
+      }))
+    })
+
+    // Store batch responses in clarificationContext before next round
+  }
+}
+```
+
+**Output**: `clarificationContext` (in-memory)
+
+---
+
+### Phase 3: Planning
+
+**Planning Strategy Selection** (based on Phase 1 complexity):
+
+**IMPORTANT**: Phase 3 is **planning only** - NO code execution. All execution happens in Phase 5 via lite-execute.
+
+**Executor Assignment** (Claude 智能分配，plan 生成后执行):
+
+```javascript
+// 分配规则（优先级从高到低）：
+// 1. 用户明确指定："用 gemini 分析..." → gemini, "codex 实现..." → codex
+// 2. 默认 → agent
+
+const executorAssignments = {} // { taskId: { executor: 'gemini'|'codex'|'agent', reason: string } }
+
+// Load tasks from .task/ directory for executor assignment
+const taskFiles = Glob(`${sessionFolder}/.task/TASK-*.json`)
+taskFiles.forEach(taskPath => {
+  const task = JSON.parse(Read(taskPath))
+  // Claude 根据上述规则语义分析，为每个 task 分配 executor
+  executorAssignments[task.id] = { executor: '...', reason: '...'
} +}) +``` + +**Low Complexity** - Direct planning by Claude: +```javascript +// Step 1: Read schema +const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`) + +// Step 2: โš ๏ธ MANDATORY - Read and review ALL exploration files +const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) +manifest.explorations.forEach(exp => { + const explorationData = Read(exp.path) + console.log(`\n### Exploration: ${exp.angle}\n${explorationData}`) +}) + +// Step 3: Generate task objects (Claude directly, no agent) +// โš ๏ธ Tasks MUST incorporate insights from exploration files read in Step 2 +// Task fields use NEW names: convergence.criteria (not acceptance), files[].change (not modification_points), test (not verification) +const tasks = [ + { + id: "TASK-001", + title: "...", + description: "...", + depends_on: [], + convergence: { criteria: ["..."] }, + files: [{ path: "...", change: "..." }], + implementation: ["..."], + test: "..." + }, + // ... 
more tasks +] + +// Step 4: Write task files to .task/ directory +const taskDir = `${sessionFolder}/.task` +Bash(`mkdir -p "${taskDir}"`) +tasks.forEach(task => { + Write(`${taskDir}/${task.id}.json`, JSON.stringify(task, null, 2)) +}) + +// Step 5: Generate plan overview (NO embedded tasks[]) +const plan = { + summary: "...", + approach: "...", + task_ids: tasks.map(t => t.id), + task_count: tasks.length, + complexity: "Low", + estimated_time: "...", + recommended_execution: "Agent", + _metadata: { + timestamp: getUtc8ISOString(), + source: "direct-planning", + planning_mode: "direct", + plan_type: "feature" + } +} + +// Step 6: Write plan overview to session folder +Write(`${sessionFolder}/plan.json`, JSON.stringify(plan, null, 2)) + +// Step 7: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here +``` + +**Medium/High Complexity** - Invoke cli-lite-planning-agent: + +```javascript +Task( + subagent_type="cli-lite-planning-agent", + run_in_background=false, + description="Generate detailed implementation plan", + prompt=` +Generate implementation plan and write plan.json. + +## Output Location + +**Session Folder**: ${sessionFolder} +**Output Files**: +- ${sessionFolder}/planning-context.md (evidence + understanding) +- ${sessionFolder}/plan.json (plan overview -- NO embedded tasks[]) +- ${sessionFolder}/.task/TASK-*.json (independent task files, one per task) + +## Output Schema Reference +Execute: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json (get schema reference before generating plan) + +## Project Context (MANDATORY - Read Both Files) +1. Read: .workflow/project-tech.json (technology stack, architecture, key components) +2. 
Read: .workflow/project-guidelines.json (user-defined constraints and conventions) + +**CRITICAL**: All generated tasks MUST comply with constraints in project-guidelines.json + +## Task Description +${task_description} + +## Multi-Angle Exploration Context + +${manifest.explorations.map(exp => `### Exploration: ${exp.angle} (${exp.file}) +Path: ${exp.path} + +Read this file for detailed ${exp.angle} analysis.`).join('\n\n')} + +Total explorations: ${manifest.exploration_count} +Angles covered: ${manifest.explorations.map(e => e.angle).join(', ')} + +Manifest: ${sessionFolder}/explorations-manifest.json + +## User Clarifications +${JSON.stringify(clarificationContext) || "None"} + +## Complexity Level +${complexity} + +## Requirements +Generate plan.json and .task/*.json following the schema obtained above. Key constraints: +- _metadata.exploration_angles: ${JSON.stringify(manifest.explorations.map(e => e.angle))} + +**Output Format**: Two-layer structure: +- plan.json: Overview with task_ids[] referencing .task/ files (NO tasks[] array) +- .task/TASK-*.json: Independent task files following task-schema.json + +Follow plan-overview-base-schema.json (loaded via cat command above) for plan.json structure. +Follow task-schema.json for .task/TASK-*.json structure. +Note: Use files[].change (not modification_points), convergence.criteria (not acceptance). + +## Task Grouping Rules +1. **Group by feature**: All changes for one feature = one task (even if 3-5 files) +2. **Group by context**: Tasks with similar context or related functional changes can be grouped together +3. **Minimize agent count**: Simple, unrelated tasks can also be grouped to reduce agent execution overhead +4. **Avoid file-per-task**: Do NOT create separate tasks for each file +5. **Substantial tasks**: Each task should represent 15-60 minutes of work +6. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output +7. 
**Prefer parallel**: Most tasks should be independent (no depends_on) + +## Execution +1. Read schema file (cat command above) +2. Execute CLI planning using Gemini (Qwen fallback) +3. Read ALL exploration files for comprehensive context +4. Synthesize findings and generate tasks + plan overview +5. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding) +6. **Create**: \`${sessionFolder}/.task/\` directory (mkdir -p) +7. **Write**: \`${sessionFolder}/.task/TASK-001.json\`, \`TASK-002.json\`, etc. (one per task) +8. **Write**: \`${sessionFolder}/plan.json\` (overview with task_ids[], NO tasks[]) +9. Return brief completion summary +` +) +``` + +**Output**: `${sessionFolder}/plan.json` + +--- + +### Phase 4: Task Confirmation & Execution Selection + +**Step 4.1: Display Plan** +```javascript +const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) + +// Load tasks from .task/ directory +const tasks = (plan.task_ids || []).map(id => { + const taskPath = `${sessionFolder}/.task/${id}.json` + return JSON.parse(Read(taskPath)) +}) +const taskList = tasks + +console.log(` +## Implementation Plan + +**Summary**: ${plan.summary} +**Approach**: ${plan.approach} + +**Tasks** (${taskList.length}): +${taskList.map((t, i) => `${i+1}. 
${t.title} (${t.scope || t.files?.[0]?.path || ''})`).join('\n')} + +**Complexity**: ${plan.complexity} +**Estimated Time**: ${plan.estimated_time} +**Recommended**: ${plan.recommended_execution} +`) +``` + +**Step 4.2: Collect Confirmation** +```javascript +const autoYes = workflowPreferences.autoYes + +let userSelection + +if (autoYes) { + // Auto mode: Use defaults + console.log(`[Auto] Auto-confirming plan:`) + console.log(` - Confirmation: Allow`) + console.log(` - Execution: Auto`) + console.log(` - Review: Skip`) + + userSelection = { + confirmation: "Allow", + execution_method: "Auto", + code_review_tool: "Skip" + } +} else { + // Interactive mode: Ask user + // Note: Execution "Other" option allows specifying CLI tools from ~/.claude/cli-tools.json + userSelection = AskUserQuestion({ + questions: [ + { + question: `Confirm plan? (${taskList.length} tasks, ${plan.complexity})`, + header: "Confirm", + multiSelect: false, + options: [ + { label: "Allow", description: "Proceed as-is" }, + { label: "Modify", description: "Adjust before execution" }, + { label: "Cancel", description: "Abort workflow" } + ] + }, + { + question: "Execution method:", + header: "Execution", + multiSelect: false, + options: [ + { label: "Agent", description: "@code-developer agent" }, + { label: "Codex", description: "codex CLI tool" }, + { label: "Auto", description: `Auto: ${plan.complexity === 'Low' ? 'Agent' : 'Codex'}` } + ] + }, + { + question: "Code review after execution?", + header: "Review", + multiSelect: false, + options: [ + { label: "Gemini Review", description: "Gemini CLI review" }, + { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, + { label: "Agent Review", description: "@code-reviewer agent" }, + { label: "Skip", description: "No review" } + ] + } + ] + }) +} +``` + +--- + +### Phase 5: Handoff to Execution + +**CRITICAL**: lite-plan NEVER executes code directly. ALL execution MUST go through lite-execute. 
+ +**Step 5.1: Build executionContext** + +```javascript +// Load manifest and all exploration files +const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) +const explorations = {} + +manifest.explorations.forEach(exp => { + if (file_exists(exp.path)) { + explorations[exp.angle] = JSON.parse(Read(exp.path)) + } +}) + +const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) + +executionContext = { + planObject: plan, // plan overview (no tasks[]) + taskFiles: (plan.task_ids || []).map(id => ({ + id, + path: `${sessionFolder}/.task/${id}.json` + })), + explorationsContext: explorations, + explorationAngles: manifest.explorations.map(e => e.angle), + explorationManifest: manifest, + clarificationContext: clarificationContext || null, + executionMethod: userSelection.execution_method, // ๅ…จๅฑ€้ป˜่ฎค๏ผŒๅฏ่ขซ executorAssignments ่ฆ†็›– + codeReviewTool: userSelection.code_review_tool, + originalUserInput: task_description, + + // ไปปๅŠก็บง executor ๅˆ†้…๏ผˆไผ˜ๅ…ˆไบŽๅ…จๅฑ€ executionMethod๏ผ‰ + executorAssignments: executorAssignments, // { taskId: { executor, reason } } + + session: { + id: sessionId, + folder: sessionFolder, + artifacts: { + explorations: manifest.explorations.map(exp => ({ + angle: exp.angle, + path: exp.path + })), + explorations_manifest: `${sessionFolder}/explorations-manifest.json`, + plan: `${sessionFolder}/plan.json`, + task_dir: `${sessionFolder}/.task` + } + } +} +``` + +**Step 5.2: Handoff** + +```javascript +// Direct phase handoff: Read and execute Phase 2 (lite-execute) with in-memory context +// No Skill routing needed - executionContext is already set in Step 5.1 +Read("phases/02-lite-execute.md") +// Execute Phase 2 with executionContext (Mode 1: In-Memory Plan) +``` + +## Session Folder Structure + +``` +.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/ +โ”œโ”€โ”€ exploration-{angle1}.json # Exploration angle 1 +โ”œโ”€โ”€ exploration-{angle2}.json # Exploration angle 2 +โ”œโ”€โ”€ exploration-{angle3}.json # 
Exploration angle 3 (if applicable) +โ”œโ”€โ”€ exploration-{angle4}.json # Exploration angle 4 (if applicable) +โ”œโ”€โ”€ explorations-manifest.json # Exploration index +โ”œโ”€โ”€ planning-context.md # Evidence paths + understanding +โ”œโ”€โ”€ plan.json # Plan overview (task_ids[]) +โ””โ”€โ”€ .task/ # Task files directory + โ”œโ”€โ”€ TASK-001.json + โ”œโ”€โ”€ TASK-002.json + โ””โ”€โ”€ ... +``` + +**Example**: +``` +.workflow/.lite-plan/implement-jwt-refresh-2025-11-25-14-30-25/ +โ”œโ”€โ”€ exploration-architecture.json +โ”œโ”€โ”€ exploration-auth-patterns.json +โ”œโ”€โ”€ exploration-security.json +โ”œโ”€โ”€ explorations-manifest.json +โ”œโ”€โ”€ planning-context.md +โ”œโ”€โ”€ plan.json +โ””โ”€โ”€ .task/ + โ”œโ”€โ”€ TASK-001.json + โ”œโ”€โ”€ TASK-002.json + โ””โ”€โ”€ TASK-003.json +``` + +## Error Handling + +| Error | Resolution | +|-------|------------| +| Exploration agent failure | Skip exploration, continue with task description only | +| Planning agent failure | Fallback to direct planning by Claude | +| Clarification timeout | Use exploration findings as-is | +| Confirmation timeout | Save context, display resume instructions | +| Modify loop > 3 times | Suggest breaking task or using /workflow:plan | + +## Next Phase + +After Phase 5 handoff, execution continues in [Phase 2: Lite-Execute](02-lite-execute.md). diff --git a/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md b/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md new file mode 100644 index 00000000..5550dbc1 --- /dev/null +++ b/.claude/skills/workflow-lite-plan copy/phases/02-lite-execute.md @@ -0,0 +1,776 @@ +# Phase 2: Lite-Execute + +Complete execution engine: multi-mode input, task grouping, batch execution, code review, and development index update. + +--- + +## Overview + +Flexible task execution command supporting three input modes: in-memory plan (from lite-plan), direct prompt description, or file content. 
Handles execution orchestration, progress tracking, and optional code review. + +**Core capabilities:** +- Multi-mode input (in-memory plan, prompt description, or file path) +- Execution orchestration (Agent or Codex) with full context +- Live progress tracking via TodoWrite at execution call level +- Optional code review with selected tool (Gemini, Agent, or custom) +- Context continuity across multiple executions +- Intelligent format detection (Enhanced Task JSON vs plain text) + +## Usage + +### Input +``` + Task description string, or path to file (required) +``` + +Mode 1 (In-Memory) is triggered by lite-plan direct handoff when `executionContext` is available. +Workflow preferences (`autoYes`) are passed from SKILL.md via `workflowPreferences` context variable. + +## Input Modes + +### Mode 1: In-Memory Plan + +**Trigger**: Called by lite-plan direct handoff after Phase 4 approval (executionContext available) + +**Input Source**: `executionContext` global variable set by lite-plan + +**Content**: Complete execution context (see Data Structures section) + +**Behavior**: +- Skip execution method selection (already set by lite-plan) +- Directly proceed to execution with full context +- All planning artifacts available (exploration, clarifications, plan) + +### Mode 2: Prompt Description + +**Trigger**: User calls with task description string + +**Input**: Simple task description (e.g., "Add unit tests for auth module") + +**Behavior**: +- Store prompt as `originalUserInput` +- Create simple execution plan from prompt +- AskUserQuestion: Select execution method (Agent/Codex/Auto) +- AskUserQuestion: Select code review tool (Skip/Gemini/Agent/Other) +- Proceed to execution with `originalUserInput` included + +**User Interaction**: +```javascript +const autoYes = workflowPreferences.autoYes + +let userSelection + +if (autoYes) { + // Auto mode: Use defaults + console.log(`[Auto] Auto-confirming execution:`) + console.log(` - Execution method: Auto`) + 
console.log(` - Code review: Skip`) + + userSelection = { + execution_method: "Auto", + code_review_tool: "Skip" + } +} else { + // Interactive mode: Ask user + userSelection = AskUserQuestion({ + questions: [ + { + question: "Select execution method:", + header: "Execution", + multiSelect: false, + options: [ + { label: "Agent", description: "@code-developer agent" }, + { label: "Codex", description: "codex CLI tool" }, + { label: "Auto", description: "Auto-select based on complexity" } + ] + }, + { + question: "Enable code review after execution?", + header: "Code Review", + multiSelect: false, + options: [ + { label: "Skip", description: "No review" }, + { label: "Gemini Review", description: "Gemini CLI tool" }, + { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, + { label: "Agent Review", description: "Current agent review" } + ] + } + ] + }) +} +``` + +### Mode 3: File Content + +**Trigger**: User calls with file path + +**Input**: Path to file containing task description or plan.json + +**Step 1: Read and Detect Format** + +```javascript +fileContent = Read(filePath) + +// Attempt JSON parsing +try { + jsonData = JSON.parse(fileContent) + + // Check if plan.json from lite-plan session (two-layer format: task_ids[]) + if (jsonData.summary && jsonData.approach && jsonData.task_ids) { + planObject = jsonData + originalUserInput = jsonData.summary + isPlanJson = true + + // Load tasks from .task/*.json files + const planDir = filePath.replace(/[/\\][^/\\]+$/, '') // parent directory + planObject._loadedTasks = loadTaskFiles(planDir, jsonData.task_ids) + } else { + // Valid JSON but not plan.json - treat as plain text + originalUserInput = fileContent + isPlanJson = false + } +} catch { + // Not valid JSON - treat as plain text prompt + originalUserInput = fileContent + isPlanJson = false +} +``` + +**Step 2: Create Execution Plan** + +If `isPlanJson === true`: +- Use `planObject` directly +- User selects execution method and 
code review + +If `isPlanJson === false`: +- Treat file content as prompt (same behavior as Mode 2) +- Create simple execution plan from content + +**Step 3: User Interaction** + +- AskUserQuestion: Select execution method (Agent/Codex/Auto) +- AskUserQuestion: Select code review tool +- Proceed to execution with full context + +## Helper Functions + +```javascript +// Load task files from .task/ directory (two-layer format) +function loadTaskFiles(planDir, taskIds) { + return taskIds.map(id => { + const taskPath = `${planDir}/.task/${id}.json` + return JSON.parse(Read(taskPath)) + }) +} + +// Get tasks array from loaded .task/*.json files +function getTasks(planObject) { + return planObject._loadedTasks || [] +} +``` + +## Execution Process + +``` +Input Parsing: + โ””โ”€ Decision (mode detection): + โ”œโ”€ executionContext exists โ†’ Mode 1: Load executionContext โ†’ Skip user selection + โ”œโ”€ Ends with .md/.json/.txt โ†’ Mode 3: Read file โ†’ Detect format + โ”‚ โ”œโ”€ Valid plan.json โ†’ Use planObject โ†’ User selects method + review + โ”‚ โ””โ”€ Not plan.json โ†’ Treat as prompt โ†’ User selects method + review + โ””โ”€ Other โ†’ Mode 2: Prompt description โ†’ User selects method + review + +Execution: + โ”œโ”€ Step 1: Initialize result tracking (previousExecutionResults = []) + โ”œโ”€ Step 2: Task grouping & batch creation + โ”‚ โ”œโ”€ Extract explicit depends_on (no file/keyword inference) + โ”‚ โ”œโ”€ Group: independent tasks โ†’ per-executor parallel batches (one CLI per batch) + โ”‚ โ”œโ”€ Group: dependent tasks โ†’ sequential phases (respect dependencies) + โ”‚ โ””โ”€ Create TodoWrite list for batches + โ”œโ”€ Step 3: Launch execution + โ”‚ โ”œโ”€ Phase 1: Independent tasks (โšก per-executor batches, multi-CLI concurrent) + โ”‚ โ””โ”€ Phase 2+: Dependent tasks by dependency order + โ”œโ”€ Step 4: Track progress (TodoWrite updates per batch) + โ””โ”€ Step 5: Code review (if codeReviewTool โ‰  "Skip") + +Output: + โ””โ”€ Execution complete with results 
in previousExecutionResults[]
+```
+
+## Detailed Execution Steps
+
+### Step 1: Initialize Execution Tracking
+
+**Operations**:
+- Initialize result tracking for multi-execution scenarios
+- Set up `previousExecutionResults` array for context continuity
+- **In-Memory Mode**: Echo execution strategy from lite-plan for transparency
+
+```javascript
+// Initialize result tracking
+previousExecutionResults = []
+
+// In-Memory Mode: Echo execution strategy (transparency before execution)
+if (executionContext) {
+  console.log(`
+📋 Execution Strategy (from lite-plan):
+  Method: ${executionContext.executionMethod}
+  Review: ${executionContext.codeReviewTool}
+  Tasks: ${getTasks(executionContext.planObject).length}
+  Complexity: ${executionContext.planObject.complexity}
+${executionContext.executorAssignments ? `  Assignments: ${JSON.stringify(executionContext.executorAssignments)}` : ''}
+  `)
+}
+```
+
+### Step 2: Task Grouping & Batch Creation
+
+**Dependency Analysis & Grouping Algorithm**:
+```javascript
+// Use explicit depends_on from plan.json (no inference from file/keywords)
+function extractDependencies(tasks) {
+  const taskIdToIndex = {}
+  tasks.forEach((t, i) => { taskIdToIndex[t.id] = i })
+
+  return tasks.map((task, i) => {
+    // Only use explicit depends_on from plan.json
+    const deps = (task.depends_on || [])
+      .map(depId => taskIdToIndex[depId])
+      .filter(idx => idx !== undefined && idx < i)
+    return { ...task, taskIndex: i, dependencies: deps }
+  })
+}
+
+// Executor Resolution (used by task grouping below)
+// 获取任务的 executor（优先使用 executorAssignments，fallback 到全局 executionMethod）
+function getTaskExecutor(task) {
+  const assignments = executionContext?.executorAssignments || {}
+  if (assignments[task.id]) {
+    return assignments[task.id].executor // 'gemini' | 'codex' | 'agent'
+  }
+  // Fallback: 全局 executionMethod 映射
+  const method = executionContext?.executionMethod || 'Auto'
+  if (method === 'Agent') return
'agent' + if (method === 'Codex') return 'codex' + // Auto: ๆ นๆฎๅคๆ‚ๅบฆ + return planObject.complexity === 'Low' ? 'agent' : 'codex' +} + +// ๆŒ‰ executor ๅˆ†็ป„ไปปๅŠก๏ผˆๆ ธๅฟƒๅˆ†็ป„็ป„ไปถ๏ผ‰ +function groupTasksByExecutor(tasks) { + const groups = { gemini: [], codex: [], agent: [] } + tasks.forEach(task => { + const executor = getTaskExecutor(task) + groups[executor].push(task) + }) + return groups +} + +// Group into batches: per-executor parallel batches (one CLI per batch) +function createExecutionCalls(tasks, executionMethod) { + const tasksWithDeps = extractDependencies(tasks) + const processed = new Set() + const calls = [] + + // Phase 1: Independent tasks โ†’ per-executor batches (multi-CLI concurrent) + const independentTasks = tasksWithDeps.filter(t => t.dependencies.length === 0) + if (independentTasks.length > 0) { + const executorGroups = groupTasksByExecutor(independentTasks) + let parallelIndex = 1 + + for (const [executor, tasks] of Object.entries(executorGroups)) { + if (tasks.length === 0) continue + tasks.forEach(t => processed.add(t.taskIndex)) + calls.push({ + method: executionMethod, + executor: executor, // ๆ˜Ž็กฎๆŒ‡ๅฎš executor + executionType: "parallel", + groupId: `P${parallelIndex++}`, + taskSummary: tasks.map(t => t.title).join(' | '), + tasks: tasks + }) + } + } + + // Phase 2: Dependent tasks โ†’ sequential/parallel batches (respect dependencies) + let sequentialIndex = 1 + let remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex)) + + while (remaining.length > 0) { + // Find tasks whose dependencies are all satisfied + const ready = remaining.filter(t => + t.dependencies.every(d => processed.has(d)) + ) + + if (ready.length === 0) { + console.warn('Circular dependency detected, forcing remaining tasks') + ready.push(...remaining) + } + + if (ready.length > 1) { + // Multiple ready tasks โ†’ per-executor batches (parallel within this phase) + const executorGroups = groupTasksByExecutor(ready) + for (const [executor, 
tasks] of Object.entries(executorGroups)) { + if (tasks.length === 0) continue + tasks.forEach(t => processed.add(t.taskIndex)) + calls.push({ + method: executionMethod, + executor: executor, + executionType: "parallel", + groupId: `P${calls.length + 1}`, + taskSummary: tasks.map(t => t.title).join(' | '), + tasks: tasks + }) + } + } else { + // Single ready task โ†’ sequential batch + ready.forEach(t => processed.add(t.taskIndex)) + calls.push({ + method: executionMethod, + executor: getTaskExecutor(ready[0]), + executionType: "sequential", + groupId: `S${sequentialIndex++}`, + taskSummary: ready[0].title, + tasks: ready + }) + } + + remaining = remaining.filter(t => !processed.has(t.taskIndex)) + } + + return calls +} + +executionCalls = createExecutionCalls(getTasks(planObject), executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` })) + +TodoWrite({ + todos: executionCalls.map(c => ({ + content: `${c.executionType === "parallel" ? "โšก" : "โ†’"} ${c.id} (${c.tasks.length} tasks)`, + status: "pending", + activeForm: `Executing ${c.id}` + })) +}) +``` + +### Step 3: Launch Execution + +**Executor Resolution**: `getTaskExecutor()` and `groupTasksByExecutor()` defined in Step 2 (Task Grouping). 
+
+**Batch Execution Routing** (根据 batch.executor 字段路由):
+```javascript
+// executeBatch 根据 batch 自身的 executor 字段决定调用哪个 CLI
+function executeBatch(batch) {
+  const executor = batch.executor || getTaskExecutor(batch.tasks[0])
+  const sessionId = executionContext?.session?.id || 'standalone'
+  const fixedId = `${sessionId}-${batch.groupId}`
+
+  if (executor === 'agent') {
+    // Agent execution (synchronous)
+    return Task({
+      subagent_type: "code-developer",
+      run_in_background: false,
+      description: batch.taskSummary,
+      prompt: buildExecutionPrompt(batch)
+    })
+  } else if (executor === 'codex') {
+    // Codex CLI (background)
+    return Bash(`ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedId}`, { run_in_background: true })
+  } else if (executor === 'gemini') {
+    // Gemini CLI (background)
+    return Bash(`ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode write --id ${fixedId}`, { run_in_background: true })
+  }
+}
+```
+
+**并行执行原则**:
+- 每个 batch 对应一个独立的 CLI 实例或 Agent 调用
+- 并行 = 多个 Bash(run_in_background=true) 或多个 Task() 同时发出
+- 绝不将多个独立任务合并到同一个 CLI prompt 中
+- Agent 任务不可后台执行（run_in_background=false），但多个 Agent 任务可通过单条消息中的多个 Task() 调用并发
+
+**Execution Flow**: Parallel batches concurrently → Sequential batches in order
+```javascript
+const parallel = executionCalls.filter(c => c.executionType === "parallel")
+const sequential = executionCalls.filter(c => c.executionType === "sequential")
+
+// Phase 1: Launch all parallel batches (single message with multiple tool calls)
+if (parallel.length > 0) {
+  TodoWrite({ todos: executionCalls.map(c => ({ status: c.executionType === "parallel" ?
"in_progress" : "pending" })) }) + parallelResults = await Promise.all(parallel.map(c => executeBatch(c))) + previousExecutionResults.push(...parallelResults) + TodoWrite({ todos: executionCalls.map(c => ({ status: parallel.includes(c) ? "completed" : "pending" })) }) +} + +// Phase 2: Execute sequential batches one by one +for (const call of sequential) { + TodoWrite({ todos: executionCalls.map(c => ({ status: c === call ? "in_progress" : "..." })) }) + result = await executeBatch(call) + previousExecutionResults.push(result) + TodoWrite({ todos: executionCalls.map(c => ({ status: "completed" or "pending" })) }) +} +``` + +### Unified Task Prompt Builder + +**Task Formatting Principle**: Each task is a self-contained checklist. The executor only needs to know what THIS task requires. Same template for Agent and CLI. + +```javascript +function buildExecutionPrompt(batch) { + // Task template (6 parts: Files โ†’ Why โ†’ How โ†’ Reference โ†’ Risks โ†’ Done) + const formatTask = (t) => ` +## ${t.title} + +**Scope**: \`${t.scope}\` | **Action**: ${t.action} + +### Files +${(t.files || []).map(f => `- **${f.path}** โ†’ \`${f.target || ''}\`: ${f.change || (f.changes || []).join(', ') || ''}`).join('\n')} + +${t.rationale ? ` +### Why this approach (Medium/High) +${t.rationale.chosen_approach} +${t.rationale.decision_factors?.length > 0 ? `\nKey factors: ${t.rationale.decision_factors.join(', ')}` : ''} +${t.rationale.tradeoffs ? `\nTradeoffs: ${t.rationale.tradeoffs}` : ''} +` : ''} + +### How to do it +${t.description} + +${t.implementation.map(step => `- ${step}`).join('\n')} + +${t.code_skeleton ? ` +### Code skeleton (High) +${t.code_skeleton.interfaces?.length > 0 ? `**Interfaces**: ${t.code_skeleton.interfaces.map(i => `\`${i.name}\` - ${i.purpose}`).join(', ')}` : ''} +${t.code_skeleton.key_functions?.length > 0 ? 
`\n**Functions**: ${t.code_skeleton.key_functions.map(f => `\`${f.signature}\` - ${f.purpose}`).join(', ')}` : ''} +${t.code_skeleton.classes?.length > 0 ? `\n**Classes**: ${t.code_skeleton.classes.map(c => `\`${c.name}\` - ${c.purpose}`).join(', ')}` : ''} +` : ''} + +### Reference +- Pattern: ${t.reference?.pattern || 'N/A'} +- Files: ${t.reference?.files?.join(', ') || 'N/A'} +${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''} + +${t.risks?.length > 0 ? ` +### Risk mitigations (High) +${t.risks.map(r => `- ${r.description} โ†’ **${r.mitigation}**`).join('\n')} +` : ''} + +### Done when +${(t.convergence?.criteria || []).map(c => `- [ ] ${c}`).join('\n')} +${(t.test?.success_metrics || []).length > 0 ? `\n**Success metrics**: ${t.test.success_metrics.join(', ')}` : ''}` + + // Build prompt + const sections = [] + + if (originalUserInput) sections.push(`## Goal\n${originalUserInput}`) + + sections.push(`## Tasks\n${batch.tasks.map(formatTask).join('\n\n---\n')}`) + + // Context (reference only) + const context = [] + if (previousExecutionResults.length > 0) { + context.push(`### Previous Work\n${previousExecutionResults.map(r => `- ${r.tasksSummary}: ${r.status}`).join('\n')}`) + } + if (clarificationContext) { + context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`) + } + if (executionContext?.planObject?.data_flow?.diagram) { + context.push(`### Data Flow\n${executionContext.planObject.data_flow.diagram}`) + } + if (executionContext?.session?.artifacts?.plan) { + context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`) + } + // Project guidelines (user-defined constraints from /workflow:session:solidify) + context.push(`### Project Guidelines\n@.workflow/project-guidelines.json`) + if (context.length > 0) sections.push(`## Context\n${context.join('\n\n')}`) + + sections.push(`Complete each task according to its "Done when" checklist.`) + + return 
sections.join('\n\n') +} +``` + +**Option A: Agent Execution** + +When to use: +- `getTaskExecutor(task) === "agent"` +- ๆˆ– `executionMethod = "Agent"` (ๅ…จๅฑ€ fallback) +- ๆˆ– `executionMethod = "Auto" AND complexity = "Low"` (ๅ…จๅฑ€ fallback) + +```javascript +Task( + subagent_type="code-developer", + run_in_background=false, + description=batch.taskSummary, + prompt=buildExecutionPrompt(batch) +) +``` + +**Result Collection**: After completion, collect result following `executionResult` structure (see Data Structures section) + +**Option B: CLI Execution (Codex)** + +When to use: +- `getTaskExecutor(task) === "codex"` +- ๆˆ– `executionMethod = "Codex"` (ๅ…จๅฑ€ fallback) +- ๆˆ– `executionMethod = "Auto" AND complexity = "Medium/High"` (ๅ…จๅฑ€ fallback) + +```bash +ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write +``` + +**Execution with fixed IDs** (predictable ID pattern): +```javascript +// Launch CLI in background, wait for task hook callback +// Generate fixed execution ID: ${sessionId}-${groupId} +const sessionId = executionContext?.session?.id || 'standalone' +const fixedExecutionId = `${sessionId}-${batch.groupId}` // e.g., "implement-auth-2025-12-13-P1" + +// Check if resuming from previous failed execution +const previousCliId = batch.resumeFromCliId || null + +// Build command with fixed ID (and optional resume for continuation) +const cli_command = previousCliId + ? 
`ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId} --resume ${previousCliId}` + : `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId}` + +// Execute in background, stop output and wait for task hook callback +Bash( + command=cli_command, + run_in_background=true +) +// STOP HERE - CLI executes in background, task hook will notify on completion +``` + +**Resume on Failure** (with fixed ID): +```javascript +// If execution failed or timed out, offer resume option +if (bash_result.status === 'failed' || bash_result.status === 'timeout') { + console.log(` +โš ๏ธ Execution incomplete. Resume available: + Fixed ID: ${fixedExecutionId} + Lookup: ccw cli detail ${fixedExecutionId} + Resume: ccw cli -p "Continue tasks" --resume ${fixedExecutionId} --tool codex --mode write --id ${fixedExecutionId}-retry +`) + + // Store for potential retry in same session + batch.resumeFromCliId = fixedExecutionId +} +``` + +**Result Collection**: After completion, analyze output and collect result following `executionResult` structure (include `cliExecutionId` for resume capability) + +**Option C: CLI Execution (Gemini)** + +When to use: `getTaskExecutor(task) === "gemini"` (ๅˆ†ๆž็ฑปไปปๅŠก) + +```bash +# ไฝฟ็”จ็ปŸไธ€็š„ buildExecutionPrompt๏ผŒๅˆ‡ๆข tool ๅ’Œ mode +ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode analysis --id ${sessionId}-${batch.groupId} +``` + +### Step 4: Progress Tracking + +Progress tracked at batch level (not individual task level). 
Icons: โšก (parallel, concurrent), โ†’ (sequential, one-by-one) + +### Step 5: Code Review (Optional) + +**Skip Condition**: Only run if `codeReviewTool โ‰  "Skip"` + +**Review Focus**: Verify implementation against plan convergence criteria and test requirements +- Read plan.json + .task/*.json for task convergence criteria and test checklist +- Check each convergence criterion is fulfilled +- Verify success metrics from test field (Medium/High complexity) +- Run unit/integration tests specified in test field +- Validate code quality and identify issues +- Ensure alignment with planned approach and risk mitigations + +**Operations**: +- Agent Review: Current agent performs direct review +- Gemini Review: Execute gemini CLI with review prompt +- Codex Review: Two options - (A) with prompt for complex reviews, (B) `--uncommitted` flag only for quick reviews +- Custom tool: Execute specified CLI tool (qwen, etc.) + +**Unified Review Template** (All tools use same standard): + +**Review Criteria**: +- **Convergence Criteria**: Verify each criterion from task convergence.criteria +- **Test Checklist** (Medium/High): Check unit, integration, success_metrics from task test +- **Code Quality**: Analyze quality, identify issues, suggest improvements +- **Plan Alignment**: Validate implementation matches planned approach and risk mitigations + +**Shared Prompt Template** (used by all CLI tools): +``` +PURPOSE: Code review for implemented changes against plan convergence criteria and test requirements +TASK: โ€ข Verify plan convergence criteria fulfillment โ€ข Check test requirements (unit, integration, success_metrics) โ€ข Analyze code quality โ€ข Identify issues โ€ข Suggest improvements โ€ข Validate plan adherence and risk mitigations +MODE: analysis +CONTEXT: @**/* @{plan.json} @{.task/*.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including test checklist +EXPECTED: Quality assessment with: + - Convergence criteria 
verification (all tasks from .task/*.json) + - Test checklist validation (Medium/High: unit, integration, success_metrics) + - Issue identification + - Recommendations + Explicitly check each convergence criterion and test item from .task/*.json files. +CONSTRAINTS: Focus on plan convergence criteria, test requirements, and plan adherence | analysis=READ-ONLY +``` + +**Tool-Specific Execution** (Apply shared prompt template above): + +```bash +# Method 1: Agent Review (current agent) +# - Read plan.json: ${executionContext.session.artifacts.plan} +# - Apply unified review criteria (see Shared Prompt Template) +# - Report findings directly + +# Method 2: Gemini Review (recommended) +ccw cli -p "[Shared Prompt Template with artifacts]" --tool gemini --mode analysis +# CONTEXT includes: @**/* @${plan.json} [@${exploration.json}] + +# Method 3: Qwen Review (alternative) +ccw cli -p "[Shared Prompt Template with artifacts]" --tool qwen --mode analysis +# Same prompt as Gemini, different execution engine + +# Method 4: Codex Review (git-aware) - Two mutually exclusive options: + +# Option A: With custom prompt (reviews uncommitted by default) +ccw cli -p "[Shared Prompt Template with artifacts]" --tool codex --mode review +# Use for complex reviews with specific focus areas + +# Option B: Target flag only (no prompt allowed) +ccw cli --tool codex --mode review --uncommitted +# Quick review of uncommitted changes without custom instructions + +# โš ๏ธ IMPORTANT: -p prompt and target flags (--uncommitted/--base/--commit) are MUTUALLY EXCLUSIVE +``` + +**Multi-Round Review with Fixed IDs**: +```javascript +// Generate fixed review ID +const reviewId = `${sessionId}-review` + +// First review pass with fixed ID +const reviewResult = Bash(`ccw cli -p "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`) + +// If issues found, continue review dialog with fixed ID chain +if (hasUnresolvedIssues(reviewResult)) { + // Resume with follow-up questions + Bash(`ccw cli 
-p "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`) +} +``` + +**Implementation Note**: Replace `[Shared Prompt Template with artifacts]` placeholder with actual template content, substituting: +- `@{plan.json}` โ†’ `@${executionContext.session.artifacts.plan}` +- `[@{exploration.json}]` โ†’ exploration files from artifacts (if exists) + +### Step 6: Auto-Sync Project State + +**Trigger**: After all executions complete (regardless of code review) + +**Operation**: Execute `/workflow:session:sync -y "{summary}"` to update both `project-guidelines.json` and `project-tech.json` in one shot. + +Summary ๅ–ๅ€ผไผ˜ๅ…ˆ็บง๏ผš`originalUserInput` โ†’ `planObject.summary` โ†’ git log ่‡ชๅŠจๆŽจๆ–ญใ€‚ + +## Best Practices + +**Input Modes**: In-memory (lite-plan), prompt (standalone), file (JSON/text) +**Task Grouping**: Based on explicit depends_on only; independent tasks split by executor, each batch runs as separate CLI instance +**Execution**: Independent task batches launch concurrently via single Claude message with multiple tool calls (one tool call per batch) + +## Error Handling + +| Error | Cause | Resolution | +|-------|-------|------------| +| Missing executionContext | In-memory mode without context | Error: "No execution context found. Only available when called by lite-plan." | +| File not found | File path doesn't exist | Error: "File not found: {path}. Check file path." | +| Empty file | File exists but no content | Error: "File is empty: {path}. Provide task description." | +| Invalid Enhanced Task JSON | JSON missing required fields | Warning: "Missing required fields. Treating as plain text." 
| +| Malformed JSON | JSON parsing fails | Treat as plain text (expected for non-JSON files) | +| Execution failure | Agent/Codex crashes | Display error, use fixed ID `${sessionId}-${groupId}` for resume: `ccw cli -p "Continue" --resume ${sessionId}-${groupId} --tool codex --mode write --id ${sessionId}-${groupId}-retry` | +| Execution timeout | CLI exceeded timeout | Use fixed ID for resume with extended timeout | +| Codex unavailable | Codex not installed | Show installation instructions, offer Agent execution | +| Fixed ID not found | Custom ID lookup failed | Check `ccw cli history`, verify date directories | + +## Data Structures + +### executionContext (Input - Mode 1) + +Passed from lite-plan via global variable: + +```javascript +{ + planObject: { + summary: string, + approach: string, + task_ids: string[], // Task IDs referencing .task/*.json files + task_count: number, // Number of tasks + _loadedTasks: [...], // Populated at runtime from .task/*.json files + estimated_time: string, + recommended_execution: string, + complexity: string + }, + // Task file paths (populated for two-layer format) + taskFiles: [{id: string, path: string}] | null, + explorationsContext: {...} | null, // Multi-angle explorations + explorationAngles: string[], // List of exploration angles + explorationManifest: {...} | null, // Exploration manifest + clarificationContext: {...} | null, + executionMethod: "Agent" | "Codex" | "Auto", // ๅ…จๅฑ€้ป˜่ฎค + codeReviewTool: "Skip" | "Gemini Review" | "Agent Review" | string, + originalUserInput: string, + + // ไปปๅŠก็บง executor ๅˆ†้…๏ผˆไผ˜ๅ…ˆไบŽ executionMethod๏ผ‰ + executorAssignments: { + [taskId]: { executor: "gemini" | "codex" | "agent", reason: string } + }, + + // Session artifacts location (saved by lite-plan) + session: { + id: string, // Session identifier: {taskSlug}-{shortTimestamp} + folder: string, // Session folder path: .workflow/.lite-plan/{session-id} + artifacts: { + explorations: [{angle, path}], // exploration-{angle}.json paths + explorations_manifest: string, // 
explorations-manifest.json path + plan: string // plan.json path (always present) + } + } +} +``` + +**Artifact Usage**: +- Artifact files contain detailed planning context +- Pass artifact paths to CLI tools and agents for enhanced context +- See execution options below for usage examples + +### executionResult (Output) + +Collected after each execution call completes: + +```javascript +{ + executionId: string, // e.g., "[Agent-1]", "[Codex-1]" + status: "completed" | "partial" | "failed", + tasksSummary: string, // Brief description of tasks handled + completionSummary: string, // What was completed + keyOutputs: string, // Files created/modified, key changes + notes: string, // Important context for next execution + fixedCliId: string | null // Fixed CLI execution ID (e.g., "implement-auth-2025-12-13-P1") +} +``` + +Appended to `previousExecutionResults` array for context continuity in multi-execution scenarios. + +## Post-Completion Expansion + +**Auto-sync**: ๆ‰ง่กŒ `/workflow:session:sync -y "{summary}"` ๆ›ดๆ–ฐ project-guidelines + project-tech๏ผˆStep 6 ๅทฒ่งฆๅ‘๏ผŒๆญคๅค„ไธ้‡ๅค๏ผ‰ใ€‚ + +ๅฎŒๆˆๅŽ่ฏข้—ฎ็”จๆˆทๆ˜ฏๅฆๆ‰ฉๅฑ•ไธบissue(test/enhance/refactor/doc)๏ผŒ้€‰ไธญ้กน่ฐƒ็”จ `/issue:new "{summary} - {dimension}"` + +**Fixed ID Pattern**: `${sessionId}-${groupId}` enables predictable lookup without auto-generated timestamps. 
+ +**Resume Usage**: If `status` is "partial" or "failed", use `fixedCliId` to resume: +```bash +# Lookup previous execution +ccw cli detail ${fixedCliId} + +# Resume with new fixed ID for retry +ccw cli -p "Continue from where we left off" --resume ${fixedCliId} --tool codex --mode write --id ${fixedCliId}-retry +``` diff --git a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md b/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md index 1469bfba..75222292 100644 --- a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md +++ b/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md @@ -714,13 +714,55 @@ executionContext = { } ``` -**Step 5.2: Handoff** +**Step 5.2: Serialize & Agent Handoff** + +> **Why agent handoff**: Phase 1 history consumes significant context. Direct `Read("phases/02-lite-execute.md")` in the same context risks compact compressing Phase 2 instructions mid-execution. Spawning a fresh agent gives Phase 2 a clean context window. ```javascript -// Direct phase handoff: Read and execute Phase 2 (lite-execute) with in-memory context -// No Skill routing needed - executionContext is already set in Step 5.1 -Read("phases/02-lite-execute.md") -// Execute Phase 2 with executionContext (Mode 1: In-Memory Plan) +// Pre-populate _loadedTasks so serialized context is self-contained +executionContext.planObject._loadedTasks = (executionContext.planObject.task_ids || []).map(id => + JSON.parse(Read(`${sessionFolder}/.task/${id}.json`)) +) + +// Save executionContext to file for agent handoff +Write(`${sessionFolder}/execution-context.json`, JSON.stringify(executionContext, null, 2)) + +// Resolve absolute path to Phase 2 instructions +const phaseFile = Bash(`cd "${Bash('pwd').trim()}/.claude/skills/workflow-lite-plan/phases" && pwd`).trim() + + '/02-lite-execute.md' + +// Agent handoff: fresh context prevents compact from losing Phase 2 instructions +Task( + subagent_type="universal-executor", + run_in_background=false, + 
description=`Execute: ${taskSlug}`, + prompt=` +Execute implementation plan following lite-execute protocol. + +## Phase Instructions (MUST read first) +Read and follow: ${phaseFile} + +## Execution Context (Mode 1: In-Memory Plan) +Read and parse as JSON: ${sessionFolder}/execution-context.json +This is the executionContext variable referenced throughout Phase 2. +The planObject._loadedTasks array is pre-populated โ€” getTasks(planObject) works directly. + +## Key References +- Session ID: ${sessionId} +- Session folder: ${sessionFolder} +- Plan: ${sessionFolder}/plan.json +- Task files: ${sessionFolder}/.task/TASK-*.json +- Original task: ${task_description} + +## Execution Steps +1. Read phase instructions file (full protocol) +2. Read execution-context.json โ†’ parse as executionContext +3. Follow Phase 2 Mode 1 (In-Memory Plan) โ€” executionContext exists, skip user selection +4. Execute all tasks (Step 1-4 in Phase 2) +5. Run code review if codeReviewTool โ‰  "Skip" (Step 5) +6. Run auto-sync (Step 6) +` +) ``` ## Session Folder Structure diff --git a/.claude/skills_lib/team-lifecycle-v2/SKILL.md b/.claude/skills_lib/team-lifecycle-v2/SKILL.md new file mode 100644 index 00000000..c23b4ebb --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/SKILL.md @@ -0,0 +1,574 @@ +--- +name: team-lifecycle-v2 +description: Unified team skill for full lifecycle - spec/impl/test. All roles invoke this skill with --role arg for role-specific execution. Triggers on "team lifecycle". +allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), TodoWrite(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*) +--- + +# Team Lifecycle + +Unified team skill covering specification, implementation, testing, and review. All team members invoke this skill with `--role=xxx` to route to role-specific execution. 
+ +## Architecture Overview + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Skill(skill="team-lifecycle-v2") โ”‚ +โ”‚ args="ไปปๅŠกๆ่ฟฐ" ๆˆ– args="--role=xxx" โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Role Router + โ”‚ + โ”Œโ”€โ”€โ”€โ”€ --role present? โ”€โ”€โ”€โ”€โ” + โ”‚ NO โ”‚ YES + โ†“ โ†“ + Orchestration Mode Role Dispatch + (auto โ†’ coordinator) (route to role.md) + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚coordinatorโ”‚โ”‚analystโ”‚โ”‚writerโ”‚โ”‚discussantโ”‚โ”‚plannerโ”‚โ”‚executorโ”‚โ”‚testerโ”‚โ”‚reviewerโ”‚ +โ”‚ roles/ โ”‚โ”‚roles/ โ”‚โ”‚roles/โ”‚โ”‚ roles/ โ”‚โ”‚roles/ โ”‚โ”‚ roles/ โ”‚โ”‚roles/โ”‚โ”‚ roles/ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†‘ โ†‘ + on-demand by coordinator + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ explorer โ”‚ โ”‚architectโ”‚ + โ”‚ (service)โ”‚ โ”‚(consult)โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Command Architecture + +Each role is organized as a folder with a `role.md` orchestrator and optional `commands/` for delegation: + +``` +roles/ +โ”œโ”€โ”€ coordinator/ +โ”‚ โ”œโ”€โ”€ role.md 
# Orchestrator (Phase 1/5 inline, Phase 2-4 delegate) +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ”œโ”€โ”€ dispatch.md # Task chain creation (3 modes) +โ”‚ โ””โ”€โ”€ monitor.md # Coordination loop + message routing +โ”œโ”€โ”€ analyst/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”œโ”€โ”€ writer/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ generate-doc.md # Multi-CLI document generation (4 doc types) +โ”œโ”€โ”€ discussant/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ critique.md # Multi-perspective CLI critique +โ”œโ”€โ”€ planner/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ explore.md # Multi-angle codebase exploration +โ”œโ”€โ”€ executor/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ implement.md # Multi-backend code implementation +โ”œโ”€โ”€ tester/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ validate.md # Test-fix cycle +โ”œโ”€โ”€ reviewer/ +โ”‚ โ”œโ”€โ”€ role.md +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ”œโ”€โ”€ code-review.md # 4-dimension code review +โ”‚ โ””โ”€โ”€ spec-quality.md # 5-dimension spec quality check +โ”œโ”€โ”€ explorer/ # Service role (on-demand) +โ”‚ โ””โ”€โ”€ role.md # Multi-strategy code search & pattern discovery +โ”œโ”€โ”€ architect/ # Consulting role (on-demand) +โ”‚ โ”œโ”€โ”€ role.md # Multi-mode architecture assessment +โ”‚ โ””โ”€โ”€ commands/ +โ”‚ โ””โ”€โ”€ assess.md # Mode-specific assessment strategies +โ”œโ”€โ”€ fe-developer/ # Frontend pipeline role +โ”‚ โ””โ”€โ”€ role.md # Frontend component/page implementation +โ””โ”€โ”€ fe-qa/ # Frontend pipeline role + โ”œโ”€โ”€ role.md # 5-dimension frontend QA + GC loop + โ””โ”€โ”€ commands/ + โ””โ”€โ”€ pre-delivery-checklist.md +``` + +**Design principle**: role.md keeps Phase 1 (Task Discovery) and Phase 5 (Report) inline. Phases 2-4 either stay inline (simple logic) or delegate to `commands/*.md` via `Read("commands/xxx.md")` when they involve subagent delegation, CLI fan-out, or complex strategies. 
+ +**Command files** are self-contained: each includes Strategy, Execution Steps, and Error Handling. Any subagent can `Read()` a command file and execute it independently. + +## Role Router + +### Input Parsing + +Parse `$ARGUMENTS` to extract `--role`: + +```javascript +const args = "$ARGUMENTS" +const roleMatch = args.match(/--role[=\s]+(\w+)/) +const teamName = args.match(/--team[=\s]+([\w-]+)/)?.[1] || "lifecycle" + +if (!roleMatch) { + // No --role: Orchestration Mode โ†’ auto route to coordinator + // See "Orchestration Mode" section below +} + +const role = roleMatch ? roleMatch[1] : "coordinator" +``` + +### Role Dispatch + +```javascript +const VALID_ROLES = { + "coordinator": { file: "roles/coordinator/role.md", prefix: null }, + "analyst": { file: "roles/analyst/role.md", prefix: "RESEARCH" }, + "writer": { file: "roles/writer/role.md", prefix: "DRAFT" }, + "discussant": { file: "roles/discussant/role.md", prefix: "DISCUSS" }, + "planner": { file: "roles/planner/role.md", prefix: "PLAN" }, + "executor": { file: "roles/executor/role.md", prefix: "IMPL" }, + "tester": { file: "roles/tester/role.md", prefix: "TEST" }, + "reviewer": { file: "roles/reviewer/role.md", prefix: ["REVIEW", "QUALITY"] }, + "explorer": { file: "roles/explorer/role.md", prefix: "EXPLORE", type: "service" }, + "architect": { file: "roles/architect/role.md", prefix: "ARCH", type: "consulting" }, + "fe-developer":{ file: "roles/fe-developer/role.md",prefix: "DEV-FE", type: "frontend-pipeline" }, + "fe-qa": { file: "roles/fe-qa/role.md", prefix: "QA-FE", type: "frontend-pipeline" } +} + +if (!VALID_ROLES[role]) { + throw new Error(`Unknown role: ${role}. 
Available: ${Object.keys(VALID_ROLES).join(', ')}`) +} + +// Read and execute role-specific logic +Read(VALID_ROLES[role].file) +// โ†’ Execute the 5-phase process defined in that file +``` + +### Orchestration Mode๏ผˆๆ— ๅ‚ๆ•ฐ่งฆๅ‘๏ผ‰ + +ๅฝ“ไธๅธฆ `--role` ่ฐƒ็”จๆ—ถ๏ผŒ่‡ชๅŠจ่ฟ›ๅ…ฅ coordinator ็ผ–ๆŽ’ๆจกๅผใ€‚็”จๆˆทๅช้œ€ไผ ไปปๅŠกๆ่ฟฐๅณๅฏ่งฆๅ‘ๅฎŒๆ•ดๆต็จ‹ใ€‚ + +**่งฆๅ‘ๆ–นๅผ**: + +```javascript +// ็”จๆˆท่ฐƒ็”จ๏ผˆๆ—  --role๏ผ‰โ€” ่‡ชๅŠจ่ทฏ็”ฑๅˆฐ coordinator +Skill(skill="team-lifecycle-v2", args="ไปปๅŠกๆ่ฟฐ") + +// ็ญ‰ไปทไบŽ +Skill(skill="team-lifecycle-v2", args="--role=coordinator ไปปๅŠกๆ่ฟฐ") +``` + +**ๆต็จ‹**: + +```javascript +if (!roleMatch) { + // Orchestration Mode: ่‡ชๅŠจ่ทฏ็”ฑๅˆฐ coordinator + // coordinator role.md ๅฐ†ๆ‰ง่กŒ๏ผš + // Phase 1: ้œ€ๆฑ‚ๆพ„ๆธ… + // Phase 2: TeamCreate + spawn ๆ‰€ๆœ‰ worker agents + // ๆฏไธช agent prompt ไธญๅŒ…ๅซ Skill(args="--role=xxx") ๅ›ž่ฐƒ + // Phase 3: ๅˆ›ๅปบไปปๅŠก้“พ + // Phase 4: ็›‘ๆŽงๅ่ฐƒๅพช็Žฏ + // Phase 5: ็ป“ๆžœๆฑ‡ๆŠฅ + + const role = "coordinator" + Read(VALID_ROLES[role].file) +} +``` + +**ๅฎŒๆ•ด่ฐƒ็”จ้“พ**: + +``` +็”จๆˆท: Skill(args="ไปปๅŠกๆ่ฟฐ") + โ”‚ + โ”œโ”€ SKILL.md: ๆ—  --role โ†’ Orchestration Mode โ†’ ่ฏปๅ– coordinator role.md + โ”‚ + โ”œโ”€ coordinator Phase 2: TeamCreate + spawn workers + โ”‚ ๆฏไธช worker prompt ไธญๅŒ…ๅซ Skill(args="--role=xxx") ๅ›ž่ฐƒ + โ”‚ + โ”œโ”€ coordinator Phase 3: dispatch ไปปๅŠก้“พ + โ”‚ + โ”œโ”€ worker ๆ”ถๅˆฐไปปๅŠก โ†’ Skill(args="--role=xxx") โ†’ SKILL.md Role Router โ†’ role.md + โ”‚ ๆฏไธช worker ่‡ชๅŠจ่Žทๅ–: + โ”‚ โ”œโ”€ ่ง’่‰ฒๅฎšไน‰ (role.md: identity, boundaries, message types) + โ”‚ โ”œโ”€ ๅฏ็”จๅ‘ฝไปค (commands/*.md) + โ”‚ โ””โ”€ ๆ‰ง่กŒ้€ป่พ‘ (5-phase process) + โ”‚ + โ””โ”€ coordinator Phase 4-5: ็›‘ๆŽง โ†’ ็ป“ๆžœๆฑ‡ๆŠฅ +``` + +### Available Roles + +| Role | Task Prefix | Responsibility | Role File | +|------|-------------|----------------|-----------| +| `coordinator` | N/A | Pipeline orchestration, requirement clarification, task dispatch | 
[roles/coordinator/role.md](roles/coordinator/role.md) | +| `analyst` | RESEARCH-* | Seed analysis, codebase exploration, context gathering | [roles/analyst/role.md](roles/analyst/role.md) | +| `writer` | DRAFT-* | Product Brief / PRD / Architecture / Epics generation | [roles/writer/role.md](roles/writer/role.md) | +| `discussant` | DISCUSS-* | Multi-perspective critique, consensus building | [roles/discussant/role.md](roles/discussant/role.md) | +| `planner` | PLAN-* | Multi-angle exploration, structured planning | [roles/planner/role.md](roles/planner/role.md) | +| `executor` | IMPL-* | Code implementation following plans | [roles/executor/role.md](roles/executor/role.md) | +| `tester` | TEST-* | Adaptive test-fix cycles, quality gates | [roles/tester/role.md](roles/tester/role.md) | +| `reviewer` | `REVIEW-*` + `QUALITY-*` | Code review + Spec quality validation (auto-switch by prefix) | [roles/reviewer/role.md](roles/reviewer/role.md) | +| `explorer` | EXPLORE-* | Code search, pattern discovery, dependency tracing (service role, on-demand) | [roles/explorer/role.md](roles/explorer/role.md) | +| `architect` | ARCH-* | Architecture assessment, tech feasibility, design review (consulting role, on-demand) | [roles/architect/role.md](roles/architect/role.md) | +| `fe-developer` | DEV-FE-* | Frontend component/page implementation, design token consumption (frontend pipeline) | [roles/fe-developer/role.md](roles/fe-developer/role.md) | +| `fe-qa` | QA-FE-* | 5-dimension frontend QA, accessibility, design compliance, GC loop (frontend pipeline) | [roles/fe-qa/role.md](roles/fe-qa/role.md) | + +## Shared Infrastructure + +### Role Isolation Rules + +**ๆ ธๅฟƒๅŽŸๅˆ™**: ๆฏไธช่ง’่‰ฒไป…่ƒฝๆ‰ง่กŒ่‡ชๅทฑ่Œ่ดฃ่Œƒๅ›ดๅ†…็š„ๅทฅไฝœใ€‚ + +#### Output Tagging๏ผˆๅผบๅˆถ๏ผ‰ + +ๆ‰€ๆœ‰่ง’่‰ฒ็š„่พ“ๅ‡บๅฟ…้กปๅธฆ `[role_name]` ๆ ‡่ฏ†ๅ‰็ผ€๏ผš + +```javascript +// SendMessage โ€” content ๅ’Œ summary ้ƒฝๅฟ…้กปๅธฆๆ ‡่ฏ† +SendMessage({ + content: `## [${role}] ...`, + summary: `[${role}] ...` 
+}) + +// team_msg โ€” summary ๅฟ…้กปๅธฆๆ ‡่ฏ† +mcp__ccw-tools__team_msg({ + summary: `[${role}] ...` +}) +``` + +#### Coordinator ้š”็ฆป + +| ๅ…่ฎธ | ็ฆๆญข | +|------|------| +| ้œ€ๆฑ‚ๆพ„ๆธ… (AskUserQuestion) | โŒ ็›ดๆŽฅ็ผ–ๅ†™/ไฟฎๆ”นไปฃ็  | +| ๅˆ›ๅปบไปปๅŠก้“พ (TaskCreate) | โŒ ่ฐƒ็”จๅฎž็Žฐ็ฑป subagent (code-developer ็ญ‰) | +| ๅˆ†ๅ‘ไปปๅŠก็ป™ worker | โŒ ็›ดๆŽฅๆ‰ง่กŒๅˆ†ๆž/ๆต‹่ฏ•/ๅฎกๆŸฅ | +| ็›‘ๆŽง่ฟ›ๅบฆ (ๆถˆๆฏๆ€ป็บฟ) | โŒ ็ป•่ฟ‡ worker ่‡ช่กŒๅฎŒๆˆไปปๅŠก | +| ๆฑ‡ๆŠฅ็ป“ๆžœ็ป™็”จๆˆท | โŒ ไฟฎๆ”นๆบไปฃ็ ๆˆ–ไบง็‰ฉๆ–‡ไปถ | + +#### Worker ้š”็ฆป + +| ๅ…่ฎธ | ็ฆๆญข | +|------|------| +| ๅค„็†่‡ชๅทฑๅ‰็ผ€็š„ไปปๅŠก | โŒ ๅค„็†ๅ…ถไป–่ง’่‰ฒๅ‰็ผ€็š„ไปปๅŠก | +| SendMessage ็ป™ coordinator | โŒ ็›ดๆŽฅไธŽๅ…ถไป– worker ้€šไฟก | +| ไฝฟ็”จ Toolbox ไธญๅฃฐๆ˜Ž็š„ๅทฅๅ…ท | โŒ ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก (TaskCreate) | +| ๅง”ๆดพ็ป™ commands/ ไธญ็š„ๅ‘ฝไปค | โŒ ไฟฎๆ”นไธๅฑžไบŽๆœฌ่Œ่ดฃ็š„่ต„ๆบ | + +### Message Bus (All Roles) + +Every SendMessage **before**, must call `mcp__ccw-tools__team_msg` to log: + +```javascript +mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: role, + to: "coordinator", + type: "", + summary: `[${role}] `, + ref: "" +}) +``` + +**Message types by role**: + +| Role | Types | +|------|-------| +| coordinator | `plan_approved`, `plan_revision`, `task_unblocked`, `fix_required`, `error`, `shutdown` | +| analyst | `research_ready`, `research_progress`, `error` | +| writer | `draft_ready`, `draft_revision`, `impl_progress`, `error` | +| discussant | `discussion_ready`, `discussion_blocked`, `impl_progress`, `error` | +| planner | `plan_ready`, `plan_revision`, `impl_progress`, `error` | +| executor | `impl_complete`, `impl_progress`, `error` | +| tester | `test_result`, `impl_progress`, `fix_required`, `error` | +| reviewer | `review_result`, `quality_result`, `fix_required`, `error` | +| explorer | `explore_ready`, `explore_progress`, `task_failed` | +| architect | `arch_ready`, `arch_concern`, `arch_progress`, `error` | +| 
fe-developer | `dev_fe_complete`, `dev_fe_progress`, `error` | +| fe-qa | `qa_fe_passed`, `qa_fe_result`, `fix_required`, `error` | + +### CLI Fallback + +When `mcp__ccw-tools__team_msg` MCP is unavailable: + +```javascript +Bash(`ccw team log --team "${teamName}" --from "${role}" --to "coordinator" --type "" --summary "[${role}] " --json`) +Bash(`ccw team list --team "${teamName}" --last 10 --json`) +Bash(`ccw team status --team "${teamName}" --json`) +``` + +### Wisdom Accumulation (All Roles) + +่ทจไปปๅŠก็Ÿฅ่ฏ†็งฏ็ดฏๆœบๅˆถใ€‚Coordinator ๅœจ session ๅˆๅง‹ๅŒ–ๆ—ถๅˆ›ๅปบ `wisdom/` ็›ฎๅฝ•๏ผŒๆ‰€ๆœ‰ worker ๅœจๆ‰ง่กŒ่ฟ‡็จ‹ไธญ่ฏปๅ–ๅ’Œ่ดก็Œฎ wisdomใ€‚ + +**็›ฎๅฝ•็ป“ๆž„**: +``` +{sessionFolder}/wisdom/ +โ”œโ”€โ”€ learnings.md # ๅ‘็Žฐ็š„ๆจกๅผๅ’ŒๆดžๅฏŸ +โ”œโ”€โ”€ decisions.md # ๆžถๆž„ๅ’Œ่ฎพ่ฎกๅ†ณ็ญ– +โ”œโ”€โ”€ conventions.md # ไปฃ็ ๅบ“็บฆๅฎš +โ””โ”€โ”€ issues.md # ๅทฒ็Ÿฅ้ฃŽ้™ฉๅ’Œ้—ฎ้ข˜ +``` + +**Phase 2 ๅŠ ่ฝฝ๏ผˆๆ‰€ๆœ‰ worker๏ผ‰**: +```javascript +// Load wisdom context at start of Phase 2 +const sessionFolder = task.description.match(/Session:\s*([^\n]+)/)?.[1]?.trim() +let wisdom = {} +if (sessionFolder) { + try { wisdom.learnings = Read(`${sessionFolder}/wisdom/learnings.md`) } catch {} + try { wisdom.decisions = Read(`${sessionFolder}/wisdom/decisions.md`) } catch {} + try { wisdom.conventions = Read(`${sessionFolder}/wisdom/conventions.md`) } catch {} + try { wisdom.issues = Read(`${sessionFolder}/wisdom/issues.md`) } catch {} +} +``` + +**Phase 4/5 ่ดก็Œฎ๏ผˆไปปๅŠกๅฎŒๆˆๆ—ถ๏ผ‰**: +```javascript +// Contribute wisdom after task completion +if (sessionFolder) { + const timestamp = new Date().toISOString().substring(0, 10) + + // Role-specific contributions: + // analyst โ†’ learnings (exploration dimensions, codebase patterns) + // writer โ†’ conventions (document structure, naming patterns) + // planner โ†’ decisions (task decomposition rationale) + // executor โ†’ learnings (implementation patterns), issues (bugs encountered) + // tester โ†’ issues (test failures, 
edge cases), learnings (test patterns) + // reviewer โ†’ conventions (code quality patterns), issues (review findings) + // explorer โ†’ conventions (codebase patterns), learnings (dependency insights) + // architect โ†’ decisions (architecture choices), issues (architectural risks) + + try { + const targetFile = `${sessionFolder}/wisdom/${wisdomTarget}.md` + const existing = Read(targetFile) + const entry = `- [${timestamp}] [${role}] ${wisdomEntry}` + Write(targetFile, existing + '\n' + entry) + } catch {} // wisdom not initialized +} +``` + +**Coordinator ๆณจๅ…ฅ**: Coordinator ๅœจ spawn worker ๆ—ถ้€š่ฟ‡ task description ไผ ้€’ `Session: {sessionFolder}`๏ผŒworker ๆฎๆญคๅฎšไฝ wisdom ็›ฎๅฝ•ใ€‚ๅทฒๆœ‰ wisdom ๅ†…ๅฎนไธบๅŽ็ปญ worker ๆไพ›ไธŠไธ‹ๆ–‡๏ผŒๅฎž็Žฐ่ทจไปปๅŠก็Ÿฅ่ฏ†ไผ ้€’ใ€‚ + +### Task Lifecycle (All Worker Roles) + +```javascript +// Standard task lifecycle every worker role follows +// Phase 1: Discovery +const tasks = TaskList() +const prefixes = Array.isArray(VALID_ROLES[role].prefix) ? 
VALID_ROLES[role].prefix : [VALID_ROLES[role].prefix] +const myTasks = tasks.filter(t => + prefixes.some(p => t.subject.startsWith(`${p}-`)) && + t.owner === role && + t.status === 'pending' && + t.blockedBy.length === 0 +) +if (myTasks.length === 0) return // idle +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) + +// Phase 1.5: Resume Artifact Check (้˜ฒๆญข้‡ๅคไบงๅ‡บ) +// ๅฝ“ session ไปŽๆš‚ๅœๆขๅคๆ—ถ๏ผŒcoordinator ๅทฒๅฐ† in_progress ไปปๅŠก้‡็ฝฎไธบ pendingใ€‚ +// Worker ๅœจๅผ€ๅง‹ๅทฅไฝœๅ‰๏ผŒๅฟ…้กปๆฃ€ๆŸฅ่ฏฅไปปๅŠก็š„่พ“ๅ‡บไบง็‰ฉๆ˜ฏๅฆๅทฒๅญ˜ๅœจใ€‚ +// ๅฆ‚ๆžœไบง็‰ฉๅทฒๅญ˜ๅœจไธ”ๅ†…ๅฎนๅฎŒๆ•ด๏ผš +// โ†’ ็›ดๆŽฅ่ทณๅˆฐ Phase 5 ๆŠฅๅ‘ŠๅฎŒๆˆ๏ผˆ้ฟๅ…่ฆ†็›–ไธŠๆฌกๆˆๆžœ๏ผ‰ +// ๅฆ‚ๆžœไบง็‰ฉๅญ˜ๅœจไฝ†ไธๅฎŒๆ•ด๏ผˆๅฆ‚ๆ–‡ไปถไธบ็ฉบๆˆ–็ผบๅฐ‘ๅ…ณ้”ฎ section๏ผ‰๏ผš +// โ†’ ๆญฃๅธธๆ‰ง่กŒ Phase 2-4๏ผˆๅŸบไบŽๅทฒๆœ‰ไบง็‰ฉ็ปง็ปญ๏ผŒ่€Œ้žไปŽๅคดๅผ€ๅง‹๏ผ‰ +// ๅฆ‚ๆžœไบง็‰ฉไธๅญ˜ๅœจ๏ผš +// โ†’ ๆญฃๅธธๆ‰ง่กŒ Phase 2-4 +// +// ๆฏไธช role ๆฃ€ๆŸฅ่‡ชๅทฑ็š„่พ“ๅ‡บ่ทฏๅพ„: +// analyst โ†’ sessionFolder/spec/discovery-context.json +// writer โ†’ sessionFolder/spec/{product-brief.md | requirements/ | architecture/ | epics/} +// discussant โ†’ sessionFolder/discussions/discuss-NNN-*.md +// planner โ†’ sessionFolder/plan/plan.json +// executor โ†’ git diff (ๅทฒๆไบค็š„ไปฃ็ ๅ˜ๆ›ด) +// tester โ†’ test pass rate +// reviewer โ†’ sessionFolder/spec/readiness-report.md (quality) ๆˆ– review findings (code) + +// Phase 2-4: Role-specific (see roles/{role}/role.md) + +// Phase 5: Report + Loop โ€” ๆ‰€ๆœ‰่พ“ๅ‡บๅฟ…้กปๅธฆ [role] ๆ ‡่ฏ† +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: role, to: "coordinator", type: "...", summary: `[${role}] ...` }) +SendMessage({ type: "message", recipient: "coordinator", content: `## [${role}] ...`, summary: `[${role}] ...` }) +TaskUpdate({ taskId: task.id, status: 'completed' }) +// Check for next task โ†’ back to Phase 1 +``` + +## Three-Mode Pipeline + +``` +Spec-only: + RESEARCH-001 โ†’ DISCUSS-001 โ†’ 
DRAFT-001 โ†’ DISCUSS-002 + โ†’ DRAFT-002 โ†’ DISCUSS-003 โ†’ DRAFT-003 โ†’ DISCUSS-004 + โ†’ DRAFT-004 โ†’ DISCUSS-005 โ†’ QUALITY-001 โ†’ DISCUSS-006 + +Impl-only (backend): + PLAN-001 โ†’ IMPL-001 โ†’ TEST-001 + REVIEW-001 + +Full-lifecycle (backend): + [Spec pipeline] โ†’ PLAN-001(blockedBy: DISCUSS-006) โ†’ IMPL-001 โ†’ TEST-001 + REVIEW-001 +``` + +### Frontend Pipelines + +Coordinator ๆ นๆฎไปปๅŠกๅ…ณ้”ฎ่ฏ่‡ชๅŠจๆฃ€ๆต‹ๅ‰็ซฏไปปๅŠกๅนถ่ทฏ็”ฑๅˆฐๅ‰็ซฏๅญๆตๆฐด็บฟ๏ผš + +``` +FE-only (็บฏๅ‰็ซฏ): + PLAN-001 โ†’ DEV-FE-001 โ†’ QA-FE-001 + (GC loop: if QA-FE verdict=NEEDS_FIX โ†’ DEV-FE-002 โ†’ QA-FE-002, max 2 rounds) + +Fullstack (ๅ‰ๅŽ็ซฏๅนถ่กŒ): + PLAN-001 โ†’ IMPL-001 โˆฅ DEV-FE-001 โ†’ TEST-001 โˆฅ QA-FE-001 โ†’ REVIEW-001 + +Full-lifecycle + FE: + [Spec pipeline] โ†’ PLAN-001(blockedBy: DISCUSS-006) + โ†’ IMPL-001 โˆฅ DEV-FE-001 โ†’ TEST-001 โˆฅ QA-FE-001 โ†’ REVIEW-001 +``` + +### Frontend Detection + +Coordinator ๅœจ Phase 1 ๆ นๆฎไปปๅŠกๅ…ณ้”ฎ่ฏ + ้กน็›ฎๆ–‡ไปถ่‡ชๅŠจๆฃ€ๆต‹ๅ‰็ซฏไปปๅŠกๅนถ้€‰ๆ‹ฉๆตๆฐด็บฟๆจกๅผ๏ผˆfe-only / fullstack / impl-only๏ผ‰ใ€‚ๆฃ€ๆต‹้€ป่พ‘่ง [roles/coordinator/role.md](roles/coordinator/role.md)ใ€‚ + +### Generator-Critic Loop (fe-developer โ†” fe-qa) + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” DEV-FE artifact โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ fe-developer โ”‚ โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ†’ โ”‚ fe-qa โ”‚ +โ”‚ (Generator) โ”‚ โ”‚ (Critic) โ”‚ +โ”‚ โ”‚ โ†โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ QA-FE feedback โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + (max 2 rounds) + +Convergence: fe-qa.score >= 8 && fe-qa.critical_count === 0 +``` + +## Unified Session Directory + +All session artifacts are stored under a single session folder: + +``` +.workflow/.team/TLS-{slug}-{YYYY-MM-DD}/ +โ”œโ”€โ”€ team-session.json # Session state (status, progress, completed_tasks) +โ”œโ”€โ”€ spec/ # Spec artifacts (analyst, writer, reviewer 
output)
+│ ├── spec-config.json
+│ ├── discovery-context.json
+│ ├── product-brief.md
+│ ├── requirements/ # _index.md + REQ-*.md + NFR-*.md
+│ ├── architecture/ # _index.md + ADR-*.md
+│ ├── epics/ # _index.md + EPIC-*.md
+│ ├── readiness-report.md
+│ └── spec-summary.md
+├── discussions/ # Discussion records (discussant output)
+│ └── discuss-001..006.md
+├── plan/ # Plan artifacts (planner output)
+│ ├── exploration-{angle}.json
+│ ├── explorations-manifest.json
+│ ├── plan.json
+│ └── .task/
+│ └── TASK-*.json
+├── explorations/ # Explorer output (cached for cross-role reuse)
+│ └── explore-*.json
+├── architecture/ # Architect output (assessment reports)
+│ └── arch-*.json
+├── wisdom/ # Cross-task accumulated knowledge
+│ ├── learnings.md # Patterns and insights discovered
+│ ├── decisions.md # Architectural decisions made
+│ ├── conventions.md # Codebase conventions found
+│ └── issues.md # Known issues and risks
+├── qa/ # QA output (fe-qa audit reports)
+│ └── audit-fe-*.json
+└── build/ # Frontend build output (fe-developer)
+ ├── token-files/
+ └── component-files/
+```
+
+Messages remain at `.workflow/.team-msg/{team-name}/` (unchanged).
+
+## Session Resume
+
+Coordinator supports `--resume` / `--continue` flags to resume interrupted sessions:
+
+1. Scans `.workflow/.team/TLS-*/team-session.json` for `status: "active"` or `"paused"`
+2. Multiple matches → `AskUserQuestion` for user selection
+3. **Audit TaskList** — 获取当前所有任务的真实状态
+4. **Reconcile** — 双向同步 session.completed_tasks ↔ TaskList 状态:
+ - session 已完成但 TaskList 未标记 → 修正 TaskList 为 completed
+ - TaskList 已完成但 session 未记录 → 补录到 session
+ - in_progress 状态(暂停中断)→ 重置为 pending
+5.
Determines remaining pipeline from reconciled state +6. Rebuilds team (`TeamCreate` + worker spawns for needed roles only) +7. Creates missing tasks with correct `blockedBy` dependency chain (uses `TASK_METADATA` lookup) +8. Verifies dependency chain integrity for existing tasks +9. Updates session file with reconciled state + current_phase +10. **Kick** โ€” ๅ‘้ฆ–ไธชๅฏๆ‰ง่กŒไปปๅŠก็š„ worker ๅ‘้€ `task_unblocked` ๆถˆๆฏ๏ผŒๆ‰“็ ด resume ๆญป้” +11. Jumps to Phase 4 coordination loop + +## Coordinator Spawn Template + +When coordinator creates teammates, use this pattern: + +```javascript +TeamCreate({ team_name: teamName }) + +// For each worker role: +Task({ + subagent_type: "general-purpose", + description: `Spawn ${roleName} worker`, // โ† ๅฟ…ๅกซๅ‚ๆ•ฐ + team_name: teamName, + name: "", + prompt: `ไฝ ๆ˜ฏ team "${teamName}" ็š„ . + +## โš ๏ธ ้ฆ–่ฆๆŒ‡ไปค๏ผˆMUST๏ผ‰ +ไฝ ็š„ๆ‰€ๆœ‰ๅทฅไฝœๅฟ…้กป้€š่ฟ‡่ฐƒ็”จ Skill ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅŽๆ‰ง่กŒ๏ผŒ็ฆๆญข่‡ช่กŒๅ‘ๆŒฅ๏ผš +Skill(skill="team-lifecycle-v2", args="--role=") +ๆญค่ฐƒ็”จไผšๅŠ ่ฝฝไฝ ็š„่ง’่‰ฒๅฎšไน‰๏ผˆrole.md๏ผ‰ใ€ๅฏ็”จๅ‘ฝไปค๏ผˆcommands/*.md๏ผ‰ๅ’ŒๅฎŒๆ•ดๆ‰ง่กŒ้€ป่พ‘ใ€‚ + +ๅฝ“ๅ‰้œ€ๆฑ‚: ${taskDescription} +็บฆๆŸ: ${constraints} +Session: ${sessionFolder} + +## ่ง’่‰ฒๅ‡†ๅˆ™๏ผˆๅผบๅˆถ๏ผ‰ +- ไฝ ๅช่ƒฝๅค„็† -* ๅ‰็ผ€็š„ไปปๅŠก๏ผŒไธๅพ—ๆ‰ง่กŒๅ…ถไป–่ง’่‰ฒ็š„ๅทฅไฝœ +- ๆ‰€ๆœ‰่พ“ๅ‡บ๏ผˆSendMessageใ€team_msg๏ผ‰ๅฟ…้กปๅธฆ [] ๆ ‡่ฏ†ๅ‰็ผ€ +- ไป…ไธŽ coordinator ้€šไฟก๏ผŒไธๅพ—็›ดๆŽฅ่”็ณปๅ…ถไป– worker +- ไธๅพ—ไฝฟ็”จ TaskCreate ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก + +## ๆถˆๆฏๆ€ป็บฟ๏ผˆๅฟ…้กป๏ผ‰ +ๆฏๆฌก SendMessage ๅ‰๏ผŒๅ…ˆ่ฐƒ็”จ mcp__ccw-tools__team_msg ่ฎฐๅฝ•ใ€‚ + +## ๅทฅไฝœๆต็จ‹๏ผˆไธฅๆ ผๆŒ‰้กบๅบ๏ผ‰ +1. ่ฐƒ็”จ Skill(skill="team-lifecycle-v2", args="--role=") ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅ’Œๆ‰ง่กŒ้€ป่พ‘ +2. ๆŒ‰ role.md ไธญ็š„ 5-Phase ๆต็จ‹ๆ‰ง่กŒ๏ผˆTaskList โ†’ ๆ‰พๅˆฐ -* ไปปๅŠก โ†’ ๆ‰ง่กŒ โ†’ ๆฑ‡ๆŠฅ๏ผ‰ +3. team_msg log + SendMessage ็ป“ๆžœ็ป™ coordinator๏ผˆๅธฆ [] ๆ ‡่ฏ†๏ผ‰ +4. 
TaskUpdate completed โ†’ ๆฃ€ๆŸฅไธ‹ไธ€ไธชไปปๅŠก โ†’ ๅ›žๅˆฐๆญฅ้ชค 1` +}) +``` + +See [roles/coordinator/role.md](roles/coordinator/role.md) for the full spawn implementation with per-role prompts. + +## Shared Spec Resources + +Writer ๅ’Œ Reviewer ่ง’่‰ฒๅœจ spec ๆจกๅผไธ‹ไฝฟ็”จๆœฌ skill ๅ†…็ฝฎ็š„ๆ ‡ๅ‡†ๅ’Œๆจกๆฟ๏ผˆไปŽ spec-generator ๅคๅˆถ๏ผŒ็‹ฌ็ซ‹็ปดๆŠค๏ผ‰๏ผš + +| Resource | Path | Usage | +|----------|------|-------| +| Document Standards | `specs/document-standards.md` | YAML frontmatterใ€ๅ‘ฝๅ่ง„่Œƒใ€ๅ†…ๅฎน็ป“ๆž„ | +| Quality Gates | `specs/quality-gates.md` | Per-phase ่ดจ้‡้—จ็ฆใ€่ฏ„ๅˆ†ๆ ‡ๅฐบ | +| Product Brief Template | `templates/product-brief.md` | DRAFT-001 ๆ–‡ๆกฃ็”Ÿๆˆ | +| Requirements Template | `templates/requirements-prd.md` | DRAFT-002 ๆ–‡ๆกฃ็”Ÿๆˆ | +| Architecture Template | `templates/architecture-doc.md` | DRAFT-003 ๆ–‡ๆกฃ็”Ÿๆˆ | +| Epics Template | `templates/epics-template.md` | DRAFT-004 ๆ–‡ๆกฃ็”Ÿๆˆ | + +> Writer ๅœจๆ‰ง่กŒๆฏไธช DRAFT-* ไปปๅŠกๅ‰ **ๅฟ…้กปๅ…ˆ Read** ๅฏนๅบ”็š„ template ๆ–‡ไปถๅ’Œ document-standards.mdใ€‚ +> ไปŽ `roles/` ๅญ็›ฎๅฝ•ๅผ•็”จๆ—ถ่ทฏๅพ„ไธบ `../../specs/` ๅ’Œ `../../templates/`ใ€‚ + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Unknown --role value | Error with available role list | +| Missing --role arg | Orchestration Mode โ†’ auto route to coordinator | +| Role file not found | Error with expected path (roles/{name}/role.md) | +| Command file not found | Fall back to inline execution in role.md | +| Task prefix conflict | Log warning, proceed | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/analyst/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/analyst/role.md new file mode 100644 index 00000000..ded63cf9 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/analyst/role.md @@ -0,0 +1,271 @@ +# Role: analyst + +Seed analysis, codebase exploration, and multi-dimensional context gathering. Maps to spec-generator Phase 1 (Discovery). 
+ +## Role Identity + +- **Name**: `analyst` +- **Task Prefix**: `RESEARCH-*` +- **Output Tag**: `[analyst]` +- **Responsibility**: Seed Analysis โ†’ Codebase Exploration โ†’ Context Packaging โ†’ Report +- **Communication**: SendMessage to coordinator only + +## Role Boundaries + +### MUST +- Only process RESEARCH-* tasks +- Communicate only with coordinator +- Use Toolbox tools (ACE search, Gemini CLI) +- Generate discovery-context.json and spec-config.json +- Support file reference input (@ prefix or .md/.txt extension) + +### MUST NOT +- Create tasks for other roles +- Directly contact other workers +- Modify spec documents (only create discovery-context.json and spec-config.json) +- Skip seed analysis step +- Proceed without codebase detection + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `research_ready` | analyst โ†’ coordinator | Research complete | With discovery-context.json path and dimension summary | +| `research_progress` | analyst โ†’ coordinator | Long research progress | Intermediate progress update | +| `error` | analyst โ†’ coordinator | Unrecoverable error | Codebase access failure, CLI timeout, etc. 
| + +## Message Bus
+
+Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
+
+```javascript
+// Research complete
+mcp__ccw-tools__team_msg({
+ operation: "log",
+ team: teamName,
+ from: "analyst",
+ to: "coordinator",
+ type: "research_ready",
+ summary: "[analyst] Research done: 5 exploration dimensions",
+ ref: `${sessionFolder}/spec/discovery-context.json`
+})
+
+// Error report
+mcp__ccw-tools__team_msg({
+ operation: "log",
+ team: teamName,
+ from: "analyst",
+ to: "coordinator",
+ type: "error",
+ summary: "[analyst] Codebase access failed"
+})
+```
+
+### CLI Fallback
+
+When `mcp__ccw-tools__team_msg` MCP is unavailable:
+
+```bash
+ccw team log --team "${teamName}" --from "analyst" --to "coordinator" --type "research_ready" --summary "[analyst] Research done" --ref "${sessionFolder}/spec/discovery-context.json" --json
+```
+
+## Toolbox
+
+### Available Commands
+- None (simple enough for inline execution)
+
+### Subagent Capabilities
+- None
+
+### CLI Capabilities
+- `ccw cli --tool gemini --mode analysis` for seed analysis
+
+## Execution (5-Phase)
+
+### Phase 1: Task Discovery
+
+```javascript
+const tasks = TaskList()
+const myTasks = tasks.filter(t =>
+ t.subject.startsWith('RESEARCH-') &&
+ t.owner === 'analyst' &&
+ t.status === 'pending' &&
+ t.blockedBy.length === 0
+)
+
+if (myTasks.length === 0) return // idle
+
+const task = TaskGet({ taskId: myTasks[0].id })
+TaskUpdate({ taskId: task.id, status: 'in_progress' })
+```
+
+### Phase 2: Seed Analysis
+
+```javascript
+// Extract session folder from task description
+const sessionMatch = task.description.match(/Session:\s*(.+)/)
+const sessionFolder = sessionMatch ?
sessionMatch[1].trim() : '.workflow/.team/default' + +// Parse topic from task description +const topicLines = task.description.split('\n').filter(l => !l.startsWith('Session:') && !l.startsWith('่พ“ๅ‡บ:') && l.trim()) +const rawTopic = topicLines[0] || task.subject.replace('RESEARCH-001: ', '') + +// ๆ”ฏๆŒๆ–‡ไปถๅผ•็”จ่พ“ๅ…ฅ๏ผˆไธŽ spec-generator Phase 1 ไธ€่‡ด๏ผ‰ +const topic = (rawTopic.startsWith('@') || rawTopic.endsWith('.md') || rawTopic.endsWith('.txt')) + ? Read(rawTopic.replace(/^@/, '')) + : rawTopic + +// Use Gemini CLI for seed analysis +Bash({ + command: `ccw cli -p "PURPOSE: Analyze the following topic/idea and extract structured seed information for specification generation. +TASK: +โ€ข Extract problem statement (what problem does this solve) +โ€ข Identify target users and their pain points +โ€ข Determine domain and industry context +โ€ข List constraints and assumptions +โ€ข Identify 3-5 exploration dimensions for deeper research +โ€ข Assess complexity (simple/moderate/complex) + +TOPIC: ${topic} + +MODE: analysis +CONTEXT: @**/* +EXPECTED: JSON output with fields: problem_statement, target_users[], domain, constraints[], exploration_dimensions[], complexity_assessment +CONSTRAINTS: Output as valid JSON" --tool gemini --mode analysis --rule analysis-analyze-technical-document`, + run_in_background: true +}) +// Wait for CLI result, then parse seedAnalysis from output +``` + +### Phase 3: Codebase Exploration (conditional) + +```javascript +// Check if there's an existing codebase to explore +const hasProject = Bash(`test -f package.json || test -f Cargo.toml || test -f pyproject.toml || test -f go.mod; echo $?`) + +if (hasProject === '0') { + mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: "analyst", + to: "coordinator", + type: "research_progress", + summary: "[analyst] ็งๅญๅˆ†ๆžๅฎŒๆˆ, ๅผ€ๅง‹ไปฃ็ ๅบ“ๆŽข็ดข" + }) + + // Explore codebase using ACE search + const archSearch = mcp__ace-tool__search_context({ + 
project_root_path: projectRoot, + query: `Architecture patterns, main modules, entry points for: ${topic}` + }) + + // Detect tech stack from package files + // Explore existing patterns and integration points + + var codebaseContext = { + tech_stack, + architecture_patterns, + existing_conventions, + integration_points, + constraints_from_codebase: [] + } +} else { + var codebaseContext = null +} +``` + +### Phase 4: Context Packaging + +```javascript +// Generate spec-config.json +const specConfig = { + session_id: `SPEC-${topicSlug}-${dateStr}`, + topic: topic, + status: "research_complete", + complexity: seedAnalysis.complexity_assessment || "moderate", + depth: task.description.match(/่ฎจ่ฎบๆทฑๅบฆ:\s*(.+)/)?.[1] || "standard", + focus_areas: seedAnalysis.exploration_dimensions || [], + mode: "interactive", // team ๆจกๅผๅง‹็ปˆไบคไบ’ + phases_completed: ["discovery"], + created_at: new Date().toISOString(), + session_folder: sessionFolder, + discussion_depth: task.description.match(/่ฎจ่ฎบๆทฑๅบฆ:\s*(.+)/)?.[1] || "standard" +} +Write(`${sessionFolder}/spec/spec-config.json`, JSON.stringify(specConfig, null, 2)) + +// Generate discovery-context.json +const discoveryContext = { + session_id: specConfig.session_id, + phase: 1, + document_type: "discovery-context", + status: "complete", + generated_at: new Date().toISOString(), + seed_analysis: { + problem_statement: seedAnalysis.problem_statement, + target_users: seedAnalysis.target_users, + domain: seedAnalysis.domain, + constraints: seedAnalysis.constraints, + exploration_dimensions: seedAnalysis.exploration_dimensions, + complexity: seedAnalysis.complexity_assessment + }, + codebase_context: codebaseContext, + recommendations: { focus_areas: [], risks: [], open_questions: [] } +} +Write(`${sessionFolder}/spec/discovery-context.json`, JSON.stringify(discoveryContext, null, 2)) +``` + +### Phase 5: Report to Coordinator + +```javascript +const dimensionCount = 
discoveryContext.seed_analysis.exploration_dimensions?.length || 0 +const hasCodebase = codebaseContext !== null + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "analyst", to: "coordinator", + type: "research_ready", + summary: `[analyst] ็ ”็ฉถๅฎŒๆˆ: ${dimensionCount}ไธชๆŽข็ดข็ปดๅบฆ, ${hasCodebase ? 'ๆœ‰' : 'ๆ— '}ไปฃ็ ๅบ“ไธŠไธ‹ๆ–‡, ๅคๆ‚ๅบฆ=${specConfig.complexity}`, + ref: `${sessionFolder}/discovery-context.json` +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[analyst] ## ็ ”็ฉถๅˆ†ๆž็ป“ๆžœ + +**Task**: ${task.subject} +**ๅคๆ‚ๅบฆ**: ${specConfig.complexity} +**ไปฃ็ ๅบ“**: ${hasCodebase ? 'ๅทฒๆฃ€ๆต‹ๅˆฐ็Žฐๆœ‰้กน็›ฎ' : 'ๅ…จๆ–ฐ้กน็›ฎ'} + +### ้—ฎ้ข˜้™ˆ่ฟฐ +${discoveryContext.seed_analysis.problem_statement} + +### ็›ฎๆ ‡็”จๆˆท +${(discoveryContext.seed_analysis.target_users || []).map(u => '- ' + u).join('\n')} + +### ๆŽข็ดข็ปดๅบฆ +${(discoveryContext.seed_analysis.exploration_dimensions || []).map((d, i) => (i+1) + '. ' + d).join('\n')} + +### ่พ“ๅ‡บไฝ็ฝฎ +- Config: ${sessionFolder}/spec/spec-config.json +- Context: ${sessionFolder}/spec/discovery-context.json + +็ ”็ฉถๅทฒๅฐฑ็ปช๏ผŒๅฏ่ฟ›ๅ…ฅ่ฎจ่ฎบ่ฝฎๆฌก DISCUSS-001ใ€‚`, + summary: `[analyst] ็ ”็ฉถๅฐฑ็ปช: ${dimensionCount}็ปดๅบฆ, ${specConfig.complexity}` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) + +// Check for next RESEARCH task โ†’ back to Phase 1 +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No RESEARCH-* tasks available | Idle, wait for coordinator assignment | +| Gemini CLI analysis failure | Fallback to direct Claude analysis without CLI | +| Codebase detection failed | Continue as new project (no codebase context) | +| Session folder cannot be created | Notify coordinator, request alternative path | +| Topic too vague for analysis | Report to coordinator with clarification questions | +| Unexpected error | Log error via team_msg, report to coordinator | diff --git 
a/.claude/skills_lib/team-lifecycle-v2/roles/architect/commands/assess.md b/.claude/skills_lib/team-lifecycle-v2/roles/architect/commands/assess.md new file mode 100644 index 00000000..8fd76f66 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/architect/commands/assess.md @@ -0,0 +1,271 @@ +# Assess Command + +## Purpose +Multi-mode architecture assessment with mode-specific analysis strategies. Delegated from architect role.md Phase 3. + +## Input Context + +```javascript +// Provided by role.md Phase 2 +const { consultMode, sessionFolder, wisdom, explorations, projectTech, task } = context +``` + +## Mode Strategies + +### spec-review (ARCH-SPEC-*) + +ๅฎกๆŸฅๆžถๆž„ๆ–‡ๆกฃ็š„ๆŠ€ๆœฏๅˆ็†ๆ€งใ€‚ + +```javascript +const dimensions = [ + { name: 'consistency', weight: 0.25 }, + { name: 'scalability', weight: 0.25 }, + { name: 'security', weight: 0.25 }, + { name: 'tech-fitness', weight: 0.25 } +] + +// Load architecture documents +const archIndex = Read(`${sessionFolder}/spec/architecture/_index.md`) +const adrFiles = Glob({ pattern: `${sessionFolder}/spec/architecture/ADR-*.md` }) +const adrs = adrFiles.map(f => ({ path: f, content: Read(f) })) + +// Check ADR consistency +const adrDecisions = adrs.map(adr => { + const status = adr.content.match(/status:\s*(\w+)/i)?.[1] + const context = adr.content.match(/## Context\n([\s\S]*?)##/)?.[1]?.trim() + const decision = adr.content.match(/## Decision\n([\s\S]*?)##/)?.[1]?.trim() + return { path: adr.path, status, context, decision } +}) + +// Cross-reference: ADR decisions vs architecture index +// Flag contradictions between ADRs +// Check if tech choices align with project-tech.json + +for (const dim of dimensions) { + const score = evaluateDimension(dim.name, archIndex, adrs, projectTech) + assessment.dimensions.push({ name: dim.name, score, weight: dim.weight }) +} +``` + +### plan-review (ARCH-PLAN-*) + +ๅฎกๆŸฅๅฎž็Žฐ่ฎกๅˆ’็š„ๆžถๆž„ๅˆ็†ๆ€งใ€‚ + +```javascript +const plan = 
JSON.parse(Read(`${sessionFolder}/plan/plan.json`)) +const taskFiles = Glob({ pattern: `${sessionFolder}/plan/.task/TASK-*.json` }) +const tasks = taskFiles.map(f => JSON.parse(Read(f))) + +// 1. Dependency cycle detection +function detectCycles(tasks) { + const graph = {} + tasks.forEach(t => { graph[t.id] = t.depends_on || [] }) + const visited = new Set(), inStack = new Set() + function dfs(node) { + if (inStack.has(node)) return true // cycle + if (visited.has(node)) return false + visited.add(node); inStack.add(node) + for (const dep of (graph[node] || [])) { + if (dfs(dep)) return true + } + inStack.delete(node) + return false + } + return Object.keys(graph).filter(n => dfs(n)) +} +const cycles = detectCycles(tasks) +if (cycles.length > 0) { + assessment.concerns.push({ + severity: 'high', + concern: `Circular dependency detected: ${cycles.join(' โ†’ ')}`, + suggestion: 'Break cycle by extracting shared interface or reordering tasks' + }) +} + +// 2. Task granularity check +tasks.forEach(t => { + const fileCount = (t.files || []).length + if (fileCount > 8) { + assessment.concerns.push({ + severity: 'medium', + task: t.id, + concern: `Task touches ${fileCount} files โ€” may be too coarse`, + suggestion: 'Split into smaller tasks with clearer boundaries' + }) + } +}) + +// 3. Convention compliance (from wisdom) +if (wisdom.conventions) { + // Check if plan follows discovered conventions +} + +// 4. Architecture alignment (from wisdom.decisions) +if (wisdom.decisions) { + // Verify plan doesn't contradict previous architectural decisions +} +``` + +### code-review (ARCH-CODE-*) + +่ฏ„ไผฐไปฃ็ ๅ˜ๆ›ด็š„ๆžถๆž„ๅฝฑๅ“ใ€‚ + +```javascript +const changedFiles = Bash(`git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached`) + .split('\n').filter(Boolean) + +// 1. 
Layer violation detection +function detectLayerViolation(file, content) { + // Check import depth โ€” deeper layers should not import from shallower + const imports = (content.match(/from\s+['"]([^'"]+)['"]/g) || []) + .map(i => i.match(/['"]([^'"]+)['"]/)?.[1]).filter(Boolean) + return imports.filter(imp => isUpwardImport(file, imp)) +} + +// 2. New dependency analysis +const pkgChanges = changedFiles.filter(f => f.includes('package.json')) +if (pkgChanges.length > 0) { + for (const pkg of pkgChanges) { + const diff = Bash(`git diff HEAD~1 -- ${pkg} 2>/dev/null || git diff --cached -- ${pkg}`) + const newDeps = (diff.match(/\+\s+"([^"]+)":\s+"[^"]+"/g) || []) + .map(d => d.match(/"([^"]+)"/)?.[1]).filter(Boolean) + if (newDeps.length > 0) { + assessment.recommendations.push({ + area: 'dependencies', + suggestion: `New dependencies added: ${newDeps.join(', ')}. Verify license compatibility and bundle size impact.` + }) + } + } +} + +// 3. Module boundary changes +const indexChanges = changedFiles.filter(f => f.endsWith('index.ts') || f.endsWith('index.js')) +if (indexChanges.length > 0) { + assessment.concerns.push({ + severity: 'medium', + concern: `Module boundary files modified: ${indexChanges.join(', ')}`, + suggestion: 'Verify public API changes are intentional and backward compatible' + }) +} + +// 4. Architectural impact scoring +assessment.architectural_impact = changedFiles.length > 10 ? 'high' + : indexChanges.length > 0 || pkgChanges.length > 0 ? 
'medium' : 'low' +``` + +### consult (ARCH-CONSULT-*) + +ๅ›ž็ญ”ๆžถๆž„ๅ†ณ็ญ–ๅ’จ่ฏขใ€‚ + +```javascript +const question = task.description + .replace(/Session:.*\n?/g, '') + .replace(/Requester:.*\n?/g, '') + .trim() + +const isComplex = question.length > 200 || + /architect|design|pattern|refactor|migrate|scalab/i.test(question) + +if (isComplex) { + // Use cli-explore-agent for deep exploration + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: `Architecture consultation: ${question.substring(0, 80)}`, + prompt: `## Architecture Consultation + +Question: ${question} + +## Steps +1. Run: ccw tool exec get_modules_by_depth '{}' +2. Search for relevant architectural patterns in codebase +3. Read .workflow/project-tech.json (if exists) +4. Analyze architectural implications + +## Output +Write to: ${sessionFolder}/architecture/consult-exploration.json +Schema: { relevant_files[], patterns[], architectural_implications[], options[] }` + }) + + // Parse exploration results into assessment + try { + const exploration = JSON.parse(Read(`${sessionFolder}/architecture/consult-exploration.json`)) + assessment.recommendations = (exploration.options || []).map(opt => ({ + area: 'architecture', + suggestion: `${opt.name}: ${opt.description}`, + trade_offs: opt.trade_offs || [] + })) + } catch {} +} else { + // Simple consultation โ€” direct analysis + assessment.recommendations.push({ + area: 'architecture', + suggestion: `Direct answer based on codebase context and wisdom` + }) +} +``` + +### feasibility (ARCH-FEASIBILITY-*) + +ๆŠ€ๆœฏๅฏ่กŒๆ€ง่ฏ„ไผฐใ€‚ + +```javascript +const proposal = task.description + .replace(/Session:.*\n?/g, '') + .replace(/Requester:.*\n?/g, '') + .trim() + +// 1. Tech stack compatibility +const techStack = projectTech?.tech_stack || {} +// Check if proposal requires technologies not in current stack + +// 2. 
Codebase readiness +// Use ACE search to find relevant integration points +const searchResults = mcp__ace-tool__search_context({ + project_root_path: '.', + query: proposal +}) + +// 3. Effort estimation +const touchPoints = (searchResults?.relevant_files || []).length +const effort = touchPoints > 20 ? 'high' : touchPoints > 5 ? 'medium' : 'low' + +// 4. Risk assessment +assessment.verdict = 'FEASIBLE' // FEASIBLE | RISKY | INFEASIBLE +assessment.effort_estimate = effort +assessment.prerequisites = [] +assessment.risks = [] + +if (touchPoints > 20) { + assessment.verdict = 'RISKY' + assessment.risks.push({ + risk: 'High touch-point count suggests significant refactoring', + mitigation: 'Phase the implementation, start with core module' + }) +} +``` + +## Verdict Logic + +```javascript +function determineVerdict(assessment) { + const highConcerns = (assessment.concerns || []).filter(c => c.severity === 'high') + const mediumConcerns = (assessment.concerns || []).filter(c => c.severity === 'medium') + + if (highConcerns.length >= 2) return 'BLOCK' + if (highConcerns.length >= 1 || mediumConcerns.length >= 3) return 'CONCERN' + return 'APPROVE' +} + +assessment.overall_verdict = determineVerdict(assessment) +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| Architecture docs not found | Assess from available context, note limitation in report | +| Plan file missing | Report to coordinator via arch_concern | +| Git diff fails (no commits) | Use staged changes or skip code-review mode | +| CLI exploration timeout | Provide partial assessment, flag as incomplete | +| Exploration results unparseable | Fall back to direct analysis without exploration | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/architect/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/architect/role.md new file mode 100644 index 00000000..37b0e25f --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/architect/role.md @@ -0,0 +1,368 @@ +# Role: 
architect + +ๆžถๆž„้กพ้—ฎใ€‚ๆไพ›ๆžถๆž„ๅ†ณ็ญ–ๅ’จ่ฏขใ€ๆŠ€ๆœฏๅฏ่กŒๆ€ง่ฏ„ไผฐใ€่ฎพ่ฎกๆจกๅผๅปบ่ฎฎใ€‚ๅ’จ่ฏข่ง’่‰ฒ๏ผŒๅœจ spec ๅ’Œ impl ๆต็จ‹ๅ…ณ้”ฎ่Š‚็‚นๆไพ›ไธ“ไธšๅˆคๆ–ญใ€‚ + +## Role Identity + +- **Name**: `architect` +- **Task Prefix**: `ARCH-*` +- **Responsibility**: Context loading โ†’ Mode detection โ†’ Architecture analysis โ†’ Package assessment โ†’ Report +- **Communication**: SendMessage to coordinator only +- **Output Tag**: `[architect]` +- **Role Type**: Consulting๏ผˆๅ’จ่ฏข่ง’่‰ฒ๏ผŒไธ้˜ปๅกžไธป้“พ่ทฏ๏ผŒ่พ“ๅ‡บ่ขซๅผ•็”จ๏ผ‰ + +## Role Boundaries + +### MUST + +- ไป…ๅค„็† `ARCH-*` ๅ‰็ผ€็š„ไปปๅŠก +- ๆ‰€ๆœ‰่พ“ๅ‡บ๏ผˆSendMessageใ€team_msgใ€ๆ—ฅๅฟ—๏ผ‰ๅฟ…้กปๅธฆ `[architect]` ๆ ‡่ฏ† +- ไป…้€š่ฟ‡ SendMessage ไธŽ coordinator ้€šไฟก +- ่พ“ๅ‡บ็ป“ๆž„ๅŒ–่ฏ„ไผฐๆŠฅๅ‘Šไพ›่ฐƒ็”จๆ–นๆถˆ่ดน +- ๆ นๆฎไปปๅŠกๅ‰็ผ€่‡ชๅŠจๅˆ‡ๆขๅ’จ่ฏขๆจกๅผ + +### MUST NOT + +- โŒ ็›ดๆŽฅไฟฎๆ”นๆบไปฃ็ ๆ–‡ไปถ +- โŒ ๆ‰ง่กŒ้œ€ๆฑ‚ๅˆ†ๆžใ€ไปฃ็ ๅฎž็Žฐใ€ๆต‹่ฏ•็ญ‰ๅ…ถไป–่ง’่‰ฒ่Œ่ดฃ +- โŒ ็›ดๆŽฅไธŽๅ…ถไป– worker ่ง’่‰ฒ้€šไฟก +- โŒ ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก +- โŒ ๅšๆœ€็ปˆๅ†ณ็ญ–๏ผˆไป…ๆไพ›ๅปบ่ฎฎ๏ผŒๅ†ณ็ญ–ๆƒๅœจ coordinator/็”จๆˆท๏ผ‰ +- โŒ ๅœจ่พ“ๅ‡บไธญ็œ็•ฅ `[architect]` ๆ ‡่ฏ† + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `arch_ready` | architect โ†’ coordinator | Consultation complete | ๆžถๆž„่ฏ„ไผฐ/ๅปบ่ฎฎๅทฒๅฐฑ็ปช | +| `arch_concern` | architect โ†’ coordinator | Significant risk found | ๅ‘็Žฐ้‡ๅคงๆžถๆž„้ฃŽ้™ฉ | +| `arch_progress` | architect โ†’ coordinator | Long analysis progress | ๅคๆ‚ๅˆ†ๆž่ฟ›ๅบฆๆ›ดๆ–ฐ | +| `error` | architect โ†’ coordinator | Analysis failure | ๅˆ†ๆžๅคฑ่ดฅๆˆ–ไธŠไธ‹ๆ–‡ไธ่ถณ | + +## Message Bus + +ๆฏๆฌก SendMessage **ๅ‰**๏ผŒๅฟ…้กป่ฐƒ็”จ `mcp__ccw-tools__team_msg` ่ฎฐๅฝ•ๆถˆๆฏ๏ผš + +```javascript +// Consultation complete +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "architect", to: "coordinator", + type: "arch_ready", + summary: "[architect] ARCH complete: 3 
recommendations, 1 concern", + ref: outputPath +}) + +// Risk alert +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "architect", to: "coordinator", + type: "arch_concern", + summary: "[architect] RISK: circular dependency in module graph" +}) +``` + +### CLI ๅ›ž้€€ + +ๅฝ“ `mcp__ccw-tools__team_msg` MCP ไธๅฏ็”จๆ—ถ๏ผŒไฝฟ็”จ `ccw team` CLI ไฝœไธบ็ญ‰ๆ•ˆๅ›ž้€€๏ผš + +```javascript +Bash(`ccw team log --team "${teamName}" --from "architect" --to "coordinator" --type "arch_ready" --summary "[architect] ARCH complete" --ref "${outputPath}" --json`) +``` + +**ๅ‚ๆ•ฐๆ˜ ๅฐ„**: `team_msg(params)` โ†’ `ccw team log --team --from architect --to coordinator --type --summary "" [--ref ] [--json]` + +## Toolbox + +### Available Commands +- `commands/assess.md` โ€” Multi-mode architecture assessment (Phase 3) + +### Subagent Capabilities + +| Agent Type | Used By | Purpose | +|------------|---------|---------| +| `cli-explore-agent` | commands/assess.md | ๆทฑๅบฆๆžถๆž„ๆŽข็ดข๏ผˆๆจกๅ—ไพ่ต–ใ€ๅˆ†ๅฑ‚็ป“ๆž„๏ผ‰ | + +### CLI Capabilities + +| CLI Tool | Mode | Used By | Purpose | +|----------|------|---------|---------| +| `ccw cli --tool gemini --mode analysis` | analysis | commands/assess.md | ๆžถๆž„ๅˆ†ๆžใ€ๆจกๅผ่ฏ„ไผฐ | + +## Consultation Modes + +ๆ นๆฎไปปๅŠก subject ๅ‰็ผ€่‡ชๅŠจๅˆ‡ๆข๏ผš + +| Mode | Task Pattern | Focus | Output | +|------|-------------|-------|--------| +| `spec-review` | ARCH-SPEC-* | ๅฎกๆŸฅๆžถๆž„ๆ–‡ๆกฃ๏ผˆADRใ€็ป„ไปถๅ›พ๏ผ‰ | ๆžถๆž„่ฏ„ๅฎกๆŠฅๅ‘Š | +| `plan-review` | ARCH-PLAN-* | ๅฎกๆŸฅๅฎž็Žฐ่ฎกๅˆ’็š„ๆžถๆž„ๅˆ็†ๆ€ง | ่ฎกๅˆ’่ฏ„ๅฎกๆ„่ง | +| `code-review` | ARCH-CODE-* | ่ฏ„ไผฐไปฃ็ ๅ˜ๆ›ด็š„ๆžถๆž„ๅฝฑๅ“ | ๆžถๆž„ๅฝฑๅ“ๅˆ†ๆž | +| `consult` | ARCH-CONSULT-* | ๅ›ž็ญ”ๆžถๆž„ๅ†ณ็ญ–ๅ’จ่ฏข | ๅ†ณ็ญ–ๅปบ่ฎฎ | +| `feasibility` | ARCH-FEASIBILITY-* | ๆŠ€ๆœฏๅฏ่กŒๆ€ง่ฏ„ไผฐ | ๅฏ่กŒๆ€งๆŠฅๅ‘Š | + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + 
t.subject.startsWith('ARCH-') && + t.owner === 'architect' && + t.status === 'pending' && + t.blockedBy.length === 0 +) + +if (myTasks.length === 0) return // idle + +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 2: Context Loading & Mode Detection + +```javascript +const sessionFolder = task.description.match(/Session:\s*([^\n]+)/)?.[1]?.trim() + +// Auto-detect consultation mode from task subject +const MODE_MAP = { + 'ARCH-SPEC': 'spec-review', + 'ARCH-PLAN': 'plan-review', + 'ARCH-CODE': 'code-review', + 'ARCH-CONSULT': 'consult', + 'ARCH-FEASIBILITY': 'feasibility' +} +const modePrefix = Object.keys(MODE_MAP).find(p => task.subject.startsWith(p)) +const consultMode = modePrefix ? MODE_MAP[modePrefix] : 'consult' + +// Load wisdom (accumulated knowledge from previous tasks) +let wisdom = {} +if (sessionFolder) { + try { wisdom.learnings = Read(`${sessionFolder}/wisdom/learnings.md`) } catch {} + try { wisdom.decisions = Read(`${sessionFolder}/wisdom/decisions.md`) } catch {} + try { wisdom.conventions = Read(`${sessionFolder}/wisdom/conventions.md`) } catch {} +} + +// Load project tech context +let projectTech = {} +try { projectTech = JSON.parse(Read('.workflow/project-tech.json')) } catch {} + +// Load exploration results if available +let explorations = [] +if (sessionFolder) { + try { + const exploreFiles = Glob({ pattern: `${sessionFolder}/explorations/*.json` }) + explorations = exploreFiles.map(f => { + try { return JSON.parse(Read(f)) } catch { return null } + }).filter(Boolean) + } catch {} +} +``` + +### Phase 3: Architecture Assessment + +Delegate to command file for mode-specific analysis: + +```javascript +try { + const assessCommand = Read("commands/assess.md") + // Execute mode-specific strategy defined in command file + // Input: consultMode, sessionFolder, wisdom, explorations, projectTech + // Output: assessment object +} catch { + // Fallback: inline execution (see 
below) +} +``` + +**Command**: [commands/assess.md](commands/assess.md) + +**Inline Fallback** (when command file unavailable): + +```javascript +const assessment = { + mode: consultMode, + overall_verdict: 'APPROVE', // APPROVE | CONCERN | BLOCK + dimensions: [], + concerns: [], + recommendations: [], + _metadata: { timestamp: new Date().toISOString(), wisdom_loaded: Object.keys(wisdom).length > 0 } +} + +// Mode-specific analysis +if (consultMode === 'spec-review') { + // Load architecture documents, check ADR consistency, scalability, security + const archIndex = Read(`${sessionFolder}/spec/architecture/_index.md`) + const adrFiles = Glob({ pattern: `${sessionFolder}/spec/architecture/ADR-*.md` }) + // Score dimensions: consistency, scalability, security, tech-fitness +} + +if (consultMode === 'plan-review') { + // Load plan.json, check task granularity, dependency cycles, convention compliance + const plan = JSON.parse(Read(`${sessionFolder}/plan/plan.json`)) + // Detect circular dependencies, oversized tasks, missing risk assessment +} + +if (consultMode === 'code-review') { + // Analyze changed files for layer violations, new deps, module boundary changes + const changedFiles = Bash(`git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached`) + .split('\n').filter(Boolean) + // Check import depth, package.json changes, index.ts modifications +} + +if (consultMode === 'consult') { + // Free-form consultation โ€” use CLI for complex questions + const question = task.description.replace(/Session:.*\n?/g, '').replace(/Requester:.*\n?/g, '').trim() + const isComplex = question.length > 200 || /architect|design|pattern|refactor|migrate/i.test(question) + if (isComplex) { + Bash({ + command: `ccw cli -p "PURPOSE: Architecture consultation โ€” ${question} +TASK: โ€ข Analyze architectural implications โ€ข Identify options with trade-offs โ€ข Recommend approach +MODE: analysis +CONTEXT: @**/* +EXPECTED: Structured analysis with options, trade-offs, 
recommendation +CONSTRAINTS: Architecture-level only" --tool gemini --mode analysis --rule analysis-review-architecture`, + run_in_background: true + }) + // Wait for result, parse into assessment + } +} + +if (consultMode === 'feasibility') { + // Assess technical feasibility against current codebase + // Output: verdict (FEASIBLE|RISKY|INFEASIBLE), risks, effort estimate, prerequisites +} +``` + +### Phase 4: Package & Wisdom Contribution + +```javascript +// Write assessment to session +const outputPath = sessionFolder + ? `${sessionFolder}/architecture/arch-${task.subject.replace(/[^a-zA-Z0-9-]/g, '-').toLowerCase()}.json` + : '.workflow/.tmp/arch-assessment.json' + +Bash(`mkdir -p "$(dirname '${outputPath}')"`) +Write(outputPath, JSON.stringify(assessment, null, 2)) + +// Contribute to wisdom: record architectural decisions +if (sessionFolder && assessment.recommendations?.length > 0) { + try { + const decisionsPath = `${sessionFolder}/wisdom/decisions.md` + const existing = Read(decisionsPath) + const newDecisions = assessment.recommendations + .map(r => `- [${new Date().toISOString().substring(0, 10)}] ${r.area || r.dimension}: ${r.suggestion}`) + .join('\n') + Write(decisionsPath, existing + '\n' + newDecisions) + } catch {} // wisdom not initialized +} +``` + +### Phase 5: Report to Coordinator + +```javascript +const verdict = assessment.overall_verdict || assessment.verdict || 'N/A' +const concernCount = (assessment.concerns || []).length +const highConcerns = (assessment.concerns || []).filter(c => c.severity === 'high').length +const recCount = (assessment.recommendations || []).length + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "architect", to: "coordinator", + type: highConcerns > 0 ? 
"arch_concern" : "arch_ready", + summary: `[architect] ARCH ${consultMode}: ${verdict}, ${concernCount} concerns, ${recCount} recommendations`, + ref: outputPath +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[architect] ## Architecture Assessment + +**Task**: ${task.subject} +**Mode**: ${consultMode} +**Verdict**: ${verdict} + +### Summary +- **Concerns**: ${concernCount} (${highConcerns} high) +- **Recommendations**: ${recCount} +${assessment.architectural_impact ? `- **Impact**: ${assessment.architectural_impact}` : ''} + +${assessment.dimensions?.length > 0 ? `### Dimension Scores +${assessment.dimensions.map(d => `- **${d.name}**: ${d.score}%`).join('\n')}` : ''} + +${concernCount > 0 ? `### Concerns +${assessment.concerns.map(c => `- [${(c.severity || 'medium').toUpperCase()}] ${c.task || c.file || ''}: ${c.concern}`).join('\n')}` : ''} + +### Recommendations +${(assessment.recommendations || []).map(r => `- ${r.area || r.dimension || ''}: ${r.suggestion}`).join('\n') || 'None'} + +### Output: ${outputPath}`, + summary: `[architect] ARCH ${consultMode}: ${verdict}` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) + +// Check for next ARCH task โ†’ back to Phase 1 +const nextTasks = TaskList().filter(t => + t.subject.startsWith('ARCH-') && + t.owner === 'architect' && + t.status === 'pending' && + t.blockedBy.length === 0 +) +if (nextTasks.length > 0) { + // Continue โ†’ back to Phase 1 +} +``` + +## Coordinator Integration + +Architect ็”ฑ coordinator ๅœจๅ…ณ้”ฎ่Š‚็‚นๆŒ‰้œ€ๅˆ›ๅปบ ARCH-* ไปปๅŠก๏ผš + +### Spec Pipeline (after DRAFT-003, before DISCUSS-004) + +```javascript +TaskCreate({ + subject: 'ARCH-SPEC-001: ๆžถๆž„ๆ–‡ๆกฃไธ“ไธš่ฏ„ๅฎก', + description: `่ฏ„ๅฎกๆžถๆž„ๆ–‡ๆกฃ็š„ๆŠ€ๆœฏๅˆ็†ๆ€ง\n\nSession: ${sessionFolder}\n่พ“ๅ…ฅ: ${sessionFolder}/spec/architecture/`, + activeForm: 'ๆžถๆž„่ฏ„ๅฎกไธญ' +}) +TaskUpdate({ taskId: archSpecId, owner: 'architect' }) +// DISCUSS-004 addBlockedBy [archSpecId] +``` + +### Impl 
Pipeline (after PLAN-001, before IMPL-001) + +```javascript +TaskCreate({ + subject: 'ARCH-PLAN-001: ๅฎž็Žฐ่ฎกๅˆ’ๆžถๆž„ๅฎกๆŸฅ', + description: `ๅฎกๆŸฅๅฎž็Žฐ่ฎกๅˆ’็š„ๆžถๆž„ๅˆ็†ๆ€ง\n\nSession: ${sessionFolder}\nPlan: ${sessionFolder}/plan/plan.json`, + activeForm: '่ฎกๅˆ’ๅฎกๆŸฅไธญ' +}) +TaskUpdate({ taskId: archPlanId, owner: 'architect' }) +// IMPL-001 addBlockedBy [archPlanId] +``` + +### On-Demand (any point via coordinator) + +```javascript +TaskCreate({ + subject: 'ARCH-CONSULT-001: ๆžถๆž„ๅ†ณ็ญ–ๅ’จ่ฏข', + description: `${question}\n\nSession: ${sessionFolder}\nRequester: ${role}`, + activeForm: 'ๆžถๆž„ๅ’จ่ฏขไธญ' +}) +TaskUpdate({ taskId: archConsultId, owner: 'architect' }) +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No ARCH-* tasks available | Idle, wait for coordinator assignment | +| Architecture documents not found | Assess from available context, note limitation | +| Plan file not found | Report to coordinator, request location | +| CLI analysis timeout | Provide partial assessment, note incomplete | +| Insufficient context | Request explorer to gather more context via coordinator | +| Conflicting requirements | Flag as concern, provide options | +| Command file not found | Fall back to inline execution | +| Unexpected error | Log error via team_msg, report to coordinator | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/dispatch.md b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/dispatch.md new file mode 100644 index 00000000..a78ace33 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/dispatch.md @@ -0,0 +1,523 @@ +# Dispatch Command - Task Chain Creation + +**Purpose**: Create task chains based on execution mode, aligned with SKILL.md Three-Mode Pipeline + +**Invoked by**: Coordinator role.md Phase 3 + +**Output Tag**: `[coordinator]` + +--- + +## Task Chain Strategies + +### Role-Task Mapping (Source of Truth: SKILL.md VALID_ROLES) 
+ +| Task Prefix | Role | VALID_ROLES Key | +|-------------|------|-----------------| +| RESEARCH-* | analyst | `analyst` | +| DISCUSS-* | discussant | `discussant` | +| DRAFT-* | writer | `writer` | +| QUALITY-* | reviewer | `reviewer` | +| PLAN-* | planner | `planner` | +| IMPL-* | executor | `executor` | +| TEST-* | tester | `tester` | +| REVIEW-* | reviewer | `reviewer` | +| DEV-FE-* | fe-developer | `fe-developer` | +| QA-FE-* | fe-qa | `fe-qa` | + +--- + +### Strategy 1: Spec-Only Mode (12 tasks) + +Pipeline: `RESEARCH โ†’ DISCUSS โ†’ DRAFT โ†’ DISCUSS โ†’ DRAFT โ†’ DISCUSS โ†’ DRAFT โ†’ DISCUSS โ†’ DRAFT โ†’ DISCUSS โ†’ QUALITY โ†’ DISCUSS` + +```javascript +if (requirements.mode === "spec-only") { + Output("[coordinator] Creating spec-only task chain (12 tasks)") + + // Task 1: Seed Analysis + TaskCreate({ + subject: "RESEARCH-001", + owner: "analyst", + description: `Seed analysis: codebase exploration and context gathering\nSession: ${sessionFolder}\nScope: ${requirements.scope}\nFocus: ${requirements.focus.join(", ")}\nDepth: ${requirements.depth}`, + blockedBy: [], + status: "pending" + }) + + // Task 2: Critique Research + TaskCreate({ + subject: "DISCUSS-001", + owner: "discussant", + description: `Critique research findings from RESEARCH-001, identify gaps and clarify scope\nSession: ${sessionFolder}`, + blockedBy: ["RESEARCH-001"], + status: "pending" + }) + + // Task 3: Product Brief + TaskCreate({ + subject: "DRAFT-001", + owner: "writer", + description: `Generate Product Brief based on RESEARCH-001 findings and DISCUSS-001 feedback\nSession: ${sessionFolder}`, + blockedBy: ["DISCUSS-001"], + status: "pending" + }) + + // Task 4: Critique Product Brief + TaskCreate({ + subject: "DISCUSS-002", + owner: "discussant", + description: `Critique Product Brief (DRAFT-001), evaluate completeness and clarity\nSession: ${sessionFolder}`, + blockedBy: ["DRAFT-001"], + status: "pending" + }) + + // Task 5: Requirements/PRD + TaskCreate({ + subject: 
"DRAFT-002", + owner: "writer", + description: `Generate Requirements/PRD incorporating DISCUSS-002 feedback\nSession: ${sessionFolder}`, + blockedBy: ["DISCUSS-002"], + status: "pending" + }) + + // Task 6: Critique Requirements + TaskCreate({ + subject: "DISCUSS-003", + owner: "discussant", + description: `Critique Requirements/PRD (DRAFT-002), validate coverage and feasibility\nSession: ${sessionFolder}`, + blockedBy: ["DRAFT-002"], + status: "pending" + }) + + // Task 7: Architecture Document + TaskCreate({ + subject: "DRAFT-003", + owner: "writer", + description: `Generate Architecture Document incorporating DISCUSS-003 feedback\nSession: ${sessionFolder}`, + blockedBy: ["DISCUSS-003"], + status: "pending" + }) + + // Task 8: Critique Architecture + TaskCreate({ + subject: "DISCUSS-004", + owner: "discussant", + description: `Critique Architecture Document (DRAFT-003), evaluate design decisions\nSession: ${sessionFolder}`, + blockedBy: ["DRAFT-003"], + status: "pending" + }) + + // Task 9: Epics + TaskCreate({ + subject: "DRAFT-004", + owner: "writer", + description: `Generate Epics document incorporating DISCUSS-004 feedback\nSession: ${sessionFolder}`, + blockedBy: ["DISCUSS-004"], + status: "pending" + }) + + // Task 10: Critique Epics + TaskCreate({ + subject: "DISCUSS-005", + owner: "discussant", + description: `Critique Epics (DRAFT-004), validate task decomposition and priorities\nSession: ${sessionFolder}`, + blockedBy: ["DRAFT-004"], + status: "pending" + }) + + // Task 11: Spec Quality Check + TaskCreate({ + subject: "QUALITY-001", + owner: "reviewer", + description: `5-dimension spec quality validation across all spec artifacts\nSession: ${sessionFolder}`, + blockedBy: ["DISCUSS-005"], + status: "pending" + }) + + // Task 12: Final Review Discussion + TaskCreate({ + subject: "DISCUSS-006", + owner: "discussant", + description: `Final review discussion: address QUALITY-001 findings, sign-off\nSession: ${sessionFolder}`, + blockedBy: ["QUALITY-001"], 
+ status: "pending" + }) + + Output("[coordinator] Spec-only task chain created (12 tasks)") + Output("[coordinator] Starting with: RESEARCH-001 (analyst)") +} +``` + +--- + +### Strategy 2: Impl-Only Mode (4 tasks) + +Pipeline: `PLAN โ†’ IMPL โ†’ TEST + REVIEW` + +```javascript +if (requirements.mode === "impl-only") { + Output("[coordinator] Creating impl-only task chain (4 tasks)") + + // Verify spec exists + const specExists = AskUserQuestion({ + question: "Implementation mode requires existing specifications. Do you have a spec file?", + choices: ["yes", "no"] + }) + + if (specExists === "no") { + Output("[coordinator] ERROR: impl-only mode requires existing specifications") + Output("[coordinator] Please run spec-only mode first or use full-lifecycle mode") + throw new Error("Missing specifications for impl-only mode") + } + + const specFile = AskUserQuestion({ + question: "Provide path to specification file:", + type: "text" + }) + + const specContent = Read(specFile) + if (!specContent) { + throw new Error(`Specification file not found: ${specFile}`) + } + + Output(`[coordinator] Using specification: ${specFile}`) + + // Task 1: Planning + TaskCreate({ + subject: "PLAN-001", + owner: "planner", + description: `Multi-angle codebase exploration and structured planning\nSession: ${sessionFolder}\nSpec: ${specFile}\nScope: ${requirements.scope}`, + blockedBy: [], + status: "pending" + }) + + // Task 2: Implementation + TaskCreate({ + subject: "IMPL-001", + owner: "executor", + description: `Code implementation following PLAN-001\nSession: ${sessionFolder}\nSpec: ${specFile}`, + blockedBy: ["PLAN-001"], + status: "pending" + }) + + // Task 3: Testing (parallel with REVIEW-001) + TaskCreate({ + subject: "TEST-001", + owner: "tester", + description: `Adaptive test-fix cycles and quality gates\nSession: ${sessionFolder}`, + blockedBy: ["IMPL-001"], + status: "pending" + }) + + // Task 4: Code Review (parallel with TEST-001) + TaskCreate({ + subject: "REVIEW-001", + 
owner: "reviewer", + description: `4-dimension code review of IMPL-001 output\nSession: ${sessionFolder}`, + blockedBy: ["IMPL-001"], + status: "pending" + }) + + Output("[coordinator] Impl-only task chain created (4 tasks)") + Output("[coordinator] Starting with: PLAN-001 (planner)") +} +``` + +--- + +### Strategy 3: Full-Lifecycle Mode (16 tasks) + +Pipeline: `[Spec pipeline 12] โ†’ PLAN(blockedBy: DISCUSS-006) โ†’ IMPL โ†’ TEST + REVIEW` + +```javascript +if (requirements.mode === "full-lifecycle") { + Output("[coordinator] Creating full-lifecycle task chain (16 tasks)") + + // ======================================== + // SPEC PHASE (12 tasks) โ€” same as spec-only + // ======================================== + + TaskCreate({ subject: "RESEARCH-001", owner: "analyst", description: `Seed analysis: codebase exploration and context gathering\nSession: ${sessionFolder}\nScope: ${requirements.scope}\nFocus: ${requirements.focus.join(", ")}\nDepth: ${requirements.depth}`, blockedBy: [], status: "pending" }) + TaskCreate({ subject: "DISCUSS-001", owner: "discussant", description: `Critique research findings from RESEARCH-001\nSession: ${sessionFolder}`, blockedBy: ["RESEARCH-001"], status: "pending" }) + TaskCreate({ subject: "DRAFT-001", owner: "writer", description: `Generate Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-001"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-002", owner: "discussant", description: `Critique Product Brief (DRAFT-001)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-001"], status: "pending" }) + TaskCreate({ subject: "DRAFT-002", owner: "writer", description: `Generate Requirements/PRD\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-002"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-003", owner: "discussant", description: `Critique Requirements/PRD (DRAFT-002)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-002"], status: "pending" }) + TaskCreate({ subject: "DRAFT-003", owner: "writer", 
description: `Generate Architecture Document\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-003"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-004", owner: "discussant", description: `Critique Architecture Document (DRAFT-003)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-003"], status: "pending" }) + TaskCreate({ subject: "DRAFT-004", owner: "writer", description: `Generate Epics\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-004"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-005", owner: "discussant", description: `Critique Epics (DRAFT-004)\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-004"], status: "pending" }) + TaskCreate({ subject: "QUALITY-001", owner: "reviewer", description: `5-dimension spec quality validation\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-005"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-006", owner: "discussant", description: `Final review discussion and sign-off\nSession: ${sessionFolder}`, blockedBy: ["QUALITY-001"], status: "pending" }) + + // ======================================== + // IMPL PHASE (4 tasks) โ€” blocked by spec completion + // ======================================== + + TaskCreate({ + subject: "PLAN-001", + owner: "planner", + description: `Multi-angle codebase exploration and structured planning\nSession: ${sessionFolder}\nScope: ${requirements.scope}`, + blockedBy: ["DISCUSS-006"], // Blocked until spec phase completes + status: "pending" + }) + + TaskCreate({ + subject: "IMPL-001", + owner: "executor", + description: `Code implementation following PLAN-001\nSession: ${sessionFolder}`, + blockedBy: ["PLAN-001"], + status: "pending" + }) + + TaskCreate({ + subject: "TEST-001", + owner: "tester", + description: `Adaptive test-fix cycles and quality gates\nSession: ${sessionFolder}`, + blockedBy: ["IMPL-001"], + status: "pending" + }) + + TaskCreate({ + subject: "REVIEW-001", + owner: "reviewer", + description: `4-dimension code review of IMPL-001 output\nSession: 
${sessionFolder}`, + blockedBy: ["IMPL-001"], + status: "pending" + }) + + Output("[coordinator] Full-lifecycle task chain created (16 tasks)") + Output("[coordinator] Starting with: RESEARCH-001 (analyst)") +} +``` + +--- + +### Strategy 4: FE-Only Mode (3 tasks) + +Pipeline: `PLAN โ†’ DEV-FE โ†’ QA-FE` (with GC loop: max 2 rounds) + +```javascript +if (requirements.mode === "fe-only") { + Output("[coordinator] Creating fe-only task chain (3 tasks)") + + TaskCreate({ + subject: "PLAN-001", + owner: "planner", + description: `Multi-angle codebase exploration and structured planning (frontend focus)\nSession: ${sessionFolder}\nScope: ${requirements.scope}`, + blockedBy: [], + status: "pending" + }) + + TaskCreate({ + subject: "DEV-FE-001", + owner: "fe-developer", + description: `Frontend component/page implementation following PLAN-001\nSession: ${sessionFolder}`, + blockedBy: ["PLAN-001"], + status: "pending" + }) + + TaskCreate({ + subject: "QA-FE-001", + owner: "fe-qa", + description: `5-dimension frontend QA for DEV-FE-001 output\nSession: ${sessionFolder}`, + blockedBy: ["DEV-FE-001"], + status: "pending" + }) + + // Note: GC loop (DEV-FE-002 โ†’ QA-FE-002) created dynamically by coordinator + // when QA-FE-001 verdict = NEEDS_FIX (max 2 rounds) + + Output("[coordinator] FE-only task chain created (3 tasks)") + Output("[coordinator] Starting with: PLAN-001 (planner)") +} +``` + +--- + +### Strategy 5: Fullstack Mode (6 tasks) + +Pipeline: `PLAN โ†’ IMPL โˆฅ DEV-FE โ†’ TEST โˆฅ QA-FE โ†’ REVIEW` + +```javascript +if (requirements.mode === "fullstack") { + Output("[coordinator] Creating fullstack task chain (6 tasks)") + + TaskCreate({ + subject: "PLAN-001", + owner: "planner", + description: `Multi-angle codebase exploration and structured planning (fullstack)\nSession: ${sessionFolder}\nScope: ${requirements.scope}`, + blockedBy: [], + status: "pending" + }) + + // Backend + Frontend in parallel + TaskCreate({ + subject: "IMPL-001", + owner: "executor", + 
description: `Backend implementation following PLAN-001\nSession: ${sessionFolder}`, + blockedBy: ["PLAN-001"], + status: "pending" + }) + + TaskCreate({ + subject: "DEV-FE-001", + owner: "fe-developer", + description: `Frontend implementation following PLAN-001\nSession: ${sessionFolder}`, + blockedBy: ["PLAN-001"], + status: "pending" + }) + + // Testing + QA in parallel + TaskCreate({ + subject: "TEST-001", + owner: "tester", + description: `Backend test-fix cycles\nSession: ${sessionFolder}`, + blockedBy: ["IMPL-001"], + status: "pending" + }) + + TaskCreate({ + subject: "QA-FE-001", + owner: "fe-qa", + description: `Frontend QA for DEV-FE-001\nSession: ${sessionFolder}`, + blockedBy: ["DEV-FE-001"], + status: "pending" + }) + + // Final review after all testing + TaskCreate({ + subject: "REVIEW-001", + owner: "reviewer", + description: `Full code review (backend + frontend)\nSession: ${sessionFolder}`, + blockedBy: ["TEST-001", "QA-FE-001"], + status: "pending" + }) + + Output("[coordinator] Fullstack task chain created (6 tasks)") + Output("[coordinator] Starting with: PLAN-001 (planner)") +} +``` + +--- + +### Strategy 6: Full-Lifecycle-FE Mode (18 tasks) + +Pipeline: `[Spec 12] โ†’ PLAN(blockedBy: DISCUSS-006) โ†’ IMPL โˆฅ DEV-FE โ†’ TEST โˆฅ QA-FE โ†’ REVIEW` + +```javascript +if (requirements.mode === "full-lifecycle-fe") { + Output("[coordinator] Creating full-lifecycle-fe task chain (18 tasks)") + + // SPEC PHASE (12 tasks) โ€” same as spec-only + TaskCreate({ subject: "RESEARCH-001", owner: "analyst", description: `Seed analysis\nSession: ${sessionFolder}\nScope: ${requirements.scope}`, blockedBy: [], status: "pending" }) + TaskCreate({ subject: "DISCUSS-001", owner: "discussant", description: `Critique research findings\nSession: ${sessionFolder}`, blockedBy: ["RESEARCH-001"], status: "pending" }) + TaskCreate({ subject: "DRAFT-001", owner: "writer", description: `Generate Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-001"], status: 
"pending" }) + TaskCreate({ subject: "DISCUSS-002", owner: "discussant", description: `Critique Product Brief\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-001"], status: "pending" }) + TaskCreate({ subject: "DRAFT-002", owner: "writer", description: `Generate Requirements/PRD\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-002"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-003", owner: "discussant", description: `Critique Requirements\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-002"], status: "pending" }) + TaskCreate({ subject: "DRAFT-003", owner: "writer", description: `Generate Architecture Document\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-003"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-004", owner: "discussant", description: `Critique Architecture\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-003"], status: "pending" }) + TaskCreate({ subject: "DRAFT-004", owner: "writer", description: `Generate Epics\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-004"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-005", owner: "discussant", description: `Critique Epics\nSession: ${sessionFolder}`, blockedBy: ["DRAFT-004"], status: "pending" }) + TaskCreate({ subject: "QUALITY-001", owner: "reviewer", description: `Spec quality validation\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-005"], status: "pending" }) + TaskCreate({ subject: "DISCUSS-006", owner: "discussant", description: `Final review and sign-off\nSession: ${sessionFolder}`, blockedBy: ["QUALITY-001"], status: "pending" }) + + // IMPL PHASE (6 tasks) โ€” fullstack, blocked by spec + TaskCreate({ subject: "PLAN-001", owner: "planner", description: `Fullstack planning\nSession: ${sessionFolder}`, blockedBy: ["DISCUSS-006"], status: "pending" }) + TaskCreate({ subject: "IMPL-001", owner: "executor", description: `Backend implementation\nSession: ${sessionFolder}`, blockedBy: ["PLAN-001"], status: "pending" }) + TaskCreate({ subject: "DEV-FE-001", owner: 
"fe-developer", description: `Frontend implementation\nSession: ${sessionFolder}`, blockedBy: ["PLAN-001"], status: "pending" }) + TaskCreate({ subject: "TEST-001", owner: "tester", description: `Backend test-fix cycles\nSession: ${sessionFolder}`, blockedBy: ["IMPL-001"], status: "pending" }) + TaskCreate({ subject: "QA-FE-001", owner: "fe-qa", description: `Frontend QA\nSession: ${sessionFolder}`, blockedBy: ["DEV-FE-001"], status: "pending" }) + TaskCreate({ subject: "REVIEW-001", owner: "reviewer", description: `Full code review\nSession: ${sessionFolder}`, blockedBy: ["TEST-001", "QA-FE-001"], status: "pending" }) + + Output("[coordinator] Full-lifecycle-fe task chain created (18 tasks)") + Output("[coordinator] Starting with: RESEARCH-001 (analyst)") +} +``` + +--- + +## Task Metadata Reference + +```javascript +// Unified metadata for all pipelines (used by Session Resume) +const TASK_METADATA = { + // Spec pipeline (12 tasks) + "RESEARCH-001": { role: "analyst", deps: [], description: "Seed analysis: codebase exploration and context gathering" }, + "DISCUSS-001": { role: "discussant", deps: ["RESEARCH-001"], description: "Critique research findings, identify gaps" }, + "DRAFT-001": { role: "writer", deps: ["DISCUSS-001"], description: "Generate Product Brief" }, + "DISCUSS-002": { role: "discussant", deps: ["DRAFT-001"], description: "Critique Product Brief" }, + "DRAFT-002": { role: "writer", deps: ["DISCUSS-002"], description: "Generate Requirements/PRD" }, + "DISCUSS-003": { role: "discussant", deps: ["DRAFT-002"], description: "Critique Requirements/PRD" }, + "DRAFT-003": { role: "writer", deps: ["DISCUSS-003"], description: "Generate Architecture Document" }, + "DISCUSS-004": { role: "discussant", deps: ["DRAFT-003"], description: "Critique Architecture Document" }, + "DRAFT-004": { role: "writer", deps: ["DISCUSS-004"], description: "Generate Epics" }, + "DISCUSS-005": { role: "discussant", deps: ["DRAFT-004"], description: "Critique Epics" }, + 
"QUALITY-001": { role: "reviewer", deps: ["DISCUSS-005"], description: "5-dimension spec quality validation" }, + "DISCUSS-006": { role: "discussant", deps: ["QUALITY-001"], description: "Final review discussion and sign-off" }, + + // Impl pipeline (4 tasks) โ€” deps shown for impl-only mode + // In full-lifecycle, PLAN-001 deps = ["DISCUSS-006"] + "PLAN-001": { role: "planner", deps: [], description: "Multi-angle codebase exploration and structured planning" }, + "IMPL-001": { role: "executor", deps: ["PLAN-001"], description: "Code implementation following plan" }, + "TEST-001": { role: "tester", deps: ["IMPL-001"], description: "Adaptive test-fix cycles and quality gates" }, + "REVIEW-001": { role: "reviewer", deps: ["IMPL-001"], description: "4-dimension code review" }, + + // Frontend pipeline tasks + "DEV-FE-001": { role: "fe-developer", deps: ["PLAN-001"], description: "Frontend component/page implementation" }, + "QA-FE-001": { role: "fe-qa", deps: ["DEV-FE-001"], description: "5-dimension frontend QA" }, + // GC loop tasks (created dynamically) + "DEV-FE-002": { role: "fe-developer", deps: ["QA-FE-001"], description: "Frontend fixes (GC round 2)" }, + "QA-FE-002": { role: "fe-qa", deps: ["DEV-FE-002"], description: "Frontend QA re-check (GC round 2)" } +} + +// Pipeline chain constants +const SPEC_CHAIN = [ + "RESEARCH-001", "DISCUSS-001", "DRAFT-001", "DISCUSS-002", + "DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004", + "DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006" +] + +const IMPL_CHAIN = ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"] + +const FE_CHAIN = ["DEV-FE-001", "QA-FE-001"] + +const FULLSTACK_CHAIN = ["PLAN-001", "IMPL-001", "DEV-FE-001", "TEST-001", "QA-FE-001", "REVIEW-001"] +``` + +--- + +## Execution Method Handling + +### Sequential Execution + +```javascript +if (requirements.executionMethod === "sequential") { + Output("[coordinator] Sequential execution: tasks will run one at a time") + // Only one task active at a 
time + // Next task activated only after predecessor completes +} +``` + +### Parallel Execution + +```javascript +if (requirements.executionMethod === "parallel") { + Output("[coordinator] Parallel execution: independent tasks will run concurrently") + // Tasks with all deps met can run in parallel + // e.g., TEST-001 and REVIEW-001 both depend on IMPL-001 โ†’ run together + // e.g., IMPL-001 and DEV-FE-001 both depend on PLAN-001 โ†’ run together +} +``` + +--- + +## Output Format + +All outputs from this command use the `[coordinator]` tag: + +``` +[coordinator] Creating spec-only task chain (12 tasks) +[coordinator] Starting with: RESEARCH-001 (analyst) +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/monitor.md b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/monitor.md new file mode 100644 index 00000000..5b290b54 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/commands/monitor.md @@ -0,0 +1,368 @@ +# Monitor Command - Coordination Loop + +**Purpose**: Monitor task progress, route messages, and handle checkpoints + +**Invoked by**: Coordinator role.md Phase 4 + +**Output Tag**: `[coordinator]` + +--- + +## Coordination Loop + +> **่ฎพ่ฎกๅŽŸๅˆ™**: ๆจกๅž‹ๆ‰ง่กŒๆฒกๆœ‰ๆ—ถ้—ดๆฆ‚ๅฟต๏ผŒ็ฆๆญขไปปไฝ•ๅฝขๅผ็š„่ฝฎ่ฏข็ญ‰ๅพ…ใ€‚ +> ไฝฟ็”จๅŒๆญฅ `Task(run_in_background: false)` ่ฐƒ็”จไฝœไธบ็ญ‰ๅพ…ๆœบๅˆถใ€‚ +> Worker ่ฟ”ๅ›ž = ้˜ถๆฎตๅฎŒๆˆไฟกๅท๏ผˆๅคฉ็„ถๅ›ž่ฐƒ๏ผ‰๏ผŒๆ— ้œ€ sleep ่ฝฎ่ฏขใ€‚ + +```javascript +Output("[coordinator] Entering coordination loop (Stop-Wait mode)...") + +// Get all tasks and filter for pending work +const allTasks = TaskList() +const pendingTasks = allTasks.filter(t => t.status !== 'completed') + +for (const task of pendingTasks) { + // Check if all dependencies are met + const allDepsMet = (task.blockedBy || []).every(depSubject => { + const dep = allTasks.find(t => t.subject === depSubject) + return dep && dep.status === 'completed' + }) + + if (!allDepsMet) { + 
Output(`[coordinator] Task ${task.subject} blocked by dependencies, skipping`) + continue + } + + // Determine role from task subject prefix โ†’ TASK_METADATA lookup + const taskMeta = TASK_METADATA[task.subject] + const role = taskMeta ? taskMeta.role : task.owner + + Output(`[coordinator] Starting task: ${task.subject} (role: ${role})`) + + // Mark as in_progress + TaskUpdate({ taskId: task.id, status: 'in_progress' }) + + // ============================================================ + // Spawn worker using SKILL.md Coordinator Spawn Template + // Key: worker MUST call Skill() to load role definition + // ============================================================ + Task({ + subagent_type: "general-purpose", + description: `Spawn ${role} worker for ${task.subject}`, + team_name: teamName, + name: role, + prompt: `ไฝ ๆ˜ฏ team "${teamName}" ็š„ ${role.toUpperCase()}. + +## โš ๏ธ ้ฆ–่ฆๆŒ‡ไปค๏ผˆMUST๏ผ‰ +ไฝ ็š„ๆ‰€ๆœ‰ๅทฅไฝœๅฟ…้กป้€š่ฟ‡่ฐƒ็”จ Skill ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅŽๆ‰ง่กŒ๏ผŒ็ฆๆญข่‡ช่กŒๅ‘ๆŒฅ๏ผš +Skill(skill="team-lifecycle-v2", args="--role=${role}") +ๆญค่ฐƒ็”จไผšๅŠ ่ฝฝไฝ ็š„่ง’่‰ฒๅฎšไน‰๏ผˆrole.md๏ผ‰ใ€ๅฏ็”จๅ‘ฝไปค๏ผˆcommands/*.md๏ผ‰ๅ’ŒๅฎŒๆ•ดๆ‰ง่กŒ้€ป่พ‘ใ€‚ + +ๅฝ“ๅ‰ไปปๅŠก: ${task.subject} - ${task.description} +Session: ${sessionFolder} + +## ่ง’่‰ฒๅ‡†ๅˆ™๏ผˆๅผบๅˆถ๏ผ‰ +- ไฝ ๅช่ƒฝๅค„็† ${taskMeta ? task.subject.split('-')[0] : ''}-* ๅ‰็ผ€็š„ไปปๅŠก๏ผŒไธๅพ—ๆ‰ง่กŒๅ…ถไป–่ง’่‰ฒ็š„ๅทฅไฝœ +- ๆ‰€ๆœ‰่พ“ๅ‡บ๏ผˆSendMessageใ€team_msg๏ผ‰ๅฟ…้กปๅธฆ [${role}] ๆ ‡่ฏ†ๅ‰็ผ€ +- ไป…ไธŽ coordinator ้€šไฟก๏ผŒไธๅพ—็›ดๆŽฅ่”็ณปๅ…ถไป– worker +- ไธๅพ—ไฝฟ็”จ TaskCreate ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก + +## ๆถˆๆฏๆ€ป็บฟ๏ผˆๅฟ…้กป๏ผ‰ +ๆฏๆฌก SendMessage ๅ‰๏ผŒๅ…ˆ่ฐƒ็”จ mcp__ccw-tools__team_msg ่ฎฐๅฝ•ใ€‚ + +## ๅทฅไฝœๆต็จ‹๏ผˆไธฅๆ ผๆŒ‰้กบๅบ๏ผ‰ +1. ่ฐƒ็”จ Skill(skill="team-lifecycle-v2", args="--role=${role}") ่Žทๅ–่ง’่‰ฒๅฎšไน‰ๅ’Œๆ‰ง่กŒ้€ป่พ‘ +2. ๆŒ‰ role.md ไธญ็š„ 5-Phase ๆต็จ‹ๆ‰ง่กŒ๏ผˆTaskList โ†’ ๆ‰พๅˆฐไปปๅŠก โ†’ ๆ‰ง่กŒ โ†’ ๆฑ‡ๆŠฅ๏ผ‰ +3. 
team_msg log + SendMessage ็ป“ๆžœ็ป™ coordinator๏ผˆๅธฆ [${role}] ๆ ‡่ฏ†๏ผ‰ +4. TaskUpdate completed โ†’ ๆฃ€ๆŸฅไธ‹ไธ€ไธชไปปๅŠก โ†’ ๅ›žๅˆฐๆญฅ้ชค 1`, + run_in_background: false + }) + + // Worker returned โ€” check status + const completedTask = TaskGet({ taskId: task.id }) + Output(`[coordinator] Task ${task.subject} status: ${completedTask.status}`) + + if (completedTask.status === "completed") { + handleTaskComplete({ subject: task.subject, output: completedTask }) + } + + // Update session progress + const session = Read(sessionFile) + const allTasksNow = TaskList() + session.tasks_completed = allTasksNow.filter(t => t.status === "completed").length + Write(sessionFile, session) + + // Check if all tasks complete + const remaining = allTasksNow.filter(t => t.status !== "completed") + if (remaining.length === 0) { + Output("[coordinator] All tasks completed!") + break + } +} + +Output("[coordinator] Coordination loop complete") +``` + +--- + +## Message Handlers + +### handleTaskComplete + +```javascript +function handleTaskComplete(message) { + const subject = message.subject + + Output(`[coordinator] Task completed: ${subject}`) + + // Check for dependent tasks + const allTasks = TaskList() + const dependentTasks = allTasks.filter(t => + (t.blockedBy || []).includes(subject) && t.status === 'pending' + ) + + Output(`[coordinator] Checking ${dependentTasks.length} dependent tasks`) + + for (const depTask of dependentTasks) { + // Check if all dependencies are met + const allDepsMet = (depTask.blockedBy || []).every(depSubject => { + const dep = allTasks.find(t => t.subject === depSubject) + return dep && dep.status === 'completed' + }) + + if (allDepsMet) { + Output(`[coordinator] Unblocking task: ${depTask.subject} (${depTask.owner})`) + } + } + + // Special checkpoint: Spec phase complete before implementation + if (subject === "DISCUSS-006" && (requirements.mode === "full-lifecycle" || requirements.mode === "full-lifecycle-fe")) { + Output("[coordinator] Spec 
phase complete. Checkpoint before implementation.") + handleSpecCompleteCheckpoint() + } +} +``` + +--- + +### handleTaskBlocked + +```javascript +function handleTaskBlocked(message) { + const subject = message.subject + const reason = message.reason + + Output(`[coordinator] Task blocked: ${subject}`) + Output(`[coordinator] Reason: ${reason}`) + + // Check if block reason is dependency-related + if (reason.includes("dependency")) { + Output("[coordinator] Dependency block detected. Waiting for predecessor tasks.") + return + } + + // Check if block reason is ambiguity-related + if (reason.includes("ambiguous") || reason.includes("unclear")) { + Output("[coordinator] Ambiguity detected. Routing to analyst for research.") + handleAmbiguityBlock(subject, reason) + return + } + + // Unknown block reason - escalate to user + Output("[coordinator] Unknown block reason. Escalating to user.") + const userDecision = AskUserQuestion({ + question: `Task ${subject} is blocked: ${reason}. How to proceed?`, + choices: [ + "retry - Retry the task", + "skip - Skip this task", + "abort - Abort entire workflow", + "manual - Provide manual input" + ] + }) + + switch (userDecision) { + case "retry": + // Task will be retried in next coordination loop iteration + break + + case "skip": + const task = TaskList().find(t => t.subject === subject) + if (task) TaskUpdate({ taskId: task.id, status: "completed" }) + Output(`[coordinator] Task ${subject} skipped by user`) + break + + case "abort": + Output("[coordinator] Workflow aborted by user") + loopActive = false + break + + case "manual": + const manualInput = AskUserQuestion({ + question: `Provide manual input for task ${subject}:`, + type: "text" + }) + const taskToComplete = TaskList().find(t => t.subject === subject) + if (taskToComplete) TaskUpdate({ taskId: taskToComplete.id, status: "completed" }) + Output(`[coordinator] Task ${subject} completed with manual input`) + break + } +} + +// Route ambiguity to analyst (explorer as 
fallback) +function handleAmbiguityBlock(subject, reason) { + Output(`[coordinator] Creating research task for ambiguity in ${subject}`) + + // Spawn analyst on-demand to research the ambiguity + Task({ + subagent_type: "general-purpose", + description: `Spawn analyst for ambiguity research`, + team_name: teamName, + name: "analyst", + prompt: `ไฝ ๆ˜ฏ team "${teamName}" ็š„ ANALYST. + +## โš ๏ธ ้ฆ–่ฆๆŒ‡ไปค๏ผˆMUST๏ผ‰ +Skill(skill="team-lifecycle-v2", args="--role=analyst") + +## ็ดงๆ€ฅ็ ”็ฉถไปปๅŠก +่ขซ้˜ปๅกžไปปๅŠก: ${subject} +้˜ปๅกžๅŽŸๅ› : ${reason} +Session: ${sessionFolder} + +่ฏท่ฐƒๆŸฅๅนถ้€š่ฟ‡ SendMessage ๆฑ‡ๆŠฅ็ ”็ฉถ็ป“ๆžœ็ป™ coordinatorใ€‚`, + run_in_background: false + }) + + Output(`[coordinator] Ambiguity research complete for ${subject}`) +} +``` + +--- + +### handleDiscussionNeeded + +```javascript +function handleDiscussionNeeded(message) { + const subject = message.subject + const question = message.question + const context = message.context + + Output(`[coordinator] Discussion needed for task: ${subject}`) + Output(`[coordinator] Question: ${question}`) + + // Route to user + const userResponse = AskUserQuestion({ + question: `Task ${subject} needs clarification:\n\n${question}\n\nContext: ${context}`, + type: "text" + }) + + Output(`[coordinator] User response received for ${subject}`) +} +``` + +--- + +## Checkpoint Handlers + +### handleSpecCompleteCheckpoint + +```javascript +function handleSpecCompleteCheckpoint() { + Output("[coordinator] ========================================") + Output("[coordinator] SPEC PHASE COMPLETE - CHECKPOINT") + Output("[coordinator] ========================================") + + // Ask user to review + const userDecision = AskUserQuestion({ + question: "Spec phase complete (DISCUSS-006 done). 
Review specifications before proceeding to implementation?", + choices: [ + "proceed - Proceed to implementation (PLAN-001)", + "review - Review spec artifacts in session folder", + "revise - Request spec revision", + "stop - Stop here (spec-only)" + ] + }) + + switch (userDecision) { + case "proceed": + Output("[coordinator] Proceeding to implementation phase (PLAN-001)") + break + + case "review": + Output("[coordinator] Spec artifacts are in: " + sessionFolder + "/spec/") + Output("[coordinator] Please review and then re-invoke to continue.") + handleSpecCompleteCheckpoint() + break + + case "revise": + const revisionScope = AskUserQuestion({ + question: "Which spec artifacts need revision? (e.g., DRAFT-002 requirements, DRAFT-003 architecture)", + type: "text" + }) + Output(`[coordinator] Revision requested: ${revisionScope}`) + handleSpecCompleteCheckpoint() + break + + case "stop": + Output("[coordinator] Stopping at spec phase (user request)") + loopActive = false + break + } +} +``` + +--- + +## Message Routing Tables + +### Spec Phase Messages + +| Message Type | Sender Role | Trigger | Coordinator Action | +|--------------|-------------|---------|-------------------| +| `research_ready` | analyst | RESEARCH-* done | Update session, unblock DISCUSS-001 | +| `discussion_ready` | discussant | DISCUSS-* done | Unblock next DRAFT-* or QUALITY-* | +| `draft_ready` | writer | DRAFT-* done | Unblock next DISCUSS-* | +| `quality_result` | reviewer | QUALITY-* done | Unblock DISCUSS-006 | +| `error` | any worker | Task failed | Log error, escalate to user | + +### Impl Phase Messages + +| Message Type | Sender Role | Trigger | Coordinator Action | +|--------------|-------------|---------|-------------------| +| `plan_ready` | planner | PLAN-001 done | Unblock IMPL-001 (+ DEV-FE-001 for fullstack) | +| `impl_complete` | executor | IMPL-001 done | Unblock TEST-001 + REVIEW-001 | +| `test_result` | tester | TEST-001 done | Log results | +| `review_result` | reviewer | 
REVIEW-001 done | Log results | +| `dev_fe_complete` | fe-developer | DEV-FE-* done | Unblock QA-FE-* | +| `qa_fe_result` | fe-qa | QA-FE-* done | Check verdict, maybe create GC round | +| `error` | any worker | Task failed | Log error, escalate to user | + +--- + +## Progress Tracking + +```javascript +function logProgress() { + const session = Read(sessionFile) + const completedCount = session.tasks_completed + const totalCount = session.tasks_total + const percentage = Math.round((completedCount / totalCount) * 100) + + Output(`[coordinator] Progress: ${completedCount}/${totalCount} tasks (${percentage}%)`) + Output(`[coordinator] Current phase: ${session.current_phase}`) +} +``` + +--- + +## Output Format + +All outputs from this command use the `[coordinator]` tag: + +``` +[coordinator] Entering coordination loop (Stop-Wait mode)... +[coordinator] Starting task: RESEARCH-001 (role: analyst) +[coordinator] Task RESEARCH-001 status: completed +[coordinator] Checking 1 dependent tasks +[coordinator] Unblocking task: DISCUSS-001 (discussant) +[coordinator] Progress: 1/12 tasks (8%) +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/role.md new file mode 100644 index 00000000..0b353c81 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/coordinator/role.md @@ -0,0 +1,695 @@ +# Coordinator Role + +## Role Identity + +**Role**: Coordinator +**Output Tag**: `[coordinator]` +**Responsibility**: Orchestrate the team-lifecycle workflow by managing team creation, task dispatching, progress monitoring, and session state persistence. 
+ +## Role Boundaries + +### MUST +- Parse user requirements and clarify ambiguous inputs +- Create team and spawn worker subagents +- Dispatch tasks with proper dependency chains +- Monitor task progress and route messages +- Handle session resume and reconciliation +- Maintain session state persistence +- Provide progress reports and next-step options + +### MUST NOT +- Execute spec/impl/research work directly (delegate to workers) +- Modify task outputs (workers own their deliverables) +- Skip dependency validation +- Proceed without user confirmation at checkpoints + +## Message Types + +| Message Type | Sender | Trigger | Coordinator Action | +|--------------|--------|---------|-------------------| +| `task_complete` | Worker | Task finished | Update session, check dependencies, kick next task | +| `task_blocked` | Worker | Dependency missing | Log block reason, wait for predecessor | +| `discussion_needed` | Worker | Ambiguity found | Route to user via AskUserQuestion | +| `research_ready` | analyst | Research done | Checkpoint with user before impl | + +## Toolbox + +### Available Commands +- `commands/dispatch.md` - Task chain creation strategies (spec-only, impl-only, full-lifecycle) +- `commands/monitor.md` - Coordination loop with message routing and checkpoint handling + +### Subagent Capabilities +- `TeamCreate` - Initialize team with session metadata +- `TeamSpawn` - Spawn worker subagents (analyst, writer, discussant, planner, executor, tester, reviewer, etc.) 
+- `TaskCreate` - Create tasks with dependencies +- `TaskUpdate` - Update task status/metadata +- `TaskGet` - Retrieve task details +- `AskUserQuestion` - Interactive user prompts + +### CLI Capabilities +- Session file I/O (`Read`, `Write`) +- Directory scanning (`Glob`) +- Background execution for long-running tasks + +--- + +## Execution Flow + +### Phase 0: Session Resume Check + +**Purpose**: Detect and resume interrupted sessions + +```javascript +// Scan for session files +const sessionFiles = Glob("D:/Claude_dms3/.workflow/.sessions/team-lifecycle-*.json") + +if (sessionFiles.length === 0) { + // No existing session, proceed to Phase 1 + goto Phase1 +} + +if (sessionFiles.length === 1) { + // Single session found + const session = Read(sessionFiles[0]) + if (session.status === "active" || session.status === "paused") { + Output("[coordinator] Resuming session: " + session.session_id) + goto SessionReconciliation + } +} + +if (sessionFiles.length > 1) { + // Multiple sessions - ask user + const choices = sessionFiles.map(f => { + const s = Read(f) + return `${s.session_id} (${s.status}) - ${s.mode} - ${s.tasks_completed}/${s.tasks_total}` + }) + + const answer = AskUserQuestion({ + question: "Multiple sessions found. 
Which to resume?", + choices: ["Create new session", ...choices] + }) + + if (answer === "Create new session") { + goto Phase1 + } else { + const selectedSession = Read(sessionFiles[answer.index - 1]) + goto SessionReconciliation + } +} + +// Session Reconciliation Process +SessionReconciliation: { + Output("[coordinator] Reconciling session state...") + + // Pipeline constants (aligned with SKILL.md Three-Mode Pipeline) + const SPEC_CHAIN = [ + "RESEARCH-001", "DISCUSS-001", "DRAFT-001", "DISCUSS-002", + "DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004", + "DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006" + ] + + const IMPL_CHAIN = ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"] + + const FE_CHAIN = ["DEV-FE-001", "QA-FE-001"] + + const FULLSTACK_CHAIN = ["PLAN-001", "IMPL-001", "DEV-FE-001", "TEST-001", "QA-FE-001", "REVIEW-001"] + + // Task metadata โ€” role must match VALID_ROLES in SKILL.md + const TASK_METADATA = { + // Spec pipeline (12 tasks) + "RESEARCH-001": { role: "analyst", phase: "spec", deps: [], description: "Seed analysis: codebase exploration and context gathering" }, + "DISCUSS-001": { role: "discussant", phase: "spec", deps: ["RESEARCH-001"], description: "Critique research findings" }, + "DRAFT-001": { role: "writer", phase: "spec", deps: ["DISCUSS-001"], description: "Generate Product Brief" }, + "DISCUSS-002": { role: "discussant", phase: "spec", deps: ["DRAFT-001"], description: "Critique Product Brief" }, + "DRAFT-002": { role: "writer", phase: "spec", deps: ["DISCUSS-002"], description: "Generate Requirements/PRD" }, + "DISCUSS-003": { role: "discussant", phase: "spec", deps: ["DRAFT-002"], description: "Critique Requirements/PRD" }, + "DRAFT-003": { role: "writer", phase: "spec", deps: ["DISCUSS-003"], description: "Generate Architecture Document" }, + "DISCUSS-004": { role: "discussant", phase: "spec", deps: ["DRAFT-003"], description: "Critique Architecture Document" }, + "DRAFT-004": { role: "writer", phase: "spec", 
deps: ["DISCUSS-004"], description: "Generate Epics" }, + "DISCUSS-005": { role: "discussant", phase: "spec", deps: ["DRAFT-004"], description: "Critique Epics" }, + "QUALITY-001": { role: "reviewer", phase: "spec", deps: ["DISCUSS-005"], description: "5-dimension spec quality validation" }, + "DISCUSS-006": { role: "discussant", phase: "spec", deps: ["QUALITY-001"], description: "Final review discussion and sign-off" }, + + // Impl pipeline (deps shown for impl-only; full-lifecycle adds PLAN-001 โ†’ ["DISCUSS-006"]) + "PLAN-001": { role: "planner", phase: "impl", deps: [], description: "Multi-angle codebase exploration and structured planning" }, + "IMPL-001": { role: "executor", phase: "impl", deps: ["PLAN-001"], description: "Code implementation following plan" }, + "TEST-001": { role: "tester", phase: "impl", deps: ["IMPL-001"], description: "Adaptive test-fix cycles and quality gates" }, + "REVIEW-001": { role: "reviewer", phase: "impl", deps: ["IMPL-001"], description: "4-dimension code review" }, + + // Frontend pipeline tasks + "DEV-FE-001": { role: "fe-developer", phase: "impl", deps: ["PLAN-001"], description: "Frontend component/page implementation" }, + "QA-FE-001": { role: "fe-qa", phase: "impl", deps: ["DEV-FE-001"], description: "5-dimension frontend QA" } + } + + // Helper: Get predecessor task + function getPredecessor(taskId, chain) { + const index = chain.indexOf(taskId) + return index > 0 ? 
chain[index - 1] : null + } + + // Step 1: Audit current state + const session = Read(sessionFile) + const teamState = TeamGet(session.team_id) + const allTasks = teamState.tasks + + Output("[coordinator] Session audit:") + Output(` Mode: ${session.mode}`) + Output(` Tasks completed: ${session.tasks_completed}/${session.tasks_total}`) + Output(` Status: ${session.status}`) + + // Step 2: Reconcile task states + const completedTasks = allTasks.filter(t => t.status === "completed") + const activeTasks = allTasks.filter(t => t.status === "active") + const blockedTasks = allTasks.filter(t => t.status === "blocked") + const pendingTasks = allTasks.filter(t => t.status === "pending") + + Output("[coordinator] Task breakdown:") + Output(` Completed: ${completedTasks.length}`) + Output(` Active: ${activeTasks.length}`) + Output(` Blocked: ${blockedTasks.length}`) + Output(` Pending: ${pendingTasks.length}`) + + // Step 3: Determine remaining work + const expectedChain = + session.mode === "spec-only" ? SPEC_CHAIN : + session.mode === "impl-only" ? IMPL_CHAIN : + session.mode === "fe-only" ? ["PLAN-001", ...FE_CHAIN] : + session.mode === "fullstack" ? FULLSTACK_CHAIN : + session.mode === "full-lifecycle-fe" ? 
[...SPEC_CHAIN, ...FULLSTACK_CHAIN] : + [...SPEC_CHAIN, ...IMPL_CHAIN] // full-lifecycle default + + const remainingTaskIds = expectedChain.filter(id => + !completedTasks.some(t => t.subject === id) + ) + + Output(`[coordinator] Remaining tasks: ${remainingTaskIds.join(", ")}`) + + // Step 4: Rebuild team if needed + if (!teamState || teamState.status === "disbanded") { + Output("[coordinator] Team disbanded, recreating...") + TeamCreate({ + team_id: session.team_id, + session_id: session.session_id, + mode: session.mode + }) + } + + // Step 5: Create missing tasks + for (const taskId of remainingTaskIds) { + const existingTask = allTasks.find(t => t.subject === taskId) + if (!existingTask) { + const metadata = TASK_METADATA[taskId] + TaskCreate({ + subject: taskId, + owner: metadata.role, + description: `${metadata.description}\nSession: ${sessionFolder}`, + blockedBy: metadata.deps, + status: "pending" + }) + Output(`[coordinator] Created missing task: ${taskId} (${metadata.role})`) + } + } + + // Step 6: Verify dependencies + for (const taskId of remainingTaskIds) { + const task = allTasks.find(t => t.subject === taskId) + if (!task) continue + const metadata = TASK_METADATA[taskId] + const allDepsMet = metadata.deps.every(depId => + completedTasks.some(t => t.subject === depId) + ) + + if (allDepsMet && task.status !== "completed") { + Output(`[coordinator] Unblocked task: ${taskId} (${metadata.role})`) + } + } + + // Step 7: Update session state + session.status = "active" + session.resumed_at = new Date().toISOString() + session.tasks_completed = completedTasks.length + Write(sessionFile, session) + + // Step 8: Report reconciliation + Output("[coordinator] Session reconciliation complete") + Output(`[coordinator] Ready to resume from: ${remainingTaskIds[0] || "all tasks complete"}`) + + // Step 9: Kick next task + if (remainingTaskIds.length > 0) { + const nextTaskId = remainingTaskIds[0] + const nextTask = TaskGet(nextTaskId) + const metadata = 
TASK_METADATA[nextTaskId] + + if (metadata.deps.every(depId => completedTasks.some(t => t.subject === depId))) { + TaskUpdate(nextTaskId, { status: "active" }) + Output(`[coordinator] Kicking task: ${nextTaskId}`) + goto Phase4_CoordinationLoop + } else { + Output(`[coordinator] Next task ${nextTaskId} blocked on: ${metadata.deps.join(", ")}`) + goto Phase4_CoordinationLoop + } + } else { + Output("[coordinator] All tasks complete!") + goto Phase5_Report + } +} +``` + +--- + +### Phase 1: Requirement Clarification + +**Purpose**: Parse user input and clarify execution parameters + +```javascript +Output("[coordinator] Phase 1: Requirement Clarification") + +// Parse $ARGUMENTS +const userInput = $ARGUMENTS + +// Extract mode if specified +let mode = null +if (userInput.includes("spec-only")) mode = "spec-only" +if (userInput.includes("impl-only")) mode = "impl-only" +if (userInput.includes("full-lifecycle")) mode = "full-lifecycle" + +// Extract scope if specified +let scope = null +if (userInput.includes("scope:")) { + scope = userInput.match(/scope:\s*([^\n]+)/)[1] +} + +// Extract focus areas +let focus = [] +if (userInput.includes("focus:")) { + focus = userInput.match(/focus:\s*([^\n]+)/)[1].split(",").map(s => s.trim()) +} + +// Extract depth preference +let depth = "standard" +if (userInput.includes("depth:shallow")) depth = "shallow" +if (userInput.includes("depth:deep")) depth = "deep" + +// Ask for missing parameters +if (!mode) { + mode = AskUserQuestion({ + question: "Select execution mode:", + choices: [ + "spec-only - Generate specifications only", + "impl-only - Implementation only (requires existing spec)", + "full-lifecycle - Complete spec + implementation", + "fe-only - Frontend-only pipeline (plan โ†’ dev โ†’ QA)", + "fullstack - Backend + frontend parallel pipeline", + "full-lifecycle-fe - Full lifecycle with frontend (spec โ†’ fullstack)" + ] + }) +} + +if (!scope) { + scope = AskUserQuestion({ + question: "Describe the project scope:", + type: 
"text" + }) +} + +if (focus.length === 0) { + const focusAnswer = AskUserQuestion({ + question: "Any specific focus areas? (optional)", + type: "text", + optional: true + }) + if (focusAnswer) { + focus = focusAnswer.split(",").map(s => s.trim()) + } +} + +// Determine execution method +const executionMethod = AskUserQuestion({ + question: "Execution method:", + choices: [ + "sequential - One task at a time (safer, slower)", + "parallel - Multiple tasks in parallel (faster, more complex)" + ] +}) + +// Store clarified requirements +const requirements = { + mode, + scope, + focus, + depth, + executionMethod, + originalInput: userInput +} + +// --- Frontend Detection --- +// Auto-detect frontend tasks and adjust pipeline mode +const FE_KEYWORDS = /component|page|UI|ๅ‰็ซฏ|frontend|CSS|HTML|React|Vue|Tailwind|็ป„ไปถ|้กต้ข|ๆ ทๅผ|layout|responsive|Svelte|Next\.js|Nuxt|shadcn|่ฎพ่ฎก็ณป็ปŸ|design.system/i +const BE_KEYWORDS = /API|database|server|ๅŽ็ซฏ|backend|middleware|auth|REST|GraphQL|migration|schema|model|controller|service/i + +function detectImplMode(taskDescription) { + const hasFE = FE_KEYWORDS.test(taskDescription) + const hasBE = BE_KEYWORDS.test(taskDescription) + + // Also check project files for frontend frameworks + const hasFEFiles = Bash(`test -f package.json && (grep -q react package.json || grep -q vue package.json || grep -q svelte package.json || grep -q next package.json); echo $?`) === '0' + + if (hasFE && hasBE) return 'fullstack' + if (hasFE || hasFEFiles) return 'fe-only' + return 'impl-only' // default backend +} + +// Apply frontend detection for implementation modes +if (mode === 'impl-only' || mode === 'full-lifecycle') { + const detectedMode = detectImplMode(scope + ' ' + userInput) + if (detectedMode !== 'impl-only') { + // Frontend detected โ€” upgrade pipeline mode + if (mode === 'impl-only') { + mode = detectedMode // fe-only or fullstack + } else if (mode === 'full-lifecycle') { + mode = 'full-lifecycle-fe' // spec + fullstack + } + 
requirements.mode = mode + Output(`[coordinator] Frontend detected โ†’ pipeline upgraded to: ${mode}`) + } +} + +Output("[coordinator] Requirements clarified:") +Output(` Mode: ${mode}`) +Output(` Scope: ${scope}`) +Output(` Focus: ${focus.join(", ") || "none"}`) +Output(` Depth: ${depth}`) +Output(` Execution: ${executionMethod}`) + +goto Phase2 +``` + +--- + +### Phase 2: Create Team + Initialize Session + +**Purpose**: Initialize team and session state + +```javascript +Output("[coordinator] Phase 2: Team Creation") + +// Generate session ID +const sessionId = `team-lifecycle-${Date.now()}` +const teamId = sessionId + +// Create team +TeamCreate({ + team_id: teamId, + session_id: sessionId, + mode: requirements.mode, + scope: requirements.scope, + focus: requirements.focus, + depth: requirements.depth, + executionMethod: requirements.executionMethod +}) + +Output(`[coordinator] Team created: ${teamId}`) + +// Initialize wisdom directory +const wisdomDir = `${sessionFolder}/wisdom` +Bash(`mkdir -p "${wisdomDir}"`) +Write(`${wisdomDir}/learnings.md`, `# Learnings\n\n\n`) +Write(`${wisdomDir}/decisions.md`, `# Decisions\n\n\n`) +Write(`${wisdomDir}/conventions.md`, `# Conventions\n\n\n\n`) +Write(`${wisdomDir}/issues.md`, `# Known Issues\n\n\n`) + +// Initialize session file +const sessionFile = `D:/Claude_dms3/.workflow/.sessions/${sessionId}.json` +const sessionData = { + session_id: sessionId, + team_id: teamId, + mode: requirements.mode, + scope: requirements.scope, + focus: requirements.focus, + depth: requirements.depth, + executionMethod: requirements.executionMethod, + status: "active", + created_at: new Date().toISOString(), + tasks_total: requirements.mode === "spec-only" ? 12 : + requirements.mode === "impl-only" ? 4 : + requirements.mode === "fe-only" ? 3 : + requirements.mode === "fullstack" ? 6 : + requirements.mode === "full-lifecycle-fe" ? 18 : 16, + tasks_completed: 0, + current_phase: requirements.mode === "impl-only" ? 
"impl" : "spec" +} + +Write(sessionFile, sessionData) +Output(`[coordinator] Session file created: ${sessionFile}`) + +// โš ๏ธ Workers are NOT pre-spawned here. +// Workers are spawned per-stage in Phase 4 via Stop-Wait Task(run_in_background: false). +// See SKILL.md Coordinator Spawn Template for worker prompt templates. +// +// Worker roles by mode (spawned on-demand, must match VALID_ROLES in SKILL.md): +// spec-only: analyst, discussant, writer, reviewer +// impl-only: planner, executor, tester, reviewer +// fe-only: planner, fe-developer, fe-qa +// fullstack: planner, executor, fe-developer, tester, fe-qa, reviewer +// full-lifecycle: analyst, discussant, writer, reviewer, planner, executor, tester +// full-lifecycle-fe: all of the above + fe-developer, fe-qa +// On-demand (ambiguity): analyst or explorer + +goto Phase3 +``` + +--- + +### Phase 3: Create Task Chain + +**Purpose**: Dispatch tasks based on execution mode + +```javascript +Output("[coordinator] Phase 3: Task Dispatching") + +// Delegate to command file +const dispatchStrategy = Read("commands/dispatch.md") + +// Execute strategy defined in command file +// (dispatch.md contains the complete task chain creation logic) + +goto Phase4 +``` + +--- + +### Phase 4: Coordination Loop + +**Purpose**: Monitor task progress and route messages + +> **่ฎพ่ฎกๅŽŸๅˆ™๏ผˆStop-Wait๏ผ‰**: ๆจกๅž‹ๆ‰ง่กŒๆฒกๆœ‰ๆ—ถ้—ดๆฆ‚ๅฟต๏ผŒ็ฆๆญขไปปไฝ•ๅฝขๅผ็š„่ฝฎ่ฏข็ญ‰ๅพ…ใ€‚ +> - โŒ ็ฆๆญข: `while` ๅพช็Žฏ + `sleep` + ๆฃ€ๆŸฅ็Šถๆ€ +> - โœ… ้‡‡็”จ: ๅŒๆญฅ `Task(run_in_background: false)` ่ฐƒ็”จ๏ผŒWorker ่ฟ”ๅ›ž = ้˜ถๆฎตๅฎŒๆˆไฟกๅท +> +> ๆŒ‰ Phase 3 ๅˆ›ๅปบ็š„ไปปๅŠก้“พ้กบๅบ๏ผŒ้€้˜ถๆฎต spawn worker ๅŒๆญฅๆ‰ง่กŒใ€‚ +> Worker prompt ไฝฟ็”จ SKILL.md Coordinator Spawn Templateใ€‚ + +```javascript +Output("[coordinator] Phase 4: Coordination Loop") + +// Delegate to command file +const monitorStrategy = Read("commands/monitor.md") + +// Execute strategy defined in command file +// (monitor.md contains the complete message routing and 
checkpoint logic) + +goto Phase5 +``` + +--- + +### Phase 5: Report + Persistent Loop + +**Purpose**: Provide completion report and offer next steps + +```javascript +Output("[coordinator] Phase 5: Completion Report") + +// Load session state +const session = Read(sessionFile) +const teamState = TeamGet(session.team_id) + +// Generate report +Output("[coordinator] ========================================") +Output("[coordinator] TEAM LIFECYCLE EXECUTION COMPLETE") +Output("[coordinator] ========================================") +Output(`[coordinator] Session ID: ${session.session_id}`) +Output(`[coordinator] Mode: ${session.mode}`) +Output(`[coordinator] Tasks Completed: ${session.tasks_completed}/${session.tasks_total}`) +Output(`[coordinator] Duration: ${calculateDuration(session.created_at, new Date())}`) + +// List deliverables +const completedTasks = teamState.tasks.filter(t => t.status === "completed") +Output("[coordinator] Deliverables:") +for (const task of completedTasks) { + Output(` โœ“ ${task.subject}: ${task.description}`) + if (task.output_file) { + Output(` Output: ${task.output_file}`) + } +} + +// Update session status +session.status = "completed" +session.completed_at = new Date().toISOString() +Write(sessionFile, session) + +// Offer next steps +const nextAction = AskUserQuestion({ + question: "What would you like to do next?", + choices: [ + "exit - End session", + "review - Review specific deliverables", + "extend - Add more tasks to this session", + "handoff-lite-plan - Create lite-plan from spec", + "handoff-full-plan - Create full-plan from spec", + "handoff-req-plan - Create req-plan from requirements", + "handoff-create-issues - Generate GitHub issues" + ] +}) + +switch (nextAction) { + case "exit": + Output("[coordinator] Session ended. 
Goodbye!") + break + + case "review": + const taskToReview = AskUserQuestion({ + question: "Which task output to review?", + choices: completedTasks.map(t => t.subject) + }) + const reviewTask = completedTasks.find(t => t.subject === taskToReview) + if (reviewTask.output_file) { + const content = Read(reviewTask.output_file) + Output(`[coordinator] Task: ${reviewTask.subject}`) + Output(content) + } + goto Phase5 // Loop back for more actions + + case "extend": + const extensionScope = AskUserQuestion({ + question: "Describe additional work:", + type: "text" + }) + Output("[coordinator] Creating extension tasks...") + // Create custom tasks based on extension scope + // (Implementation depends on extension requirements) + goto Phase4 // Return to coordination loop + + case "handoff-lite-plan": + Output("[coordinator] Generating lite-plan from specifications...") + // Read spec completion output (DISCUSS-006 = final sign-off) + const specOutput = Read(getTaskOutput("DISCUSS-006")) + // Create lite-plan format + const litePlan = generateLitePlan(specOutput) + const litePlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-lite-plan.md` + Write(litePlanFile, litePlan) + Output(`[coordinator] Lite-plan created: ${litePlanFile}`) + goto Phase5 + + case "handoff-full-plan": + Output("[coordinator] Generating full-plan from specifications...") + const fullSpecOutput = Read(getTaskOutput("DISCUSS-006")) + const fullPlan = generateFullPlan(fullSpecOutput) + const fullPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-full-plan.md` + Write(fullPlanFile, fullPlan) + Output(`[coordinator] Full-plan created: ${fullPlanFile}`) + goto Phase5 + + case "handoff-req-plan": + Output("[coordinator] Generating req-plan from requirements...") + const reqAnalysis = Read(getTaskOutput("RESEARCH-001")) + const reqPlan = generateReqPlan(reqAnalysis) + const reqPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-req-plan.md` + 
Write(reqPlanFile, reqPlan) + Output(`[coordinator] Req-plan created: ${reqPlanFile}`) + goto Phase5 + + case "handoff-create-issues": + Output("[coordinator] Generating GitHub issues...") + const issuesSpec = Read(getTaskOutput("DISCUSS-006")) + const issues = generateGitHubIssues(issuesSpec) + const issuesFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-issues.json` + Write(issuesFile, issues) + Output(`[coordinator] Issues created: ${issuesFile}`) + Output("[coordinator] Use GitHub CLI to import: gh issue create --title ... --body ...") + goto Phase5 +} + +// Helper functions +function calculateDuration(start, end) { + const diff = new Date(end) - new Date(start) + const minutes = Math.floor(diff / 60000) + const seconds = Math.floor((diff % 60000) / 1000) + return `${minutes}m ${seconds}s` +} + +function getTaskOutput(taskId) { + const task = TaskGet(taskId) + return task.output_file +} + +function generateLitePlan(specOutput) { + // Parse spec output and create lite-plan format + return `# Lite Plan\n\n${specOutput}\n\n## Implementation Steps\n- Step 1\n- Step 2\n...` +} + +function generateFullPlan(specOutput) { + // Parse spec output and create full-plan format with detailed breakdown + return `# Full Plan\n\n${specOutput}\n\n## Detailed Implementation\n### Phase 1\n### Phase 2\n...` +} + +function generateReqPlan(reqAnalysis) { + // Parse requirements and create req-plan format + return `# Requirements Plan\n\n${reqAnalysis}\n\n## Acceptance Criteria\n- Criterion 1\n- Criterion 2\n...` +} + +function generateGitHubIssues(specOutput) { + // Parse spec and generate GitHub issue JSON + return { + issues: [ + { title: "Issue 1", body: "Description", labels: ["feature"] }, + { title: "Issue 2", body: "Description", labels: ["bug"] } + ] + } +} +``` + +--- + +## Session File Structure + +```json +{ + "session_id": "team-lifecycle-1234567890", + "team_id": "team-lifecycle-1234567890", + "mode": "full-lifecycle", + "scope": "Build authentication 
system", + "focus": ["security", "scalability"], + "depth": "standard", + "executionMethod": "sequential", + "status": "active", + "created_at": "2026-02-18T10:00:00Z", + "completed_at": null, + "resumed_at": null, + "tasks_total": 16, + "tasks_completed": 5, + "current_phase": "spec" +} +``` + +--- + +## Error Handling + +| Error Type | Coordinator Action | +|------------|-------------------| +| Task timeout | Log timeout, mark task as failed, ask user to retry or skip | +| Worker crash | Respawn worker, reassign task | +| Dependency cycle | Detect cycle, report to user, halt execution | +| Invalid mode | Reject with error message, ask user to clarify | +| Session corruption | Attempt recovery, fallback to manual reconciliation | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/discussant/commands/critique.md b/.claude/skills_lib/team-lifecycle-v2/roles/discussant/commands/critique.md new file mode 100644 index 00000000..bc429a4c --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/discussant/commands/critique.md @@ -0,0 +1,396 @@ +# Command: Multi-Perspective Critique + +Phase 3 of discussant execution - launch parallel CLI analyses for each required perspective. + +## Overview + +This command executes multi-perspective critique by routing to specialized CLI tools based on perspective type. Each perspective produces structured critique with strengths, weaknesses, suggestions, and ratings. + +## Perspective Definitions + +### 1. 
Product Perspective (gemini) + +**Focus**: Market fit, user value, business viability, competitive differentiation + +**CLI Tool**: gemini + +**Output Structure**: +```json +{ + "perspective": "product", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5 +} +``` + +**Prompt Template**: +``` +Analyze from Product Manager perspective: +- Market fit and user value proposition +- Business viability and ROI potential +- Competitive differentiation +- User experience and adoption barriers + +Artifact: {artifactContent} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5) +``` + +### 2. Technical Perspective (codex) + +**Focus**: Feasibility, tech debt, performance, security, maintainability + +**CLI Tool**: codex + +**Output Structure**: +```json +{ + "perspective": "technical", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5 +} +``` + +**Prompt Template**: +``` +Analyze from Tech Lead perspective: +- Technical feasibility and implementation complexity +- Architecture decisions and tech debt implications +- Performance and scalability considerations +- Security vulnerabilities and risks +- Code maintainability and extensibility + +Artifact: {artifactContent} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5) +``` + +### 3. 
Quality Perspective (claude) + +**Focus**: Completeness, testability, consistency, standards compliance + +**CLI Tool**: claude + +**Output Structure**: +```json +{ + "perspective": "quality", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5 +} +``` + +**Prompt Template**: +``` +Analyze from QA Lead perspective: +- Specification completeness and clarity +- Testability and test coverage potential +- Consistency across requirements/design +- Standards compliance (coding, documentation, accessibility) +- Ambiguity detection and edge case coverage + +Artifact: {artifactContent} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5) +``` + +### 4. Risk Perspective (gemini) + +**Focus**: Risk identification, dependency analysis, assumption validation, failure modes + +**CLI Tool**: gemini + +**Output Structure**: +```json +{ + "perspective": "risk", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5, + "risk_level": "low|medium|high|critical" +} +``` + +**Prompt Template**: +``` +Analyze from Risk Analyst perspective: +- Risk identification (technical, business, operational) +- Dependency analysis and external risks +- Assumption validation and hidden dependencies +- Failure modes and mitigation strategies +- Timeline and resource risks + +Artifact: {artifactContent} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5), risk_level +``` + +### 5. 
Coverage Perspective (gemini) + +**Focus**: Requirement completeness vs original intent, scope drift, gap detection + +**CLI Tool**: gemini + +**Output Structure**: +```json +{ + "perspective": "coverage", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5, + "covered_requirements": ["REQ-ID"], + "partial_requirements": ["REQ-ID"], + "missing_requirements": ["REQ-ID"], + "scope_creep": ["description"] +} +``` + +**Prompt Template**: +``` +Analyze from Requirements Analyst perspective: +- Compare current artifact against original requirements in discovery-context.json +- Identify covered requirements (fully addressed) +- Identify partial requirements (partially addressed) +- Identify missing requirements (not addressed) +- Detect scope creep (new items not in original requirements) + +Original Requirements: {discoveryContext} +Current Artifact: {artifactContent} + +Output JSON with: +- strengths[], weaknesses[], suggestions[], rating (1-5) +- covered_requirements[] (REQ-IDs fully addressed) +- partial_requirements[] (REQ-IDs partially addressed) +- missing_requirements[] (REQ-IDs not addressed) โ† CRITICAL if non-empty +- scope_creep[] (new items not in original requirements) +``` + +## Execution Pattern + +### Parallel CLI Execution + +```javascript +// Load artifact content +const artifactPath = `${sessionFolder}/${config.artifact}` +const artifactContent = config.type === 'json' + ? 
JSON.parse(Read(artifactPath)) + : Read(artifactPath) + +// Load discovery context for coverage perspective +let discoveryContext = null +try { + discoveryContext = JSON.parse(Read(`${sessionFolder}/spec/discovery-context.json`)) +} catch { /* may not exist in early rounds */ } + +// Launch parallel CLI analyses +const perspectiveResults = [] + +for (const perspective of config.perspectives) { + let cliTool, prompt + + switch(perspective) { + case 'product': + cliTool = 'gemini' + prompt = `Analyze from Product Manager perspective: +- Market fit and user value proposition +- Business viability and ROI potential +- Competitive differentiation +- User experience and adoption barriers + +Artifact: +${JSON.stringify(artifactContent, null, 2)} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)` + break + + case 'technical': + cliTool = 'codex' + prompt = `Analyze from Tech Lead perspective: +- Technical feasibility and implementation complexity +- Architecture decisions and tech debt implications +- Performance and scalability considerations +- Security vulnerabilities and risks +- Code maintainability and extensibility + +Artifact: +${JSON.stringify(artifactContent, null, 2)} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)` + break + + case 'quality': + cliTool = 'claude' + prompt = `Analyze from QA Lead perspective: +- Specification completeness and clarity +- Testability and test coverage potential +- Consistency across requirements/design +- Standards compliance (coding, documentation, accessibility) +- Ambiguity detection and edge case coverage + +Artifact: +${JSON.stringify(artifactContent, null, 2)} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)` + break + + case 'risk': + cliTool = 'gemini' + prompt = `Analyze from Risk Analyst perspective: +- Risk identification (technical, business, operational) +- Dependency analysis and external risks +- Assumption validation and hidden 
dependencies +- Failure modes and mitigation strategies +- Timeline and resource risks + +Artifact: +${JSON.stringify(artifactContent, null, 2)} + +Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5), risk_level` + break + + case 'coverage': + cliTool = 'gemini' + prompt = `Analyze from Requirements Analyst perspective: +- Compare current artifact against original requirements in discovery-context.json +- Identify covered requirements (fully addressed) +- Identify partial requirements (partially addressed) +- Identify missing requirements (not addressed) +- Detect scope creep (new items not in original requirements) + +Original Requirements: +${discoveryContext ? JSON.stringify(discoveryContext, null, 2) : 'Not available'} + +Current Artifact: +${JSON.stringify(artifactContent, null, 2)} + +Output JSON with: +- strengths[], weaknesses[], suggestions[], rating (1-5) +- covered_requirements[] (REQ-IDs fully addressed) +- partial_requirements[] (REQ-IDs partially addressed) +- missing_requirements[] (REQ-IDs not addressed) โ† CRITICAL if non-empty +- scope_creep[] (new items not in original requirements)` + break + } + + // Execute CLI analysis (run_in_background: true per CLAUDE.md) + Bash({ + command: `ccw cli -p "${prompt.replace(/"/g, '\\"')}" --tool ${cliTool} --mode analysis`, + run_in_background: true, + description: `[discussant] ${perspective} perspective analysis` + }) +} + +// Wait for all CLI results via hook callbacks +// Results will be collected in perspectiveResults array +``` + +## Critical Divergence Detection + +### Coverage Gap Detection + +```javascript +const coverageResult = perspectiveResults.find(p => p.perspective === 'coverage') +if (coverageResult?.missing_requirements?.length > 0) { + // Flag as critical divergence + synthesis.divergent_views.push({ + topic: 'requirement_coverage_gap', + description: `${coverageResult.missing_requirements.length} requirements from discovery-context not covered: 
${coverageResult.missing_requirements.join(', ')}`, + severity: 'high', + source: 'coverage' + }) +} +``` + +### Risk Level Detection + +```javascript +const riskResult = perspectiveResults.find(p => p.perspective === 'risk') +if (riskResult?.risk_level === 'high' || riskResult?.risk_level === 'critical') { + synthesis.risk_flags.push({ + level: riskResult.risk_level, + description: riskResult.weaknesses.join('; ') + }) +} +``` + +## Fallback Strategy + +### CLI Failure Fallback + +```javascript +// If CLI analysis fails for a perspective, fallback to direct Claude analysis +try { + // CLI execution + Bash({ command: `ccw cli -p "..." --tool ${cliTool} --mode analysis`, run_in_background: true }) +} catch (error) { + // Fallback: Direct Claude analysis + const fallbackResult = { + perspective: perspective, + strengths: ["Direct analysis: ..."], + weaknesses: ["Direct analysis: ..."], + suggestions: ["Direct analysis: ..."], + rating: 3, + _fallback: true + } + perspectiveResults.push(fallbackResult) +} +``` + +### All CLI Failures + +```javascript +if (perspectiveResults.every(r => r._fallback)) { + // Generate basic discussion from direct reading + const basicDiscussion = { + convergent_themes: ["Basic analysis from direct reading"], + divergent_views: [], + action_items: ["Review artifact manually"], + open_questions: [], + decisions: [], + risk_flags: [], + overall_sentiment: 'neutral', + consensus_reached: true, + _basic_mode: true + } +} +``` + +## Output Format + +Each perspective produces: + +```json +{ + "perspective": "product|technical|quality|risk|coverage", + "strengths": ["string"], + "weaknesses": ["string"], + "suggestions": ["string"], + "rating": 1-5, + + // Risk perspective only + "risk_level": "low|medium|high|critical", + + // Coverage perspective only + "covered_requirements": ["REQ-ID"], + "partial_requirements": ["REQ-ID"], + "missing_requirements": ["REQ-ID"], + "scope_creep": ["description"] +} +``` + +## Integration with Phase 4 + +Phase 4 
(Consensus Synthesis) consumes `perspectiveResults` array to: +1. Extract convergent themes (2+ perspectives agree) +2. Extract divergent views (perspectives conflict) +3. Detect coverage gaps (missing_requirements non-empty) +4. Assess risk flags (high/critical risk_level) +5. Determine consensus_reached (true if no critical divergences) diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/discussant/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/discussant/role.md new file mode 100644 index 00000000..855a7ef3 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/discussant/role.md @@ -0,0 +1,265 @@ +# Role: discussant + +Multi-perspective critique, consensus building, and conflict escalation. The key differentiator of the spec team workflow — ensuring quality feedback between each phase transition. + +## Role Identity + +- **Name**: `discussant` +- **Task Prefix**: `DISCUSS-*` +- **Output Tag**: `[discussant]` +- **Responsibility**: Load Artifact → Multi-Perspective Critique → Synthesize Consensus → Report +- **Communication**: SendMessage to coordinator only + +## Role Boundaries + +### MUST +- Only process DISCUSS-* tasks +- Communicate only with coordinator +- Write discussion records to `discussions/` folder +- Tag all SendMessage and team_msg calls with `[discussant]` +- Load roundConfig with all 6 rounds +- Execute multi-perspective critique via CLI tools +- Detect coverage gaps from coverage perspective +- Synthesize consensus with convergent/divergent analysis +- Report consensus_reached vs discussion_blocked paths + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify spec documents directly +- Skip perspectives defined in roundConfig +- Proceed without artifact loading +- Ignore critical divergences + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `discussion_ready` | discussant → coordinator | Discussion complete, consensus reached |
With discussion record path and decision summary | +| `discussion_blocked` | discussant → coordinator | Cannot reach consensus | With divergence points and options, needs coordinator | +| `impl_progress` | discussant → coordinator | Long discussion progress | Multi-perspective analysis progress | +| `error` | discussant → coordinator | Discussion cannot proceed | Input artifact missing, etc. | + +## Message Bus + +Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log: + +```javascript +// Discussion complete +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "discussion_ready", summary: "[discussant] Scope discussion consensus reached: 3 decisions", ref: `${sessionFolder}/discussions/discuss-001-scope.md` }) + +// Discussion blocked +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "discussion_blocked", summary: "[discussant] Cannot reach consensus on tech stack", data: { reason: "...", options: [...]
} }) + +// Error report +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "error", summary: "[discussant] Input artifact missing" }) +``` + +### CLI Fallback + +When `mcp__ccw-tools__team_msg` MCP is unavailable: + +```javascript +Bash(`ccw team log --team "${teamName}" --from "discussant" --to "coordinator" --type "discussion_ready" --summary "[discussant] Discussion complete" --ref "${sessionFolder}/discussions/discuss-001-scope.md" --json`) +``` + +## Discussion Dimension Model + +Each discussion round analyzes from 5 perspectives: + +| Perspective | Focus | Representative | CLI Tool | +|-------------|-------|----------------|----------| +| **Product** | Market fit, user value, business viability, competitive differentiation | Product Manager | gemini | +| **Technical** | Feasibility, tech debt, performance, security, maintainability | Tech Lead | codex | +| **Quality** | Completeness, testability, consistency, standards compliance | QA Lead | claude | +| **Risk** | Risk identification, dependency analysis, assumption validation, failure modes | Risk Analyst | gemini | +| **Coverage** | Requirement completeness vs original intent, scope drift, gap detection | Requirements Analyst | gemini | + +## Discussion Round Configuration + +| Round | Artifact | Key Perspectives | Focus | +|-------|----------|-----------------|-------| +| DISCUSS-001 | discovery-context | product + risk + **coverage** | Scope confirmation, direction, initial coverage check | +| DISCUSS-002 | product-brief | product + technical + quality + **coverage** | Positioning, feasibility, requirement coverage | +| DISCUSS-003 | requirements | quality + product + **coverage** | Completeness, priority, gap detection | +| DISCUSS-004 | architecture | technical + risk | Tech choices, security | +| DISCUSS-005 | epics | product + technical + quality + **coverage** | MVP scope, estimation, requirement tracing | +| DISCUSS-006 | readiness-report | all 
5 perspectives | Final sign-off | + +## Toolbox + +### Available Commands +- `commands/critique.md` - Multi-perspective CLI critique (Phase 3) + +### Subagent Capabilities +None (discussant uses CLI tools directly) + +### CLI Capabilities +- **gemini**: Product perspective, Risk perspective, Coverage perspective +- **codex**: Technical perspective +- **claude**: Quality perspective + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('DISCUSS-') && + t.owner === 'discussant' && + t.status === 'pending' && + t.blockedBy.length === 0 +) + +if (myTasks.length === 0) return // idle + +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 2: Artifact Loading + +```javascript +const sessionMatch = task.description.match(/Session:\s*(.+)/) +const sessionFolder = sessionMatch ? sessionMatch[1].trim() : '' +const roundMatch = task.subject.match(/DISCUSS-(\d+)/) +const roundNumber = roundMatch ? 
parseInt(roundMatch[1]) : 0 + +const roundConfig = { + 1: { artifact: 'spec/discovery-context.json', type: 'json', outputFile: 'discuss-001-scope.md', perspectives: ['product', 'risk', 'coverage'], label: '范围讨论' }, + 2: { artifact: 'spec/product-brief.md', type: 'md', outputFile: 'discuss-002-brief.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Brief评审' }, + 3: { artifact: 'spec/requirements/_index.md', type: 'md', outputFile: 'discuss-003-requirements.md', perspectives: ['quality', 'product', 'coverage'], label: '需求讨论' }, + 4: { artifact: 'spec/architecture/_index.md', type: 'md', outputFile: 'discuss-004-architecture.md', perspectives: ['technical', 'risk'], label: '架构讨论' }, + 5: { artifact: 'spec/epics/_index.md', type: 'md', outputFile: 'discuss-005-epics.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Epics讨论' }, + 6: { artifact: 'spec/readiness-report.md', type: 'md', outputFile: 'discuss-006-final.md', perspectives: ['product', 'technical', 'quality', 'risk', 'coverage'], label: '最终签收' } +} + +const config = roundConfig[roundNumber] +// Load target artifact and prior discussion records for continuity +Bash(`mkdir -p ${sessionFolder}/discussions`) +``` + +### Phase 3: Multi-Perspective Critique + +**Delegate to**: `Read("commands/critique.md")` + +Launch parallel CLI analyses for each required perspective. See `commands/critique.md` for full implementation.
+ +### Phase 4: Consensus Synthesis + +```javascript +const synthesis = { + convergent_themes: [], + divergent_views: [], + action_items: [], + open_questions: [], + decisions: [], + risk_flags: [], + overall_sentiment: '', // positive/neutral/concerns/critical + consensus_reached: true // false if major unresolvable conflicts +} + +// Extract convergent themes (items mentioned positively by 2+ perspectives) +// Extract divergent views (items where perspectives conflict) +// Check coverage gaps from coverage perspective (if present) +const coverageResult = perspectiveResults.find(p => p.perspective === 'coverage') +if (coverageResult?.missing_requirements?.length > 0) { + synthesis.coverage_gaps = coverageResult.missing_requirements + synthesis.divergent_views.push({ + topic: 'requirement_coverage_gap', + description: `${coverageResult.missing_requirements.length} requirements from discovery-context not covered: ${coverageResult.missing_requirements.join(', ')}`, + severity: 'high', + source: 'coverage' + }) +} +// Check for unresolvable conflicts +const criticalDivergences = synthesis.divergent_views.filter(d => d.severity === 'high') +if (criticalDivergences.length > 0) synthesis.consensus_reached = false + +// Determine overall sentiment from average rating +// Generate discussion record markdown with all perspectives, convergence, divergence, action items + +Write(`${sessionFolder}/discussions/${config.outputFile}`, discussionRecord) +``` + +### Phase 5: Report to Coordinator + +```javascript +if (synthesis.consensus_reached) { + mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "discussant", to: "coordinator", + type: "discussion_ready", + summary: `[discussant] ${config.label}่ฎจ่ฎบๅฎŒๆˆ: ${synthesis.action_items.length}ไธช่กŒๅŠจ้กน, ${synthesis.open_questions.length}ไธชๅผ€ๆ”พ้—ฎ้ข˜, ๆ€ปไฝ“${synthesis.overall_sentiment}`, + ref: `${sessionFolder}/discussions/${config.outputFile}` + }) + + SendMessage({ + type: "message", + recipient: 
"coordinator", + content: `[discussant] ## 讨论结果: ${config.label} + +**Task**: ${task.subject} +**共识**: 已达成 +**总体评价**: ${synthesis.overall_sentiment} + +### 行动项 (${synthesis.action_items.length}) +${synthesis.action_items.map((item, i) => (i+1) + '. ' + item).join('\n') || '无'} + +### 开放问题 (${synthesis.open_questions.length}) +${synthesis.open_questions.map((q, i) => (i+1) + '. ' + q).join('\n') || '无'} + +### 讨论记录 +${sessionFolder}/discussions/${config.outputFile} + +共识已达成，可推进至下一阶段。`, + summary: `[discussant] ${config.label}共识达成: ${synthesis.action_items.length}行动项` + }) + + TaskUpdate({ taskId: task.id, status: 'completed' }) +} else { + // Consensus blocked - escalate to coordinator + mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "discussant", to: "coordinator", + type: "discussion_blocked", + summary: `[discussant] ${config.label}讨论阻塞: ${criticalDivergences.length}个关键分歧需决策`, + data: { + reason: criticalDivergences.map(d => d.description).join('; '), + options: criticalDivergences.map(d => ({ label: d.topic, description: d.options?.join(' vs ') || d.description })) + } + }) + + SendMessage({ + type: "message", + recipient: "coordinator", + content: `[discussant] ## 讨论阻塞: ${config.label} + +**Task**: ${task.subject} +**状态**: 无法达成共识，需要 coordinator 介入 + +### 关键分歧 +${criticalDivergences.map((d, i) => (i+1) + '. 
**' + d.topic + '**: ' + d.description).join('\n\n')} + +请通过 AskUserQuestion 收集用户对分歧点的决策。`, + summary: `[discussant] ${config.label}阻塞: ${criticalDivergences.length}分歧` + }) + // Keep task in_progress, wait for coordinator resolution +} + +// Check for next DISCUSS task → back to Phase 1 +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No DISCUSS-* tasks available | Idle, wait for coordinator assignment | +| Target artifact not found | Notify coordinator with `[discussant]` tag, request prerequisite completion | +| CLI perspective analysis failure | Fallback to direct Claude analysis for that perspective | +| All CLI analyses fail | Generate basic discussion from direct reading | +| Consensus timeout (all perspectives diverge) | Escalate as discussion_blocked with `[discussant]` tag | +| Prior discussion records missing | Continue without continuity context | +| Session folder not found | Notify coordinator with `[discussant]` tag, request session path | +| Unexpected error | Log error via team_msg with `[discussant]` tag, report to coordinator | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/executor/commands/implement.md b/.claude/skills_lib/team-lifecycle-v2/roles/executor/commands/implement.md new file mode 100644 index 00000000..8078f6d9 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/executor/commands/implement.md @@ -0,0 +1,356 @@ +# Implement Command + +## Purpose +Multi-backend code implementation with progress tracking and batch execution support.
+ +## Execution Paths + +### Path 1: Simple Task + Agent Backend (Direct Edit) + +**Criteria**: +```javascript +function isSimpleTask(task) { + return task.description.length < 200 && + !task.description.includes("refactor") && + !task.description.includes("architecture") && + !task.description.includes("multiple files") +} +``` + +**Execution**: +```javascript +if (isSimpleTask(task) && executor === "agent") { + // Direct file edit without subagent overhead + const targetFile = task.metadata?.target_file + if (targetFile) { + const content = Read(targetFile) + const prompt = buildExecutionPrompt(task, plan, [task]) + + // Apply edit directly + Edit(targetFile, oldContent, newContent) + + return { + success: true, + files_modified: [targetFile], + method: "direct_edit" + } + } +} +``` + +### Path 2: Agent Backend (code-developer subagent) + +**Execution**: +```javascript +if (executor === "agent") { + const prompt = buildExecutionPrompt(task, plan, [task]) + + const result = Subagent({ + type: "code-developer", + prompt: prompt, + run_in_background: false // Synchronous execution + }) + + return { + success: result.success, + files_modified: result.files_modified || [], + method: "subagent" + } +} +``` + +### Path 3: Codex Backend (CLI) + +**Execution**: +```javascript +if (executor === "codex") { + const prompt = buildExecutionPrompt(task, plan, [task]) + + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + status: "executing_codex", + message: "Starting Codex implementation..." 
+ }, "[executor]") + + const result = Bash( + `ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --cd ${task.metadata?.working_dir || "."}`, + { run_in_background: true, timeout: 300000 } + ) + + // Wait for CLI completion via hook callback + return { + success: true, + files_modified: [], // Will be detected by git diff + method: "codex_cli" + } +} +``` + +### Path 4: Gemini Backend (CLI) + +**Execution**: +```javascript +if (executor === "gemini") { + const prompt = buildExecutionPrompt(task, plan, [task]) + + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + status: "executing_gemini", + message: "Starting Gemini implementation..." + }, "[executor]") + + const result = Bash( + `ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --cd ${task.metadata?.working_dir || "."}`, + { run_in_background: true, timeout: 300000 } + ) + + // Wait for CLI completion via hook callback + return { + success: true, + files_modified: [], // Will be detected by git diff + method: "gemini_cli" + } +} +``` + +## Prompt Building + +### Single Task Prompt + +```javascript +function buildExecutionPrompt(task, plan, tasks) { + const context = extractContextFromPlan(plan, task) + + return ` +# Implementation Task: ${task.task_id} + +## Task Description +${task.description} + +## Acceptance Criteria +${task.acceptance_criteria?.map((c, i) => `${i + 1}. 
${c}`).join("\n") || "None specified"} + +## Context from Plan +${context} + +## Files to Modify +${task.metadata?.target_files?.join("\n") || "Auto-detect based on task"} + +## Constraints +- Follow existing code style and patterns +- Preserve backward compatibility +- Add appropriate error handling +- Include inline comments for complex logic +- Update related tests if applicable + +## Expected Output +- Modified files with implementation +- Brief summary of changes made +- Any assumptions or decisions made during implementation +`.trim() +} +``` + +### Batch Task Prompt + +```javascript +function buildBatchPrompt(tasks, plan) { + const taskDescriptions = tasks.map((task, i) => ` +### Task ${i + 1}: ${task.task_id} +**Description**: ${task.description} +**Acceptance Criteria**: +${task.acceptance_criteria?.map((c, j) => ` ${j + 1}. ${c}`).join("\n") || " None specified"} +**Target Files**: ${task.metadata?.target_files?.join(", ") || "Auto-detect"} + `).join("\n") + + return ` +# Batch Implementation: ${tasks.length} Tasks + +## Tasks to Implement +${taskDescriptions} + +## Context from Plan +${extractContextFromPlan(plan, tasks[0])} + +## Batch Execution Guidelines +- Implement tasks in the order listed +- Ensure each task's acceptance criteria are met +- Maintain consistency across all implementations +- Report any conflicts or dependencies discovered +- Follow existing code patterns and style + +## Expected Output +- All tasks implemented successfully +- Summary of changes per task +- Any cross-task considerations or conflicts +`.trim() +} +``` + +### Context Extraction + +```javascript +function extractContextFromPlan(plan, task) { + // Extract relevant sections from plan + const sections = [] + + // Architecture context + const archMatch = plan.match(/## Architecture[\s\S]*?(?=##|$)/) + if (archMatch) { + sections.push("### Architecture\n" + archMatch[0]) + } + + // Technical stack + const techMatch = plan.match(/## Technical Stack[\s\S]*?(?=##|$)/) + if 
(techMatch) { + sections.push("### Technical Stack\n" + techMatch[0]) + } + + // Related tasks context + const taskSection = plan.match(new RegExp(`${task.task_id}[\\s\\S]*?(?=IMPL-\\d+|$)`)) + if (taskSection) { + sections.push("### Task Context\n" + taskSection[0]) + } + + return sections.join("\n\n") || "No additional context available" +} +``` + +## Progress Tracking + +### Batch Progress Updates + +```javascript +function reportBatchProgress(batchIndex, totalBatches, currentTask) { + if (totalBatches > 1) { + team_msg({ + to: "coordinator", + type: "progress_update", + batch_index: batchIndex + 1, + total_batches: totalBatches, + current_task: currentTask.task_id, + message: `Processing batch ${batchIndex + 1}/${totalBatches}: ${currentTask.task_id}` + }, "[executor]") + } +} +``` + +### Long-Running Task Updates + +```javascript +function reportLongRunningTask(task, elapsedSeconds) { + if (elapsedSeconds > 60 && elapsedSeconds % 30 === 0) { + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + elapsed_seconds: elapsedSeconds, + message: `Still processing ${task.task_id} (${elapsedSeconds}s elapsed)...` + }, "[executor]") + } +} +``` + +## Utility Functions + +### Prompt Escaping + +```javascript +function escapePrompt(prompt) { + return prompt + .replace(/\\/g, "\\\\") + .replace(/"/g, '\\"') + .replace(/\n/g, "\\n") + .replace(/\$/g, "\\$") +} +``` + +### File Change Detection + +```javascript +function detectModifiedFiles() { + const gitDiff = Bash("git diff --name-only HEAD") + return gitDiff.stdout.split("\n").filter(f => f.trim()) +} +``` + +### Simple Task Detection + +```javascript +function isSimpleTask(task) { + const simpleIndicators = [ + task.description.length < 200, + !task.description.toLowerCase().includes("refactor"), + !task.description.toLowerCase().includes("architecture"), + !task.description.toLowerCase().includes("multiple files"), + !task.description.toLowerCase().includes("complex"), + 
task.metadata?.target_files?.length === 1 + ] + + return simpleIndicators.filter(Boolean).length >= 4 +} +``` + +## Error Recovery + +### Retry Logic + +```javascript +function executeWithRetry(task, executor, maxRetries = 3) { + let attempt = 0 + let lastError = null + + while (attempt < maxRetries) { + try { + const result = executeTask(task, executor) + if (result.success) { + return result + } + lastError = result.error + } catch (error) { + lastError = error.message + } + + attempt++ + if (attempt < maxRetries) { + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + message: `Retry attempt ${attempt}/${maxRetries} after error: ${lastError}` + }, "[executor]") + } + } + + return { + success: false, + error: lastError, + retry_count: maxRetries + } +} +``` + +### Backend Fallback + +```javascript +function executeWithFallback(task, primaryExecutor) { + const result = executeTask(task, primaryExecutor) + + if (!result.success && primaryExecutor !== "agent") { + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + message: `${primaryExecutor} failed, falling back to agent backend...` + }, "[executor]") + + return executeTask(task, "agent") + } + + return result +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/executor/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/executor/role.md new file mode 100644 index 00000000..a6f16a9d --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/executor/role.md @@ -0,0 +1,324 @@ +# Executor Role + +## 1. Role Identity + +- **Name**: executor +- **Task Prefix**: IMPL-* +- **Output Tag**: `[executor]` +- **Responsibility**: Load plan โ†’ Route to backend โ†’ Implement code โ†’ Self-validate โ†’ Report + +## 2. 
Role Boundaries + +### MUST +- Only process IMPL-* tasks +- Follow approved plan exactly +- Use declared execution backends (agent/codex/gemini) +- Self-validate all implementations (syntax + acceptance criteria) +- Tag all outputs with `[executor]` + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify plan files +- Skip self-validation +- Proceed without plan approval + +## 3. Message Types + +| Type | Direction | Purpose | Format | +|------|-----------|---------|--------| +| `task_request` | FROM coordinator | Receive IMPL-* task assignment | `{ type: "task_request", task_id, description }` | +| `task_complete` | TO coordinator | Report implementation success | `{ type: "task_complete", task_id, status: "success", files_modified, validation_results }` | +| `task_failed` | TO coordinator | Report implementation failure | `{ type: "task_failed", task_id, error, retry_count }` | +| `progress_update` | TO coordinator | Report batch progress | `{ type: "progress_update", task_id, batch_index, total_batches }` | + +## 4. Message Bus + +**Primary**: Use `team_msg` for all coordinator communication with `[executor]` tag: +```javascript +team_msg({ + to: "coordinator", + type: "task_complete", + task_id: "IMPL-001", + status: "success", + files_modified: ["src/auth.ts"], + validation_results: { syntax: "pass", acceptance: "pass" } +}, "[executor]") +``` + +**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/executor-{timestamp}.json` + +## 5. Toolbox + +### Available Commands +- `commands/implement.md` - Multi-backend code implementation with progress tracking + +### Subagent Capabilities +- `code-developer` - Synchronous agent execution for simple tasks and agent backend + +### CLI Capabilities +- `ccw cli --tool codex --mode write` - Codex backend implementation +- `ccw cli --tool gemini --mode write` - Gemini backend implementation + +## 6. 
Execution (5-Phase) + +### Phase 1: Task & Plan Loading + +**Task Discovery**: +```javascript +const tasks = Glob(".workflow/.team/tasks/IMPL-*.json") + .filter(task => task.status === "pending" && task.assigned_to === "executor") +``` + +**Plan Path Extraction**: +```javascript +const planPath = task.metadata?.plan_path || ".workflow/plan.md" +const plan = Read(planPath) +``` + +**Execution Backend Resolution**: +```javascript +function resolveExecutor(task, plan) { + // Priority 1: Task-level override + if (task.metadata?.executor) { + return task.metadata.executor // "agent" | "codex" | "gemini" + } + + // Priority 2: Plan-level default + const planMatch = plan.match(/Execution Backend:\s*(agent|codex|gemini)/i) + if (planMatch) { + return planMatch[1].toLowerCase() + } + + // Priority 3: Auto-select based on task complexity + const isSimple = task.description.length < 200 && + !task.description.includes("refactor") && + !task.description.includes("architecture") + + return isSimple ? 
"agent" : "codex" // Default: codex for complex, agent for simple +} +``` + +**Code Review Resolution**: +```javascript +function resolveCodeReview(task, plan) { + // Priority 1: Task-level override + if (task.metadata?.code_review !== undefined) { + return task.metadata.code_review // boolean + } + + // Priority 2: Plan-level default + const reviewMatch = plan.match(/Code Review:\s*(enabled|disabled)/i) + if (reviewMatch) { + return reviewMatch[1].toLowerCase() === "enabled" + } + + // Priority 3: Default based on task type + const criticalKeywords = ["auth", "security", "payment", "api", "database"] + const isCritical = criticalKeywords.some(kw => + task.description.toLowerCase().includes(kw) + ) + + return isCritical // Enable review for critical paths +} +``` + +### Phase 2: Task Grouping + +**Dependency-Based Batching**: +```javascript +function createBatches(tasks, plan) { + // Extract dependencies from plan + const dependencies = new Map() + const depRegex = /IMPL-(\d+).*depends on.*IMPL-(\d+)/gi + let match + while ((match = depRegex.exec(plan)) !== null) { + const [_, taskId, depId] = match + if (!dependencies.has(`IMPL-${taskId}`)) { + dependencies.set(`IMPL-${taskId}`, []) + } + dependencies.get(`IMPL-${taskId}`).push(`IMPL-${depId}`) + } + + // Topological sort for execution order + const batches = [] + const completed = new Set() + const remaining = new Set(tasks.map(t => t.task_id)) + + while (remaining.size > 0) { + const batch = [] + + for (const taskId of remaining) { + const deps = dependencies.get(taskId) || [] + const depsCompleted = deps.every(dep => completed.has(dep)) + + if (depsCompleted) { + batch.push(tasks.find(t => t.task_id === taskId)) + } + } + + if (batch.length === 0) { + // Circular dependency detected + throw new Error(`Circular dependency detected in remaining tasks: ${[...remaining].join(", ")}`) + } + + batches.push(batch) + batch.forEach(task => { + completed.add(task.task_id) + remaining.delete(task.task_id) + }) + } + + 
return batches +} +``` + +### Phase 3: Code Implementation + +**Delegate to Command**: +```javascript +const implementCommand = Read("commands/implement.md") +// Command handles: +// - buildExecutionPrompt (context + acceptance criteria) +// - buildBatchPrompt (multi-task batching) +// - 4 execution paths: simple+agent, agent, codex, gemini +// - Progress updates via team_msg +``` + +### Phase 4: Self-Validation + +**Syntax Check**: +```javascript +const syntaxCheck = Bash("tsc --noEmit", { timeout: 30000 }) +const syntaxPass = syntaxCheck.exitCode === 0 +``` + +**Acceptance Criteria Verification**: +```javascript +function verifyAcceptance(task, implementation) { + const criteria = task.acceptance_criteria || [] + const results = criteria.map(criterion => { + // Simple keyword matching for automated verification + const keywords = criterion.toLowerCase().match(/\b\w+\b/g) || [] + const matched = keywords.some(kw => + implementation.toLowerCase().includes(kw) + ) + return { criterion, matched, status: matched ? 
"pass" : "manual_review" } + }) + + const allPassed = results.every(r => r.status === "pass") + return { allPassed, results } +} +``` + +**Test File Detection**: +```javascript +function findAffectedTests(modifiedFiles) { + const testFiles = [] + + for (const file of modifiedFiles) { + const baseName = file.replace(/\.(ts|js|tsx|jsx)$/, "") + const testVariants = [ + `${baseName}.test.ts`, + `${baseName}.test.js`, + `${baseName}.spec.ts`, + `${baseName}.spec.js`, + `${file.replace(/^src\//, "tests/")}.test.ts`, + `${file.replace(/^src\//, "__tests__/")}.test.ts` + ] + + for (const variant of testVariants) { + if (Bash(`test -f ${variant}`).exitCode === 0) { + testFiles.push(variant) + } + } + } + + return testFiles +} +``` + +**Optional Code Review**: +```javascript +const codeReviewEnabled = resolveCodeReview(task, plan) + +if (codeReviewEnabled) { + const executor = resolveExecutor(task, plan) + + if (executor === "gemini") { + // Gemini Review: Use Gemini CLI for review + const reviewResult = Bash( + `ccw cli -p "Review implementation for: ${task.description}. Check: code quality, security, architecture compliance." --tool gemini --mode analysis`, + { run_in_background: true } + ) + } else if (executor === "codex") { + // Codex Review: Use Codex CLI review mode + const reviewResult = Bash( + `ccw cli --tool codex --mode review --uncommitted`, + { run_in_background: true } + ) + } + + // Wait for review results and append to validation +} +``` + +### Phase 5: Report to Coordinator + +**Success Report**: +```javascript +team_msg({ + to: "coordinator", + type: "task_complete", + task_id: task.task_id, + status: "success", + files_modified: modifiedFiles, + validation_results: { + syntax: syntaxPass ? "pass" : "fail", + acceptance: acceptanceResults.allPassed ? "pass" : "manual_review", + tests_found: affectedTests.length, + code_review: codeReviewEnabled ? 
"completed" : "skipped" + }, + execution_backend: executor, + timestamp: new Date().toISOString() +}, "[executor]") +``` + +**Failure Report**: +```javascript +team_msg({ + to: "coordinator", + type: "task_failed", + task_id: task.task_id, + error: errorMessage, + retry_count: task.retry_count || 0, + validation_results: { + syntax: syntaxPass ? "pass" : "fail", + acceptance: "not_verified" + }, + timestamp: new Date().toISOString() +}, "[executor]") +``` + +## 7. Error Handling + +| Error Type | Recovery Strategy | Escalation | +|------------|-------------------|------------| +| Syntax errors | Retry with error context (max 3 attempts) | Report to coordinator after 3 failures | +| Missing dependencies | Request dependency resolution from coordinator | Immediate escalation | +| Backend unavailable | Fallback to agent backend | Report backend switch | +| Validation failure | Include validation details in report | Manual review required | +| Circular dependencies | Abort batch, report dependency graph | Immediate escalation | + +## 8. Execution Backends + +| Backend | Tool | Invocation | Mode | Use Case | +|---------|------|------------|------|----------| +| **agent** | code-developer | Subagent call (synchronous) | N/A | Simple tasks, direct edits | +| **codex** | ccw cli | `ccw cli --tool codex --mode write` | write | Complex tasks, architecture changes | +| **gemini** | ccw cli | `ccw cli --tool gemini --mode write` | write | Alternative backend, analysis-heavy tasks | + +**Backend Selection Logic**: +1. Task metadata override โ†’ Use specified backend +2. Plan default โ†’ Use plan-level backend +3. 
Auto-select โ†’ Simple tasks use agent, complex use codex diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/explorer/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/explorer/role.md new file mode 100644 index 00000000..90e369f8 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/explorer/role.md @@ -0,0 +1,301 @@ +# Explorer Role + +ไธ“่Œไปฃ็ ๆœ็ดขไธŽๆจกๅผๅ‘็Žฐใ€‚ๆœๅŠก่ง’่‰ฒ๏ผŒ่ขซ analyst/planner/executor/discussant ๆŒ‰้œ€่ฐƒ็”จใ€‚ + +## 1. Role Identity + +- **Name**: explorer +- **Task Prefix**: EXPLORE-* +- **Output Tag**: `[explorer]` +- **Role Type**: Service๏ผˆๆŒ‰้œ€่ฐƒ็”จ๏ผŒไธๅ ไธป้“พ่ทฏไฝ็ฝฎ๏ผ‰ +- **Responsibility**: Parse request โ†’ Multi-strategy search โ†’ Dependency trace โ†’ Package results โ†’ Report + +## 2. Role Boundaries + +### MUST +- Only process EXPLORE-* tasks +- Output structured JSON for downstream consumption +- Use priority-ordered search strategies (ACE โ†’ Grep โ†’ cli-explore-agent) +- Tag all outputs with `[explorer]` +- Cache results in `{session}/explorations/` for cross-role reuse + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify any source code files +- Execute analysis, planning, or implementation +- Make architectural decisions (only discover patterns) + +## 3. Message Types + +| Type | Direction | Purpose | Format | +|------|-----------|---------|--------| +| `explore_ready` | TO coordinator | Search complete | `{ type: "explore_ready", task_id, file_count, pattern_count, output_path }` | +| `explore_progress` | TO coordinator | Multi-angle progress | `{ type: "explore_progress", task_id, angle, status }` | +| `task_failed` | TO coordinator | Search failure | `{ type: "task_failed", task_id, error, fallback_used }` | + +## 4. 
Message Bus + +**Primary**: Use `team_msg` for all coordinator communication with `[explorer]` tag: +```javascript +team_msg({ + to: "coordinator", + type: "explore_ready", + task_id: "EXPLORE-001", + file_count: 15, + pattern_count: 3, + output_path: `${sessionFolder}/explorations/explore-001.json` +}, "[explorer]") +``` + +**CLI Fallback**: When message bus unavailable: +```bash +ccw team log --team "${teamName}" --from "explorer" --to "coordinator" --type "explore_ready" --summary "[explorer] 15 files, 3 patterns" --json +``` + +## 5. Toolbox + +### Available Commands +- None (inline execution, search logic is straightforward) + +### Search Tools (priority order) + +| Tool | Priority | Use Case | +|------|----------|----------| +| `mcp__ace-tool__search_context` | P0 | Semantic code search | +| `Grep` / `Glob` | P1 | Pattern matching, file discovery | +| `Read` | P1 | File content reading | +| `Bash` (rg, find) | P2 | Structured search fallback | +| `WebSearch` | P3 | External docs/best practices | + +### Subagent Capabilities +- `cli-explore-agent` โ€” Deep multi-angle codebase exploration + +## 6. 
Execution (5-Phase) + +### Phase 1: Task Discovery & Request Parsing + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('EXPLORE-') && + t.owner === 'explorer' && + t.status === 'pending' && + t.blockedBy.length === 0 +) +if (myTasks.length === 0) return +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) + +// Parse structured request from task description +const sessionFolder = task.description.match(/Session:\s*([^\n]+)/)?.[1]?.trim() +const exploreMode = task.description.match(/Mode:\s*([^\n]+)/)?.[1]?.trim() || 'codebase' +const angles = (task.description.match(/Angles:\s*([^\n]+)/)?.[1] || 'general').split(',').map(a => a.trim()) +const keywords = (task.description.match(/Keywords:\s*([^\n]+)/)?.[1] || '').split(',').map(k => k.trim()).filter(Boolean) +const requester = task.description.match(/Requester:\s*([^\n]+)/)?.[1]?.trim() || 'coordinator' + +const outputDir = sessionFolder ? 
`${sessionFolder}/explorations` : '.workflow/.tmp' +Bash(`mkdir -p "${outputDir}"`) +``` + +### Phase 2: Multi-Strategy Search + +```javascript +const findings = { + relevant_files: [], // { path, rationale, role, discovery_source, key_symbols } + patterns: [], // { name, description, files } + dependencies: [], // { file, imports[] } + external_refs: [], // { keyword, results[] } + _metadata: { angles, mode: exploreMode, requester, timestamp: new Date().toISOString() } +} + +// === Strategy 1: ACE Semantic Search (P0) === +if (exploreMode !== 'external') { + for (const kw of keywords) { + try { + const results = mcp__ace-tool__search_context({ project_root_path: '.', query: kw }) + // Deduplicate and add to findings.relevant_files with discovery_source: 'ace-search' + } catch { /* ACE unavailable, fall through */ } + } +} + +// === Strategy 2: Grep Pattern Scan (P1) === +if (exploreMode !== 'external') { + for (const kw of keywords) { + // Find imports/exports/definitions + const defResults = Grep({ + pattern: `(class|function|const|export|interface|type)\\s+.*${kw}`, + glob: '*.{ts,tsx,js,jsx,py,go,rs}', + '-n': true, output_mode: 'content' + }) + // Add to findings with discovery_source: 'grep-scan' + } +} + +// === Strategy 3: Dependency Tracing === +if (exploreMode !== 'external') { + for (const file of findings.relevant_files.slice(0, 10)) { + try { + const content = Read(file.path) + const imports = (content.match(/from\s+['"]([^'"]+)['"]/g) || []) + .map(i => i.match(/['"]([^'"]+)['"]/)?.[1]).filter(Boolean) + if (imports.length > 0) { + findings.dependencies.push({ file: file.path, imports }) + } + } catch {} + } +} + +// === Strategy 4: Deep Exploration (multi-angle, via cli-explore-agent) === +if (angles.length > 1 && exploreMode !== 'external') { + for (const angle of angles) { + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: `Explore: ${angle}`, + prompt: `## Exploration: ${angle} angle +Keywords: 
${keywords.join(', ')} + +## Steps +1. rg -l "${keywords[0]}" --type-add 'code:*.{ts,tsx,js,py,go,rs}' --type code +2. Read .workflow/project-tech.json (if exists) +3. Focus on ${angle} perspective + +## Output +Write to: ${outputDir}/exploration-${angle}.json +Schema: { relevant_files[], patterns[], dependencies[] }` + }) + // Merge angle results into main findings + try { + const angleData = JSON.parse(Read(`${outputDir}/exploration-${angle}.json`)) + findings.relevant_files.push(...(angleData.relevant_files || [])) + findings.patterns.push(...(angleData.patterns || [])) + } catch {} + } +} + +// === Strategy 5: External Search (P3) === +if (exploreMode === 'external' || exploreMode === 'hybrid') { + for (const kw of keywords.slice(0, 3)) { + try { + const results = WebSearch({ query: `${kw} best practices documentation` }) + findings.external_refs.push({ keyword: kw, results }) + } catch {} + } +} + +// Deduplicate relevant_files by path +const seen = new Set() +findings.relevant_files = findings.relevant_files.filter(f => { + if (seen.has(f.path)) return false + seen.add(f.path) + return true +}) +``` + +### Phase 3: Wisdom Contribution + +```javascript +// If wisdom directory exists, contribute discovered patterns +if (sessionFolder) { + try { + const conventionsPath = `${sessionFolder}/wisdom/conventions.md` + const existing = Read(conventionsPath) + if (findings.patterns.length > 0) { + const newPatterns = findings.patterns + .map(p => `- ${p.name}: ${p.description || ''}`) + .join('\n') + Edit({ + file_path: conventionsPath, + old_string: '', + new_string: `\n${newPatterns}` + }) + } + } catch {} // wisdom not initialized +} +``` + +### Phase 4: Package Results + +```javascript +const outputPath = `${outputDir}/explore-${task.subject.replace(/[^a-zA-Z0-9-]/g, '-').toLowerCase()}.json` +Write(outputPath, JSON.stringify(findings, null, 2)) +``` + +### Phase 5: Report to Coordinator + +```javascript +const summary = `${findings.relevant_files.length} files, 
${findings.patterns.length} patterns, ${findings.dependencies.length} deps` + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "explorer", to: "coordinator", + type: "explore_ready", + summary: `[explorer] EXPLORE complete: ${summary}`, + ref: outputPath +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[explorer] ## Exploration Results + +**Task**: ${task.subject} +**Mode**: ${exploreMode} | **Angles**: ${angles.join(', ')} | **Requester**: ${requester} + +### Files: ${findings.relevant_files.length} +${findings.relevant_files.slice(0, 8).map(f => `- \`${f.path}\` (${f.role}) โ€” ${f.rationale}`).join('\n')} + +### Patterns: ${findings.patterns.length} +${findings.patterns.slice(0, 5).map(p => `- ${p.name}: ${p.description || ''}`).join('\n') || 'None'} + +### Output: ${outputPath}`, + summary: `[explorer] ${summary}` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) +// Check for next EXPLORE task โ†’ back to Phase 1 +``` + +## 7. Coordinator Integration + +Explorer ๆ˜ฏๆœๅŠก่ง’่‰ฒ๏ผŒcoordinator ๅœจไปฅไธ‹ๅœบๆ™ฏๆŒ‰้œ€ๅˆ›ๅปบ EXPLORE-* ไปปๅŠก๏ผš + +| Trigger | Task Example | Requester | +|---------|-------------|-----------| +| RESEARCH-001 ้œ€่ฆไปฃ็ ๅบ“ไธŠไธ‹ๆ–‡ | `EXPLORE-001: ไปฃ็ ๅบ“ไธŠไธ‹ๆ–‡ๆœ็ดข` | analyst | +| PLAN-001 ้œ€่ฆๅคš่ง’ๅบฆๆŽข็ดข | `EXPLORE-002: ๅฎž็Žฐ็›ธๅ…ณไปฃ็ ๆŽข็ดข` | planner | +| DISCUSS-004 ้œ€่ฆๅค–้ƒจๆœ€ไฝณๅฎž่ทต | `EXPLORE-003: ๅค–้ƒจๆ–‡ๆกฃๆœ็ดข` | discussant | +| IMPL-001 ้‡ๅˆฐๆœช็Ÿฅไปฃ็  | `EXPLORE-004: ไพ่ต–่ฟฝ่ธช` | executor | + +**Task Description Template**: +``` +ๆœ็ดขๆ่ฟฐ + +Session: {sessionFolder} +Mode: codebase|external|hybrid +Angles: architecture,patterns,dependencies +Keywords: auth,middleware,session +Requester: analyst +``` + +## 8. 
Result Caching + +``` +{sessionFolder}/explorations/ +โ”œโ”€โ”€ explore-explore-001-*.json # Consolidated results +โ”œโ”€โ”€ exploration-architecture.json # Angle-specific (from cli-explore-agent) +โ””โ”€โ”€ exploration-patterns.json +``` + +ๅŽ็ปญ่ง’่‰ฒ Phase 2 ๅฏ็›ดๆŽฅ่ฏปๅ–ๅทฒๆœ‰ๆŽข็ดข็ป“ๆžœ๏ผŒ้ฟๅ…้‡ๅคๆœ็ดขใ€‚ + +## 9. Error Handling + +| Error Type | Recovery Strategy | Escalation | +|------------|-------------------|------------| +| ACE unavailable | Fallback to Grep + rg | Continue with degraded results | +| cli-explore-agent failure | Fallback to direct search | Report partial results | +| No results found | Report empty, suggest broader keywords | Coordinator decides | +| Web search fails | Skip external refs | Continue with codebase results | +| Session folder missing | Use .workflow/.tmp | Notify coordinator | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/fe-developer/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/fe-developer/role.md new file mode 100644 index 00000000..540ef26b --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/fe-developer/role.md @@ -0,0 +1,410 @@ +# Role: fe-developer + +ๅ‰็ซฏๅผ€ๅ‘ใ€‚ๆถˆ่ดน่ฎกๅˆ’/ๆžถๆž„ไบงๅ‡บ๏ผŒๅฎž็Žฐๅ‰็ซฏ็ป„ไปถใ€้กต้ขใ€ๆ ทๅผไปฃ็ ใ€‚ + +## Role Identity + +- **Name**: `fe-developer` +- **Task Prefix**: `DEV-FE-*` +- **Output Tag**: `[fe-developer]` +- **Role Type**: Pipeline๏ผˆๅ‰็ซฏๅญๆตๆฐด็บฟ worker๏ผ‰ +- **Responsibility**: Context loading โ†’ Design token consumption โ†’ Component implementation โ†’ Report + +## Role Boundaries + +### MUST +- ไป…ๅค„็† `DEV-FE-*` ๅ‰็ผ€็š„ไปปๅŠก +- ๆ‰€ๆœ‰่พ“ๅ‡บๅธฆ `[fe-developer]` ๆ ‡่ฏ† +- ไป…้€š่ฟ‡ SendMessage ไธŽ coordinator ้€šไฟก +- ้ตๅพชๅทฒๆœ‰่ฎพ่ฎกไปค็‰Œๅ’Œ็ป„ไปถ่ง„่Œƒ๏ผˆๅฆ‚ๅญ˜ๅœจ๏ผ‰ +- ็”Ÿๆˆๅฏ่ฎฟ้—ฎๆ€งๅˆ่ง„็š„ๅ‰็ซฏไปฃ็ ๏ผˆ่ฏญไน‰ HTMLใ€ARIA ๅฑžๆ€งใ€้”ฎ็›˜ๅฏผ่ˆช๏ผ‰ +- ้ตๅพช้กน็›ฎๅทฒๆœ‰็š„ๅ‰็ซฏๆŠ€ๆœฏๆ ˆๅ’Œ็บฆๅฎš + +### MUST NOT +- โŒ ไฟฎๆ”นๅŽ็ซฏไปฃ็ ๆˆ– API ๆŽฅๅฃ +- โŒ ็›ดๆŽฅไธŽๅ…ถไป– worker ้€šไฟก +- โŒ 
ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก +- โŒ ่ทณ่ฟ‡่ฎพ่ฎกไปค็‰Œ/่ง„่Œƒๆฃ€ๆŸฅ๏ผˆๅฆ‚ๅญ˜ๅœจ๏ผ‰ +- โŒ ๅผ•ๅ…ฅๆœช็ปๆžถๆž„ๅฎกๆŸฅ็š„ๆ–ฐๅ‰็ซฏไพ่ต– + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `dev_fe_complete` | fe-developer โ†’ coordinator | Implementation done | ๅ‰็ซฏๅฎž็ŽฐๅฎŒๆˆ | +| `dev_fe_progress` | fe-developer โ†’ coordinator | Long task progress | ่ฟ›ๅบฆๆ›ดๆ–ฐ | +| `error` | fe-developer โ†’ coordinator | Implementation failure | ๅฎž็Žฐๅคฑ่ดฅ | + +## Message Bus + +ๆฏๆฌก SendMessage **ๅ‰**๏ผŒๅฟ…้กป่ฐƒ็”จ `mcp__ccw-tools__team_msg` ่ฎฐๅฝ•๏ผš + +```javascript +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "fe-developer", to: "coordinator", + type: "dev_fe_complete", + summary: "[fe-developer] DEV-FE complete: 3 components, 1 page", + ref: outputPath +}) +``` + +### CLI ๅ›ž้€€ + +```javascript +Bash(`ccw team log --team "${teamName}" --from "fe-developer" --to "coordinator" --type "dev_fe_complete" --summary "[fe-developer] DEV-FE complete" --ref "${outputPath}" --json`) +``` + +## Toolbox + +### Available Commands +- None (inline execution โ€” implementation delegated to subagent) + +### Subagent Capabilities + +| Agent Type | Purpose | +|------------|---------| +| `code-developer` | ็ป„ไปถ/้กต้ขไปฃ็ ๅฎž็Žฐ | + +### CLI Capabilities + +| CLI Tool | Mode | Purpose | +|----------|------|---------| +| `ccw cli --tool gemini --mode write` | write | ๅ‰็ซฏไปฃ็ ็”Ÿๆˆ | + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('DEV-FE-') && + t.owner === 'fe-developer' && + t.status === 'pending' && + t.blockedBy.length === 0 +) +if (myTasks.length === 0) return +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 2: Context Loading + +```javascript +const sessionFolder = 
task.description.match(/Session:\s*([^\n]+)/)?.[1]?.trim() + +// Load plan context +let plan = null +try { plan = JSON.parse(Read(`${sessionFolder}/plan/plan.json`)) } catch {} + +// Load design tokens (if architect produced them) +let designTokens = null +try { designTokens = JSON.parse(Read(`${sessionFolder}/architecture/design-tokens.json`)) } catch {} + +// Load design intelligence (from analyst via ui-ux-pro-max) +let designIntel = {} +try { designIntel = JSON.parse(Read(`${sessionFolder}/analysis/design-intelligence.json`)) } catch {} + +// Load component specs (if available) +let componentSpecs = [] +try { + const specFiles = Glob({ pattern: `${sessionFolder}/architecture/component-specs/*.md` }) + componentSpecs = specFiles.map(f => ({ path: f, content: Read(f) })) +} catch {} + +// Load shared memory (cross-role state) +let sharedMemory = {} +try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {} + +// Load wisdom +let wisdom = {} +if (sessionFolder) { + try { wisdom.conventions = Read(`${sessionFolder}/wisdom/conventions.md`) } catch {} + try { wisdom.decisions = Read(`${sessionFolder}/wisdom/decisions.md`) } catch {} +} + +// Extract design constraints from design intelligence +const antiPatterns = designIntel.recommendations?.anti_patterns || [] +const implementationChecklist = designIntel.design_system?.implementation_checklist || [] +const stackGuidelines = designIntel.stack_guidelines || {} + +// Detect frontend tech stack +let techStack = {} +try { techStack = JSON.parse(Read('.workflow/project-tech.json')) } catch {} +const feTech = detectFrontendStack(techStack) +// Override with design intelligence detection if available +if (designIntel.detected_stack) { + const diStack = designIntel.detected_stack + if (['react', 'nextjs', 'vue', 'svelte', 'nuxt'].includes(diStack)) feTech.framework = diStack +} + +function detectFrontendStack(tech) { + const deps = tech?.dependencies || {} + const stack = { framework: 'html', 
styling: 'css', ui_lib: null } + if (deps.react || deps['react-dom']) stack.framework = 'react' + if (deps.vue) stack.framework = 'vue' + if (deps.svelte) stack.framework = 'svelte' + if (deps.next) stack.framework = 'nextjs' + if (deps.nuxt) stack.framework = 'nuxt' + if (deps.tailwindcss) stack.styling = 'tailwind' + if (deps['@shadcn/ui'] || deps['shadcn-ui']) stack.ui_lib = 'shadcn' + if (deps['@mui/material']) stack.ui_lib = 'mui' + if (deps['antd']) stack.ui_lib = 'antd' + return stack +} +``` + +### Phase 3: Frontend Implementation + +#### Step 1: Generate Design Token CSS (if tokens available) + +```javascript +if (designTokens && task.description.includes('Scope: tokens') || task.description.includes('Scope: full')) { + // Convert design-tokens.json to CSS custom properties + let cssVars = ':root {\n' + + // Colors + if (designTokens.color) { + for (const [name, token] of Object.entries(designTokens.color)) { + const value = typeof token.$value === 'object' ? token.$value.light : token.$value + cssVars += ` --color-${name}: ${value};\n` + } + } + + // Typography + if (designTokens.typography?.['font-family']) { + for (const [name, token] of Object.entries(designTokens.typography['font-family'])) { + const value = Array.isArray(token.$value) ? 
token.$value.join(', ') : token.$value + cssVars += ` --font-${name}: ${value};\n` + } + } + if (designTokens.typography?.['font-size']) { + for (const [name, token] of Object.entries(designTokens.typography['font-size'])) { + cssVars += ` --text-${name}: ${token.$value};\n` + } + } + + // Spacing, border-radius, shadow, transition + for (const category of ['spacing', 'border-radius', 'shadow', 'transition']) { + const prefix = { spacing: 'space', 'border-radius': 'radius', shadow: 'shadow', transition: 'duration' }[category] + if (designTokens[category]) { + for (const [name, token] of Object.entries(designTokens[category])) { + cssVars += ` --${prefix}-${name}: ${token.$value};\n` + } + } + } + + cssVars += '}\n' + + // Dark mode overrides + if (designTokens.color) { + const darkOverrides = Object.entries(designTokens.color) + .filter(([, token]) => typeof token.$value === 'object' && token.$value.dark) + if (darkOverrides.length > 0) { + cssVars += '\n@media (prefers-color-scheme: dark) {\n :root {\n' + for (const [name, token] of darkOverrides) { + cssVars += ` --color-${name}: ${token.$value.dark};\n` + } + cssVars += ' }\n}\n' + } + } + + Bash(`mkdir -p src/styles`) + Write('src/styles/tokens.css', cssVars) +} +``` + +#### Step 2: Implement Components + +```javascript +const taskId = task.subject.match(/DEV-FE-(\d+)/)?.[0] +const taskDetail = plan?.task_ids?.includes(taskId) + ? JSON.parse(Read(`${sessionFolder}/plan/.task/${taskId}.json`)) + : { title: task.subject, description: task.description, files: [] } + +const isSimple = (taskDetail.files || []).length <= 3 && + !task.description.includes('system') && + !task.description.includes('ๅคš็ป„ไปถ') + +if (isSimple) { + Task({ + subagent_type: "code-developer", + run_in_background: false, + description: `Frontend implementation: ${taskDetail.title}`, + prompt: `## Frontend Implementation + +Task: ${taskDetail.title} +Description: ${taskDetail.description} + +${designTokens ? 
`## Design Tokens\nImport from: src/styles/tokens.css\nUse CSS custom properties (var(--color-primary), var(--space-md), etc.)\n${JSON.stringify(designTokens, null, 2).substring(0, 1000)}` : ''} +${componentSpecs.length > 0 ? `## Component Specs\n${componentSpecs.map(s => s.content.substring(0, 500)).join('\n---\n')}` : ''} + +## Tech Stack +- Framework: ${feTech.framework} +- Styling: ${feTech.styling} +${feTech.ui_lib ? `- UI Library: ${feTech.ui_lib}` : ''} + +## Stack-Specific Guidelines +${JSON.stringify(stackGuidelines, null, 2).substring(0, 500)} + +## Implementation Checklist (MUST verify each item) +${implementationChecklist.map(item => `- [ ] ${item}`).join('\n') || '- [ ] Semantic HTML\n- [ ] Keyboard accessible\n- [ ] Responsive layout\n- [ ] Dark mode support'} + +## Anti-Patterns to AVOID +${antiPatterns.map(p => `- โŒ ${p}`).join('\n') || 'None specified'} + +## Coding Standards +- Use design token CSS variables, never hardcode colors/spacing +- All interactive elements must have cursor: pointer +- Transitions: 150-300ms (use var(--duration-normal)) +- Text contrast: minimum 4.5:1 ratio +- Include focus-visible styles for keyboard navigation +- Support prefers-reduced-motion +- Responsive: mobile-first with md/lg breakpoints +- No emoji as functional icons + +## Files to modify/create +${(taskDetail.files || []).map(f => `- ${f.path}: ${f.change}`).join('\n') || 'Determine from task description'} + +## Conventions +${wisdom.conventions || 'Follow project existing patterns'}` + }) +} else { + Bash({ + command: `ccw cli -p "PURPOSE: Implement frontend components for '${taskDetail.title}' +TASK: ${taskDetail.description} +MODE: write +CONTEXT: @src/**/*.{tsx,jsx,vue,svelte,css,scss,html} @public/**/* +EXPECTED: Production-ready frontend code with accessibility, responsive design, design token usage +CONSTRAINTS: Framework=${feTech.framework}, Styling=${feTech.styling}${feTech.ui_lib ? 
', UI=' + feTech.ui_lib : ''}
+ANTI-PATTERNS: ${antiPatterns.join(', ') || 'None'}
+CHECKLIST: ${implementationChecklist.join(', ') || 'Semantic HTML, keyboard accessible, responsive, dark mode'}" --tool gemini --mode write --rule development-implement-component-ui`,
+    run_in_background: true
+  })
+}
+```
+
+### Phase 4: Self-Validation + Wisdom + Shared Memory
+
+```javascript
+// === Self-Validation (pre-QA check) ===
+const implementedFiles = Glob({ pattern: 'src/**/*.{tsx,jsx,vue,svelte,html,css}' })
+const selfCheck = { passed: [], failed: [] }
+
+for (const file of implementedFiles.slice(0, 20)) {
+  try {
+    const content = Read(file)
+
+    // Check: no hardcoded colors (hex outside tokens.css)
+    if (file !== 'src/styles/tokens.css' && /#[0-9a-fA-F]{3,8}/.test(content)) {
+      selfCheck.failed.push({ file, check: 'hardcoded-color', message: 'Hardcoded color — use var(--color-*)' })
+    }
+
+    // Check: cursor-pointer on interactive elements
+    if (/<button|onClick/.test(content) && !/cursor-pointer|cursor:\s*pointer/.test(content)) {
+      selfCheck.failed.push({ file, check: 'cursor-pointer', message: 'Interactive element without cursor: pointer' })
+    }
+  } catch {}
+}
+
+// === Shared Memory (cross-role state) ===
+// NOTE(review): this span was reconstructed from a garbled patch (an angle-bracket
+// span was stripped between the cursor-pointer check and the shared-memory update) —
+// verify against the original file before relying on it.
+if (sessionFolder) {
+  try {
+    sharedMemory.fe_implemented = implementedFiles.map(f => ({ path: f, status: 'implemented' }))
+    Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
+  } catch {}
+}
+```
+
+### Phase 5: Report to Coordinator
+
+```javascript
+const changedFiles = Bash(`git diff --name-only HEAD 2>/dev/null || echo "unknown"`)
+  .split('\n').filter(Boolean)
+const feFiles = changedFiles.filter(f =>
+  /\.(tsx|jsx|vue|svelte|css|scss|html)$/.test(f)
+)
+
+const resultStatus = selfCheck.failed.length === 0 ?
'complete' : 'complete_with_warnings' + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "fe-developer", to: "coordinator", + type: "dev_fe_complete", + summary: `[fe-developer] DEV-FE complete: ${feFiles.length} files, self-check: ${selfCheck.failed.length} issues`, + ref: sessionFolder +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[fe-developer] ## Frontend Implementation Complete + +**Task**: ${task.subject} +**Status**: ${resultStatus} +**Framework**: ${feTech.framework} | **Styling**: ${feTech.styling} +**Design Intelligence**: ${designIntel._source || 'not available'} + +### Files Modified +${feFiles.slice(0, 10).map(f => `- \`${f}\``).join('\n') || 'See git diff'} + +### Design Token Usage +${designTokens ? 'Applied design tokens from architecture โ†’ src/styles/tokens.css' : 'No design tokens available โ€” used project defaults'} + +### Self-Validation +${selfCheck.failed.length === 0 ? 'โœ… All checks passed' : `โš ๏ธ ${selfCheck.failed.length} issues:\n${selfCheck.failed.slice(0, 5).map(f => `- [${f.check}] ${f.file}: ${f.message}`).join('\n')}`} + +### Accessibility +- Semantic HTML structure +- ARIA attributes applied +- Keyboard navigation supported +- Focus-visible styles included`, + summary: `[fe-developer] DEV-FE complete: ${feFiles.length} files` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) +// Check for next DEV-FE task โ†’ back to Phase 1 +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No DEV-FE-* tasks | Idle, wait for coordinator | +| Design tokens not found | Use project defaults, note in report | +| Component spec missing | Implement from task description only | +| Tech stack undetected | Default to HTML + CSS, ask coordinator | +| Subagent failure | Fallback to CLI write mode | +| Build/lint errors | Report to coordinator for QA-FE review | diff --git 
a/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/commands/pre-delivery-checklist.md b/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/commands/pre-delivery-checklist.md
new file mode 100644
index 00000000..b8c9e4b0
--- /dev/null
+++ b/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/commands/pre-delivery-checklist.md
@@ -0,0 +1,116 @@
+# Command: pre-delivery-checklist
+
+> ๆœ€็ปˆไบคไป˜ๅ‰็š„ CSS ็บงๅˆซ็ฒพๅ‡†ๆฃ€ๆŸฅๆธ…ๅ•๏ผŒ่žๅˆ ui-ux-pro-max Pre-Delivery Checklist ๅ’Œ ux-guidelines Do/Don't ่ง„ๅˆ™ใ€‚
+
+## When to Use
+
+- Phase 3 of fe-qa role, Dimension 5: Pre-Delivery
+- Final review or code-review type tasks
+
+## Strategy
+
+### Delegation Mode
+
+**Mode**: Direct (inline pattern matching in fe-qa Phase 3)
+
+## Checklist Items
+
+### Accessibility
+
+| # | Check | Pattern | Severity | Do | Don't |
+|---|-------|---------|----------|-----|-------|
+| 1 | Images have alt text | `<img>` without `alt` attribute | CRITICAL | Provide descriptive alt text | Omit alt on meaningful images |
+| 2 | Form inputs have labels | `<input>` without `<label>`/aria-label | CRITICAL | Associate every input with a label | Use placeholder as the only label |
+| 3 | Focus-visible styles | No `:focus-visible` styles | HIGH | Provide visible keyboard focus indicators | Remove outline without replacement |
+| 4 | Keyboard navigation | Click-only interactive elements | HIGH | Support keyboard operation for all controls | Require mouse for interaction |
+| 5 | Color contrast | Text contrast below 4.5:1 | HIGH | Meet WCAG AA contrast ratio | Use low-contrast text on backgrounds |
+| 6 | Reduced motion | No `prefers-reduced-motion` handling | MEDIUM | Respect reduced-motion preference | Force animation on all users |
+
+<!-- NOTE(review): rows 2-8 and the Interaction section header were reconstructed from a
+garbled patch (an angle-bracket span was stripped between row 1's pattern and row 8's
+">500ms" text); verify against the original checklist. -->
+
+### Interaction
+
+| # | Check | Pattern | Severity | Do | Don't |
+|---|-------|---------|----------|-----|-------|
+| 7 | cursor-pointer on clickable | Clickable elements without cursor: pointer | MEDIUM | Set cursor: pointer on interactive elements | Keep default cursor on clickable items |
+| 8 | Transition duration | >500ms or <100ms transitions | MEDIUM | Use 150-300ms transitions | Use >500ms or <100ms transitions |
+| 9 | Loading states | Async ops without loading indicator | MEDIUM | Show skeleton/spinner during fetch | Leave blank screen while loading |
+| 10 | Error states | Async ops without error handling | HIGH | Show user-friendly error message | Silently fail or show raw error |
+
+### Design Compliance
+
+| # | Check | Pattern | Severity | Do | Don't |
+|---|-------|---------|----------|-----|-------|
+| 11 | No hardcoded colors | Hex values outside tokens.css | HIGH | Use var(--color-*) tokens | Hardcode #hex values |
+| 12 | No hardcoded spacing | px values for margin/padding | MEDIUM | Use var(--space-*) tokens | Hardcode pixel values |
+| 13 | No emoji as icons | Unicode emoji in UI | HIGH | Use proper SVG/icon library | Use emoji for functional icons |
+| 14 | Dark mode support | No prefers-color-scheme | MEDIUM | Support light/dark themes | Design for light mode only |
+
+### Layout
+
+| # | Check | Pattern | Severity | Do | Don't |
+|---|-------|---------|----------|-----|-------|
+| 15 | Responsive breakpoints | No md:/lg:/@media | MEDIUM |
Mobile-first responsive design | Desktop-only layout |
+| 16 | No horizontal scroll | Fixed widths > viewport | HIGH | Use relative/fluid widths | Set fixed pixel widths on containers |
+
+## Execution
+
+```javascript
+function runPreDeliveryChecklist(fileContents) {
+  const results = { passed: 0, failed: 0, items: [] }
+
+  const checks = [
+    // NOTE(review): regex for check 1 reconstructed from a garbled patch ("/]*alt=/") — verify
+    { id: 1, check: "Images have alt text", test: (c) => /<img(?![^>]*alt=)/.test(c), severity: 'CRITICAL' },
+    { id: 7, check: "cursor-pointer on clickable", test: (c) => /button|onClick/.test(c) && !/cursor-pointer/.test(c), severity: 'MEDIUM' },
+    { id: 11, check: "No hardcoded colors", test: (c, f) => f !== 'src/styles/tokens.css' && /#[0-9a-fA-F]{6}/.test(c), severity: 'HIGH' },
+    { id: 13, check: "No emoji as icons", test: (c) => /[\u{1F300}-\u{1F9FF}]/u.test(c), severity: 'HIGH' },
+    { id: 14, check: "Dark mode support", test: (c) => !/prefers-color-scheme|dark:|\.dark/.test(c), severity: 'MEDIUM', global: true },
+    { id: 15, check: "Responsive breakpoints", test: (c) => !/md:|lg:|@media.*min-width/.test(c), severity: 'MEDIUM', global: true }
+  ]
+
+  // Per-file checks
+  for (const [file, content] of Object.entries(fileContents)) {
+    for (const check of checks.filter(c => !c.global)) {
+      if (check.test(content, file)) {
+        results.failed++
+        results.items.push({ ...check, file, status: 'FAIL' })
+      } else {
+        results.passed++
+        results.items.push({ ...check, file, status: 'PASS' })
+      }
+    }
+  }
+
+  // Global checks (across all content)
+  const allContent = Object.values(fileContents).join('\n')
+  for (const check of checks.filter(c => c.global)) {
+    if (check.test(allContent)) {
+      results.failed++
+      results.items.push({ ...check, file: 'global', status: 'FAIL' })
+    } else {
+      results.passed++
+      results.items.push({ ...check, file: 'global', status: 'PASS' })
+    }
+  }
+
+  return results
+}
+```
+
+## Output Format
+
+```
+## Pre-Delivery Checklist Results
+- Passed: X / Y
+- Failed: Z
+
+### Failed Items
+- [CRITICAL] #1 Images have alt
text โ€” src/components/Hero.tsx +- [HIGH] #11 No hardcoded colors โ€” src/styles/custom.css +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No files to check | Report empty checklist, score 10/10 | +| File read error | Skip file, note in report | +| Regex error | Skip check, note in report | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/role.md new file mode 100644 index 00000000..6e57627d --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/fe-qa/role.md @@ -0,0 +1,510 @@ +# Role: fe-qa + +ๅ‰็ซฏ่ดจ้‡ไฟ่ฏใ€‚5 ็ปดๅบฆไปฃ็ ๅฎกๆŸฅ + Generator-Critic ๅพช็Žฏ็กฎไฟๅ‰็ซฏไปฃ็ ่ดจ้‡ใ€‚่žๅˆ ui-ux-pro-max ็š„ Pre-Delivery Checklistใ€ux-guidelines Do/Don't ่ง„ๅˆ™ใ€่กŒไธšๅๆจกๅผๅบ“ใ€‚ + +## Role Identity + +- **Name**: `fe-qa` +- **Task Prefix**: `QA-FE-*` +- **Output Tag**: `[fe-qa]` +- **Role Type**: Pipeline๏ผˆๅ‰็ซฏๅญๆตๆฐด็บฟ worker๏ผ‰ +- **Responsibility**: Context loading โ†’ Multi-dimension review โ†’ GC feedback โ†’ Report + +## Role Boundaries + +### MUST +- ไป…ๅค„็† `QA-FE-*` ๅ‰็ผ€็š„ไปปๅŠก +- ๆ‰€ๆœ‰่พ“ๅ‡บๅธฆ `[fe-qa]` ๆ ‡่ฏ† +- ไป…้€š่ฟ‡ SendMessage ไธŽ coordinator ้€šไฟก +- ๆ‰ง่กŒ 5 ็ปดๅบฆๅฎกๆŸฅ๏ผˆไปฃ็ ่ดจ้‡ใ€ๅฏ่ฎฟ้—ฎๆ€งใ€่ฎพ่ฎกๅˆ่ง„ใ€UX ๆœ€ไฝณๅฎž่ทตใ€Pre-Delivery๏ผ‰ +- ๆไพ›ๅฏๆ“ไฝœ็š„ไฟฎๅคๅปบ่ฎฎ๏ผˆDo/Don't ๆ ผๅผ๏ผ‰ +- ๆ”ฏๆŒ Generator-Critic ๅพช็Žฏ๏ผˆๆœ€ๅคš 2 ่ฝฎ๏ผ‰ +- ๅŠ ่ฝฝ design-intelligence.json ็”จไบŽ่กŒไธšๅๆจกๅผๆฃ€ๆŸฅ + +### MUST NOT +- โŒ ็›ดๆŽฅไฟฎๆ”นๆบไปฃ็ ๏ผˆไป…ๆไพ›ๅฎกๆŸฅๆ„่ง๏ผ‰ +- โŒ ็›ดๆŽฅไธŽๅ…ถไป– worker ้€šไฟก +- โŒ ไธบๅ…ถไป–่ง’่‰ฒๅˆ›ๅปบไปปๅŠก +- โŒ ่ทณ่ฟ‡ๅฏ่ฎฟ้—ฎๆ€งๆฃ€ๆŸฅ +- โŒ ๅœจ่ฏ„ๅˆ†ๆœช่พพๆ ‡ๆ—ถๆ ‡่ฎฐ้€š่ฟ‡ + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `qa_fe_passed` | fe-qa โ†’ coordinator | All dimensions pass | ๅ‰็ซฏ่ดจๆฃ€้€š่ฟ‡ | +| `qa_fe_result` | fe-qa โ†’ coordinator | Review complete (may have issues) 
| ๅฎกๆŸฅ็ป“ๆžœ๏ผˆๅซ้—ฎ้ข˜๏ผ‰ | +| `fix_required` | fe-qa โ†’ coordinator | Critical issues found | ้œ€่ฆ fe-developer ไฟฎๅค | +| `error` | fe-qa โ†’ coordinator | Review failure | ๅฎกๆŸฅๅคฑ่ดฅ | + +## Message Bus + +```javascript +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "fe-qa", to: "coordinator", + type: "qa_fe_result", + summary: "[fe-qa] QA-FE: score=8.5, 0 critical, 2 medium", + ref: outputPath +}) +``` + +### CLI ๅ›ž้€€ + +```javascript +Bash(`ccw team log --team "${teamName}" --from "fe-qa" --to "coordinator" --type "qa_fe_result" --summary "[fe-qa] QA-FE complete" --json`) +``` + +## Toolbox + +### Available Commands +- [commands/pre-delivery-checklist.md](commands/pre-delivery-checklist.md) โ€” CSS ็บงๅˆซ็ฒพๅ‡†ไบคไป˜ๆฃ€ๆŸฅ + +### CLI Capabilities + +| CLI Tool | Mode | Purpose | +|----------|------|---------| +| `ccw cli --tool gemini --mode analysis` | analysis | ๅ‰็ซฏไปฃ็ ๅฎกๆŸฅ | +| `ccw cli --tool codex --mode review` | review | Git-aware ไปฃ็ ๅฎกๆŸฅ | + +## Review Dimensions + +| Dimension | Weight | Source | Focus | +|-----------|--------|--------|-------| +| Code Quality | 25% | Standard code review | TypeScript ็ฑปๅž‹ๅฎ‰ๅ…จใ€็ป„ไปถ็ป“ๆž„ใ€็Šถๆ€็ฎก็†ใ€้”™่ฏฏๅค„็† | +| Accessibility | 25% | ux-guidelines rules | ่ฏญไน‰ HTMLใ€ARIAใ€้”ฎ็›˜ๅฏผ่ˆชใ€่‰ฒๅฝฉๅฏนๆฏ”ใ€focus-visibleใ€prefers-reduced-motion | +| Design Compliance | 20% | design-intelligence.json | ่ฎพ่ฎกไปค็‰Œไฝฟ็”จใ€่กŒไธšๅๆจกๅผใ€emoji ๆฃ€ๆŸฅใ€้—ด่ท/ๆŽ’็‰ˆไธ€่‡ดๆ€ง | +| UX Best Practices | 15% | ux-guidelines Do/Don't | ๅŠ ่ฝฝ็Šถๆ€ใ€้”™่ฏฏ็Šถๆ€ใ€็ฉบ็Šถๆ€ใ€cursor-pointerใ€ๅ“ๅบ”ๅผใ€ๅŠจ็”ปๆ—ถ้•ฟ | +| Pre-Delivery | 15% | Pre-Delivery Checklist | ๆš—่‰ฒๆจกๅผใ€ๆ—  console.logใ€ๆ— ็กฌ็ผ–็ ใ€ๅ›ฝ้™…ๅŒ–ๅฐฑ็ปชใ€must-have ๆฃ€ๆŸฅ | + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('QA-FE-') && + t.owner === 'fe-qa' && + t.status 
=== 'pending' && + t.blockedBy.length === 0 +) +if (myTasks.length === 0) return +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 2: Context Loading + +```javascript +const sessionFolder = task.description.match(/Session:\s*([^\n]+)/)?.[1]?.trim() + +// Load design tokens for compliance check +let designTokens = null +try { designTokens = JSON.parse(Read(`${sessionFolder}/architecture/design-tokens.json`)) } catch {} + +// Load design intelligence (from analyst via ui-ux-pro-max) +let designIntel = {} +try { designIntel = JSON.parse(Read(`${sessionFolder}/analysis/design-intelligence.json`)) } catch {} + +// Load shared memory for industry context + QA history +let sharedMemory = {} +try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {} + +const industryContext = sharedMemory.industry_context || {} +const antiPatterns = designIntel.recommendations?.anti_patterns || [] +const mustHave = designIntel.recommendations?.must_have || [] + +// Determine audit strictness from industry (standard / strict for medical/financial) +const strictness = industryContext.config?.strictness || 'standard' + +// Load component specs +let componentSpecs = [] +try { + const specFiles = Glob({ pattern: `${sessionFolder}/architecture/component-specs/*.md` }) + componentSpecs = specFiles.map(f => ({ path: f, content: Read(f) })) +} catch {} + +// Load previous QA results (for GC loop tracking) +let previousQA = [] +try { + const qaFiles = Glob({ pattern: `${sessionFolder}/qa/audit-fe-*.json` }) + previousQA = qaFiles.map(f => JSON.parse(Read(f))) +} catch {} + +// Determine GC round +const gcRound = previousQA.filter(q => q.task_subject === task.subject).length + 1 +const maxGCRounds = 2 + +// Get changed frontend files +const changedFiles = Bash(`git diff --name-only HEAD~1 2>/dev/null || git diff --name-only --cached 2>/dev/null || echo ""`) + .split('\n').filter(f => 
/\.(tsx|jsx|vue|svelte|css|scss|html|ts|js)$/.test(f)) + +// Read file contents for review +const fileContents = {} +for (const file of changedFiles.slice(0, 30)) { + try { fileContents[file] = Read(file) } catch {} +} +``` + +### Phase 3: 5-Dimension Review + +```javascript +const review = { + task_subject: task.subject, + gc_round: gcRound, + timestamp: new Date().toISOString(), + dimensions: [], + issues: [], + overall_score: 0, + verdict: 'PENDING' +} + +// === Dimension 1: Code Quality (25%) === +const codeQuality = { name: 'code-quality', weight: 0.25, score: 10, issues: [] } +for (const [file, content] of Object.entries(fileContents)) { + if (/:\s*any\b/.test(content)) { + codeQuality.issues.push({ file, severity: 'medium', issue: 'Using `any` type', fix: 'Replace with specific type', do: 'Define proper TypeScript types', dont: 'Use `any` to bypass type checking' }) + codeQuality.score -= 1.5 + } + if (/\.tsx$/.test(file) && /export/.test(content) && !/ErrorBoundary/.test(content) && /throw/.test(content)) { + codeQuality.issues.push({ file, severity: 'low', issue: 'No error boundary for component with throw', fix: 'Wrap with ErrorBoundary' }) + codeQuality.score -= 0.5 + } + if (/style=\{?\{/.test(content) && designTokens) { + codeQuality.issues.push({ file, severity: 'medium', issue: 'Inline styles detected', fix: 'Use design tokens or CSS classes', do: 'Use var(--color-*) tokens', dont: 'Hardcode style values inline' }) + codeQuality.score -= 1.5 + } + if (/catch\s*\(\s*\)\s*\{[\s]*\}/.test(content)) { + codeQuality.issues.push({ file, severity: 'high', issue: 'Empty catch block', fix: 'Add error handling logic', do: 'Log or handle the error', dont: 'Silently swallow exceptions' }) + codeQuality.score -= 2 + } + if (content.split('\n').length > 300) { + codeQuality.issues.push({ file, severity: 'medium', issue: 'File exceeds 300 lines', fix: 'Split into smaller modules' }) + codeQuality.score -= 1 + } +} +codeQuality.score = Math.max(0, codeQuality.score) 
+review.dimensions.push(codeQuality) + +// === Dimension 2: Accessibility (25%) === +const a11y = { name: 'accessibility', weight: 0.25, score: 10, issues: [] } +for (const [file, content] of Object.entries(fileContents)) { + if (!/\.(tsx|jsx|vue|svelte|html)$/.test(file)) continue + + if (/]*alt=/.test(content)) { + a11y.issues.push({ file, severity: 'high', issue: 'Image missing alt attribute', fix: 'Add descriptive alt text', do: 'Always provide alt text', dont: 'Leave alt empty without role="presentation"' }) + a11y.score -= 3 + } + if (/onClick/.test(content) && !/onKeyDown|onKeyPress|onKeyUp|role=.button/.test(content)) { + a11y.issues.push({ file, severity: 'medium', issue: 'Click handler without keyboard equivalent', fix: 'Add onKeyDown or role="button" tabIndex={0}' }) + a11y.score -= 1.5 + } + if (/ or aria-label', do: 'Associate every input with a label', dont: 'Use placeholder as sole label' }) + a11y.score -= 2 + } + if (/]*>\s* parseInt(h[2])) || [] + for (let i = 1; i < headings.length; i++) { + if (headings[i] - headings[i-1] > 1) { + a11y.issues.push({ file, severity: 'medium', issue: `Heading level skipped: h${headings[i-1]} โ†’ h${headings[i]}`, fix: 'Use sequential heading levels' }) + a11y.score -= 1 + } + } + // Focus-visible styles + if (/button| 0 && (ms < 100 || ms > 500)) { + uxPractices.issues.push({ file, severity: 'low', issue: `Transition ${ms}ms outside 150-300ms range`, fix: 'Use 150-300ms for micro-interactions' }) + uxPractices.score -= 0.5 + } + } + if (!/\.(tsx|jsx|vue|svelte)$/.test(file)) continue + // Loading states + if (/fetch|useQuery|useSWR|axios/.test(content) && !/loading|isLoading|skeleton|spinner/i.test(content)) { + uxPractices.issues.push({ file, severity: 'medium', issue: 'Data fetching without loading state', fix: 'Add loading indicator', do: 'Show skeleton/spinner during fetch', dont: 'Leave blank screen while loading' }) + uxPractices.score -= 1 + } + // Error states + if 
(/fetch|useQuery|useSWR|axios/.test(content) && !/error|isError|catch/i.test(content)) { + uxPractices.issues.push({ file, severity: 'high', issue: 'Data fetching without error handling', fix: 'Add error state UI', do: 'Show user-friendly error message', dont: 'Silently fail or show raw error' }) + uxPractices.score -= 2 + } + // Empty states + if (/\.map\(/.test(content) && !/empty|no.*data|no.*result|length\s*===?\s*0/i.test(content)) { + uxPractices.issues.push({ file, severity: 'low', issue: 'List rendering without empty state', fix: 'Add empty state message' }) + uxPractices.score -= 0.5 + } + // Responsive breakpoints + if (/className|class=/.test(content) && !/md:|lg:|@media/.test(content)) { + uxPractices.issues.push({ file, severity: 'medium', issue: 'No responsive breakpoints', fix: 'Mobile-first responsive design', do: 'Mobile-first responsive design', dont: 'Design for desktop only' }) + uxPractices.score -= 1 + } +} +uxPractices.score = Math.max(0, uxPractices.score) +review.dimensions.push(uxPractices) + +// === Dimension 5: Pre-Delivery (15%) === +// Detailed checklist: commands/pre-delivery-checklist.md +const preDelivery = { name: 'pre-delivery', weight: 0.15, score: 10, issues: [] } +const allContent = Object.values(fileContents).join('\n') + +// Per-file checks +for (const [file, content] of Object.entries(fileContents)) { + if (/console\.(log|debug|info)\(/.test(content) && !/test|spec|\.test\./.test(file)) { + preDelivery.issues.push({ file, severity: 'medium', issue: 'console.log in production code', fix: 'Remove or use proper logger' }) + preDelivery.score -= 1 + } + if (/\.(tsx|jsx)$/.test(file) && />\s*[A-Z][a-z]+\s+[a-z]+/.test(content) && !/t\(|intl|i18n|formatMessage/.test(content)) { + preDelivery.issues.push({ file, severity: 'low', issue: 'Hardcoded text โ€” consider i18n', fix: 'Extract to translation keys' }) + preDelivery.score -= 0.5 + } + if (/TODO|FIXME|HACK|XXX/.test(content)) { + preDelivery.issues.push({ file, severity: 
'low', issue: 'TODO/FIXME comment found', fix: 'Resolve or create issue' }) + preDelivery.score -= 0.5 + } +} + +// Global checklist items (from pre-delivery-checklist.md) +const checklist = [ + { check: "No emoji as functional icons", test: () => /[\u{1F300}-\u{1F9FF}]/u.test(allContent), severity: 'high' }, + { check: "cursor-pointer on clickable", test: () => /button|onClick/.test(allContent) && !/cursor-pointer/.test(allContent), severity: 'medium' }, + { check: "Focus states visible", test: () => /button|input| /animation|@keyframes/.test(allContent) && !/prefers-reduced-motion/.test(allContent), severity: 'medium' }, + { check: "Responsive breakpoints", test: () => !/md:|lg:|@media.*min-width/.test(allContent), severity: 'medium' }, + { check: "No hardcoded colors", test: () => { const nt = Object.entries(fileContents).filter(([f]) => f !== 'src/styles/tokens.css'); return nt.some(([,c]) => /#[0-9a-fA-F]{6}/.test(c)) }, severity: 'high' }, + { check: "Dark mode support", test: () => !/prefers-color-scheme|dark:|\.dark/.test(allContent), severity: 'medium' } +] +for (const item of checklist) { + try { + if (item.test()) { + preDelivery.issues.push({ check: item.check, severity: item.severity, issue: `Pre-delivery: ${item.check}` }) + preDelivery.score -= (item.severity === 'high' ? 2 : item.severity === 'medium' ? 
1 : 0.5) + } + } catch {} +} + +// Must-have checks from industry config +for (const req of mustHave) { + if (req === 'wcag-aaa' && !/aria-/.test(allContent)) { + preDelivery.issues.push({ severity: 'high', issue: 'WCAG AAA required but no ARIA attributes found' }) + preDelivery.score -= 3 + } + if (req === 'high-contrast' && !/high-contrast|forced-colors/.test(allContent)) { + preDelivery.issues.push({ severity: 'medium', issue: 'High contrast mode not supported' }) + preDelivery.score -= 1 + } +} +preDelivery.score = Math.max(0, preDelivery.score) +review.dimensions.push(preDelivery) + +// === Calculate Overall Score === +review.overall_score = review.dimensions.reduce((sum, d) => sum + d.score * d.weight, 0) +review.issues = review.dimensions.flatMap(d => d.issues) +const criticalCount = review.issues.filter(i => i.severity === 'high').length + +if (review.overall_score >= 8 && criticalCount === 0) { + review.verdict = 'PASS' +} else if (gcRound >= maxGCRounds) { + review.verdict = review.overall_score >= 6 ? 'PASS_WITH_WARNINGS' : 'FAIL' +} else { + review.verdict = 'NEEDS_FIX' +} +``` + +### Phase 4: Package Results + Shared Memory + +```javascript +const outputPath = sessionFolder + ? 
`${sessionFolder}/qa/audit-fe-${task.subject.replace(/[^a-zA-Z0-9-]/g, '-').toLowerCase()}-r${gcRound}.json` + : '.workflow/.tmp/qa-fe-audit.json' + +Bash(`mkdir -p "$(dirname '${outputPath}')"`) +Write(outputPath, JSON.stringify(review, null, 2)) + +// Wisdom contribution +if (sessionFolder && review.issues.length > 0) { + try { + const issuesPath = `${sessionFolder}/wisdom/issues.md` + const existing = Read(issuesPath) + const timestamp = new Date().toISOString().substring(0, 10) + const highIssues = review.issues.filter(i => i.severity === 'high') + if (highIssues.length > 0) { + const entries = highIssues.map(i => `- [${timestamp}] [fe-qa] ${i.issue} in ${i.file || 'global'}`).join('\n') + Write(issuesPath, existing + '\n' + entries) + } + } catch {} +} + +// Update shared memory with QA history +if (sessionFolder) { + try { + sharedMemory.qa_history = sharedMemory.qa_history || [] + sharedMemory.qa_history.push({ + task_subject: task.subject, + gc_round: gcRound, + verdict: review.verdict, + score: review.overall_score, + critical_count: criticalCount, + total_issues: review.issues.length, + timestamp: new Date().toISOString() + }) + Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2)) + } catch {} +} +``` + +### Phase 5: Report to Coordinator + +```javascript +const msgType = review.verdict === 'PASS' || review.verdict === 'PASS_WITH_WARNINGS' + ? 'qa_fe_passed' + : criticalCount > 0 ? 
'fix_required' : 'qa_fe_result' + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "fe-qa", to: "coordinator", + type: msgType, + summary: `[fe-qa] QA-FE R${gcRound}: ${review.verdict}, score=${review.overall_score.toFixed(1)}, ${criticalCount} critical`, + ref: outputPath +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[fe-qa] ## Frontend QA Review + +**Task**: ${task.subject} +**Round**: ${gcRound}/${maxGCRounds} +**Verdict**: ${review.verdict} +**Score**: ${review.overall_score.toFixed(1)}/10 +**Strictness**: ${strictness} +**Design Intelligence**: ${designIntel._source || 'not available'} + +### Dimension Scores +${review.dimensions.map(d => `- **${d.name}**: ${d.score.toFixed(1)}/10 (${d.issues.length} issues)`).join('\n')} + +### Critical Issues (${criticalCount}) +${review.issues.filter(i => i.severity === 'high').map(i => `- \`${i.file || i.check}\`: ${i.issue} โ†’ ${i.fix || ''}${i.do ? `\n โœ… Do: ${i.do}` : ''}${i.dont ? `\n โŒ Don't: ${i.dont}` : ''}`).join('\n') || 'None'} + +### Medium Issues +${review.issues.filter(i => i.severity === 'medium').slice(0, 5).map(i => `- \`${i.file || i.check}\`: ${i.issue} โ†’ ${i.fix || ''}`).join('\n') || 'None'} + +${review.verdict === 'NEEDS_FIX' ? 
`\n### Action Required\nfe-developer ้œ€ไฟฎๅค ${criticalCount} ไธช critical ้—ฎ้ข˜ๅŽ้‡ๆ–ฐๆไบคใ€‚` : ''} + +### Output: ${outputPath}`, + summary: `[fe-qa] QA-FE R${gcRound}: ${review.verdict}, ${review.overall_score.toFixed(1)}/10` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) +// Check for next QA-FE task โ†’ back to Phase 1 +``` + +## Generator-Critic Loop + +fe-developer โ†” fe-qa ๅพช็Žฏ็”ฑ coordinator ็ผ–ๆŽ’๏ผš + +``` +Round 1: DEV-FE-001 โ†’ QA-FE-001 + if QA verdict = NEEDS_FIX: + coordinator creates DEV-FE-002 (fix task, blockedBy QA-FE-001) + coordinator creates QA-FE-002 (re-review, blockedBy DEV-FE-002) +Round 2: DEV-FE-002 โ†’ QA-FE-002 + if still NEEDS_FIX: verdict = PASS_WITH_WARNINGS or FAIL (max 2 rounds) +``` + +**ๆ”ถๆ•›ๆกไปถ**: `overall_score >= 8 && critical_count === 0` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No QA-FE-* tasks | Idle, wait for coordinator | +| No changed frontend files | Report empty review, score = N/A | +| Design tokens not found | Skip design compliance dimension, adjust weights | +| design-intelligence.json not found | Skip industry anti-patterns, use standard strictness | +| Git diff fails | Use Glob to find recent frontend files | +| Max GC rounds exceeded | Force verdict (PASS_WITH_WARNINGS or FAIL) | +| ui-ux-pro-max not installed | Continue without design intelligence, note in report | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/planner/commands/explore.md b/.claude/skills_lib/team-lifecycle-v2/roles/planner/commands/explore.md new file mode 100644 index 00000000..4746d851 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/planner/commands/explore.md @@ -0,0 +1,466 @@ +# Command: Multi-Angle Exploration + +Phase 2 of planner execution - assess complexity, select exploration angles, and execute parallel exploration. + +## Overview + +This command performs multi-angle codebase exploration based on task complexity. 
Low complexity uses direct semantic search, while Medium/High complexity launches parallel cli-explore-agent subagents for comprehensive analysis. + +## Complexity Assessment + +### assessComplexity Function + +```javascript +function assessComplexity(desc) { + let score = 0 + if (/refactor|architect|restructure|ๆจกๅ—|็ณป็ปŸ/.test(desc)) score += 2 + if (/multiple|ๅคšไธช|across|่ทจ/.test(desc)) score += 2 + if (/integrate|้›†ๆˆ|api|database/.test(desc)) score += 1 + if (/security|ๅฎ‰ๅ…จ|performance|ๆ€ง่ƒฝ/.test(desc)) score += 1 + return score >= 4 ? 'High' : score >= 2 ? 'Medium' : 'Low' +} + +const complexity = assessComplexity(task.description) +``` + +### Complexity Levels + +| Level | Score | Characteristics | Angle Count | +|-------|-------|----------------|-------------| +| **Low** | 0-1 | Simple feature, single module, clear scope | 1 | +| **Medium** | 2-3 | Multiple modules, integration points, moderate scope | 3 | +| **High** | 4+ | Architecture changes, cross-cutting concerns, complex scope | 4 | + +## Angle Selection + +### ANGLE_PRESETS + +```javascript +const ANGLE_PRESETS = { + architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'], + security: ['security', 'auth-patterns', 'dataflow', 'validation'], + performance: ['performance', 'bottlenecks', 'caching', 'data-access'], + bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], + feature: ['patterns', 'integration-points', 'testing', 'dependencies'] +} +``` + +### selectAngles Function + +```javascript +function selectAngles(desc, count) { + const text = desc.toLowerCase() + let preset = 'feature' + if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture' + else if (/security|auth|permission|access/.test(text)) preset = 'security' + else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance' + else if (/fix|bug|error|issue|broken/.test(text)) preset = 'bugfix' + return ANGLE_PRESETS[preset].slice(0, count) +} + 
+const angleCount = complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1) +const selectedAngles = selectAngles(task.description, angleCount) +``` + +### Angle Definitions + +| Angle | Focus | Use Case | +|-------|-------|----------| +| **architecture** | System structure, layer boundaries, design patterns | Refactoring, restructuring | +| **dependencies** | Module dependencies, coupling, external libraries | Integration, modularity | +| **modularity** | Component boundaries, separation of concerns | Architecture changes | +| **integration-points** | API boundaries, data flow between modules | Feature development | +| **security** | Auth/authz, input validation, data protection | Security features | +| **auth-patterns** | Authentication flows, session management | Auth implementation | +| **dataflow** | Data transformation, state propagation | Bug fixes, features | +| **validation** | Input validation, error handling | Security, quality | +| **performance** | Bottlenecks, optimization opportunities | Performance tuning | +| **bottlenecks** | Slow operations, resource contention | Performance issues | +| **caching** | Cache strategies, invalidation patterns | Performance optimization | +| **data-access** | Database queries, data fetching patterns | Performance, features | +| **error-handling** | Error propagation, recovery strategies | Bug fixes | +| **state-management** | State updates, consistency | Bug fixes, features | +| **edge-cases** | Boundary conditions, error scenarios | Bug fixes, testing | +| **patterns** | Code patterns, conventions, best practices | Feature development | +| **testing** | Test coverage, test strategies | Feature development | + +## Exploration Execution + +### Low Complexity: Direct Semantic Search + +```javascript +if (complexity === 'Low') { + // Direct exploration via semantic search + const results = mcp__ace-tool__search_context({ + project_root_path: projectRoot, + query: task.description + }) + + // Transform ACE results 
to exploration JSON + const exploration = { + project_structure: "Analyzed via ACE semantic search", + relevant_files: results.files.map(f => ({ + path: f.path, + rationale: f.relevance_reason || "Semantic match to task description", + role: "modify_target", + discovery_source: "ace-search", + key_symbols: f.symbols || [] + })), + patterns: results.patterns || [], + dependencies: results.dependencies || [], + integration_points: results.integration_points || [], + constraints: [], + clarification_needs: [], + _metadata: { + exploration_angle: selectedAngles[0], + complexity: 'Low', + discovery_method: 'ace-semantic-search' + } + } + + Write(`${planDir}/exploration-${selectedAngles[0]}.json`, JSON.stringify(exploration, null, 2)) +} +``` + +### Medium/High Complexity: Parallel cli-explore-agent + +```javascript +else { + // Launch parallel cli-explore-agent for each angle + selectedAngles.forEach((angle, index) => { + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: `Explore: ${angle}`, + prompt: ` +## Task Objective +Execute **${angle}** exploration for task planning context. + +## Output Location +**Session Folder**: ${sessionFolder} +**Output File**: ${planDir}/exploration-${angle}.json + +## Assigned Context +- **Exploration Angle**: ${angle} +- **Task Description**: ${task.description} +- **Spec Context**: ${specContext ? 'Available โ€” use spec/requirements, spec/architecture, spec/epics for informed exploration' : 'Not available (impl-only mode)'} +- **Exploration Index**: ${index + 1} of ${selectedAngles.length} + +## MANDATORY FIRST STEPS +1. Run: rg -l "{relevant_keyword}" --type ts (locate relevant files) +2. Execute: cat ~/.ccw/workflows/cli-templates/schemas/explore-json-schema.json (get output schema) +3. 
Read: .workflow/project-tech.json (if exists - technology stack) + +## Expected Output +Write JSON to: ${planDir}/exploration-${angle}.json +Follow explore-json-schema.json structure with ${angle}-focused findings. + +**MANDATORY**: Every file in relevant_files MUST have: +- **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic) +- **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only +- **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual +- **key_symbols** (recommended): Key functions/classes/types relevant to task + +## Exploration Focus by Angle + +${getAngleFocusGuide(angle)} + +## Output Schema Structure + +\`\`\`json +{ + "project_structure": "string - high-level architecture overview", + "relevant_files": [ + { + "path": "string - relative file path", + "rationale": "string - WHY this file matters for ${angle} (>10 chars, specific)", + "role": "modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only", + "discovery_source": "bash-scan|cli-analysis|ace-search|dependency-trace|manual", + "key_symbols": ["function/class/type names"] + } + ], + "patterns": ["string - code patterns relevant to ${angle}"], + "dependencies": ["string - module/library dependencies"], + "integration_points": ["string - API/interface boundaries"], + "constraints": ["string - technical constraints"], + "clarification_needs": ["string - questions needing user input"], + "_metadata": { + "exploration_angle": "${angle}", + "complexity": "${complexity}", + "discovery_method": "cli-explore-agent" + } +} +\`\`\` +` + }) + }) +} +``` + +### Angle Focus Guide + +```javascript +function getAngleFocusGuide(angle) { + const guides = { + architecture: ` +**Architecture Focus**: +- Identify layer boundaries (presentation, business, data) +- Map module dependencies and coupling +- Locate 
design patterns (factory, strategy, observer, etc.) +- Find architectural decision records (ADRs) +- Analyze component responsibilities`, + + dependencies: ` +**Dependencies Focus**: +- Map internal module dependencies (import/require statements) +- Identify external library usage (package.json, requirements.txt) +- Trace dependency chains and circular dependencies +- Locate shared utilities and common modules +- Analyze coupling strength between modules`, + + modularity: ` +**Modularity Focus**: +- Identify module boundaries and interfaces +- Analyze separation of concerns +- Locate tightly coupled code +- Find opportunities for extraction/refactoring +- Map public vs private APIs`, + + 'integration-points': ` +**Integration Points Focus**: +- Locate API endpoints and routes +- Identify data flow between modules +- Find event emitters/listeners +- Map external service integrations +- Analyze interface contracts`, + + security: ` +**Security Focus**: +- Locate authentication/authorization logic +- Identify input validation points +- Find sensitive data handling +- Analyze access control mechanisms +- Locate security-related middleware`, + + 'auth-patterns': ` +**Auth Patterns Focus**: +- Identify authentication flows (login, logout, refresh) +- Locate session management code +- Find token generation/validation +- Map user permission checks +- Analyze auth middleware`, + + dataflow: ` +**Dataflow Focus**: +- Trace data transformations +- Identify state propagation paths +- Locate data validation points +- Map data sources and sinks +- Analyze data mutation points`, + + validation: ` +**Validation Focus**: +- Locate input validation logic +- Identify schema definitions +- Find error handling for invalid data +- Map validation middleware +- Analyze sanitization functions`, + + performance: ` +**Performance Focus**: +- Identify computational bottlenecks +- Locate database queries (N+1 problems) +- Find synchronous blocking operations +- Map resource-intensive 
operations +- Analyze algorithm complexity`, + + bottlenecks: ` +**Bottlenecks Focus**: +- Locate slow operations (profiling data) +- Identify resource contention points +- Find inefficient algorithms +- Map hot paths in code +- Analyze concurrency issues`, + + caching: ` +**Caching Focus**: +- Locate existing cache implementations +- Identify cacheable operations +- Find cache invalidation logic +- Map cache key strategies +- Analyze cache hit/miss patterns`, + + 'data-access': ` +**Data Access Focus**: +- Locate database query patterns +- Identify ORM/query builder usage +- Find data fetching strategies +- Map data access layers +- Analyze query optimization opportunities`, + + 'error-handling': ` +**Error Handling Focus**: +- Locate try-catch blocks +- Identify error propagation paths +- Find error recovery strategies +- Map error logging points +- Analyze error types and handling`, + + 'state-management': ` +**State Management Focus**: +- Locate state containers (Redux, Vuex, etc.) +- Identify state update patterns +- Find state synchronization logic +- Map state dependencies +- Analyze state consistency mechanisms`, + + 'edge-cases': ` +**Edge Cases Focus**: +- Identify boundary conditions +- Locate null/undefined handling +- Find empty array/object handling +- Map error scenarios +- Analyze exceptional flows`, + + patterns: ` +**Patterns Focus**: +- Identify code patterns and conventions +- Locate design pattern implementations +- Find naming conventions +- Map code organization patterns +- Analyze best practices usage`, + + testing: ` +**Testing Focus**: +- Locate test files and test utilities +- Identify test coverage gaps +- Find test patterns (unit, integration, e2e) +- Map mocking/stubbing strategies +- Analyze test organization` + } + + return guides[angle] || `**${angle} Focus**: Analyze codebase from ${angle} perspective` +} +``` + +## Explorations Manifest + +```javascript +// Build explorations manifest +const explorationManifest = { + session_id: 
`${taskSlug}-${dateStr}`, + task_description: task.description, + complexity: complexity, + exploration_count: selectedAngles.length, + explorations: selectedAngles.map(angle => ({ + angle: angle, + file: `exploration-${angle}.json`, + path: `${planDir}/exploration-${angle}.json` + })) +} +Write(`${planDir}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2)) +``` + +## Output Schema + +### explore-json-schema.json Structure + +```json +{ + "project_structure": "string - high-level architecture overview", + "relevant_files": [ + { + "path": "string - relative file path", + "rationale": "string - specific selection basis (>10 chars)", + "role": "modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only", + "discovery_source": "bash-scan|cli-analysis|ace-search|dependency-trace|manual", + "key_symbols": ["string - function/class/type names"] + } + ], + "patterns": ["string - code patterns relevant to angle"], + "dependencies": ["string - module/library dependencies"], + "integration_points": ["string - API/interface boundaries"], + "constraints": ["string - technical constraints"], + "clarification_needs": ["string - questions needing user input"], + "_metadata": { + "exploration_angle": "string - angle name", + "complexity": "Low|Medium|High", + "discovery_method": "ace-semantic-search|cli-explore-agent" + } +} +``` + +## Integration with Phase 3 + +Phase 3 (Plan Generation) consumes: +1. `explorations-manifest.json` - list of exploration files +2. `exploration-{angle}.json` - per-angle exploration results +3. `specContext` (if available) - requirements, architecture, epics + +These inputs are passed to cli-lite-planning-agent for plan generation. 
+ +## Error Handling + +### Exploration Agent Failure + +```javascript +try { + Task({ + subagent_type: "cli-explore-agent", + run_in_background: false, + description: `Explore: ${angle}`, + prompt: `...` + }) +} catch (error) { + // Skip exploration, continue with available explorations + console.error(`[planner] Exploration failed for angle: ${angle}`, error) + // Remove failed angle from manifest + explorationManifest.explorations = explorationManifest.explorations.filter(e => e.angle !== angle) + explorationManifest.exploration_count = explorationManifest.explorations.length +} +``` + +### All Explorations Fail + +```javascript +if (explorationManifest.exploration_count === 0) { + // Fallback: Plan from task description only + console.warn(`[planner] All explorations failed, planning from task description only`) + // Proceed to Phase 3 with empty explorations +} +``` + +### ACE Search Failure (Low Complexity) + +```javascript +try { + const results = mcp__ace-tool__search_context({ + project_root_path: projectRoot, + query: task.description + }) +} catch (error) { + // Fallback: Use ripgrep for basic file discovery + const rgResults = Bash(`rg -l "${task.description}" --type ts`) + const exploration = { + project_structure: "Basic file discovery via ripgrep", + relevant_files: rgResults.split('\n').map(path => ({ + path: path.trim(), + rationale: "Matched task description keywords", + role: "modify_target", + discovery_source: "bash-scan", + key_symbols: [] + })), + patterns: [], + dependencies: [], + integration_points: [], + constraints: [], + clarification_needs: [], + _metadata: { + exploration_angle: selectedAngles[0], + complexity: 'Low', + discovery_method: 'ripgrep-fallback' + } + } + Write(`${planDir}/exploration-${selectedAngles[0]}.json`, JSON.stringify(exploration, null, 2)) +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/planner/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/planner/role.md new file mode 100644 index 
00000000..c658ec3a --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/planner/role.md @@ -0,0 +1,253 @@ +# Role: planner + +Multi-angle code exploration and structured implementation planning. Submits plans to the coordinator for approval. + +## Role Identity + +- **Name**: `planner` +- **Task Prefix**: `PLAN-*` +- **Output Tag**: `[planner]` +- **Responsibility**: Code exploration โ†’ Implementation planning โ†’ Coordinator approval +- **Communication**: SendMessage to coordinator only + +## Role Boundaries + +### MUST +- Only process PLAN-* tasks +- Communicate only with coordinator +- Write plan artifacts to `plan/` folder +- Tag all SendMessage and team_msg calls with `[planner]` +- Assess complexity (Low/Medium/High) +- Execute multi-angle exploration based on complexity +- Generate plan.json + .task/TASK-*.json following schemas +- Submit plan for coordinator approval +- Load spec context in full-lifecycle mode + +### MUST NOT +- Create tasks +- Contact other workers directly +- Implement code +- Modify spec documents +- Skip complexity assessment +- Proceed without exploration (Medium/High complexity) +- Generate plan without schema validation + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `plan_ready` | planner โ†’ coordinator | Plan generation complete | With plan.json path and task count summary | +| `plan_revision` | planner โ†’ coordinator | Plan revised and resubmitted | Describes changes made | +| `impl_progress` | planner โ†’ coordinator | Exploration phase progress | Optional, for long explorations | +| `error` | planner โ†’ coordinator | Unrecoverable error | Exploration failure, schema missing, etc. 
| + +## Message Bus + +Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log: + +```javascript +// Plan ready +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "plan_ready", summary: "[planner] Plan ready: 3 tasks, Medium complexity", ref: `${sessionFolder}/plan/plan.json` }) + +// Plan revision +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "plan_revision", summary: "[planner] Split task-2 into two subtasks per feedback" }) + +// Error report +mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "error", summary: "[planner] plan-overview-base-schema.json not found, using default structure" }) +``` + +### CLI Fallback + +When `mcp__ccw-tools__team_msg` MCP is unavailable: + +```javascript +Bash(`ccw team log --team "${teamName}" --from "planner" --to "coordinator" --type "plan_ready" --summary "[planner] Plan ready: 3 tasks" --ref "${sessionFolder}/plan/plan.json" --json`) +``` + +## Toolbox + +### Available Commands +- `commands/explore.md` - Multi-angle codebase exploration (Phase 2) + +### Subagent Capabilities +- **cli-explore-agent**: Per-angle exploration (Medium/High complexity) +- **cli-lite-planning-agent**: Plan generation (Medium/High complexity) + +### CLI Capabilities +None directly (delegates to subagents) + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('PLAN-') && + t.owner === 'planner' && + t.status === 'pending' && + t.blockedBy.length === 0 +) + +if (myTasks.length === 0) return // idle + +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 1.5: Load Spec Context (Full-Lifecycle Mode) + +```javascript +// Extract session folder from task description (set by coordinator) +const sessionMatch = 
task.description.match(/Session:\s*(.+)/) +const sessionFolder = sessionMatch ? sessionMatch[1].trim() : `.workflow/.team/default` +const planDir = `${sessionFolder}/plan` +Bash(`mkdir -p ${planDir}`) + +// Check if spec directory exists (full-lifecycle mode) +const specDir = `${sessionFolder}/spec` +let specContext = null +try { + const reqIndex = Read(`${specDir}/requirements/_index.md`) + const archIndex = Read(`${specDir}/architecture/_index.md`) + const epicsIndex = Read(`${specDir}/epics/_index.md`) + const specConfig = JSON.parse(Read(`${specDir}/spec-config.json`)) + specContext = { reqIndex, archIndex, epicsIndex, specConfig } +} catch { /* impl-only mode has no spec */ } +``` + +### Phase 2: Multi-Angle Exploration + +**Delegate to**: `Read("commands/explore.md")` + +Execute complexity assessment, angle selection, and parallel exploration. See `commands/explore.md` for full implementation. + +### Phase 3: Plan Generation + +```javascript +// Read schema reference +const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`) + +if (complexity === 'Low') { + // Direct Claude planning + Bash(`mkdir -p ${planDir}/.task`) + // Generate plan.json + .task/TASK-*.json following schemas + + const plan = { + session_id: `${taskSlug}-${dateStr}`, + task_description: task.description, + complexity: 'Low', + approach: "Direct implementation based on semantic search", + task_count: 1, + task_ids: ['TASK-001'], + exploration_refs: [`${planDir}/exploration-patterns.json`] + } + Write(`${planDir}/plan.json`, JSON.stringify(plan, null, 2)) + + const taskDetail = { + id: 'TASK-001', + title: task.subject, + description: task.description, + files: [], + convergence: { criteria: ["Implementation complete", "Tests pass"] }, + depends_on: [] + } + Write(`${planDir}/.task/TASK-001.json`, JSON.stringify(taskDetail, null, 2)) + +} else { + // Use cli-lite-planning-agent for Medium/High + Task({ + subagent_type: "cli-lite-planning-agent", + 
run_in_background: false, + description: "Generate detailed implementation plan", + prompt: `Generate implementation plan. +Output: ${planDir}/plan.json + ${planDir}/.task/TASK-*.json +Schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json +Task Description: ${task.description} +Explorations: ${explorationManifest} +Complexity: ${complexity} +${specContext ? `Spec Context: +- Requirements: ${specContext.reqIndex.substring(0, 500)} +- Architecture: ${specContext.archIndex.substring(0, 500)} +- Epics: ${specContext.epicsIndex.substring(0, 500)} +Reference REQ-* IDs, follow ADR decisions, reuse Epic/Story decomposition.` : ''} +Requirements: 2-7 tasks, each with id, title, files[].change, convergence.criteria, depends_on` + }) +} +``` + +### Phase 4: Submit for Approval + +```javascript +const plan = JSON.parse(Read(`${planDir}/plan.json`)) +const planTasks = plan.task_ids.map(id => JSON.parse(Read(`${planDir}/.task/${id}.json`))) +const taskCount = plan.task_count || plan.task_ids.length + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "planner", to: "coordinator", + type: "plan_ready", + summary: `[planner] Plan ready: ${taskCount} tasks, ${complexity} complexity`, + ref: `${planDir}/plan.json` +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[planner] ## Plan Ready for Review + +**Task**: ${task.subject} +**Complexity**: ${complexity} +**Tasks**: ${taskCount} + +### Task Summary +${planTasks.map((t, i) => (i+1) + '. 
' + t.title).join('\n')} + +### Approach +${plan.approach} + +### Plan Location +${planDir}/plan.json +Task Files: ${planDir}/.task/ + +Please review and approve or request revisions.`, + summary: `[planner] Plan ready: ${taskCount} tasks` +}) + +// Wait for coordinator response (approve โ†’ mark completed, revision โ†’ update and resubmit) +``` + +### Phase 5: After Approval + +```javascript +TaskUpdate({ taskId: task.id, status: 'completed' }) + +// Check for next PLAN task โ†’ back to Phase 1 +``` + +## Session Files + +``` +{sessionFolder}/plan/ +โ”œโ”€โ”€ exploration-{angle}.json +โ”œโ”€โ”€ explorations-manifest.json +โ”œโ”€โ”€ planning-context.md +โ”œโ”€โ”€ plan.json +โ””โ”€โ”€ .task/ + โ””โ”€โ”€ TASK-*.json +``` + +> **Note**: `sessionFolder` is extracted from task description (`Session: .workflow/.team/TLS-xxx`). Plan outputs go to `plan/` subdirectory. In full-lifecycle mode, spec products are available at `../spec/`. + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No PLAN-* tasks available | Idle, wait for coordinator assignment | +| Exploration agent failure | Skip exploration, plan from task description only | +| Planning agent failure | Fallback to direct Claude planning | +| Plan rejected 3+ times | Notify coordinator with `[planner]` tag, suggest alternative approach | +| Schema file not found | Use basic plan structure without schema validation, log error with `[planner]` tag | +| Spec context load failure | Continue in impl-only mode (no spec context) | +| Session folder not found | Notify coordinator with `[planner]` tag, request session path | +| Unexpected error | Log error via team_msg with `[planner]` tag, report to coordinator | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/code-review.md b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/code-review.md new file mode 100644 index 00000000..53e4a635 --- /dev/null +++ 
b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/code-review.md @@ -0,0 +1,689 @@ +# Code Review Command + +## Purpose +4-dimension code review analyzing quality, security, architecture, and requirements compliance. + +## Review Dimensions + +### 1. Quality Review + +```javascript +function reviewQuality(files, gitDiff) { + const issues = { + critical: [], + high: [], + medium: [], + low: [] + } + + for (const file of files) { + const content = file.content + const lines = content.split("\n") + + // Check for @ts-ignore / @ts-expect-error + lines.forEach((line, idx) => { + if (line.includes("@ts-ignore") || line.includes("@ts-expect-error")) { + const nextLine = lines[idx + 1] || "" + const hasJustification = line.includes("//") && line.split("//")[1].trim().length > 10 + + if (!hasJustification) { + issues.high.push({ + file: file.path, + line: idx + 1, + type: "ts-ignore-without-justification", + message: "TypeScript error suppression without explanation", + code: line.trim() + }) + } + } + }) + + // Check for 'any' type usage + const anyMatches = Grep("\\bany\\b", { path: file.path, "-n": true }) + if (anyMatches) { + anyMatches.forEach(match => { + // Exclude comments and type definitions that are intentionally generic + if (!match.line.includes("//") && !match.line.includes("Generic")) { + issues.high.push({ + file: file.path, + line: match.lineNumber, + type: "any-type-usage", + message: "Using 'any' type reduces type safety", + code: match.line.trim() + }) + } + }) + } + + // Check for console.log in production code + const consoleMatches = Grep("console\\.(log|debug|info)", { path: file.path, "-n": true }) + if (consoleMatches && !file.path.includes("test")) { + consoleMatches.forEach(match => { + issues.high.push({ + file: file.path, + line: match.lineNumber, + type: "console-log", + message: "Console statements should be removed from production code", + code: match.line.trim() + }) + }) + } + + // Check for empty catch blocks + const 
emptyCatchRegex = /catch\s*\([^)]*\)\s*\{\s*\}/g + let match + while ((match = emptyCatchRegex.exec(content)) !== null) { + const lineNumber = content.substring(0, match.index).split("\n").length + issues.critical.push({ + file: file.path, + line: lineNumber, + type: "empty-catch", + message: "Empty catch block silently swallows errors", + code: match[0] + }) + } + + // Check for magic numbers + const magicNumberRegex = /(?<![\w."'])\d{3,}(?![\w."'])/g + while ((match = magicNumberRegex.exec(content)) !== null) { + const lineNumber = content.substring(0, match.index).split("\n").length + issues.low.push({ + file: file.path, + line: lineNumber, + type: "magic-number", + message: "Magic number should be extracted to a named constant", + code: match[0] + }) + } + + // Check for duplicate code lines (simple heuristic) + const lineHashes = new Map() + lines.forEach((line, idx) => { + const trimmed = line.trim() + if (trimmed.length > 30 && !trimmed.startsWith("//")) { + if (!lineHashes.has(trimmed)) { + lineHashes.set(trimmed, []) + } + lineHashes.get(trimmed).push(idx + 1) + } + }) + + lineHashes.forEach((occurrences, line) => { + if (occurrences.length > 2) { + issues.medium.push({ + file: file.path, + line: occurrences[0], + type: "duplicate-code", + message: `Duplicate code found at lines: ${occurrences.join(", ")}`, + code: line + }) + } + }) + } + + return issues +} +``` + +### 2. Security Review + +```javascript +function reviewSecurity(files) { + const issues = { + critical: [], + high: [], + medium: [], + low: [] + } + + for (const file of files) { + const content = file.content + + // Check for eval/exec usage + const evalMatches = Grep("\\b(eval|exec|Function\\(|setTimeout\\(.*string|setInterval\\(.*string)\\b", { + path: file.path, + "-n": true + }) + if (evalMatches) { + evalMatches.forEach(match => { + issues.high.push({ + file: file.path, + line: match.lineNumber, + type: "dangerous-eval", + message: "eval/exec usage can lead to code injection vulnerabilities", + code: match.line.trim() + }) + }) + } + + // Check for innerHTML/dangerouslySetInnerHTML + const innerHTMLMatches = Grep("(innerHTML|dangerouslySetInnerHTML)", { + path: file.path, + "-n": true + }) + if (innerHTMLMatches) { + innerHTMLMatches.forEach(match => { + issues.high.push({ + file: file.path, + line: match.lineNumber, + type: "xss-risk", + message: "Direct HTML injection can lead to XSS vulnerabilities", + code: match.line.trim() + }) + 
}) + } + + // Check for hardcoded secrets + const secretPatterns = [ + /api[_-]?key\s*=\s*['"][^'"]{20,}['"]/i, + /password\s*=\s*['"][^'"]+['"]/i, + /secret\s*=\s*['"][^'"]{20,}['"]/i, + /token\s*=\s*['"][^'"]{20,}['"]/i, + /aws[_-]?access[_-]?key/i, + /private[_-]?key\s*=\s*['"][^'"]+['"]/i + ] + + secretPatterns.forEach(pattern => { + const matches = content.match(new RegExp(pattern, "gm")) + if (matches) { + matches.forEach(match => { + const lineNumber = content.substring(0, content.indexOf(match)).split("\n").length + issues.critical.push({ + file: file.path, + line: lineNumber, + type: "hardcoded-secret", + message: "Hardcoded secrets should be moved to environment variables", + code: match.replace(/['"][^'"]+['"]/, "'***'") // Redact secret + }) + }) + } + }) + + // Check for SQL injection vectors + const sqlInjectionMatches = Grep("(query|execute)\\s*\\(.*\\+.*\\)", { + path: file.path, + "-n": true + }) + if (sqlInjectionMatches) { + sqlInjectionMatches.forEach(match => { + if (!match.line.includes("//") && !match.line.includes("prepared")) { + issues.critical.push({ + file: file.path, + line: match.lineNumber, + type: "sql-injection", + message: "String concatenation in SQL queries can lead to SQL injection", + code: match.line.trim() + }) + } + }) + } + + // Check for insecure random + const insecureRandomMatches = Grep("Math\\.random\\(\\)", { + path: file.path, + "-n": true + }) + if (insecureRandomMatches) { + insecureRandomMatches.forEach(match => { + // Check if used for security purposes + const context = content.substring( + Math.max(0, content.indexOf(match.line) - 200), + content.indexOf(match.line) + 200 + ) + if (context.match(/token|key|secret|password|session/i)) { + issues.medium.push({ + file: file.path, + line: match.lineNumber, + type: "insecure-random", + message: "Math.random() is not cryptographically secure, use crypto.randomBytes()", + code: match.line.trim() + }) + } + }) + } + + // Check for missing input validation + const 
functionMatches = Grep("(function|const.*=.*\\(|async.*\\()", { + path: file.path, + "-n": true + }) + if (functionMatches) { + functionMatches.forEach(match => { + // Simple heuristic: check if function has parameters but no validation + if (match.line.includes("(") && !match.line.includes("()")) { + const nextLines = content.split("\n").slice(match.lineNumber, match.lineNumber + 5).join("\n") + const hasValidation = nextLines.match(/if\s*\(|throw|assert|validate|check/) + + if (!hasValidation && !match.line.includes("test") && !match.line.includes("mock")) { + issues.low.push({ + file: file.path, + line: match.lineNumber, + type: "missing-validation", + message: "Function parameters should be validated", + code: match.line.trim() + }) + } + } + }) + } + } + + return issues +} +``` + +### 3. Architecture Review + +```javascript +function reviewArchitecture(files) { + const issues = { + critical: [], + high: [], + medium: [], + low: [] + } + + for (const file of files) { + const content = file.content + const lines = content.split("\n") + + // Check for parent directory imports + const importMatches = Grep("from\\s+['\"](\\.\\./)+", { + path: file.path, + "-n": true + }) + if (importMatches) { + importMatches.forEach(match => { + const parentLevels = (match.line.match(/\.\.\//g) || []).length + + if (parentLevels > 2) { + issues.high.push({ + file: file.path, + line: match.lineNumber, + type: "excessive-parent-imports", + message: `Import traverses ${parentLevels} parent directories, consider restructuring`, + code: match.line.trim() + }) + } else if (parentLevels === 2) { + issues.medium.push({ + file: file.path, + line: match.lineNumber, + type: "parent-imports", + message: "Consider using absolute imports or restructuring modules", + code: match.line.trim() + }) + } + }) + } + + // Check for large files + const lineCount = lines.length + if (lineCount > 500) { + issues.medium.push({ + file: file.path, + line: 1, + type: "large-file", + message: `File has 
${lineCount} lines, consider splitting into smaller modules`, + code: `Total lines: ${lineCount}` + }) + } + + // Check for circular dependencies (simple heuristic) + const imports = lines + .filter(line => line.match(/^import.*from/)) + .map(line => { + const match = line.match(/from\s+['"](.+?)['"]/) + return match ? match[1] : null + }) + .filter(Boolean) + + // Check if any imported file imports this file back + for (const importPath of imports) { + const resolvedPath = resolveImportPath(file.path, importPath) + if (resolvedPath && Bash(`test -f ${resolvedPath}`).exitCode === 0) { + const importedContent = Read(resolvedPath) + const reverseImport = importedContent.includes(file.path.replace(/\.[jt]sx?$/, "")) + + if (reverseImport) { + issues.critical.push({ + file: file.path, + line: 1, + type: "circular-dependency", + message: `Circular dependency detected with ${resolvedPath}`, + code: `${file.path} โ†” ${resolvedPath}` + }) + } + } + } + + // Check for tight coupling (many imports from same module) + const importCounts = {} + imports.forEach(imp => { + const baseModule = imp.split("/")[0] + importCounts[baseModule] = (importCounts[baseModule] || 0) + 1 + }) + + Object.entries(importCounts).forEach(([module, count]) => { + if (count > 5) { + issues.medium.push({ + file: file.path, + line: 1, + type: "tight-coupling", + message: `File imports ${count} items from '${module}', consider facade pattern`, + code: `Imports from ${module}: ${count}` + }) + } + }) + + // Check for missing abstractions (long functions) + const functionRegex = /(function|const.*=.*\(|async.*\()/g + let match + while ((match = functionRegex.exec(content)) !== null) { + const startLine = content.substring(0, match.index).split("\n").length + const functionBody = extractFunctionBody(content, match.index) + const functionLines = functionBody.split("\n").length + + if (functionLines > 50) { + issues.medium.push({ + file: file.path, + line: startLine, + type: "long-function", + message: 
`Function has ${functionLines} lines, consider extracting smaller functions`, + code: match[0].trim() + }) + } + } + } + + return issues +} + +function resolveImportPath(fromFile, importPath) { + if (importPath.startsWith(".")) { + const dir = fromFile.substring(0, fromFile.lastIndexOf("/")) + const resolved = `${dir}/${importPath}`.replace(/\/\.\//g, "/") + + // Try with extensions + for (const ext of [".ts", ".js", ".tsx", ".jsx"]) { + if (Bash(`test -f ${resolved}${ext}`).exitCode === 0) { + return `${resolved}${ext}` + } + } + } + return null +} + +function extractFunctionBody(content, startIndex) { + let braceCount = 0 + let inFunction = false + let body = "" + + for (let i = startIndex; i < content.length; i++) { + const char = content[i] + + if (char === "{") { + braceCount++ + inFunction = true + } else if (char === "}") { + braceCount-- + } + + if (inFunction) { + body += char + } + + if (inFunction && braceCount === 0) { + break + } + } + + return body +} +``` + +### 4. Requirements Verification + +```javascript +function verifyRequirements(plan, files, gitDiff) { + const issues = { + critical: [], + high: [], + medium: [], + low: [] + } + + // Extract acceptance criteria from plan + const acceptanceCriteria = extractAcceptanceCriteria(plan) + + // Verify each criterion + for (const criterion of acceptanceCriteria) { + const verified = verifyCriterion(criterion, files, gitDiff) + + if (!verified.met) { + issues.high.push({ + file: "plan", + line: criterion.lineNumber, + type: "unmet-acceptance-criteria", + message: `Acceptance criterion not met: ${criterion.text}`, + code: criterion.text + }) + } else if (verified.partial) { + issues.medium.push({ + file: "plan", + line: criterion.lineNumber, + type: "partial-acceptance-criteria", + message: `Acceptance criterion partially met: ${criterion.text}`, + code: criterion.text + }) + } + } + + // Check for missing error handling + const errorHandlingRequired = plan.match(/error handling|exception|validation/i) + 
if (errorHandlingRequired) { + const hasErrorHandling = files.some(file => + file.content.match(/try\s*\{|catch\s*\(|throw\s+new|\.catch\(/) + ) + + if (!hasErrorHandling) { + issues.high.push({ + file: "implementation", + line: 1, + type: "missing-error-handling", + message: "Plan requires error handling but none found in implementation", + code: "No try-catch or error handling detected" + }) + } + } + + // Check for missing tests + const testingRequired = plan.match(/test|testing|coverage/i) + if (testingRequired) { + const hasTests = files.some(file => + file.path.match(/\.(test|spec)\.[jt]sx?$/) + ) + + if (!hasTests) { + issues.medium.push({ + file: "implementation", + line: 1, + type: "missing-tests", + message: "Plan requires tests but no test files found", + code: "No test files detected" + }) + } + } + + return issues +} + +function extractAcceptanceCriteria(plan) { + const criteria = [] + const lines = plan.split("\n") + + let inAcceptanceSection = false + lines.forEach((line, idx) => { + if (line.match(/acceptance criteria/i)) { + inAcceptanceSection = true + } else if (line.match(/^##/)) { + inAcceptanceSection = false + } else if (inAcceptanceSection && line.match(/^[-*]\s+/)) { + criteria.push({ + text: line.replace(/^[-*]\s+/, "").trim(), + lineNumber: idx + 1 + }) + } + }) + + return criteria +} + +function verifyCriterion(criterion, files, gitDiff) { + // Extract keywords from criterion + const keywords = criterion.text.toLowerCase().match(/\b\w{4,}\b/g) || [] + + // Check if keywords appear in implementation + let matchCount = 0 + for (const file of files) { + const content = file.content.toLowerCase() + for (const keyword of keywords) { + if (content.includes(keyword)) { + matchCount++ + } + } + } + + const matchRatio = matchCount / keywords.length + + return { + met: matchRatio >= 0.7, + partial: matchRatio >= 0.4 && matchRatio < 0.7, + matchRatio: matchRatio + } +} +``` + +## Verdict Determination + +```javascript +function 
determineVerdict(qualityIssues, securityIssues, architectureIssues, requirementIssues) { + const allIssues = { + critical: [ + ...qualityIssues.critical, + ...securityIssues.critical, + ...architectureIssues.critical, + ...requirementIssues.critical + ], + high: [ + ...qualityIssues.high, + ...securityIssues.high, + ...architectureIssues.high, + ...requirementIssues.high + ], + medium: [ + ...qualityIssues.medium, + ...securityIssues.medium, + ...architectureIssues.medium, + ...requirementIssues.medium + ], + low: [ + ...qualityIssues.low, + ...securityIssues.low, + ...architectureIssues.low, + ...requirementIssues.low + ] + } + + // BLOCK: Any critical issues + if (allIssues.critical.length > 0) { + return { + verdict: "BLOCK", + reason: `${allIssues.critical.length} critical issue(s) must be fixed`, + blocking_issues: allIssues.critical + } + } + + // CONDITIONAL: High or medium issues + if (allIssues.high.length > 0 || allIssues.medium.length > 0) { + return { + verdict: "CONDITIONAL", + reason: `${allIssues.high.length} high and ${allIssues.medium.length} medium issue(s) should be addressed`, + blocking_issues: [] + } + } + + // APPROVE: Only low issues or none + return { + verdict: "APPROVE", + reason: allIssues.low.length > 0 + ? 
`${allIssues.low.length} low-priority issue(s) noted` + : "No issues found", + blocking_issues: [] + } +} +``` + +## Report Formatting + +```javascript +function formatCodeReviewReport(report) { + const { verdict, dimensions, recommendations, blocking_issues } = report + + let markdown = `# Code Review Report\n\n` + markdown += `**Verdict**: ${verdict}\n\n` + + if (blocking_issues.length > 0) { + markdown += `## Blocking Issues\n\n` + blocking_issues.forEach(issue => { + markdown += `- **${issue.type}** (${issue.file}:${issue.line})\n` + markdown += ` ${issue.message}\n` + markdown += ` \`\`\`\n ${issue.code}\n \`\`\`\n\n` + }) + } + + markdown += `## Review Dimensions\n\n` + + markdown += `### Quality Issues\n` + markdown += formatIssuesByDimension(dimensions.quality) + + markdown += `### Security Issues\n` + markdown += formatIssuesByDimension(dimensions.security) + + markdown += `### Architecture Issues\n` + markdown += formatIssuesByDimension(dimensions.architecture) + + markdown += `### Requirements Issues\n` + markdown += formatIssuesByDimension(dimensions.requirements) + + if (recommendations.length > 0) { + markdown += `## Recommendations\n\n` + recommendations.forEach((rec, i) => { + markdown += `${i + 1}. 
${rec}\n` + }) + } + + return markdown +} + +function formatIssuesByDimension(issues) { + let markdown = "" + + const severities = ["critical", "high", "medium", "low"] + severities.forEach(severity => { + if (issues[severity].length > 0) { + markdown += `\n**${severity.toUpperCase()}** (${issues[severity].length})\n\n` + issues[severity].forEach(issue => { + markdown += `- ${issue.message} (${issue.file}:${issue.line})\n` + markdown += ` \`${issue.code}\`\n\n` + }) + } + }) + + return markdown || "No issues found.\n\n" +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/spec-quality.md b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/spec-quality.md new file mode 100644 index 00000000..63aa6f74 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/commands/spec-quality.md @@ -0,0 +1,845 @@ +# Spec Quality Command + +## Purpose +5-dimension spec quality check with readiness report generation and quality gate determination. + +## Quality Dimensions + +### 1. 
Completeness (Weight: 25%) + +```javascript +function scoreCompleteness(specDocs) { + const requiredSections = { + "product-brief": [ + "Vision Statement", + "Problem Statement", + "Target Audience", + "Success Metrics", + "Constraints" + ], + "prd": [ + "Goals", + "Requirements", + "User Stories", + "Acceptance Criteria", + "Non-Functional Requirements" + ], + "architecture": [ + "System Overview", + "Component Design", + "Data Models", + "API Specifications", + "Technology Stack" + ], + "user-stories": [ + "Story List", + "Acceptance Criteria", + "Priority", + "Estimation" + ], + "implementation-plan": [ + "Task Breakdown", + "Dependencies", + "Timeline", + "Resource Allocation" + ], + "test-strategy": [ + "Test Scope", + "Test Cases", + "Coverage Goals", + "Test Environment" + ] + } + + let totalScore = 0 + let totalWeight = 0 + const details = [] + + for (const doc of specDocs) { + const phase = doc.phase + const expectedSections = requiredSections[phase] || [] + + if (expectedSections.length === 0) continue + + let presentCount = 0 + let substantialCount = 0 + + for (const section of expectedSections) { + const sectionRegex = new RegExp(`##\\s+${section}`, "i") + const sectionMatch = doc.content.match(sectionRegex) + + if (sectionMatch) { + presentCount++ + + // Check if section has substantial content (not just header) + const sectionIndex = doc.content.indexOf(sectionMatch[0]) + const nextSectionIndex = doc.content.indexOf("\n##", sectionIndex + 1) + const sectionContent = nextSectionIndex > -1 + ? 
doc.content.substring(sectionIndex, nextSectionIndex) + : doc.content.substring(sectionIndex) + + // Substantial = more than 100 chars excluding header + const contentWithoutHeader = sectionContent.replace(sectionRegex, "").trim() + if (contentWithoutHeader.length > 100) { + substantialCount++ + } + } + } + + const presentRatio = presentCount / expectedSections.length + const substantialRatio = substantialCount / expectedSections.length + + // Score: 50% for presence, 50% for substance + const docScore = (presentRatio * 50) + (substantialRatio * 50) + + totalScore += docScore + totalWeight += 100 + + details.push({ + phase: phase, + score: docScore, + present: presentCount, + substantial: substantialCount, + expected: expectedSections.length, + missing: expectedSections.filter(s => !doc.content.match(new RegExp(`##\\s+${s}`, "i"))) + }) + } + + const overallScore = totalWeight > 0 ? (totalScore / totalWeight) * 100 : 0 + + return { + score: overallScore, + weight: 25, + weighted_score: overallScore * 0.25, + details: details + } +} +``` + +### 2. Consistency (Weight: 20%) + +```javascript +function scoreConsistency(specDocs) { + const issues = [] + + // 1. Terminology consistency + const terminologyMap = new Map() + + for (const doc of specDocs) { + // Extract key terms (capitalized phrases, technical terms) + const terms = doc.content.match(/\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b/g) || [] + + terms.forEach(term => { + const normalized = term.toLowerCase() + if (!terminologyMap.has(normalized)) { + terminologyMap.set(normalized, new Set()) + } + terminologyMap.get(normalized).add(term) + }) + } + + // Find inconsistent terminology (same concept, different casing/spelling) + terminologyMap.forEach((variants, normalized) => { + if (variants.size > 1) { + issues.push({ + type: "terminology", + severity: "medium", + message: `Inconsistent terminology: ${[...variants].join(", ")}`, + suggestion: `Standardize to one variant` + }) + } + }) + + // 2. 
Format consistency + const headerStyles = new Map() + for (const doc of specDocs) { + const headers = doc.content.match(/^#{1,6}\s+.+$/gm) || [] + headers.forEach(header => { + const level = header.match(/^#+/)[0].length + const style = header.includes("**") ? "bold" : "plain" + const key = `level-${level}` + + if (!headerStyles.has(key)) { + headerStyles.set(key, new Set()) + } + headerStyles.get(key).add(style) + }) + } + + headerStyles.forEach((styles, level) => { + if (styles.size > 1) { + issues.push({ + type: "format", + severity: "low", + message: `Inconsistent header style at ${level}: ${[...styles].join(", ")}`, + suggestion: "Use consistent header formatting" + }) + } + }) + + // 3. Reference consistency + const references = new Map() + for (const doc of specDocs) { + // Extract references to other documents/sections + const refs = doc.content.match(/\[.*?\]\(.*?\)/g) || [] + refs.forEach(ref => { + const linkMatch = ref.match(/\((.*?)\)/) + if (linkMatch) { + const link = linkMatch[1] + if (!references.has(link)) { + references.set(link, []) + } + references.get(link).push(doc.phase) + } + }) + } + + // Check for broken references + references.forEach((sources, link) => { + if (link.startsWith("./") || link.startsWith("../")) { + // Check if file exists + const exists = Bash(`test -f ${link}`).exitCode === 0 + if (!exists) { + issues.push({ + type: "reference", + severity: "high", + message: `Broken reference: ${link} (referenced in ${sources.join(", ")})`, + suggestion: "Fix or remove broken reference" + }) + } + } + }) + + // 4. 
Naming convention consistency + const namingPatterns = { + camelCase: /\b[a-z]+(?:[A-Z][a-z]+)+\b/g, + PascalCase: /\b[A-Z][a-z]+(?:[A-Z][a-z]+)+\b/g, + snake_case: /\b[a-z]+(?:_[a-z]+)+\b/g, + kebab_case: /\b[a-z]+(?:-[a-z]+)+\b/g + } + + const namingCounts = {} + for (const doc of specDocs) { + Object.entries(namingPatterns).forEach(([pattern, regex]) => { + const matches = doc.content.match(regex) || [] + namingCounts[pattern] = (namingCounts[pattern] || 0) + matches.length + }) + } + + const dominantPattern = Object.entries(namingCounts) + .sort((a, b) => b[1] - a[1])[0]?.[0] + + Object.entries(namingCounts).forEach(([pattern, count]) => { + if (pattern !== dominantPattern && count > 10) { + issues.push({ + type: "naming", + severity: "low", + message: `Mixed naming conventions: ${pattern} (${count} occurrences) vs ${dominantPattern}`, + suggestion: `Standardize to ${dominantPattern}` + }) + } + }) + + // Calculate score based on issues + const severityWeights = { high: 10, medium: 5, low: 2 } + const totalPenalty = issues.reduce((sum, issue) => sum + severityWeights[issue.severity], 0) + const maxPenalty = 100 // Arbitrary max for normalization + + const score = Math.max(0, 100 - (totalPenalty / maxPenalty) * 100) + + return { + score: score, + weight: 20, + weighted_score: score * 0.20, + issues: issues, + details: { + terminology_issues: issues.filter(i => i.type === "terminology").length, + format_issues: issues.filter(i => i.type === "format").length, + reference_issues: issues.filter(i => i.type === "reference").length, + naming_issues: issues.filter(i => i.type === "naming").length + } + } +} +``` + +### 3. 
Traceability (Weight: 25%) + +```javascript +function scoreTraceability(specDocs) { + const chains = [] + + // Extract traceability elements + const goals = extractElements(specDocs, "product-brief", /^[-*]\s+Goal:\s*(.+)$/gm) + const requirements = extractElements(specDocs, "prd", /^[-*]\s+(?:REQ-\d+|Requirement):\s*(.+)$/gm) + const components = extractElements(specDocs, "architecture", /^[-*]\s+(?:Component|Module):\s*(.+)$/gm) + const stories = extractElements(specDocs, "user-stories", /^[-*]\s+(?:US-\d+|Story):\s*(.+)$/gm) + + // Build traceability chains: Goals โ†’ Requirements โ†’ Components โ†’ Stories + for (const goal of goals) { + const chain = { + goal: goal.text, + requirements: [], + components: [], + stories: [], + complete: false + } + + // Find requirements that reference this goal + const goalKeywords = extractKeywords(goal.text) + for (const req of requirements) { + if (hasKeywordOverlap(req.text, goalKeywords, 0.3)) { + chain.requirements.push(req.text) + + // Find components that implement this requirement + const reqKeywords = extractKeywords(req.text) + for (const comp of components) { + if (hasKeywordOverlap(comp.text, reqKeywords, 0.3)) { + chain.components.push(comp.text) + } + } + + // Find stories that implement this requirement + for (const story of stories) { + if (hasKeywordOverlap(story.text, reqKeywords, 0.3)) { + chain.stories.push(story.text) + } + } + } + } + + // Check if chain is complete + chain.complete = chain.requirements.length > 0 && + chain.components.length > 0 && + chain.stories.length > 0 + + chains.push(chain) + } + + // Calculate score + const completeChains = chains.filter(c => c.complete).length + const totalChains = chains.length + const score = totalChains > 0 ? 
(completeChains / totalChains) * 100 : 0 + + // Identify weak links + const weakLinks = [] + chains.forEach((chain, idx) => { + if (!chain.complete) { + if (chain.requirements.length === 0) { + weakLinks.push(`Goal ${idx + 1} has no linked requirements`) + } + if (chain.components.length === 0) { + weakLinks.push(`Goal ${idx + 1} has no linked components`) + } + if (chain.stories.length === 0) { + weakLinks.push(`Goal ${idx + 1} has no linked stories`) + } + } + }) + + return { + score: score, + weight: 25, + weighted_score: score * 0.25, + details: { + total_chains: totalChains, + complete_chains: completeChains, + weak_links: weakLinks + }, + chains: chains + } +} + +function extractElements(specDocs, phase, regex) { + const elements = [] + const doc = specDocs.find(d => d.phase === phase) + + if (doc) { + let match + while ((match = regex.exec(doc.content)) !== null) { + elements.push({ + text: match[1].trim(), + phase: phase + }) + } + } + + return elements +} + +function extractKeywords(text) { + // Extract meaningful words (4+ chars, not common words) + const commonWords = new Set(["that", "this", "with", "from", "have", "will", "should", "must", "can"]) + const words = text.toLowerCase().match(/\b\w{4,}\b/g) || [] + return words.filter(w => !commonWords.has(w)) +} + +function hasKeywordOverlap(text, keywords, threshold) { + const textLower = text.toLowerCase() + const matchCount = keywords.filter(kw => textLower.includes(kw)).length + return matchCount / keywords.length >= threshold +} +``` + +### 4. Depth (Weight: 20%) + +```javascript +function scoreDepth(specDocs) { + const dimensions = [] + + // 1. 
Acceptance Criteria Testability + const acDoc = specDocs.find(d => d.phase === "prd" || d.phase === "user-stories") + if (acDoc) { + const acMatches = acDoc.content.match(/Acceptance Criteria:[\s\S]*?(?=\n##|\n\n[-*]|$)/gi) || [] + let testableCount = 0 + let totalCount = 0 + + acMatches.forEach(section => { + const criteria = section.match(/^[-*]\s+(.+)$/gm) || [] + totalCount += criteria.length + + criteria.forEach(criterion => { + // Testable if contains measurable verbs or specific conditions + const testablePatterns = [ + /\b(should|must|will)\s+(display|show|return|validate|check|verify|calculate|send|receive)\b/i, + /\b(when|if|given)\b.*\b(then|should|must)\b/i, + /\b\d+\b/, // Contains numbers (measurable) + /\b(success|error|fail|pass)\b/i + ] + + const isTestable = testablePatterns.some(pattern => pattern.test(criterion)) + if (isTestable) testableCount++ + }) + }) + + const acScore = totalCount > 0 ? (testableCount / totalCount) * 100 : 0 + dimensions.push({ + name: "Acceptance Criteria Testability", + score: acScore, + testable: testableCount, + total: totalCount + }) + } + + // 2. ADR Justification + const archDoc = specDocs.find(d => d.phase === "architecture") + if (archDoc) { + const adrMatches = archDoc.content.match(/##\s+(?:ADR|Decision)[\s\S]*?(?=\n##|$)/gi) || [] + let justifiedCount = 0 + let totalCount = adrMatches.length + + adrMatches.forEach(adr => { + // Justified if contains rationale, alternatives, or consequences + const hasJustification = adr.match(/\b(rationale|reason|because|alternative|consequence|trade-?off)\b/i) + if (hasJustification) justifiedCount++ + }) + + const adrScore = totalCount > 0 ? (justifiedCount / totalCount) * 100 : 100 // Default 100 if no ADRs + dimensions.push({ + name: "ADR Justification", + score: adrScore, + justified: justifiedCount, + total: totalCount + }) + } + + // 3. 
User Stories Estimability + const storiesDoc = specDocs.find(d => d.phase === "user-stories") + if (storiesDoc) { + const storyMatches = storiesDoc.content.match(/^[-*]\s+(?:US-\d+|Story)[\s\S]*?(?=\n[-*]|$)/gim) || [] + let estimableCount = 0 + let totalCount = storyMatches.length + + storyMatches.forEach(story => { + // Estimable if has clear scope, AC, and no ambiguity + const hasScope = story.match(/\b(as a|I want|so that)\b/i) + const hasAC = story.match(/acceptance criteria/i) + const hasEstimate = story.match(/\b(points?|hours?|days?|estimate)\b/i) + + if ((hasScope && hasAC) || hasEstimate) estimableCount++ + }) + + const storiesScore = totalCount > 0 ? (estimableCount / totalCount) * 100 : 0 + dimensions.push({ + name: "User Stories Estimability", + score: storiesScore, + estimable: estimableCount, + total: totalCount + }) + } + + // 4. Technical Detail Sufficiency + const techDocs = specDocs.filter(d => d.phase === "architecture" || d.phase === "implementation-plan") + let detailScore = 0 + + if (techDocs.length > 0) { + const detailIndicators = [ + /```[\s\S]*?```/, // Code blocks + /\b(API|endpoint|schema|model|interface|class|function)\b/i, + /\b(GET|POST|PUT|DELETE|PATCH)\b/, // HTTP methods + /\b(database|table|collection|index)\b/i, + /\b(authentication|authorization|security)\b/i + ] + + let indicatorCount = 0 + techDocs.forEach(doc => { + detailIndicators.forEach(pattern => { + if (pattern.test(doc.content)) indicatorCount++ + }) + }) + + detailScore = Math.min(100, (indicatorCount / (detailIndicators.length * techDocs.length)) * 100) + dimensions.push({ + name: "Technical Detail Sufficiency", + score: detailScore, + indicators_found: indicatorCount, + indicators_expected: detailIndicators.length * techDocs.length + }) + } + + // Calculate overall depth score + const overallScore = dimensions.reduce((sum, d) => sum + d.score, 0) / dimensions.length + + return { + score: overallScore, + weight: 20, + weighted_score: overallScore * 0.20, + 
dimensions: dimensions + } +} +``` + +### 5. Requirement Coverage (Weight: 10%) + +```javascript +function scoreRequirementCoverage(specDocs, originalRequirements) { + // Extract original requirements from task description or initial brief + const originalReqs = originalRequirements || extractOriginalRequirements(specDocs) + + if (originalReqs.length === 0) { + return { + score: 100, // No requirements to cover + weight: 10, + weighted_score: 10, + details: { + total: 0, + covered: 0, + uncovered: [] + } + } + } + + // Extract all requirements from spec documents + const specReqs = [] + for (const doc of specDocs) { + const reqMatches = doc.content.match(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*(.+)$/gm) || [] + reqMatches.forEach(match => { + specReqs.push(match.replace(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*/, "").trim()) + }) + } + + // Map original requirements to spec requirements + const coverage = [] + for (const origReq of originalReqs) { + const keywords = extractKeywords(origReq) + const covered = specReqs.some(specReq => hasKeywordOverlap(specReq, keywords, 0.4)) + + coverage.push({ + requirement: origReq, + covered: covered + }) + } + + const coveredCount = coverage.filter(c => c.covered).length + const score = (coveredCount / originalReqs.length) * 100 + + return { + score: score, + weight: 10, + weighted_score: score * 0.10, + details: { + total: originalReqs.length, + covered: coveredCount, + uncovered: coverage.filter(c => !c.covered).map(c => c.requirement) + } + } +} + +function extractOriginalRequirements(specDocs) { + // Try to find original requirements in product brief + const briefDoc = specDocs.find(d => d.phase === "product-brief") + if (!briefDoc) return [] + + const reqSection = briefDoc.content.match(/##\s+(?:Requirements|Objectives)[\s\S]*?(?=\n##|$)/i) + if (!reqSection) return [] + + const reqs = reqSection[0].match(/^[-*]\s+(.+)$/gm) || [] + return reqs.map(r => r.replace(/^[-*]\s+/, "").trim()) +} +``` + +## Quality Gate 
Determination + +```javascript +function determineQualityGate(overallScore, coverageScore) { + // PASS: Score โ‰ฅ80% AND coverage โ‰ฅ70% + if (overallScore >= 80 && coverageScore >= 70) { + return { + gate: "PASS", + message: "Specification meets quality standards and is ready for implementation", + action: "Proceed to implementation phase" + } + } + + // FAIL: Score <60% OR coverage <50% + if (overallScore < 60 || coverageScore < 50) { + return { + gate: "FAIL", + message: "Specification requires major revisions before implementation", + action: "Address critical gaps and resubmit for review" + } + } + + // REVIEW: Between PASS and FAIL + return { + gate: "REVIEW", + message: "Specification needs improvements but may proceed with caution", + action: "Address recommendations and consider re-review" + } +} +``` + +## Readiness Report Generation + +```javascript +function formatReadinessReport(report, specDocs) { + const { overall_score, quality_gate, dimensions, phase_gates } = report + + let markdown = `# Specification Readiness Report\n\n` + markdown += `**Generated**: ${new Date().toISOString()}\n\n` + markdown += `**Overall Score**: ${overall_score.toFixed(1)}%\n\n` + markdown += `**Quality Gate**: ${quality_gate.gate} - ${quality_gate.message}\n\n` + markdown += `**Recommended Action**: ${quality_gate.action}\n\n` + + markdown += `---\n\n` + + markdown += `## Dimension Scores\n\n` + markdown += `| Dimension | Score | Weight | Weighted Score |\n` + markdown += `|-----------|-------|--------|----------------|\n` + + Object.entries(dimensions).forEach(([name, data]) => { + markdown += `| ${name} | ${data.score.toFixed(1)}% | ${data.weight}% | ${data.weighted_score.toFixed(1)}% |\n` + }) + + markdown += `\n---\n\n` + + // Completeness Details + markdown += `## Completeness Analysis\n\n` + dimensions.completeness.details.forEach(detail => { + markdown += `### ${detail.phase}\n` + markdown += `- Score: ${detail.score.toFixed(1)}%\n` + markdown += `- Sections Present: 
${detail.present}/${detail.expected}\n` + markdown += `- Substantial Content: ${detail.substantial}/${detail.expected}\n` + if (detail.missing.length > 0) { + markdown += `- Missing: ${detail.missing.join(", ")}\n` + } + markdown += `\n` + }) + + // Consistency Details + markdown += `## Consistency Analysis\n\n` + if (dimensions.consistency.issues.length > 0) { + markdown += `**Issues Found**: ${dimensions.consistency.issues.length}\n\n` + dimensions.consistency.issues.forEach(issue => { + markdown += `- **${issue.severity.toUpperCase()}**: ${issue.message}\n` + markdown += ` *Suggestion*: ${issue.suggestion}\n\n` + }) + } else { + markdown += `No consistency issues found.\n\n` + } + + // Traceability Details + markdown += `## Traceability Analysis\n\n` + markdown += `- Complete Chains: ${dimensions.traceability.details.complete_chains}/${dimensions.traceability.details.total_chains}\n\n` + if (dimensions.traceability.details.weak_links.length > 0) { + markdown += `**Weak Links**:\n` + dimensions.traceability.details.weak_links.forEach(link => { + markdown += `- ${link}\n` + }) + markdown += `\n` + } + + // Depth Details + markdown += `## Depth Analysis\n\n` + dimensions.depth.dimensions.forEach(dim => { + markdown += `### ${dim.name}\n` + markdown += `- Score: ${dim.score.toFixed(1)}%\n` + if (dim.testable !== undefined) { + markdown += `- Testable: ${dim.testable}/${dim.total}\n` + } + if (dim.justified !== undefined) { + markdown += `- Justified: ${dim.justified}/${dim.total}\n` + } + if (dim.estimable !== undefined) { + markdown += `- Estimable: ${dim.estimable}/${dim.total}\n` + } + markdown += `\n` + }) + + // Coverage Details + markdown += `## Requirement Coverage\n\n` + markdown += `- Covered: ${dimensions.coverage.details.covered}/${dimensions.coverage.details.total}\n` + if (dimensions.coverage.details.uncovered.length > 0) { + markdown += `\n**Uncovered Requirements**:\n` + dimensions.coverage.details.uncovered.forEach(req => { + markdown += `- ${req}\n` 
+ }) + } + markdown += `\n` + + // Phase Gates + if (phase_gates) { + markdown += `---\n\n` + markdown += `## Phase-Level Quality Gates\n\n` + Object.entries(phase_gates).forEach(([phase, gate]) => { + markdown += `### ${phase}\n` + markdown += `- Gate: ${gate.status}\n` + markdown += `- Score: ${gate.score.toFixed(1)}%\n` + if (gate.issues.length > 0) { + markdown += `- Issues: ${gate.issues.join(", ")}\n` + } + markdown += `\n` + }) + } + + return markdown +} +``` + +## Spec Summary Generation + +```javascript +function formatSpecSummary(specDocs, report) { + let markdown = `# Specification Summary\n\n` + + markdown += `**Overall Quality Score**: ${report.overall_score.toFixed(1)}%\n` + markdown += `**Quality Gate**: ${report.quality_gate.gate}\n\n` + + markdown += `---\n\n` + + // Document Overview + markdown += `## Documents Reviewed\n\n` + specDocs.forEach(doc => { + markdown += `### ${doc.phase}\n` + markdown += `- Path: ${doc.path}\n` + markdown += `- Size: ${doc.content.length} characters\n` + + // Extract key sections + const sections = doc.content.match(/^##\s+(.+)$/gm) || [] + if (sections.length > 0) { + markdown += `- Sections: ${sections.map(s => s.replace(/^##\s+/, "")).join(", ")}\n` + } + markdown += `\n` + }) + + markdown += `---\n\n` + + // Key Findings + markdown += `## Key Findings\n\n` + + // Strengths + const strengths = [] + Object.entries(report.dimensions).forEach(([name, data]) => { + if (data.score >= 80) { + strengths.push(`${name}: ${data.score.toFixed(1)}%`) + } + }) + + if (strengths.length > 0) { + markdown += `### Strengths\n` + strengths.forEach(s => markdown += `- ${s}\n`) + markdown += `\n` + } + + // Areas for Improvement + const improvements = [] + Object.entries(report.dimensions).forEach(([name, data]) => { + if (data.score < 70) { + improvements.push(`${name}: ${data.score.toFixed(1)}%`) + } + }) + + if (improvements.length > 0) { + markdown += `### Areas for Improvement\n` + improvements.forEach(i => markdown += `- 
${i}\n`) + markdown += `\n` + } + + // Recommendations + if (report.recommendations && report.recommendations.length > 0) { + markdown += `### Recommendations\n` + report.recommendations.forEach((rec, i) => { + markdown += `${i + 1}. ${rec}\n` + }) + markdown += `\n` + } + + return markdown +} +``` + +## Phase-Level Quality Gates + +```javascript +function calculatePhaseGates(specDocs) { + const gates = {} + + for (const doc of specDocs) { + const phase = doc.phase + const issues = [] + let score = 100 + + // Check minimum content threshold + if (doc.content.length < 500) { + issues.push("Insufficient content") + score -= 30 + } + + // Check for required sections (phase-specific) + const requiredSections = getRequiredSections(phase) + const missingSections = requiredSections.filter(section => + !doc.content.match(new RegExp(`##\\s+${section}`, "i")) + ) + + if (missingSections.length > 0) { + issues.push(`Missing sections: ${missingSections.join(", ")}`) + score -= missingSections.length * 15 + } + + // Determine gate status + let status = "PASS" + if (score < 60) status = "FAIL" + else if (score < 80) status = "REVIEW" + + gates[phase] = { + status: status, + score: Math.max(0, score), + issues: issues + } + } + + return gates +} + +function getRequiredSections(phase) { + const sectionMap = { + "product-brief": ["Vision", "Problem", "Target Audience"], + "prd": ["Goals", "Requirements", "User Stories"], + "architecture": ["Overview", "Components", "Data Models"], + "user-stories": ["Stories", "Acceptance Criteria"], + "implementation-plan": ["Tasks", "Dependencies"], + "test-strategy": ["Test Cases", "Coverage"] + } + + return sectionMap[phase] || [] +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/role.md new file mode 100644 index 00000000..61f9702a --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/reviewer/role.md @@ -0,0 +1,429 @@ +# Reviewer Role + +## 1. 
Role Identity + +- **Name**: reviewer +- **Task Prefix**: REVIEW-* + QUALITY-* +- **Output Tag**: `[reviewer]` +- **Responsibility**: Discover Task โ†’ Branch by Prefix โ†’ Review/Score โ†’ Report + +## 2. Role Boundaries + +### MUST +- Only process REVIEW-* and QUALITY-* tasks +- Communicate only with coordinator +- Generate readiness-report.md for QUALITY tasks +- Tag all outputs with `[reviewer]` + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify source code +- Skip quality dimensions +- Approve without verification + +## 3. Message Types + +| Type | Direction | Purpose | Format | +|------|-----------|---------|--------| +| `task_request` | FROM coordinator | Receive REVIEW-*/QUALITY-* task assignment | `{ type: "task_request", task_id, description, review_mode }` | +| `task_complete` | TO coordinator | Report review success | `{ type: "task_complete", task_id, status: "success", verdict, score, issues }` | +| `task_failed` | TO coordinator | Report review failure | `{ type: "task_failed", task_id, error }` | + +## 4. Message Bus + +**Primary**: Use `team_msg` for all coordinator communication with `[reviewer]` tag: +```javascript +// Code review completion +team_msg({ + to: "coordinator", + type: "task_complete", + task_id: "REVIEW-001", + status: "success", + verdict: "APPROVE", + issues: { critical: 0, high: 2, medium: 5, low: 3 }, + recommendations: ["Fix console.log statements", "Add error handling"] +}, "[reviewer]") + +// Spec quality completion +team_msg({ + to: "coordinator", + type: "task_complete", + task_id: "QUALITY-001", + status: "success", + overall_score: 85.5, + quality_gate: "PASS", + dimensions: { + completeness: 90, + consistency: 85, + traceability: 80, + depth: 88, + coverage: 82 + } +}, "[reviewer]") +``` + +**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/reviewer-{timestamp}.json` + +## 5. 
Toolbox + +### Available Commands +- `commands/code-review.md` - 4-dimension code review (quality, security, architecture, requirements) +- `commands/spec-quality.md` - 5-dimension spec quality check (completeness, consistency, traceability, depth, coverage) + +### CLI Capabilities +- None (uses Grep-based analysis) + +## 6. Execution (5-Phase) - Dual-Prefix + +### Phase 1: Task Discovery + +**Dual Prefix Filter**: +```javascript +const tasks = Glob(".workflow/.team/tasks/{REVIEW,QUALITY}-*.json") + .filter(task => task.status === "pending" && task.assigned_to === "reviewer") + +// Determine review mode +const reviewMode = task.task_id.startsWith("REVIEW-") ? "code" : "spec" +``` + +### Phase 2: Context Loading (Branch by Mode) + +**Code Review Context (REVIEW-*)**: +```javascript +if (reviewMode === "code") { + // Load plan + const planPath = task.metadata?.plan_path || ".workflow/plan.md" + const plan = Read(planPath) + + // Get git diff + const implTaskId = task.metadata?.impl_task_id + const gitDiff = Bash("git diff HEAD").stdout + + // Load modified files + const modifiedFiles = Bash("git diff --name-only HEAD").stdout.split("\n").filter(Boolean) + const fileContents = modifiedFiles.map(f => ({ + path: f, + content: Read(f) + })) + + // Load test results if available + const testTaskId = task.metadata?.test_task_id + const testResults = testTaskId ? 
Read(`.workflow/.team/tasks/${testTaskId}.json`) : null +} +``` + +**Spec Quality Context (QUALITY-*)**: +```javascript +if (reviewMode === "spec") { + // Load session folder + const sessionFolder = task.metadata?.session_folder || ".workflow/.sessions/latest" + + // Load quality gates + const qualityGates = task.metadata?.quality_gates || { + pass_threshold: 80, + fail_threshold: 60, + coverage_threshold: 70 + } + + // Load all spec documents + const specDocs = Glob(`${sessionFolder}/**/*.md`).map(path => ({ + path: path, + content: Read(path), + phase: extractPhase(path) + })) +} +``` + +### Phase 3: Review Execution (Delegate by Mode) + +**Code Review**: +```javascript +if (reviewMode === "code") { + const codeReviewCommand = Read("commands/code-review.md") + // Command handles: + // - reviewQuality (ts-ignore, any, console.log, empty catch) + // - reviewSecurity (eval/exec, secrets, SQL injection, XSS) + // - reviewArchitecture (parent imports, large files) + // - verifyRequirements (plan acceptance criteria vs implementation) + // - Verdict determination (BLOCK/CONDITIONAL/APPROVE) +} +``` + +**Spec Quality**: +```javascript +if (reviewMode === "spec") { + const specQualityCommand = Read("commands/spec-quality.md") + // Command handles: + // - scoreCompleteness (section content checks) + // - scoreConsistency (terminology, format, references) + // - scoreTraceability (goals โ†’ reqs โ†’ arch โ†’ stories chain) + // - scoreDepth (AC testable, ADRs justified, stories estimable) + // - scoreRequirementCoverage (original requirements โ†’ document mapping) + // - Quality gate determination (PASS โ‰ฅ80%, FAIL <60%, else REVIEW) + // - readiness-report.md generation + // - spec-summary.md generation +} +``` + +### Phase 4: Report Generation (Branch by Mode) + +**Code Review Report**: +```javascript +if (reviewMode === "code") { + const report = { + verdict: verdict, // BLOCK | CONDITIONAL | APPROVE + dimensions: { + quality: qualityIssues, + security: securityIssues, 
+ architecture: architectureIssues, + requirements: requirementIssues + }, + recommendations: recommendations, + blocking_issues: blockingIssues + } + + // Write review report + Write(`.workflow/.team/reviews/${task.task_id}-report.md`, formatCodeReviewReport(report)) +} +``` + +**Spec Quality Report**: +```javascript +if (reviewMode === "spec") { + const report = { + overall_score: overallScore, + quality_gate: qualityGate, // PASS | REVIEW | FAIL + dimensions: { + completeness: completenessScore, + consistency: consistencyScore, + traceability: traceabilityScore, + depth: depthScore, + coverage: coverageScore + }, + phase_gates: phaseGates, + recommendations: recommendations + } + + // Write readiness report + Write(`${sessionFolder}/readiness-report.md`, formatReadinessReport(report)) + + // Write spec summary + Write(`${sessionFolder}/spec-summary.md`, formatSpecSummary(specDocs, report)) +} +``` + +### Phase 5: Report to Coordinator (Branch by Mode) + +**Code Review Completion**: +```javascript +if (reviewMode === "code") { + team_msg({ + to: "coordinator", + type: "task_complete", + task_id: task.task_id, + status: "success", + verdict: verdict, + issues: { + critical: blockingIssues.length, + high: highIssues.length, + medium: mediumIssues.length, + low: lowIssues.length + }, + recommendations: recommendations, + report_path: `.workflow/.team/reviews/${task.task_id}-report.md`, + timestamp: new Date().toISOString() + }, "[reviewer]") +} +``` + +**Spec Quality Completion**: +```javascript +if (reviewMode === "spec") { + team_msg({ + to: "coordinator", + type: "task_complete", + task_id: task.task_id, + status: "success", + overall_score: overallScore, + quality_gate: qualityGate, + dimensions: { + completeness: completenessScore, + consistency: consistencyScore, + traceability: traceabilityScore, + depth: depthScore, + coverage: coverageScore + }, + report_path: `${sessionFolder}/readiness-report.md`, + summary_path: `${sessionFolder}/spec-summary.md`, + 
timestamp: new Date().toISOString() + }, "[reviewer]") +} +``` + +## 7. Code Review Dimensions + +### Quality Dimension + +**Anti-patterns**: +- `@ts-ignore` / `@ts-expect-error` without justification +- `any` type usage +- `console.log` in production code +- Empty catch blocks +- Magic numbers +- Duplicate code + +**Severity**: +- Critical: Empty catch, any in public APIs +- High: @ts-ignore without comment, console.log +- Medium: Magic numbers, duplicate code +- Low: Minor style issues + +### Security Dimension + +**Vulnerabilities**: +- `eval()` / `exec()` usage +- `innerHTML` / `dangerouslySetInnerHTML` +- Hardcoded secrets (API keys, passwords) +- SQL injection vectors +- XSS vulnerabilities +- Insecure dependencies + +**Severity**: +- Critical: Hardcoded secrets, SQL injection +- High: eval/exec, innerHTML +- Medium: Insecure dependencies +- Low: Missing input validation + +### Architecture Dimension + +**Issues**: +- Parent directory imports (`../../../`) +- Large files (>500 lines) +- Circular dependencies +- Missing abstractions +- Tight coupling + +**Severity**: +- Critical: Circular dependencies +- High: Excessive parent imports (>2 levels) +- Medium: Large files, tight coupling +- Low: Minor structure issues + +### Requirements Dimension + +**Verification**: +- Acceptance criteria coverage +- Feature completeness +- Edge case handling +- Error handling + +**Severity**: +- Critical: Missing core functionality +- High: Incomplete acceptance criteria +- Medium: Missing edge cases +- Low: Minor feature gaps + +## 8. 
Spec Quality Dimensions + +### Completeness (Weight: 25%) + +**Checks**: +- All required sections present +- Section content depth (not just headers) +- Cross-phase coverage +- Artifact completeness + +**Scoring**: +- 100%: All sections with substantial content +- 75%: All sections present, some thin +- 50%: Missing 1-2 sections +- 25%: Missing 3+ sections +- 0%: Critical sections missing + +### Consistency (Weight: 20%) + +**Checks**: +- Terminology consistency +- Format consistency +- Reference consistency +- Naming conventions + +**Scoring**: +- 100%: Fully consistent +- 75%: Minor inconsistencies (1-2) +- 50%: Moderate inconsistencies (3-5) +- 25%: Major inconsistencies (6+) +- 0%: Chaotic inconsistency + +### Traceability (Weight: 25%) + +**Checks**: +- Goals โ†’ Requirements chain +- Requirements โ†’ Architecture chain +- Architecture โ†’ User Stories chain +- Bidirectional references + +**Scoring**: +- 100%: Full traceability chain +- 75%: 1 weak link +- 50%: 2 weak links +- 25%: 3+ weak links +- 0%: No traceability + +### Depth (Weight: 20%) + +**Checks**: +- Acceptance criteria testable +- ADRs justified +- User stories estimable +- Technical details sufficient + +**Scoring**: +- 100%: All items detailed +- 75%: 1-2 shallow items +- 50%: 3-5 shallow items +- 25%: 6+ shallow items +- 0%: All items shallow + +### Coverage (Weight: 10%) + +**Checks**: +- Original requirements mapped +- All features documented +- All constraints addressed +- All stakeholders considered + +**Scoring**: +- 100%: Full coverage (100%) +- 75%: High coverage (80-99%) +- 50%: Moderate coverage (60-79%) +- 25%: Low coverage (40-59%) +- 0%: Minimal coverage (<40%) + +## 9. 
Verdict/Gate Determination + +### Code Review Verdicts + +| Verdict | Criteria | Action | +|---------|----------|--------| +| **BLOCK** | Critical issues present | Must fix before merge | +| **CONDITIONAL** | High/medium issues only | Fix recommended, merge allowed | +| **APPROVE** | Low issues or none | Ready to merge | + +### Spec Quality Gates + +| Gate | Criteria | Action | +|------|----------|--------| +| **PASS** | Score โ‰ฅ80% AND coverage โ‰ฅ70% | Ready for implementation | +| **REVIEW** | Score 60-79% OR coverage 50-69% | Revisions recommended | +| **FAIL** | Score <60% OR coverage <50% | Major revisions required | + +## 10. Error Handling + +| Error Type | Recovery Strategy | Escalation | +|------------|-------------------|------------| +| Missing context | Request from coordinator | Immediate escalation | +| Invalid review mode | Abort with error | Report to coordinator | +| Analysis failure | Retry with verbose logging | Report after 2 failures | +| Report generation failure | Use fallback template | Report with partial results | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/tester/commands/validate.md b/.claude/skills_lib/team-lifecycle-v2/roles/tester/commands/validate.md new file mode 100644 index 00000000..05ec30c4 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/tester/commands/validate.md @@ -0,0 +1,538 @@ +# Validate Command + +## Purpose +Test-fix cycle with strategy engine for automated test failure resolution. 
+ +## Configuration + +```javascript +const MAX_ITERATIONS = 10 +const PASS_RATE_TARGET = 95 // percentage +``` + +## Main Iteration Loop + +```javascript +function runTestFixCycle(task, framework, affectedTests, modifiedFiles) { + let iteration = 0 + let bestPassRate = 0 + let bestResults = null + + while (iteration < MAX_ITERATIONS) { + iteration++ + + // Phase 1: Run Tests + const testCommand = buildTestCommand(framework, affectedTests, iteration === 1) + const testOutput = Bash(testCommand, { timeout: 120000 }) + let results = parseTestResults(testOutput.stdout + testOutput.stderr, framework) // 'let': reassigned below when the full suite is rerun + + let passRate = results.total > 0 ? (results.passed / results.total * 100) : 0 // 'let': reassigned below when the full suite is rerun + + // Track best result + if (passRate > bestPassRate) { + bestPassRate = passRate + bestResults = results + } + + // Progress update for long cycles + if (iteration > 5) { + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + iteration: iteration, + pass_rate: passRate.toFixed(1), + tests_passed: results.passed, + tests_failed: results.failed, + message: `Test-fix cycle iteration ${iteration}/${MAX_ITERATIONS}` + }, "[tester]") + } + + // Phase 2: Check Success + if (passRate >= PASS_RATE_TARGET) { + // Quality gate: Run full suite if only affected tests passed + if (affectedTests.length > 0 && iteration === 1) { + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + message: "Affected tests passed, running full suite..." + }, "[tester]") + + const fullSuiteCommand = buildTestCommand(framework, [], false) + const fullOutput = Bash(fullSuiteCommand, { timeout: 300000 }) + const fullResults = parseTestResults(fullOutput.stdout + fullOutput.stderr, framework) + const fullPassRate = fullResults.total > 0 ?
(fullResults.passed / fullResults.total * 100) : 0 + + if (fullPassRate >= PASS_RATE_TARGET) { + return { + success: true, + results: fullResults, + iterations: iteration, + full_suite_run: true + } + } else { + // Full suite failed, continue fixing + results = fullResults + passRate = fullPassRate + } + } else { + return { + success: true, + results: results, + iterations: iteration, + full_suite_run: affectedTests.length === 0 + } + } + } + + // Phase 3: Analyze Failures + if (results.failures.length === 0) { + break // No failures to fix + } + + const classified = classifyFailures(results.failures) + + // Phase 4: Select Strategy + const strategy = selectStrategy(iteration, passRate, results.failures) + + team_msg({ + to: "coordinator", + type: "progress_update", + task_id: task.task_id, + iteration: iteration, + strategy: strategy, + failures: { + critical: classified.critical.length, + high: classified.high.length, + medium: classified.medium.length, + low: classified.low.length + } + }, "[tester]") + + // Phase 5: Apply Fixes + const fixResult = applyFixes(strategy, results.failures, framework, modifiedFiles) + + if (!fixResult.success) { + // Fix application failed, try next iteration with different strategy + continue + } + } + + // Max iterations reached + return { + success: false, + results: bestResults, + iterations: MAX_ITERATIONS, + best_pass_rate: bestPassRate, + error: "Max iterations reached without achieving target pass rate" + } +} +``` + +## Strategy Selection + +```javascript +function selectStrategy(iteration, passRate, failures) { + const classified = classifyFailures(failures) + + // Conservative: Early iterations or high pass rate + if (iteration <= 3 || passRate >= 80) { + return "conservative" + } + + // Surgical: Specific failure patterns + if (classified.critical.length > 0 && classified.critical.length < 5) { + return "surgical" + } + + // Aggressive: Low pass rate or many iterations + if (passRate < 50 || iteration > 7) { + return 
"aggressive" + } + + return "conservative" +} +``` + +## Fix Application + +### Conservative Strategy + +```javascript +function applyConservativeFixes(failures, framework, modifiedFiles) { + const classified = classifyFailures(failures) + + // Fix only the first critical failure + if (classified.critical.length > 0) { + const failure = classified.critical[0] + return fixSingleFailure(failure, framework, modifiedFiles) + } + + // If no critical, fix first high priority + if (classified.high.length > 0) { + const failure = classified.high[0] + return fixSingleFailure(failure, framework, modifiedFiles) + } + + return { success: false, error: "No fixable failures found" } +} +``` + +### Surgical Strategy + +```javascript +function applySurgicalFixes(failures, framework, modifiedFiles) { + // Identify common pattern + const pattern = identifyCommonPattern(failures) + + if (!pattern) { + return { success: false, error: "No common pattern identified" } + } + + // Apply pattern-based fix across all occurrences + const fixes = [] + + for (const failure of failures) { + if (matchesPattern(failure, pattern)) { + const fix = generatePatternFix(failure, pattern, framework) + fixes.push(fix) + } + } + + // Apply all fixes in batch + for (const fix of fixes) { + applyFix(fix, modifiedFiles) + } + + return { + success: true, + fixes_applied: fixes.length, + pattern: pattern + } +} + +function identifyCommonPattern(failures) { + // Group failures by error type + const errorTypes = {} + + for (const failure of failures) { + const errorType = extractErrorType(failure.error) + if (!errorTypes[errorType]) { + errorTypes[errorType] = [] + } + errorTypes[errorType].push(failure) + } + + // Find most common error type + let maxCount = 0 + let commonPattern = null + + for (const [errorType, instances] of Object.entries(errorTypes)) { + if (instances.length > maxCount) { + maxCount = instances.length + commonPattern = { + type: errorType, + instances: instances, + count: instances.length + 
} + } + } + + return maxCount >= 3 ? commonPattern : null +} + +function extractErrorType(error) { + const errorLower = error.toLowerCase() + + if (errorLower.includes("cannot find module")) return "missing_import" + if (errorLower.includes("is not defined")) return "undefined_variable" + if (errorLower.includes("expected") && errorLower.includes("received")) return "assertion_mismatch" + if (errorLower.includes("timeout")) return "timeout" + if (errorLower.includes("syntaxerror")) return "syntax_error" + + return "unknown" +} +``` + +### Aggressive Strategy + +```javascript +function applyAggressiveFixes(failures, framework, modifiedFiles) { + const classified = classifyFailures(failures) + const fixes = [] + + // Fix all critical failures + for (const failure of classified.critical) { + const fix = generateFix(failure, framework, modifiedFiles) + if (fix) { + fixes.push(fix) + } + } + + // Fix all high priority failures + for (const failure of classified.high) { + const fix = generateFix(failure, framework, modifiedFiles) + if (fix) { + fixes.push(fix) + } + } + + // Apply all fixes + for (const fix of fixes) { + applyFix(fix, modifiedFiles) + } + + return { + success: fixes.length > 0, + fixes_applied: fixes.length + } +} +``` + +### Fix Generation + +```javascript +function generateFix(failure, framework, modifiedFiles) { + const errorType = extractErrorType(failure.error) + + switch (errorType) { + case "missing_import": + return generateImportFix(failure, modifiedFiles) + + case "undefined_variable": + return generateVariableFix(failure, modifiedFiles) + + case "assertion_mismatch": + return generateAssertionFix(failure, framework) + + case "timeout": + return generateTimeoutFix(failure, framework) + + case "syntax_error": + return generateSyntaxFix(failure, modifiedFiles) + + default: + return null + } +} + +function generateImportFix(failure, modifiedFiles) { + // Extract module name from error + const moduleMatch = failure.error.match(/Cannot find module 
['"](.+?)['"]/) + if (!moduleMatch) return null + + const moduleName = moduleMatch[1] + + // Find test file + const testFile = extractTestFile(failure.test) + if (!testFile) return null + + // Check if module exists in modified files + const sourceFile = modifiedFiles.find(f => + f.includes(moduleName) || f.endsWith(`${moduleName}.ts`) || f.endsWith(`${moduleName}.js`) + ) + + if (!sourceFile) return null + + // Generate import statement + const relativePath = calculateRelativePath(testFile, sourceFile) + const importStatement = `import { } from '${relativePath}'` + + return { + file: testFile, + type: "add_import", + content: importStatement, + line: 1 // Add at top of file + } +} + +function generateAssertionFix(failure, framework) { + // Extract expected vs received values + const expectedMatch = failure.error.match(/Expected:\s*(.+?)(?:\n|$)/) + const receivedMatch = failure.error.match(/Received:\s*(.+?)(?:\n|$)/) + + if (!expectedMatch || !receivedMatch) return null + + const expected = expectedMatch[1].trim() + const received = receivedMatch[1].trim() + + // Find test file and line + const testFile = extractTestFile(failure.test) + const testLine = extractTestLine(failure.error) + + if (!testFile || !testLine) return null + + return { + file: testFile, + type: "update_assertion", + line: testLine, + old_value: expected, + new_value: received, + note: "Auto-updated assertion based on actual behavior" + } +} +``` + +## Test Result Parsing + +```javascript +function parseTestResults(output, framework) { + const results = { + total: 0, + passed: 0, + failed: 0, + skipped: 0, + failures: [] + } + + if (framework === "jest" || framework === "vitest") { + // Parse summary line + const summaryMatch = output.match(/Tests:\s+(?:(\d+)\s+failed,\s+)?(?:(\d+)\s+passed,\s+)?(\d+)\s+total/) + if (summaryMatch) { + results.failed = summaryMatch[1] ? parseInt(summaryMatch[1]) : 0 + results.passed = summaryMatch[2] ? 
parseInt(summaryMatch[2]) : 0 + results.total = parseInt(summaryMatch[3]) + } + + // Alternative format + if (results.total === 0) { + const altMatch = output.match(/(\d+)\s+passed.*?(\d+)\s+total/) + if (altMatch) { + results.passed = parseInt(altMatch[1]) + results.total = parseInt(altMatch[2]) + results.failed = results.total - results.passed + } + } + + // Extract failure details + const failureRegex = /โ—\s+(.*?)\n\n([\s\S]*?)(?=\n\nโ—|\n\nTest Suites:|\n\n$)/g + let match + while ((match = failureRegex.exec(output)) !== null) { + results.failures.push({ + test: match[1].trim(), + error: match[2].trim() + }) + } + + } else if (framework === "pytest") { + // Parse pytest summary + const summaryMatch = output.match(/=+\s+(?:(\d+)\s+failed,?\s+)?(?:(\d+)\s+passed)?/) + if (summaryMatch) { + results.failed = summaryMatch[1] ? parseInt(summaryMatch[1]) : 0 + results.passed = summaryMatch[2] ? parseInt(summaryMatch[2]) : 0 + results.total = results.failed + results.passed + } + + // Extract failure details + const failureRegex = /FAILED\s+(.*?)\s+-\s+([\s\S]*?)(?=\n_+|FAILED|=+\s+\d+)/g + let match + while ((match = failureRegex.exec(output)) !== null) { + results.failures.push({ + test: match[1].trim(), + error: match[2].trim() + }) + } + } + + return results +} +``` + +## Test Command Building + +```javascript +function buildTestCommand(framework, affectedTests, isFirstRun) { + const testFiles = affectedTests.length > 0 ? affectedTests.join(" ") : "" + + switch (framework) { + case "vitest": + return testFiles + ? `vitest run ${testFiles} --reporter=verbose` + : `vitest run --reporter=verbose` + + case "jest": + return testFiles + ? `jest ${testFiles} --no-coverage --verbose` + : `jest --no-coverage --verbose` + + case "mocha": + return testFiles + ? `mocha ${testFiles} --reporter spec` + : `mocha --reporter spec` + + case "pytest": + return testFiles + ? 
`pytest ${testFiles} -v --tb=short` + : `pytest -v --tb=short` + + default: + throw new Error(`Unsupported test framework: ${framework}`) + } +} +``` + +## Utility Functions + +### Extract Test File + +```javascript +function extractTestFile(testName) { + // Extract file path from test name + // Format: "path/to/file.test.ts > describe block > test name" + const fileMatch = testName.match(/^(.*?\.(?:test|spec)\.[jt]sx?)/) + return fileMatch ? fileMatch[1] : null +} +``` + +### Extract Test Line + +```javascript +function extractTestLine(error) { + // Extract line number from error stack + const lineMatch = error.match(/:(\d+):\d+/) + return lineMatch ? parseInt(lineMatch[1]) : null +} +``` + +### Calculate Relative Path + +```javascript +function calculateRelativePath(fromFile, toFile) { + const fromParts = fromFile.split("/") + const toParts = toFile.split("/") + + // Remove filename + fromParts.pop() + + // Find common base + let commonLength = 0 + while (commonLength < fromParts.length && + commonLength < toParts.length && + fromParts[commonLength] === toParts[commonLength]) { + commonLength++ + } + + // Build relative path + const upLevels = fromParts.length - commonLength + const downPath = toParts.slice(commonLength) + + const relativeParts = [] + for (let i = 0; i < upLevels; i++) { + relativeParts.push("..") + } + relativeParts.push(...downPath) + + let path = relativeParts.join("/") + + // Remove file extension + path = path.replace(/\.[jt]sx?$/, "") + + // Ensure starts with ./ + if (!path.startsWith(".")) { + path = "./" + path + } + + return path +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/tester/role.md b/.claude/skills_lib/team-lifecycle-v2/roles/tester/role.md new file mode 100644 index 00000000..83283ea0 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/tester/role.md @@ -0,0 +1,385 @@ +# Tester Role + +## 1. 
Role Identity + +- **Name**: tester +- **Task Prefix**: TEST-* +- **Output Tag**: `[tester]` +- **Responsibility**: Detect Framework โ†’ Run Tests โ†’ Fix Cycle โ†’ Report Results + +## 2. Role Boundaries + +### MUST +- Only process TEST-* tasks +- Communicate only with coordinator +- Use detected test framework +- Run affected tests before full suite +- Tag all outputs with `[tester]` + +### MUST NOT +- Create tasks +- Contact other workers directly +- Modify production code beyond test fixes +- Skip framework detection +- Run full suite without affected tests first + +## 3. Message Types + +| Type | Direction | Purpose | Format | +|------|-----------|---------|--------| +| `task_request` | FROM coordinator | Receive TEST-* task assignment | `{ type: "task_request", task_id, description, impl_task_id }` | +| `task_complete` | TO coordinator | Report test success | `{ type: "task_complete", task_id, status: "success", pass_rate, tests_run, iterations }` | +| `task_failed` | TO coordinator | Report test failure | `{ type: "task_failed", task_id, error, failures, pass_rate }` | +| `progress_update` | TO coordinator | Report fix cycle progress | `{ type: "progress_update", task_id, iteration, pass_rate, strategy }` | + +## 4. Message Bus + +**Primary**: Use `team_msg` for all coordinator communication with `[tester]` tag: +```javascript +team_msg({ + to: "coordinator", + type: "task_complete", + task_id: "TEST-001", + status: "success", + pass_rate: 98.5, + tests_run: 45, + iterations: 3, + framework: "vitest" +}, "[tester]") +``` + +**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/tester-{timestamp}.json` + +## 5. Toolbox + +### Available Commands +- `commands/validate.md` - Test-fix cycle with strategy engine + +### CLI Capabilities +- None (uses project's test framework directly via Bash) + +## 6. 
Execution (5-Phase) + +### Phase 1: Task Discovery + +**Task Loading**: +```javascript +const tasks = Glob(".workflow/.team/tasks/TEST-*.json") + .filter(task => task.status === "pending" && task.assigned_to === "tester") +``` + +**Implementation Task Linking**: +```javascript +const implTaskId = task.metadata?.impl_task_id +const implTask = implTaskId ? Read(`.workflow/.team/tasks/${implTaskId}.json`) : null +const modifiedFiles = implTask?.metadata?.files_modified || [] +``` + +### Phase 2: Test Framework Detection + +**Framework Detection**: +```javascript +function detectTestFramework() { + // Check package.json for test frameworks + const packageJson = Read("package.json") + const pkg = JSON.parse(packageJson) + + // Priority 1: Check dependencies + if (pkg.devDependencies?.vitest || pkg.dependencies?.vitest) { + return "vitest" + } + if (pkg.devDependencies?.jest || pkg.dependencies?.jest) { + return "jest" + } + if (pkg.devDependencies?.mocha || pkg.dependencies?.mocha) { + return "mocha" + } + if (pkg.devDependencies?.pytest || pkg.dependencies?.pytest) { + return "pytest" + } + + // Priority 2: Check test scripts + const testScript = pkg.scripts?.test || "" + if (testScript.includes("vitest")) return "vitest" + if (testScript.includes("jest")) return "jest" + if (testScript.includes("mocha")) return "mocha" + if (testScript.includes("pytest")) return "pytest" + + // Priority 3: Check config files + const configFiles = Glob("{vitest,jest,mocha}.config.{js,ts,json}") + if (configFiles.some(f => f.includes("vitest"))) return "vitest" + if (configFiles.some(f => f.includes("jest"))) return "jest" + if (configFiles.some(f => f.includes("mocha"))) return "mocha" + + if (Bash("test -f pytest.ini").exitCode === 0) return "pytest" + + return "unknown" +} +``` + +**Affected Test Discovery**: +```javascript +function findAffectedTests(modifiedFiles) { + const testFiles = [] + + for (const file of modifiedFiles) { + const baseName = 
file.replace(/\.(ts|js|tsx|jsx|py)$/, "") + const dir = file.substring(0, file.lastIndexOf("/")) + + const testVariants = [ + // Same directory variants + `${baseName}.test.ts`, + `${baseName}.test.js`, + `${baseName}.spec.ts`, + `${baseName}.spec.js`, + `${baseName}_test.py`, + `test_${baseName.split("/").pop()}.py`, + + // Test directory variants + `${file.replace(/^src\//, "tests/")}`, + `${file.replace(/^src\//, "__tests__/")}`, + `${file.replace(/^src\//, "test/")}`, + `${dir}/__tests__/${file.split("/").pop().replace(/\.(ts|js|tsx|jsx)$/, ".test.ts")}`, + + // Python variants + `${file.replace(/^src\//, "tests/").replace(/\.py$/, "_test.py")}`, + `${file.replace(/^src\//, "tests/test_")}` + ] + + for (const variant of testVariants) { + if (Bash(`test -f ${variant}`).exitCode === 0) { + testFiles.push(variant) + } + } + } + + return [...new Set(testFiles)] // Deduplicate +} +``` + +### Phase 3: Test Execution & Fix Cycle + +**Delegate to Command**: +```javascript +const validateCommand = Read("commands/validate.md") +// Command handles: +// - MAX_ITERATIONS=10, PASS_RATE_TARGET=95 +// - Main iteration loop with strategy selection +// - Quality gate check (affected tests โ†’ full suite) +// - applyFixes by strategy (conservative/aggressive/surgical) +// - Progress updates for long cycles (iteration > 5) +``` + +### Phase 4: Result Analysis + +**Test Result Parsing**: +```javascript +function parseTestResults(output, framework) { + const results = { + total: 0, + passed: 0, + failed: 0, + skipped: 0, + failures: [] + } + + if (framework === "jest" || framework === "vitest") { + // Parse Jest/Vitest output + const totalMatch = output.match(/Tests:\s+(\d+)\s+total/) + const passedMatch = output.match(/(\d+)\s+passed/) + const failedMatch = output.match(/(\d+)\s+failed/) + const skippedMatch = output.match(/(\d+)\s+skipped/) + + results.total = totalMatch ? parseInt(totalMatch[1]) : 0 + results.passed = passedMatch ? 
parseInt(passedMatch[1]) : 0 + results.failed = failedMatch ? parseInt(failedMatch[1]) : 0 + results.skipped = skippedMatch ? parseInt(skippedMatch[1]) : 0 + + // Extract failure details + const failureRegex = /โ—\s+(.*?)\n\n\s+(.*?)(?=\n\nโ—|\n\nTest Suites:)/gs + let match + while ((match = failureRegex.exec(output)) !== null) { + results.failures.push({ + test: match[1].trim(), + error: match[2].trim() + }) + } + } else if (framework === "pytest") { + // Parse pytest output + const summaryMatch = output.match(/=+\s+(\d+)\s+failed,\s+(\d+)\s+passed/) + if (summaryMatch) { + results.failed = parseInt(summaryMatch[1]) + results.passed = parseInt(summaryMatch[2]) + results.total = results.failed + results.passed + } + + // Extract failure details + const failureRegex = /FAILED\s+(.*?)\s+-\s+(.*?)(?=\n_+|\nFAILED|$)/gs + let match + while ((match = failureRegex.exec(output)) !== null) { + results.failures.push({ + test: match[1].trim(), + error: match[2].trim() + }) + } + } + + return results +} +``` + +**Failure Classification**: +```javascript +function classifyFailures(failures) { + const classified = { + critical: [], // Syntax errors, missing imports + high: [], // Assertion failures, logic errors + medium: [], // Timeout, flaky tests + low: [] // Warnings, deprecations + } + + for (const failure of failures) { + const error = failure.error.toLowerCase() + + if (error.includes("syntaxerror") || + error.includes("cannot find module") || + error.includes("is not defined")) { + classified.critical.push(failure) + } else if (error.includes("expected") || + error.includes("assertion") || + error.includes("tobe") || // needles lowercased: 'error' is already .toLowerCase()'d + error.includes("toequal")) { + classified.high.push(failure) + } else if (error.includes("timeout") || + error.includes("async")) { + classified.medium.push(failure) + } else { + classified.low.push(failure) + } + } + + return classified +} +``` + +### Phase 5: Report to Coordinator + +**Success Report**: +```javascript +team_msg({ + to: "coordinator",
+ type: "task_complete", + task_id: task.task_id, + status: "success", + pass_rate: (results.passed / results.total * 100).toFixed(1), + tests_run: results.total, + tests_passed: results.passed, + tests_failed: results.failed, + iterations: iterationCount, + framework: framework, + affected_tests: affectedTests.length, + full_suite_run: fullSuiteRun, + timestamp: new Date().toISOString() +}, "[tester]") +``` + +**Failure Report**: +```javascript +const classified = classifyFailures(results.failures) + +team_msg({ + to: "coordinator", + type: "task_failed", + task_id: task.task_id, + error: "Test failures exceeded threshold", + pass_rate: (results.passed / results.total * 100).toFixed(1), + tests_run: results.total, + failures: { + critical: classified.critical.length, + high: classified.high.length, + medium: classified.medium.length, + low: classified.low.length + }, + failure_details: classified, + iterations: iterationCount, + framework: framework, + timestamp: new Date().toISOString() +}, "[tester]") +``` + +## 7. 
Strategy Engine + +### Strategy Selection + +```javascript +function selectStrategy(iteration, passRate, failures) { + const classified = classifyFailures(failures) + + // Conservative: Early iterations or high pass rate + if (iteration <= 3 || passRate >= 80) { + return "conservative" + } + + // Surgical: Specific failure patterns + if (classified.critical.length > 0 && classified.critical.length < 5) { + return "surgical" + } + + // Aggressive: Low pass rate or many iterations + if (passRate < 50 || iteration > 7) { + return "aggressive" + } + + return "conservative" +} +``` + +### Fix Application + +```javascript +function applyFixes(strategy, failures, framework) { + if (strategy === "conservative") { + // Fix only critical failures one at a time + const critical = classifyFailures(failures).critical + if (critical.length > 0) { + return fixFailure(critical[0], framework) + } + } else if (strategy === "surgical") { + // Fix specific pattern across all occurrences + const pattern = identifyCommonPattern(failures) + return fixPattern(pattern, framework) + } else if (strategy === "aggressive") { + // Fix all failures in batch + return fixAllFailures(failures, framework) + } +} +``` + +## 8. Error Handling + +| Error Type | Recovery Strategy | Escalation | +|------------|-------------------|------------| +| Framework not detected | Prompt user for framework | Immediate escalation | +| No tests found | Report to coordinator | Manual intervention | +| Test command fails | Retry with verbose output | Report after 2 failures | +| Infinite fix loop | Abort after MAX_ITERATIONS | Report iteration history | +| Pass rate below target | Report best attempt | Include failure classification | + +## 9. 
Configuration + +| Parameter | Default | Description | +|-----------|---------|-------------| +| MAX_ITERATIONS | 10 | Maximum fix-test cycles | +| PASS_RATE_TARGET | 95 | Target pass rate (%) | +| AFFECTED_TESTS_FIRST | true | Run affected tests before full suite | +| PARALLEL_TESTS | true | Enable parallel test execution | +| TIMEOUT_PER_TEST | 30000 | Timeout per test (ms) | + +## 10. Test Framework Commands + +| Framework | Affected Tests Command | Full Suite Command | +|-----------|------------------------|-------------------| +| vitest | `vitest run ${files.join(" ")}` | `vitest run` | +| jest | `jest ${files.join(" ")} --no-coverage` | `jest --no-coverage` | +| mocha | `mocha ${files.join(" ")}` | `mocha` | +| pytest | `pytest ${files.join(" ")} -v` | `pytest -v` | diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/writer/commands/generate-doc.md b/.claude/skills_lib/team-lifecycle-v2/roles/writer/commands/generate-doc.md new file mode 100644 index 00000000..67c89708 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/writer/commands/generate-doc.md @@ -0,0 +1,698 @@ +# Command: Generate Document + +Multi-CLI document generation for 4 document types: Product Brief, Requirements/PRD, Architecture, Epics & Stories. + +## Pre-Steps (All Document Types) + +```javascript +// 1. Load document standards +const docStandards = Read('../../specs/document-standards.md') + +// 2. Load appropriate template +const templateMap = { + 'product-brief': '../../templates/product-brief.md', + 'requirements': '../../templates/requirements-prd.md', + 'architecture': '../../templates/architecture-doc.md', + 'epics': '../../templates/epics-template.md' +} +const template = Read(templateMap[docType]) + +// 3. Build shared context +const seedAnalysis = specConfig?.seed_analysis || + (priorDocs.discoveryContext ? 
JSON.parse(priorDocs.discoveryContext).seed_analysis : {}) + +const sharedContext = ` +SEED: ${specConfig?.topic || ''} +PROBLEM: ${seedAnalysis.problem_statement || ''} +TARGET USERS: ${(seedAnalysis.target_users || []).join(', ')} +DOMAIN: ${seedAnalysis.domain || ''} +CONSTRAINTS: ${(seedAnalysis.constraints || []).join(', ')} +FOCUS AREAS: ${(specConfig?.focus_areas || []).join(', ')} +${priorDocs.discoveryContext ? ` +CODEBASE CONTEXT: +- Existing patterns: ${JSON.parse(priorDocs.discoveryContext).existing_patterns?.slice(0,5).join(', ') || 'none'} +- Tech stack: ${JSON.stringify(JSON.parse(priorDocs.discoveryContext).tech_stack || {})} +` : ''}` + +// 4. Route to specific document type +``` + +## DRAFT-001: Product Brief + +3-way parallel CLI analysis (product/technical/user perspectives), then synthesize. + +```javascript +if (docType === 'product-brief') { + // === Parallel CLI Analysis === + + // Product Perspective (Gemini) + Bash({ + command: `ccw cli -p "PURPOSE: Product analysis for specification - identify market fit, user value, and success criteria. +Success: Clear vision, measurable goals, competitive positioning. + +${sharedContext} + +TASK: +- Define product vision (1-3 sentences, aspirational) +- Analyze market/competitive landscape +- Define 3-5 measurable success metrics +- Identify scope boundaries (in-scope vs out-of-scope) +- Assess user value proposition +- List assumptions that need validation + +MODE: analysis +EXPECTED: Structured product analysis with: vision, goals with metrics, scope, competitive positioning, assumptions +CONSTRAINTS: Focus on 'what' and 'why', not 'how' +" --tool gemini --mode analysis`, + run_in_background: true + }) + + // Technical Perspective (Codex) + Bash({ + command: `ccw cli -p "PURPOSE: Technical feasibility analysis for specification - assess implementation viability and constraints. +Success: Clear technical constraints, integration complexity, technology recommendations. 
+ +${sharedContext} + +TASK: +- Assess technical feasibility of the core concept +- Identify technical constraints and blockers +- Evaluate integration complexity with existing systems +- Recommend technology approach (high-level) +- Identify technical risks and dependencies +- Estimate complexity: simple/moderate/complex + +MODE: analysis +EXPECTED: Technical analysis with: feasibility assessment, constraints, integration complexity, tech recommendations, risks +CONSTRAINTS: Focus on feasibility and constraints, not detailed architecture +" --tool codex --mode analysis`, + run_in_background: true + }) + + // User Perspective (Claude) + Bash({ + command: `ccw cli -p "PURPOSE: User experience analysis for specification - understand user journeys, pain points, and UX considerations. +Success: Clear user personas, journey maps, UX requirements. + +${sharedContext} + +TASK: +- Elaborate user personas with goals and frustrations +- Map primary user journey (happy path) +- Identify key pain points in current experience +- Define UX success criteria +- List accessibility and usability considerations +- Suggest interaction patterns + +MODE: analysis +EXPECTED: User analysis with: personas, journey map, pain points, UX criteria, interaction recommendations +CONSTRAINTS: Focus on user needs and experience, not implementation +" --tool claude --mode analysis`, + run_in_background: true + }) + + // STOP: Wait for all 3 CLI results + + // === Synthesize Three Perspectives === + const synthesis = { + convergent_themes: [], // Themes consistent across all three perspectives + conflicts: [], // Conflicting viewpoints + product_insights: [], // Unique product perspective insights + technical_insights: [], // Unique technical perspective insights + user_insights: [] // Unique user perspective insights + } + + // Parse CLI outputs and identify: + // - Common themes mentioned by 2+ perspectives + // - Conflicts (e.g., product wants feature X, technical says infeasible) + // - Unique 
insights from each perspective + + // === Integrate Discussion Feedback === + if (discussionFeedback) { + // Extract consensus and adjustments from discuss-001-scope.md + // Merge discussion conclusions into synthesis + } + + // === Generate Document from Template === + const frontmatter = `--- +session_id: ${specConfig?.session_id || 'unknown'} +phase: 2 +document_type: product-brief +status: draft +generated_at: ${new Date().toISOString()} +version: 1 +dependencies: + - spec-config.json + - discovery-context.json +---` + + // Fill template sections: + // - Vision (from product perspective + synthesis) + // - Problem Statement (from seed analysis + user perspective) + // - Target Users (from user perspective + personas) + // - Goals (from product perspective + metrics) + // - Scope (from product perspective + technical constraints) + // - Success Criteria (from all three perspectives) + // - Assumptions (from product + technical perspectives) + + const filledContent = fillTemplate(template, { + vision: productPerspective.vision, + problem: seedAnalysis.problem_statement, + users: userPerspective.personas, + goals: productPerspective.goals, + scope: synthesis.scope_boundaries, + success_criteria: synthesis.convergent_themes, + assumptions: [...productPerspective.assumptions, ...technicalPerspective.assumptions] + }) + + Write(`${sessionFolder}/spec/product-brief.md`, `${frontmatter}\n\n${filledContent}`) + + return { + outputPath: 'spec/product-brief.md', + documentSummary: `Product Brief generated with ${synthesis.convergent_themes.length} convergent themes, ${synthesis.conflicts.length} conflicts resolved` + } +} +``` + +## DRAFT-002: Requirements/PRD + +Gemini CLI expansion to generate REQ-NNN and NFR-{type}-NNN files. + +```javascript +if (docType === 'requirements') { + // === Requirements Expansion CLI === + Bash({ + command: `ccw cli -p "PURPOSE: Generate detailed functional and non-functional requirements from product brief. 
+Success: Complete PRD with testable acceptance criteria for every requirement. + +PRODUCT BRIEF CONTEXT: +${priorDocs.productBrief?.slice(0, 3000) || ''} + +${sharedContext} + +TASK: +- For each goal in the product brief, generate 3-7 functional requirements +- Each requirement must have: + - Unique ID: REQ-NNN (zero-padded) + - Clear title + - Detailed description + - User story: As a [persona], I want [action] so that [benefit] + - 2-4 specific, testable acceptance criteria +- Generate non-functional requirements: + - Performance (response times, throughput) + - Security (authentication, authorization, data protection) + - Scalability (user load, data volume) + - Usability (accessibility, learnability) +- Assign MoSCoW priority: Must/Should/Could/Won't +- Output structure per requirement: ID, title, description, user_story, acceptance_criteria[], priority, traces + +MODE: analysis +EXPECTED: Structured requirements with: ID, title, description, user story, acceptance criteria, priority, traceability to goals +CONSTRAINTS: Every requirement must be specific enough to estimate and test. No vague requirements. 
+" --tool gemini --mode analysis`, + run_in_background: true + }) + + // Wait for CLI result + + // === Integrate Discussion Feedback === + if (discussionFeedback) { + // Extract requirement adjustments from discuss-002-brief.md + // Merge new/modified/deleted requirements + } + + // === Generate requirements/ Directory === + Bash(`mkdir -p "${sessionFolder}/spec/requirements"`) + + const timestamp = new Date().toISOString() + + // Parse CLI output โ†’ funcReqs[], nfReqs[] + const funcReqs = parseFunctionalRequirements(cliOutput) + const nfReqs = parseNonFunctionalRequirements(cliOutput) + + // Write individual REQ-*.md files (one per functional requirement) + funcReqs.forEach(req => { + const reqFrontmatter = `--- +id: REQ-${req.id} +title: "${req.title}" +priority: ${req.priority} +status: draft +traces: + - product-brief.md +---` + const reqContent = `${reqFrontmatter} + +# REQ-${req.id}: ${req.title} + +## Description +${req.description} + +## User Story +${req.user_story} + +## Acceptance Criteria +${req.acceptance_criteria.map((ac, i) => `${i+1}. 
${ac}`).join('\n')} +` + Write(`${sessionFolder}/spec/requirements/REQ-${req.id}-${req.slug}.md`, reqContent) + }) + + // Write individual NFR-*.md files + nfReqs.forEach(nfr => { + const nfrFrontmatter = `--- +id: NFR-${nfr.type}-${nfr.id} +type: ${nfr.type} +title: "${nfr.title}" +status: draft +traces: + - product-brief.md +---` + const nfrContent = `${nfrFrontmatter} + +# NFR-${nfr.type}-${nfr.id}: ${nfr.title} + +## Requirement +${nfr.requirement} + +## Metric & Target +${nfr.metric} โ€” Target: ${nfr.target} +` + Write(`${sessionFolder}/spec/requirements/NFR-${nfr.type}-${nfr.id}-${nfr.slug}.md`, nfrContent) + }) + + // Write _index.md (summary + links) + const indexFrontmatter = `--- +session_id: ${specConfig?.session_id || 'unknown'} +phase: 3 +document_type: requirements-index +status: draft +generated_at: ${timestamp} +version: 1 +dependencies: + - product-brief.md +---` + const indexContent = `${indexFrontmatter} + +# Requirements (PRD) + +## Summary +Total: ${funcReqs.length} functional + ${nfReqs.length} non-functional requirements + +## Functional Requirements +| ID | Title | Priority | Status | +|----|-------|----------|--------| +${funcReqs.map(r => `| [REQ-${r.id}](REQ-${r.id}-${r.slug}.md) | ${r.title} | ${r.priority} | draft |`).join('\n')} + +## Non-Functional Requirements +| ID | Type | Title | +|----|------|-------| +${nfReqs.map(n => `| [NFR-${n.type}-${n.id}](NFR-${n.type}-${n.id}-${n.slug}.md) | ${n.type} | ${n.title} |`).join('\n')} + +## MoSCoW Summary +- **Must**: ${funcReqs.filter(r => r.priority === 'Must').length} +- **Should**: ${funcReqs.filter(r => r.priority === 'Should').length} +- **Could**: ${funcReqs.filter(r => r.priority === 'Could').length} +- **Won't**: ${funcReqs.filter(r => r.priority === "Won't").length} +` + Write(`${sessionFolder}/spec/requirements/_index.md`, indexContent) + + return { + outputPath: 'spec/requirements/_index.md', + documentSummary: `Requirements generated: ${funcReqs.length} functional, 
${nfReqs.length} non-functional` + } +} +``` + +## DRAFT-003: Architecture + +Two-stage CLI: Gemini architecture design + Codex architecture review. + +```javascript +if (docType === 'architecture') { + // === Stage 1: Architecture Design (Gemini) === + Bash({ + command: `ccw cli -p "PURPOSE: Generate technical architecture for the specified requirements. +Success: Complete component architecture, tech stack, and ADRs with justified decisions. + +PRODUCT BRIEF (summary): +${priorDocs.productBrief?.slice(0, 3000) || ''} + +REQUIREMENTS: +${priorDocs.requirementsIndex?.slice(0, 5000) || ''} + +${sharedContext} + +TASK: +- Define system architecture style (monolith, microservices, serverless, etc.) with justification +- Identify core components and their responsibilities +- Create component interaction diagram (Mermaid graph TD format) +- Specify technology stack: languages, frameworks, databases, infrastructure +- Generate 2-4 Architecture Decision Records (ADRs): + - Each ADR: context, decision, 2-3 alternatives with pros/cons, consequences + - Focus on: data storage, API design, authentication, key technical choices +- Define data model: key entities and relationships (Mermaid erDiagram format) +- Identify security architecture: auth, authorization, data protection +- List API endpoints (high-level) + +MODE: analysis +EXPECTED: Complete architecture with: style justification, component diagram, tech stack table, ADRs, data model, security controls, API overview +CONSTRAINTS: Architecture must support all Must-have requirements. Prefer proven technologies. +" --tool gemini --mode analysis`, + run_in_background: true + }) + + // Wait for Gemini result + + // === Stage 2: Architecture Review (Codex) === + Bash({ + command: `ccw cli -p "PURPOSE: Critical review of proposed architecture - identify weaknesses and risks. +Success: Actionable feedback with specific concerns and improvement suggestions. 
+ +PROPOSED ARCHITECTURE: +${geminiArchitectureOutput.slice(0, 5000)} + +REQUIREMENTS CONTEXT: +${priorDocs.requirementsIndex?.slice(0, 2000) || ''} + +TASK: +- Challenge each ADR: are the alternatives truly the best options? +- Identify scalability bottlenecks in the component design +- Assess security gaps: authentication, authorization, data protection +- Evaluate technology choices: maturity, community support, fit +- Check for over-engineering or under-engineering +- Verify architecture covers all Must-have requirements +- Rate overall architecture quality: 1-5 with justification + +MODE: analysis +EXPECTED: Architecture review with: per-ADR feedback, scalability concerns, security gaps, technology risks, quality rating +CONSTRAINTS: Be genuinely critical, not just validating. Focus on actionable improvements. +" --tool codex --mode analysis`, + run_in_background: true + }) + + // Wait for Codex result + + // === Integrate Discussion Feedback === + if (discussionFeedback) { + // Extract architecture feedback from discuss-003-requirements.md + // Merge into architecture design + } + + // === Codebase Integration Mapping (conditional) === + let integrationMapping = null + if (priorDocs.discoveryContext) { + const dc = JSON.parse(priorDocs.discoveryContext) + if (dc.relevant_files) { + integrationMapping = dc.relevant_files.map(f => ({ + new_component: '...', + existing_module: f.path, + integration_type: 'Extend|Replace|New', + notes: f.rationale + })) + } + } + + // === Generate architecture/ Directory === + Bash(`mkdir -p "${sessionFolder}/spec/architecture"`) + + const timestamp = new Date().toISOString() + const adrs = parseADRs(geminiArchitectureOutput, codexReviewOutput) + + // Write individual ADR-*.md files + adrs.forEach(adr => { + const adrFrontmatter = `--- +id: ADR-${adr.id} +title: "${adr.title}" +status: draft +traces: + - ../requirements/_index.md +---` + const adrContent = `${adrFrontmatter} + +# ADR-${adr.id}: ${adr.title} + +## Context 
+${adr.context} + +## Decision +${adr.decision} + +## Alternatives +${adr.alternatives.map((alt, i) => `### Option ${i+1}: ${alt.name}\n- **Pros**: ${alt.pros.join(', ')}\n- **Cons**: ${alt.cons.join(', ')}`).join('\n\n')} + +## Consequences +${adr.consequences} + +## Review Feedback +${adr.reviewFeedback || 'N/A'} +` + Write(`${sessionFolder}/spec/architecture/ADR-${adr.id}-${adr.slug}.md`, adrContent) + }) + + // Write _index.md (with Mermaid component diagram + ER diagram + links) + const archIndexFrontmatter = `--- +session_id: ${specConfig?.session_id || 'unknown'} +phase: 4 +document_type: architecture-index +status: draft +generated_at: ${timestamp} +version: 1 +dependencies: + - ../product-brief.md + - ../requirements/_index.md +---` + + const archIndexContent = `${archIndexFrontmatter} + +# Architecture Document + +## System Overview +${geminiArchitectureOutput.system_overview} + +## Component Diagram +\`\`\`mermaid +${geminiArchitectureOutput.component_diagram} +\`\`\` + +## Technology Stack +${geminiArchitectureOutput.tech_stack_table} + +## Architecture Decision Records +| ID | Title | Status | +|----|-------|--------| +${adrs.map(a => `| [ADR-${a.id}](ADR-${a.id}-${a.slug}.md) | ${a.title} | draft |`).join('\n')} + +## Data Model +\`\`\`mermaid +${geminiArchitectureOutput.data_model_diagram} +\`\`\` + +## API Design +${geminiArchitectureOutput.api_overview} + +## Security Controls +${geminiArchitectureOutput.security_controls} + +## Review Summary +${codexReviewOutput.summary} +Quality Rating: ${codexReviewOutput.quality_rating}/5 +` + + Write(`${sessionFolder}/spec/architecture/_index.md`, archIndexContent) + + return { + outputPath: 'spec/architecture/_index.md', + documentSummary: `Architecture generated with ${adrs.length} ADRs, quality rating ${codexReviewOutput.quality_rating}/5` + } +} +``` + +## DRAFT-004: Epics & Stories + +Gemini CLI decomposition to generate EPIC-*.md files. 
+ +```javascript +if (docType === 'epics') { + // === Epic Decomposition CLI === + Bash({ + command: `ccw cli -p "PURPOSE: Decompose requirements into executable Epics and Stories for implementation planning. +Success: 3-7 Epics with prioritized Stories, dependency map, and MVP subset clearly defined. + +PRODUCT BRIEF (summary): +${priorDocs.productBrief?.slice(0, 2000) || ''} + +REQUIREMENTS: +${priorDocs.requirementsIndex?.slice(0, 5000) || ''} + +ARCHITECTURE (summary): +${priorDocs.architectureIndex?.slice(0, 3000) || ''} + +TASK: +- Group requirements into 3-7 logical Epics: + - Each Epic: EPIC-NNN ID, title, description, priority (Must/Should/Could) + - Group by functional domain or user journey stage + - Tag MVP Epics (minimum set for initial release) +- For each Epic, generate 2-5 Stories: + - Each Story: STORY-{EPIC}-NNN ID, title + - User story format: As a [persona], I want [action] so that [benefit] + - 2-4 acceptance criteria per story (testable) + - Relative size estimate: S/M/L/XL + - Trace to source requirement(s): REQ-NNN +- Create dependency map: + - Cross-Epic dependencies (which Epics block others) + - Mermaid graph LR format + - Recommended execution order with rationale +- Define MVP: + - Which Epics are in MVP + - MVP definition of done (3-5 criteria) + - What is explicitly deferred post-MVP + +MODE: analysis +EXPECTED: Structured output with: Epic list (ID, title, priority, MVP flag), Stories per Epic (ID, user story, AC, size, trace), dependency Mermaid diagram, execution order, MVP definition +CONSTRAINTS: Every Must-have requirement must appear in at least one Story. Stories must be small enough to implement independently. Dependencies should be minimized across Epics. 
+" --tool gemini --mode analysis`, + run_in_background: true + }) + + // Wait for CLI result + + // === Integrate Discussion Feedback === + if (discussionFeedback) { + // Extract execution feedback from discuss-004-architecture.md + // Adjust Epic granularity, MVP scope + } + + // === Generate epics/ Directory === + Bash(`mkdir -p "${sessionFolder}/spec/epics"`) + + const timestamp = new Date().toISOString() + const epicsList = parseEpics(cliOutput) + + // Write individual EPIC-*.md files (with stories) + epicsList.forEach(epic => { + const epicFrontmatter = `--- +id: EPIC-${epic.id} +title: "${epic.title}" +priority: ${epic.priority} +mvp: ${epic.mvp} +size: ${epic.size} +requirements: +${epic.reqs.map(r => ` - ${r}`).join('\n')} +architecture: +${epic.adrs.map(a => ` - ${a}`).join('\n')} +dependencies: +${epic.deps.map(d => ` - ${d}`).join('\n')} +status: draft +---` + const storiesContent = epic.stories.map(s => `### ${s.id}: ${s.title} + +**User Story**: ${s.user_story} +**Size**: ${s.size} +**Traces**: ${s.traces.join(', ')} + +**Acceptance Criteria**: +${s.acceptance_criteria.map((ac, i) => `${i+1}. 
${ac}`).join('\n')} +`).join('\n') + + const epicContent = `${epicFrontmatter} + +# EPIC-${epic.id}: ${epic.title} + +## Description +${epic.description} + +## Stories +${storiesContent} + +## Requirements +${epic.reqs.map(r => `- [${r}](../requirements/${r}.md)`).join('\n')} + +## Architecture +${epic.adrs.map(a => `- [${a}](../architecture/${a}.md)`).join('\n')} +` + Write(`${sessionFolder}/spec/epics/EPIC-${epic.id}-${epic.slug}.md`, epicContent) + }) + + // Write _index.md (with Mermaid dependency diagram + MVP + links) + const epicsIndexFrontmatter = `--- +session_id: ${specConfig?.session_id || 'unknown'} +phase: 5 +document_type: epics-index +status: draft +generated_at: ${timestamp} +version: 1 +dependencies: + - ../requirements/_index.md + - ../architecture/_index.md +---` + + const epicsIndexContent = `${epicsIndexFrontmatter} + +# Epics & Stories + +## Epic Overview +| ID | Title | Priority | MVP | Size | Status | +|----|-------|----------|-----|------|--------| +${epicsList.map(e => `| [EPIC-${e.id}](EPIC-${e.id}-${e.slug}.md) | ${e.title} | ${e.priority} | ${e.mvp ? 
'โœ“' : ''} | ${e.size} | draft |`).join('\n')} + +## Dependency Map +\`\`\`mermaid +${cliOutput.dependency_diagram} +\`\`\` + +## Execution Order +${cliOutput.execution_order} + +## MVP Scope +${cliOutput.mvp_definition} + +### MVP Epics +${epicsList.filter(e => e.mvp).map(e => `- EPIC-${e.id}: ${e.title}`).join('\n')} + +### Post-MVP +${epicsList.filter(e => !e.mvp).map(e => `- EPIC-${e.id}: ${e.title}`).join('\n')} + +## Traceability Matrix +${generateTraceabilityMatrix(epicsList, funcReqs)} +` + + Write(`${sessionFolder}/spec/epics/_index.md`, epicsIndexContent) + + return { + outputPath: 'spec/epics/_index.md', + documentSummary: `Epics generated: ${epicsList.length} total, ${epicsList.filter(e => e.mvp).length} in MVP` + } +} +``` + +## Helper Functions + +```javascript +function parseFunctionalRequirements(cliOutput) { + // Parse CLI JSON output to extract functional requirements + // Returns: [{ id, title, description, user_story, acceptance_criteria[], priority, slug }] +} + +function parseNonFunctionalRequirements(cliOutput) { + // Parse CLI JSON output to extract non-functional requirements + // Returns: [{ id, type, title, requirement, metric, target, slug }] +} + +function parseADRs(geminiOutput, codexOutput) { + // Parse architecture outputs to extract ADRs with review feedback + // Returns: [{ id, title, context, decision, alternatives[], consequences, reviewFeedback, slug }] +} + +function parseEpics(cliOutput) { + // Parse CLI JSON output to extract Epics and Stories + // Returns: [{ id, title, description, priority, mvp, size, stories[], reqs[], adrs[], deps[], slug }] +} + +function fillTemplate(template, data) { + // Fill template placeholders with data + // Apply document-standards.md formatting rules +} + +function generateTraceabilityMatrix(epics, requirements) { + // Generate traceability matrix showing Epic โ†’ Requirement mappings +} +``` diff --git a/.claude/skills_lib/team-lifecycle-v2/roles/writer/role.md 
b/.claude/skills_lib/team-lifecycle-v2/roles/writer/role.md new file mode 100644 index 00000000..f86cff3a --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/roles/writer/role.md @@ -0,0 +1,257 @@ +# Role: writer + +Product Brief, Requirements/PRD, Architecture, and Epics & Stories document generation. Maps to spec-generator Phases 2-5. + +## Role Identity + +- **Name**: `writer` +- **Task Prefix**: `DRAFT-*` +- **Output Tag**: `[writer]` +- **Responsibility**: Load Context โ†’ Generate Document โ†’ Incorporate Feedback โ†’ Report +- **Communication**: SendMessage to coordinator only + +## Role Boundaries + +### MUST +- Only process DRAFT-* tasks +- Read templates before generating documents +- Follow document-standards.md formatting rules +- Integrate discussion feedback when available +- Generate proper frontmatter for all documents + +### MUST NOT +- Create tasks for other roles +- Contact other workers directly +- Skip template loading +- Modify discussion records +- Generate documents without loading prior dependencies + +## Message Types + +| Type | Direction | Trigger | Description | +|------|-----------|---------|-------------| +| `draft_ready` | writer โ†’ coordinator | Document writing complete | With document path and type | +| `draft_revision` | writer โ†’ coordinator | Document revised and resubmitted | Describes changes made | +| `impl_progress` | writer โ†’ coordinator | Long writing progress | Multi-document stage progress | +| `error` | writer โ†’ coordinator | Unrecoverable error | Template missing, insufficient context, etc. 
| + +## Message Bus + +Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log: + +```javascript +// Document ready +mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: "writer", + to: "coordinator", + type: "draft_ready", + summary: "[writer] Product Brief complete", + ref: `${sessionFolder}/product-brief.md` +}) + +// Document revision +mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: "writer", + to: "coordinator", + type: "draft_revision", + summary: "[writer] Requirements revised per discussion feedback" +}) + +// Error report +mcp__ccw-tools__team_msg({ + operation: "log", + team: teamName, + from: "writer", + to: "coordinator", + type: "error", + summary: "[writer] Input artifact missing, cannot generate document" +}) +``` + +### CLI Fallback + +When `mcp__ccw-tools__team_msg` MCP is unavailable: + +```bash +ccw team log --team "${teamName}" --from "writer" --to "coordinator" --type "draft_ready" --summary "[writer] Brief complete" --ref "${sessionFolder}/product-brief.md" --json +``` + +## Toolbox + +### Available Commands +- `commands/generate-doc.md` - Multi-CLI document generation for 4 doc types + +### Subagent Capabilities +- None + +### CLI Capabilities +- `gemini`, `codex`, `claude` for multi-perspective analysis + +## Execution (5-Phase) + +### Phase 1: Task Discovery + +```javascript +const tasks = TaskList() +const myTasks = tasks.filter(t => + t.subject.startsWith('DRAFT-') && + t.owner === 'writer' && + t.status === 'pending' && + t.blockedBy.length === 0 +) + +if (myTasks.length === 0) return // idle + +const task = TaskGet({ taskId: myTasks[0].id }) +TaskUpdate({ taskId: task.id, status: 'in_progress' }) +``` + +### Phase 2: Context & Discussion Loading + +```javascript +// Extract session folder from task description +const sessionMatch = task.description.match(/Session:\s*(.+)/) +const sessionFolder = sessionMatch ? 
sessionMatch[1].trim() : '' + +// Load session config +let specConfig = null +try { specConfig = JSON.parse(Read(`${sessionFolder}/spec/spec-config.json`)) } catch {} + +// Determine document type from task subject +const docType = task.subject.includes('Product Brief') ? 'product-brief' + : task.subject.includes('Requirements') || task.subject.includes('PRD') ? 'requirements' + : task.subject.includes('Architecture') ? 'architecture' + : task.subject.includes('Epics') ? 'epics' + : 'unknown' + +// Load discussion feedback (from preceding DISCUSS task) +const discussionFiles = { + 'product-brief': 'discussions/discuss-001-scope.md', + 'requirements': 'discussions/discuss-002-brief.md', + 'architecture': 'discussions/discuss-003-requirements.md', + 'epics': 'discussions/discuss-004-architecture.md' +} +let discussionFeedback = null +try { discussionFeedback = Read(`${sessionFolder}/${discussionFiles[docType]}`) } catch {} + +// Load prior documents progressively +const priorDocs = {} +if (docType !== 'product-brief') { + try { priorDocs.discoveryContext = Read(`${sessionFolder}/spec/discovery-context.json`) } catch {} +} +if (['requirements', 'architecture', 'epics'].includes(docType)) { + try { priorDocs.productBrief = Read(`${sessionFolder}/spec/product-brief.md`) } catch {} +} +if (['architecture', 'epics'].includes(docType)) { + try { priorDocs.requirementsIndex = Read(`${sessionFolder}/spec/requirements/_index.md`) } catch {} +} +if (docType === 'epics') { + try { priorDocs.architectureIndex = Read(`${sessionFolder}/spec/architecture/_index.md`) } catch {} +} +``` + +### Phase 3: Document Generation + +**Delegate to command file**: + +```javascript +// Load and execute document generation command +const generateDocCommand = Read('commands/generate-doc.md') + +// Execute command with context: +// - docType +// - sessionFolder +// - specConfig +// - discussionFeedback +// - priorDocs +// - task + +// Command will handle: +// - Loading document standards +// - 
Loading appropriate template +// - Building shared context +// - Routing to type-specific generation (DRAFT-001/002/003/004) +// - Integrating discussion feedback +// - Writing output files + +// Returns: { outputPath, documentSummary } +``` + +### Phase 4: Self-Validation + +```javascript +const docContent = Read(`${sessionFolder}/${outputPath}`) + +const validationChecks = { + has_frontmatter: /^---\n[\s\S]+?\n---/.test(docContent), + sections_complete: /* verify all required sections present */, + cross_references: docContent.includes('session_id'), + discussion_integrated: !discussionFeedback || docContent.includes('Discussion') +} + +const allValid = Object.values(validationChecks).every(v => v) +``` + +### Phase 5: Report to Coordinator + +```javascript +const docTypeLabel = { + 'product-brief': 'Product Brief', + 'requirements': 'Requirements/PRD', + 'architecture': 'Architecture Document', + 'epics': 'Epics & Stories' +} + +mcp__ccw-tools__team_msg({ + operation: "log", team: teamName, + from: "writer", to: "coordinator", + type: "draft_ready", + summary: `[writer] ${docTypeLabel[docType]} ๅฎŒๆˆ: ${allValid ? '้ชŒ่ฏ้€š่ฟ‡' : '้ƒจๅˆ†้ชŒ่ฏๅคฑ่ดฅ'}`, + ref: `${sessionFolder}/${outputPath}` +}) + +SendMessage({ + type: "message", + recipient: "coordinator", + content: `[writer] ## ๆ–‡ๆกฃๆ’ฐๅ†™็ป“ๆžœ + +**Task**: ${task.subject} +**ๆ–‡ๆกฃ็ฑปๅž‹**: ${docTypeLabel[docType]} +**้ชŒ่ฏ็Šถๆ€**: ${allValid ? 'PASS' : 'PARTIAL'} + +### ๆ–‡ๆกฃๆ‘˜่ฆ +${documentSummary} + +### ่ฎจ่ฎบๅ้ฆˆๆ•ดๅˆ +${discussionFeedback ? 'ๅทฒๆ•ดๅˆๅ‰ๅบ่ฎจ่ฎบๅ้ฆˆ' : '้ฆ–ๆฌกๆ’ฐๅ†™'} + +### ่‡ช้ชŒ่ฏ็ป“ๆžœ +${Object.entries(validationChecks).map(([k, v]) => '- ' + k + ': ' + (v ? 
'PASS' : 'FAIL')).join('\n')} + +### ่พ“ๅ‡บไฝ็ฝฎ +${sessionFolder}/${outputPath} + +ๆ–‡ๆกฃๅทฒๅฐฑ็ปช๏ผŒๅฏ่ฟ›ๅ…ฅ่ฎจ่ฎบ่ฝฎๆฌกใ€‚`, + summary: `[writer] ${docTypeLabel[docType]} ๅฐฑ็ปช` +}) + +TaskUpdate({ taskId: task.id, status: 'completed' }) + +// Check for next DRAFT task โ†’ back to Phase 1 +``` + +## Error Handling + +| Scenario | Resolution | +|----------|------------| +| No DRAFT-* tasks available | Idle, wait for coordinator assignment | +| Prior document not found | Notify coordinator, request prerequisite | +| CLI analysis failure | Retry with fallback tool, then direct generation | +| Template sections incomplete | Generate best-effort, note gaps in report | +| Discussion feedback contradicts prior docs | Note conflict in document, flag for next discussion | +| Session folder missing | Notify coordinator, request session path | +| Unexpected error | Log error via team_msg, report to coordinator | diff --git a/.claude/skills_lib/team-lifecycle-v2/specs/document-standards.md b/.claude/skills_lib/team-lifecycle-v2/specs/document-standards.md new file mode 100644 index 00000000..2820cd98 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/specs/document-standards.md @@ -0,0 +1,192 @@ +# Document Standards + +Defines format conventions, YAML frontmatter schema, naming rules, and content structure for all spec-generator outputs. 
+ +## When to Use + +| Phase | Usage | Section | +|-------|-------|---------| +| All Phases | Frontmatter format | YAML Frontmatter Schema | +| All Phases | File naming | Naming Conventions | +| Phase 2-5 | Document structure | Content Structure | +| Phase 6 | Validation reference | All sections | + +--- + +## YAML Frontmatter Schema + +Every generated document MUST begin with YAML frontmatter: + +```yaml +--- +session_id: SPEC-{slug}-{YYYY-MM-DD} +phase: {1-6} +document_type: {product-brief|requirements|architecture|epics|readiness-report|spec-summary} +status: draft|review|complete +generated_at: {ISO8601 timestamp} +stepsCompleted: [] +version: 1 +dependencies: + - {list of input documents used} +--- +``` + +### Field Definitions + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `session_id` | string | Yes | Session identifier matching spec-config.json | +| `phase` | number | Yes | Phase number that generated this document (1-6) | +| `document_type` | string | Yes | One of: product-brief, requirements, architecture, epics, readiness-report, spec-summary | +| `status` | enum | Yes | draft (initial), review (user reviewed), complete (finalized) | +| `generated_at` | string | Yes | ISO8601 timestamp of generation | +| `stepsCompleted` | array | Yes | List of step IDs completed during generation | +| `version` | number | Yes | Document version, incremented on re-generation | +| `dependencies` | array | No | List of input files this document depends on | + +### Status Transitions + +``` +draft -> review -> complete + | ^ + +-------------------+ (direct promotion in auto mode) +``` + +- **draft**: Initial generation, not yet user-reviewed +- **review**: User has reviewed and provided feedback +- **complete**: Finalized, ready for downstream consumption + +In auto mode (`-y`), documents are promoted directly from `draft` to `complete`. 
+ +--- + +## Naming Conventions + +### Session ID Format + +``` +SPEC-{slug}-{YYYY-MM-DD} +``` + +- **slug**: Lowercase, alphanumeric + Chinese characters, hyphens as separators, max 40 chars +- **date**: UTC+8 date in YYYY-MM-DD format + +Examples: +- `SPEC-task-management-system-2026-02-11` +- `SPEC-user-auth-oauth-2026-02-11` + +### Output Files + +| File | Phase | Description | +|------|-------|-------------| +| `spec-config.json` | 1 | Session configuration and state | +| `discovery-context.json` | 1 | Codebase exploration results (optional) | +| `product-brief.md` | 2 | Product brief document | +| `requirements.md` | 3 | PRD document | +| `architecture.md` | 4 | Architecture decisions document | +| `epics.md` | 5 | Epic/Story breakdown document | +| `readiness-report.md` | 6 | Quality validation report | +| `spec-summary.md` | 6 | One-page executive summary | + +### Output Directory + +``` +.workflow/.spec/{session-id}/ +``` + +--- + +## Content Structure + +### Heading Hierarchy + +- `#` (H1): Document title only (one per document) +- `##` (H2): Major sections +- `###` (H3): Subsections +- `####` (H4): Detail items (use sparingly) + +Maximum depth: 4 levels. Prefer flat structures. + +### Section Ordering + +Every document follows this general pattern: + +1. **YAML Frontmatter** (mandatory) +2. **Title** (H1) +3. **Executive Summary** (2-3 sentences) +4. **Core Content Sections** (H2, document-specific) +5. **Open Questions / Risks** (if applicable) +6. 
**References / Traceability** (links to upstream/downstream docs) + +### Formatting Rules + +| Element | Format | Example | +|---------|--------|---------| +| Requirements | `REQ-{NNN}` prefix | REQ-001: User login | +| Acceptance criteria | Checkbox list | `- [ ] User can log in with email` | +| Architecture decisions | `ADR-{NNN}` prefix | ADR-001: Use PostgreSQL | +| Epics | `EPIC-{NNN}` prefix | EPIC-001: Authentication | +| Stories | `STORY-{EPIC}-{NNN}` prefix | STORY-001-001: Login form | +| Priority tags | MoSCoW labels | `[Must]`, `[Should]`, `[Could]`, `[Won't]` | +| Mermaid diagrams | Fenced code blocks | `` ```mermaid ... ``` `` | +| Code examples | Language-tagged blocks | `` ```typescript ... ``` `` | + +### Cross-Reference Format + +Use relative references between documents: + +```markdown +See [Product Brief](product-brief.md#section-name) for details. +Derived from [REQ-001](requirements.md#req-001). +``` + +### Language + +- Document body: Follow user's input language (Chinese or English) +- Technical identifiers: Always English (REQ-001, ADR-001, EPIC-001) +- YAML frontmatter keys: Always English + +--- + +## spec-config.json Schema + +```json +{ + "session_id": "string (required)", + "seed_input": "string (required) - original user input", + "input_type": "text|file (required)", + "timestamp": "ISO8601 (required)", + "mode": "interactive|auto (required)", + "complexity": "simple|moderate|complex (required)", + "depth": "light|standard|comprehensive (required)", + "focus_areas": ["string array"], + "seed_analysis": { + "problem_statement": "string", + "target_users": ["string array"], + "domain": "string", + "constraints": ["string array"], + "dimensions": ["string array - 3-5 exploration dimensions"] + }, + "has_codebase": "boolean", + "phasesCompleted": [ + { + "phase": "number (1-6)", + "name": "string (phase name)", + "output_file": "string (primary output file)", + "completed_at": "ISO8601" + } + ] +} +``` + +--- + +## Validation Checklist + +- 
[ ] Every document starts with valid YAML frontmatter +- [ ] `session_id` matches across all documents in a session +- [ ] `status` field reflects current document state +- [ ] All cross-references resolve to valid targets +- [ ] Heading hierarchy is correct (no skipped levels) +- [ ] Technical identifiers use correct prefixes +- [ ] Output files are in the correct directory diff --git a/.claude/skills_lib/team-lifecycle-v2/specs/quality-gates.md b/.claude/skills_lib/team-lifecycle-v2/specs/quality-gates.md new file mode 100644 index 00000000..ae968436 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/specs/quality-gates.md @@ -0,0 +1,207 @@ +# Quality Gates + +Per-phase quality gate criteria and scoring dimensions for spec-generator outputs. + +## When to Use + +| Phase | Usage | Section | +|-------|-------|---------| +| Phase 2-5 | Post-generation self-check | Per-Phase Gates | +| Phase 6 | Cross-document validation | Cross-Document Validation | +| Phase 6 | Final scoring | Scoring Dimensions | + +--- + +## Quality Thresholds + +| Gate | Score | Action | +|------|-------|--------| +| **Pass** | >= 80% | Continue to next phase | +| **Review** | 60-79% | Log warnings, continue with caveats | +| **Fail** | < 60% | Must address issues before continuing | + +In auto mode (`-y`), Review-level issues are logged but do not block progress. + +--- + +## Scoring Dimensions + +### 1. Completeness (25%) + +All required sections present with substantive content. + +| Score | Criteria | +|-------|----------| +| 100% | All template sections filled with detailed content | +| 75% | All sections present, some lack detail | +| 50% | Major sections present but minor sections missing | +| 25% | Multiple major sections missing or empty | +| 0% | Document is a skeleton only | + +### 2. Consistency (25%) + +Terminology, formatting, and references are uniform across documents. 
+ +| Score | Criteria | +|-------|----------| +| 100% | All terms consistent, all references valid, formatting uniform | +| 75% | Minor terminology variations, all references valid | +| 50% | Some inconsistent terms, 1-2 broken references | +| 25% | Frequent inconsistencies, multiple broken references | +| 0% | Documents contradict each other | + +### 3. Traceability (25%) + +Requirements, architecture decisions, and stories trace back to goals. + +| Score | Criteria | +|-------|----------| +| 100% | Every story traces to a requirement, every requirement traces to a goal | +| 75% | Most items traceable, few orphans | +| 50% | Partial traceability, some disconnected items | +| 25% | Weak traceability, many orphan items | +| 0% | No traceability between documents | + +### 4. Depth (25%) + +Content provides sufficient detail for execution teams. + +| Score | Criteria | +|-------|----------| +| 100% | Acceptance criteria specific and testable, architecture decisions justified, stories estimable | +| 75% | Most items detailed enough, few vague areas | +| 50% | Mix of detailed and vague content | +| 25% | Mostly high-level, lacking actionable detail | +| 0% | Too abstract for execution | + +--- + +## Per-Phase Quality Gates + +### Phase 1: Discovery + +| Check | Criteria | Severity | +|-------|----------|----------| +| Session ID valid | Matches `SPEC-{slug}-{date}` format | Error | +| Problem statement exists | Non-empty, >= 20 characters | Error | +| Target users identified | >= 1 user group | Error | +| Dimensions generated | 3-5 exploration dimensions | Warning | +| Constraints listed | >= 0 (can be empty with justification) | Info | + +### Phase 2: Product Brief + +| Check | Criteria | Severity | +|-------|----------|----------| +| Vision statement | Clear, 1-3 sentences | Error | +| Problem statement | Specific and measurable | Error | +| Target users | >= 1 persona with needs described | Error | +| Goals defined | >= 2 measurable goals | Error | +| Success metrics 
| >= 2 quantifiable metrics | Warning | +| Scope boundaries | In-scope and out-of-scope listed | Warning | +| Multi-perspective | >= 2 CLI perspectives synthesized | Info | + +### Phase 3: Requirements (PRD) + +| Check | Criteria | Severity | +|-------|----------|----------| +| Functional requirements | >= 3 with REQ-NNN IDs | Error | +| Acceptance criteria | Every requirement has >= 1 criterion | Error | +| MoSCoW priority | Every requirement tagged | Error | +| Non-functional requirements | >= 1 (performance, security, etc.) | Warning | +| User stories | >= 1 per Must-have requirement | Warning | +| Traceability | Requirements trace to product brief goals | Warning | + +### Phase 4: Architecture + +| Check | Criteria | Severity | +|-------|----------|----------| +| Component diagram | Present (Mermaid or ASCII) | Error | +| Tech stack specified | Languages, frameworks, key libraries | Error | +| ADR present | >= 1 Architecture Decision Record | Error | +| ADR has alternatives | Each ADR lists >= 2 options considered | Warning | +| Integration points | External systems/APIs identified | Warning | +| Data model | Key entities and relationships described | Warning | +| Codebase mapping | Mapped to existing code (if has_codebase) | Info | + +### Phase 5: Epics & Stories + +| Check | Criteria | Severity | +|-------|----------|----------| +| Epics defined | 3-7 epics with EPIC-NNN IDs | Error | +| MVP subset | >= 1 epic tagged as MVP | Error | +| Stories per epic | 2-5 stories per epic | Error | +| Story format | "As a...I want...So that..." 
pattern | Warning | +| Dependency map | Cross-epic dependencies documented | Warning | +| Estimation hints | Relative sizing (S/M/L/XL) per story | Info | +| Traceability | Stories trace to requirements | Warning | + +### Phase 6: Readiness Check + +| Check | Criteria | Severity | +|-------|----------|----------| +| All documents exist | product-brief, requirements, architecture, epics | Error | +| Frontmatter valid | All YAML frontmatter parseable and correct | Error | +| Cross-references valid | All document links resolve | Error | +| Overall score >= 60% | Weighted average across 4 dimensions | Error | +| No unresolved Errors | All Error-severity issues addressed | Error | +| Summary generated | spec-summary.md created | Warning | + +--- + +## Cross-Document Validation + +Checks performed during Phase 6 across all documents: + +### Completeness Matrix + +``` +Product Brief goals -> Requirements (each goal has >= 1 requirement) +Requirements -> Architecture (each Must requirement has design coverage) +Requirements -> Epics (each Must requirement appears in >= 1 story) +Architecture ADRs -> Epics (tech choices reflected in implementation stories) +``` + +### Consistency Checks + +| Check | Documents | Rule | +|-------|-----------|------| +| Terminology | All | Same term used consistently (no synonyms for same concept) | +| User personas | Brief + PRD + Epics | Same user names/roles throughout | +| Scope | Brief + PRD | PRD scope does not exceed brief scope | +| Tech stack | Architecture + Epics | Stories reference correct technologies | + +### Traceability Matrix Format + +```markdown +| Goal | Requirements | Architecture | Epics | +|------|-------------|--------------|-------| +| G-001: ... | REQ-001, REQ-002 | ADR-001 | EPIC-001 | +| G-002: ... 
| REQ-003 | ADR-002 | EPIC-002, EPIC-003 | +``` + +--- + +## Issue Classification + +### Error (Must Fix) + +- Missing required document or section +- Broken cross-references +- Contradictory information between documents +- Empty acceptance criteria on Must-have requirements +- No MVP subset defined in epics + +### Warning (Should Fix) + +- Vague acceptance criteria +- Missing non-functional requirements +- No success metrics defined +- Incomplete traceability +- Missing architecture review notes + +### Info (Nice to Have) + +- Could add more detailed personas +- Consider additional ADR alternatives +- Story estimation hints missing +- Mermaid diagrams could be more detailed diff --git a/.claude/skills_lib/team-lifecycle-v2/specs/team-config.json b/.claude/skills_lib/team-lifecycle-v2/specs/team-config.json new file mode 100644 index 00000000..306db7c0 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/specs/team-config.json @@ -0,0 +1,156 @@ +{ + "team_name": "team-lifecycle", + "team_display_name": "Team Lifecycle", + "description": "Unified team skill covering spec-to-dev-to-test full lifecycle", + "version": "2.0.0", + "architecture": "folder-based", + "role_structure": "roles/{name}/role.md + roles/{name}/commands/*.md", + + "roles": { + "coordinator": { + "task_prefix": null, + "responsibility": "Pipeline orchestration, requirement clarification, task chain creation, message dispatch", + "message_types": ["plan_approved", "plan_revision", "task_unblocked", "fix_required", "error", "shutdown"] + }, + "analyst": { + "task_prefix": "RESEARCH", + "responsibility": "Seed analysis, codebase exploration, multi-dimensional context gathering", + "message_types": ["research_ready", "research_progress", "error"] + }, + "writer": { + "task_prefix": "DRAFT", + "responsibility": "Product Brief / PRD / Architecture / Epics document generation", + "message_types": ["draft_ready", "draft_revision", "impl_progress", "error"] + }, + "discussant": { + "task_prefix": 
"DISCUSS", + "responsibility": "Multi-perspective critique, consensus building, conflict escalation", + "message_types": ["discussion_ready", "discussion_blocked", "impl_progress", "error"] + }, + "planner": { + "task_prefix": "PLAN", + "responsibility": "Multi-angle code exploration, structured implementation planning", + "message_types": ["plan_ready", "plan_revision", "impl_progress", "error"] + }, + "executor": { + "task_prefix": "IMPL", + "responsibility": "Code implementation following approved plans", + "message_types": ["impl_complete", "impl_progress", "error"] + }, + "tester": { + "task_prefix": "TEST", + "responsibility": "Adaptive test-fix cycles, progressive testing, quality gates", + "message_types": ["test_result", "impl_progress", "fix_required", "error"] + }, + "reviewer": { + "task_prefix": "REVIEW", + "additional_prefixes": ["QUALITY"], + "responsibility": "Code review (REVIEW-*) + Spec quality validation (QUALITY-*)", + "message_types": ["review_result", "quality_result", "fix_required", "error"] + }, + "explorer": { + "task_prefix": "EXPLORE", + "responsibility": "Code search, pattern discovery, dependency tracing. Service role โ€” on-demand by coordinator", + "role_type": "service", + "message_types": ["explore_ready", "explore_progress", "task_failed"] + }, + "architect": { + "task_prefix": "ARCH", + "responsibility": "Architecture assessment, tech feasibility, design pattern review. 
Consulting role — on-demand by coordinator", + "role_type": "consulting", + "consultation_modes": ["spec-review", "plan-review", "code-review", "consult", "feasibility"], + "message_types": ["arch_ready", "arch_concern", "arch_progress", "error"] + }, + "fe-developer": { + "task_prefix": "DEV-FE", + "responsibility": "Frontend component/page implementation, design token consumption, responsive UI", + "role_type": "frontend-pipeline", + "message_types": ["dev_fe_complete", "dev_fe_progress", "error"] + }, + "fe-qa": { + "task_prefix": "QA-FE", + "responsibility": "5-dimension frontend review (quality, a11y, design compliance, UX, pre-delivery), GC loop", + "role_type": "frontend-pipeline", + "message_types": ["qa_fe_passed", "qa_fe_result", "fix_required", "error"] + } + }, + + "pipelines": { + "spec-only": { + "description": "Specification pipeline: research → discuss → draft → quality", + "task_chain": [ + "RESEARCH-001", + "DISCUSS-001", "DRAFT-001", "DISCUSS-002", + "DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004", + "DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006" + ] + }, + "impl-only": { + "description": "Implementation pipeline: plan → implement → test + review", + "task_chain": ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"] + }, + "full-lifecycle": { + "description": "Full lifecycle: spec pipeline → implementation pipeline", + "task_chain": "spec-only + impl-only (PLAN-001 blockedBy DISCUSS-006)" + }, + "fe-only": { + "description": "Frontend-only pipeline: plan → frontend dev → frontend QA", + "task_chain": ["PLAN-001", "DEV-FE-001", "QA-FE-001"], + "gc_loop": { "max_rounds": 2, "convergence": "score >= 8 && critical === 0" } + }, + "fullstack": { + "description": "Fullstack pipeline: plan → backend + frontend parallel → test + QA", + "task_chain": ["PLAN-001", "IMPL-001||DEV-FE-001", "TEST-001||QA-FE-001", "REVIEW-001"], + "sync_points": ["REVIEW-001"] + }, + "full-lifecycle-fe": { + "description": "Full lifecycle 
with frontend: spec → plan → backend + frontend → test + QA", + "task_chain": "spec-only + fullstack (PLAN-001 blockedBy DISCUSS-006)" + } + }, + + "frontend_detection": { + "keywords": ["component", "page", "UI", "前端", "frontend", "CSS", "HTML", "React", "Vue", "Tailwind", "组件", "页面", "样式", "layout", "responsive", "Svelte", "Next.js", "Nuxt", "shadcn", "设计系统", "design system"], + "file_patterns": ["*.tsx", "*.jsx", "*.vue", "*.svelte", "*.css", "*.scss", "*.html"], + "routing_rules": { + "frontend_only": "All tasks match frontend keywords, no backend/API mentions", + "fullstack": "Mix of frontend and backend tasks", + "backend_only": "No frontend keywords detected (default impl-only)" + } + }, + + "ui_ux_pro_max": { + "skill_name": "ui-ux-pro-max", + "install_command": "/plugin install ui-ux-pro-max@ui-ux-pro-max-skill", + "invocation": "Skill(skill=\"ui-ux-pro-max\", args=\"...\")", + "domains": ["product", "style", "typography", "color", "landing", "chart", "ux", "web"], + "stacks": ["html-tailwind", "react", "nextjs", "vue", "svelte", "shadcn", "swiftui", "react-native", "flutter"], + "fallback": "llm-general-knowledge", + "design_intelligence_chain": ["analyst → design-intelligence.json", "architect → design-tokens.json", "fe-developer → tokens.css", "fe-qa → anti-pattern audit"] + }, + + "shared_memory": { + "file": "shared-memory.json", + "schema": { + "design_intelligence": "From analyst via ui-ux-pro-max", + "design_token_registry": "From architect, consumed by fe-developer/fe-qa", + "component_inventory": "From fe-developer, list of implemented components", + "style_decisions": "Accumulated design decisions", + "qa_history": "From fe-qa, audit trail", + "industry_context": "Industry + strictness config" + } + }, + + "collaboration_patterns": ["CP-1", "CP-2", "CP-4", "CP-5", "CP-6", "CP-10"], + + "session_dirs": { + "base": ".workflow/.team/TLS-{slug}-{YYYY-MM-DD}/", + "spec": "spec/", + "discussions": "discussions/", 
+ "plan": "plan/", + "explorations": "explorations/", + "architecture": "architecture/", + "wisdom": "wisdom/", + "messages": ".workflow/.team-msg/{team-name}/" + } +} diff --git a/.claude/skills_lib/team-lifecycle-v2/templates/architecture-doc.md b/.claude/skills_lib/team-lifecycle-v2/templates/architecture-doc.md new file mode 100644 index 00000000..5106de03 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/templates/architecture-doc.md @@ -0,0 +1,254 @@ +# Architecture Document Template (Directory Structure) + +Template for generating architecture decision documents as a directory of individual ADR files in Phase 4. + +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 4 (Architecture) | Generate `architecture/` directory from requirements analysis | +| Output Location | `{workDir}/architecture/` | + +## Output Structure + +``` +{workDir}/architecture/ +├── _index.md # Overview, components, tech stack, data model, security +├── ADR-001-{slug}.md # Individual Architecture Decision Record +├── ADR-002-{slug}.md +└── ... 
+``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 4 +document_type: architecture-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md + - ../requirements/_index.md +--- + +# Architecture: {product_name} + +{executive_summary - high-level architecture approach and key decisions} + +## System Overview + +### Architecture Style +{description of chosen architecture style: microservices, monolith, serverless, etc.} + +### System Context Diagram + +```mermaid +C4Context + title System Context Diagram + Person(user, "User", "Primary user") + System(system, "{product_name}", "Core system") + System_Ext(ext1, "{external_system}", "{description}") + Rel(user, system, "Uses") + Rel(system, ext1, "Integrates with") +``` + +## Component Architecture + +### Component Diagram + +```mermaid +graph TD + subgraph "{product_name}" + A[Component A] --> B[Component B] + B --> C[Component C] + A --> D[Component D] + end + B --> E[External Service] +``` + +### Component Descriptions + +| Component | Responsibility | Technology | Dependencies | +|-----------|---------------|------------|--------------| +| {component_name} | {what it does} | {tech stack} | {depends on} | + +## Technology Stack + +### Core Technologies + +| Layer | Technology | Version | Rationale | +|-------|-----------|---------|-----------| +| Frontend | {technology} | {version} | {why chosen} | +| Backend | {technology} | {version} | {why chosen} | +| Database | {technology} | {version} | {why chosen} | +| Infrastructure | {technology} | {version} | {why chosen} | + +### Key Libraries & Frameworks + +| Library | Purpose | License | +|---------|---------|---------| +| {library_name} | {purpose} | {license} | + +## Architecture Decision Records + +| ADR | Title | Status | Key Choice | +|-----|-------|--------|------------| +| [ADR-001](ADR-001-{slug}.md) | {title} | Accepted | {one-line summary} | +| 
[ADR-002](ADR-002-{slug}.md) | {title} | Accepted | {one-line summary} | +| [ADR-003](ADR-003-{slug}.md) | {title} | Proposed | {one-line summary} | + +## Data Architecture + +### Data Model + +```mermaid +erDiagram + ENTITY_A ||--o{ ENTITY_B : "has many" + ENTITY_A { + string id PK + string name + datetime created_at + } + ENTITY_B { + string id PK + string entity_a_id FK + string value + } +``` + +### Data Storage Strategy + +| Data Type | Storage | Retention | Backup | +|-----------|---------|-----------|--------| +| {type} | {storage solution} | {retention policy} | {backup strategy} | + +## API Design + +### API Overview + +| Endpoint | Method | Purpose | Auth | +|----------|--------|---------|------| +| {/api/resource} | {GET/POST/etc} | {purpose} | {auth type} | + +## Security Architecture + +### Security Controls + +| Control | Implementation | Requirement | +|---------|---------------|-------------| +| Authentication | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | +| Authorization | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | +| Data Protection | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) | + +## Infrastructure & Deployment + +### Deployment Architecture + +{description of deployment model: containers, serverless, VMs, etc.} + +### Environment Strategy + +| Environment | Purpose | Configuration | +|-------------|---------|---------------| +| Development | Local development | {config} | +| Staging | Pre-production testing | {config} | +| Production | Live system | {config} | + +## Codebase Integration + +{if has_codebase is true:} + +### Existing Code Mapping + +| New Component | Existing Module | Integration Type | Notes | +|--------------|----------------|------------------|-------| +| {component} | {existing module path} | Extend/Replace/New | {notes} | + +### Migration Notes +{any migration considerations for existing code} + +## Quality Attributes + +| Attribute | Target | Measurement 
| ADR Reference | +|-----------|--------|-------------|---------------| +| Performance | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | +| Scalability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | +| Reliability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) | + +## Risks & Mitigations + +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| {risk} | High/Medium/Low | High/Medium/Low | {mitigation approach} | + +## Open Questions + +- [ ] {architectural question 1} +- [ ] {architectural question 2} + +## References + +- Derived from: [Requirements](../requirements/_index.md), [Product Brief](../product-brief.md) +- Next: [Epics & Stories](../epics/_index.md) +``` + +--- + +## Template: ADR-NNN-{slug}.md (Individual Architecture Decision Record) + +```markdown +--- +id: ADR-{NNN} +status: Accepted +traces_to: [{REQ-NNN}, {NFR-X-NNN}] +date: {timestamp} +--- + +# ADR-{NNN}: {decision_title} + +## Context + +{what is the situation that motivates this decision} + +## Decision + +{what is the chosen approach} + +## Alternatives Considered + +| Option | Pros | Cons | +|--------|------|------| +| {option_1 - chosen} | {pros} | {cons} | +| {option_2} | {pros} | {cons} | +| {option_3} | {pros} | {cons} | + +## Consequences + +- **Positive**: {positive outcomes} +- **Negative**: {tradeoffs accepted} +- **Risks**: {risks to monitor} + +## Traces + +- **Requirements**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md), [NFR-X-{NNN}](../requirements/NFR-X-{NNN}-{slug}.md) +- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{NNN}` | Auto-increment | 
ADR/requirement number | +| `{slug}` | Auto-generated | Kebab-case from decision title | +| `{has_codebase}` | spec-config.json | Whether existing codebase exists | diff --git a/.claude/skills_lib/team-lifecycle-v2/templates/epics-template.md b/.claude/skills_lib/team-lifecycle-v2/templates/epics-template.md new file mode 100644 index 00000000..939d933c --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/templates/epics-template.md @@ -0,0 +1,196 @@ +# Epics & Stories Template (Directory Structure) + +Template for generating epic/story breakdown as a directory of individual Epic files in Phase 5. + +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 5 (Epics & Stories) | Generate `epics/` directory from requirements decomposition | +| Output Location | `{workDir}/epics/` | + +## Output Structure + +``` +{workDir}/epics/ +├── _index.md # Overview table + dependency map + MVP scope + execution order +├── EPIC-001-{slug}.md # Individual Epic with its Stories +├── EPIC-002-{slug}.md +└── ... +``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 5 +document_type: epics-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md + - ../requirements/_index.md + - ../architecture/_index.md +--- + +# Epics & Stories: {product_name} + +{executive_summary - overview of epic structure and MVP scope} + +## Epic Overview + +| Epic ID | Title | Priority | MVP | Stories | Est. 
Size | +|---------|-------|----------|-----|---------|-----------| +| [EPIC-001](EPIC-001-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} | +| [EPIC-002](EPIC-002-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} | +| [EPIC-003](EPIC-003-{slug}.md) | {title} | Should | No | {n} | {S/M/L/XL} | + +## Dependency Map + +```mermaid +graph LR + EPIC-001 --> EPIC-002 + EPIC-001 --> EPIC-003 + EPIC-002 --> EPIC-004 + EPIC-003 --> EPIC-005 +``` + +### Dependency Notes +{explanation of why these dependencies exist and suggested execution order} + +### Recommended Execution Order +1. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - foundational} +2. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - depends on #1} +3. ... + +## MVP Scope + +### MVP Epics +{list of epics included in MVP with justification, linking to each} + +### MVP Definition of Done +- [ ] {MVP completion criterion 1} +- [ ] {MVP completion criterion 2} +- [ ] {MVP completion criterion 3} + +## Traceability Matrix + +| Requirement | Epic | Stories | Architecture | +|-------------|------|---------|--------------| +| [REQ-001](../requirements/REQ-001-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-001, STORY-001-002 | [ADR-001](../architecture/ADR-001-{slug}.md) | +| [REQ-002](../requirements/REQ-002-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-003 | Component B | +| [REQ-003](../requirements/REQ-003-{slug}.md) | [EPIC-002](EPIC-002-{slug}.md) | STORY-002-001 | [ADR-002](../architecture/ADR-002-{slug}.md) | + +## Estimation Summary + +| Size | Meaning | Count | +|------|---------|-------| +| S | Small - well-understood, minimal risk | {n} | +| M | Medium - some complexity, moderate risk | {n} | +| L | Large - significant complexity, should consider splitting | {n} | +| XL | Extra Large - high complexity, must split before implementation | {n} | + +## Risks & Considerations + +| Risk | Affected Epics | Mitigation | +|------|---------------|------------| +| {risk description} | 
[EPIC-{NNN}](EPIC-{NNN}-{slug}.md) | {mitigation} | + +## Open Questions + +- [ ] {question about scope or implementation 1} +- [ ] {question about scope or implementation 2} + +## References + +- Derived from: [Requirements](../requirements/_index.md), [Architecture](../architecture/_index.md) +- Handoff to: execution workflows (lite-plan, plan, req-plan) +``` + +--- + +## Template: EPIC-NNN-{slug}.md (Individual Epic) + +```markdown +--- +id: EPIC-{NNN} +priority: {Must|Should|Could} +mvp: {true|false} +size: {S|M|L|XL} +requirements: [REQ-{NNN}] +architecture: [ADR-{NNN}] +dependencies: [EPIC-{NNN}] +status: draft +--- + +# EPIC-{NNN}: {epic_title} + +**Priority**: {Must|Should|Could} +**MVP**: {Yes|No} +**Estimated Size**: {S|M|L|XL} + +## Description + +{detailed epic description} + +## Requirements + +- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title} +- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title} + +## Architecture + +- [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md): {title} +- Component: {component_name} + +## Dependencies + +- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (blocking): {reason} +- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (soft): {reason} + +## Stories + +### STORY-{EPIC}-001: {story_title} + +**User Story**: As a {persona}, I want to {action} so that {benefit}. + +**Acceptance Criteria**: +- [ ] {criterion 1} +- [ ] {criterion 2} +- [ ] {criterion 3} + +**Size**: {S|M|L|XL} +**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md) + +--- + +### STORY-{EPIC}-002: {story_title} + +**User Story**: As a {persona}, I want to {action} so that {benefit}. 
+ +**Acceptance Criteria**: +- [ ] {criterion 1} +- [ ] {criterion 2} + +**Size**: {S|M|L|XL} +**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{EPIC}` | Auto-increment | Epic number (3 digits) | +| `{NNN}` | Auto-increment | Story/requirement number | +| `{slug}` | Auto-generated | Kebab-case from epic/story title | +| `{S\|M\|L\|XL}` | CLI analysis | Relative size estimate | diff --git a/.claude/skills_lib/team-lifecycle-v2/templates/product-brief.md b/.claude/skills_lib/team-lifecycle-v2/templates/product-brief.md new file mode 100644 index 00000000..ffbdf437 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/templates/product-brief.md @@ -0,0 +1,133 @@ +# Product Brief Template + +Template for generating product brief documents in Phase 2. 
+ +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 2 (Product Brief) | Generate product-brief.md from multi-CLI analysis | +| Output Location | `{workDir}/product-brief.md` | + +--- + +## Template + +```markdown +--- +session_id: {session_id} +phase: 2 +document_type: product-brief +status: draft +generated_at: {timestamp} +stepsCompleted: [] +version: 1 +dependencies: + - spec-config.json +--- + +# Product Brief: {product_name} + +{executive_summary - 2-3 sentences capturing the essence of the product/feature} + +## Vision + +{vision_statement - clear, aspirational 1-3 sentence statement of what success looks like} + +## Problem Statement + +### Current Situation +{description of the current state and pain points} + +### Impact +{quantified impact of the problem - who is affected, how much, how often} + +## Target Users + +{for each user persona:} + +### {Persona Name} +- **Role**: {user's role/context} +- **Needs**: {primary needs related to this product} +- **Pain Points**: {current frustrations} +- **Success Criteria**: {what success looks like for this user} + +## Goals & Success Metrics + +| Goal ID | Goal | Success Metric | Target | +|---------|------|----------------|--------| +| G-001 | {goal description} | {measurable metric} | {specific target} | +| G-002 | {goal description} | {measurable metric} | {specific target} | + +## Scope + +### In Scope +- {feature/capability 1} +- {feature/capability 2} +- {feature/capability 3} + +### Out of Scope +- {explicitly excluded item 1} +- {explicitly excluded item 2} + +### Assumptions +- {key assumption 1} +- {key assumption 2} + +## Competitive Landscape + +| Aspect | Current State | Proposed Solution | Advantage | +|--------|--------------|-------------------|-----------| +| {aspect} | {how it's done now} | {our approach} | {differentiator} | + +## Constraints & Dependencies + +### Technical Constraints +- {constraint 1} +- {constraint 2} + +### Business Constraints +- {constraint 1} + +### 
Dependencies +- {external dependency 1} +- {external dependency 2} + +## Multi-Perspective Synthesis + +### Product Perspective +{summary of product/market analysis findings} + +### Technical Perspective +{summary of technical feasibility and constraints} + +### User Perspective +{summary of user journey and UX considerations} + +### Convergent Themes +{themes where all perspectives agree} + +### Conflicting Views +{areas where perspectives differ, with notes on resolution approach} + +## Open Questions + +- [ ] {unresolved question 1} +- [ ] {unresolved question 2} + +## References + +- Derived from: [spec-config.json](spec-config.json) +- Next: [Requirements PRD](requirements.md) +``` + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | Seed analysis | Product/feature name | +| `{executive_summary}` | CLI synthesis | 2-3 sentence summary | +| `{vision_statement}` | CLI product perspective | Aspirational vision | +| All `{...}` fields | CLI analysis outputs | Filled from multi-perspective analysis | diff --git a/.claude/skills_lib/team-lifecycle-v2/templates/requirements-prd.md b/.claude/skills_lib/team-lifecycle-v2/templates/requirements-prd.md new file mode 100644 index 00000000..0b1dbf28 --- /dev/null +++ b/.claude/skills_lib/team-lifecycle-v2/templates/requirements-prd.md @@ -0,0 +1,224 @@ +# Requirements PRD Template (Directory Structure) + +Template for generating Product Requirements Document as a directory of individual requirement files in Phase 3. 
+ +## Usage Context + +| Phase | Usage | +|-------|-------| +| Phase 3 (Requirements) | Generate `requirements/` directory from product brief expansion | +| Output Location | `{workDir}/requirements/` | + +## Output Structure + +``` +{workDir}/requirements/ +├── _index.md # Summary + MoSCoW table + traceability matrix + links +├── REQ-001-{slug}.md # Individual functional requirement +├── REQ-002-{slug}.md +├── NFR-P-001-{slug}.md # Non-functional: Performance +├── NFR-S-001-{slug}.md # Non-functional: Security +├── NFR-SC-001-{slug}.md # Non-functional: Scalability +├── NFR-U-001-{slug}.md # Non-functional: Usability +└── ... +``` + +--- + +## Template: _index.md + +```markdown +--- +session_id: {session_id} +phase: 3 +document_type: requirements-index +status: draft +generated_at: {timestamp} +version: 1 +dependencies: + - ../spec-config.json + - ../product-brief.md +--- + +# Requirements: {product_name} + +{executive_summary - brief overview of what this PRD covers and key decisions} + +## Requirement Summary + +| Priority | Count | Coverage | +|----------|-------|----------| +| Must Have | {n} | {description of must-have scope} | +| Should Have | {n} | {description of should-have scope} | +| Could Have | {n} | {description of could-have scope} | +| Won't Have | {n} | {description of explicitly excluded} | + +## Functional Requirements + +| ID | Title | Priority | Traces To | +|----|-------|----------|-----------| +| [REQ-001](REQ-001-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) | +| [REQ-002](REQ-002-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) | +| [REQ-003](REQ-003-{slug}.md) | {title} | Should | [G-002](../product-brief.md#goals--success-metrics) | + +## Non-Functional Requirements + +### Performance + +| ID | Title | Target | +|----|-------|--------| +| [NFR-P-001](NFR-P-001-{slug}.md) | {title} | {target value} | + +### Security + +| ID 
| Title | Standard | +|----|-------|----------| +| [NFR-S-001](NFR-S-001-{slug}.md) | {title} | {standard/framework} | + +### Scalability + +| ID | Title | Target | +|----|-------|--------| +| [NFR-SC-001](NFR-SC-001-{slug}.md) | {title} | {target value} | + +### Usability + +| ID | Title | Target | +|----|-------|--------| +| [NFR-U-001](NFR-U-001-{slug}.md) | {title} | {target value} | + +## Data Requirements + +### Data Entities + +| Entity | Description | Key Attributes | +|--------|-------------|----------------| +| {entity_name} | {description} | {attr1, attr2, attr3} | + +### Data Flows + +{description of key data flows, optionally with Mermaid diagram} + +## Integration Requirements + +| System | Direction | Protocol | Data Format | Notes | +|--------|-----------|----------|-------------|-------| +| {system_name} | Inbound/Outbound/Both | {REST/gRPC/etc} | {JSON/XML/etc} | {notes} | + +## Constraints & Assumptions + +### Constraints +- {technical or business constraint 1} +- {technical or business constraint 2} + +### Assumptions +- {assumption 1 - must be validated} +- {assumption 2 - must be validated} + +## Priority Rationale + +{explanation of MoSCoW prioritization decisions, especially for Should/Could boundaries} + +## Traceability Matrix + +| Goal | Requirements | +|------|-------------| +| G-001 | [REQ-001](REQ-001-{slug}.md), [REQ-002](REQ-002-{slug}.md), [NFR-P-001](NFR-P-001-{slug}.md) | +| G-002 | [REQ-003](REQ-003-{slug}.md), [NFR-S-001](NFR-S-001-{slug}.md) | + +## Open Questions + +- [ ] {unresolved question 1} +- [ ] {unresolved question 2} + +## References + +- Derived from: [Product Brief](../product-brief.md) +- Next: [Architecture](../architecture/_index.md) +``` + +--- + +## Template: REQ-NNN-{slug}.md (Individual Functional Requirement) + +```markdown +--- +id: REQ-{NNN} +type: functional +priority: {Must|Should|Could|Won't} +traces_to: [G-{NNN}] +status: draft +--- + +# REQ-{NNN}: {requirement_title} + +**Priority**: 
{Must|Should|Could|Won't} + +## Description + +{detailed requirement description} + +## User Story + +As a {persona}, I want to {action} so that {benefit}. + +## Acceptance Criteria + +- [ ] {specific, testable criterion 1} +- [ ] {specific, testable criterion 2} +- [ ] {specific, testable criterion 3} + +## Traces + +- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics) +- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable) +- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5) +``` + +--- + +## Template: NFR-{type}-NNN-{slug}.md (Individual Non-Functional Requirement) + +```markdown +--- +id: NFR-{type}-{NNN} +type: non-functional +category: {Performance|Security|Scalability|Usability} +priority: {Must|Should|Could} +status: draft +--- + +# NFR-{type}-{NNN}: {requirement_title} + +**Category**: {Performance|Security|Scalability|Usability} +**Priority**: {Must|Should|Could} + +## Requirement + +{detailed requirement description} + +## Metric & Target + +| Metric | Target | Measurement Method | +|--------|--------|--------------------| +| {metric} | {target value} | {how measured} | + +## Traces + +- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics) +- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable) +``` + +--- + +## Variable Descriptions + +| Variable | Source | Description | +|----------|--------|-------------| +| `{session_id}` | spec-config.json | Session identifier | +| `{timestamp}` | Runtime | ISO8601 generation timestamp | +| `{product_name}` | product-brief.md | Product/feature name | +| `{NNN}` | Auto-increment | Requirement number (zero-padded 3 digits) | +| `{slug}` | Auto-generated | Kebab-case from requirement title | +| `{type}` | Category | P (Performance), S (Security), SC (Scalability), U (Usability) | +| `{Must\|Should\|Could\|Won't}` | User input / auto | MoSCoW priority tag | diff --git 
a/.codex/skills/analyze-with-file/SKILL.md b/.codex/skills/analyze-with-file/SKILL.md index cada240f..41e3e1b1 100644 --- a/.codex/skills/analyze-with-file/SKILL.md +++ b/.codex/skills/analyze-with-file/SKILL.md @@ -541,6 +541,21 @@ Update discussion.md with results from each discussion round: - Organized by analysis dimension - Links between rounds showing understanding evolution +##### Step 3.4: Intent Drift Check (every round โ‰ฅ 2) + +Re-read "User Intent" / "Analysis Context" from discussion.md header. For each original intent item, check coverage status: + +```markdown +#### Intent Coverage Check +- โœ… Intent 1: [addressed in Round N] +- ๐Ÿ”„ Intent 2: [in-progress, current focus] +- โš ๏ธ Intent 3: [implicitly absorbed by X โ€” needs explicit confirmation] +- โŒ Intent 4: [not yet discussed] +``` + +- If any item is "implicitly absorbed" (โš ๏ธ), note it explicitly in discussion.md โ€” absorbed โ‰  addressed +- If any item is โŒ after 3+ rounds, surface it to the user in the next round's presentation + **Success Criteria**: - User feedback processed for each round - discussion.md updated with all discussion rounds @@ -553,6 +568,30 @@ Update discussion.md with results from each discussion round: **Objective**: Consolidate insights from all discussion rounds, generate conclusions and recommendations. +##### Step 4.0: Intent Coverage Verification (MANDATORY before synthesis) + +Re-read all original user intent / analysis context items from discussion.md header. 
For EACH item, determine coverage status: + +- **โœ… Addressed**: Explicitly discussed and concluded with clear design/recommendation +- **๐Ÿ”€ Transformed**: Original intent evolved into a different solution โ€” document the transformation chain +- **โš ๏ธ Absorbed**: Implicitly covered by a broader solution โ€” flag for explicit confirmation +- **โŒ Missed**: Not discussed โ€” MUST be either addressed now or explicitly listed as out-of-scope with reason + +Write "Intent Coverage Matrix" to discussion.md: + +```markdown +### Intent Coverage Matrix +| # | Original Intent | Status | Where Addressed | Notes | +|---|----------------|--------|-----------------|-------| +| 1 | [intent text] | โœ… Addressed | Round N, Conclusion #M | | +| 2 | [intent text] | ๐Ÿ”€ Transformed | Round N โ†’ Round M | Original: X โ†’ Final: Y | +| 3 | [intent text] | โŒ Missed | โ€” | Reason for omission | +``` + +**Gate**: If any item is โŒ Missed, MUST either: +- (a) Add a dedicated discussion round to address it before continuing, OR +- (b) Explicitly confirm with user that it is intentionally deferred + ##### Step 4.1: Consolidate Insights ```javascript @@ -579,6 +618,9 @@ const conclusions = { ], decision_trail: [ // Consolidated decisions from all phases { round: 1, decision: '...', context: '...', options_considered: [...], chosen: '...', reason: '...', impact: '...' } + ], + intent_coverage: [ // From Step 4.0 + { intent: '...', status: 'addressed|transformed|absorbed|missed', where_addressed: '...', notes: '...' 
} ] } Write(`${sessionFolder}/conclusions.json`, JSON.stringify(conclusions, null, 2)) @@ -676,6 +718,7 @@ if (!autoYes) { **Success Criteria**: - conclusions.json created with complete synthesis - discussion.md finalized with conclusions and decision trail +- **Intent Coverage Matrix** verified โ€” all original intents accounted for (no โŒ Missed without explicit user deferral) - User offered meaningful next step options - **Complete decision trail** documented and traceable from initial scoping to final conclusions diff --git a/ccw/src/cli.ts b/ccw/src/cli.ts index cc7662fe..fb364b78 100644 --- a/ccw/src/cli.ts +++ b/ccw/src/cli.ts @@ -12,6 +12,7 @@ import { cliCommand } from './commands/cli.js'; import { memoryCommand } from './commands/memory.js'; import { coreMemoryCommand } from './commands/core-memory.js'; import { hookCommand } from './commands/hook.js'; +import { specCommand } from './commands/spec.js'; import { issueCommand } from './commands/issue.js'; import { workflowCommand } from './commands/workflow.js'; import { loopCommand } from './commands/loop.js'; @@ -297,6 +298,16 @@ export function run(argv: string[]): void { .option('--limit ', 'Max entries to return (for project-state)') .action((subcommand, args, options) => hookCommand(subcommand, args, options)); + // Spec command - Project spec management (load/list/rebuild/status/init) + program + .command('spec [subcommand] [args...]') + .description('Project spec management for conventions and guidelines') + .option('--dimension ', 'Target dimension: specs, roadmap, changelog, personal') + .option('--context ', 'Context text for keyword extraction (CLI mode)') + .option('--stdin', 'Read input from stdin (Hook mode)') + .option('--json', 'Output as JSON') + .action((subcommand, args, options) => specCommand(subcommand, args, options)); + // Issue command - Issue lifecycle management with JSONL task tracking program .command('issue [subcommand] [args...]') diff --git a/ccw/src/commands/spec.ts 
b/ccw/src/commands/spec.ts new file mode 100644 index 00000000..60e2cb5c --- /dev/null +++ b/ccw/src/commands/spec.ts @@ -0,0 +1,439 @@ +/** + * Spec Command - CLI endpoint for project spec management + * + * Provides 6 subcommands: load, list, rebuild, status, init, help. + * The load subcommand supports dual-mode: CLI direct and Hook stdin. + * + * Pattern: cli.ts register -> commands/spec.ts dispatch -> tools/spec-*.ts execute + */ + +import chalk from 'chalk'; + +interface SpecOptions { + dimension?: string; + context?: string; + stdin?: boolean; + json?: boolean; +} + +interface StdinData { + session_id?: string; + cwd?: string; + user_prompt?: string; + prompt?: string; + [key: string]: unknown; +} + +/** + * Read JSON data from stdin (for Claude Code hooks). + */ +async function readStdin(): Promise { + return new Promise((resolve) => { + let data = ''; + process.stdin.setEncoding('utf8'); + process.stdin.on('readable', () => { + let chunk; + while ((chunk = process.stdin.read()) !== null) { + data += chunk; + } + }); + process.stdin.on('end', () => { + resolve(data); + }); + if (process.stdin.isTTY) { + resolve(''); + } + }); +} + +/** + * Get project path from hook data or current working directory. + */ +function getProjectPath(hookCwd?: string): string { + return hookCwd || process.cwd(); +} + +// ============================================================================ +// Subcommand Actions +// ============================================================================ + +/** + * Load action - load specs matching dimension/keywords. + * + * CLI mode: --dimension and --context options, outputs formatted markdown. + * Hook mode: --stdin reads JSON {session_id, cwd, user_prompt}, outputs JSON {continue, systemMessage}. 
+ */ +async function loadAction(options: SpecOptions): Promise { + const { stdin, dimension, context } = options; + let projectPath: string; + let stdinData: StdinData | undefined; + + if (stdin) { + try { + const raw = await readStdin(); + if (raw) { + stdinData = JSON.parse(raw) as StdinData; + projectPath = getProjectPath(stdinData.cwd); + } else { + projectPath = getProjectPath(); + } + } catch { + // Malformed stdin - output continue and exit + process.stdout.write(JSON.stringify({ continue: true })); + process.exit(0); + } + } else { + projectPath = getProjectPath(); + } + + try { + const { loadSpecs } = await import('../tools/spec-loader.js'); + + const keywords = context + ? context.split(/[\s,]+/).filter(Boolean) + : undefined; + + const result = await loadSpecs({ + projectPath, + dimension: dimension as 'specs' | 'roadmap' | 'changelog' | 'personal' | undefined, + keywords, + outputFormat: stdin ? 'hook' : 'cli', + stdinData, + }); + + if (stdin) { + process.stdout.write(result.content); + process.exit(0); + } + + console.log(result.content); + } catch (error) { + if (stdin) { + process.stdout.write(JSON.stringify({ continue: true })); + process.exit(0); + } + console.error(chalk.red(`Error: ${(error as Error).message}`)); + process.exit(1); + } +} + +/** + * List action - show all indexed specs with readMode and keyword info. + */ +async function listAction(options: SpecOptions): Promise { + const { dimension, json } = options; + const projectPath = getProjectPath(); + + try { + const { getDimensionIndex, SPEC_DIMENSIONS } = await import( + '../tools/spec-index-builder.js' + ); + + const dimensions = dimension ? 
[dimension] : [...SPEC_DIMENSIONS]; + const allEntries: Array<{ + dimension: string; + title: string; + readMode: string; + priority: string; + keywords: string[]; + file: string; + }> = []; + + for (const dim of dimensions) { + const index = await getDimensionIndex(projectPath, dim); + for (const entry of index.entries) { + allEntries.push({ + dimension: entry.dimension, + title: entry.title, + readMode: entry.readMode, + priority: entry.priority, + keywords: entry.keywords, + file: entry.file, + }); + } + } + + if (json) { + console.log(JSON.stringify(allEntries, null, 2)); + return; + } + + if (allEntries.length === 0) { + console.log(chalk.gray('No specs found. Run "ccw spec init" to create seed documents.')); + return; + } + + console.log(chalk.bold(`Specs (${allEntries.length} total)\n`)); + + let currentDim = ''; + for (const entry of allEntries) { + if (entry.dimension !== currentDim) { + currentDim = entry.dimension; + console.log(chalk.yellow(` [${currentDim}]`)); + } + + const modeTag = + entry.readMode === 'required' + ? chalk.red('required') + : chalk.gray('optional'); + const priTag = chalk.cyan(entry.priority); + const kw = entry.keywords.length > 0 + ? chalk.gray(` (${entry.keywords.join(', ')})`) + : ''; + + console.log(` ${entry.title} ${modeTag} ${priTag}${kw}`); + } + } catch (error) { + console.error(chalk.red(`Error: ${(error as Error).message}`)); + process.exit(1); + } +} + +/** + * Rebuild action - force re-scan of MD files and rebuild .spec-index cache. 
+ */ +async function rebuildAction(options: SpecOptions): Promise { + const { dimension } = options; + const projectPath = getProjectPath(); + + try { + const { buildAllIndices, buildDimensionIndex, getIndexPath, SPEC_DIMENSIONS } = + await import('../tools/spec-index-builder.js'); + const { writeFileSync } = await import('fs'); + + if (dimension) { + console.log(chalk.cyan(`Rebuilding index for: ${dimension}`)); + const index = await buildDimensionIndex(projectPath, dimension); + const indexPath = getIndexPath(projectPath, dimension); + writeFileSync(indexPath, JSON.stringify(index, null, 2), 'utf-8'); + console.log( + chalk.green(` ${dimension}: ${index.entries.length} entries indexed`) + ); + } else { + console.log(chalk.cyan('Rebuilding all spec indices...')); + await buildAllIndices(projectPath); + // Show stats + const { readCachedIndex } = await import('../tools/spec-index-builder.js'); + for (const dim of SPEC_DIMENSIONS) { + const cached = readCachedIndex(projectPath, dim); + const count = cached?.entries.length ?? 0; + console.log(chalk.green(` ${dim}: ${count} entries indexed`)); + } + } + + console.log(chalk.green('\nIndex rebuild complete.')); + } catch (error) { + console.error(chalk.red(`Error: ${(error as Error).message}`)); + process.exit(1); + } +} + +/** + * Status action - show per-dimension stats. 
+ */ +async function statusAction(options: SpecOptions): Promise { + const { json } = options; + const projectPath = getProjectPath(); + + try { + const { readCachedIndex, SPEC_DIMENSIONS, getDimensionDir } = await import( + '../tools/spec-index-builder.js' + ); + const { existsSync } = await import('fs'); + + const stats: Array<{ + dimension: string; + total: number; + required: number; + optional: number; + indexed: boolean; + built_at: string | null; + dirExists: boolean; + }> = []; + + for (const dim of SPEC_DIMENSIONS) { + const cached = readCachedIndex(projectPath, dim); + const dimDir = getDimensionDir(projectPath, dim); + const dirExists = existsSync(dimDir); + + const entries = cached?.entries ?? []; + const required = entries.filter(e => e.readMode === 'required').length; + const optional = entries.filter(e => e.readMode === 'optional').length; + + stats.push({ + dimension: dim, + total: entries.length, + required, + optional, + indexed: cached !== null, + built_at: cached?.built_at ?? null, + dirExists, + }); + } + + if (json) { + console.log(JSON.stringify(stats, null, 2)); + return; + } + + console.log(chalk.bold('Spec System Status\n')); + + for (const s of stats) { + const dirStatus = s.dirExists ? chalk.green('OK') : chalk.red('missing'); + const indexStatus = s.indexed ? chalk.green('cached') : chalk.yellow('not built'); + const builtAt = s.built_at + ? chalk.gray(` (${new Date(s.built_at).toLocaleString()})`) + : ''; + + console.log(chalk.yellow(` [${s.dimension}]`)); + console.log(` Directory: ${dirStatus}`); + console.log(` Index: ${indexStatus}${builtAt}`); + console.log( + ` Specs: ${s.total} total (${chalk.red(String(s.required))} required, ${chalk.gray(String(s.optional))} optional)` + ); + } + } catch (error) { + console.error(chalk.red(`Error: ${(error as Error).message}`)); + process.exit(1); + } +} + +/** + * Init action - create directory structure and seed documents. 
+ */ +async function initAction(): Promise { + const projectPath = getProjectPath(); + + try { + const { initSpecSystem } = await import('../tools/spec-init.js'); + + console.log(chalk.cyan('Initializing spec system...')); + const result = initSpecSystem(projectPath); + + if (result.directories.length > 0) { + console.log(chalk.green('\nDirectories created:')); + for (const dir of result.directories) { + console.log(chalk.gray(` + ${dir}`)); + } + } + + if (result.created.length > 0) { + console.log(chalk.green('\nSeed files created:')); + for (const file of result.created) { + console.log(chalk.gray(` + ${file}`)); + } + } + + if (result.skipped.length > 0) { + console.log(chalk.gray('\nSkipped (already exist):')); + for (const file of result.skipped) { + console.log(chalk.gray(` - ${file}`)); + } + } + + if (result.directories.length === 0 && result.created.length === 0) { + console.log(chalk.gray('\nSpec system already initialized. No changes made.')); + } else { + console.log(chalk.green('\nSpec system initialized. Run "ccw spec rebuild" to build index.')); + } + } catch (error) { + console.error(chalk.red(`Error: ${(error as Error).message}`)); + process.exit(1); + } +} + +/** + * Show help for spec command. 
+ */ +function showHelp(): void { + console.log(` +${chalk.bold('ccw spec')} - Project spec management + +${chalk.bold('USAGE')} + ccw spec [options] + +${chalk.bold('SUBCOMMANDS')} + load Load specs matching dimension/keywords (CLI or Hook mode) + list List all indexed specs with readMode and keyword info + rebuild Force re-scan of MD files and rebuild .spec-index cache + status Show per-dimension stats (total, required, optional, freshness) + init Create 4-dimension directory structure with seed MD documents + +${chalk.bold('OPTIONS')} + --dimension Target dimension: specs, roadmap, changelog, personal + --context Context text for keyword extraction (CLI mode) + --stdin Read input from stdin (Hook mode) + --json Output as JSON + +${chalk.bold('EXAMPLES')} + ${chalk.gray('# Initialize spec system:')} + ccw spec init + + ${chalk.gray('# Load specs for a topic (CLI mode):')} + ccw spec load --dimension specs --context "auth jwt security" + + ${chalk.gray('# Load all matching specs:')} + ccw spec load --context "implement user authentication" + + ${chalk.gray('# Use as Claude Code hook (settings.json):')} + ccw spec load --stdin + + ${chalk.gray('# List all specs:')} + ccw spec list + + ${chalk.gray('# List specs for a specific dimension:')} + ccw spec list --dimension specs + + ${chalk.gray('# Rebuild index cache:')} + ccw spec rebuild + + ${chalk.gray('# Rebuild single dimension:')} + ccw spec rebuild --dimension roadmap + + ${chalk.gray('# Check system status:')} + ccw spec status +`); +} + +// ============================================================================ +// Main Command Dispatcher +// ============================================================================ + +/** + * Main spec command handler. + * + * Dispatches to subcommand action functions following the same switch + * pattern used by hookCommand in commands/hook.ts. 
+ */ +export async function specCommand( + subcommand: string, + args: string | string[], + options: SpecOptions +): Promise { + switch (subcommand) { + case 'load': + await loadAction(options); + break; + case 'list': + case 'ls': + await listAction(options); + break; + case 'rebuild': + await rebuildAction(options); + break; + case 'status': + await statusAction(options); + break; + case 'init': + await initAction(); + break; + case 'help': + case undefined: + showHelp(); + break; + default: + console.error(chalk.red(`Unknown subcommand: ${subcommand}`)); + console.error(chalk.gray('Run "ccw spec help" for usage information')); + process.exit(1); + } +} diff --git a/ccw/src/tools/spec-index-builder.ts b/ccw/src/tools/spec-index-builder.ts index 92e23a3b..5f206e20 100644 --- a/ccw/src/tools/spec-index-builder.ts +++ b/ccw/src/tools/spec-index-builder.ts @@ -165,6 +165,10 @@ export async function buildDimensionIndex( const entry = parseSpecFile(filePath, dimension, projectPath); if (entry) { entries.push(entry); + } else { + process.stderr.write( + `[spec-index-builder] Skipping malformed spec file: ${file}\n` + ); } }