mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-28 09:23:08 +08:00
feat: Add templates for epics, product brief, and requirements PRD
- Introduced a comprehensive template for generating epics and stories, including an index and individual epic files. - Created a product brief template to outline product vision, problem statements, and target users. - Developed a requirements PRD template to structure functional and non-functional requirements, including traceability and prioritization. - Implemented ast-grep processors for JavaScript and TypeScript to extract relationships such as imports and inheritance. - Added corresponding patterns for JavaScript and TypeScript to support relationship extraction. - Established comparison tests to validate the accuracy of relationship extraction between tree-sitter and ast-grep methods.
This commit is contained in:
441
.claude/skills/team-lifecycle-v2/SKILL.md
Normal file
441
.claude/skills/team-lifecycle-v2/SKILL.md
Normal file
@@ -0,0 +1,441 @@
|
|||||||
|
---
|
||||||
|
name: team-lifecycle
|
||||||
|
description: Unified team skill for full lifecycle - spec/impl/test. All roles invoke this skill with --role arg for role-specific execution. Triggers on "team lifecycle".
|
||||||
|
allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), TodoWrite(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*)
|
||||||
|
---
|
||||||
|
|
||||||
|
# Team Lifecycle
|
||||||
|
|
||||||
|
Unified team skill covering specification, implementation, testing, and review. All team members invoke this skill with `--role=xxx` to route to role-specific execution.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
```
|
||||||
|
┌───────────────────────────────────────────────────┐
|
||||||
|
│ Skill(skill="team-lifecycle") │
|
||||||
|
│ args="任务描述" 或 args="--role=xxx" │
|
||||||
|
└───────────────────┬───────────────────────────────┘
|
||||||
|
│ Role Router
|
||||||
|
│
|
||||||
|
┌──── --role present? ────┐
|
||||||
|
│ NO │ YES
|
||||||
|
↓ ↓
|
||||||
|
Orchestration Mode Role Dispatch
|
||||||
|
(auto → coordinator) (route to role.md)
|
||||||
|
│
|
||||||
|
┌────┴────┬───────┬───────┬───────┬───────┬───────┬───────┐
|
||||||
|
↓ ↓ ↓ ↓ ↓ ↓ ↓ ↓
|
||||||
|
┌──────────┐┌───────┐┌──────┐┌──────────┐┌───────┐┌────────┐┌──────┐┌────────┐
|
||||||
|
│coordinator││analyst││writer││discussant││planner││executor││tester││reviewer│
|
||||||
|
│ roles/ ││roles/ ││roles/││ roles/ ││roles/ ││ roles/ ││roles/││ roles/ │
|
||||||
|
└──────────┘└───────┘└──────┘└──────────┘└───────┘└────────┘└──────┘└────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Command Architecture
|
||||||
|
|
||||||
|
Each role is organized as a folder with a `role.md` orchestrator and optional `commands/` for delegation:
|
||||||
|
|
||||||
|
```
|
||||||
|
roles/
|
||||||
|
├── coordinator/
|
||||||
|
│ ├── role.md # Orchestrator (Phase 1/5 inline, Phase 2-4 delegate)
|
||||||
|
│ └── commands/
|
||||||
|
│ ├── dispatch.md # Task chain creation (3 modes)
|
||||||
|
│ └── monitor.md # Coordination loop + message routing
|
||||||
|
├── analyst/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
├── writer/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
│ └── generate-doc.md # Multi-CLI document generation (4 doc types)
|
||||||
|
├── discussant/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
│ └── critique.md # Multi-perspective CLI critique
|
||||||
|
├── planner/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
│ └── explore.md # Multi-angle codebase exploration
|
||||||
|
├── executor/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
│ └── implement.md # Multi-backend code implementation
|
||||||
|
├── tester/
|
||||||
|
│ ├── role.md
|
||||||
|
│ └── commands/
|
||||||
|
│ └── validate.md # Test-fix cycle
|
||||||
|
└── reviewer/
|
||||||
|
├── role.md
|
||||||
|
└── commands/
|
||||||
|
├── code-review.md # 4-dimension code review
|
||||||
|
└── spec-quality.md # 5-dimension spec quality check
|
||||||
|
```
|
||||||
|
|
||||||
|
**Design principle**: role.md keeps Phase 1 (Task Discovery) and Phase 5 (Report) inline. Phases 2-4 either stay inline (simple logic) or delegate to `commands/*.md` via `Read("commands/xxx.md")` when they involve subagent delegation, CLI fan-out, or complex strategies.
|
||||||
|
|
||||||
|
**Command files** are self-contained: each includes Strategy, Execution Steps, and Error Handling. Any subagent can `Read()` a command file and execute it independently.
|
||||||
|
|
||||||
|
## Role Router
|
||||||
|
|
||||||
|
### Input Parsing
|
||||||
|
|
||||||
|
Parse `$ARGUMENTS` to extract `--role`:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const args = "$ARGUMENTS"
|
||||||
|
const roleMatch = args.match(/--role[=\s]+(\w+)/)
|
||||||
|
const teamName = args.match(/--team[=\s]+([\w-]+)/)?.[1] || "lifecycle"
|
||||||
|
|
||||||
|
if (!roleMatch) {
|
||||||
|
// No --role: Orchestration Mode → auto route to coordinator
|
||||||
|
// See "Orchestration Mode" section below
|
||||||
|
}
|
||||||
|
|
||||||
|
const role = roleMatch ? roleMatch[1] : "coordinator"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Role Dispatch
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const VALID_ROLES = {
|
||||||
|
"coordinator": { file: "roles/coordinator/role.md", prefix: null },
|
||||||
|
"analyst": { file: "roles/analyst/role.md", prefix: "RESEARCH" },
|
||||||
|
"writer": { file: "roles/writer/role.md", prefix: "DRAFT" },
|
||||||
|
"discussant": { file: "roles/discussant/role.md", prefix: "DISCUSS" },
|
||||||
|
"planner": { file: "roles/planner/role.md", prefix: "PLAN" },
|
||||||
|
"executor": { file: "roles/executor/role.md", prefix: "IMPL" },
|
||||||
|
"tester": { file: "roles/tester/role.md", prefix: "TEST" },
|
||||||
|
"reviewer": { file: "roles/reviewer/role.md", prefix: ["REVIEW", "QUALITY"] }
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!VALID_ROLES[role]) {
|
||||||
|
throw new Error(`Unknown role: ${role}. Available: ${Object.keys(VALID_ROLES).join(', ')}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read and execute role-specific logic
|
||||||
|
Read(VALID_ROLES[role].file)
|
||||||
|
// → Execute the 5-phase process defined in that file
|
||||||
|
```
|
||||||
|
|
||||||
|
### Orchestration Mode(无参数触发)
|
||||||
|
|
||||||
|
当不带 `--role` 调用时,自动进入 coordinator 编排模式。用户只需传任务描述即可触发完整流程。
|
||||||
|
|
||||||
|
**触发方式**:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// 用户调用(无 --role)— 自动路由到 coordinator
|
||||||
|
Skill(skill="team-lifecycle", args="任务描述")
|
||||||
|
|
||||||
|
// 等价于
|
||||||
|
Skill(skill="team-lifecycle", args="--role=coordinator 任务描述")
|
||||||
|
```
|
||||||
|
|
||||||
|
**流程**:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (!roleMatch) {
|
||||||
|
// Orchestration Mode: 自动路由到 coordinator
|
||||||
|
// coordinator role.md 将执行:
|
||||||
|
// Phase 1: 需求澄清
|
||||||
|
// Phase 2: TeamCreate + spawn 所有 worker agents
|
||||||
|
// 每个 agent prompt 中包含 Skill(args="--role=xxx") 回调
|
||||||
|
// Phase 3: 创建任务链
|
||||||
|
// Phase 4: 监控协调循环
|
||||||
|
// Phase 5: 结果汇报
|
||||||
|
|
||||||
|
const role = "coordinator"
|
||||||
|
Read(VALID_ROLES[role].file)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**完整调用链**:
|
||||||
|
|
||||||
|
```
|
||||||
|
用户: Skill(args="任务描述")
|
||||||
|
│
|
||||||
|
├─ SKILL.md: 无 --role → Orchestration Mode → 读取 coordinator role.md
|
||||||
|
│
|
||||||
|
├─ coordinator Phase 2: TeamCreate + spawn workers
|
||||||
|
│ 每个 worker prompt 中包含 Skill(args="--role=xxx") 回调
|
||||||
|
│
|
||||||
|
├─ coordinator Phase 3: dispatch 任务链
|
||||||
|
│
|
||||||
|
├─ worker 收到任务 → Skill(args="--role=xxx") → SKILL.md Role Router → role.md
|
||||||
|
│ 每个 worker 自动获取:
|
||||||
|
│ ├─ 角色定义 (role.md: identity, boundaries, message types)
|
||||||
|
│ ├─ 可用命令 (commands/*.md)
|
||||||
|
│ └─ 执行逻辑 (5-phase process)
|
||||||
|
│
|
||||||
|
└─ coordinator Phase 4-5: 监控 → 结果汇报
|
||||||
|
```
|
||||||
|
|
||||||
|
### Available Roles
|
||||||
|
|
||||||
|
| Role | Task Prefix | Responsibility | Role File |
|
||||||
|
|------|-------------|----------------|-----------|
|
||||||
|
| `coordinator` | N/A | Pipeline orchestration, requirement clarification, task dispatch | [roles/coordinator/role.md](roles/coordinator/role.md) |
|
||||||
|
| `analyst` | RESEARCH-* | Seed analysis, codebase exploration, context gathering | [roles/analyst/role.md](roles/analyst/role.md) |
|
||||||
|
| `writer` | DRAFT-* | Product Brief / PRD / Architecture / Epics generation | [roles/writer/role.md](roles/writer/role.md) |
|
||||||
|
| `discussant` | DISCUSS-* | Multi-perspective critique, consensus building | [roles/discussant/role.md](roles/discussant/role.md) |
|
||||||
|
| `planner` | PLAN-* | Multi-angle exploration, structured planning | [roles/planner/role.md](roles/planner/role.md) |
|
||||||
|
| `executor` | IMPL-* | Code implementation following plans | [roles/executor/role.md](roles/executor/role.md) |
|
||||||
|
| `tester` | TEST-* | Adaptive test-fix cycles, quality gates | [roles/tester/role.md](roles/tester/role.md) |
|
||||||
|
| `reviewer` | `REVIEW-*` + `QUALITY-*` | Code review + Spec quality validation (auto-switch by prefix) | [roles/reviewer/role.md](roles/reviewer/role.md) |
|
||||||
|
|
||||||
|
## Shared Infrastructure
|
||||||
|
|
||||||
|
### Role Isolation Rules
|
||||||
|
|
||||||
|
**核心原则**: 每个角色仅能执行自己职责范围内的工作。
|
||||||
|
|
||||||
|
#### Output Tagging(强制)
|
||||||
|
|
||||||
|
所有角色的输出必须带 `[role_name]` 标识前缀:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// SendMessage — content 和 summary 都必须带标识
|
||||||
|
SendMessage({
|
||||||
|
content: `## [${role}] ...`,
|
||||||
|
summary: `[${role}] ...`
|
||||||
|
})
|
||||||
|
|
||||||
|
// team_msg — summary 必须带标识
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
summary: `[${role}] ...`
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Coordinator 隔离
|
||||||
|
|
||||||
|
| 允许 | 禁止 |
|
||||||
|
|------|------|
|
||||||
|
| 需求澄清 (AskUserQuestion) | ❌ 直接编写/修改代码 |
|
||||||
|
| 创建任务链 (TaskCreate) | ❌ 调用实现类 subagent (code-developer 等) |
|
||||||
|
| 分发任务给 worker | ❌ 直接执行分析/测试/审查 |
|
||||||
|
| 监控进度 (消息总线) | ❌ 绕过 worker 自行完成任务 |
|
||||||
|
| 汇报结果给用户 | ❌ 修改源代码或产物文件 |
|
||||||
|
|
||||||
|
#### Worker 隔离
|
||||||
|
|
||||||
|
| 允许 | 禁止 |
|
||||||
|
|------|------|
|
||||||
|
| 处理自己前缀的任务 | ❌ 处理其他角色前缀的任务 |
|
||||||
|
| SendMessage 给 coordinator | ❌ 直接与其他 worker 通信 |
|
||||||
|
| 使用 Toolbox 中声明的工具 | ❌ 为其他角色创建任务 (TaskCreate) |
|
||||||
|
| 委派给 commands/ 中的命令 | ❌ 修改不属于本职责的资源 |
|
||||||
|
|
||||||
|
### Message Bus (All Roles)
|
||||||
|
|
||||||
|
Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: role,
|
||||||
|
to: "coordinator",
|
||||||
|
type: "<type>",
|
||||||
|
summary: `[${role}] <summary>`,
|
||||||
|
ref: "<file_path>"
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
**Message types by role**:
|
||||||
|
|
||||||
|
| Role | Types |
|
||||||
|
|------|-------|
|
||||||
|
| coordinator | `plan_approved`, `plan_revision`, `task_unblocked`, `fix_required`, `error`, `shutdown` |
|
||||||
|
| analyst | `research_ready`, `research_progress`, `error` |
|
||||||
|
| writer | `draft_ready`, `draft_revision`, `impl_progress`, `error` |
|
||||||
|
| discussant | `discussion_ready`, `discussion_blocked`, `impl_progress`, `error` |
|
||||||
|
| planner | `plan_ready`, `plan_revision`, `impl_progress`, `error` |
|
||||||
|
| executor | `impl_complete`, `impl_progress`, `error` |
|
||||||
|
| tester | `test_result`, `impl_progress`, `fix_required`, `error` |
|
||||||
|
| reviewer | `review_result`, `quality_result`, `fix_required`, `error` |
|
||||||
|
|
||||||
|
### CLI Fallback
|
||||||
|
|
||||||
|
When `mcp__ccw-tools__team_msg` MCP is unavailable:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash(`ccw team log --team "${teamName}" --from "${role}" --to "coordinator" --type "<type>" --summary "[${role}] <summary>" --json`)
|
||||||
|
Bash(`ccw team list --team "${teamName}" --last 10 --json`)
|
||||||
|
Bash(`ccw team status --team "${teamName}" --json`)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Task Lifecycle (All Worker Roles)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Standard task lifecycle every worker role follows
|
||||||
|
// Phase 1: Discovery
|
||||||
|
const tasks = TaskList()
|
||||||
|
const prefixes = Array.isArray(VALID_ROLES[role].prefix) ? VALID_ROLES[role].prefix : [VALID_ROLES[role].prefix]
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
prefixes.some(p => t.subject.startsWith(`${p}-`)) &&
|
||||||
|
t.owner === role &&
|
||||||
|
t.status === 'pending' &&
|
||||||
|
t.blockedBy.length === 0
|
||||||
|
)
|
||||||
|
if (myTasks.length === 0) return // idle
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
|
||||||
|
// Phase 1.5: Resume Artifact Check (防止重复产出)
|
||||||
|
// 当 session 从暂停恢复时,coordinator 已将 in_progress 任务重置为 pending。
|
||||||
|
// Worker 在开始工作前,必须检查该任务的输出产物是否已存在。
|
||||||
|
// 如果产物已存在且内容完整:
|
||||||
|
// → 直接跳到 Phase 5 报告完成(避免覆盖上次成果)
|
||||||
|
// 如果产物存在但不完整(如文件为空或缺少关键 section):
|
||||||
|
// → 正常执行 Phase 2-4(基于已有产物继续,而非从头开始)
|
||||||
|
// 如果产物不存在:
|
||||||
|
// → 正常执行 Phase 2-4
|
||||||
|
//
|
||||||
|
// 每个 role 检查自己的输出路径:
|
||||||
|
// analyst → sessionFolder/spec/discovery-context.json
|
||||||
|
// writer → sessionFolder/spec/{product-brief.md | requirements/ | architecture/ | epics/}
|
||||||
|
// discussant → sessionFolder/discussions/discuss-NNN-*.md
|
||||||
|
// planner → sessionFolder/plan/plan.json
|
||||||
|
// executor → git diff (已提交的代码变更)
|
||||||
|
// tester → test pass rate
|
||||||
|
// reviewer → sessionFolder/spec/readiness-report.md (quality) 或 review findings (code)
|
||||||
|
|
||||||
|
// Phase 2-4: Role-specific (see roles/{role}/role.md)
|
||||||
|
|
||||||
|
// Phase 5: Report + Loop — 所有输出必须带 [role] 标识
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: role, to: "coordinator", type: "...", summary: `[${role}] ...` })
|
||||||
|
SendMessage({ type: "message", recipient: "coordinator", content: `## [${role}] ...`, summary: `[${role}] ...` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
// Check for next task → back to Phase 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Three-Mode Pipeline
|
||||||
|
|
||||||
|
```
|
||||||
|
Spec-only:
|
||||||
|
RESEARCH-001 → DISCUSS-001 → DRAFT-001 → DISCUSS-002
|
||||||
|
→ DRAFT-002 → DISCUSS-003 → DRAFT-003 → DISCUSS-004
|
||||||
|
→ DRAFT-004 → DISCUSS-005 → QUALITY-001 → DISCUSS-006
|
||||||
|
|
||||||
|
Impl-only:
|
||||||
|
PLAN-001 → IMPL-001 → TEST-001 + REVIEW-001
|
||||||
|
|
||||||
|
Full-lifecycle:
|
||||||
|
[Spec pipeline] → PLAN-001(blockedBy: DISCUSS-006) → IMPL-001 → TEST-001 + REVIEW-001
|
||||||
|
```
|
||||||
|
|
||||||
|
## Unified Session Directory
|
||||||
|
|
||||||
|
All session artifacts are stored under a single session folder:
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.team/TLS-{slug}-{YYYY-MM-DD}/
|
||||||
|
├── team-session.json # Session state (status, progress, completed_tasks)
|
||||||
|
├── spec/ # Spec artifacts (analyst, writer, reviewer output)
|
||||||
|
│ ├── spec-config.json
|
||||||
|
│ ├── discovery-context.json
|
||||||
|
│ ├── product-brief.md
|
||||||
|
│ ├── requirements/ # _index.md + REQ-*.md + NFR-*.md
|
||||||
|
│ ├── architecture/ # _index.md + ADR-*.md
|
||||||
|
│ ├── epics/ # _index.md + EPIC-*.md
|
||||||
|
│ ├── readiness-report.md
|
||||||
|
│ └── spec-summary.md
|
||||||
|
├── discussions/ # Discussion records (discussant output)
|
||||||
|
│ └── discuss-001..006.md
|
||||||
|
└── plan/ # Plan artifacts (planner output)
|
||||||
|
├── exploration-{angle}.json
|
||||||
|
├── explorations-manifest.json
|
||||||
|
├── plan.json
|
||||||
|
└── .task/
|
||||||
|
└── TASK-*.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Messages remain at `.workflow/.team-msg/{team-name}/` (unchanged).
|
||||||
|
|
||||||
|
## Session Resume
|
||||||
|
|
||||||
|
Coordinator supports `--resume` / `--continue` flags to resume interrupted sessions:
|
||||||
|
|
||||||
|
1. Scans `.workflow/.team/TLS-*/team-session.json` for `status: "active"` or `"paused"`
|
||||||
|
2. Multiple matches → `AskUserQuestion` for user selection
|
||||||
|
3. **Audit TaskList** — 获取当前所有任务的真实状态
|
||||||
|
4. **Reconcile** — 双向同步 session.completed_tasks ↔ TaskList 状态:
|
||||||
|
- session 已完成但 TaskList 未标记 → 修正 TaskList 为 completed
|
||||||
|
- TaskList 已完成但 session 未记录 → 补录到 session
|
||||||
|
- in_progress 状态(暂停中断)→ 重置为 pending
|
||||||
|
5. Determines remaining pipeline from reconciled state
|
||||||
|
6. Rebuilds team (`TeamCreate` + worker spawns for needed roles only)
|
||||||
|
7. Creates missing tasks with correct `blockedBy` dependency chain (uses `TASK_METADATA` lookup)
|
||||||
|
8. Verifies dependency chain integrity for existing tasks
|
||||||
|
9. Updates session file with reconciled state + current_phase
|
||||||
|
10. **Kick** — 向首个可执行任务的 worker 发送 `task_unblocked` 消息,打破 resume 死锁
|
||||||
|
11. Jumps to Phase 4 coordination loop
|
||||||
|
|
||||||
|
## Coordinator Spawn Template
|
||||||
|
|
||||||
|
When coordinator creates teammates, use this pattern:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
TeamCreate({ team_name: teamName })
|
||||||
|
|
||||||
|
// For each worker role:
|
||||||
|
Task({
|
||||||
|
subagent_type: "general-purpose",
|
||||||
|
team_name: teamName,
|
||||||
|
name: "<role_name>",
|
||||||
|
prompt: `你是 team "${teamName}" 的 <ROLE_NAME_UPPER>.
|
||||||
|
|
||||||
|
## ⚠️ 首要指令(MUST)
|
||||||
|
你的所有工作必须通过调用 Skill 获取角色定义后执行,禁止自行发挥:
|
||||||
|
Skill(skill="team-lifecycle", args="--role=<role_name>")
|
||||||
|
此调用会加载你的角色定义(role.md)、可用命令(commands/*.md)和完整执行逻辑。
|
||||||
|
|
||||||
|
当前需求: ${taskDescription}
|
||||||
|
约束: ${constraints}
|
||||||
|
Session: ${sessionFolder}
|
||||||
|
|
||||||
|
## 角色准则(强制)
|
||||||
|
- 你只能处理 <PREFIX>-* 前缀的任务,不得执行其他角色的工作
|
||||||
|
- 所有输出(SendMessage、team_msg)必须带 [<role_name>] 标识前缀
|
||||||
|
- 仅与 coordinator 通信,不得直接联系其他 worker
|
||||||
|
- 不得使用 TaskCreate 为其他角色创建任务
|
||||||
|
|
||||||
|
## 消息总线(必须)
|
||||||
|
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
|
||||||
|
|
||||||
|
## 工作流程(严格按顺序)
|
||||||
|
1. 调用 Skill(skill="team-lifecycle", args="--role=<role_name>") 获取角色定义和执行逻辑
|
||||||
|
2. 按 role.md 中的 5-Phase 流程执行(TaskList → 找到 <PREFIX>-* 任务 → 执行 → 汇报)
|
||||||
|
3. team_msg log + SendMessage 结果给 coordinator(带 [<role_name>] 标识)
|
||||||
|
4. TaskUpdate completed → 检查下一个任务 → 回到步骤 1`
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
See [roles/coordinator/role.md](roles/coordinator/role.md) for the full spawn implementation with per-role prompts.
|
||||||
|
|
||||||
|
## Shared Spec Resources
|
||||||
|
|
||||||
|
Writer 和 Reviewer 角色在 spec 模式下使用本 skill 内置的标准和模板(从 spec-generator 复制,独立维护):
|
||||||
|
|
||||||
|
| Resource | Path | Usage |
|
||||||
|
|----------|------|-------|
|
||||||
|
| Document Standards | `specs/document-standards.md` | YAML frontmatter、命名规范、内容结构 |
|
||||||
|
| Quality Gates | `specs/quality-gates.md` | Per-phase 质量门禁、评分标尺 |
|
||||||
|
| Product Brief Template | `templates/product-brief.md` | DRAFT-001 文档生成 |
|
||||||
|
| Requirements Template | `templates/requirements-prd.md` | DRAFT-002 文档生成 |
|
||||||
|
| Architecture Template | `templates/architecture-doc.md` | DRAFT-003 文档生成 |
|
||||||
|
| Epics Template | `templates/epics-template.md` | DRAFT-004 文档生成 |
|
||||||
|
|
||||||
|
> Writer 在执行每个 DRAFT-* 任务前 **必须先 Read** 对应的 template 文件和 document-standards.md。
|
||||||
|
> 从 `roles/` 子目录引用时路径为 `../../specs/` 和 `../../templates/`。
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| Unknown --role value | Error with available role list |
|
||||||
|
| Missing --role arg | Orchestration Mode → auto route to coordinator |
|
||||||
|
| Role file not found | Error with expected path (roles/{name}/role.md) |
|
||||||
|
| Command file not found | Fall back to inline execution in role.md |
|
||||||
|
| Task prefix conflict | Log warning, proceed |
|
||||||
271
.claude/skills/team-lifecycle-v2/roles/analyst/role.md
Normal file
271
.claude/skills/team-lifecycle-v2/roles/analyst/role.md
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
# Role: analyst
|
||||||
|
|
||||||
|
Seed analysis, codebase exploration, and multi-dimensional context gathering. Maps to spec-generator Phase 1 (Discovery).
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
- **Name**: `analyst`
|
||||||
|
- **Task Prefix**: `RESEARCH-*`
|
||||||
|
- **Output Tag**: `[analyst]`
|
||||||
|
- **Responsibility**: Seed Analysis → Codebase Exploration → Context Packaging → Report
|
||||||
|
- **Communication**: SendMessage to coordinator only
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process RESEARCH-* tasks
|
||||||
|
- Communicate only with coordinator
|
||||||
|
- Use Toolbox tools (ACE search, Gemini CLI)
|
||||||
|
- Generate discovery-context.json and spec-config.json
|
||||||
|
- Support file reference input (@ prefix or .md/.txt extension)
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks for other roles
|
||||||
|
- Directly contact other workers
|
||||||
|
- Modify spec documents (only create discovery-context.json and spec-config.json)
|
||||||
|
- Skip seed analysis step
|
||||||
|
- Proceed without codebase detection
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Trigger | Description |
|
||||||
|
|------|-----------|---------|-------------|
|
||||||
|
| `research_ready` | analyst → coordinator | Research complete | With discovery-context.json path and dimension summary |
|
||||||
|
| `research_progress` | analyst → coordinator | Long research progress | Intermediate progress update |
|
||||||
|
| `error` | analyst → coordinator | Unrecoverable error | Codebase access failure, CLI timeout, etc. |
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Research complete
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "analyst",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "research_ready",
|
||||||
|
summary: "[analyst] Research done: 5 exploration dimensions",
|
||||||
|
ref: `${sessionFolder}/spec/discovery-context.json`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Error report
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "analyst",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "error",
|
||||||
|
summary: "[analyst] Codebase access failed"
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Fallback
|
||||||
|
|
||||||
|
When `mcp__ccw-tools__team_msg` MCP is unavailable:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ccw team log --team "${teamName}" --from "analyst" --to "coordinator" --type "research_ready" --summary "[analyst] Research done" --ref "${sessionFolder}/spec/discovery-context.json" --json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- None (simple enough for inline execution)
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
- None
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- `ccw cli --tool gemini --mode analysis` for seed analysis
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('RESEARCH-') &&
|
||||||
|
t.owner === 'analyst' &&
|
||||||
|
t.status === 'pending' &&
|
||||||
|
t.blockedBy.length === 0
|
||||||
|
)
|
||||||
|
|
||||||
|
if (myTasks.length === 0) return // idle
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Seed Analysis
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Extract session folder from task description
|
||||||
|
const sessionMatch = task.description.match(/Session:\s*(.+)/)
|
||||||
|
const sessionFolder = sessionMatch ? sessionMatch[1].trim() : '.workflow/.team/default'
|
||||||
|
|
||||||
|
// Parse topic from task description
|
||||||
|
const topicLines = task.description.split('\n').filter(l => !l.startsWith('Session:') && !l.startsWith('输出:') && l.trim())
|
||||||
|
const rawTopic = topicLines[0] || task.subject.replace('RESEARCH-001: ', '')
|
||||||
|
|
||||||
|
// 支持文件引用输入(与 spec-generator Phase 1 一致)
|
||||||
|
const topic = (rawTopic.startsWith('@') || rawTopic.endsWith('.md') || rawTopic.endsWith('.txt'))
|
||||||
|
? Read(rawTopic.replace(/^@/, ''))
|
||||||
|
: rawTopic
|
||||||
|
|
||||||
|
// Use Gemini CLI for seed analysis
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Analyze the following topic/idea and extract structured seed information for specification generation.
|
||||||
|
TASK:
|
||||||
|
• Extract problem statement (what problem does this solve)
|
||||||
|
• Identify target users and their pain points
|
||||||
|
• Determine domain and industry context
|
||||||
|
• List constraints and assumptions
|
||||||
|
• Identify 3-5 exploration dimensions for deeper research
|
||||||
|
• Assess complexity (simple/moderate/complex)
|
||||||
|
|
||||||
|
TOPIC: ${topic}
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
CONTEXT: @**/*
|
||||||
|
EXPECTED: JSON output with fields: problem_statement, target_users[], domain, constraints[], exploration_dimensions[], complexity_assessment
|
||||||
|
CONSTRAINTS: Output as valid JSON" --tool gemini --mode analysis --rule analysis-analyze-technical-document`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
// Wait for CLI result, then parse seedAnalysis from output
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Codebase Exploration (conditional)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Check if there's an existing codebase to explore
|
||||||
|
const hasProject = Bash(`test -f package.json || test -f Cargo.toml || test -f pyproject.toml || test -f go.mod; echo $?`)
|
||||||
|
|
||||||
|
if (hasProject === '0') {
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "analyst",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "research_progress",
|
||||||
|
summary: "[analyst] 种子分析完成, 开始代码库探索"
|
||||||
|
})
|
||||||
|
|
||||||
|
// Explore codebase using ACE search
|
||||||
|
const archSearch = mcp__ace-tool__search_context({
|
||||||
|
project_root_path: projectRoot,
|
||||||
|
query: `Architecture patterns, main modules, entry points for: ${topic}`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Detect tech stack from package files
|
||||||
|
// Explore existing patterns and integration points
|
||||||
|
|
||||||
|
var codebaseContext = {
|
||||||
|
tech_stack,
|
||||||
|
architecture_patterns,
|
||||||
|
existing_conventions,
|
||||||
|
integration_points,
|
||||||
|
constraints_from_codebase: []
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var codebaseContext = null
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Context Packaging
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Generate spec-config.json
|
||||||
|
const specConfig = {
|
||||||
|
session_id: `SPEC-${topicSlug}-${dateStr}`,
|
||||||
|
topic: topic,
|
||||||
|
status: "research_complete",
|
||||||
|
complexity: seedAnalysis.complexity_assessment || "moderate",
|
||||||
|
depth: task.description.match(/讨论深度:\s*(.+)/)?.[1] || "standard",
|
||||||
|
focus_areas: seedAnalysis.exploration_dimensions || [],
|
||||||
|
mode: "interactive", // team 模式始终交互
|
||||||
|
phases_completed: ["discovery"],
|
||||||
|
created_at: new Date().toISOString(),
|
||||||
|
session_folder: sessionFolder,
|
||||||
|
discussion_depth: task.description.match(/讨论深度:\s*(.+)/)?.[1] || "standard"
|
||||||
|
}
|
||||||
|
Write(`${sessionFolder}/spec/spec-config.json`, JSON.stringify(specConfig, null, 2))
|
||||||
|
|
||||||
|
// Generate discovery-context.json
|
||||||
|
const discoveryContext = {
|
||||||
|
session_id: specConfig.session_id,
|
||||||
|
phase: 1,
|
||||||
|
document_type: "discovery-context",
|
||||||
|
status: "complete",
|
||||||
|
generated_at: new Date().toISOString(),
|
||||||
|
seed_analysis: {
|
||||||
|
problem_statement: seedAnalysis.problem_statement,
|
||||||
|
target_users: seedAnalysis.target_users,
|
||||||
|
domain: seedAnalysis.domain,
|
||||||
|
constraints: seedAnalysis.constraints,
|
||||||
|
exploration_dimensions: seedAnalysis.exploration_dimensions,
|
||||||
|
complexity: seedAnalysis.complexity_assessment
|
||||||
|
},
|
||||||
|
codebase_context: codebaseContext,
|
||||||
|
recommendations: { focus_areas: [], risks: [], open_questions: [] }
|
||||||
|
}
|
||||||
|
Write(`${sessionFolder}/spec/discovery-context.json`, JSON.stringify(discoveryContext, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const dimensionCount = discoveryContext.seed_analysis.exploration_dimensions?.length || 0
|
||||||
|
const hasCodebase = codebaseContext !== null
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log", team: teamName,
|
||||||
|
from: "analyst", to: "coordinator",
|
||||||
|
type: "research_ready",
|
||||||
|
summary: `[analyst] 研究完成: ${dimensionCount}个探索维度, ${hasCodebase ? '有' : '无'}代码库上下文, 复杂度=${specConfig.complexity}`,
|
||||||
|
  ref: `${sessionFolder}/spec/discovery-context.json`
|
||||||
|
})
|
||||||
|
|
||||||
|
SendMessage({
|
||||||
|
type: "message",
|
||||||
|
recipient: "coordinator",
|
||||||
|
  content: `## [analyst] 研究分析结果
|
||||||
|
|
||||||
|
**Task**: ${task.subject}
|
||||||
|
**复杂度**: ${specConfig.complexity}
|
||||||
|
**代码库**: ${hasCodebase ? '已检测到现有项目' : '全新项目'}
|
||||||
|
|
||||||
|
### 问题陈述
|
||||||
|
${discoveryContext.seed_analysis.problem_statement}
|
||||||
|
|
||||||
|
### 目标用户
|
||||||
|
${(discoveryContext.seed_analysis.target_users || []).map(u => '- ' + u).join('\n')}
|
||||||
|
|
||||||
|
### 探索维度
|
||||||
|
${(discoveryContext.seed_analysis.exploration_dimensions || []).map((d, i) => (i+1) + '. ' + d).join('\n')}
|
||||||
|
|
||||||
|
### 输出位置
|
||||||
|
- Config: ${sessionFolder}/spec/spec-config.json
|
||||||
|
- Context: ${sessionFolder}/spec/discovery-context.json
|
||||||
|
|
||||||
|
研究已就绪,可进入讨论轮次 DISCUSS-001。`,
|
||||||
|
summary: `[analyst] 研究就绪: ${dimensionCount}维度, ${specConfig.complexity}`
|
||||||
|
})
|
||||||
|
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
|
||||||
|
// Check for next RESEARCH task → back to Phase 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| No RESEARCH-* tasks available | Idle, wait for coordinator assignment |
|
||||||
|
| Gemini CLI analysis failure | Fallback to direct Claude analysis without CLI |
|
||||||
|
| Codebase detection failed | Continue as new project (no codebase context) |
|
||||||
|
| Session folder cannot be created | Notify coordinator, request alternative path |
|
||||||
|
| Topic too vague for analysis | Report to coordinator with clarification questions |
|
||||||
|
| Unexpected error | Log error via team_msg, report to coordinator |
|
||||||
@@ -0,0 +1,530 @@
|
|||||||
|
# Dispatch Command - Task Chain Creation
|
||||||
|
|
||||||
|
**Purpose**: Create task chains based on workflow mode (spec-only, impl-only, full-lifecycle); the execution method (sequential/parallel) controls how tasks are activated
|
||||||
|
|
||||||
|
**Invoked by**: Coordinator role.md Phase 3
|
||||||
|
|
||||||
|
**Output Tag**: `[coordinator]`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Task Chain Strategies
|
||||||
|
|
||||||
|
### Strategy 1: Spec-Only Mode (12 tasks)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (requirements.mode === "spec-only") {
  Output("[coordinator] Creating spec-only task chain (12 tasks)")

  // Spec task chain in dependency order: [task_id, predecessor ids, description].
  const SPEC_CHAIN = [
    ["req-analysis", [], "Analyze requirements and extract key features"],
    ["arch-design", ["req-analysis"], "Design system architecture"],
    ["api-design", ["arch-design"], "Design API contracts and endpoints"],
    ["data-model", ["arch-design"], "Design data models and schemas"],
    ["ui-spec", ["arch-design"], "Design UI components and user flows"],
    ["test-strategy", ["api-design", "data-model"], "Define testing strategy and test cases"],
    ["error-handling", ["api-design"], "Design error handling and recovery mechanisms"],
    ["security-review", ["api-design", "data-model"], "Review security considerations and vulnerabilities"],
    ["perf-requirements", ["arch-design"], "Define performance requirements and benchmarks"],
    ["doc-outline", ["api-design"], "Create documentation structure and outline"],
    ["review-spec", ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"], "Review all specifications for consistency"],
    ["finalize-spec", ["review-spec"], "Finalize and package all specifications"]
  ]

  for (const [taskId, deps, description] of SPEC_CHAIN) {
    const spec = {
      team_id: teamId,
      task_id: taskId,
      assigned_to: "spec-writer",
      phase: "spec",
      description: description,
      dependencies: deps,
      // The dependency-free first task starts immediately; the rest wait.
      status: deps.length === 0 ? "active" : "blocked"
    }
    // Only the entry task receives the user's raw requirements as input.
    if (taskId === "req-analysis") {
      spec.input = {
        scope: requirements.scope,
        focus: requirements.focus,
        depth: requirements.depth
      }
    }
    TaskCreate(spec)
  }

  Output("[coordinator] Spec-only task chain created (12 tasks)")
  Output("[coordinator] Starting with: req-analysis")
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Strategy 2: Impl-Only Mode (4 tasks)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (requirements.mode === "impl-only") {
  Output("[coordinator] Creating impl-only task chain (4 tasks)")

  // Impl-only mode needs pre-existing specifications; confirm before dispatch.
  const specExists = AskUserQuestion({
    question: "Implementation mode requires existing specifications. Do you have a spec file?",
    choices: ["yes", "no"]
  })
  if (specExists === "no") {
    Output("[coordinator] ERROR: impl-only mode requires existing specifications")
    Output("[coordinator] Please run spec-only mode first or use full-lifecycle mode")
    throw new Error("Missing specifications for impl-only mode")
  }

  const specFile = AskUserQuestion({
    question: "Provide path to specification file:",
    type: "text"
  })

  // Fail fast when the provided path does not resolve to readable content.
  const specContent = Read(specFile)
  if (!specContent) {
    throw new Error(`Specification file not found: ${specFile}`)
  }
  Output(`[coordinator] Using specification: ${specFile}`)

  // Linear chain: scaffold -> core -> integration -> finalize.
  const IMPL_CHAIN = [
    ["setup-scaffold", [], "Setup project scaffold and dependencies"],
    ["core-impl", ["setup-scaffold"], "Implement core functionality"],
    ["integration", ["core-impl"], "Integrate components and test"],
    ["finalize-impl", ["integration"], "Finalize implementation and documentation"]
  ]

  for (const [taskId, deps, description] of IMPL_CHAIN) {
    // Every impl task carries the spec path; the entry task also gets scope.
    const input = taskId === "setup-scaffold"
      ? { spec_file: specFile, scope: requirements.scope }
      : { spec_file: specFile }
    TaskCreate({
      team_id: teamId,
      task_id: taskId,
      assigned_to: "implementer",
      phase: "impl",
      description: description,
      dependencies: deps,
      input: input,
      status: deps.length === 0 ? "active" : "blocked"
    })
  }

  Output("[coordinator] Impl-only task chain created (4 tasks)")
  Output("[coordinator] Starting with: setup-scaffold")
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Strategy 3: Full-Lifecycle Mode (16 tasks)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (requirements.mode === "full-lifecycle") {
  Output("[coordinator] Creating full-lifecycle task chain (16 tasks)")

  // ========================================
  // SPEC PHASE (12 tasks) - identical chain to spec-only mode
  // ========================================
  const SPEC_CHAIN = [
    ["req-analysis", [], "Analyze requirements and extract key features"],
    ["arch-design", ["req-analysis"], "Design system architecture"],
    ["api-design", ["arch-design"], "Design API contracts and endpoints"],
    ["data-model", ["arch-design"], "Design data models and schemas"],
    ["ui-spec", ["arch-design"], "Design UI components and user flows"],
    ["test-strategy", ["api-design", "data-model"], "Define testing strategy and test cases"],
    ["error-handling", ["api-design"], "Design error handling and recovery mechanisms"],
    ["security-review", ["api-design", "data-model"], "Review security considerations and vulnerabilities"],
    ["perf-requirements", ["arch-design"], "Define performance requirements and benchmarks"],
    ["doc-outline", ["api-design"], "Create documentation structure and outline"],
    ["review-spec", ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"], "Review all specifications for consistency"],
    ["finalize-spec", ["review-spec"], "Finalize and package all specifications"]
  ]

  for (const [taskId, deps, description] of SPEC_CHAIN) {
    const spec = {
      team_id: teamId,
      task_id: taskId,
      assigned_to: "spec-writer",
      phase: "spec",
      description: description,
      dependencies: deps,
      // The dependency-free first task starts immediately; the rest wait.
      status: deps.length === 0 ? "active" : "blocked"
    }
    // Only the entry task receives the user's raw requirements as input.
    if (taskId === "req-analysis") {
      spec.input = {
        scope: requirements.scope,
        focus: requirements.focus,
        depth: requirements.depth
      }
    }
    TaskCreate(spec)
  }

  // ========================================
  // IMPL PHASE (4 tasks) - gated on finalize-spec
  // ========================================
  const IMPL_CHAIN = [
    ["setup-scaffold", ["finalize-spec"], "Setup project scaffold and dependencies"],
    ["core-impl", ["setup-scaffold"], "Implement core functionality"],
    ["integration", ["core-impl"], "Integrate components and test"],
    ["finalize-impl", ["integration"], "Finalize implementation and documentation"]
  ]

  for (const [taskId, deps, description] of IMPL_CHAIN) {
    TaskCreate({
      team_id: teamId,
      task_id: taskId,
      assigned_to: "implementer",
      phase: "impl",
      description: description,
      dependencies: deps,
      status: "blocked" // All impl tasks stay blocked until the spec phase completes
    })
  }

  Output("[coordinator] Full-lifecycle task chain created (16 tasks)")
  Output("[coordinator] Starting with: req-analysis")
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Method Handling
|
||||||
|
|
||||||
|
### Sequential Execution
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Sequential mode: the coordination loop keeps at most one task "active" and
// activates a successor only after its predecessor completes.
if (requirements.executionMethod === "sequential") {
  Output("[coordinator] Sequential execution: tasks will run one at a time")
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Parallel Execution
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Parallel mode: every dependency-free task is activated up front; the
// coordination loop activates the remainder as their dependencies complete.
if (requirements.executionMethod === "parallel") {
  Output("[coordinator] Parallel execution: independent tasks will run concurrently")

  for (const task of allTasks) {
    if (task.dependencies.length === 0) {
      // NOTE(review): activates via TaskUpdate only; kickTask in the monitor
      // doc also notifies the assigned worker - confirm workers poll "active".
      TaskUpdate(task.task_id, { status: "active" })
      Output(`[coordinator] Activated parallel task: ${task.task_id}`)
    }
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Task Metadata Reference
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Canonical task metadata: phase, dependency ids, and a short description.
// Mirrors the chains created by the dispatch strategies above.
const TASK_METADATA = {
  // --- Spec phase ---
  "req-analysis":      { phase: "spec", deps: [], description: "Analyze requirements" },
  "arch-design":       { phase: "spec", deps: ["req-analysis"], description: "Design architecture" },
  "api-design":        { phase: "spec", deps: ["arch-design"], description: "Design API contracts" },
  "data-model":        { phase: "spec", deps: ["arch-design"], description: "Design data models" },
  "ui-spec":           { phase: "spec", deps: ["arch-design"], description: "Design UI specifications" },
  "test-strategy":     { phase: "spec", deps: ["api-design", "data-model"], description: "Define test strategy" },
  "error-handling":    { phase: "spec", deps: ["api-design"], description: "Design error handling" },
  "security-review":   { phase: "spec", deps: ["api-design", "data-model"], description: "Security review" },
  "perf-requirements": { phase: "spec", deps: ["arch-design"], description: "Performance requirements" },
  "doc-outline":       { phase: "spec", deps: ["api-design"], description: "Documentation outline" },
  "review-spec":       { phase: "spec", deps: ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"], description: "Review specifications" },
  "finalize-spec":     { phase: "spec", deps: ["review-spec"], description: "Finalize specifications" },

  // --- Impl phase (gated on finalize-spec) ---
  "setup-scaffold": { phase: "impl", deps: ["finalize-spec"], description: "Setup project scaffold" },
  "core-impl":      { phase: "impl", deps: ["setup-scaffold"], description: "Core implementation" },
  "integration":    { phase: "impl", deps: ["core-impl"], description: "Integration work" },
  "finalize-impl":  { phase: "impl", deps: ["integration"], description: "Finalize implementation" }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
All outputs from this command use the `[coordinator]` tag:
|
||||||
|
|
||||||
|
```
|
||||||
|
[coordinator] Creating spec-only task chain (12 tasks)
|
||||||
|
[coordinator] Task created: req-analysis
|
||||||
|
[coordinator] Task created: arch-design
|
||||||
|
...
|
||||||
|
[coordinator] Starting with: req-analysis
|
||||||
|
```
|
||||||
@@ -0,0 +1,537 @@
|
|||||||
|
# Monitor Command - Coordination Loop
|
||||||
|
|
||||||
|
**Purpose**: Monitor task progress, route messages, and handle checkpoints
|
||||||
|
|
||||||
|
**Invoked by**: Coordinator role.md Phase 4
|
||||||
|
|
||||||
|
**Output Tag**: `[coordinator]`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Coordination Loop
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Entering coordination loop...")

let loopActive = true
let checkpointPending = false

while (loopActive) {
  // Reload state every iteration so updates made by workers are visible.
  const session = Read(sessionFile)
  const teamState = TeamGet(session.team_id)
  const allTasks = teamState.tasks

  // Drain and route incoming messages.
  const messages = TeamGetMessages(session.team_id)
  for (const message of messages) {
    Output(`[coordinator] Received message: ${message.type} from ${message.sender}`)

    switch (message.type) {
      case "task_complete":
        handleTaskComplete(message)
        break

      case "task_blocked":
        handleTaskBlocked(message)
        break

      case "discussion_needed":
        handleDiscussionNeeded(message)
        break

      case "research_complete":
        handleResearchComplete(message)
        break

      default:
        Output(`[coordinator] Unknown message type: ${message.type}`)
    }
  }

  // Terminal = completed OR skipped. Counting only "completed" would spin
  // forever after the user chooses "skip" in handleTaskBlocked, because a
  // skipped task never becomes completed.
  const terminalTasks = allTasks.filter(t => t.status === "completed" || t.status === "skipped")
  if (terminalTasks.length === allTasks.length) {
    Output("[coordinator] All tasks completed!")
    loopActive = false
    break
  }

  // Persist progress so the session survives a coordinator restart.
  session.tasks_completed = allTasks.filter(t => t.status === "completed").length
  Write(sessionFile, session)

  sleep(5000) // Poll interval: 5 seconds
}

Output("[coordinator] Coordination loop complete")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Message Handlers
|
||||||
|
|
||||||
|
### handleTaskComplete
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Handle a "task_complete" message from a worker: persist its output,
// unblock dependents whose dependencies are all met, and trigger the
// spec->impl checkpoint when the spec phase finishes.
function handleTaskComplete(message) {
  const taskId = message.task_id
  const task = TaskGet(taskId)

  Output(`[coordinator] Task completed: ${taskId}`)

  TaskUpdate(taskId, {
    status: "completed",
    completed_at: new Date().toISOString(),
    output: message.output
  })

  // Persist worker output under the session folder.
  // FIX: was a machine-specific absolute path (D:/Claude_dms3/...); a
  // workspace-relative path keeps the skill portable across machines.
  if (message.output_content) {
    const outputFile = `.workflow/.sessions/${session.session_id}/${taskId}-output.md`
    Write(outputFile, message.output_content)
    TaskUpdate(taskId, { output_file: outputFile })
    Output(`[coordinator] Output saved: ${outputFile}`)
  }

  // Unblock any blocked task that depended on this one, once ALL of its
  // dependencies are completed (a task may wait on several predecessors).
  const dependentTasks = allTasks.filter(t =>
    t.dependencies.includes(taskId) && t.status === "blocked"
  )

  Output(`[coordinator] Checking ${dependentTasks.length} dependent tasks`)

  for (const depTask of dependentTasks) {
    const allDepsMet = depTask.dependencies.every(depId => {
      const dep = TaskGet(depId)
      return dep.status === "completed"
    })

    if (allDepsMet) {
      Output(`[coordinator] Unblocking task: ${depTask.task_id}`)
      TaskUpdate(depTask.task_id, { status: "pending" })

      if (requirements.executionMethod === "sequential") {
        // Sequential mode: at most one active task at any time.
        const activeTasks = allTasks.filter(t => t.status === "active")
        if (activeTasks.length === 0) {
          kickTask(depTask.task_id)
        }
      } else {
        // Parallel mode: activate as soon as dependencies are satisfied.
        kickTask(depTask.task_id)
      }
    }
  }

  // Checkpoint: pause for user review before entering the impl phase.
  if (taskId === "finalize-spec" && requirements.mode === "full-lifecycle") {
    Output("[coordinator] Spec phase complete. Checkpoint before implementation.")
    checkpointPending = true
    handleSpecCompleteCheckpoint()
  }
}
||||||
|
|
||||||
|
// Activate a task, stamp its start time, and notify the assigned worker.
function kickTask(taskId) {
  TaskUpdate(taskId, { status: "active", started_at: new Date().toISOString() })
  Output(`[coordinator] Kicked task: ${taskId}`)

  const assignee = TaskGet(taskId).assigned_to
  TeamSendMessage({
    team_id: session.team_id,
    recipient: assignee,
    type: "task_assigned",
    task_id: taskId
  })
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### handleTaskBlocked
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Handle a "task_blocked" message: dependency blocks simply wait, ambiguity
// blocks spawn a research task, and anything else is escalated to the user.
function handleTaskBlocked(message) {
  const taskId = message.task_id
  const reason = message.reason

  Output(`[coordinator] Task blocked: ${taskId}`)
  Output(`[coordinator] Reason: ${reason}`)

  TaskUpdate(taskId, {
    status: "blocked",
    block_reason: reason
  })

  if (reason.includes("dependency")) {
    // Normal dependency block - predecessor completion will unblock it.
    Output("[coordinator] Dependency block detected. Waiting for predecessor tasks.")
    return
  }

  if (reason.includes("ambiguous") || reason.includes("unclear")) {
    Output("[coordinator] Ambiguity detected. Routing to researcher.")
    handleAmbiguityBlock(taskId, reason)
    return
  }

  // Unknown block reason - escalate to user.
  Output("[coordinator] Unknown block reason. Escalating to user.")
  const userDecision = AskUserQuestion({
    question: `Task ${taskId} is blocked: ${reason}. How to proceed?`,
    choices: [
      "retry - Retry the task",
      "skip - Skip this task",
      "abort - Abort entire workflow",
      "manual - Provide manual input"
    ]
  })

  // FIX: AskUserQuestion may return the full choice label ("retry - Retry
  // the task"), which the bare-keyword cases below could never match.
  // Normalize to the leading keyword so both return conventions work.
  const decision = String(userDecision).split(" ")[0]

  switch (decision) {
    case "retry":
      TaskUpdate(taskId, { status: "pending" })
      kickTask(taskId)
      break

    case "skip":
      TaskUpdate(taskId, { status: "skipped" })
      Output(`[coordinator] Task ${taskId} skipped by user`)
      break

    case "abort":
      Output("[coordinator] Workflow aborted by user")
      loopActive = false
      break

    case "manual":
      const manualInput = AskUserQuestion({
        question: `Provide manual input for task ${taskId}:`,
        type: "text"
      })
      TaskUpdate(taskId, {
        status: "completed",
        output: manualInput,
        completed_by: "user"
      })
      Output(`[coordinator] Task ${taskId} completed with manual input`)
      break
  }
}
|
||||||
|
|
||||||
|
// Spawn a research task for an ambiguity block and hand it to the researcher.
function handleAmbiguityBlock(taskId, reason) {
  // One research task per block event; the timestamp keeps ids unique.
  const researchTaskId = `research-${taskId}-${Date.now()}`

  const researchTask = {
    team_id: session.team_id,
    task_id: researchTaskId,
    assigned_to: "researcher",
    phase: "research",
    description: `Research ambiguity in ${taskId}: ${reason}`,
    dependencies: [],
    input: { blocked_task: taskId, ambiguity: reason },
    status: "active"
  }
  TaskCreate(researchTask)
  Output(`[coordinator] Created research task: ${researchTaskId}`)

  // Hand the new task to the researcher along with the blocking context.
  TeamSendMessage({
    team_id: session.team_id,
    recipient: "researcher",
    type: "research_requested",
    task_id: researchTaskId,
    context: { blocked_task: taskId, reason: reason }
  })
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### handleDiscussionNeeded
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Route a worker's clarification question to the user and relay the answer
// back to whichever team member asked.
function handleDiscussionNeeded(message) {
  const { task_id: taskId, question, context } = message

  Output(`[coordinator] Discussion needed for task: ${taskId}`)
  Output(`[coordinator] Question: ${question}`)

  const userResponse = AskUserQuestion({
    question: `Task ${taskId} needs clarification:\n\n${question}\n\nContext: ${context}`,
    type: "text"
  })

  TeamSendMessage({
    team_id: session.team_id,
    recipient: message.sender,
    type: "discussion_response",
    task_id: taskId,
    response: userResponse
  })

  Output(`[coordinator] User response sent to ${message.sender}`)
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### handleResearchComplete
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Record research findings and release the task that was waiting on them.
function handleResearchComplete(message) {
  const { task_id: researchTaskId, findings, blocked_task: blockedTaskId } = message

  Output(`[coordinator] Research complete: ${researchTaskId}`)
  Output(`[coordinator] Findings: ${findings}`)

  TaskUpdate(researchTaskId, { status: "completed", output: findings })

  // Only a still-blocked task needs releasing; anything else is left alone.
  const blockedTask = TaskGet(blockedTaskId)
  if (blockedTask.status !== "blocked") {
    return
  }

  TaskUpdate(blockedTaskId, { status: "pending", research_findings: findings })
  Output(`[coordinator] Unblocked task: ${blockedTaskId}`)

  // Kick only once every declared dependency has also finished.
  const ready = blockedTask.dependencies.every(depId =>
    TaskGet(depId).status === "completed"
  )
  if (ready) {
    kickTask(blockedTaskId)
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Checkpoint Handlers
|
||||||
|
|
||||||
|
### handleSpecCompleteCheckpoint
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Checkpoint after the spec phase in full-lifecycle mode: show a summary and
// let the user proceed, review, revise, or stop before implementation starts.
function handleSpecCompleteCheckpoint() {
  Output("[coordinator] ========================================")
  Output("[coordinator] SPEC PHASE COMPLETE - CHECKPOINT")
  Output("[coordinator] ========================================")

  const specOutput = Read(getTaskOutput("finalize-spec"))

  Output("[coordinator] Specification summary:")
  Output(specOutput.substring(0, 500) + "...") // Preview only; full text on "review"

  // FIX: loop instead of self-recursion - "review"/"revise" re-prompt
  // without growing the call stack on repeated selections.
  while (true) {
    const userDecision = AskUserQuestion({
      question: "Spec phase complete. Review specifications before proceeding to implementation?",
      choices: [
        "proceed - Proceed to implementation",
        "review - Review full specifications",
        "revise - Revise specifications",
        "stop - Stop here (spec-only)"
      ]
    })

    // FIX: AskUserQuestion may return the full choice label; the bare-keyword
    // cases below could never match it. Normalize to the leading keyword.
    const decision = String(userDecision).split(" ")[0]

    switch (decision) {
      case "proceed":
        Output("[coordinator] Proceeding to implementation phase")
        checkpointPending = false
        // Kick first impl task
        kickTask("setup-scaffold")
        return

      case "review":
        Output("[coordinator] Full specification:")
        Output(specOutput)
        break // Re-prompt after showing the full text

      case "revise":
        const revisionScope = AskUserQuestion({
          question: "Which tasks need revision?",
          type: "text"
        })
        Output(`[coordinator] Revision requested: ${revisionScope}`)
        // TODO: create revision tasks from revisionScope; for now just re-prompt.
        break

      case "stop":
        Output("[coordinator] Stopping at spec phase (user request)")
        loopActive = false
        return
    }
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Message Routing Tables
|
||||||
|
|
||||||
|
### Spec Phase Messages
|
||||||
|
|
||||||
|
| Message Type | Sender | Trigger | Coordinator Action |
|
||||||
|
|--------------|--------|---------|-------------------|
|
||||||
|
| `task_complete` | spec-writer | Task finished | Update session, unblock dependents, kick next |
|
||||||
|
| `task_blocked` | spec-writer | Dependency missing | Log block, wait for predecessor |
|
||||||
|
| `discussion_needed` | spec-writer | Ambiguity found | Route to user via AskUserQuestion |
|
||||||
|
| `research_requested` | spec-writer | Need external info | Create research task, assign to researcher |
|
||||||
|
| `research_complete` | researcher | Research done | Unblock original task, kick if ready |
|
||||||
|
|
||||||
|
### Impl Phase Messages
|
||||||
|
|
||||||
|
| Message Type | Sender | Trigger | Coordinator Action |
|
||||||
|
|--------------|--------|---------|-------------------|
|
||||||
|
| `task_complete` | implementer | Task finished | Update session, unblock dependents, kick next |
|
||||||
|
| `task_blocked` | implementer | Dependency missing | Log block, wait for predecessor |
|
||||||
|
| `discussion_needed` | implementer | Ambiguity found | Route to user via AskUserQuestion |
|
||||||
|
| `spec_clarification` | implementer | Spec unclear | Route to spec-writer or user |
|
||||||
|
| `test_failed` | implementer | Tests failing | Log failure, ask user to debug or retry |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Progress Tracking
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function logProgress() {
|
||||||
|
const session = Read(sessionFile)
|
||||||
|
const completedCount = session.tasks_completed
|
||||||
|
const totalCount = session.tasks_total
|
||||||
|
const percentage = Math.round((completedCount / totalCount) * 100)
|
||||||
|
|
||||||
|
Output(`[coordinator] Progress: ${completedCount}/${totalCount} tasks (${percentage}%)`)
|
||||||
|
|
||||||
|
// Log current phase
|
||||||
|
const currentPhase = session.current_phase
|
||||||
|
Output(`[coordinator] Current phase: ${currentPhase}`)
|
||||||
|
|
||||||
|
// Log active tasks
|
||||||
|
const activeTasks = allTasks.filter(t => t.status === "active")
|
||||||
|
if (activeTasks.length > 0) {
|
||||||
|
Output(`[coordinator] Active tasks: ${activeTasks.map(t => t.task_id).join(", ")}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log blocked tasks
|
||||||
|
const blockedTasks = allTasks.filter(t => t.status === "blocked")
|
||||||
|
if (blockedTasks.length > 0) {
|
||||||
|
Output(`[coordinator] Blocked tasks: ${blockedTasks.map(t => t.task_id).join(", ")}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call logProgress every 10 iterations
|
||||||
|
let iterationCount = 0
|
||||||
|
if (iterationCount % 10 === 0) {
|
||||||
|
logProgress()
|
||||||
|
}
|
||||||
|
iterationCount++
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Recovery
|
||||||
|
|
||||||
|
### Task Timeout Handling
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function checkTaskTimeouts() {
|
||||||
|
const now = new Date()
|
||||||
|
const timeoutThreshold = 30 * 60 * 1000 // 30 minutes
|
||||||
|
|
||||||
|
const activeTasks = allTasks.filter(t => t.status === "active")
|
||||||
|
|
||||||
|
for (const task of activeTasks) {
|
||||||
|
const startTime = new Date(task.started_at)
|
||||||
|
const elapsed = now - startTime
|
||||||
|
|
||||||
|
if (elapsed > timeoutThreshold) {
|
||||||
|
Output(`[coordinator] Task timeout detected: ${task.task_id}`)
|
||||||
|
Output(`[coordinator] Elapsed time: ${Math.round(elapsed / 60000)} minutes`)
|
||||||
|
|
||||||
|
const userDecision = AskUserQuestion({
|
||||||
|
question: `Task ${task.task_id} has been running for ${Math.round(elapsed / 60000)} minutes. Action?`,
|
||||||
|
choices: [
|
||||||
|
"wait - Continue waiting",
|
||||||
|
"retry - Restart task",
|
||||||
|
"skip - Skip task",
|
||||||
|
"abort - Abort workflow"
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
switch (userDecision) {
|
||||||
|
case "wait":
|
||||||
|
// Reset timeout by updating started_at
|
||||||
|
TaskUpdate(task.task_id, { started_at: new Date().toISOString() })
|
||||||
|
break
|
||||||
|
|
||||||
|
case "retry":
|
||||||
|
TaskUpdate(task.task_id, { status: "pending" })
|
||||||
|
kickTask(task.task_id)
|
||||||
|
break
|
||||||
|
|
||||||
|
case "skip":
|
||||||
|
TaskUpdate(task.task_id, { status: "skipped" })
|
||||||
|
break
|
||||||
|
|
||||||
|
case "abort":
|
||||||
|
loopActive = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call checkTaskTimeouts every iteration
|
||||||
|
checkTaskTimeouts()
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
All outputs from this command use the `[coordinator]` tag:
|
||||||
|
|
||||||
|
```
|
||||||
|
[coordinator] Entering coordination loop...
|
||||||
|
[coordinator] Received message: task_complete from spec-writer
|
||||||
|
[coordinator] Task completed: req-analysis
|
||||||
|
[coordinator] Checking 1 dependent tasks
|
||||||
|
[coordinator] Unblocking task: arch-design
|
||||||
|
[coordinator] Kicked task: arch-design
|
||||||
|
[coordinator] Progress: 1/12 tasks (8%)
|
||||||
|
```
|
||||||
647
.claude/skills/team-lifecycle-v2/roles/coordinator/role.md
Normal file
647
.claude/skills/team-lifecycle-v2/roles/coordinator/role.md
Normal file
@@ -0,0 +1,647 @@
|
|||||||
|
# Coordinator Role
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
**Role**: Coordinator
|
||||||
|
**Output Tag**: `[coordinator]`
|
||||||
|
**Responsibility**: Orchestrate the team-lifecycle workflow by managing team creation, task dispatching, progress monitoring, and session state persistence.
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Parse user requirements and clarify ambiguous inputs
|
||||||
|
- Create team and spawn worker subagents
|
||||||
|
- Dispatch tasks with proper dependency chains
|
||||||
|
- Monitor task progress and route messages
|
||||||
|
- Handle session resume and reconciliation
|
||||||
|
- Maintain session state persistence
|
||||||
|
- Provide progress reports and next-step options
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Execute spec/impl/research work directly (delegate to workers)
|
||||||
|
- Modify task outputs (workers own their deliverables)
|
||||||
|
- Skip dependency validation
|
||||||
|
- Proceed without user confirmation at checkpoints
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Message Type | Sender | Trigger | Coordinator Action |
|
||||||
|
|--------------|--------|---------|-------------------|
|
||||||
|
| `task_complete` | Worker | Task finished | Update session, check dependencies, kick next task |
|
||||||
|
| `task_blocked` | Worker | Dependency missing | Log block reason, wait for predecessor |
|
||||||
|
| `discussion_needed` | Worker | Ambiguity found | Route to user via AskUserQuestion |
|
||||||
|
| `research_complete` | Researcher | Research done | Checkpoint with user before impl |
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/dispatch.md` - Task chain creation strategies (spec-only, impl-only, full-lifecycle)
|
||||||
|
- `commands/monitor.md` - Coordination loop with message routing and checkpoint handling
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
- `TeamCreate` - Initialize team with session metadata
|
||||||
|
- `TeamSpawn` - Spawn worker subagents (spec-writer, implementer, researcher)
|
||||||
|
- `TaskCreate` - Create tasks with dependencies
|
||||||
|
- `TaskUpdate` - Update task status/metadata
|
||||||
|
- `TaskGet` - Retrieve task details
|
||||||
|
- `AskUserQuestion` - Interactive user prompts
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- Session file I/O (`Read`, `Write`)
|
||||||
|
- Directory scanning (`Glob`)
|
||||||
|
- Background execution for long-running tasks
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Flow
|
||||||
|
|
||||||
|
### Phase 0: Session Resume Check
|
||||||
|
|
||||||
|
**Purpose**: Detect and resume interrupted sessions
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Scan for session files
|
||||||
|
const sessionFiles = Glob("D:/Claude_dms3/.workflow/.sessions/team-lifecycle-*.json")
|
||||||
|
|
||||||
|
if (sessionFiles.length === 0) {
|
||||||
|
// No existing session, proceed to Phase 1
|
||||||
|
goto Phase1
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sessionFiles.length === 1) {
|
||||||
|
// Single session found
|
||||||
|
const session = Read(sessionFiles[0])
|
||||||
|
if (session.status === "active" || session.status === "paused") {
|
||||||
|
Output("[coordinator] Resuming session: " + session.session_id)
|
||||||
|
goto SessionReconciliation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sessionFiles.length > 1) {
|
||||||
|
// Multiple sessions - ask user
|
||||||
|
const choices = sessionFiles.map(f => {
|
||||||
|
const s = Read(f)
|
||||||
|
return `${s.session_id} (${s.status}) - ${s.mode} - ${s.tasks_completed}/${s.tasks_total}`
|
||||||
|
})
|
||||||
|
|
||||||
|
const answer = AskUserQuestion({
|
||||||
|
question: "Multiple sessions found. Which to resume?",
|
||||||
|
choices: ["Create new session", ...choices]
|
||||||
|
})
|
||||||
|
|
||||||
|
if (answer === "Create new session") {
|
||||||
|
goto Phase1
|
||||||
|
} else {
|
||||||
|
const selectedSession = Read(sessionFiles[answer.index - 1])
|
||||||
|
goto SessionReconciliation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Session Reconciliation Process
|
||||||
|
SessionReconciliation: {
|
||||||
|
Output("[coordinator] Reconciling session state...")
|
||||||
|
|
||||||
|
// Pipeline constants
|
||||||
|
const SPEC_CHAIN = [
|
||||||
|
"req-analysis", "arch-design", "api-design", "data-model",
|
||||||
|
"ui-spec", "test-strategy", "error-handling", "security-review",
|
||||||
|
"perf-requirements", "doc-outline", "review-spec", "finalize-spec"
|
||||||
|
]
|
||||||
|
|
||||||
|
const IMPL_CHAIN = [
|
||||||
|
"setup-scaffold", "core-impl", "integration", "finalize-impl"
|
||||||
|
]
|
||||||
|
|
||||||
|
// Task metadata with dependencies
|
||||||
|
const TASK_METADATA = {
|
||||||
|
// Spec tasks
|
||||||
|
"req-analysis": { phase: "spec", deps: [], description: "Analyze requirements" },
|
||||||
|
"arch-design": { phase: "spec", deps: ["req-analysis"], description: "Design architecture" },
|
||||||
|
"api-design": { phase: "spec", deps: ["arch-design"], description: "Design API contracts" },
|
||||||
|
"data-model": { phase: "spec", deps: ["arch-design"], description: "Design data models" },
|
||||||
|
"ui-spec": { phase: "spec", deps: ["arch-design"], description: "Design UI specifications" },
|
||||||
|
"test-strategy": { phase: "spec", deps: ["api-design", "data-model"], description: "Define test strategy" },
|
||||||
|
"error-handling": { phase: "spec", deps: ["api-design"], description: "Design error handling" },
|
||||||
|
"security-review": { phase: "spec", deps: ["api-design", "data-model"], description: "Security review" },
|
||||||
|
"perf-requirements": { phase: "spec", deps: ["arch-design"], description: "Performance requirements" },
|
||||||
|
"doc-outline": { phase: "spec", deps: ["api-design"], description: "Documentation outline" },
|
||||||
|
"review-spec": { phase: "spec", deps: ["test-strategy", "error-handling", "security-review", "perf-requirements", "doc-outline"], description: "Review specifications" },
|
||||||
|
"finalize-spec": { phase: "spec", deps: ["review-spec"], description: "Finalize specifications" },
|
||||||
|
|
||||||
|
// Impl tasks
|
||||||
|
"setup-scaffold": { phase: "impl", deps: ["finalize-spec"], description: "Setup project scaffold" },
|
||||||
|
"core-impl": { phase: "impl", deps: ["setup-scaffold"], description: "Core implementation" },
|
||||||
|
"integration": { phase: "impl", deps: ["core-impl"], description: "Integration work" },
|
||||||
|
"finalize-impl": { phase: "impl", deps: ["integration"], description: "Finalize implementation" }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper: Get predecessor task
|
||||||
|
function getPredecessor(taskId, chain) {
|
||||||
|
const index = chain.indexOf(taskId)
|
||||||
|
return index > 0 ? chain[index - 1] : null
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 1: Audit current state
|
||||||
|
const session = Read(sessionFile)
|
||||||
|
const teamState = TeamGet(session.team_id)
|
||||||
|
const allTasks = teamState.tasks
|
||||||
|
|
||||||
|
Output("[coordinator] Session audit:")
|
||||||
|
Output(` Mode: ${session.mode}`)
|
||||||
|
Output(` Tasks completed: ${session.tasks_completed}/${session.tasks_total}`)
|
||||||
|
Output(` Status: ${session.status}`)
|
||||||
|
|
||||||
|
// Step 2: Reconcile task states
|
||||||
|
const completedTasks = allTasks.filter(t => t.status === "completed")
|
||||||
|
const activeTasks = allTasks.filter(t => t.status === "active")
|
||||||
|
const blockedTasks = allTasks.filter(t => t.status === "blocked")
|
||||||
|
const pendingTasks = allTasks.filter(t => t.status === "pending")
|
||||||
|
|
||||||
|
Output("[coordinator] Task breakdown:")
|
||||||
|
Output(` Completed: ${completedTasks.length}`)
|
||||||
|
Output(` Active: ${activeTasks.length}`)
|
||||||
|
Output(` Blocked: ${blockedTasks.length}`)
|
||||||
|
Output(` Pending: ${pendingTasks.length}`)
|
||||||
|
|
||||||
|
// Step 3: Determine remaining work
|
||||||
|
const expectedChain = session.mode === "spec-only" ? SPEC_CHAIN :
|
||||||
|
session.mode === "impl-only" ? IMPL_CHAIN :
|
||||||
|
[...SPEC_CHAIN, ...IMPL_CHAIN]
|
||||||
|
|
||||||
|
const remainingTaskIds = expectedChain.filter(id =>
|
||||||
|
!completedTasks.some(t => t.task_id === id)
|
||||||
|
)
|
||||||
|
|
||||||
|
Output(`[coordinator] Remaining tasks: ${remainingTaskIds.join(", ")}`)
|
||||||
|
|
||||||
|
// Step 4: Rebuild team if needed
|
||||||
|
if (!teamState || teamState.status === "disbanded") {
|
||||||
|
Output("[coordinator] Team disbanded, recreating...")
|
||||||
|
TeamCreate({
|
||||||
|
team_id: session.team_id,
|
||||||
|
session_id: session.session_id,
|
||||||
|
mode: session.mode
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 5: Create missing tasks
|
||||||
|
for (const taskId of remainingTaskIds) {
|
||||||
|
const existingTask = allTasks.find(t => t.task_id === taskId)
|
||||||
|
if (!existingTask) {
|
||||||
|
const metadata = TASK_METADATA[taskId]
|
||||||
|
TaskCreate({
|
||||||
|
team_id: session.team_id,
|
||||||
|
task_id: taskId,
|
||||||
|
phase: metadata.phase,
|
||||||
|
description: metadata.description,
|
||||||
|
dependencies: metadata.deps,
|
||||||
|
status: "pending"
|
||||||
|
})
|
||||||
|
Output(`[coordinator] Created missing task: ${taskId}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 6: Verify dependencies
|
||||||
|
for (const taskId of remainingTaskIds) {
|
||||||
|
const task = TaskGet(taskId)
|
||||||
|
const metadata = TASK_METADATA[taskId]
|
||||||
|
const allDepsMet = metadata.deps.every(depId =>
|
||||||
|
completedTasks.some(t => t.task_id === depId)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (allDepsMet && task.status === "blocked") {
|
||||||
|
TaskUpdate(taskId, { status: "pending" })
|
||||||
|
Output(`[coordinator] Unblocked task: ${taskId}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 7: Update session state
|
||||||
|
session.status = "active"
|
||||||
|
session.resumed_at = new Date().toISOString()
|
||||||
|
session.tasks_completed = completedTasks.length
|
||||||
|
Write(sessionFile, session)
|
||||||
|
|
||||||
|
// Step 8: Report reconciliation
|
||||||
|
Output("[coordinator] Session reconciliation complete")
|
||||||
|
Output(`[coordinator] Ready to resume from: ${remainingTaskIds[0] || "all tasks complete"}`)
|
||||||
|
|
||||||
|
// Step 9: Kick next task
|
||||||
|
if (remainingTaskIds.length > 0) {
|
||||||
|
const nextTaskId = remainingTaskIds[0]
|
||||||
|
const nextTask = TaskGet(nextTaskId)
|
||||||
|
const metadata = TASK_METADATA[nextTaskId]
|
||||||
|
|
||||||
|
if (metadata.deps.every(depId => completedTasks.some(t => t.task_id === depId))) {
|
||||||
|
TaskUpdate(nextTaskId, { status: "active" })
|
||||||
|
Output(`[coordinator] Kicking task: ${nextTaskId}`)
|
||||||
|
goto Phase4_CoordinationLoop
|
||||||
|
} else {
|
||||||
|
Output(`[coordinator] Next task ${nextTaskId} blocked on: ${metadata.deps.join(", ")}`)
|
||||||
|
goto Phase4_CoordinationLoop
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Output("[coordinator] All tasks complete!")
|
||||||
|
goto Phase5_Report
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 1: Requirement Clarification
|
||||||
|
|
||||||
|
**Purpose**: Parse user input and clarify execution parameters
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Phase 1: Requirement Clarification")
|
||||||
|
|
||||||
|
// Parse $ARGUMENTS
|
||||||
|
const userInput = $ARGUMENTS
|
||||||
|
|
||||||
|
// Extract mode if specified
|
||||||
|
let mode = null
|
||||||
|
if (userInput.includes("spec-only")) mode = "spec-only"
|
||||||
|
if (userInput.includes("impl-only")) mode = "impl-only"
|
||||||
|
if (userInput.includes("full-lifecycle")) mode = "full-lifecycle"
|
||||||
|
|
||||||
|
// Extract scope if specified
|
||||||
|
let scope = null
|
||||||
|
if (userInput.includes("scope:")) {
|
||||||
|
scope = userInput.match(/scope:\s*([^\n]+)/)[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract focus areas
|
||||||
|
let focus = []
|
||||||
|
if (userInput.includes("focus:")) {
|
||||||
|
focus = userInput.match(/focus:\s*([^\n]+)/)[1].split(",").map(s => s.trim())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract depth preference
|
||||||
|
let depth = "standard"
|
||||||
|
if (userInput.includes("depth:shallow")) depth = "shallow"
|
||||||
|
if (userInput.includes("depth:deep")) depth = "deep"
|
||||||
|
|
||||||
|
// Ask for missing parameters
|
||||||
|
if (!mode) {
|
||||||
|
mode = AskUserQuestion({
|
||||||
|
question: "Select execution mode:",
|
||||||
|
choices: [
|
||||||
|
"spec-only - Generate specifications only",
|
||||||
|
"impl-only - Implementation only (requires existing spec)",
|
||||||
|
"full-lifecycle - Complete spec + implementation"
|
||||||
|
]
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!scope) {
|
||||||
|
scope = AskUserQuestion({
|
||||||
|
question: "Describe the project scope:",
|
||||||
|
type: "text"
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if (focus.length === 0) {
|
||||||
|
const focusAnswer = AskUserQuestion({
|
||||||
|
question: "Any specific focus areas? (optional)",
|
||||||
|
type: "text",
|
||||||
|
optional: true
|
||||||
|
})
|
||||||
|
if (focusAnswer) {
|
||||||
|
focus = focusAnswer.split(",").map(s => s.trim())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine execution method
|
||||||
|
const executionMethod = AskUserQuestion({
|
||||||
|
question: "Execution method:",
|
||||||
|
choices: [
|
||||||
|
"sequential - One task at a time (safer, slower)",
|
||||||
|
"parallel - Multiple tasks in parallel (faster, more complex)"
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
// Store clarified requirements
|
||||||
|
const requirements = {
|
||||||
|
mode,
|
||||||
|
scope,
|
||||||
|
focus,
|
||||||
|
depth,
|
||||||
|
executionMethod,
|
||||||
|
originalInput: userInput
|
||||||
|
}
|
||||||
|
|
||||||
|
Output("[coordinator] Requirements clarified:")
|
||||||
|
Output(` Mode: ${mode}`)
|
||||||
|
Output(` Scope: ${scope}`)
|
||||||
|
Output(` Focus: ${focus.join(", ") || "none"}`)
|
||||||
|
Output(` Depth: ${depth}`)
|
||||||
|
Output(` Execution: ${executionMethod}`)
|
||||||
|
|
||||||
|
goto Phase2
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 2: Create Team + Spawn Workers
|
||||||
|
|
||||||
|
**Purpose**: Initialize team and spawn worker subagents
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Phase 2: Team Creation")
|
||||||
|
|
||||||
|
// Generate session ID
|
||||||
|
const sessionId = `team-lifecycle-${Date.now()}`
|
||||||
|
const teamId = sessionId
|
||||||
|
|
||||||
|
// Create team
|
||||||
|
TeamCreate({
|
||||||
|
team_id: teamId,
|
||||||
|
session_id: sessionId,
|
||||||
|
mode: requirements.mode,
|
||||||
|
scope: requirements.scope,
|
||||||
|
focus: requirements.focus,
|
||||||
|
depth: requirements.depth,
|
||||||
|
executionMethod: requirements.executionMethod
|
||||||
|
})
|
||||||
|
|
||||||
|
Output(`[coordinator] Team created: ${teamId}`)
|
||||||
|
|
||||||
|
// Initialize session file
|
||||||
|
const sessionFile = `D:/Claude_dms3/.workflow/.sessions/${sessionId}.json`
|
||||||
|
const sessionData = {
|
||||||
|
session_id: sessionId,
|
||||||
|
team_id: teamId,
|
||||||
|
mode: requirements.mode,
|
||||||
|
scope: requirements.scope,
|
||||||
|
focus: requirements.focus,
|
||||||
|
depth: requirements.depth,
|
||||||
|
executionMethod: requirements.executionMethod,
|
||||||
|
status: "active",
|
||||||
|
created_at: new Date().toISOString(),
|
||||||
|
tasks_total: requirements.mode === "spec-only" ? 12 :
|
||||||
|
requirements.mode === "impl-only" ? 4 : 16,
|
||||||
|
tasks_completed: 0,
|
||||||
|
current_phase: requirements.mode === "impl-only" ? "impl" : "spec"
|
||||||
|
}
|
||||||
|
|
||||||
|
Write(sessionFile, sessionData)
|
||||||
|
Output(`[coordinator] Session file created: ${sessionFile}`)
|
||||||
|
|
||||||
|
// Spawn workers conditionally
|
||||||
|
if (requirements.mode === "spec-only" || requirements.mode === "full-lifecycle") {
|
||||||
|
TeamSpawn({
|
||||||
|
team_id: teamId,
|
||||||
|
role: "spec-writer",
|
||||||
|
count: 1
|
||||||
|
})
|
||||||
|
Output("[coordinator] Spawned spec-writer")
|
||||||
|
}
|
||||||
|
|
||||||
|
if (requirements.mode === "impl-only" || requirements.mode === "full-lifecycle") {
|
||||||
|
TeamSpawn({
|
||||||
|
team_id: teamId,
|
||||||
|
role: "implementer",
|
||||||
|
count: 1
|
||||||
|
})
|
||||||
|
Output("[coordinator] Spawned implementer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Always spawn researcher for ambiguity resolution
|
||||||
|
TeamSpawn({
|
||||||
|
team_id: teamId,
|
||||||
|
role: "researcher",
|
||||||
|
count: 1
|
||||||
|
})
|
||||||
|
Output("[coordinator] Spawned researcher")
|
||||||
|
|
||||||
|
goto Phase3
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 3: Create Task Chain
|
||||||
|
|
||||||
|
**Purpose**: Dispatch tasks based on execution mode
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Phase 3: Task Dispatching")
|
||||||
|
|
||||||
|
// Delegate to command file
|
||||||
|
const dispatchStrategy = Read("commands/dispatch.md")
|
||||||
|
|
||||||
|
// Execute strategy defined in command file
|
||||||
|
// (dispatch.md contains the complete task chain creation logic)
|
||||||
|
|
||||||
|
goto Phase4
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 4: Coordination Loop
|
||||||
|
|
||||||
|
**Purpose**: Monitor task progress and route messages
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Phase 4: Coordination Loop")
|
||||||
|
|
||||||
|
// Delegate to command file
|
||||||
|
const monitorStrategy = Read("commands/monitor.md")
|
||||||
|
|
||||||
|
// Execute strategy defined in command file
|
||||||
|
// (monitor.md contains the complete message routing and checkpoint logic)
|
||||||
|
|
||||||
|
goto Phase5
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 5: Report + Persistent Loop
|
||||||
|
|
||||||
|
**Purpose**: Provide completion report and offer next steps
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Output("[coordinator] Phase 5: Completion Report")
|
||||||
|
|
||||||
|
// Load session state
|
||||||
|
const session = Read(sessionFile)
|
||||||
|
const teamState = TeamGet(session.team_id)
|
||||||
|
|
||||||
|
// Generate report
|
||||||
|
Output("[coordinator] ========================================")
|
||||||
|
Output("[coordinator] TEAM LIFECYCLE EXECUTION COMPLETE")
|
||||||
|
Output("[coordinator] ========================================")
|
||||||
|
Output(`[coordinator] Session ID: ${session.session_id}`)
|
||||||
|
Output(`[coordinator] Mode: ${session.mode}`)
|
||||||
|
Output(`[coordinator] Tasks Completed: ${session.tasks_completed}/${session.tasks_total}`)
|
||||||
|
Output(`[coordinator] Duration: ${calculateDuration(session.created_at, new Date())}`)
|
||||||
|
|
||||||
|
// List deliverables
|
||||||
|
const completedTasks = teamState.tasks.filter(t => t.status === "completed")
|
||||||
|
Output("[coordinator] Deliverables:")
|
||||||
|
for (const task of completedTasks) {
|
||||||
|
Output(` ✓ ${task.task_id}: ${task.description}`)
|
||||||
|
if (task.output_file) {
|
||||||
|
Output(` Output: ${task.output_file}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update session status
|
||||||
|
session.status = "completed"
|
||||||
|
session.completed_at = new Date().toISOString()
|
||||||
|
Write(sessionFile, session)
|
||||||
|
|
||||||
|
// Offer next steps
|
||||||
|
const nextAction = AskUserQuestion({
|
||||||
|
question: "What would you like to do next?",
|
||||||
|
choices: [
|
||||||
|
"exit - End session",
|
||||||
|
"review - Review specific deliverables",
|
||||||
|
"extend - Add more tasks to this session",
|
||||||
|
"handoff-lite-plan - Create lite-plan from spec",
|
||||||
|
"handoff-full-plan - Create full-plan from spec",
|
||||||
|
"handoff-req-plan - Create req-plan from requirements",
|
||||||
|
"handoff-create-issues - Generate GitHub issues"
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
switch (nextAction) {
|
||||||
|
case "exit":
|
||||||
|
Output("[coordinator] Session ended. Goodbye!")
|
||||||
|
break
|
||||||
|
|
||||||
|
case "review":
|
||||||
|
const taskToReview = AskUserQuestion({
|
||||||
|
question: "Which task output to review?",
|
||||||
|
choices: completedTasks.map(t => t.task_id)
|
||||||
|
})
|
||||||
|
const reviewTask = completedTasks.find(t => t.task_id === taskToReview)
|
||||||
|
if (reviewTask.output_file) {
|
||||||
|
const content = Read(reviewTask.output_file)
|
||||||
|
Output(`[coordinator] Task: ${reviewTask.task_id}`)
|
||||||
|
Output(content)
|
||||||
|
}
|
||||||
|
goto Phase5 // Loop back for more actions
|
||||||
|
|
||||||
|
case "extend":
|
||||||
|
const extensionScope = AskUserQuestion({
|
||||||
|
question: "Describe additional work:",
|
||||||
|
type: "text"
|
||||||
|
})
|
||||||
|
Output("[coordinator] Creating extension tasks...")
|
||||||
|
// Create custom tasks based on extension scope
|
||||||
|
// (Implementation depends on extension requirements)
|
||||||
|
goto Phase4 // Return to coordination loop
|
||||||
|
|
||||||
|
case "handoff-lite-plan":
|
||||||
|
Output("[coordinator] Generating lite-plan from specifications...")
|
||||||
|
// Read finalize-spec output
|
||||||
|
const specOutput = Read(getTaskOutput("finalize-spec"))
|
||||||
|
// Create lite-plan format
|
||||||
|
const litePlan = generateLitePlan(specOutput)
|
||||||
|
const litePlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-lite-plan.md`
|
||||||
|
Write(litePlanFile, litePlan)
|
||||||
|
Output(`[coordinator] Lite-plan created: ${litePlanFile}`)
|
||||||
|
goto Phase5
|
||||||
|
|
||||||
|
case "handoff-full-plan":
|
||||||
|
Output("[coordinator] Generating full-plan from specifications...")
|
||||||
|
const fullSpecOutput = Read(getTaskOutput("finalize-spec"))
|
||||||
|
const fullPlan = generateFullPlan(fullSpecOutput)
|
||||||
|
const fullPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-full-plan.md`
|
||||||
|
Write(fullPlanFile, fullPlan)
|
||||||
|
Output(`[coordinator] Full-plan created: ${fullPlanFile}`)
|
||||||
|
goto Phase5
|
||||||
|
|
||||||
|
case "handoff-req-plan":
|
||||||
|
Output("[coordinator] Generating req-plan from requirements...")
|
||||||
|
const reqAnalysis = Read(getTaskOutput("req-analysis"))
|
||||||
|
const reqPlan = generateReqPlan(reqAnalysis)
|
||||||
|
const reqPlanFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-req-plan.md`
|
||||||
|
Write(reqPlanFile, reqPlan)
|
||||||
|
Output(`[coordinator] Req-plan created: ${reqPlanFile}`)
|
||||||
|
goto Phase5
|
||||||
|
|
||||||
|
case "handoff-create-issues":
|
||||||
|
Output("[coordinator] Generating GitHub issues...")
|
||||||
|
const issuesSpec = Read(getTaskOutput("finalize-spec"))
|
||||||
|
const issues = generateGitHubIssues(issuesSpec)
|
||||||
|
const issuesFile = `D:/Claude_dms3/.workflow/.sessions/${session.session_id}-issues.json`
|
||||||
|
Write(issuesFile, issues)
|
||||||
|
Output(`[coordinator] Issues created: ${issuesFile}`)
|
||||||
|
Output("[coordinator] Use GitHub CLI to import: gh issue create --title ... --body ...")
|
||||||
|
goto Phase5
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper functions
|
||||||
|
function calculateDuration(start, end) {
|
||||||
|
const diff = new Date(end) - new Date(start)
|
||||||
|
const minutes = Math.floor(diff / 60000)
|
||||||
|
const seconds = Math.floor((diff % 60000) / 1000)
|
||||||
|
return `${minutes}m ${seconds}s`
|
||||||
|
}
|
||||||
|
|
||||||
|
function getTaskOutput(taskId) {
|
||||||
|
const task = TaskGet(taskId)
|
||||||
|
return task.output_file
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateLitePlan(specOutput) {
|
||||||
|
// Parse spec output and create lite-plan format
|
||||||
|
return `# Lite Plan\n\n${specOutput}\n\n## Implementation Steps\n- Step 1\n- Step 2\n...`
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateFullPlan(specOutput) {
|
||||||
|
// Parse spec output and create full-plan format with detailed breakdown
|
||||||
|
return `# Full Plan\n\n${specOutput}\n\n## Detailed Implementation\n### Phase 1\n### Phase 2\n...`
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateReqPlan(reqAnalysis) {
|
||||||
|
// Parse requirements and create req-plan format
|
||||||
|
return `# Requirements Plan\n\n${reqAnalysis}\n\n## Acceptance Criteria\n- Criterion 1\n- Criterion 2\n...`
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateGitHubIssues(specOutput) {
|
||||||
|
// Parse spec and generate GitHub issue JSON
|
||||||
|
return {
|
||||||
|
issues: [
|
||||||
|
{ title: "Issue 1", body: "Description", labels: ["feature"] },
|
||||||
|
{ title: "Issue 2", body: "Description", labels: ["bug"] }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Session File Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "team-lifecycle-1234567890",
|
||||||
|
"team_id": "team-lifecycle-1234567890",
|
||||||
|
"mode": "full-lifecycle",
|
||||||
|
"scope": "Build authentication system",
|
||||||
|
"focus": ["security", "scalability"],
|
||||||
|
"depth": "standard",
|
||||||
|
"executionMethod": "sequential",
|
||||||
|
"status": "active",
|
||||||
|
"created_at": "2026-02-18T10:00:00Z",
|
||||||
|
"completed_at": null,
|
||||||
|
"resumed_at": null,
|
||||||
|
"tasks_total": 16,
|
||||||
|
"tasks_completed": 5,
|
||||||
|
"current_phase": "spec"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Error Type | Coordinator Action |
|
||||||
|
|------------|-------------------|
|
||||||
|
| Task timeout | Log timeout, mark task as failed, ask user to retry or skip |
|
||||||
|
| Worker crash | Respawn worker, reassign task |
|
||||||
|
| Dependency cycle | Detect cycle, report to user, halt execution |
|
||||||
|
| Invalid mode | Reject with error message, ask user to clarify |
|
||||||
|
| Session corruption | Attempt recovery, fallback to manual reconciliation |
|
||||||
<!-- Begin new file (396 lines): commands/critique.md — Multi-Perspective Critique -->
|
# Command: Multi-Perspective Critique
|
||||||
|
|
||||||
|
Phase 3 of discussant execution - launch parallel CLI analyses for each required perspective.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This command executes multi-perspective critique by routing to specialized CLI tools based on perspective type. Each perspective produces structured critique with strengths, weaknesses, suggestions, and ratings.
|
||||||
|
|
||||||
|
## Perspective Definitions
|
||||||
|
|
||||||
|
### 1. Product Perspective (gemini)
|
||||||
|
|
||||||
|
**Focus**: Market fit, user value, business viability, competitive differentiation
|
||||||
|
|
||||||
|
**CLI Tool**: gemini
|
||||||
|
|
||||||
|
**Output Structure**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "product",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Prompt Template**:
|
||||||
|
```
|
||||||
|
Analyze from Product Manager perspective:
|
||||||
|
- Market fit and user value proposition
|
||||||
|
- Business viability and ROI potential
|
||||||
|
- Competitive differentiation
|
||||||
|
- User experience and adoption barriers
|
||||||
|
|
||||||
|
Artifact: {artifactContent}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Technical Perspective (codex)
|
||||||
|
|
||||||
|
**Focus**: Feasibility, tech debt, performance, security, maintainability
|
||||||
|
|
||||||
|
**CLI Tool**: codex
|
||||||
|
|
||||||
|
**Output Structure**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "technical",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Prompt Template**:
|
||||||
|
```
|
||||||
|
Analyze from Tech Lead perspective:
|
||||||
|
- Technical feasibility and implementation complexity
|
||||||
|
- Architecture decisions and tech debt implications
|
||||||
|
- Performance and scalability considerations
|
||||||
|
- Security vulnerabilities and risks
|
||||||
|
- Code maintainability and extensibility
|
||||||
|
|
||||||
|
Artifact: {artifactContent}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Quality Perspective (claude)
|
||||||
|
|
||||||
|
**Focus**: Completeness, testability, consistency, standards compliance
|
||||||
|
|
||||||
|
**CLI Tool**: claude
|
||||||
|
|
||||||
|
**Output Structure**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "quality",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Prompt Template**:
|
||||||
|
```
|
||||||
|
Analyze from QA Lead perspective:
|
||||||
|
- Specification completeness and clarity
|
||||||
|
- Testability and test coverage potential
|
||||||
|
- Consistency across requirements/design
|
||||||
|
- Standards compliance (coding, documentation, accessibility)
|
||||||
|
- Ambiguity detection and edge case coverage
|
||||||
|
|
||||||
|
Artifact: {artifactContent}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Risk Perspective (gemini)
|
||||||
|
|
||||||
|
**Focus**: Risk identification, dependency analysis, assumption validation, failure modes
|
||||||
|
|
||||||
|
**CLI Tool**: gemini
|
||||||
|
|
||||||
|
**Output Structure**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "risk",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5,
|
||||||
|
"risk_level": "low|medium|high|critical"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Prompt Template**:
|
||||||
|
```
|
||||||
|
Analyze from Risk Analyst perspective:
|
||||||
|
- Risk identification (technical, business, operational)
|
||||||
|
- Dependency analysis and external risks
|
||||||
|
- Assumption validation and hidden dependencies
|
||||||
|
- Failure modes and mitigation strategies
|
||||||
|
- Timeline and resource risks
|
||||||
|
|
||||||
|
Artifact: {artifactContent}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5), risk_level
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Coverage Perspective (gemini)
|
||||||
|
|
||||||
|
**Focus**: Requirement completeness vs original intent, scope drift, gap detection
|
||||||
|
|
||||||
|
**CLI Tool**: gemini
|
||||||
|
|
||||||
|
**Output Structure**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "coverage",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5,
|
||||||
|
"covered_requirements": ["REQ-ID"],
|
||||||
|
"partial_requirements": ["REQ-ID"],
|
||||||
|
"missing_requirements": ["REQ-ID"],
|
||||||
|
"scope_creep": ["description"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Prompt Template**:
|
||||||
|
```
|
||||||
|
Analyze from Requirements Analyst perspective:
|
||||||
|
- Compare current artifact against original requirements in discovery-context.json
|
||||||
|
- Identify covered requirements (fully addressed)
|
||||||
|
- Identify partial requirements (partially addressed)
|
||||||
|
- Identify missing requirements (not addressed)
|
||||||
|
- Detect scope creep (new items not in original requirements)
|
||||||
|
|
||||||
|
Original Requirements: {discoveryContext}
|
||||||
|
Current Artifact: {artifactContent}
|
||||||
|
|
||||||
|
Output JSON with:
|
||||||
|
- strengths[], weaknesses[], suggestions[], rating (1-5)
|
||||||
|
- covered_requirements[] (REQ-IDs fully addressed)
|
||||||
|
- partial_requirements[] (REQ-IDs partially addressed)
|
||||||
|
- missing_requirements[] (REQ-IDs not addressed) ← CRITICAL if non-empty
|
||||||
|
- scope_creep[] (new items not in original requirements)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Pattern
|
||||||
|
|
||||||
|
### Parallel CLI Execution
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Load artifact content
|
||||||
|
const artifactPath = `${sessionFolder}/${config.artifact}`
|
||||||
|
const artifactContent = config.type === 'json'
|
||||||
|
? JSON.parse(Read(artifactPath))
|
||||||
|
: Read(artifactPath)
|
||||||
|
|
||||||
|
// Load discovery context for coverage perspective
|
||||||
|
let discoveryContext = null
|
||||||
|
try {
|
||||||
|
discoveryContext = JSON.parse(Read(`${sessionFolder}/spec/discovery-context.json`))
|
||||||
|
} catch { /* may not exist in early rounds */ }
|
||||||
|
|
||||||
|
// Launch parallel CLI analyses
|
||||||
|
const perspectiveResults = []
|
||||||
|
|
||||||
|
for (const perspective of config.perspectives) {
|
||||||
|
let cliTool, prompt
|
||||||
|
|
||||||
|
switch(perspective) {
|
||||||
|
case 'product':
|
||||||
|
cliTool = 'gemini'
|
||||||
|
prompt = `Analyze from Product Manager perspective:
|
||||||
|
- Market fit and user value proposition
|
||||||
|
- Business viability and ROI potential
|
||||||
|
- Competitive differentiation
|
||||||
|
- User experience and adoption barriers
|
||||||
|
|
||||||
|
Artifact:
|
||||||
|
${JSON.stringify(artifactContent, null, 2)}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)`
|
||||||
|
break
|
||||||
|
|
||||||
|
case 'technical':
|
||||||
|
cliTool = 'codex'
|
||||||
|
prompt = `Analyze from Tech Lead perspective:
|
||||||
|
- Technical feasibility and implementation complexity
|
||||||
|
- Architecture decisions and tech debt implications
|
||||||
|
- Performance and scalability considerations
|
||||||
|
- Security vulnerabilities and risks
|
||||||
|
- Code maintainability and extensibility
|
||||||
|
|
||||||
|
Artifact:
|
||||||
|
${JSON.stringify(artifactContent, null, 2)}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)`
|
||||||
|
break
|
||||||
|
|
||||||
|
case 'quality':
|
||||||
|
cliTool = 'claude'
|
||||||
|
prompt = `Analyze from QA Lead perspective:
|
||||||
|
- Specification completeness and clarity
|
||||||
|
- Testability and test coverage potential
|
||||||
|
- Consistency across requirements/design
|
||||||
|
- Standards compliance (coding, documentation, accessibility)
|
||||||
|
- Ambiguity detection and edge case coverage
|
||||||
|
|
||||||
|
Artifact:
|
||||||
|
${JSON.stringify(artifactContent, null, 2)}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5)`
|
||||||
|
break
|
||||||
|
|
||||||
|
case 'risk':
|
||||||
|
cliTool = 'gemini'
|
||||||
|
prompt = `Analyze from Risk Analyst perspective:
|
||||||
|
- Risk identification (technical, business, operational)
|
||||||
|
- Dependency analysis and external risks
|
||||||
|
- Assumption validation and hidden dependencies
|
||||||
|
- Failure modes and mitigation strategies
|
||||||
|
- Timeline and resource risks
|
||||||
|
|
||||||
|
Artifact:
|
||||||
|
${JSON.stringify(artifactContent, null, 2)}
|
||||||
|
|
||||||
|
Output JSON with: strengths[], weaknesses[], suggestions[], rating (1-5), risk_level`
|
||||||
|
break
|
||||||
|
|
||||||
|
case 'coverage':
|
||||||
|
cliTool = 'gemini'
|
||||||
|
prompt = `Analyze from Requirements Analyst perspective:
|
||||||
|
- Compare current artifact against original requirements in discovery-context.json
|
||||||
|
- Identify covered requirements (fully addressed)
|
||||||
|
- Identify partial requirements (partially addressed)
|
||||||
|
- Identify missing requirements (not addressed)
|
||||||
|
- Detect scope creep (new items not in original requirements)
|
||||||
|
|
||||||
|
Original Requirements:
|
||||||
|
${discoveryContext ? JSON.stringify(discoveryContext, null, 2) : 'Not available'}
|
||||||
|
|
||||||
|
Current Artifact:
|
||||||
|
${JSON.stringify(artifactContent, null, 2)}
|
||||||
|
|
||||||
|
Output JSON with:
|
||||||
|
- strengths[], weaknesses[], suggestions[], rating (1-5)
|
||||||
|
- covered_requirements[] (REQ-IDs fully addressed)
|
||||||
|
- partial_requirements[] (REQ-IDs partially addressed)
|
||||||
|
- missing_requirements[] (REQ-IDs not addressed) ← CRITICAL if non-empty
|
||||||
|
- scope_creep[] (new items not in original requirements)`
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute CLI analysis (run_in_background: true per CLAUDE.md)
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "${prompt.replace(/"/g, '\\"')}" --tool ${cliTool} --mode analysis`,
|
||||||
|
run_in_background: true,
|
||||||
|
description: `[discussant] ${perspective} perspective analysis`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all CLI results via hook callbacks
|
||||||
|
// Results will be collected in perspectiveResults array
|
||||||
|
```
|
||||||
|
|
||||||
|
## Critical Divergence Detection
|
||||||
|
|
||||||
|
### Coverage Gap Detection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const coverageResult = perspectiveResults.find(p => p.perspective === 'coverage')
|
||||||
|
if (coverageResult?.missing_requirements?.length > 0) {
|
||||||
|
// Flag as critical divergence
|
||||||
|
synthesis.divergent_views.push({
|
||||||
|
topic: 'requirement_coverage_gap',
|
||||||
|
description: `${coverageResult.missing_requirements.length} requirements from discovery-context not covered: ${coverageResult.missing_requirements.join(', ')}`,
|
||||||
|
severity: 'high',
|
||||||
|
source: 'coverage'
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Risk Level Detection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const riskResult = perspectiveResults.find(p => p.perspective === 'risk')
|
||||||
|
if (riskResult?.risk_level === 'high' || riskResult?.risk_level === 'critical') {
|
||||||
|
synthesis.risk_flags.push({
|
||||||
|
level: riskResult.risk_level,
|
||||||
|
description: riskResult.weaknesses.join('; ')
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Fallback Strategy
|
||||||
|
|
||||||
|
### CLI Failure Fallback
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// If CLI analysis fails for a perspective, fallback to direct Claude analysis
|
||||||
|
try {
|
||||||
|
// CLI execution
|
||||||
|
Bash({ command: `ccw cli -p "..." --tool ${cliTool} --mode analysis`, run_in_background: true })
|
||||||
|
} catch (error) {
|
||||||
|
// Fallback: Direct Claude analysis
|
||||||
|
const fallbackResult = {
|
||||||
|
perspective: perspective,
|
||||||
|
strengths: ["Direct analysis: ..."],
|
||||||
|
weaknesses: ["Direct analysis: ..."],
|
||||||
|
suggestions: ["Direct analysis: ..."],
|
||||||
|
rating: 3,
|
||||||
|
_fallback: true
|
||||||
|
}
|
||||||
|
perspectiveResults.push(fallbackResult)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### All CLI Failures
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (perspectiveResults.every(r => r._fallback)) {
|
||||||
|
// Generate basic discussion from direct reading
|
||||||
|
const basicDiscussion = {
|
||||||
|
convergent_themes: ["Basic analysis from direct reading"],
|
||||||
|
divergent_views: [],
|
||||||
|
action_items: ["Review artifact manually"],
|
||||||
|
open_questions: [],
|
||||||
|
decisions: [],
|
||||||
|
risk_flags: [],
|
||||||
|
overall_sentiment: 'neutral',
|
||||||
|
consensus_reached: true,
|
||||||
|
_basic_mode: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
Each perspective produces:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"perspective": "product|technical|quality|risk|coverage",
|
||||||
|
"strengths": ["string"],
|
||||||
|
"weaknesses": ["string"],
|
||||||
|
"suggestions": ["string"],
|
||||||
|
"rating": 1-5,
|
||||||
|
|
||||||
|
// Risk perspective only
|
||||||
|
"risk_level": "low|medium|high|critical",
|
||||||
|
|
||||||
|
// Coverage perspective only
|
||||||
|
"covered_requirements": ["REQ-ID"],
|
||||||
|
"partial_requirements": ["REQ-ID"],
|
||||||
|
"missing_requirements": ["REQ-ID"],
|
||||||
|
"scope_creep": ["description"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with Phase 4
|
||||||
|
|
||||||
|
Phase 4 (Consensus Synthesis) consumes `perspectiveResults` array to:
|
||||||
|
1. Extract convergent themes (2+ perspectives agree)
|
||||||
|
2. Extract divergent views (perspectives conflict)
|
||||||
|
3. Detect coverage gaps (missing_requirements non-empty)
|
||||||
|
4. Assess risk flags (high/critical risk_level)
|
||||||
|
5. Determine consensus_reached (true if no critical divergences)
|
||||||
<!-- Begin new file (265 lines): .claude/skills/team-lifecycle-v2/roles/discussant/role.md -->
|
|||||||
|
# Role: discussant
|
||||||
|
|
||||||
|
Multi-perspective critique, consensus building, and conflict escalation. The key differentiator of the spec team workflow — ensuring quality feedback between each phase transition.
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
- **Name**: `discussant`
|
||||||
|
- **Task Prefix**: `DISCUSS-*`
|
||||||
|
- **Output Tag**: `[discussant]`
|
||||||
|
- **Responsibility**: Load Artifact → Multi-Perspective Critique → Synthesize Consensus → Report
|
||||||
|
- **Communication**: SendMessage to coordinator only
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process DISCUSS-* tasks
|
||||||
|
- Communicate only with coordinator
|
||||||
|
- Write discussion records to `discussions/` folder
|
||||||
|
- Tag all SendMessage and team_msg calls with `[discussant]`
|
||||||
|
- Load roundConfig with all 6 rounds
|
||||||
|
- Execute multi-perspective critique via CLI tools
|
||||||
|
- Detect coverage gaps from coverage perspective
|
||||||
|
- Synthesize consensus with convergent/divergent analysis
|
||||||
|
- Report consensus_reached vs discussion_blocked paths
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks
|
||||||
|
- Contact other workers directly
|
||||||
|
- Modify spec documents directly
|
||||||
|
- Skip perspectives defined in roundConfig
|
||||||
|
- Proceed without artifact loading
|
||||||
|
- Ignore critical divergences
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Trigger | Description |
|
||||||
|
|------|-----------|---------|-------------|
|
||||||
|
| `discussion_ready` | discussant → coordinator | Discussion complete, consensus reached | With discussion record path and decision summary |
|
||||||
|
| `discussion_blocked` | discussant → coordinator | Cannot reach consensus | With divergence points and options, needs coordinator |
|
||||||
|
| `impl_progress` | discussant → coordinator | Long-running discussion in progress | Periodic progress update during multi-perspective analysis |
|
||||||
|
| `error` | discussant → coordinator | Discussion cannot proceed | Input artifact missing, etc. |
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Discussion complete
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "discussion_ready", summary: "[discussant] Scope discussion consensus reached: 3 decisions", ref: `${sessionFolder}/discussions/discuss-001-scope.md` })
|
||||||
|
|
||||||
|
// Discussion blocked
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "discussion_blocked", summary: "[discussant] Cannot reach consensus on tech stack", data: { reason: "...", options: [...] } })
|
||||||
|
|
||||||
|
// Error report
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "discussant", to: "coordinator", type: "error", summary: "[discussant] Input artifact missing" })
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Fallback
|
||||||
|
|
||||||
|
When `mcp__ccw-tools__team_msg` MCP is unavailable:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash(`ccw team log --team "${teamName}" --from "discussant" --to "coordinator" --type "discussion_ready" --summary "[discussant] Discussion complete" --ref "${sessionFolder}/discussions/discuss-001-scope.md" --json`)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Discussion Dimension Model
|
||||||
|
|
||||||
|
Each discussion round draws on up to 5 perspectives (the subset used per round is listed in the round configuration below):
|
||||||
|
|
||||||
|
| Perspective | Focus | Representative | CLI Tool |
|
||||||
|
|-------------|-------|----------------|----------|
|
||||||
|
| **Product** | Market fit, user value, business viability, competitive differentiation | Product Manager | gemini |
|
||||||
|
| **Technical** | Feasibility, tech debt, performance, security, maintainability | Tech Lead | codex |
|
||||||
|
| **Quality** | Completeness, testability, consistency, standards compliance | QA Lead | claude |
|
||||||
|
| **Risk** | Risk identification, dependency analysis, assumption validation, failure modes | Risk Analyst | gemini |
|
||||||
|
| **Coverage** | Requirement completeness vs original intent, scope drift, gap detection | Requirements Analyst | gemini |
|
||||||
|
|
||||||
|
## Discussion Round Configuration
|
||||||
|
|
||||||
|
| Round | Artifact | Key Perspectives | Focus |
|
||||||
|
|-------|----------|-----------------|-------|
|
||||||
|
| DISCUSS-001 | discovery-context | product + risk + **coverage** | Scope confirmation, direction, initial coverage check |
|
||||||
|
| DISCUSS-002 | product-brief | product + technical + quality + **coverage** | Positioning, feasibility, requirement coverage |
|
||||||
|
| DISCUSS-003 | requirements | quality + product + **coverage** | Completeness, priority, gap detection |
|
||||||
|
| DISCUSS-004 | architecture | technical + risk | Tech choices, security |
|
||||||
|
| DISCUSS-005 | epics | product + technical + quality + **coverage** | MVP scope, estimation, requirement tracing |
|
||||||
|
| DISCUSS-006 | readiness-report | all 5 perspectives | Final sign-off |
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/critique.md` - Multi-perspective CLI critique (Phase 3)
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
None (discussant uses CLI tools directly)
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- **gemini**: Product perspective, Risk perspective, Coverage perspective
|
||||||
|
- **codex**: Technical perspective
|
||||||
|
- **claude**: Quality perspective
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('DISCUSS-') &&
|
||||||
|
t.owner === 'discussant' &&
|
||||||
|
t.status === 'pending' &&
|
||||||
|
t.blockedBy.length === 0
|
||||||
|
)
|
||||||
|
|
||||||
|
if (myTasks.length === 0) return // idle
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Artifact Loading
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const sessionMatch = task.description.match(/Session:\s*(.+)/)
|
||||||
|
const sessionFolder = sessionMatch ? sessionMatch[1].trim() : ''
|
||||||
|
const roundMatch = task.subject.match(/DISCUSS-(\d+)/)
|
||||||
|
const roundNumber = roundMatch ? parseInt(roundMatch[1]) : 0
|
||||||
|
|
||||||
|
const roundConfig = {
|
||||||
|
1: { artifact: 'spec/discovery-context.json', type: 'json', outputFile: 'discuss-001-scope.md', perspectives: ['product', 'risk', 'coverage'], label: '范围讨论' },
|
||||||
|
2: { artifact: 'spec/product-brief.md', type: 'md', outputFile: 'discuss-002-brief.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Brief评审' },
|
||||||
|
3: { artifact: 'spec/requirements/_index.md', type: 'md', outputFile: 'discuss-003-requirements.md', perspectives: ['quality', 'product', 'coverage'], label: '需求讨论' },
|
||||||
|
4: { artifact: 'spec/architecture/_index.md', type: 'md', outputFile: 'discuss-004-architecture.md', perspectives: ['technical', 'risk'], label: '架构讨论' },
|
||||||
|
5: { artifact: 'spec/epics/_index.md', type: 'md', outputFile: 'discuss-005-epics.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Epics讨论' },
|
||||||
|
6: { artifact: 'spec/readiness-report.md', type: 'md', outputFile: 'discuss-006-final.md', perspectives: ['product', 'technical', 'quality', 'risk', 'coverage'], label: '最终签收' }
|
||||||
|
}
|
||||||
|
|
||||||
|
const config = roundConfig[roundNumber]
|
||||||
|
// Load target artifact and prior discussion records for continuity
|
||||||
|
Bash(`mkdir -p ${sessionFolder}/discussions`)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Multi-Perspective Critique
|
||||||
|
|
||||||
|
**Delegate to**: `Read("commands/critique.md")`
|
||||||
|
|
||||||
|
Launch parallel CLI analyses for each required perspective. See `commands/critique.md` for full implementation.
|
||||||
|
|
||||||
|
### Phase 4: Consensus Synthesis
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const synthesis = {
|
||||||
|
convergent_themes: [],
|
||||||
|
divergent_views: [],
|
||||||
|
action_items: [],
|
||||||
|
open_questions: [],
|
||||||
|
decisions: [],
|
||||||
|
risk_flags: [],
|
||||||
|
overall_sentiment: '', // positive/neutral/concerns/critical
|
||||||
|
consensus_reached: true // false if major unresolvable conflicts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract convergent themes (items mentioned positively by 2+ perspectives)
|
||||||
|
// Extract divergent views (items where perspectives conflict)
|
||||||
|
// Check coverage gaps from coverage perspective (if present)
|
||||||
|
const coverageResult = perspectiveResults.find(p => p.perspective === 'coverage')
|
||||||
|
if (coverageResult?.missing_requirements?.length > 0) {
|
||||||
|
synthesis.coverage_gaps = coverageResult.missing_requirements
|
||||||
|
synthesis.divergent_views.push({
|
||||||
|
topic: 'requirement_coverage_gap',
|
||||||
|
description: `${coverageResult.missing_requirements.length} requirements from discovery-context not covered: ${coverageResult.missing_requirements.join(', ')}`,
|
||||||
|
severity: 'high',
|
||||||
|
source: 'coverage'
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// Check for unresolvable conflicts
|
||||||
|
const criticalDivergences = synthesis.divergent_views.filter(d => d.severity === 'high')
|
||||||
|
if (criticalDivergences.length > 0) synthesis.consensus_reached = false
|
||||||
|
|
||||||
|
// Determine overall sentiment from average rating
|
||||||
|
// Generate discussion record markdown with all perspectives, convergence, divergence, action items
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/discussions/${config.outputFile}`, discussionRecord)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (synthesis.consensus_reached) {
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log", team: teamName,
|
||||||
|
from: "discussant", to: "coordinator",
|
||||||
|
type: "discussion_ready",
|
||||||
|
summary: `[discussant] ${config.label}讨论完成: ${synthesis.action_items.length}个行动项, ${synthesis.open_questions.length}个开放问题, 总体${synthesis.overall_sentiment}`,
|
||||||
|
ref: `${sessionFolder}/discussions/${config.outputFile}`
|
||||||
|
})
|
||||||
|
|
||||||
|
SendMessage({
|
||||||
|
type: "message",
|
||||||
|
recipient: "coordinator",
|
||||||
|
content: `[discussant] ## 讨论结果: ${config.label}
|
||||||
|
|
||||||
|
**Task**: ${task.subject}
|
||||||
|
**共识**: 已达成
|
||||||
|
**总体评价**: ${synthesis.overall_sentiment}
|
||||||
|
|
||||||
|
### 行动项 (${synthesis.action_items.length})
|
||||||
|
${synthesis.action_items.map((item, i) => (i+1) + '. ' + item).join('\n') || '无'}
|
||||||
|
|
||||||
|
### 开放问题 (${synthesis.open_questions.length})
|
||||||
|
${synthesis.open_questions.map((q, i) => (i+1) + '. ' + q).join('\n') || '无'}
|
||||||
|
|
||||||
|
### 讨论记录
|
||||||
|
${sessionFolder}/discussions/${config.outputFile}
|
||||||
|
|
||||||
|
共识已达成,可推进至下一阶段。`,
|
||||||
|
summary: `[discussant] ${config.label}共识达成: ${synthesis.action_items.length}行动项`
|
||||||
|
})
|
||||||
|
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
} else {
|
||||||
|
// Consensus blocked - escalate to coordinator
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log", team: teamName,
|
||||||
|
from: "discussant", to: "coordinator",
|
||||||
|
type: "discussion_blocked",
|
||||||
|
summary: `[discussant] ${config.label}讨论阻塞: ${criticalDivergences.length}个关键分歧需决策`,
|
||||||
|
data: {
|
||||||
|
reason: criticalDivergences.map(d => d.description).join('; '),
|
||||||
|
options: criticalDivergences.map(d => ({ label: d.topic, description: d.options?.join(' vs ') || d.description }))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
SendMessage({
|
||||||
|
type: "message",
|
||||||
|
recipient: "coordinator",
|
||||||
|
content: `[discussant] ## 讨论阻塞: ${config.label}
|
||||||
|
|
||||||
|
**Task**: ${task.subject}
|
||||||
|
**状态**: 无法达成共识,需要 coordinator 介入
|
||||||
|
|
||||||
|
### 关键分歧
|
||||||
|
${criticalDivergences.map((d, i) => (i+1) + '. **' + d.topic + '**: ' + d.description).join('\n\n')}
|
||||||
|
|
||||||
|
请通过 AskUserQuestion 收集用户对分歧点的决策。`,
|
||||||
|
summary: `[discussant] ${config.label}阻塞: ${criticalDivergences.length}分歧`
|
||||||
|
})
|
||||||
|
// Keep task in_progress, wait for coordinator resolution
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for next DISCUSS task → back to Phase 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| No DISCUSS-* tasks available | Idle, wait for coordinator assignment |
|
||||||
|
| Target artifact not found | Notify coordinator with `[discussant]` tag, request prerequisite completion |
|
||||||
|
| CLI perspective analysis failure | Fallback to direct Claude analysis for that perspective |
|
||||||
|
| All CLI analyses fail | Generate basic discussion from direct reading |
|
||||||
|
| Consensus timeout (all perspectives diverge) | Escalate as discussion_blocked with `[discussant]` tag |
|
||||||
|
| Prior discussion records missing | Continue without continuity context |
|
||||||
|
| Session folder not found | Notify coordinator with `[discussant]` tag, request session path |
|
||||||
|
| Unexpected error | Log error via team_msg with `[discussant]` tag, report to coordinator |
|
||||||
<!-- Begin new file (356 lines): commands/implement.md — Implement Command -->
|
|||||||
|
# Implement Command
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
Multi-backend code implementation with progress tracking and batch execution support.
|
||||||
|
|
||||||
|
## Execution Paths
|
||||||
|
|
||||||
|
### Path 1: Simple Task + Agent Backend (Direct Edit)
|
||||||
|
|
||||||
|
**Criteria**:
|
||||||
|
```javascript
|
||||||
|
function isSimpleTask(task) {
|
||||||
|
return task.description.length < 200 &&
|
||||||
|
!task.description.includes("refactor") &&
|
||||||
|
!task.description.includes("architecture") &&
|
||||||
|
!task.description.includes("multiple files")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Execution**:
|
||||||
|
```javascript
|
||||||
|
if (isSimpleTask(task) && executor === "agent") {
|
||||||
|
// Direct file edit without subagent overhead
|
||||||
|
const targetFile = task.metadata?.target_file
|
||||||
|
if (targetFile) {
|
||||||
|
const content = Read(targetFile)
|
||||||
|
const prompt = buildExecutionPrompt(task, plan, [task])
|
||||||
|
|
||||||
|
// Apply edit directly
|
||||||
|
Edit(targetFile, oldContent, newContent)
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
files_modified: [targetFile],
|
||||||
|
method: "direct_edit"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Path 2: Agent Backend (code-developer subagent)
|
||||||
|
|
||||||
|
**Execution**:
|
||||||
|
```javascript
|
||||||
|
if (executor === "agent") {
|
||||||
|
const prompt = buildExecutionPrompt(task, plan, [task])
|
||||||
|
|
||||||
|
const result = Subagent({
|
||||||
|
type: "code-developer",
|
||||||
|
prompt: prompt,
|
||||||
|
run_in_background: false // Synchronous execution
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: result.success,
|
||||||
|
files_modified: result.files_modified || [],
|
||||||
|
method: "subagent"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Path 3: Codex Backend (CLI)
|
||||||
|
|
||||||
|
**Execution**:
|
||||||
|
```javascript
|
||||||
|
if (executor === "codex") {
|
||||||
|
const prompt = buildExecutionPrompt(task, plan, [task])
|
||||||
|
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "executing_codex",
|
||||||
|
message: "Starting Codex implementation..."
|
||||||
|
}, "[executor]")
|
||||||
|
|
||||||
|
const result = Bash(
|
||||||
|
`ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --cd ${task.metadata?.working_dir || "."}`,
|
||||||
|
{ run_in_background: true, timeout: 300000 }
|
||||||
|
)
|
||||||
|
|
||||||
|
// Wait for CLI completion via hook callback
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
files_modified: [], // Will be detected by git diff
|
||||||
|
method: "codex_cli"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Path 4: Gemini Backend (CLI)
|
||||||
|
|
||||||
|
**Execution**:
|
||||||
|
```javascript
|
||||||
|
if (executor === "gemini") {
|
||||||
|
const prompt = buildExecutionPrompt(task, plan, [task])
|
||||||
|
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "executing_gemini",
|
||||||
|
message: "Starting Gemini implementation..."
|
||||||
|
}, "[executor]")
|
||||||
|
|
||||||
|
const result = Bash(
|
||||||
|
`ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --cd ${task.metadata?.working_dir || "."}`,
|
||||||
|
{ run_in_background: true, timeout: 300000 }
|
||||||
|
)
|
||||||
|
|
||||||
|
// Wait for CLI completion via hook callback
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
files_modified: [], // Will be detected by git diff
|
||||||
|
method: "gemini_cli"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Prompt Building
|
||||||
|
|
||||||
|
### Single Task Prompt
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function buildExecutionPrompt(task, plan, tasks) {
|
||||||
|
const context = extractContextFromPlan(plan, task)
|
||||||
|
|
||||||
|
return `
|
||||||
|
# Implementation Task: ${task.task_id}
|
||||||
|
|
||||||
|
## Task Description
|
||||||
|
${task.description}
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
${task.acceptance_criteria?.map((c, i) => `${i + 1}. ${c}`).join("\n") || "None specified"}
|
||||||
|
|
||||||
|
## Context from Plan
|
||||||
|
${context}
|
||||||
|
|
||||||
|
## Files to Modify
|
||||||
|
${task.metadata?.target_files?.join("\n") || "Auto-detect based on task"}
|
||||||
|
|
||||||
|
## Constraints
|
||||||
|
- Follow existing code style and patterns
|
||||||
|
- Preserve backward compatibility
|
||||||
|
- Add appropriate error handling
|
||||||
|
- Include inline comments for complex logic
|
||||||
|
- Update related tests if applicable
|
||||||
|
|
||||||
|
## Expected Output
|
||||||
|
- Modified files with implementation
|
||||||
|
- Brief summary of changes made
|
||||||
|
- Any assumptions or decisions made during implementation
|
||||||
|
`.trim()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Batch Task Prompt
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function buildBatchPrompt(tasks, plan) {
|
||||||
|
const taskDescriptions = tasks.map((task, i) => `
|
||||||
|
### Task ${i + 1}: ${task.task_id}
|
||||||
|
**Description**: ${task.description}
|
||||||
|
**Acceptance Criteria**:
|
||||||
|
${task.acceptance_criteria?.map((c, j) => ` ${j + 1}. ${c}`).join("\n") || " None specified"}
|
||||||
|
**Target Files**: ${task.metadata?.target_files?.join(", ") || "Auto-detect"}
|
||||||
|
`).join("\n")
|
||||||
|
|
||||||
|
return `
|
||||||
|
# Batch Implementation: ${tasks.length} Tasks
|
||||||
|
|
||||||
|
## Tasks to Implement
|
||||||
|
${taskDescriptions}
|
||||||
|
|
||||||
|
## Context from Plan
|
||||||
|
${extractContextFromPlan(plan, tasks[0])}
|
||||||
|
|
||||||
|
## Batch Execution Guidelines
|
||||||
|
- Implement tasks in the order listed
|
||||||
|
- Ensure each task's acceptance criteria are met
|
||||||
|
- Maintain consistency across all implementations
|
||||||
|
- Report any conflicts or dependencies discovered
|
||||||
|
- Follow existing code patterns and style
|
||||||
|
|
||||||
|
## Expected Output
|
||||||
|
- All tasks implemented successfully
|
||||||
|
- Summary of changes per task
|
||||||
|
- Any cross-task considerations or conflicts
|
||||||
|
`.trim()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Context Extraction
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function extractContextFromPlan(plan, task) {
|
||||||
|
// Extract relevant sections from plan
|
||||||
|
const sections = []
|
||||||
|
|
||||||
|
// Architecture context
|
||||||
|
const archMatch = plan.match(/## Architecture[\s\S]*?(?=##|$)/)
|
||||||
|
if (archMatch) {
|
||||||
|
sections.push("### Architecture\n" + archMatch[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Technical stack
|
||||||
|
const techMatch = plan.match(/## Technical Stack[\s\S]*?(?=##|$)/)
|
||||||
|
if (techMatch) {
|
||||||
|
sections.push("### Technical Stack\n" + techMatch[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Related tasks context
|
||||||
|
const taskSection = plan.match(new RegExp(`${task.task_id}[\\s\\S]*?(?=IMPL-\\d+|$)`))
|
||||||
|
if (taskSection) {
|
||||||
|
sections.push("### Task Context\n" + taskSection[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
return sections.join("\n\n") || "No additional context available"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Progress Tracking
|
||||||
|
|
||||||
|
### Batch Progress Updates
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function reportBatchProgress(batchIndex, totalBatches, currentTask) {
|
||||||
|
if (totalBatches > 1) {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
batch_index: batchIndex + 1,
|
||||||
|
total_batches: totalBatches,
|
||||||
|
current_task: currentTask.task_id,
|
||||||
|
message: `Processing batch ${batchIndex + 1}/${totalBatches}: ${currentTask.task_id}`
|
||||||
|
}, "[executor]")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Long-Running Task Updates
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function reportLongRunningTask(task, elapsedSeconds) {
|
||||||
|
if (elapsedSeconds > 60 && elapsedSeconds % 30 === 0) {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
elapsed_seconds: elapsedSeconds,
|
||||||
|
message: `Still processing ${task.task_id} (${elapsedSeconds}s elapsed)...`
|
||||||
|
}, "[executor]")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Utility Functions
|
||||||
|
|
||||||
|
### Prompt Escaping
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function escapePrompt(prompt) {
|
||||||
|
return prompt
|
||||||
|
.replace(/\\/g, "\\\\")
|
||||||
|
.replace(/"/g, '\\"')
|
||||||
|
.replace(/\n/g, "\\n")
|
||||||
|
.replace(/\$/g, "\\$")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### File Change Detection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function detectModifiedFiles() {
|
||||||
|
const gitDiff = Bash("git diff --name-only HEAD")
|
||||||
|
return gitDiff.stdout.split("\n").filter(f => f.trim())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Simple Task Detection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function isSimpleTask(task) {
|
||||||
|
const simpleIndicators = [
|
||||||
|
task.description.length < 200,
|
||||||
|
!task.description.toLowerCase().includes("refactor"),
|
||||||
|
!task.description.toLowerCase().includes("architecture"),
|
||||||
|
!task.description.toLowerCase().includes("multiple files"),
|
||||||
|
!task.description.toLowerCase().includes("complex"),
|
||||||
|
task.metadata?.target_files?.length === 1
|
||||||
|
]
|
||||||
|
|
||||||
|
return simpleIndicators.filter(Boolean).length >= 4
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Recovery
|
||||||
|
|
||||||
|
### Retry Logic
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function executeWithRetry(task, executor, maxRetries = 3) {
|
||||||
|
let attempt = 0
|
||||||
|
let lastError = null
|
||||||
|
|
||||||
|
while (attempt < maxRetries) {
|
||||||
|
try {
|
||||||
|
const result = executeTask(task, executor)
|
||||||
|
if (result.success) {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
lastError = result.error
|
||||||
|
} catch (error) {
|
||||||
|
lastError = error.message
|
||||||
|
}
|
||||||
|
|
||||||
|
attempt++
|
||||||
|
if (attempt < maxRetries) {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
message: `Retry attempt ${attempt}/${maxRetries} after error: ${lastError}`
|
||||||
|
}, "[executor]")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: lastError,
|
||||||
|
retry_count: maxRetries
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Backend Fallback
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function executeWithFallback(task, primaryExecutor) {
|
||||||
|
const result = executeTask(task, primaryExecutor)
|
||||||
|
|
||||||
|
if (!result.success && primaryExecutor !== "agent") {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
message: `${primaryExecutor} failed, falling back to agent backend...`
|
||||||
|
}, "[executor]")
|
||||||
|
|
||||||
|
return executeTask(task, "agent")
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
```
|
||||||
324
.claude/skills/team-lifecycle-v2/roles/executor/role.md
Normal file
324
.claude/skills/team-lifecycle-v2/roles/executor/role.md
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
# Executor Role
|
||||||
|
|
||||||
|
## 1. Role Identity
|
||||||
|
|
||||||
|
- **Name**: executor
|
||||||
|
- **Task Prefix**: IMPL-*
|
||||||
|
- **Output Tag**: `[executor]`
|
||||||
|
- **Responsibility**: Load plan → Route to backend → Implement code → Self-validate → Report
|
||||||
|
|
||||||
|
## 2. Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process IMPL-* tasks
|
||||||
|
- Follow approved plan exactly
|
||||||
|
- Use declared execution backends (agent/codex/gemini)
|
||||||
|
- Self-validate all implementations (syntax + acceptance criteria)
|
||||||
|
- Tag all outputs with `[executor]`
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks
|
||||||
|
- Contact other workers directly
|
||||||
|
- Modify plan files
|
||||||
|
- Skip self-validation
|
||||||
|
- Proceed without plan approval
|
||||||
|
|
||||||
|
## 3. Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Purpose | Format |
|
||||||
|
|------|-----------|---------|--------|
|
||||||
|
| `task_request` | FROM coordinator | Receive IMPL-* task assignment | `{ type: "task_request", task_id, description }` |
|
||||||
|
| `task_complete` | TO coordinator | Report implementation success | `{ type: "task_complete", task_id, status: "success", files_modified, validation_results }` |
|
||||||
|
| `task_failed` | TO coordinator | Report implementation failure | `{ type: "task_failed", task_id, error, retry_count }` |
|
||||||
|
| `progress_update` | TO coordinator | Report batch progress | `{ type: "progress_update", task_id, batch_index, total_batches }` |
|
||||||
|
|
||||||
|
## 4. Message Bus
|
||||||
|
|
||||||
|
**Primary**: Use `team_msg` for all coordinator communication with `[executor]` tag:
|
||||||
|
```javascript
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: "IMPL-001",
|
||||||
|
status: "success",
|
||||||
|
files_modified: ["src/auth.ts"],
|
||||||
|
validation_results: { syntax: "pass", acceptance: "pass" }
|
||||||
|
}, "[executor]")
|
||||||
|
```
|
||||||
|
|
||||||
|
**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/executor-{timestamp}.json`
|
||||||
|
|
||||||
|
## 5. Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/implement.md` - Multi-backend code implementation with progress tracking
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
- `code-developer` - Synchronous agent execution for simple tasks and agent backend
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- `ccw cli --tool codex --mode write` - Codex backend implementation
|
||||||
|
- `ccw cli --tool gemini --mode write` - Gemini backend implementation
|
||||||
|
|
||||||
|
## 6. Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task & Plan Loading
|
||||||
|
|
||||||
|
**Task Discovery**:
|
||||||
|
```javascript
|
||||||
|
const tasks = Glob(".workflow/.team/tasks/IMPL-*.json")
|
||||||
|
.filter(task => task.status === "pending" && task.assigned_to === "executor")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Plan Path Extraction**:
|
||||||
|
```javascript
|
||||||
|
const planPath = task.metadata?.plan_path || ".workflow/plan.md"
|
||||||
|
const plan = Read(planPath)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Execution Backend Resolution**:
|
||||||
|
```javascript
|
||||||
|
function resolveExecutor(task, plan) {
|
||||||
|
// Priority 1: Task-level override
|
||||||
|
if (task.metadata?.executor) {
|
||||||
|
return task.metadata.executor // "agent" | "codex" | "gemini"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 2: Plan-level default
|
||||||
|
const planMatch = plan.match(/Execution Backend:\s*(agent|codex|gemini)/i)
|
||||||
|
if (planMatch) {
|
||||||
|
return planMatch[1].toLowerCase()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 3: Auto-select based on task complexity
|
||||||
|
const isSimple = task.description.length < 200 &&
|
||||||
|
!task.description.includes("refactor") &&
|
||||||
|
!task.description.includes("architecture")
|
||||||
|
|
||||||
|
return isSimple ? "agent" : "codex" // Default: codex for complex, agent for simple
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Code Review Resolution**:
|
||||||
|
```javascript
|
||||||
|
function resolveCodeReview(task, plan) {
|
||||||
|
// Priority 1: Task-level override
|
||||||
|
if (task.metadata?.code_review !== undefined) {
|
||||||
|
return task.metadata.code_review // boolean
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 2: Plan-level default
|
||||||
|
const reviewMatch = plan.match(/Code Review:\s*(enabled|disabled)/i)
|
||||||
|
if (reviewMatch) {
|
||||||
|
return reviewMatch[1].toLowerCase() === "enabled"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 3: Default based on task type
|
||||||
|
const criticalKeywords = ["auth", "security", "payment", "api", "database"]
|
||||||
|
const isCritical = criticalKeywords.some(kw =>
|
||||||
|
task.description.toLowerCase().includes(kw)
|
||||||
|
)
|
||||||
|
|
||||||
|
return isCritical // Enable review for critical paths
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Task Grouping
|
||||||
|
|
||||||
|
**Dependency-Based Batching**:
|
||||||
|
```javascript
|
||||||
|
function createBatches(tasks, plan) {
|
||||||
|
// Extract dependencies from plan
|
||||||
|
const dependencies = new Map()
|
||||||
|
const depRegex = /IMPL-(\d+).*depends on.*IMPL-(\d+)/gi
|
||||||
|
let match
|
||||||
|
while ((match = depRegex.exec(plan)) !== null) {
|
||||||
|
const [_, taskId, depId] = match
|
||||||
|
if (!dependencies.has(`IMPL-${taskId}`)) {
|
||||||
|
dependencies.set(`IMPL-${taskId}`, [])
|
||||||
|
}
|
||||||
|
dependencies.get(`IMPL-${taskId}`).push(`IMPL-${depId}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Topological sort for execution order
|
||||||
|
const batches = []
|
||||||
|
const completed = new Set()
|
||||||
|
const remaining = new Set(tasks.map(t => t.task_id))
|
||||||
|
|
||||||
|
while (remaining.size > 0) {
|
||||||
|
const batch = []
|
||||||
|
|
||||||
|
for (const taskId of remaining) {
|
||||||
|
const deps = dependencies.get(taskId) || []
|
||||||
|
const depsCompleted = deps.every(dep => completed.has(dep))
|
||||||
|
|
||||||
|
if (depsCompleted) {
|
||||||
|
batch.push(tasks.find(t => t.task_id === taskId))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (batch.length === 0) {
|
||||||
|
// Circular dependency detected
|
||||||
|
throw new Error(`Circular dependency detected in remaining tasks: ${[...remaining].join(", ")}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
batches.push(batch)
|
||||||
|
batch.forEach(task => {
|
||||||
|
completed.add(task.task_id)
|
||||||
|
remaining.delete(task.task_id)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return batches
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Code Implementation
|
||||||
|
|
||||||
|
**Delegate to Command**:
|
||||||
|
```javascript
|
||||||
|
const implementCommand = Read("commands/implement.md")
|
||||||
|
// Command handles:
|
||||||
|
// - buildExecutionPrompt (context + acceptance criteria)
|
||||||
|
// - buildBatchPrompt (multi-task batching)
|
||||||
|
// - 4 execution paths: simple+agent, agent, codex, gemini
|
||||||
|
// - Progress updates via team_msg
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Self-Validation
|
||||||
|
|
||||||
|
**Syntax Check**:
|
||||||
|
```javascript
|
||||||
|
const syntaxCheck = Bash("tsc --noEmit", { timeout: 30000 })
|
||||||
|
const syntaxPass = syntaxCheck.exitCode === 0
|
||||||
|
```
|
||||||
|
|
||||||
|
**Acceptance Criteria Verification**:
|
||||||
|
```javascript
|
||||||
|
function verifyAcceptance(task, implementation) {
|
||||||
|
const criteria = task.acceptance_criteria || []
|
||||||
|
const results = criteria.map(criterion => {
|
||||||
|
// Simple keyword matching for automated verification
|
||||||
|
const keywords = criterion.toLowerCase().match(/\b\w+\b/g) || []
|
||||||
|
const matched = keywords.some(kw =>
|
||||||
|
implementation.toLowerCase().includes(kw)
|
||||||
|
)
|
||||||
|
return { criterion, matched, status: matched ? "pass" : "manual_review" }
|
||||||
|
})
|
||||||
|
|
||||||
|
const allPassed = results.every(r => r.status === "pass")
|
||||||
|
return { allPassed, results }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Test File Detection**:
|
||||||
|
```javascript
|
||||||
|
function findAffectedTests(modifiedFiles) {
|
||||||
|
const testFiles = []
|
||||||
|
|
||||||
|
for (const file of modifiedFiles) {
|
||||||
|
const baseName = file.replace(/\.(ts|js|tsx|jsx)$/, "")
|
||||||
|
const testVariants = [
|
||||||
|
`${baseName}.test.ts`,
|
||||||
|
`${baseName}.test.js`,
|
||||||
|
`${baseName}.spec.ts`,
|
||||||
|
`${baseName}.spec.js`,
|
||||||
|
`${file.replace(/^src\//, "tests/")}.test.ts`,
|
||||||
|
`${file.replace(/^src\//, "__tests__/")}.test.ts`
|
||||||
|
]
|
||||||
|
|
||||||
|
for (const variant of testVariants) {
|
||||||
|
if (Bash(`test -f ${variant}`).exitCode === 0) {
|
||||||
|
testFiles.push(variant)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return testFiles
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Optional Code Review**:
|
||||||
|
```javascript
|
||||||
|
const codeReviewEnabled = resolveCodeReview(task, plan)
|
||||||
|
|
||||||
|
if (codeReviewEnabled) {
|
||||||
|
const executor = resolveExecutor(task, plan)
|
||||||
|
|
||||||
|
if (executor === "gemini") {
|
||||||
|
// Gemini Review: Use Gemini CLI for review
|
||||||
|
const reviewResult = Bash(
|
||||||
|
`ccw cli -p "Review implementation for: ${task.description}. Check: code quality, security, architecture compliance." --tool gemini --mode analysis`,
|
||||||
|
{ run_in_background: true }
|
||||||
|
)
|
||||||
|
} else if (executor === "codex") {
|
||||||
|
// Codex Review: Use Codex CLI review mode
|
||||||
|
const reviewResult = Bash(
|
||||||
|
`ccw cli --tool codex --mode review --uncommitted`,
|
||||||
|
{ run_in_background: true }
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for review results and append to validation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator
|
||||||
|
|
||||||
|
**Success Report**:
|
||||||
|
```javascript
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "success",
|
||||||
|
files_modified: modifiedFiles,
|
||||||
|
validation_results: {
|
||||||
|
syntax: syntaxPass ? "pass" : "fail",
|
||||||
|
acceptance: acceptanceResults.allPassed ? "pass" : "manual_review",
|
||||||
|
tests_found: affectedTests.length,
|
||||||
|
code_review: codeReviewEnabled ? "completed" : "skipped"
|
||||||
|
},
|
||||||
|
execution_backend: executor,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[executor]")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Failure Report**:
|
||||||
|
```javascript
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_failed",
|
||||||
|
task_id: task.task_id,
|
||||||
|
error: errorMessage,
|
||||||
|
retry_count: task.retry_count || 0,
|
||||||
|
validation_results: {
|
||||||
|
syntax: syntaxPass ? "pass" : "fail",
|
||||||
|
acceptance: "not_verified"
|
||||||
|
},
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[executor]")
|
||||||
|
```
|
||||||
|
|
||||||
|
## 7. Error Handling
|
||||||
|
|
||||||
|
| Error Type | Recovery Strategy | Escalation |
|
||||||
|
|------------|-------------------|------------|
|
||||||
|
| Syntax errors | Retry with error context (max 3 attempts) | Report to coordinator after 3 failures |
|
||||||
|
| Missing dependencies | Request dependency resolution from coordinator | Immediate escalation |
|
||||||
|
| Backend unavailable | Fallback to agent backend | Report backend switch |
|
||||||
|
| Validation failure | Include validation details in report | Manual review required |
|
||||||
|
| Circular dependencies | Abort batch, report dependency graph | Immediate escalation |
|
||||||
|
|
||||||
|
## 8. Execution Backends
|
||||||
|
|
||||||
|
| Backend | Tool | Invocation | Mode | Use Case |
|
||||||
|
|---------|------|------------|------|----------|
|
||||||
|
| **agent** | code-developer | Subagent call (synchronous) | N/A | Simple tasks, direct edits |
|
||||||
|
| **codex** | ccw cli | `ccw cli --tool codex --mode write` | write | Complex tasks, architecture changes |
|
||||||
|
| **gemini** | ccw cli | `ccw cli --tool gemini --mode write` | write | Alternative backend, analysis-heavy tasks |
|
||||||
|
|
||||||
|
**Backend Selection Logic**:
|
||||||
|
1. Task metadata override → Use specified backend
|
||||||
|
2. Plan default → Use plan-level backend
|
||||||
|
3. Auto-select → Simple tasks use agent, complex use codex
|
||||||
@@ -0,0 +1,466 @@
|
|||||||
|
# Command: Multi-Angle Exploration
|
||||||
|
|
||||||
|
Phase 2 of planner execution - assess complexity, select exploration angles, and execute parallel exploration.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This command performs multi-angle codebase exploration based on task complexity. Low complexity uses direct semantic search, while Medium/High complexity launches parallel cli-explore-agent subagents for comprehensive analysis.
|
||||||
|
|
||||||
|
## Complexity Assessment
|
||||||
|
|
||||||
|
### assessComplexity Function
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function assessComplexity(desc) {
|
||||||
|
let score = 0
|
||||||
|
if (/refactor|architect|restructure|模块|系统/.test(desc)) score += 2
|
||||||
|
if (/multiple|多个|across|跨/.test(desc)) score += 2
|
||||||
|
if (/integrate|集成|api|database/.test(desc)) score += 1
|
||||||
|
if (/security|安全|performance|性能/.test(desc)) score += 1
|
||||||
|
return score >= 4 ? 'High' : score >= 2 ? 'Medium' : 'Low'
|
||||||
|
}
|
||||||
|
|
||||||
|
const complexity = assessComplexity(task.description)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Complexity Levels
|
||||||
|
|
||||||
|
| Level | Score | Characteristics | Angle Count |
|
||||||
|
|-------|-------|----------------|-------------|
|
||||||
|
| **Low** | 0-1 | Simple feature, single module, clear scope | 1 |
|
||||||
|
| **Medium** | 2-3 | Multiple modules, integration points, moderate scope | 3 |
|
||||||
|
| **High** | 4+ | Architecture changes, cross-cutting concerns, complex scope | 4 |
|
||||||
|
|
||||||
|
## Angle Selection
|
||||||
|
|
||||||
|
### ANGLE_PRESETS
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const ANGLE_PRESETS = {
|
||||||
|
architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'],
|
||||||
|
security: ['security', 'auth-patterns', 'dataflow', 'validation'],
|
||||||
|
performance: ['performance', 'bottlenecks', 'caching', 'data-access'],
|
||||||
|
bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'],
|
||||||
|
feature: ['patterns', 'integration-points', 'testing', 'dependencies']
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### selectAngles Function
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function selectAngles(desc, count) {
|
||||||
|
const text = desc.toLowerCase()
|
||||||
|
let preset = 'feature'
|
||||||
|
if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture'
|
||||||
|
else if (/security|auth|permission|access/.test(text)) preset = 'security'
|
||||||
|
else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance'
|
||||||
|
else if (/fix|bug|error|issue|broken/.test(text)) preset = 'bugfix'
|
||||||
|
return ANGLE_PRESETS[preset].slice(0, count)
|
||||||
|
}
|
||||||
|
|
||||||
|
const angleCount = complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1)
|
||||||
|
const selectedAngles = selectAngles(task.description, angleCount)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Angle Definitions
|
||||||
|
|
||||||
|
| Angle | Focus | Use Case |
|
||||||
|
|-------|-------|----------|
|
||||||
|
| **architecture** | System structure, layer boundaries, design patterns | Refactoring, restructuring |
|
||||||
|
| **dependencies** | Module dependencies, coupling, external libraries | Integration, modularity |
|
||||||
|
| **modularity** | Component boundaries, separation of concerns | Architecture changes |
|
||||||
|
| **integration-points** | API boundaries, data flow between modules | Feature development |
|
||||||
|
| **security** | Auth/authz, input validation, data protection | Security features |
|
||||||
|
| **auth-patterns** | Authentication flows, session management | Auth implementation |
|
||||||
|
| **dataflow** | Data transformation, state propagation | Bug fixes, features |
|
||||||
|
| **validation** | Input validation, error handling | Security, quality |
|
||||||
|
| **performance** | Bottlenecks, optimization opportunities | Performance tuning |
|
||||||
|
| **bottlenecks** | Slow operations, resource contention | Performance issues |
|
||||||
|
| **caching** | Cache strategies, invalidation patterns | Performance optimization |
|
||||||
|
| **data-access** | Database queries, data fetching patterns | Performance, features |
|
||||||
|
| **error-handling** | Error propagation, recovery strategies | Bug fixes |
|
||||||
|
| **state-management** | State updates, consistency | Bug fixes, features |
|
||||||
|
| **edge-cases** | Boundary conditions, error scenarios | Bug fixes, testing |
|
||||||
|
| **patterns** | Code patterns, conventions, best practices | Feature development |
|
||||||
|
| **testing** | Test coverage, test strategies | Feature development |
|
||||||
|
|
||||||
|
## Exploration Execution
|
||||||
|
|
||||||
|
### Low Complexity: Direct Semantic Search
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (complexity === 'Low') {
|
||||||
|
// Direct exploration via semantic search
|
||||||
|
const results = mcp__ace-tool__search_context({
|
||||||
|
project_root_path: projectRoot,
|
||||||
|
query: task.description
|
||||||
|
})
|
||||||
|
|
||||||
|
// Transform ACE results to exploration JSON
|
||||||
|
const exploration = {
|
||||||
|
project_structure: "Analyzed via ACE semantic search",
|
||||||
|
relevant_files: results.files.map(f => ({
|
||||||
|
path: f.path,
|
||||||
|
rationale: f.relevance_reason || "Semantic match to task description",
|
||||||
|
role: "modify_target",
|
||||||
|
discovery_source: "ace-search",
|
||||||
|
key_symbols: f.symbols || []
|
||||||
|
})),
|
||||||
|
patterns: results.patterns || [],
|
||||||
|
dependencies: results.dependencies || [],
|
||||||
|
integration_points: results.integration_points || [],
|
||||||
|
constraints: [],
|
||||||
|
clarification_needs: [],
|
||||||
|
_metadata: {
|
||||||
|
exploration_angle: selectedAngles[0],
|
||||||
|
complexity: 'Low',
|
||||||
|
discovery_method: 'ace-semantic-search'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Write(`${planDir}/exploration-${selectedAngles[0]}.json`, JSON.stringify(exploration, null, 2))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Medium/High Complexity: Parallel cli-explore-agent
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
else {
|
||||||
|
// Launch parallel cli-explore-agent for each angle
|
||||||
|
selectedAngles.forEach((angle, index) => {
|
||||||
|
Task({
|
||||||
|
subagent_type: "cli-explore-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Explore: ${angle}`,
|
||||||
|
prompt: `
|
||||||
|
## Task Objective
|
||||||
|
Execute **${angle}** exploration for task planning context.
|
||||||
|
|
||||||
|
## Output Location
|
||||||
|
**Session Folder**: ${sessionFolder}
|
||||||
|
**Output File**: ${planDir}/exploration-${angle}.json
|
||||||
|
|
||||||
|
## Assigned Context
|
||||||
|
- **Exploration Angle**: ${angle}
|
||||||
|
- **Task Description**: ${task.description}
|
||||||
|
- **Spec Context**: ${specContext ? 'Available — use spec/requirements, spec/architecture, spec/epics for informed exploration' : 'Not available (impl-only mode)'}
|
||||||
|
- **Exploration Index**: ${index + 1} of ${selectedAngles.length}
|
||||||
|
|
||||||
|
## MANDATORY FIRST STEPS
|
||||||
|
1. Run: rg -l "{relevant_keyword}" --type ts (locate relevant files)
|
||||||
|
2. Execute: cat ~/.ccw/workflows/cli-templates/schemas/explore-json-schema.json (get output schema)
|
||||||
|
3. Read: .workflow/project-tech.json (if exists - technology stack)
|
||||||
|
|
||||||
|
## Expected Output
|
||||||
|
Write JSON to: ${planDir}/exploration-${angle}.json
|
||||||
|
Follow explore-json-schema.json structure with ${angle}-focused findings.
|
||||||
|
|
||||||
|
**MANDATORY**: Every file in relevant_files MUST have:
|
||||||
|
- **rationale** (required): Specific selection basis tied to ${angle} topic (>10 chars, not generic)
|
||||||
|
- **role** (required): modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only
|
||||||
|
- **discovery_source** (recommended): bash-scan|cli-analysis|ace-search|dependency-trace|manual
|
||||||
|
- **key_symbols** (recommended): Key functions/classes/types relevant to task
|
||||||
|
|
||||||
|
## Exploration Focus by Angle
|
||||||
|
|
||||||
|
${getAngleFocusGuide(angle)}
|
||||||
|
|
||||||
|
## Output Schema Structure
|
||||||
|
|
||||||
|
\`\`\`json
|
||||||
|
{
|
||||||
|
"project_structure": "string - high-level architecture overview",
|
||||||
|
"relevant_files": [
|
||||||
|
{
|
||||||
|
"path": "string - relative file path",
|
||||||
|
"rationale": "string - WHY this file matters for ${angle} (>10 chars, specific)",
|
||||||
|
"role": "modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only",
|
||||||
|
"discovery_source": "bash-scan|cli-analysis|ace-search|dependency-trace|manual",
|
||||||
|
"key_symbols": ["function/class/type names"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"patterns": ["string - code patterns relevant to ${angle}"],
|
||||||
|
"dependencies": ["string - module/library dependencies"],
|
||||||
|
"integration_points": ["string - API/interface boundaries"],
|
||||||
|
"constraints": ["string - technical constraints"],
|
||||||
|
"clarification_needs": ["string - questions needing user input"],
|
||||||
|
"_metadata": {
|
||||||
|
"exploration_angle": "${angle}",
|
||||||
|
"complexity": "${complexity}",
|
||||||
|
"discovery_method": "cli-explore-agent"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
\`\`\`
|
||||||
|
`
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Angle Focus Guide
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Return the exploration guidance text for a given exploration angle.
// Known angles resolve to a fixed checklist string; any other angle falls
// back to a generic one-line prompt built from the angle name.
function getAngleFocusGuide(angle) {
  const genericGuide = `**${angle} Focus**: Analyze codebase from ${angle} perspective`

  const focusGuides = {
    // Structure & design angles
    architecture: `
**Architecture Focus**:
- Identify layer boundaries (presentation, business, data)
- Map module dependencies and coupling
- Locate design patterns (factory, strategy, observer, etc.)
- Find architectural decision records (ADRs)
- Analyze component responsibilities`,

    modularity: `
**Modularity Focus**:
- Identify module boundaries and interfaces
- Analyze separation of concerns
- Locate tightly coupled code
- Find opportunities for extraction/refactoring
- Map public vs private APIs`,

    dependencies: `
**Dependencies Focus**:
- Map internal module dependencies (import/require statements)
- Identify external library usage (package.json, requirements.txt)
- Trace dependency chains and circular dependencies
- Locate shared utilities and common modules
- Analyze coupling strength between modules`,

    'integration-points': `
**Integration Points Focus**:
- Locate API endpoints and routes
- Identify data flow between modules
- Find event emitters/listeners
- Map external service integrations
- Analyze interface contracts`,

    // Security angles
    security: `
**Security Focus**:
- Locate authentication/authorization logic
- Identify input validation points
- Find sensitive data handling
- Analyze access control mechanisms
- Locate security-related middleware`,

    'auth-patterns': `
**Auth Patterns Focus**:
- Identify authentication flows (login, logout, refresh)
- Locate session management code
- Find token generation/validation
- Map user permission checks
- Analyze auth middleware`,

    validation: `
**Validation Focus**:
- Locate input validation logic
- Identify schema definitions
- Find error handling for invalid data
- Map validation middleware
- Analyze sanitization functions`,

    // Data & state angles
    dataflow: `
**Dataflow Focus**:
- Trace data transformations
- Identify state propagation paths
- Locate data validation points
- Map data sources and sinks
- Analyze data mutation points`,

    'data-access': `
**Data Access Focus**:
- Locate database query patterns
- Identify ORM/query builder usage
- Find data fetching strategies
- Map data access layers
- Analyze query optimization opportunities`,

    'state-management': `
**State Management Focus**:
- Locate state containers (Redux, Vuex, etc.)
- Identify state update patterns
- Find state synchronization logic
- Map state dependencies
- Analyze state consistency mechanisms`,

    // Performance angles
    performance: `
**Performance Focus**:
- Identify computational bottlenecks
- Locate database queries (N+1 problems)
- Find synchronous blocking operations
- Map resource-intensive operations
- Analyze algorithm complexity`,

    bottlenecks: `
**Bottlenecks Focus**:
- Locate slow operations (profiling data)
- Identify resource contention points
- Find inefficient algorithms
- Map hot paths in code
- Analyze concurrency issues`,

    caching: `
**Caching Focus**:
- Locate existing cache implementations
- Identify cacheable operations
- Find cache invalidation logic
- Map cache key strategies
- Analyze cache hit/miss patterns`,

    // Robustness & quality angles
    'error-handling': `
**Error Handling Focus**:
- Locate try-catch blocks
- Identify error propagation paths
- Find error recovery strategies
- Map error logging points
- Analyze error types and handling`,

    'edge-cases': `
**Edge Cases Focus**:
- Identify boundary conditions
- Locate null/undefined handling
- Find empty array/object handling
- Map error scenarios
- Analyze exceptional flows`,

    patterns: `
**Patterns Focus**:
- Identify code patterns and conventions
- Locate design pattern implementations
- Find naming conventions
- Map code organization patterns
- Analyze best practices usage`,

    testing: `
**Testing Focus**:
- Locate test files and test utilities
- Identify test coverage gaps
- Find test patterns (unit, integration, e2e)
- Map mocking/stubbing strategies
- Analyze test organization`
  }

  const guide = focusGuides[angle]
  return guide || genericGuide
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Explorations Manifest
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Build explorations manifest
|
||||||
|
const explorationManifest = {
|
||||||
|
session_id: `${taskSlug}-${dateStr}`,
|
||||||
|
task_description: task.description,
|
||||||
|
complexity: complexity,
|
||||||
|
exploration_count: selectedAngles.length,
|
||||||
|
explorations: selectedAngles.map(angle => ({
|
||||||
|
angle: angle,
|
||||||
|
file: `exploration-${angle}.json`,
|
||||||
|
path: `${planDir}/exploration-${angle}.json`
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
Write(`${planDir}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Schema
|
||||||
|
|
||||||
|
### explore-json-schema.json Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"project_structure": "string - high-level architecture overview",
|
||||||
|
"relevant_files": [
|
||||||
|
{
|
||||||
|
"path": "string - relative file path",
|
||||||
|
"rationale": "string - specific selection basis (>10 chars)",
|
||||||
|
"role": "modify_target|dependency|pattern_reference|test_target|type_definition|integration_point|config|context_only",
|
||||||
|
"discovery_source": "bash-scan|cli-analysis|ace-search|dependency-trace|manual",
|
||||||
|
"key_symbols": ["string - function/class/type names"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"patterns": ["string - code patterns relevant to angle"],
|
||||||
|
"dependencies": ["string - module/library dependencies"],
|
||||||
|
"integration_points": ["string - API/interface boundaries"],
|
||||||
|
"constraints": ["string - technical constraints"],
|
||||||
|
"clarification_needs": ["string - questions needing user input"],
|
||||||
|
"_metadata": {
|
||||||
|
"exploration_angle": "string - angle name",
|
||||||
|
"complexity": "Low|Medium|High",
|
||||||
|
"discovery_method": "ace-semantic-search|cli-explore-agent"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with Phase 3
|
||||||
|
|
||||||
|
Phase 3 (Plan Generation) consumes:
|
||||||
|
1. `explorations-manifest.json` - list of exploration files
|
||||||
|
2. `exploration-{angle}.json` - per-angle exploration results
|
||||||
|
3. `specContext` (if available) - requirements, architecture, epics
|
||||||
|
|
||||||
|
These inputs are passed to cli-lite-planning-agent for plan generation.
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Exploration Agent Failure
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
try {
|
||||||
|
Task({
|
||||||
|
subagent_type: "cli-explore-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: `Explore: ${angle}`,
|
||||||
|
prompt: `...`
|
||||||
|
})
|
||||||
|
} catch (error) {
|
||||||
|
// Skip exploration, continue with available explorations
|
||||||
|
console.error(`[planner] Exploration failed for angle: ${angle}`, error)
|
||||||
|
// Remove failed angle from manifest
|
||||||
|
explorationManifest.explorations = explorationManifest.explorations.filter(e => e.angle !== angle)
|
||||||
|
explorationManifest.exploration_count = explorationManifest.explorations.length
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### All Explorations Fail
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (explorationManifest.exploration_count === 0) {
|
||||||
|
// Fallback: Plan from task description only
|
||||||
|
console.warn(`[planner] All explorations failed, planning from task description only`)
|
||||||
|
// Proceed to Phase 3 with empty explorations
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### ACE Search Failure (Low Complexity)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
try {
|
||||||
|
const results = mcp__ace-tool__search_context({
|
||||||
|
project_root_path: projectRoot,
|
||||||
|
query: task.description
|
||||||
|
})
|
||||||
|
} catch (error) {
|
||||||
|
// Fallback: Use ripgrep for basic file discovery
|
||||||
|
const rgResults = Bash(`rg -l "${task.description}" --type ts`)
|
||||||
|
const exploration = {
|
||||||
|
project_structure: "Basic file discovery via ripgrep",
|
||||||
|
relevant_files: rgResults.split('\n').map(path => ({
|
||||||
|
path: path.trim(),
|
||||||
|
rationale: "Matched task description keywords",
|
||||||
|
role: "modify_target",
|
||||||
|
discovery_source: "bash-scan",
|
||||||
|
key_symbols: []
|
||||||
|
})),
|
||||||
|
patterns: [],
|
||||||
|
dependencies: [],
|
||||||
|
integration_points: [],
|
||||||
|
constraints: [],
|
||||||
|
clarification_needs: [],
|
||||||
|
_metadata: {
|
||||||
|
exploration_angle: selectedAngles[0],
|
||||||
|
complexity: 'Low',
|
||||||
|
discovery_method: 'ripgrep-fallback'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Write(`${planDir}/exploration-${selectedAngles[0]}.json`, JSON.stringify(exploration, null, 2))
|
||||||
|
}
|
||||||
|
```
|
||||||
253
.claude/skills/team-lifecycle-v2/roles/planner/role.md
Normal file
253
.claude/skills/team-lifecycle-v2/roles/planner/role.md
Normal file
@@ -0,0 +1,253 @@
|
|||||||
|
# Role: planner
|
||||||
|
|
||||||
|
Multi-angle code exploration and structured implementation planning. Submits plans to the coordinator for approval.
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
- **Name**: `planner`
|
||||||
|
- **Task Prefix**: `PLAN-*`
|
||||||
|
- **Output Tag**: `[planner]`
|
||||||
|
- **Responsibility**: Code exploration → Implementation planning → Coordinator approval
|
||||||
|
- **Communication**: SendMessage to coordinator only
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process PLAN-* tasks
|
||||||
|
- Communicate only with coordinator
|
||||||
|
- Write plan artifacts to `plan/` folder
|
||||||
|
- Tag all SendMessage and team_msg calls with `[planner]`
|
||||||
|
- Assess complexity (Low/Medium/High)
|
||||||
|
- Execute multi-angle exploration based on complexity
|
||||||
|
- Generate plan.json + .task/TASK-*.json following schemas
|
||||||
|
- Submit plan for coordinator approval
|
||||||
|
- Load spec context in full-lifecycle mode
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks
|
||||||
|
- Contact other workers directly
|
||||||
|
- Implement code
|
||||||
|
- Modify spec documents
|
||||||
|
- Skip complexity assessment
|
||||||
|
- Proceed without exploration (Medium/High complexity)
|
||||||
|
- Generate plan without schema validation
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Trigger | Description |
|
||||||
|
|------|-----------|---------|-------------|
|
||||||
|
| `plan_ready` | planner → coordinator | Plan generation complete | With plan.json path and task count summary |
|
||||||
|
| `plan_revision` | planner → coordinator | Plan revised and resubmitted | Describes changes made |
|
||||||
|
| `impl_progress` | planner → coordinator | Exploration phase progress | Optional, for long explorations |
|
||||||
|
| `error` | planner → coordinator | Unrecoverable error | Exploration failure, schema missing, etc. |
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Plan ready
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "plan_ready", summary: "[planner] Plan ready: 3 tasks, Medium complexity", ref: `${sessionFolder}/plan/plan.json` })
|
||||||
|
|
||||||
|
// Plan revision
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "plan_revision", summary: "[planner] Split task-2 into two subtasks per feedback" })
|
||||||
|
|
||||||
|
// Error report
|
||||||
|
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "planner", to: "coordinator", type: "error", summary: "[planner] plan-overview-base-schema.json not found, using default structure" })
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Fallback
|
||||||
|
|
||||||
|
When `mcp__ccw-tools__team_msg` MCP is unavailable:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Bash(`ccw team log --team "${teamName}" --from "planner" --to "coordinator" --type "plan_ready" --summary "[planner] Plan ready: 3 tasks" --ref "${sessionFolder}/plan/plan.json" --json`)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/explore.md` - Multi-angle codebase exploration (Phase 2)
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
- **cli-explore-agent**: Per-angle exploration (Medium/High complexity)
|
||||||
|
- **cli-lite-planning-agent**: Plan generation (Medium/High complexity)
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
None directly (delegates to subagents)
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('PLAN-') &&
|
||||||
|
t.owner === 'planner' &&
|
||||||
|
t.status === 'pending' &&
|
||||||
|
t.blockedBy.length === 0
|
||||||
|
)
|
||||||
|
|
||||||
|
if (myTasks.length === 0) return // idle
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 1.5: Load Spec Context (Full-Lifecycle Mode)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Extract session folder from task description (set by coordinator)
|
||||||
|
const sessionMatch = task.description.match(/Session:\s*(.+)/)
|
||||||
|
const sessionFolder = sessionMatch ? sessionMatch[1].trim() : `.workflow/.team/default`
|
||||||
|
const planDir = `${sessionFolder}/plan`
|
||||||
|
Bash(`mkdir -p ${planDir}`)
|
||||||
|
|
||||||
|
// Check if spec directory exists (full-lifecycle mode)
|
||||||
|
const specDir = `${sessionFolder}/spec`
|
||||||
|
let specContext = null
|
||||||
|
try {
|
||||||
|
const reqIndex = Read(`${specDir}/requirements/_index.md`)
|
||||||
|
const archIndex = Read(`${specDir}/architecture/_index.md`)
|
||||||
|
const epicsIndex = Read(`${specDir}/epics/_index.md`)
|
||||||
|
const specConfig = JSON.parse(Read(`${specDir}/spec-config.json`))
|
||||||
|
specContext = { reqIndex, archIndex, epicsIndex, specConfig }
|
||||||
|
} catch { /* impl-only mode has no spec */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Multi-Angle Exploration
|
||||||
|
|
||||||
|
**Delegate to**: `Read("commands/explore.md")`
|
||||||
|
|
||||||
|
Execute complexity assessment, angle selection, and parallel exploration. See `commands/explore.md` for full implementation.
|
||||||
|
|
||||||
|
### Phase 3: Plan Generation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Read schema reference
|
||||||
|
const schema = Bash(`cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json`)
|
||||||
|
|
||||||
|
if (complexity === 'Low') {
|
||||||
|
// Direct Claude planning
|
||||||
|
Bash(`mkdir -p ${planDir}/.task`)
|
||||||
|
// Generate plan.json + .task/TASK-*.json following schemas
|
||||||
|
|
||||||
|
const plan = {
|
||||||
|
session_id: `${taskSlug}-${dateStr}`,
|
||||||
|
task_description: task.description,
|
||||||
|
complexity: 'Low',
|
||||||
|
approach: "Direct implementation based on semantic search",
|
||||||
|
task_count: 1,
|
||||||
|
task_ids: ['TASK-001'],
|
||||||
|
exploration_refs: [`${planDir}/exploration-patterns.json`]
|
||||||
|
}
|
||||||
|
Write(`${planDir}/plan.json`, JSON.stringify(plan, null, 2))
|
||||||
|
|
||||||
|
const taskDetail = {
|
||||||
|
id: 'TASK-001',
|
||||||
|
title: task.subject,
|
||||||
|
description: task.description,
|
||||||
|
files: [],
|
||||||
|
convergence: { criteria: ["Implementation complete", "Tests pass"] },
|
||||||
|
depends_on: []
|
||||||
|
}
|
||||||
|
Write(`${planDir}/.task/TASK-001.json`, JSON.stringify(taskDetail, null, 2))
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Use cli-lite-planning-agent for Medium/High
|
||||||
|
Task({
|
||||||
|
subagent_type: "cli-lite-planning-agent",
|
||||||
|
run_in_background: false,
|
||||||
|
description: "Generate detailed implementation plan",
|
||||||
|
prompt: `Generate implementation plan.
|
||||||
|
Output: ${planDir}/plan.json + ${planDir}/.task/TASK-*.json
|
||||||
|
Schema: cat ~/.ccw/workflows/cli-templates/schemas/plan-overview-base-schema.json
|
||||||
|
Task Description: ${task.description}
|
||||||
|
Explorations: ${explorationManifest}
|
||||||
|
Complexity: ${complexity}
|
||||||
|
${specContext ? `Spec Context:
|
||||||
|
- Requirements: ${specContext.reqIndex.substring(0, 500)}
|
||||||
|
- Architecture: ${specContext.archIndex.substring(0, 500)}
|
||||||
|
- Epics: ${specContext.epicsIndex.substring(0, 500)}
|
||||||
|
Reference REQ-* IDs, follow ADR decisions, reuse Epic/Story decomposition.` : ''}
|
||||||
|
Requirements: 2-7 tasks, each with id, title, files[].change, convergence.criteria, depends_on`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Submit for Approval
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const plan = JSON.parse(Read(`${planDir}/plan.json`))
|
||||||
|
const planTasks = plan.task_ids.map(id => JSON.parse(Read(`${planDir}/.task/${id}.json`)))
|
||||||
|
const taskCount = plan.task_count || plan.task_ids.length
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log", team: teamName,
|
||||||
|
from: "planner", to: "coordinator",
|
||||||
|
type: "plan_ready",
|
||||||
|
summary: `[planner] Plan就绪: ${taskCount}个task, ${complexity}复杂度`,
|
||||||
|
ref: `${planDir}/plan.json`
|
||||||
|
})
|
||||||
|
|
||||||
|
SendMessage({
|
||||||
|
type: "message",
|
||||||
|
recipient: "coordinator",
|
||||||
|
content: `[planner] ## Plan Ready for Review
|
||||||
|
|
||||||
|
**Task**: ${task.subject}
|
||||||
|
**Complexity**: ${complexity}
|
||||||
|
**Tasks**: ${taskCount}
|
||||||
|
|
||||||
|
### Task Summary
|
||||||
|
${planTasks.map((t, i) => (i+1) + '. ' + t.title).join('\n')}
|
||||||
|
|
||||||
|
### Approach
|
||||||
|
${plan.approach}
|
||||||
|
|
||||||
|
### Plan Location
|
||||||
|
${planDir}/plan.json
|
||||||
|
Task Files: ${planDir}/.task/
|
||||||
|
|
||||||
|
Please review and approve or request revisions.`,
|
||||||
|
summary: `[planner] Plan ready: ${taskCount} tasks`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Wait for coordinator response (approve → mark completed, revision → update and resubmit)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: After Approval
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
|
||||||
|
// Check for next PLAN task → back to Phase 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Session Files
|
||||||
|
|
||||||
|
```
|
||||||
|
{sessionFolder}/plan/
|
||||||
|
├── exploration-{angle}.json
|
||||||
|
├── explorations-manifest.json
|
||||||
|
├── planning-context.md
|
||||||
|
├── plan.json
|
||||||
|
└── .task/
|
||||||
|
└── TASK-*.json
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note**: `sessionFolder` is extracted from task description (`Session: .workflow/.team/TLS-xxx`). Plan outputs go to `plan/` subdirectory. In full-lifecycle mode, spec products are available at `../spec/`.
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| No PLAN-* tasks available | Idle, wait for coordinator assignment |
|
||||||
|
| Exploration agent failure | Skip exploration, plan from task description only |
|
||||||
|
| Planning agent failure | Fallback to direct Claude planning |
|
||||||
|
| Plan rejected 3+ times | Notify coordinator with `[planner]` tag, suggest alternative approach |
|
||||||
|
| Schema file not found | Use basic plan structure without schema validation, log error with `[planner]` tag |
|
||||||
|
| Spec context load failure | Continue in impl-only mode (no spec context) |
|
||||||
|
| Session folder not found | Notify coordinator with `[planner]` tag, request session path |
|
||||||
|
| Unexpected error | Log error via team_msg with `[planner]` tag, report to coordinator |
|
||||||
@@ -0,0 +1,689 @@
|
|||||||
|
# Code Review Command
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
4-dimension code review analyzing quality, security, architecture, and requirements compliance.
|
||||||
|
|
||||||
|
## Review Dimensions
|
||||||
|
|
||||||
|
### 1. Quality Review
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Quality review: scan changed files for common code-quality defects and
// bucket findings by severity.
// Params:
//   files   - array of { path, content } entries to review
//   gitDiff - raw diff text (currently unused; kept for interface stability)
// Returns: { critical, high, medium, low } arrays, each issue shaped as
// { file, line, type, message, code }.
function reviewQuality(files, gitDiff) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  for (const file of files) {
    const content = file.content
    const lines = content.split("\n")

    // --- @ts-ignore / @ts-expect-error without justification (high) ---
    lines.forEach((line, idx) => {
      if (line.includes("@ts-ignore") || line.includes("@ts-expect-error")) {
        // Measure only the text AFTER the directive. The previous check used
        // split("//")[1], which included the directive itself ("@ts-ignore" is
        // 10 chars), so "// @ts-ignore x" already counted as "justified".
        const directive = line.includes("@ts-expect-error") ? "@ts-expect-error" : "@ts-ignore"
        const justification = line.slice(line.indexOf(directive) + directive.length).trim()

        if (justification.length <= 10) {
          issues.high.push({
            file: file.path,
            line: idx + 1,
            type: "ts-ignore-without-justification",
            message: "TypeScript error suppression without explanation",
            code: line.trim()
          })
        }
      }
    })

    // --- 'any' type usage (high) ---
    const anyMatches = Grep("\\bany\\b", { path: file.path, "-n": true })
    if (anyMatches) {
      anyMatches.forEach(match => {
        // Exclude comments and type definitions that are intentionally generic
        if (!match.line.includes("//") && !match.line.includes("Generic")) {
          issues.high.push({
            file: file.path,
            line: match.lineNumber,
            type: "any-type-usage",
            message: "Using 'any' type reduces type safety",
            code: match.line.trim()
          })
        }
      })
    }

    // --- console statements in production code (high) ---
    const consoleMatches = Grep("console\\.(log|debug|info)", { path: file.path, "-n": true })
    if (consoleMatches && !file.path.includes("test")) {
      consoleMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "console-log",
          message: "Console statements should be removed from production code",
          code: match.line.trim()
        })
      })
    }

    // --- empty catch blocks (critical) ---
    const emptyCatchRegex = /catch\s*\([^)]*\)\s*\{\s*\}/g
    let match
    while ((match = emptyCatchRegex.exec(content)) !== null) {
      // match.index -> 1-based line number of this occurrence
      const lineNumber = content.substring(0, match.index).split("\n").length
      issues.critical.push({
        file: file.path,
        line: lineNumber,
        type: "empty-catch",
        message: "Empty catch block silently swallows errors",
        code: match[0]
      })
    }

    // --- magic numbers (medium) ---
    // Multi-digit literals other than a small allowlist (0/1/2/10/100/1000).
    const magicNumberRegex = /(?<![a-zA-Z0-9_])((?!0|1|2|10|100|1000)\d{2,})(?![a-zA-Z0-9_])/g
    while ((match = magicNumberRegex.exec(content)) !== null) {
      const lineNumber = content.substring(0, match.index).split("\n").length
      const line = lines[lineNumber - 1]

      // Skip comments and lines that look like constant definitions or
      // assignments (heuristic: presence of "const" or "=").
      if (!line.includes("//") && !line.includes("const") && !line.includes("=")) {
        issues.medium.push({
          file: file.path,
          line: lineNumber,
          type: "magic-number",
          message: "Magic number should be extracted to named constant",
          code: line.trim()
        })
      }
    }

    // --- duplicate code (medium) ---
    // Simple heuristic: identical non-comment lines longer than 30 chars
    // appearing more than twice in the same file.
    const lineHashes = new Map()
    lines.forEach((line, idx) => {
      const trimmed = line.trim()
      if (trimmed.length > 30 && !trimmed.startsWith("//")) {
        if (!lineHashes.has(trimmed)) {
          lineHashes.set(trimmed, [])
        }
        lineHashes.get(trimmed).push(idx + 1)
      }
    })

    lineHashes.forEach((occurrences, line) => {
      if (occurrences.length > 2) {
        issues.medium.push({
          file: file.path,
          line: occurrences[0],
          type: "duplicate-code",
          message: `Duplicate code found at lines: ${occurrences.join(", ")}`,
          code: line
        })
      }
    })
  }

  return issues
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Security Review
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Security review: scan files for common vulnerability patterns and bucket
// findings by severity.
// Params:
//   files - array of { path, content } entries to review
// Returns: { critical, high, medium, low } arrays, each issue shaped as
// { file, line, type, message, code }.
function reviewSecurity(files) {
  const issues = {
    critical: [],
    high: [],
    medium: [],
    low: []
  }

  for (const file of files) {
    const content = file.content

    // --- eval/exec usage (high) ---
    const evalMatches = Grep("\\b(eval|exec|Function\\(|setTimeout\\(.*string|setInterval\\(.*string)\\b", {
      path: file.path,
      "-n": true
    })
    if (evalMatches) {
      evalMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "dangerous-eval",
          message: "eval/exec usage can lead to code injection vulnerabilities",
          code: match.line.trim()
        })
      })
    }

    // --- innerHTML / dangerouslySetInnerHTML (high) ---
    const innerHTMLMatches = Grep("(innerHTML|dangerouslySetInnerHTML)", {
      path: file.path,
      "-n": true
    })
    if (innerHTMLMatches) {
      innerHTMLMatches.forEach(match => {
        issues.high.push({
          file: file.path,
          line: match.lineNumber,
          type: "xss-risk",
          message: "Direct HTML injection can lead to XSS vulnerabilities",
          code: match.line.trim()
        })
      })
    }

    // --- hardcoded secrets (critical) ---
    const secretPatterns = [
      /api[_-]?key\s*=\s*['"][^'"]{20,}['"]/i,
      /password\s*=\s*['"][^'"]+['"]/i,
      /secret\s*=\s*['"][^'"]{20,}['"]/i,
      /token\s*=\s*['"][^'"]{20,}['"]/i,
      /aws[_-]?access[_-]?key/i,
      /private[_-]?key\s*=\s*['"][^'"]+['"]/i
    ]

    secretPatterns.forEach(pattern => {
      // Rebuild with "g" while PRESERVING the original flags: the previous
      // new RegExp(pattern, "gm") replaced the flags outright, silently
      // dropping the case-insensitive "i" every pattern above relies on.
      const flags = pattern.flags.includes("g") ? pattern.flags : pattern.flags + "g"
      const globalPattern = new RegExp(pattern.source, flags)
      let m
      while ((m = globalPattern.exec(content)) !== null) {
        // m.index locates THIS occurrence; content.indexOf(match) previously
        // reported the first occurrence's line for every repeated match.
        const lineNumber = content.substring(0, m.index).split("\n").length
        issues.critical.push({
          file: file.path,
          line: lineNumber,
          type: "hardcoded-secret",
          message: "Hardcoded secrets should be moved to environment variables",
          code: m[0].replace(/['"][^'"]+['"]/, "'***'") // Redact secret
        })
      }
    })

    // --- SQL injection vectors (critical) ---
    const sqlInjectionMatches = Grep("(query|execute)\\s*\\(.*\\+.*\\)", {
      path: file.path,
      "-n": true
    })
    if (sqlInjectionMatches) {
      sqlInjectionMatches.forEach(match => {
        if (!match.line.includes("//") && !match.line.includes("prepared")) {
          issues.critical.push({
            file: file.path,
            line: match.lineNumber,
            type: "sql-injection",
            message: "String concatenation in SQL queries can lead to SQL injection",
            code: match.line.trim()
          })
        }
      })
    }

    // --- insecure random (medium) ---
    const insecureRandomMatches = Grep("Math\\.random\\(\\)", {
      path: file.path,
      "-n": true
    })
    if (insecureRandomMatches) {
      const contentLines = content.split("\n")
      insecureRandomMatches.forEach(match => {
        // Locate the match via its reported line number instead of
        // content.indexOf(match.line), which always found the FIRST identical
        // line and could inspect the wrong context for repeated lines.
        const lineStart = contentLines.slice(0, match.lineNumber - 1).join("\n").length
        // Only flag when nearby context suggests a security-sensitive use.
        const context = content.substring(
          Math.max(0, lineStart - 200),
          lineStart + 200
        )
        if (context.match(/token|key|secret|password|session/i)) {
          issues.medium.push({
            file: file.path,
            line: match.lineNumber,
            type: "insecure-random",
            message: "Math.random() is not cryptographically secure, use crypto.randomBytes()",
            code: match.line.trim()
          })
        }
      })
    }

    // --- missing input validation (low, heuristic) ---
    const functionMatches = Grep("(function|const.*=.*\\(|async.*\\()", {
      path: file.path,
      "-n": true
    })
    if (functionMatches) {
      functionMatches.forEach(match => {
        // Heuristic: a function with parameters whose next few lines contain
        // no guard/validation keyword.
        if (match.line.includes("(") && !match.line.includes("()")) {
          const nextLines = content.split("\n").slice(match.lineNumber, match.lineNumber + 5).join("\n")
          const hasValidation = nextLines.match(/if\s*\(|throw|assert|validate|check/)

          if (!hasValidation && !match.line.includes("test") && !match.line.includes("mock")) {
            issues.low.push({
              file: file.path,
              line: match.lineNumber,
              type: "missing-validation",
              message: "Function parameters should be validated",
              code: match.line.trim()
            })
          }
        }
      })
    }
  }

  return issues
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Architecture Review
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function reviewArchitecture(files) {
|
||||||
|
const issues = {
|
||||||
|
critical: [],
|
||||||
|
high: [],
|
||||||
|
medium: [],
|
||||||
|
low: []
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const file of files) {
|
||||||
|
const content = file.content
|
||||||
|
const lines = content.split("\n")
|
||||||
|
|
||||||
|
// Check for parent directory imports
|
||||||
|
const importMatches = Grep("from\\s+['\"](\\.\\./)+", {
|
||||||
|
path: file.path,
|
||||||
|
"-n": true
|
||||||
|
})
|
||||||
|
if (importMatches) {
|
||||||
|
importMatches.forEach(match => {
|
||||||
|
const parentLevels = (match.line.match(/\.\.\//g) || []).length
|
||||||
|
|
||||||
|
if (parentLevels > 2) {
|
||||||
|
issues.high.push({
|
||||||
|
file: file.path,
|
||||||
|
line: match.lineNumber,
|
||||||
|
type: "excessive-parent-imports",
|
||||||
|
message: `Import traverses ${parentLevels} parent directories, consider restructuring`,
|
||||||
|
code: match.line.trim()
|
||||||
|
})
|
||||||
|
} else if (parentLevels === 2) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: file.path,
|
||||||
|
line: match.lineNumber,
|
||||||
|
type: "parent-imports",
|
||||||
|
message: "Consider using absolute imports or restructuring modules",
|
||||||
|
code: match.line.trim()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for large files
|
||||||
|
const lineCount = lines.length
|
||||||
|
if (lineCount > 500) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: file.path,
|
||||||
|
line: 1,
|
||||||
|
type: "large-file",
|
||||||
|
message: `File has ${lineCount} lines, consider splitting into smaller modules`,
|
||||||
|
code: `Total lines: ${lineCount}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for circular dependencies (simple heuristic)
|
||||||
|
const imports = lines
|
||||||
|
.filter(line => line.match(/^import.*from/))
|
||||||
|
.map(line => {
|
||||||
|
const match = line.match(/from\s+['"](.+?)['"]/)
|
||||||
|
return match ? match[1] : null
|
||||||
|
})
|
||||||
|
.filter(Boolean)
|
||||||
|
|
||||||
|
// Check if any imported file imports this file back
|
||||||
|
for (const importPath of imports) {
|
||||||
|
const resolvedPath = resolveImportPath(file.path, importPath)
|
||||||
|
if (resolvedPath && Bash(`test -f ${resolvedPath}`).exitCode === 0) {
|
||||||
|
const importedContent = Read(resolvedPath)
|
||||||
|
const reverseImport = importedContent.includes(file.path.replace(/\.[jt]sx?$/, ""))
|
||||||
|
|
||||||
|
if (reverseImport) {
|
||||||
|
issues.critical.push({
|
||||||
|
file: file.path,
|
||||||
|
line: 1,
|
||||||
|
type: "circular-dependency",
|
||||||
|
message: `Circular dependency detected with ${resolvedPath}`,
|
||||||
|
code: `${file.path} ↔ ${resolvedPath}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for tight coupling (many imports from same module)
|
||||||
|
const importCounts = {}
|
||||||
|
imports.forEach(imp => {
|
||||||
|
const baseModule = imp.split("/")[0]
|
||||||
|
importCounts[baseModule] = (importCounts[baseModule] || 0) + 1
|
||||||
|
})
|
||||||
|
|
||||||
|
Object.entries(importCounts).forEach(([module, count]) => {
|
||||||
|
if (count > 5) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: file.path,
|
||||||
|
line: 1,
|
||||||
|
type: "tight-coupling",
|
||||||
|
message: `File imports ${count} items from '${module}', consider facade pattern`,
|
||||||
|
code: `Imports from ${module}: ${count}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Check for missing abstractions (long functions)
|
||||||
|
const functionRegex = /(function|const.*=.*\(|async.*\()/g
|
||||||
|
let match
|
||||||
|
while ((match = functionRegex.exec(content)) !== null) {
|
||||||
|
const startLine = content.substring(0, match.index).split("\n").length
|
||||||
|
const functionBody = extractFunctionBody(content, match.index)
|
||||||
|
const functionLines = functionBody.split("\n").length
|
||||||
|
|
||||||
|
if (functionLines > 50) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: file.path,
|
||||||
|
line: startLine,
|
||||||
|
type: "long-function",
|
||||||
|
message: `Function has ${functionLines} lines, consider extracting smaller functions`,
|
||||||
|
code: match[0].trim()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return issues
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolveImportPath(fromFile, importPath) {
|
||||||
|
if (importPath.startsWith(".")) {
|
||||||
|
const dir = fromFile.substring(0, fromFile.lastIndexOf("/"))
|
||||||
|
const resolved = `${dir}/${importPath}`.replace(/\/\.\//g, "/")
|
||||||
|
|
||||||
|
// Try with extensions
|
||||||
|
for (const ext of [".ts", ".js", ".tsx", ".jsx"]) {
|
||||||
|
if (Bash(`test -f ${resolved}${ext}`).exitCode === 0) {
|
||||||
|
return `${resolved}${ext}`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractFunctionBody(content, startIndex) {
|
||||||
|
let braceCount = 0
|
||||||
|
let inFunction = false
|
||||||
|
let body = ""
|
||||||
|
|
||||||
|
for (let i = startIndex; i < content.length; i++) {
|
||||||
|
const char = content[i]
|
||||||
|
|
||||||
|
if (char === "{") {
|
||||||
|
braceCount++
|
||||||
|
inFunction = true
|
||||||
|
} else if (char === "}") {
|
||||||
|
braceCount--
|
||||||
|
}
|
||||||
|
|
||||||
|
if (inFunction) {
|
||||||
|
body += char
|
||||||
|
}
|
||||||
|
|
||||||
|
if (inFunction && braceCount === 0) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Requirements Verification
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function verifyRequirements(plan, files, gitDiff) {
|
||||||
|
const issues = {
|
||||||
|
critical: [],
|
||||||
|
high: [],
|
||||||
|
medium: [],
|
||||||
|
low: []
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract acceptance criteria from plan
|
||||||
|
const acceptanceCriteria = extractAcceptanceCriteria(plan)
|
||||||
|
|
||||||
|
// Verify each criterion
|
||||||
|
for (const criterion of acceptanceCriteria) {
|
||||||
|
const verified = verifyCriterion(criterion, files, gitDiff)
|
||||||
|
|
||||||
|
if (!verified.met) {
|
||||||
|
issues.high.push({
|
||||||
|
file: "plan",
|
||||||
|
line: criterion.lineNumber,
|
||||||
|
type: "unmet-acceptance-criteria",
|
||||||
|
message: `Acceptance criterion not met: ${criterion.text}`,
|
||||||
|
code: criterion.text
|
||||||
|
})
|
||||||
|
} else if (verified.partial) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: "plan",
|
||||||
|
line: criterion.lineNumber,
|
||||||
|
type: "partial-acceptance-criteria",
|
||||||
|
message: `Acceptance criterion partially met: ${criterion.text}`,
|
||||||
|
code: criterion.text
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for missing error handling
|
||||||
|
const errorHandlingRequired = plan.match(/error handling|exception|validation/i)
|
||||||
|
if (errorHandlingRequired) {
|
||||||
|
const hasErrorHandling = files.some(file =>
|
||||||
|
file.content.match(/try\s*\{|catch\s*\(|throw\s+new|\.catch\(/)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (!hasErrorHandling) {
|
||||||
|
issues.high.push({
|
||||||
|
file: "implementation",
|
||||||
|
line: 1,
|
||||||
|
type: "missing-error-handling",
|
||||||
|
message: "Plan requires error handling but none found in implementation",
|
||||||
|
code: "No try-catch or error handling detected"
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for missing tests
|
||||||
|
const testingRequired = plan.match(/test|testing|coverage/i)
|
||||||
|
if (testingRequired) {
|
||||||
|
const hasTests = files.some(file =>
|
||||||
|
file.path.match(/\.(test|spec)\.[jt]sx?$/)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (!hasTests) {
|
||||||
|
issues.medium.push({
|
||||||
|
file: "implementation",
|
||||||
|
line: 1,
|
||||||
|
type: "missing-tests",
|
||||||
|
message: "Plan requires tests but no test files found",
|
||||||
|
code: "No test files detected"
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return issues
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractAcceptanceCriteria(plan) {
|
||||||
|
const criteria = []
|
||||||
|
const lines = plan.split("\n")
|
||||||
|
|
||||||
|
let inAcceptanceSection = false
|
||||||
|
lines.forEach((line, idx) => {
|
||||||
|
if (line.match(/acceptance criteria/i)) {
|
||||||
|
inAcceptanceSection = true
|
||||||
|
} else if (line.match(/^##/)) {
|
||||||
|
inAcceptanceSection = false
|
||||||
|
} else if (inAcceptanceSection && line.match(/^[-*]\s+/)) {
|
||||||
|
criteria.push({
|
||||||
|
text: line.replace(/^[-*]\s+/, "").trim(),
|
||||||
|
lineNumber: idx + 1
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return criteria
|
||||||
|
}
|
||||||
|
|
||||||
|
function verifyCriterion(criterion, files, gitDiff) {
|
||||||
|
// Extract keywords from criterion
|
||||||
|
const keywords = criterion.text.toLowerCase().match(/\b\w{4,}\b/g) || []
|
||||||
|
|
||||||
|
// Check if keywords appear in implementation
|
||||||
|
let matchCount = 0
|
||||||
|
for (const file of files) {
|
||||||
|
const content = file.content.toLowerCase()
|
||||||
|
for (const keyword of keywords) {
|
||||||
|
if (content.includes(keyword)) {
|
||||||
|
matchCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const matchRatio = matchCount / keywords.length
|
||||||
|
|
||||||
|
return {
|
||||||
|
met: matchRatio >= 0.7,
|
||||||
|
partial: matchRatio >= 0.4 && matchRatio < 0.7,
|
||||||
|
matchRatio: matchRatio
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Verdict Determination
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function determineVerdict(qualityIssues, securityIssues, architectureIssues, requirementIssues) {
|
||||||
|
const allIssues = {
|
||||||
|
critical: [
|
||||||
|
...qualityIssues.critical,
|
||||||
|
...securityIssues.critical,
|
||||||
|
...architectureIssues.critical,
|
||||||
|
...requirementIssues.critical
|
||||||
|
],
|
||||||
|
high: [
|
||||||
|
...qualityIssues.high,
|
||||||
|
...securityIssues.high,
|
||||||
|
...architectureIssues.high,
|
||||||
|
...requirementIssues.high
|
||||||
|
],
|
||||||
|
medium: [
|
||||||
|
...qualityIssues.medium,
|
||||||
|
...securityIssues.medium,
|
||||||
|
...architectureIssues.medium,
|
||||||
|
...requirementIssues.medium
|
||||||
|
],
|
||||||
|
low: [
|
||||||
|
...qualityIssues.low,
|
||||||
|
...securityIssues.low,
|
||||||
|
...architectureIssues.low,
|
||||||
|
...requirementIssues.low
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
// BLOCK: Any critical issues
|
||||||
|
if (allIssues.critical.length > 0) {
|
||||||
|
return {
|
||||||
|
verdict: "BLOCK",
|
||||||
|
reason: `${allIssues.critical.length} critical issue(s) must be fixed`,
|
||||||
|
blocking_issues: allIssues.critical
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CONDITIONAL: High or medium issues
|
||||||
|
if (allIssues.high.length > 0 || allIssues.medium.length > 0) {
|
||||||
|
return {
|
||||||
|
verdict: "CONDITIONAL",
|
||||||
|
reason: `${allIssues.high.length} high and ${allIssues.medium.length} medium issue(s) should be addressed`,
|
||||||
|
blocking_issues: []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// APPROVE: Only low issues or none
|
||||||
|
return {
|
||||||
|
verdict: "APPROVE",
|
||||||
|
reason: allIssues.low.length > 0
|
||||||
|
? `${allIssues.low.length} low-priority issue(s) noted`
|
||||||
|
: "No issues found",
|
||||||
|
blocking_issues: []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Report Formatting
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function formatCodeReviewReport(report) {
|
||||||
|
const { verdict, dimensions, recommendations, blocking_issues } = report
|
||||||
|
|
||||||
|
let markdown = `# Code Review Report\n\n`
|
||||||
|
markdown += `**Verdict**: ${verdict}\n\n`
|
||||||
|
|
||||||
|
if (blocking_issues.length > 0) {
|
||||||
|
markdown += `## Blocking Issues\n\n`
|
||||||
|
blocking_issues.forEach(issue => {
|
||||||
|
markdown += `- **${issue.type}** (${issue.file}:${issue.line})\n`
|
||||||
|
markdown += ` ${issue.message}\n`
|
||||||
|
markdown += ` \`\`\`\n ${issue.code}\n \`\`\`\n\n`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
markdown += `## Review Dimensions\n\n`
|
||||||
|
|
||||||
|
markdown += `### Quality Issues\n`
|
||||||
|
markdown += formatIssuesByDimension(dimensions.quality)
|
||||||
|
|
||||||
|
markdown += `### Security Issues\n`
|
||||||
|
markdown += formatIssuesByDimension(dimensions.security)
|
||||||
|
|
||||||
|
markdown += `### Architecture Issues\n`
|
||||||
|
markdown += formatIssuesByDimension(dimensions.architecture)
|
||||||
|
|
||||||
|
markdown += `### Requirements Issues\n`
|
||||||
|
markdown += formatIssuesByDimension(dimensions.requirements)
|
||||||
|
|
||||||
|
if (recommendations.length > 0) {
|
||||||
|
markdown += `## Recommendations\n\n`
|
||||||
|
recommendations.forEach((rec, i) => {
|
||||||
|
markdown += `${i + 1}. ${rec}\n`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return markdown
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatIssuesByDimension(issues) {
|
||||||
|
let markdown = ""
|
||||||
|
|
||||||
|
const severities = ["critical", "high", "medium", "low"]
|
||||||
|
severities.forEach(severity => {
|
||||||
|
if (issues[severity].length > 0) {
|
||||||
|
markdown += `\n**${severity.toUpperCase()}** (${issues[severity].length})\n\n`
|
||||||
|
issues[severity].forEach(issue => {
|
||||||
|
markdown += `- ${issue.message} (${issue.file}:${issue.line})\n`
|
||||||
|
markdown += ` \`${issue.code}\`\n\n`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return markdown || "No issues found.\n\n"
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -0,0 +1,845 @@
|
|||||||
|
# Spec Quality Command
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
5-dimension spec quality check with readiness report generation and quality gate determination.
|
||||||
|
|
||||||
|
## Quality Dimensions
|
||||||
|
|
||||||
|
### 1. Completeness (Weight: 25%)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function scoreCompleteness(specDocs) {
|
||||||
|
const requiredSections = {
|
||||||
|
"product-brief": [
|
||||||
|
"Vision Statement",
|
||||||
|
"Problem Statement",
|
||||||
|
"Target Audience",
|
||||||
|
"Success Metrics",
|
||||||
|
"Constraints"
|
||||||
|
],
|
||||||
|
"prd": [
|
||||||
|
"Goals",
|
||||||
|
"Requirements",
|
||||||
|
"User Stories",
|
||||||
|
"Acceptance Criteria",
|
||||||
|
"Non-Functional Requirements"
|
||||||
|
],
|
||||||
|
"architecture": [
|
||||||
|
"System Overview",
|
||||||
|
"Component Design",
|
||||||
|
"Data Models",
|
||||||
|
"API Specifications",
|
||||||
|
"Technology Stack"
|
||||||
|
],
|
||||||
|
"user-stories": [
|
||||||
|
"Story List",
|
||||||
|
"Acceptance Criteria",
|
||||||
|
"Priority",
|
||||||
|
"Estimation"
|
||||||
|
],
|
||||||
|
"implementation-plan": [
|
||||||
|
"Task Breakdown",
|
||||||
|
"Dependencies",
|
||||||
|
"Timeline",
|
||||||
|
"Resource Allocation"
|
||||||
|
],
|
||||||
|
"test-strategy": [
|
||||||
|
"Test Scope",
|
||||||
|
"Test Cases",
|
||||||
|
"Coverage Goals",
|
||||||
|
"Test Environment"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
let totalScore = 0
|
||||||
|
let totalWeight = 0
|
||||||
|
const details = []
|
||||||
|
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
const phase = doc.phase
|
||||||
|
const expectedSections = requiredSections[phase] || []
|
||||||
|
|
||||||
|
if (expectedSections.length === 0) continue
|
||||||
|
|
||||||
|
let presentCount = 0
|
||||||
|
let substantialCount = 0
|
||||||
|
|
||||||
|
for (const section of expectedSections) {
|
||||||
|
const sectionRegex = new RegExp(`##\\s+${section}`, "i")
|
||||||
|
const sectionMatch = doc.content.match(sectionRegex)
|
||||||
|
|
||||||
|
if (sectionMatch) {
|
||||||
|
presentCount++
|
||||||
|
|
||||||
|
// Check if section has substantial content (not just header)
|
||||||
|
const sectionIndex = doc.content.indexOf(sectionMatch[0])
|
||||||
|
const nextSectionIndex = doc.content.indexOf("\n##", sectionIndex + 1)
|
||||||
|
const sectionContent = nextSectionIndex > -1
|
||||||
|
? doc.content.substring(sectionIndex, nextSectionIndex)
|
||||||
|
: doc.content.substring(sectionIndex)
|
||||||
|
|
||||||
|
// Substantial = more than 100 chars excluding header
|
||||||
|
const contentWithoutHeader = sectionContent.replace(sectionRegex, "").trim()
|
||||||
|
if (contentWithoutHeader.length > 100) {
|
||||||
|
substantialCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const presentRatio = presentCount / expectedSections.length
|
||||||
|
const substantialRatio = substantialCount / expectedSections.length
|
||||||
|
|
||||||
|
// Score: 50% for presence, 50% for substance
|
||||||
|
const docScore = (presentRatio * 50) + (substantialRatio * 50)
|
||||||
|
|
||||||
|
totalScore += docScore
|
||||||
|
totalWeight += 100
|
||||||
|
|
||||||
|
details.push({
|
||||||
|
phase: phase,
|
||||||
|
score: docScore,
|
||||||
|
present: presentCount,
|
||||||
|
substantial: substantialCount,
|
||||||
|
expected: expectedSections.length,
|
||||||
|
missing: expectedSections.filter(s => !doc.content.match(new RegExp(`##\\s+${s}`, "i")))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const overallScore = totalWeight > 0 ? (totalScore / totalWeight) * 100 : 0
|
||||||
|
|
||||||
|
return {
|
||||||
|
score: overallScore,
|
||||||
|
weight: 25,
|
||||||
|
weighted_score: overallScore * 0.25,
|
||||||
|
details: details
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Consistency (Weight: 20%)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function scoreConsistency(specDocs) {
|
||||||
|
const issues = []
|
||||||
|
|
||||||
|
// 1. Terminology consistency
|
||||||
|
const terminologyMap = new Map()
|
||||||
|
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
// Extract key terms (capitalized phrases, technical terms)
|
||||||
|
const terms = doc.content.match(/\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b/g) || []
|
||||||
|
|
||||||
|
terms.forEach(term => {
|
||||||
|
const normalized = term.toLowerCase()
|
||||||
|
if (!terminologyMap.has(normalized)) {
|
||||||
|
terminologyMap.set(normalized, new Set())
|
||||||
|
}
|
||||||
|
terminologyMap.get(normalized).add(term)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find inconsistent terminology (same concept, different casing/spelling)
|
||||||
|
terminologyMap.forEach((variants, normalized) => {
|
||||||
|
if (variants.size > 1) {
|
||||||
|
issues.push({
|
||||||
|
type: "terminology",
|
||||||
|
severity: "medium",
|
||||||
|
message: `Inconsistent terminology: ${[...variants].join(", ")}`,
|
||||||
|
suggestion: `Standardize to one variant`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// 2. Format consistency
|
||||||
|
const headerStyles = new Map()
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
const headers = doc.content.match(/^#{1,6}\s+.+$/gm) || []
|
||||||
|
headers.forEach(header => {
|
||||||
|
const level = header.match(/^#+/)[0].length
|
||||||
|
const style = header.includes("**") ? "bold" : "plain"
|
||||||
|
const key = `level-${level}`
|
||||||
|
|
||||||
|
if (!headerStyles.has(key)) {
|
||||||
|
headerStyles.set(key, new Set())
|
||||||
|
}
|
||||||
|
headerStyles.get(key).add(style)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
headerStyles.forEach((styles, level) => {
|
||||||
|
if (styles.size > 1) {
|
||||||
|
issues.push({
|
||||||
|
type: "format",
|
||||||
|
severity: "low",
|
||||||
|
message: `Inconsistent header style at ${level}: ${[...styles].join(", ")}`,
|
||||||
|
suggestion: "Use consistent header formatting"
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// 3. Reference consistency
|
||||||
|
const references = new Map()
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
// Extract references to other documents/sections
|
||||||
|
const refs = doc.content.match(/\[.*?\]\(.*?\)/g) || []
|
||||||
|
refs.forEach(ref => {
|
||||||
|
const linkMatch = ref.match(/\((.*?)\)/)
|
||||||
|
if (linkMatch) {
|
||||||
|
const link = linkMatch[1]
|
||||||
|
if (!references.has(link)) {
|
||||||
|
references.set(link, [])
|
||||||
|
}
|
||||||
|
references.get(link).push(doc.phase)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for broken references
|
||||||
|
references.forEach((sources, link) => {
|
||||||
|
if (link.startsWith("./") || link.startsWith("../")) {
|
||||||
|
// Check if file exists
|
||||||
|
const exists = Bash(`test -f ${link}`).exitCode === 0
|
||||||
|
if (!exists) {
|
||||||
|
issues.push({
|
||||||
|
type: "reference",
|
||||||
|
severity: "high",
|
||||||
|
message: `Broken reference: ${link} (referenced in ${sources.join(", ")})`,
|
||||||
|
suggestion: "Fix or remove broken reference"
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// 4. Naming convention consistency
|
||||||
|
const namingPatterns = {
|
||||||
|
camelCase: /\b[a-z]+(?:[A-Z][a-z]+)+\b/g,
|
||||||
|
PascalCase: /\b[A-Z][a-z]+(?:[A-Z][a-z]+)+\b/g,
|
||||||
|
snake_case: /\b[a-z]+(?:_[a-z]+)+\b/g,
|
||||||
|
kebab_case: /\b[a-z]+(?:-[a-z]+)+\b/g
|
||||||
|
}
|
||||||
|
|
||||||
|
const namingCounts = {}
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
Object.entries(namingPatterns).forEach(([pattern, regex]) => {
|
||||||
|
const matches = doc.content.match(regex) || []
|
||||||
|
namingCounts[pattern] = (namingCounts[pattern] || 0) + matches.length
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const dominantPattern = Object.entries(namingCounts)
|
||||||
|
.sort((a, b) => b[1] - a[1])[0]?.[0]
|
||||||
|
|
||||||
|
Object.entries(namingCounts).forEach(([pattern, count]) => {
|
||||||
|
if (pattern !== dominantPattern && count > 10) {
|
||||||
|
issues.push({
|
||||||
|
type: "naming",
|
||||||
|
severity: "low",
|
||||||
|
message: `Mixed naming conventions: ${pattern} (${count} occurrences) vs ${dominantPattern}`,
|
||||||
|
suggestion: `Standardize to ${dominantPattern}`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Calculate score based on issues
|
||||||
|
const severityWeights = { high: 10, medium: 5, low: 2 }
|
||||||
|
const totalPenalty = issues.reduce((sum, issue) => sum + severityWeights[issue.severity], 0)
|
||||||
|
const maxPenalty = 100 // Arbitrary max for normalization
|
||||||
|
|
||||||
|
const score = Math.max(0, 100 - (totalPenalty / maxPenalty) * 100)
|
||||||
|
|
||||||
|
return {
|
||||||
|
score: score,
|
||||||
|
weight: 20,
|
||||||
|
weighted_score: score * 0.20,
|
||||||
|
issues: issues,
|
||||||
|
details: {
|
||||||
|
terminology_issues: issues.filter(i => i.type === "terminology").length,
|
||||||
|
format_issues: issues.filter(i => i.type === "format").length,
|
||||||
|
reference_issues: issues.filter(i => i.type === "reference").length,
|
||||||
|
naming_issues: issues.filter(i => i.type === "naming").length
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Traceability (Weight: 25%)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function scoreTraceability(specDocs) {
|
||||||
|
const chains = []
|
||||||
|
|
||||||
|
// Extract traceability elements
|
||||||
|
const goals = extractElements(specDocs, "product-brief", /^[-*]\s+Goal:\s*(.+)$/gm)
|
||||||
|
const requirements = extractElements(specDocs, "prd", /^[-*]\s+(?:REQ-\d+|Requirement):\s*(.+)$/gm)
|
||||||
|
const components = extractElements(specDocs, "architecture", /^[-*]\s+(?:Component|Module):\s*(.+)$/gm)
|
||||||
|
const stories = extractElements(specDocs, "user-stories", /^[-*]\s+(?:US-\d+|Story):\s*(.+)$/gm)
|
||||||
|
|
||||||
|
// Build traceability chains: Goals → Requirements → Components → Stories
|
||||||
|
for (const goal of goals) {
|
||||||
|
const chain = {
|
||||||
|
goal: goal.text,
|
||||||
|
requirements: [],
|
||||||
|
components: [],
|
||||||
|
stories: [],
|
||||||
|
complete: false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find requirements that reference this goal
|
||||||
|
const goalKeywords = extractKeywords(goal.text)
|
||||||
|
for (const req of requirements) {
|
||||||
|
if (hasKeywordOverlap(req.text, goalKeywords, 0.3)) {
|
||||||
|
chain.requirements.push(req.text)
|
||||||
|
|
||||||
|
// Find components that implement this requirement
|
||||||
|
const reqKeywords = extractKeywords(req.text)
|
||||||
|
for (const comp of components) {
|
||||||
|
if (hasKeywordOverlap(comp.text, reqKeywords, 0.3)) {
|
||||||
|
chain.components.push(comp.text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find stories that implement this requirement
|
||||||
|
for (const story of stories) {
|
||||||
|
if (hasKeywordOverlap(story.text, reqKeywords, 0.3)) {
|
||||||
|
chain.stories.push(story.text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if chain is complete
|
||||||
|
chain.complete = chain.requirements.length > 0 &&
|
||||||
|
chain.components.length > 0 &&
|
||||||
|
chain.stories.length > 0
|
||||||
|
|
||||||
|
chains.push(chain)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate score
|
||||||
|
const completeChains = chains.filter(c => c.complete).length
|
||||||
|
const totalChains = chains.length
|
||||||
|
const score = totalChains > 0 ? (completeChains / totalChains) * 100 : 0
|
||||||
|
|
||||||
|
// Identify weak links
|
||||||
|
const weakLinks = []
|
||||||
|
chains.forEach((chain, idx) => {
|
||||||
|
if (!chain.complete) {
|
||||||
|
if (chain.requirements.length === 0) {
|
||||||
|
weakLinks.push(`Goal ${idx + 1} has no linked requirements`)
|
||||||
|
}
|
||||||
|
if (chain.components.length === 0) {
|
||||||
|
weakLinks.push(`Goal ${idx + 1} has no linked components`)
|
||||||
|
}
|
||||||
|
if (chain.stories.length === 0) {
|
||||||
|
weakLinks.push(`Goal ${idx + 1} has no linked stories`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
score: score,
|
||||||
|
weight: 25,
|
||||||
|
weighted_score: score * 0.25,
|
||||||
|
details: {
|
||||||
|
total_chains: totalChains,
|
||||||
|
complete_chains: completeChains,
|
||||||
|
weak_links: weakLinks
|
||||||
|
},
|
||||||
|
chains: chains
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractElements(specDocs, phase, regex) {
|
||||||
|
const elements = []
|
||||||
|
const doc = specDocs.find(d => d.phase === phase)
|
||||||
|
|
||||||
|
if (doc) {
|
||||||
|
let match
|
||||||
|
while ((match = regex.exec(doc.content)) !== null) {
|
||||||
|
elements.push({
|
||||||
|
text: match[1].trim(),
|
||||||
|
phase: phase
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return elements
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractKeywords(text) {
|
||||||
|
// Extract meaningful words (4+ chars, not common words)
|
||||||
|
const commonWords = new Set(["that", "this", "with", "from", "have", "will", "should", "must", "can"])
|
||||||
|
const words = text.toLowerCase().match(/\b\w{4,}\b/g) || []
|
||||||
|
return words.filter(w => !commonWords.has(w))
|
||||||
|
}
|
||||||
|
|
||||||
|
function hasKeywordOverlap(text, keywords, threshold) {
|
||||||
|
const textLower = text.toLowerCase()
|
||||||
|
const matchCount = keywords.filter(kw => textLower.includes(kw)).length
|
||||||
|
return matchCount / keywords.length >= threshold
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Depth (Weight: 20%)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function scoreDepth(specDocs) {
|
||||||
|
const dimensions = []
|
||||||
|
|
||||||
|
// 1. Acceptance Criteria Testability
|
||||||
|
const acDoc = specDocs.find(d => d.phase === "prd" || d.phase === "user-stories")
|
||||||
|
if (acDoc) {
|
||||||
|
const acMatches = acDoc.content.match(/Acceptance Criteria:[\s\S]*?(?=\n##|\n\n[-*]|$)/gi) || []
|
||||||
|
let testableCount = 0
|
||||||
|
let totalCount = 0
|
||||||
|
|
||||||
|
acMatches.forEach(section => {
|
||||||
|
const criteria = section.match(/^[-*]\s+(.+)$/gm) || []
|
||||||
|
totalCount += criteria.length
|
||||||
|
|
||||||
|
criteria.forEach(criterion => {
|
||||||
|
// Testable if contains measurable verbs or specific conditions
|
||||||
|
const testablePatterns = [
|
||||||
|
/\b(should|must|will)\s+(display|show|return|validate|check|verify|calculate|send|receive)\b/i,
|
||||||
|
/\b(when|if|given)\b.*\b(then|should|must)\b/i,
|
||||||
|
/\b\d+\b/, // Contains numbers (measurable)
|
||||||
|
/\b(success|error|fail|pass)\b/i
|
||||||
|
]
|
||||||
|
|
||||||
|
const isTestable = testablePatterns.some(pattern => pattern.test(criterion))
|
||||||
|
if (isTestable) testableCount++
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
const acScore = totalCount > 0 ? (testableCount / totalCount) * 100 : 0
|
||||||
|
dimensions.push({
|
||||||
|
name: "Acceptance Criteria Testability",
|
||||||
|
score: acScore,
|
||||||
|
testable: testableCount,
|
||||||
|
total: totalCount
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. ADR Justification
|
||||||
|
const archDoc = specDocs.find(d => d.phase === "architecture")
|
||||||
|
if (archDoc) {
|
||||||
|
const adrMatches = archDoc.content.match(/##\s+(?:ADR|Decision)[\s\S]*?(?=\n##|$)/gi) || []
|
||||||
|
let justifiedCount = 0
|
||||||
|
let totalCount = adrMatches.length
|
||||||
|
|
||||||
|
adrMatches.forEach(adr => {
|
||||||
|
// Justified if contains rationale, alternatives, or consequences
|
||||||
|
const hasJustification = adr.match(/\b(rationale|reason|because|alternative|consequence|trade-?off)\b/i)
|
||||||
|
if (hasJustification) justifiedCount++
|
||||||
|
})
|
||||||
|
|
||||||
|
const adrScore = totalCount > 0 ? (justifiedCount / totalCount) * 100 : 100 // Default 100 if no ADRs
|
||||||
|
dimensions.push({
|
||||||
|
name: "ADR Justification",
|
||||||
|
score: adrScore,
|
||||||
|
justified: justifiedCount,
|
||||||
|
total: totalCount
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. User Stories Estimability
|
||||||
|
const storiesDoc = specDocs.find(d => d.phase === "user-stories")
|
||||||
|
if (storiesDoc) {
|
||||||
|
const storyMatches = storiesDoc.content.match(/^[-*]\s+(?:US-\d+|Story)[\s\S]*?(?=\n[-*]|$)/gim) || []
|
||||||
|
let estimableCount = 0
|
||||||
|
let totalCount = storyMatches.length
|
||||||
|
|
||||||
|
storyMatches.forEach(story => {
|
||||||
|
// Estimable if has clear scope, AC, and no ambiguity
|
||||||
|
const hasScope = story.match(/\b(as a|I want|so that)\b/i)
|
||||||
|
const hasAC = story.match(/acceptance criteria/i)
|
||||||
|
const hasEstimate = story.match(/\b(points?|hours?|days?|estimate)\b/i)
|
||||||
|
|
||||||
|
if ((hasScope && hasAC) || hasEstimate) estimableCount++
|
||||||
|
})
|
||||||
|
|
||||||
|
const storiesScore = totalCount > 0 ? (estimableCount / totalCount) * 100 : 0
|
||||||
|
dimensions.push({
|
||||||
|
name: "User Stories Estimability",
|
||||||
|
score: storiesScore,
|
||||||
|
estimable: estimableCount,
|
||||||
|
total: totalCount
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Technical Detail Sufficiency
|
||||||
|
const techDocs = specDocs.filter(d => d.phase === "architecture" || d.phase === "implementation-plan")
|
||||||
|
let detailScore = 0
|
||||||
|
|
||||||
|
if (techDocs.length > 0) {
|
||||||
|
const detailIndicators = [
|
||||||
|
/```[\s\S]*?```/, // Code blocks
|
||||||
|
/\b(API|endpoint|schema|model|interface|class|function)\b/i,
|
||||||
|
/\b(GET|POST|PUT|DELETE|PATCH)\b/, // HTTP methods
|
||||||
|
/\b(database|table|collection|index)\b/i,
|
||||||
|
/\b(authentication|authorization|security)\b/i
|
||||||
|
]
|
||||||
|
|
||||||
|
let indicatorCount = 0
|
||||||
|
techDocs.forEach(doc => {
|
||||||
|
detailIndicators.forEach(pattern => {
|
||||||
|
if (pattern.test(doc.content)) indicatorCount++
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
detailScore = Math.min(100, (indicatorCount / (detailIndicators.length * techDocs.length)) * 100)
|
||||||
|
dimensions.push({
|
||||||
|
name: "Technical Detail Sufficiency",
|
||||||
|
score: detailScore,
|
||||||
|
indicators_found: indicatorCount,
|
||||||
|
indicators_expected: detailIndicators.length * techDocs.length
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate overall depth score
|
||||||
|
const overallScore = dimensions.reduce((sum, d) => sum + d.score, 0) / dimensions.length
|
||||||
|
|
||||||
|
return {
|
||||||
|
score: overallScore,
|
||||||
|
weight: 20,
|
||||||
|
weighted_score: overallScore * 0.20,
|
||||||
|
dimensions: dimensions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Requirement Coverage (Weight: 10%)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function scoreRequirementCoverage(specDocs, originalRequirements) {
|
||||||
|
// Extract original requirements from task description or initial brief
|
||||||
|
const originalReqs = originalRequirements || extractOriginalRequirements(specDocs)
|
||||||
|
|
||||||
|
if (originalReqs.length === 0) {
|
||||||
|
return {
|
||||||
|
score: 100, // No requirements to cover
|
||||||
|
weight: 10,
|
||||||
|
weighted_score: 10,
|
||||||
|
details: {
|
||||||
|
total: 0,
|
||||||
|
covered: 0,
|
||||||
|
uncovered: []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract all requirements from spec documents
|
||||||
|
const specReqs = []
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
const reqMatches = doc.content.match(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*(.+)$/gm) || []
|
||||||
|
reqMatches.forEach(match => {
|
||||||
|
specReqs.push(match.replace(/^[-*]\s+(?:REQ-\d+|Requirement|Feature):\s*/, "").trim())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map original requirements to spec requirements
|
||||||
|
const coverage = []
|
||||||
|
for (const origReq of originalReqs) {
|
||||||
|
const keywords = extractKeywords(origReq)
|
||||||
|
const covered = specReqs.some(specReq => hasKeywordOverlap(specReq, keywords, 0.4))
|
||||||
|
|
||||||
|
coverage.push({
|
||||||
|
requirement: origReq,
|
||||||
|
covered: covered
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const coveredCount = coverage.filter(c => c.covered).length
|
||||||
|
const score = (coveredCount / originalReqs.length) * 100
|
||||||
|
|
||||||
|
return {
|
||||||
|
score: score,
|
||||||
|
weight: 10,
|
||||||
|
weighted_score: score * 0.10,
|
||||||
|
details: {
|
||||||
|
total: originalReqs.length,
|
||||||
|
covered: coveredCount,
|
||||||
|
uncovered: coverage.filter(c => !c.covered).map(c => c.requirement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function extractOriginalRequirements(specDocs) {
|
||||||
|
// Try to find original requirements in product brief
|
||||||
|
const briefDoc = specDocs.find(d => d.phase === "product-brief")
|
||||||
|
if (!briefDoc) return []
|
||||||
|
|
||||||
|
const reqSection = briefDoc.content.match(/##\s+(?:Requirements|Objectives)[\s\S]*?(?=\n##|$)/i)
|
||||||
|
if (!reqSection) return []
|
||||||
|
|
||||||
|
const reqs = reqSection[0].match(/^[-*]\s+(.+)$/gm) || []
|
||||||
|
return reqs.map(r => r.replace(/^[-*]\s+/, "").trim())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quality Gate Determination
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function determineQualityGate(overallScore, coverageScore) {
|
||||||
|
// PASS: Score ≥80% AND coverage ≥70%
|
||||||
|
if (overallScore >= 80 && coverageScore >= 70) {
|
||||||
|
return {
|
||||||
|
gate: "PASS",
|
||||||
|
message: "Specification meets quality standards and is ready for implementation",
|
||||||
|
action: "Proceed to implementation phase"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FAIL: Score <60% OR coverage <50%
|
||||||
|
if (overallScore < 60 || coverageScore < 50) {
|
||||||
|
return {
|
||||||
|
gate: "FAIL",
|
||||||
|
message: "Specification requires major revisions before implementation",
|
||||||
|
action: "Address critical gaps and resubmit for review"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// REVIEW: Between PASS and FAIL
|
||||||
|
return {
|
||||||
|
gate: "REVIEW",
|
||||||
|
message: "Specification needs improvements but may proceed with caution",
|
||||||
|
action: "Address recommendations and consider re-review"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Readiness Report Generation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function formatReadinessReport(report, specDocs) {
|
||||||
|
const { overall_score, quality_gate, dimensions, phase_gates } = report
|
||||||
|
|
||||||
|
let markdown = `# Specification Readiness Report\n\n`
|
||||||
|
markdown += `**Generated**: ${new Date().toISOString()}\n\n`
|
||||||
|
markdown += `**Overall Score**: ${overall_score.toFixed(1)}%\n\n`
|
||||||
|
markdown += `**Quality Gate**: ${quality_gate.gate} - ${quality_gate.message}\n\n`
|
||||||
|
markdown += `**Recommended Action**: ${quality_gate.action}\n\n`
|
||||||
|
|
||||||
|
markdown += `---\n\n`
|
||||||
|
|
||||||
|
markdown += `## Dimension Scores\n\n`
|
||||||
|
markdown += `| Dimension | Score | Weight | Weighted Score |\n`
|
||||||
|
markdown += `|-----------|-------|--------|----------------|\n`
|
||||||
|
|
||||||
|
Object.entries(dimensions).forEach(([name, data]) => {
|
||||||
|
markdown += `| ${name} | ${data.score.toFixed(1)}% | ${data.weight}% | ${data.weighted_score.toFixed(1)}% |\n`
|
||||||
|
})
|
||||||
|
|
||||||
|
markdown += `\n---\n\n`
|
||||||
|
|
||||||
|
// Completeness Details
|
||||||
|
markdown += `## Completeness Analysis\n\n`
|
||||||
|
dimensions.completeness.details.forEach(detail => {
|
||||||
|
markdown += `### ${detail.phase}\n`
|
||||||
|
markdown += `- Score: ${detail.score.toFixed(1)}%\n`
|
||||||
|
markdown += `- Sections Present: ${detail.present}/${detail.expected}\n`
|
||||||
|
markdown += `- Substantial Content: ${detail.substantial}/${detail.expected}\n`
|
||||||
|
if (detail.missing.length > 0) {
|
||||||
|
markdown += `- Missing: ${detail.missing.join(", ")}\n`
|
||||||
|
}
|
||||||
|
markdown += `\n`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Consistency Details
|
||||||
|
markdown += `## Consistency Analysis\n\n`
|
||||||
|
if (dimensions.consistency.issues.length > 0) {
|
||||||
|
markdown += `**Issues Found**: ${dimensions.consistency.issues.length}\n\n`
|
||||||
|
dimensions.consistency.issues.forEach(issue => {
|
||||||
|
markdown += `- **${issue.severity.toUpperCase()}**: ${issue.message}\n`
|
||||||
|
markdown += ` *Suggestion*: ${issue.suggestion}\n\n`
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
markdown += `No consistency issues found.\n\n`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traceability Details
|
||||||
|
markdown += `## Traceability Analysis\n\n`
|
||||||
|
markdown += `- Complete Chains: ${dimensions.traceability.details.complete_chains}/${dimensions.traceability.details.total_chains}\n\n`
|
||||||
|
if (dimensions.traceability.details.weak_links.length > 0) {
|
||||||
|
markdown += `**Weak Links**:\n`
|
||||||
|
dimensions.traceability.details.weak_links.forEach(link => {
|
||||||
|
markdown += `- ${link}\n`
|
||||||
|
})
|
||||||
|
markdown += `\n`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Depth Details
|
||||||
|
markdown += `## Depth Analysis\n\n`
|
||||||
|
dimensions.depth.dimensions.forEach(dim => {
|
||||||
|
markdown += `### ${dim.name}\n`
|
||||||
|
markdown += `- Score: ${dim.score.toFixed(1)}%\n`
|
||||||
|
if (dim.testable !== undefined) {
|
||||||
|
markdown += `- Testable: ${dim.testable}/${dim.total}\n`
|
||||||
|
}
|
||||||
|
if (dim.justified !== undefined) {
|
||||||
|
markdown += `- Justified: ${dim.justified}/${dim.total}\n`
|
||||||
|
}
|
||||||
|
if (dim.estimable !== undefined) {
|
||||||
|
markdown += `- Estimable: ${dim.estimable}/${dim.total}\n`
|
||||||
|
}
|
||||||
|
markdown += `\n`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Coverage Details
|
||||||
|
markdown += `## Requirement Coverage\n\n`
|
||||||
|
markdown += `- Covered: ${dimensions.coverage.details.covered}/${dimensions.coverage.details.total}\n`
|
||||||
|
if (dimensions.coverage.details.uncovered.length > 0) {
|
||||||
|
markdown += `\n**Uncovered Requirements**:\n`
|
||||||
|
dimensions.coverage.details.uncovered.forEach(req => {
|
||||||
|
markdown += `- ${req}\n`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
markdown += `\n`
|
||||||
|
|
||||||
|
// Phase Gates
|
||||||
|
if (phase_gates) {
|
||||||
|
markdown += `---\n\n`
|
||||||
|
markdown += `## Phase-Level Quality Gates\n\n`
|
||||||
|
Object.entries(phase_gates).forEach(([phase, gate]) => {
|
||||||
|
markdown += `### ${phase}\n`
|
||||||
|
markdown += `- Gate: ${gate.status}\n`
|
||||||
|
markdown += `- Score: ${gate.score.toFixed(1)}%\n`
|
||||||
|
if (gate.issues.length > 0) {
|
||||||
|
markdown += `- Issues: ${gate.issues.join(", ")}\n`
|
||||||
|
}
|
||||||
|
markdown += `\n`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return markdown
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Spec Summary Generation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function formatSpecSummary(specDocs, report) {
|
||||||
|
let markdown = `# Specification Summary\n\n`
|
||||||
|
|
||||||
|
markdown += `**Overall Quality Score**: ${report.overall_score.toFixed(1)}%\n`
|
||||||
|
markdown += `**Quality Gate**: ${report.quality_gate.gate}\n\n`
|
||||||
|
|
||||||
|
markdown += `---\n\n`
|
||||||
|
|
||||||
|
// Document Overview
|
||||||
|
markdown += `## Documents Reviewed\n\n`
|
||||||
|
specDocs.forEach(doc => {
|
||||||
|
markdown += `### ${doc.phase}\n`
|
||||||
|
markdown += `- Path: ${doc.path}\n`
|
||||||
|
markdown += `- Size: ${doc.content.length} characters\n`
|
||||||
|
|
||||||
|
// Extract key sections
|
||||||
|
const sections = doc.content.match(/^##\s+(.+)$/gm) || []
|
||||||
|
if (sections.length > 0) {
|
||||||
|
markdown += `- Sections: ${sections.map(s => s.replace(/^##\s+/, "")).join(", ")}\n`
|
||||||
|
}
|
||||||
|
markdown += `\n`
|
||||||
|
})
|
||||||
|
|
||||||
|
markdown += `---\n\n`
|
||||||
|
|
||||||
|
// Key Findings
|
||||||
|
markdown += `## Key Findings\n\n`
|
||||||
|
|
||||||
|
// Strengths
|
||||||
|
const strengths = []
|
||||||
|
Object.entries(report.dimensions).forEach(([name, data]) => {
|
||||||
|
if (data.score >= 80) {
|
||||||
|
strengths.push(`${name}: ${data.score.toFixed(1)}%`)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if (strengths.length > 0) {
|
||||||
|
markdown += `### Strengths\n`
|
||||||
|
strengths.forEach(s => markdown += `- ${s}\n`)
|
||||||
|
markdown += `\n`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Areas for Improvement
|
||||||
|
const improvements = []
|
||||||
|
Object.entries(report.dimensions).forEach(([name, data]) => {
|
||||||
|
if (data.score < 70) {
|
||||||
|
improvements.push(`${name}: ${data.score.toFixed(1)}%`)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if (improvements.length > 0) {
|
||||||
|
markdown += `### Areas for Improvement\n`
|
||||||
|
improvements.forEach(i => markdown += `- ${i}\n`)
|
||||||
|
markdown += `\n`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recommendations
|
||||||
|
if (report.recommendations && report.recommendations.length > 0) {
|
||||||
|
markdown += `### Recommendations\n`
|
||||||
|
report.recommendations.forEach((rec, i) => {
|
||||||
|
markdown += `${i + 1}. ${rec}\n`
|
||||||
|
})
|
||||||
|
markdown += `\n`
|
||||||
|
}
|
||||||
|
|
||||||
|
return markdown
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Phase-Level Quality Gates
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function calculatePhaseGates(specDocs) {
|
||||||
|
const gates = {}
|
||||||
|
|
||||||
|
for (const doc of specDocs) {
|
||||||
|
const phase = doc.phase
|
||||||
|
const issues = []
|
||||||
|
let score = 100
|
||||||
|
|
||||||
|
// Check minimum content threshold
|
||||||
|
if (doc.content.length < 500) {
|
||||||
|
issues.push("Insufficient content")
|
||||||
|
score -= 30
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for required sections (phase-specific)
|
||||||
|
const requiredSections = getRequiredSections(phase)
|
||||||
|
const missingSections = requiredSections.filter(section =>
|
||||||
|
!doc.content.match(new RegExp(`##\\s+${section}`, "i"))
|
||||||
|
)
|
||||||
|
|
||||||
|
if (missingSections.length > 0) {
|
||||||
|
issues.push(`Missing sections: ${missingSections.join(", ")}`)
|
||||||
|
score -= missingSections.length * 15
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine gate status
|
||||||
|
let status = "PASS"
|
||||||
|
if (score < 60) status = "FAIL"
|
||||||
|
else if (score < 80) status = "REVIEW"
|
||||||
|
|
||||||
|
gates[phase] = {
|
||||||
|
status: status,
|
||||||
|
score: Math.max(0, score),
|
||||||
|
issues: issues
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return gates
|
||||||
|
}
|
||||||
|
|
||||||
|
function getRequiredSections(phase) {
|
||||||
|
const sectionMap = {
|
||||||
|
"product-brief": ["Vision", "Problem", "Target Audience"],
|
||||||
|
"prd": ["Goals", "Requirements", "User Stories"],
|
||||||
|
"architecture": ["Overview", "Components", "Data Models"],
|
||||||
|
"user-stories": ["Stories", "Acceptance Criteria"],
|
||||||
|
"implementation-plan": ["Tasks", "Dependencies"],
|
||||||
|
"test-strategy": ["Test Cases", "Coverage"]
|
||||||
|
}
|
||||||
|
|
||||||
|
return sectionMap[phase] || []
|
||||||
|
}
|
||||||
|
```
|
||||||
429
.claude/skills/team-lifecycle-v2/roles/reviewer/role.md
Normal file
429
.claude/skills/team-lifecycle-v2/roles/reviewer/role.md
Normal file
@@ -0,0 +1,429 @@
|
|||||||
|
# Reviewer Role
|
||||||
|
|
||||||
|
## 1. Role Identity
|
||||||
|
|
||||||
|
- **Name**: reviewer
|
||||||
|
- **Task Prefix**: REVIEW-* + QUALITY-*
|
||||||
|
- **Output Tag**: `[reviewer]`
|
||||||
|
- **Responsibility**: Discover Task → Branch by Prefix → Review/Score → Report
|
||||||
|
|
||||||
|
## 2. Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process REVIEW-* and QUALITY-* tasks
|
||||||
|
- Communicate only with coordinator
|
||||||
|
- Generate readiness-report.md for QUALITY tasks
|
||||||
|
- Tag all outputs with `[reviewer]`
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks
|
||||||
|
- Contact other workers directly
|
||||||
|
- Modify source code
|
||||||
|
- Skip quality dimensions
|
||||||
|
- Approve without verification
|
||||||
|
|
||||||
|
## 3. Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Purpose | Format |
|
||||||
|
|------|-----------|---------|--------|
|
||||||
|
| `task_request` | FROM coordinator | Receive REVIEW-*/QUALITY-* task assignment | `{ type: "task_request", task_id, description, review_mode }` |
|
||||||
|
| `task_complete` | TO coordinator | Report review success | `{ type: "task_complete", task_id, status: "success", verdict, score, issues }` |
|
||||||
|
| `task_failed` | TO coordinator | Report review failure | `{ type: "task_failed", task_id, error }` |
|
||||||
|
|
||||||
|
## 4. Message Bus
|
||||||
|
|
||||||
|
**Primary**: Use `team_msg` for all coordinator communication with `[reviewer]` tag:
|
||||||
|
```javascript
|
||||||
|
// Code review completion
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: "REVIEW-001",
|
||||||
|
status: "success",
|
||||||
|
verdict: "APPROVE",
|
||||||
|
issues: { critical: 0, high: 2, medium: 5, low: 3 },
|
||||||
|
recommendations: ["Fix console.log statements", "Add error handling"]
|
||||||
|
}, "[reviewer]")
|
||||||
|
|
||||||
|
// Spec quality completion
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: "QUALITY-001",
|
||||||
|
status: "success",
|
||||||
|
overall_score: 85.5,
|
||||||
|
quality_gate: "PASS",
|
||||||
|
dimensions: {
|
||||||
|
completeness: 90,
|
||||||
|
consistency: 85,
|
||||||
|
traceability: 80,
|
||||||
|
depth: 88,
|
||||||
|
coverage: 82
|
||||||
|
}
|
||||||
|
}, "[reviewer]")
|
||||||
|
```
|
||||||
|
|
||||||
|
**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/reviewer-{timestamp}.json`
|
||||||
|
|
||||||
|
## 5. Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/code-review.md` - 4-dimension code review (quality, security, architecture, requirements)
|
||||||
|
- `commands/spec-quality.md` - 5-dimension spec quality check (completeness, consistency, traceability, depth, coverage)
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- None (uses Grep-based analysis)
|
||||||
|
|
||||||
|
## 6. Execution (5-Phase) - Dual-Prefix
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
**Dual Prefix Filter**:
|
||||||
|
```javascript
|
||||||
|
const tasks = Glob(".workflow/.team/tasks/{REVIEW,QUALITY}-*.json")
|
||||||
|
.filter(task => task.status === "pending" && task.assigned_to === "reviewer")
|
||||||
|
|
||||||
|
// Determine review mode
|
||||||
|
const reviewMode = task.task_id.startsWith("REVIEW-") ? "code" : "spec"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Context Loading (Branch by Mode)
|
||||||
|
|
||||||
|
**Code Review Context (REVIEW-*)**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "code") {
|
||||||
|
// Load plan
|
||||||
|
const planPath = task.metadata?.plan_path || ".workflow/plan.md"
|
||||||
|
const plan = Read(planPath)
|
||||||
|
|
||||||
|
// Get git diff
|
||||||
|
const implTaskId = task.metadata?.impl_task_id
|
||||||
|
const gitDiff = Bash("git diff HEAD").stdout
|
||||||
|
|
||||||
|
// Load modified files
|
||||||
|
const modifiedFiles = Bash("git diff --name-only HEAD").stdout.split("\n").filter(Boolean)
|
||||||
|
const fileContents = modifiedFiles.map(f => ({
|
||||||
|
path: f,
|
||||||
|
content: Read(f)
|
||||||
|
}))
|
||||||
|
|
||||||
|
// Load test results if available
|
||||||
|
const testTaskId = task.metadata?.test_task_id
|
||||||
|
const testResults = testTaskId ? Read(`.workflow/.team/tasks/${testTaskId}.json`) : null
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Spec Quality Context (QUALITY-*)**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "spec") {
|
||||||
|
// Load session folder
|
||||||
|
const sessionFolder = task.metadata?.session_folder || ".workflow/.sessions/latest"
|
||||||
|
|
||||||
|
// Load quality gates
|
||||||
|
const qualityGates = task.metadata?.quality_gates || {
|
||||||
|
pass_threshold: 80,
|
||||||
|
fail_threshold: 60,
|
||||||
|
coverage_threshold: 70
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load all spec documents
|
||||||
|
const specDocs = Glob(`${sessionFolder}/**/*.md`).map(path => ({
|
||||||
|
path: path,
|
||||||
|
content: Read(path),
|
||||||
|
phase: extractPhase(path)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Review Execution (Delegate by Mode)
|
||||||
|
|
||||||
|
**Code Review**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "code") {
|
||||||
|
const codeReviewCommand = Read("commands/code-review.md")
|
||||||
|
// Command handles:
|
||||||
|
// - reviewQuality (ts-ignore, any, console.log, empty catch)
|
||||||
|
// - reviewSecurity (eval/exec, secrets, SQL injection, XSS)
|
||||||
|
// - reviewArchitecture (parent imports, large files)
|
||||||
|
// - verifyRequirements (plan acceptance criteria vs implementation)
|
||||||
|
// - Verdict determination (BLOCK/CONDITIONAL/APPROVE)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Spec Quality**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "spec") {
|
||||||
|
const specQualityCommand = Read("commands/spec-quality.md")
|
||||||
|
// Command handles:
|
||||||
|
// - scoreCompleteness (section content checks)
|
||||||
|
// - scoreConsistency (terminology, format, references)
|
||||||
|
// - scoreTraceability (goals → reqs → arch → stories chain)
|
||||||
|
// - scoreDepth (AC testable, ADRs justified, stories estimable)
|
||||||
|
// - scoreRequirementCoverage (original requirements → document mapping)
|
||||||
|
// - Quality gate determination (PASS ≥80%, FAIL <60%, else REVIEW)
|
||||||
|
// - readiness-report.md generation
|
||||||
|
// - spec-summary.md generation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Report Generation (Branch by Mode)
|
||||||
|
|
||||||
|
**Code Review Report**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "code") {
|
||||||
|
const report = {
|
||||||
|
verdict: verdict, // BLOCK | CONDITIONAL | APPROVE
|
||||||
|
dimensions: {
|
||||||
|
quality: qualityIssues,
|
||||||
|
security: securityIssues,
|
||||||
|
architecture: architectureIssues,
|
||||||
|
requirements: requirementIssues
|
||||||
|
},
|
||||||
|
recommendations: recommendations,
|
||||||
|
blocking_issues: blockingIssues
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write review report
|
||||||
|
Write(`.workflow/.team/reviews/${task.task_id}-report.md`, formatCodeReviewReport(report))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Spec Quality Report**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "spec") {
|
||||||
|
const report = {
|
||||||
|
overall_score: overallScore,
|
||||||
|
quality_gate: qualityGate, // PASS | REVIEW | FAIL
|
||||||
|
dimensions: {
|
||||||
|
completeness: completenessScore,
|
||||||
|
consistency: consistencyScore,
|
||||||
|
traceability: traceabilityScore,
|
||||||
|
depth: depthScore,
|
||||||
|
coverage: coverageScore
|
||||||
|
},
|
||||||
|
phase_gates: phaseGates,
|
||||||
|
recommendations: recommendations
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write readiness report
|
||||||
|
Write(`${sessionFolder}/readiness-report.md`, formatReadinessReport(report))
|
||||||
|
|
||||||
|
// Write spec summary
|
||||||
|
Write(`${sessionFolder}/spec-summary.md`, formatSpecSummary(specDocs, report))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator (Branch by Mode)
|
||||||
|
|
||||||
|
**Code Review Completion**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "code") {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "success",
|
||||||
|
verdict: verdict,
|
||||||
|
issues: {
|
||||||
|
critical: blockingIssues.length,
|
||||||
|
high: highIssues.length,
|
||||||
|
medium: mediumIssues.length,
|
||||||
|
low: lowIssues.length
|
||||||
|
},
|
||||||
|
recommendations: recommendations,
|
||||||
|
report_path: `.workflow/.team/reviews/${task.task_id}-report.md`,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[reviewer]")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Spec Quality Completion**:
|
||||||
|
```javascript
|
||||||
|
if (reviewMode === "spec") {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "success",
|
||||||
|
overall_score: overallScore,
|
||||||
|
quality_gate: qualityGate,
|
||||||
|
dimensions: {
|
||||||
|
completeness: completenessScore,
|
||||||
|
consistency: consistencyScore,
|
||||||
|
traceability: traceabilityScore,
|
||||||
|
depth: depthScore,
|
||||||
|
coverage: coverageScore
|
||||||
|
},
|
||||||
|
report_path: `${sessionFolder}/readiness-report.md`,
|
||||||
|
summary_path: `${sessionFolder}/spec-summary.md`,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[reviewer]")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 7. Code Review Dimensions
|
||||||
|
|
||||||
|
### Quality Dimension
|
||||||
|
|
||||||
|
**Anti-patterns**:
|
||||||
|
- `@ts-ignore` / `@ts-expect-error` without justification
|
||||||
|
- `any` type usage
|
||||||
|
- `console.log` in production code
|
||||||
|
- Empty catch blocks
|
||||||
|
- Magic numbers
|
||||||
|
- Duplicate code
|
||||||
|
|
||||||
|
**Severity**:
|
||||||
|
- Critical: Empty catch, any in public APIs
|
||||||
|
- High: @ts-ignore without comment, console.log
|
||||||
|
- Medium: Magic numbers, duplicate code
|
||||||
|
- Low: Minor style issues
|
||||||
|
|
||||||
|
### Security Dimension
|
||||||
|
|
||||||
|
**Vulnerabilities**:
|
||||||
|
- `eval()` / `exec()` usage
|
||||||
|
- `innerHTML` / `dangerouslySetInnerHTML`
|
||||||
|
- Hardcoded secrets (API keys, passwords)
|
||||||
|
- SQL injection vectors
|
||||||
|
- XSS vulnerabilities
|
||||||
|
- Insecure dependencies
|
||||||
|
|
||||||
|
**Severity**:
|
||||||
|
- Critical: Hardcoded secrets, SQL injection
|
||||||
|
- High: eval/exec, innerHTML
|
||||||
|
- Medium: Insecure dependencies
|
||||||
|
- Low: Missing input validation
|
||||||
|
|
||||||
|
### Architecture Dimension
|
||||||
|
|
||||||
|
**Issues**:
|
||||||
|
- Parent directory imports (`../../../`)
|
||||||
|
- Large files (>500 lines)
|
||||||
|
- Circular dependencies
|
||||||
|
- Missing abstractions
|
||||||
|
- Tight coupling
|
||||||
|
|
||||||
|
**Severity**:
|
||||||
|
- Critical: Circular dependencies
|
||||||
|
- High: Excessive parent imports (>2 levels)
|
||||||
|
- Medium: Large files, tight coupling
|
||||||
|
- Low: Minor structure issues
|
||||||
|
|
||||||
|
### Requirements Dimension
|
||||||
|
|
||||||
|
**Verification**:
|
||||||
|
- Acceptance criteria coverage
|
||||||
|
- Feature completeness
|
||||||
|
- Edge case handling
|
||||||
|
- Error handling
|
||||||
|
|
||||||
|
**Severity**:
|
||||||
|
- Critical: Missing core functionality
|
||||||
|
- High: Incomplete acceptance criteria
|
||||||
|
- Medium: Missing edge cases
|
||||||
|
- Low: Minor feature gaps
|
||||||
|
|
||||||
|
## 8. Spec Quality Dimensions
|
||||||
|
|
||||||
|
### Completeness (Weight: 25%)
|
||||||
|
|
||||||
|
**Checks**:
|
||||||
|
- All required sections present
|
||||||
|
- Section content depth (not just headers)
|
||||||
|
- Cross-phase coverage
|
||||||
|
- Artifact completeness
|
||||||
|
|
||||||
|
**Scoring**:
|
||||||
|
- 100%: All sections with substantial content
|
||||||
|
- 75%: All sections present, some thin
|
||||||
|
- 50%: Missing 1-2 sections
|
||||||
|
- 25%: Missing 3+ sections
|
||||||
|
- 0%: Critical sections missing
|
||||||
|
|
||||||
|
### Consistency (Weight: 20%)
|
||||||
|
|
||||||
|
**Checks**:
|
||||||
|
- Terminology consistency
|
||||||
|
- Format consistency
|
||||||
|
- Reference consistency
|
||||||
|
- Naming conventions
|
||||||
|
|
||||||
|
**Scoring**:
|
||||||
|
- 100%: Fully consistent
|
||||||
|
- 75%: Minor inconsistencies (1-2)
|
||||||
|
- 50%: Moderate inconsistencies (3-5)
|
||||||
|
- 25%: Major inconsistencies (6+)
|
||||||
|
- 0%: Chaotic inconsistency
|
||||||
|
|
||||||
|
### Traceability (Weight: 25%)
|
||||||
|
|
||||||
|
**Checks**:
|
||||||
|
- Goals → Requirements chain
|
||||||
|
- Requirements → Architecture chain
|
||||||
|
- Architecture → User Stories chain
|
||||||
|
- Bidirectional references
|
||||||
|
|
||||||
|
**Scoring**:
|
||||||
|
- 100%: Full traceability chain
|
||||||
|
- 75%: 1 weak link
|
||||||
|
- 50%: 2 weak links
|
||||||
|
- 25%: 3+ weak links
|
||||||
|
- 0%: No traceability
|
||||||
|
|
||||||
|
### Depth (Weight: 20%)
|
||||||
|
|
||||||
|
**Checks**:
|
||||||
|
- Acceptance criteria testable
|
||||||
|
- ADRs justified
|
||||||
|
- User stories estimable
|
||||||
|
- Technical details sufficient
|
||||||
|
|
||||||
|
**Scoring**:
|
||||||
|
- 100%: All items detailed
|
||||||
|
- 75%: 1-2 shallow items
|
||||||
|
- 50%: 3-5 shallow items
|
||||||
|
- 25%: 6+ shallow items
|
||||||
|
- 0%: All items shallow
|
||||||
|
|
||||||
|
### Coverage (Weight: 10%)
|
||||||
|
|
||||||
|
**Checks**:
|
||||||
|
- Original requirements mapped
|
||||||
|
- All features documented
|
||||||
|
- All constraints addressed
|
||||||
|
- All stakeholders considered
|
||||||
|
|
||||||
|
**Scoring**:
|
||||||
|
- 100%: Full coverage (100%)
|
||||||
|
- 75%: High coverage (80-99%)
|
||||||
|
- 50%: Moderate coverage (60-79%)
|
||||||
|
- 25%: Low coverage (40-59%)
|
||||||
|
- 0%: Minimal coverage (<40%)
|
||||||
|
|
||||||
|
## 9. Verdict/Gate Determination
|
||||||
|
|
||||||
|
### Code Review Verdicts
|
||||||
|
|
||||||
|
| Verdict | Criteria | Action |
|
||||||
|
|---------|----------|--------|
|
||||||
|
| **BLOCK** | Critical issues present | Must fix before merge |
|
||||||
|
| **CONDITIONAL** | High/medium issues only | Fix recommended, merge allowed |
|
||||||
|
| **APPROVE** | Low issues or none | Ready to merge |
|
||||||
|
|
||||||
|
### Spec Quality Gates
|
||||||
|
|
||||||
|
| Gate | Criteria | Action |
|
||||||
|
|------|----------|--------|
|
||||||
|
| **PASS** | Score ≥80% AND coverage ≥70% | Ready for implementation |
|
||||||
|
| **REVIEW** | Score 60-79% OR coverage 50-69% | Revisions recommended |
|
||||||
|
| **FAIL** | Score <60% OR coverage <50% | Major revisions required |
|
||||||
|
|
||||||
|
## 10. Error Handling
|
||||||
|
|
||||||
|
| Error Type | Recovery Strategy | Escalation |
|
||||||
|
|------------|-------------------|------------|
|
||||||
|
| Missing context | Request from coordinator | Immediate escalation |
|
||||||
|
| Invalid review mode | Abort with error | Report to coordinator |
|
||||||
|
| Analysis failure | Retry with verbose logging | Report after 2 failures |
|
||||||
|
| Report generation failure | Use fallback template | Report with partial results |
|
||||||
@@ -0,0 +1,538 @@
|
|||||||
|
# Validate Command
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
Test-fix cycle with strategy engine for automated test failure resolution.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const MAX_ITERATIONS = 10
|
||||||
|
const PASS_RATE_TARGET = 95 // percentage
|
||||||
|
```
|
||||||
|
|
||||||
|
## Main Iteration Loop
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function runTestFixCycle(task, framework, affectedTests, modifiedFiles) {
|
||||||
|
let iteration = 0
|
||||||
|
let bestPassRate = 0
|
||||||
|
let bestResults = null
|
||||||
|
|
||||||
|
while (iteration < MAX_ITERATIONS) {
|
||||||
|
iteration++
|
||||||
|
|
||||||
|
// Phase 1: Run Tests
|
||||||
|
const testCommand = buildTestCommand(framework, affectedTests, iteration === 1)
|
||||||
|
const testOutput = Bash(testCommand, { timeout: 120000 })
|
||||||
|
const results = parseTestResults(testOutput.stdout + testOutput.stderr, framework)
|
||||||
|
|
||||||
|
const passRate = results.total > 0 ? (results.passed / results.total * 100) : 0
|
||||||
|
|
||||||
|
// Track best result
|
||||||
|
if (passRate > bestPassRate) {
|
||||||
|
bestPassRate = passRate
|
||||||
|
bestResults = results
|
||||||
|
}
|
||||||
|
|
||||||
|
// Progress update for long cycles
|
||||||
|
if (iteration > 5) {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
iteration: iteration,
|
||||||
|
pass_rate: passRate.toFixed(1),
|
||||||
|
tests_passed: results.passed,
|
||||||
|
tests_failed: results.failed,
|
||||||
|
message: `Test-fix cycle iteration ${iteration}/${MAX_ITERATIONS}`
|
||||||
|
}, "[tester]")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2: Check Success
|
||||||
|
if (passRate >= PASS_RATE_TARGET) {
|
||||||
|
// Quality gate: Run full suite if only affected tests passed
|
||||||
|
if (affectedTests.length > 0 && iteration === 1) {
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
message: "Affected tests passed, running full suite..."
|
||||||
|
}, "[tester]")
|
||||||
|
|
||||||
|
const fullSuiteCommand = buildTestCommand(framework, [], false)
|
||||||
|
const fullOutput = Bash(fullSuiteCommand, { timeout: 300000 })
|
||||||
|
const fullResults = parseTestResults(fullOutput.stdout + fullOutput.stderr, framework)
|
||||||
|
const fullPassRate = fullResults.total > 0 ? (fullResults.passed / fullResults.total * 100) : 0
|
||||||
|
|
||||||
|
if (fullPassRate >= PASS_RATE_TARGET) {
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
results: fullResults,
|
||||||
|
iterations: iteration,
|
||||||
|
full_suite_run: true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Full suite failed, continue fixing
|
||||||
|
results = fullResults
|
||||||
|
passRate = fullPassRate
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
results: results,
|
||||||
|
iterations: iteration,
|
||||||
|
full_suite_run: affectedTests.length === 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3: Analyze Failures
|
||||||
|
if (results.failures.length === 0) {
|
||||||
|
break // No failures to fix
|
||||||
|
}
|
||||||
|
|
||||||
|
const classified = classifyFailures(results.failures)
|
||||||
|
|
||||||
|
// Phase 4: Select Strategy
|
||||||
|
const strategy = selectStrategy(iteration, passRate, results.failures)
|
||||||
|
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "progress_update",
|
||||||
|
task_id: task.task_id,
|
||||||
|
iteration: iteration,
|
||||||
|
strategy: strategy,
|
||||||
|
failures: {
|
||||||
|
critical: classified.critical.length,
|
||||||
|
high: classified.high.length,
|
||||||
|
medium: classified.medium.length,
|
||||||
|
low: classified.low.length
|
||||||
|
}
|
||||||
|
}, "[tester]")
|
||||||
|
|
||||||
|
// Phase 5: Apply Fixes
|
||||||
|
const fixResult = applyFixes(strategy, results.failures, framework, modifiedFiles)
|
||||||
|
|
||||||
|
if (!fixResult.success) {
|
||||||
|
// Fix application failed, try next iteration with different strategy
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max iterations reached
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
results: bestResults,
|
||||||
|
iterations: MAX_ITERATIONS,
|
||||||
|
best_pass_rate: bestPassRate,
|
||||||
|
error: "Max iterations reached without achieving target pass rate"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Strategy Selection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function selectStrategy(iteration, passRate, failures) {
  // Pick a fix strategy based on cycle progress and failure profile.
  // Precedence: conservative (early or mostly healthy) > surgical (small
  // cluster of critical failures) > aggressive (stuck or badly broken)
  // > conservative fallback.
  const buckets = classifyFailures(failures)

  const earlyOrHealthy = iteration <= 3 || passRate >= 80
  if (earlyOrHealthy) {
    return "conservative"
  }

  const fewCritical = buckets.critical.length > 0 && buckets.critical.length < 5
  if (fewCritical) {
    return "surgical"
  }

  const stuckOrBroken = passRate < 50 || iteration > 7
  if (stuckOrBroken) {
    return "aggressive"
  }

  return "conservative"
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Fix Application
|
||||||
|
|
||||||
|
### Conservative Strategy
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function applyConservativeFixes(failures, framework, modifiedFiles) {
  // Conservative strategy: repair exactly ONE failure per cycle, taking the
  // highest-severity bucket first (critical, then high). Medium/low
  // failures are deliberately deferred to later iterations.
  const classified = classifyFailures(failures)

  // One severity-ordered pass replaces the original's two duplicated
  // critical/high branches, which differed only in the bucket they read.
  for (const bucket of [classified.critical, classified.high]) {
    if (bucket.length > 0) {
      return fixSingleFailure(bucket[0], framework, modifiedFiles)
    }
  }

  return { success: false, error: "No fixable failures found" }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Surgical Strategy
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function applySurgicalFixes(failures, framework, modifiedFiles) {
  // Surgical strategy: find one error pattern shared by many failures and
  // fix every occurrence of that pattern in a single batch.
  const pattern = identifyCommonPattern(failures)
  if (!pattern) {
    return { success: false, error: "No common pattern identified" }
  }

  // Generate one fix per failure that exhibits the shared pattern.
  const fixes = failures
    .filter(failure => matchesPattern(failure, pattern))
    .map(failure => generatePatternFix(failure, pattern, framework))

  // Apply the whole batch at once.
  fixes.forEach(fix => applyFix(fix, modifiedFiles))

  return {
    success: true,
    fixes_applied: fixes.length,
    pattern: pattern
  }
}
|
||||||
|
|
||||||
|
function identifyCommonPattern(failures) {
  // Group failures by coarse error type and return the largest group as a
  // "pattern" — but only when it has at least 3 members, so the surgical
  // strategy is reserved for genuinely systematic breakage.
  const groups = {}
  for (const failure of failures) {
    const kind = extractErrorType(failure.error)
    if (!groups[kind]) {
      groups[kind] = []
    }
    groups[kind].push(failure)
  }

  // Track the largest group; first-encountered wins ties.
  let best = null
  for (const [kind, members] of Object.entries(groups)) {
    if (!best || members.length > best.count) {
      best = { type: kind, instances: members, count: members.length }
    }
  }

  return best && best.count >= 3 ? best : null
}
|
||||||
|
|
||||||
|
function extractErrorType(error) {
  // Map a raw error message to a coarse error category used for failure
  // classification and pattern grouping. Checks run in priority order
  // against the lower-cased message; the first match wins.
  const text = error.toLowerCase()

  if (text.includes("cannot find module")) {
    return "missing_import"
  }
  if (text.includes("is not defined")) {
    return "undefined_variable"
  }
  if (text.includes("expected") && text.includes("received")) {
    return "assertion_mismatch"
  }
  if (text.includes("timeout")) {
    return "timeout"
  }
  if (text.includes("syntaxerror")) {
    return "syntax_error"
  }

  return "unknown"
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Aggressive Strategy
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function applyAggressiveFixes(failures, framework, modifiedFiles) {
  // Aggressive strategy: generate and apply fixes for EVERY critical and
  // high-priority failure in one batch. Medium/low failures are skipped —
  // they rarely block the pass-rate target.
  const classified = classifyFailures(failures)

  // One loop over both severity buckets replaces the original's duplicated
  // critical/high loops. generateFix may return null for unfixable cases.
  const fixes = []
  for (const failure of [...classified.critical, ...classified.high]) {
    const fix = generateFix(failure, framework, modifiedFiles)
    if (fix) {
      fixes.push(fix)
    }
  }

  for (const fix of fixes) {
    applyFix(fix, modifiedFiles)
  }

  return {
    success: fixes.length > 0,
    fixes_applied: fixes.length
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fix Generation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function generateFix(failure, framework, modifiedFiles) {
  // Dispatch to a specialised fix generator based on the coarse error type.
  // Returns null when no generator exists for the error.
  const errorType = extractErrorType(failure.error)

  const generators = {
    missing_import: () => generateImportFix(failure, modifiedFiles),
    undefined_variable: () => generateVariableFix(failure, modifiedFiles),
    assertion_mismatch: () => generateAssertionFix(failure, framework),
    timeout: () => generateTimeoutFix(failure, framework),
    syntax_error: () => generateSyntaxFix(failure, modifiedFiles)
  }

  const generator = generators[errorType]
  return generator ? generator() : null
}
|
||||||
|
|
||||||
|
function generateImportFix(failure, modifiedFiles) {
  // Build an "add import" fix for a "Cannot find module" failure: recover
  // the module name from the error, locate a matching file among the
  // implementation's modified files, and compute a relative import path
  // from the failing test file. Returns null when any piece of information
  // cannot be recovered.

  // Extract module name from error
  const moduleMatch = failure.error.match(/Cannot find module ['"](.+?)['"]/)
  if (!moduleMatch) return null

  const moduleName = moduleMatch[1]

  // Find test file
  const testFile = extractTestFile(failure.test)
  if (!testFile) return null

  // Check if module exists in modified files
  const sourceFile = modifiedFiles.find(f =>
    f.includes(moduleName) || f.endsWith(`${moduleName}.ts`) || f.endsWith(`${moduleName}.js`)
  )

  if (!sourceFile) return null

  // Generate import statement
  // NOTE(review): the braces are empty, so this statement imports no
  // symbols — presumably a later step (or a human) fills in the names;
  // confirm the intended behavior.
  const relativePath = calculateRelativePath(testFile, sourceFile)
  const importStatement = `import { } from '${relativePath}'`

  return {
    file: testFile,
    type: "add_import",
    content: importStatement,
    line: 1 // Add at top of file
  }
}
|
||||||
|
|
||||||
|
function generateAssertionFix(failure, framework) {
  // Build an "update assertion" fix for an expected/received mismatch by
  // replacing the asserted (expected) value with the actually received one.
  // NOTE(review): this encodes current behavior as correct — it will mask a
  // genuine regression whenever the received value is the wrong one;
  // confirm this trade-off is intended. The `framework` parameter is
  // currently unused here.

  // Extract expected vs received values from the first matching lines.
  const expectedMatch = failure.error.match(/Expected:\s*(.+?)(?:\n|$)/)
  const receivedMatch = failure.error.match(/Received:\s*(.+?)(?:\n|$)/)

  if (!expectedMatch || !receivedMatch) return null

  const expected = expectedMatch[1].trim()
  const received = receivedMatch[1].trim()

  // Find test file and line
  const testFile = extractTestFile(failure.test)
  const testLine = extractTestLine(failure.error)

  if (!testFile || !testLine) return null

  return {
    file: testFile,
    type: "update_assertion",
    line: testLine,
    old_value: expected,
    new_value: received,
    note: "Auto-updated assertion based on actual behavior"
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Result Parsing
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function parseTestResults(output, framework) {
  // Parse raw test-runner output into
  // { total, passed, failed, skipped, failures: [{ test, error }] }.
  // Supports jest/vitest and pytest output; anything else yields zeros.
  const parsed = {
    total: 0,
    passed: 0,
    failed: 0,
    skipped: 0,
    failures: []
  }

  if (framework === "jest" || framework === "vitest") {
    // Primary summary line: "Tests: N failed, M passed, T total"
    // (failed/passed segments are optional).
    const summary = output.match(/Tests:\s+(?:(\d+)\s+failed,\s+)?(?:(\d+)\s+passed,\s+)?(\d+)\s+total/)
    if (summary) {
      parsed.failed = summary[1] ? parseInt(summary[1]) : 0
      parsed.passed = summary[2] ? parseInt(summary[2]) : 0
      parsed.total = parseInt(summary[3])
    }

    // Fallback format: "N passed ... T total".
    if (parsed.total === 0) {
      const fallback = output.match(/(\d+)\s+passed.*?(\d+)\s+total/)
      if (fallback) {
        parsed.passed = parseInt(fallback[1])
        parsed.total = parseInt(fallback[2])
        parsed.failed = parsed.total - parsed.passed
      }
    }

    // Per-failure detail blocks introduced by "●".
    const detailPattern = /●\s+(.*?)\n\n([\s\S]*?)(?=\n\n●|\n\nTest Suites:|\n\n$)/g
    for (let hit = detailPattern.exec(output); hit !== null; hit = detailPattern.exec(output)) {
      parsed.failures.push({ test: hit[1].trim(), error: hit[2].trim() })
    }

  } else if (framework === "pytest") {
    // Summary banner: "==== N failed, M passed ...".
    const summary = output.match(/=+\s+(?:(\d+)\s+failed,?\s+)?(?:(\d+)\s+passed)?/)
    if (summary) {
      parsed.failed = summary[1] ? parseInt(summary[1]) : 0
      parsed.passed = summary[2] ? parseInt(summary[2]) : 0
      parsed.total = parsed.failed + parsed.passed
    }

    // "FAILED path::test - error" detail lines.
    const detailPattern = /FAILED\s+(.*?)\s+-\s+([\s\S]*?)(?=\n_+|FAILED|=+\s+\d+)/g
    for (let hit = detailPattern.exec(output); hit !== null; hit = detailPattern.exec(output)) {
      parsed.failures.push({ test: hit[1].trim(), error: hit[2].trim() })
    }
  }

  return parsed
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Command Building
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function buildTestCommand(framework, affectedTests, isFirstRun) {
  // Build the shell command for the given test framework. When
  // affectedTests is non-empty only those files are run; otherwise the
  // full suite runs. (isFirstRun is accepted for interface compatibility
  // but not consulted here.)
  const fileArgs = affectedTests.length > 0 ? affectedTests.join(" ") : ""
  const scoped = fileArgs !== ""

  switch (framework) {
    case "vitest":
      return scoped
        ? `vitest run ${fileArgs} --reporter=verbose`
        : `vitest run --reporter=verbose`
    case "jest":
      return scoped
        ? `jest ${fileArgs} --no-coverage --verbose`
        : `jest --no-coverage --verbose`
    case "mocha":
      return scoped
        ? `mocha ${fileArgs} --reporter spec`
        : `mocha --reporter spec`
    case "pytest":
      return scoped
        ? `pytest ${fileArgs} -v --tb=short`
        : `pytest -v --tb=short`
    default:
      throw new Error(`Unsupported test framework: ${framework}`)
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Utility Functions
|
||||||
|
|
||||||
|
### Extract Test File
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function extractTestFile(testName) {
  // Pull the test file path off the front of a test name formatted like
  // "path/to/file.test.ts > describe block > test name".
  // Returns null when the name does not start with a test/spec file path.
  const hit = testName.match(/^(.*?\.(?:test|spec)\.[jt]sx?)/)
  return hit ? hit[1] : null
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Extract Test Line
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function extractTestLine(error) {
  // Pull the first ":line:column" pair out of an error stack trace and
  // return the line number, or null when no source location is present.
  const location = error.match(/:(\d+):\d+/)
  return location ? parseInt(location[1]) : null
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Calculate Relative Path
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function calculateRelativePath(fromFile, toFile) {
  // Compute the extensionless relative import path from fromFile's
  // directory to toFile, e.g. ("tests/a.test.ts", "src/b.ts") -> "../src/b".
  const fromDir = fromFile.split("/")
  fromDir.pop() // drop the filename; only the directory matters
  const target = toFile.split("/")

  // Length of the shared leading directory prefix.
  let shared = 0
  while (
    shared < fromDir.length &&
    shared < target.length &&
    fromDir[shared] === target[shared]
  ) {
    shared += 1
  }

  // One ".." per directory to climb out of, then descend to the target.
  const segments = new Array(fromDir.length - shared).fill("..")
  segments.push(...target.slice(shared))

  // Drop the JS/TS extension and ensure an explicit relative prefix.
  let result = segments.join("/").replace(/\.[jt]sx?$/, "")
  return result.startsWith(".") ? result : "./" + result
}
|
||||||
|
```
|
||||||
385
.claude/skills/team-lifecycle-v2/roles/tester/role.md
Normal file
385
.claude/skills/team-lifecycle-v2/roles/tester/role.md
Normal file
@@ -0,0 +1,385 @@
|
|||||||
|
# Tester Role
|
||||||
|
|
||||||
|
## 1. Role Identity
|
||||||
|
|
||||||
|
- **Name**: tester
|
||||||
|
- **Task Prefix**: TEST-*
|
||||||
|
- **Output Tag**: `[tester]`
|
||||||
|
- **Responsibility**: Detect Framework → Run Tests → Fix Cycle → Report Results
|
||||||
|
|
||||||
|
## 2. Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process TEST-* tasks
|
||||||
|
- Communicate only with coordinator
|
||||||
|
- Use detected test framework
|
||||||
|
- Run affected tests before full suite
|
||||||
|
- Tag all outputs with `[tester]`
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks
|
||||||
|
- Contact other workers directly
|
||||||
|
- Modify production code beyond test fixes
|
||||||
|
- Skip framework detection
|
||||||
|
- Run full suite without affected tests first
|
||||||
|
|
||||||
|
## 3. Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Purpose | Format |
|
||||||
|
|------|-----------|---------|--------|
|
||||||
|
| `task_request` | FROM coordinator | Receive TEST-* task assignment | `{ type: "task_request", task_id, description, impl_task_id }` |
|
||||||
|
| `task_complete` | TO coordinator | Report test success | `{ type: "task_complete", task_id, status: "success", pass_rate, tests_run, iterations }` |
|
||||||
|
| `task_failed` | TO coordinator | Report test failure | `{ type: "task_failed", task_id, error, failures, pass_rate }` |
|
||||||
|
| `progress_update` | TO coordinator | Report fix cycle progress | `{ type: "progress_update", task_id, iteration, pass_rate, strategy }` |
|
||||||
|
|
||||||
|
## 4. Message Bus
|
||||||
|
|
||||||
|
**Primary**: Use `team_msg` for all coordinator communication with `[tester]` tag:
|
||||||
|
```javascript
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: "TEST-001",
|
||||||
|
status: "success",
|
||||||
|
pass_rate: 98.5,
|
||||||
|
tests_run: 45,
|
||||||
|
iterations: 3,
|
||||||
|
framework: "vitest"
|
||||||
|
}, "[tester]")
|
||||||
|
```
|
||||||
|
|
||||||
|
**CLI Fallback**: When message bus unavailable, write to `.workflow/.team/messages/tester-{timestamp}.json`
|
||||||
|
|
||||||
|
## 5. Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/validate.md` - Test-fix cycle with strategy engine
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- None (uses project's test framework directly via Bash)
|
||||||
|
|
||||||
|
## 6. Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
**Task Loading**:
|
||||||
|
```javascript
|
||||||
|
const tasks = Glob(".workflow/.team/tasks/TEST-*.json")
|
||||||
|
.filter(task => task.status === "pending" && task.assigned_to === "tester")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Implementation Task Linking**:
|
||||||
|
```javascript
|
||||||
|
const implTaskId = task.metadata?.impl_task_id
|
||||||
|
const implTask = implTaskId ? Read(`.workflow/.team/tasks/${implTaskId}.json`) : null
|
||||||
|
const modifiedFiles = implTask?.metadata?.files_modified || []
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Test Framework Detection
|
||||||
|
|
||||||
|
**Framework Detection**:
|
||||||
|
```javascript
|
||||||
|
function detectTestFramework() {
  // Detect the project's test framework ("vitest" | "jest" | "mocha" |
  // "pytest" | "unknown") using three signals in priority order:
  // declared dependencies, the package.json test script, then config files.
  // NOTE(review): this assumes package.json exists and parses — a pure
  // Python project would fail at Read/JSON.parse before reaching the
  // pytest.ini check below; confirm Read's failure behavior.

  // Check package.json for test frameworks
  const packageJson = Read("package.json")
  const pkg = JSON.parse(packageJson)

  // Priority 1: Check dependencies
  if (pkg.devDependencies?.vitest || pkg.dependencies?.vitest) {
    return "vitest"
  }
  if (pkg.devDependencies?.jest || pkg.dependencies?.jest) {
    return "jest"
  }
  if (pkg.devDependencies?.mocha || pkg.dependencies?.mocha) {
    return "mocha"
  }
  // NOTE(review): pytest is a Python package and would not normally appear
  // in package.json dependencies — this branch looks effectively dead.
  if (pkg.devDependencies?.pytest || pkg.dependencies?.pytest) {
    return "pytest"
  }

  // Priority 2: Check test scripts
  const testScript = pkg.scripts?.test || ""
  if (testScript.includes("vitest")) return "vitest"
  if (testScript.includes("jest")) return "jest"
  if (testScript.includes("mocha")) return "mocha"
  if (testScript.includes("pytest")) return "pytest"

  // Priority 3: Check config files
  const configFiles = Glob("{vitest,jest,mocha}.config.{js,ts,json}")
  if (configFiles.some(f => f.includes("vitest"))) return "vitest"
  if (configFiles.some(f => f.includes("jest"))) return "jest"
  if (configFiles.some(f => f.includes("mocha"))) return "mocha"

  // pytest.ini is the only pytest marker checked; pytest configured via
  // pyproject.toml or setup.cfg would be missed here.
  if (Bash("test -f pytest.ini").exitCode === 0) return "pytest"

  return "unknown"
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Affected Test Discovery**:
|
||||||
|
```javascript
|
||||||
|
function findAffectedTests(modifiedFiles) {
  // Map each modified source file to existing test files by probing a list
  // of conventional test-file locations, and return the de-duplicated set
  // of variants that actually exist on disk.
  const testFiles = []

  for (const file of modifiedFiles) {
    // Path with the source extension stripped — used for same-directory
    // sibling variants (e.g. src/foo.ts -> src/foo.test.ts).
    const baseName = file.replace(/\.(ts|js|tsx|jsx|py)$/, "")
    const dir = file.substring(0, file.lastIndexOf("/"))

    const testVariants = [
      // Same directory variants
      `${baseName}.test.ts`,
      `${baseName}.test.js`,
      `${baseName}.spec.ts`,
      `${baseName}.spec.js`,
      `${baseName}_test.py`,
      // NOTE(review): this variant has no directory component, so it only
      // matches a test file in the working directory — confirm intended.
      `test_${baseName.split("/").pop()}.py`,

      // Test directory variants (mirror src/ into common test roots)
      `${file.replace(/^src\//, "tests/")}`,
      `${file.replace(/^src\//, "__tests__/")}`,
      `${file.replace(/^src\//, "test/")}`,
      `${dir}/__tests__/${file.split("/").pop().replace(/\.(ts|js|tsx|jsx)$/, ".test.ts")}`,

      // Python variants
      `${file.replace(/^src\//, "tests/").replace(/\.py$/, "_test.py")}`,
      // NOTE(review): for nested files this yields "tests/test_sub/.../x.py"
      // (prefix inserted before the subpath, not before the filename) —
      // verify that is the intended layout.
      `${file.replace(/^src\//, "tests/test_")}`
    ]

    // Keep only variants that exist on disk.
    for (const variant of testVariants) {
      if (Bash(`test -f ${variant}`).exitCode === 0) {
        testFiles.push(variant)
      }
    }
  }

  return [...new Set(testFiles)] // Deduplicate
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Test Execution & Fix Cycle
|
||||||
|
|
||||||
|
**Delegate to Command**:
|
||||||
|
```javascript
|
||||||
|
const validateCommand = Read("commands/validate.md")
|
||||||
|
// Command handles:
|
||||||
|
// - MAX_ITERATIONS=10, PASS_RATE_TARGET=95
|
||||||
|
// - Main iteration loop with strategy selection
|
||||||
|
// - Quality gate check (affected tests → full suite)
|
||||||
|
// - applyFixes by strategy (conservative/aggressive/surgical)
|
||||||
|
// - Progress updates for long cycles (iteration > 5)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Result Analysis
|
||||||
|
|
||||||
|
**Test Result Parsing**:
|
||||||
|
```javascript
|
||||||
|
function parseTestResults(output, framework) {
  // Parse raw test-runner output into
  // { total, passed, failed, skipped, failures: [{ test, error }] }.
  // Supports jest/vitest and pytest output; anything else yields zeros.
  const results = {
    total: 0,
    passed: 0,
    failed: 0,
    skipped: 0,
    failures: []
  }

  if (framework === "jest" || framework === "vitest") {
    // Parse Jest/Vitest output. The real summary line reads
    // "Tests: N failed, M passed, T total", so the total pattern must
    // tolerate the optional leading counter segments.
    const totalMatch = output.match(/Tests:\s+(?:\d+\s+\w+,\s+)*(\d+)\s+total/)
    const passedMatch = output.match(/(\d+)\s+passed/)
    const failedMatch = output.match(/(\d+)\s+failed/)
    const skippedMatch = output.match(/(\d+)\s+skipped/)

    results.passed = passedMatch ? parseInt(passedMatch[1]) : 0
    results.failed = failedMatch ? parseInt(failedMatch[1]) : 0
    results.skipped = skippedMatch ? parseInt(skippedMatch[1]) : 0
    // BUG FIX: the original pattern /Tests:\s+(\d+)\s+total/ never matched
    // summaries like "Tests: 2 failed, 3 passed, 5 total", leaving total=0
    // while passed/failed were non-zero — causing divide-by-zero pass rates
    // downstream. Fall back to the sum of parsed counters when the summary
    // total is absent.
    results.total = totalMatch
      ? parseInt(totalMatch[1])
      : results.passed + results.failed + results.skipped

    // Extract failure details ("●"-prefixed blocks)
    const failureRegex = /●\s+(.*?)\n\n\s+(.*?)(?=\n\n●|\n\nTest Suites:)/gs
    let match
    while ((match = failureRegex.exec(output)) !== null) {
      results.failures.push({
        test: match[1].trim(),
        error: match[2].trim()
      })
    }
  } else if (framework === "pytest") {
    // Parse pytest summary banner ("==== N failed, M passed ...")
    const summaryMatch = output.match(/=+\s+(\d+)\s+failed,\s+(\d+)\s+passed/)
    if (summaryMatch) {
      results.failed = parseInt(summaryMatch[1])
      results.passed = parseInt(summaryMatch[2])
      results.total = results.failed + results.passed
    }

    // Extract failure details ("FAILED path::test - error" lines)
    const failureRegex = /FAILED\s+(.*?)\s+-\s+(.*?)(?=\n_+|\nFAILED|$)/gs
    let match
    while ((match = failureRegex.exec(output)) !== null) {
      results.failures.push({
        test: match[1].trim(),
        error: match[2].trim()
      })
    }
  }

  return results
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Failure Classification**:
|
||||||
|
```javascript
|
||||||
|
function classifyFailures(failures) {
  // Bucket failures by severity so fix strategies can prioritise:
  //   critical — broken code (syntax errors, missing imports/symbols)
  //   high     — assertion / logic failures
  //   medium   — timeouts and async flakiness
  //   low      — everything else (warnings, deprecations)
  const classified = {
    critical: [], // Syntax errors, missing imports
    high: [], // Assertion failures, logic errors
    medium: [], // Timeout, flaky tests
    low: [] // Warnings, deprecations
  }

  for (const failure of failures) {
    const error = failure.error.toLowerCase()

    if (error.includes("syntaxerror") ||
        error.includes("cannot find module") ||
        error.includes("is not defined")) {
      classified.critical.push(failure)
    } else if (error.includes("expected") ||
               error.includes("assertion") ||
               // BUG FIX: the original tested for "toBe"/"toEqual" against
               // the lower-cased message — mixed-case needles can never
               // match, so matcher failures fell through to "low".
               error.includes("tobe") ||
               error.includes("toequal")) {
      classified.high.push(failure)
    } else if (error.includes("timeout") ||
               error.includes("async")) {
      classified.medium.push(failure)
    } else {
      classified.low.push(failure)
    }
  }

  return classified
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator
|
||||||
|
|
||||||
|
**Success Report**:
|
||||||
|
```javascript
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_complete",
|
||||||
|
task_id: task.task_id,
|
||||||
|
status: "success",
|
||||||
|
pass_rate: (results.passed / results.total * 100).toFixed(1),
|
||||||
|
tests_run: results.total,
|
||||||
|
tests_passed: results.passed,
|
||||||
|
tests_failed: results.failed,
|
||||||
|
iterations: iterationCount,
|
||||||
|
framework: framework,
|
||||||
|
affected_tests: affectedTests.length,
|
||||||
|
full_suite_run: fullSuiteRun,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[tester]")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Failure Report**:
|
||||||
|
```javascript
|
||||||
|
const classified = classifyFailures(results.failures)
|
||||||
|
|
||||||
|
team_msg({
|
||||||
|
to: "coordinator",
|
||||||
|
type: "task_failed",
|
||||||
|
task_id: task.task_id,
|
||||||
|
error: "Test failures exceeded threshold",
|
||||||
|
pass_rate: (results.passed / results.total * 100).toFixed(1),
|
||||||
|
tests_run: results.total,
|
||||||
|
failures: {
|
||||||
|
critical: classified.critical.length,
|
||||||
|
high: classified.high.length,
|
||||||
|
medium: classified.medium.length,
|
||||||
|
low: classified.low.length
|
||||||
|
},
|
||||||
|
failure_details: classified,
|
||||||
|
iterations: iterationCount,
|
||||||
|
framework: framework,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}, "[tester]")
|
||||||
|
```
|
||||||
|
|
||||||
|
## 7. Strategy Engine
|
||||||
|
|
||||||
|
### Strategy Selection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function selectStrategy(iteration, passRate, failures) {
  // Choose the fix strategy for this cycle. Ordering matters:
  // conservative wins early or when the suite is mostly healthy, surgical
  // targets a small cluster of critical failures, and aggressive kicks in
  // once progress has stalled or the suite is badly broken.
  const severity = classifyFailures(failures)

  if (iteration <= 3 || passRate >= 80) {
    return "conservative"
  }

  const criticalCount = severity.critical.length
  if (criticalCount > 0 && criticalCount < 5) {
    return "surgical"
  }

  if (passRate < 50 || iteration > 7) {
    return "aggressive"
  }

  return "conservative"
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fix Application
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function applyFixes(strategy, failures, framework) {
  // Dispatch to the strategy-specific fixer. Always returns a result
  // object: the original silently returned undefined when the conservative
  // path found no critical failure or the strategy was unrecognised, which
  // crashed callers reading `result.success`. Failure objects here match
  // the { success: false, error } shape used by applyConservativeFixes.
  if (strategy === "conservative") {
    // Fix only critical failures, one at a time
    const critical = classifyFailures(failures).critical
    if (critical.length > 0) {
      return fixFailure(critical[0], framework)
    }
    return { success: false, error: "No critical failures to fix" }
  }

  if (strategy === "surgical") {
    // Fix a specific pattern across all occurrences
    const pattern = identifyCommonPattern(failures)
    return fixPattern(pattern, framework)
  }

  if (strategy === "aggressive") {
    // Fix all failures in batch
    return fixAllFailures(failures, framework)
  }

  return { success: false, error: `Unknown strategy: ${strategy}` }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 8. Error Handling
|
||||||
|
|
||||||
|
| Error Type | Recovery Strategy | Escalation |
|
||||||
|
|------------|-------------------|------------|
|
||||||
|
| Framework not detected | Prompt user for framework | Immediate escalation |
|
||||||
|
| No tests found | Report to coordinator | Manual intervention |
|
||||||
|
| Test command fails | Retry with verbose output | Report after 2 failures |
|
||||||
|
| Infinite fix loop | Abort after MAX_ITERATIONS | Report iteration history |
|
||||||
|
| Pass rate below target | Report best attempt | Include failure classification |
|
||||||
|
|
||||||
|
## 9. Configuration
|
||||||
|
|
||||||
|
| Parameter | Default | Description |
|
||||||
|
|-----------|---------|-------------|
|
||||||
|
| MAX_ITERATIONS | 10 | Maximum fix-test cycles |
|
||||||
|
| PASS_RATE_TARGET | 95 | Target pass rate (%) |
|
||||||
|
| AFFECTED_TESTS_FIRST | true | Run affected tests before full suite |
|
||||||
|
| PARALLEL_TESTS | true | Enable parallel test execution |
|
||||||
|
| TIMEOUT_PER_TEST | 30000 | Timeout per test (ms) |
|
||||||
|
|
||||||
|
## 10. Test Framework Commands
|
||||||
|
|
||||||
|
| Framework | Affected Tests Command | Full Suite Command |
|
||||||
|
|-----------|------------------------|-------------------|
|
||||||
|
| vitest | `vitest run ${files.join(" ")}` | `vitest run` |
|
||||||
|
| jest | `jest ${files.join(" ")} --no-coverage` | `jest --no-coverage` |
|
||||||
|
| mocha | `mocha ${files.join(" ")}` | `mocha` |
|
||||||
|
| pytest | `pytest ${files.join(" ")} -v` | `pytest -v` |
|
||||||
@@ -0,0 +1,698 @@
|
|||||||
|
# Command: Generate Document
|
||||||
|
|
||||||
|
Multi-CLI document generation for 4 document types: Product Brief, Requirements/PRD, Architecture, Epics & Stories.
|
||||||
|
|
||||||
|
## Pre-Steps (All Document Types)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// 1. Load document standards
|
||||||
|
const docStandards = Read('../../specs/document-standards.md')
|
||||||
|
|
||||||
|
// 2. Load appropriate template
|
||||||
|
const templateMap = {
|
||||||
|
'product-brief': '../../templates/product-brief.md',
|
||||||
|
'requirements': '../../templates/requirements-prd.md',
|
||||||
|
'architecture': '../../templates/architecture-doc.md',
|
||||||
|
'epics': '../../templates/epics-template.md'
|
||||||
|
}
|
||||||
|
const template = Read(templateMap[docType])
|
||||||
|
|
||||||
|
// 3. Build shared context
|
||||||
|
const seedAnalysis = specConfig?.seed_analysis ||
|
||||||
|
(priorDocs.discoveryContext ? JSON.parse(priorDocs.discoveryContext).seed_analysis : {})
|
||||||
|
|
||||||
|
const sharedContext = `
|
||||||
|
SEED: ${specConfig?.topic || ''}
|
||||||
|
PROBLEM: ${seedAnalysis.problem_statement || ''}
|
||||||
|
TARGET USERS: ${(seedAnalysis.target_users || []).join(', ')}
|
||||||
|
DOMAIN: ${seedAnalysis.domain || ''}
|
||||||
|
CONSTRAINTS: ${(seedAnalysis.constraints || []).join(', ')}
|
||||||
|
FOCUS AREAS: ${(specConfig?.focus_areas || []).join(', ')}
|
||||||
|
${priorDocs.discoveryContext ? `
|
||||||
|
CODEBASE CONTEXT:
|
||||||
|
- Existing patterns: ${JSON.parse(priorDocs.discoveryContext).existing_patterns?.slice(0,5).join(', ') || 'none'}
|
||||||
|
- Tech stack: ${JSON.stringify(JSON.parse(priorDocs.discoveryContext).tech_stack || {})}
|
||||||
|
` : ''}`
|
||||||
|
|
||||||
|
// 4. Route to specific document type
|
||||||
|
```
|
||||||
|
|
||||||
|
## DRAFT-001: Product Brief
|
||||||
|
|
||||||
|
3-way parallel CLI analysis (product/technical/user perspectives), then synthesize.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (docType === 'product-brief') {
|
||||||
|
// === Parallel CLI Analysis ===
|
||||||
|
|
||||||
|
// Product Perspective (Gemini)
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Product analysis for specification - identify market fit, user value, and success criteria.
|
||||||
|
Success: Clear vision, measurable goals, competitive positioning.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Define product vision (1-3 sentences, aspirational)
|
||||||
|
- Analyze market/competitive landscape
|
||||||
|
- Define 3-5 measurable success metrics
|
||||||
|
- Identify scope boundaries (in-scope vs out-of-scope)
|
||||||
|
- Assess user value proposition
|
||||||
|
- List assumptions that need validation
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured product analysis with: vision, goals with metrics, scope, competitive positioning, assumptions
|
||||||
|
CONSTRAINTS: Focus on 'what' and 'why', not 'how'
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Technical Perspective (Codex)
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Technical feasibility analysis for specification - assess implementation viability and constraints.
|
||||||
|
Success: Clear technical constraints, integration complexity, technology recommendations.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Assess technical feasibility of the core concept
|
||||||
|
- Identify technical constraints and blockers
|
||||||
|
- Evaluate integration complexity with existing systems
|
||||||
|
- Recommend technology approach (high-level)
|
||||||
|
- Identify technical risks and dependencies
|
||||||
|
- Estimate complexity: simple/moderate/complex
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Technical analysis with: feasibility assessment, constraints, integration complexity, tech recommendations, risks
|
||||||
|
CONSTRAINTS: Focus on feasibility and constraints, not detailed architecture
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// User Perspective (Claude)
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: User experience analysis for specification - understand user journeys, pain points, and UX considerations.
|
||||||
|
Success: Clear user personas, journey maps, UX requirements.
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Elaborate user personas with goals and frustrations
|
||||||
|
- Map primary user journey (happy path)
|
||||||
|
- Identify key pain points in current experience
|
||||||
|
- Define UX success criteria
|
||||||
|
- List accessibility and usability considerations
|
||||||
|
- Suggest interaction patterns
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: User analysis with: personas, journey map, pain points, UX criteria, interaction recommendations
|
||||||
|
CONSTRAINTS: Focus on user needs and experience, not implementation
|
||||||
|
" --tool claude --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// STOP: Wait for all 3 CLI results
|
||||||
|
|
||||||
|
// === Synthesize Three Perspectives ===
|
||||||
|
const synthesis = {
|
||||||
|
convergent_themes: [], // Themes consistent across all three perspectives
|
||||||
|
conflicts: [], // Conflicting viewpoints
|
||||||
|
product_insights: [], // Unique product perspective insights
|
||||||
|
technical_insights: [], // Unique technical perspective insights
|
||||||
|
user_insights: [] // Unique user perspective insights
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse CLI outputs and identify:
|
||||||
|
// - Common themes mentioned by 2+ perspectives
|
||||||
|
// - Conflicts (e.g., product wants feature X, technical says infeasible)
|
||||||
|
// - Unique insights from each perspective
|
||||||
|
|
||||||
|
// === Integrate Discussion Feedback ===
|
||||||
|
if (discussionFeedback) {
|
||||||
|
// Extract consensus and adjustments from discuss-001-scope.md
|
||||||
|
// Merge discussion conclusions into synthesis
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Generate Document from Template ===
|
||||||
|
const frontmatter = `---
|
||||||
|
session_id: ${specConfig?.session_id || 'unknown'}
|
||||||
|
phase: 2
|
||||||
|
document_type: product-brief
|
||||||
|
status: draft
|
||||||
|
generated_at: ${new Date().toISOString()}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- spec-config.json
|
||||||
|
- discovery-context.json
|
||||||
|
---`
|
||||||
|
|
||||||
|
// Fill template sections:
|
||||||
|
// - Vision (from product perspective + synthesis)
|
||||||
|
// - Problem Statement (from seed analysis + user perspective)
|
||||||
|
// - Target Users (from user perspective + personas)
|
||||||
|
// - Goals (from product perspective + metrics)
|
||||||
|
// - Scope (from product perspective + technical constraints)
|
||||||
|
// - Success Criteria (from all three perspectives)
|
||||||
|
// - Assumptions (from product + technical perspectives)
|
||||||
|
|
||||||
|
const filledContent = fillTemplate(template, {
|
||||||
|
vision: productPerspective.vision,
|
||||||
|
problem: seedAnalysis.problem_statement,
|
||||||
|
users: userPerspective.personas,
|
||||||
|
goals: productPerspective.goals,
|
||||||
|
scope: synthesis.scope_boundaries,
|
||||||
|
success_criteria: synthesis.convergent_themes,
|
||||||
|
assumptions: [...productPerspective.assumptions, ...technicalPerspective.assumptions]
|
||||||
|
})
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/spec/product-brief.md`, `${frontmatter}\n\n${filledContent}`)
|
||||||
|
|
||||||
|
return {
|
||||||
|
outputPath: 'spec/product-brief.md',
|
||||||
|
documentSummary: `Product Brief generated with ${synthesis.convergent_themes.length} convergent themes, ${synthesis.conflicts.length} conflicts resolved`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## DRAFT-002: Requirements/PRD
|
||||||
|
|
||||||
|
Gemini CLI expansion to generate REQ-NNN and NFR-{type}-NNN files.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (docType === 'requirements') {
|
||||||
|
// === Requirements Expansion CLI ===
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Generate detailed functional and non-functional requirements from product brief.
|
||||||
|
Success: Complete PRD with testable acceptance criteria for every requirement.
|
||||||
|
|
||||||
|
PRODUCT BRIEF CONTEXT:
|
||||||
|
${priorDocs.productBrief?.slice(0, 3000) || ''}
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- For each goal in the product brief, generate 3-7 functional requirements
|
||||||
|
- Each requirement must have:
|
||||||
|
- Unique ID: REQ-NNN (zero-padded)
|
||||||
|
- Clear title
|
||||||
|
- Detailed description
|
||||||
|
- User story: As a [persona], I want [action] so that [benefit]
|
||||||
|
- 2-4 specific, testable acceptance criteria
|
||||||
|
- Generate non-functional requirements:
|
||||||
|
- Performance (response times, throughput)
|
||||||
|
- Security (authentication, authorization, data protection)
|
||||||
|
- Scalability (user load, data volume)
|
||||||
|
- Usability (accessibility, learnability)
|
||||||
|
- Assign MoSCoW priority: Must/Should/Could/Won't
|
||||||
|
- Output structure per requirement: ID, title, description, user_story, acceptance_criteria[], priority, traces
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured requirements with: ID, title, description, user story, acceptance criteria, priority, traceability to goals
|
||||||
|
CONSTRAINTS: Every requirement must be specific enough to estimate and test. No vague requirements.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
|
||||||
|
// === Integrate Discussion Feedback ===
|
||||||
|
if (discussionFeedback) {
|
||||||
|
// Extract requirement adjustments from discuss-002-brief.md
|
||||||
|
// Merge new/modified/deleted requirements
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Generate requirements/ Directory ===
|
||||||
|
Bash(`mkdir -p "${sessionFolder}/spec/requirements"`)
|
||||||
|
|
||||||
|
const timestamp = new Date().toISOString()
|
||||||
|
|
||||||
|
// Parse CLI output → funcReqs[], nfReqs[]
|
||||||
|
const funcReqs = parseFunctionalRequirements(cliOutput)
|
||||||
|
const nfReqs = parseNonFunctionalRequirements(cliOutput)
|
||||||
|
|
||||||
|
// Write individual REQ-*.md files (one per functional requirement)
|
||||||
|
funcReqs.forEach(req => {
|
||||||
|
const reqFrontmatter = `---
|
||||||
|
id: REQ-${req.id}
|
||||||
|
title: "${req.title}"
|
||||||
|
priority: ${req.priority}
|
||||||
|
status: draft
|
||||||
|
traces:
|
||||||
|
- product-brief.md
|
||||||
|
---`
|
||||||
|
const reqContent = `${reqFrontmatter}
|
||||||
|
|
||||||
|
# REQ-${req.id}: ${req.title}
|
||||||
|
|
||||||
|
## Description
|
||||||
|
${req.description}
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
${req.user_story}
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
${req.acceptance_criteria.map((ac, i) => `${i+1}. ${ac}`).join('\n')}
|
||||||
|
`
|
||||||
|
Write(`${sessionFolder}/spec/requirements/REQ-${req.id}-${req.slug}.md`, reqContent)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write individual NFR-*.md files
|
||||||
|
nfReqs.forEach(nfr => {
|
||||||
|
const nfrFrontmatter = `---
|
||||||
|
id: NFR-${nfr.type}-${nfr.id}
|
||||||
|
type: ${nfr.type}
|
||||||
|
title: "${nfr.title}"
|
||||||
|
status: draft
|
||||||
|
traces:
|
||||||
|
- product-brief.md
|
||||||
|
---`
|
||||||
|
const nfrContent = `${nfrFrontmatter}
|
||||||
|
|
||||||
|
# NFR-${nfr.type}-${nfr.id}: ${nfr.title}
|
||||||
|
|
||||||
|
## Requirement
|
||||||
|
${nfr.requirement}
|
||||||
|
|
||||||
|
## Metric & Target
|
||||||
|
${nfr.metric} — Target: ${nfr.target}
|
||||||
|
`
|
||||||
|
Write(`${sessionFolder}/spec/requirements/NFR-${nfr.type}-${nfr.id}-${nfr.slug}.md`, nfrContent)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write _index.md (summary + links)
|
||||||
|
const indexFrontmatter = `---
|
||||||
|
session_id: ${specConfig?.session_id || 'unknown'}
|
||||||
|
phase: 3
|
||||||
|
document_type: requirements-index
|
||||||
|
status: draft
|
||||||
|
generated_at: ${timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- product-brief.md
|
||||||
|
---`
|
||||||
|
const indexContent = `${indexFrontmatter}
|
||||||
|
|
||||||
|
# Requirements (PRD)
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
Total: ${funcReqs.length} functional + ${nfReqs.length} non-functional requirements
|
||||||
|
|
||||||
|
## Functional Requirements
|
||||||
|
| ID | Title | Priority | Status |
|
||||||
|
|----|-------|----------|--------|
|
||||||
|
${funcReqs.map(r => `| [REQ-${r.id}](REQ-${r.id}-${r.slug}.md) | ${r.title} | ${r.priority} | draft |`).join('\n')}
|
||||||
|
|
||||||
|
## Non-Functional Requirements
|
||||||
|
| ID | Type | Title |
|
||||||
|
|----|------|-------|
|
||||||
|
${nfReqs.map(n => `| [NFR-${n.type}-${n.id}](NFR-${n.type}-${n.id}-${n.slug}.md) | ${n.type} | ${n.title} |`).join('\n')}
|
||||||
|
|
||||||
|
## MoSCoW Summary
|
||||||
|
- **Must**: ${funcReqs.filter(r => r.priority === 'Must').length}
|
||||||
|
- **Should**: ${funcReqs.filter(r => r.priority === 'Should').length}
|
||||||
|
- **Could**: ${funcReqs.filter(r => r.priority === 'Could').length}
|
||||||
|
- **Won't**: ${funcReqs.filter(r => r.priority === "Won't").length}
|
||||||
|
`
|
||||||
|
Write(`${sessionFolder}/spec/requirements/_index.md`, indexContent)
|
||||||
|
|
||||||
|
return {
|
||||||
|
outputPath: 'spec/requirements/_index.md',
|
||||||
|
documentSummary: `Requirements generated: ${funcReqs.length} functional, ${nfReqs.length} non-functional`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## DRAFT-003: Architecture
|
||||||
|
|
||||||
|
Two-stage CLI: Gemini architecture design + Codex architecture review.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (docType === 'architecture') {
|
||||||
|
// === Stage 1: Architecture Design (Gemini) ===
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Generate technical architecture for the specified requirements.
|
||||||
|
Success: Complete component architecture, tech stack, and ADRs with justified decisions.
|
||||||
|
|
||||||
|
PRODUCT BRIEF (summary):
|
||||||
|
${priorDocs.productBrief?.slice(0, 3000) || ''}
|
||||||
|
|
||||||
|
REQUIREMENTS:
|
||||||
|
${priorDocs.requirementsIndex?.slice(0, 5000) || ''}
|
||||||
|
|
||||||
|
${sharedContext}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Define system architecture style (monolith, microservices, serverless, etc.) with justification
|
||||||
|
- Identify core components and their responsibilities
|
||||||
|
- Create component interaction diagram (Mermaid graph TD format)
|
||||||
|
- Specify technology stack: languages, frameworks, databases, infrastructure
|
||||||
|
- Generate 2-4 Architecture Decision Records (ADRs):
|
||||||
|
- Each ADR: context, decision, 2-3 alternatives with pros/cons, consequences
|
||||||
|
- Focus on: data storage, API design, authentication, key technical choices
|
||||||
|
- Define data model: key entities and relationships (Mermaid erDiagram format)
|
||||||
|
- Identify security architecture: auth, authorization, data protection
|
||||||
|
- List API endpoints (high-level)
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Complete architecture with: style justification, component diagram, tech stack table, ADRs, data model, security controls, API overview
|
||||||
|
CONSTRAINTS: Architecture must support all Must-have requirements. Prefer proven technologies.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Wait for Gemini result
|
||||||
|
|
||||||
|
// === Stage 2: Architecture Review (Codex) ===
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Critical review of proposed architecture - identify weaknesses and risks.
|
||||||
|
Success: Actionable feedback with specific concerns and improvement suggestions.
|
||||||
|
|
||||||
|
PROPOSED ARCHITECTURE:
|
||||||
|
${geminiArchitectureOutput.slice(0, 5000)}
|
||||||
|
|
||||||
|
REQUIREMENTS CONTEXT:
|
||||||
|
${priorDocs.requirementsIndex?.slice(0, 2000) || ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Challenge each ADR: are the alternatives truly the best options?
|
||||||
|
- Identify scalability bottlenecks in the component design
|
||||||
|
- Assess security gaps: authentication, authorization, data protection
|
||||||
|
- Evaluate technology choices: maturity, community support, fit
|
||||||
|
- Check for over-engineering or under-engineering
|
||||||
|
- Verify architecture covers all Must-have requirements
|
||||||
|
- Rate overall architecture quality: 1-5 with justification
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Architecture review with: per-ADR feedback, scalability concerns, security gaps, technology risks, quality rating
|
||||||
|
CONSTRAINTS: Be genuinely critical, not just validating. Focus on actionable improvements.
|
||||||
|
" --tool codex --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Wait for Codex result
|
||||||
|
|
||||||
|
// === Integrate Discussion Feedback ===
|
||||||
|
if (discussionFeedback) {
|
||||||
|
// Extract architecture feedback from discuss-003-requirements.md
|
||||||
|
// Merge into architecture design
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Codebase Integration Mapping (conditional) ===
|
||||||
|
let integrationMapping = null
|
||||||
|
if (priorDocs.discoveryContext) {
|
||||||
|
const dc = JSON.parse(priorDocs.discoveryContext)
|
||||||
|
if (dc.relevant_files) {
|
||||||
|
integrationMapping = dc.relevant_files.map(f => ({
|
||||||
|
new_component: '...',
|
||||||
|
existing_module: f.path,
|
||||||
|
integration_type: 'Extend|Replace|New',
|
||||||
|
notes: f.rationale
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Generate architecture/ Directory ===
|
||||||
|
Bash(`mkdir -p "${sessionFolder}/spec/architecture"`)
|
||||||
|
|
||||||
|
const timestamp = new Date().toISOString()
|
||||||
|
const adrs = parseADRs(geminiArchitectureOutput, codexReviewOutput)
|
||||||
|
|
||||||
|
// Write individual ADR-*.md files
|
||||||
|
adrs.forEach(adr => {
|
||||||
|
const adrFrontmatter = `---
|
||||||
|
id: ADR-${adr.id}
|
||||||
|
title: "${adr.title}"
|
||||||
|
status: draft
|
||||||
|
traces:
|
||||||
|
- ../requirements/_index.md
|
||||||
|
---`
|
||||||
|
const adrContent = `${adrFrontmatter}
|
||||||
|
|
||||||
|
# ADR-${adr.id}: ${adr.title}
|
||||||
|
|
||||||
|
## Context
|
||||||
|
${adr.context}
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
${adr.decision}
|
||||||
|
|
||||||
|
## Alternatives
|
||||||
|
${adr.alternatives.map((alt, i) => `### Option ${i+1}: ${alt.name}\n- **Pros**: ${alt.pros.join(', ')}\n- **Cons**: ${alt.cons.join(', ')}`).join('\n\n')}
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
${adr.consequences}
|
||||||
|
|
||||||
|
## Review Feedback
|
||||||
|
${adr.reviewFeedback || 'N/A'}
|
||||||
|
`
|
||||||
|
Write(`${sessionFolder}/spec/architecture/ADR-${adr.id}-${adr.slug}.md`, adrContent)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write _index.md (with Mermaid component diagram + ER diagram + links)
|
||||||
|
const archIndexFrontmatter = `---
|
||||||
|
session_id: ${specConfig?.session_id || 'unknown'}
|
||||||
|
phase: 4
|
||||||
|
document_type: architecture-index
|
||||||
|
status: draft
|
||||||
|
generated_at: ${timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- ../product-brief.md
|
||||||
|
- ../requirements/_index.md
|
||||||
|
---`
|
||||||
|
|
||||||
|
const archIndexContent = `${archIndexFrontmatter}
|
||||||
|
|
||||||
|
# Architecture Document
|
||||||
|
|
||||||
|
## System Overview
|
||||||
|
${geminiArchitectureOutput.system_overview}
|
||||||
|
|
||||||
|
## Component Diagram
|
||||||
|
\`\`\`mermaid
|
||||||
|
${geminiArchitectureOutput.component_diagram}
|
||||||
|
\`\`\`
|
||||||
|
|
||||||
|
## Technology Stack
|
||||||
|
${geminiArchitectureOutput.tech_stack_table}
|
||||||
|
|
||||||
|
## Architecture Decision Records
|
||||||
|
| ID | Title | Status |
|
||||||
|
|----|-------|--------|
|
||||||
|
${adrs.map(a => `| [ADR-${a.id}](ADR-${a.id}-${a.slug}.md) | ${a.title} | draft |`).join('\n')}
|
||||||
|
|
||||||
|
## Data Model
|
||||||
|
\`\`\`mermaid
|
||||||
|
${geminiArchitectureOutput.data_model_diagram}
|
||||||
|
\`\`\`
|
||||||
|
|
||||||
|
## API Design
|
||||||
|
${geminiArchitectureOutput.api_overview}
|
||||||
|
|
||||||
|
## Security Controls
|
||||||
|
${geminiArchitectureOutput.security_controls}
|
||||||
|
|
||||||
|
## Review Summary
|
||||||
|
${codexReviewOutput.summary}
|
||||||
|
Quality Rating: ${codexReviewOutput.quality_rating}/5
|
||||||
|
`
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/spec/architecture/_index.md`, archIndexContent)
|
||||||
|
|
||||||
|
return {
|
||||||
|
outputPath: 'spec/architecture/_index.md',
|
||||||
|
documentSummary: `Architecture generated with ${adrs.length} ADRs, quality rating ${codexReviewOutput.quality_rating}/5`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## DRAFT-004: Epics & Stories
|
||||||
|
|
||||||
|
Gemini CLI decomposition to generate EPIC-*.md files.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (docType === 'epics') {
|
||||||
|
// === Epic Decomposition CLI ===
|
||||||
|
Bash({
|
||||||
|
command: `ccw cli -p "PURPOSE: Decompose requirements into executable Epics and Stories for implementation planning.
|
||||||
|
Success: 3-7 Epics with prioritized Stories, dependency map, and MVP subset clearly defined.
|
||||||
|
|
||||||
|
PRODUCT BRIEF (summary):
|
||||||
|
${priorDocs.productBrief?.slice(0, 2000) || ''}
|
||||||
|
|
||||||
|
REQUIREMENTS:
|
||||||
|
${priorDocs.requirementsIndex?.slice(0, 5000) || ''}
|
||||||
|
|
||||||
|
ARCHITECTURE (summary):
|
||||||
|
${priorDocs.architectureIndex?.slice(0, 3000) || ''}
|
||||||
|
|
||||||
|
TASK:
|
||||||
|
- Group requirements into 3-7 logical Epics:
|
||||||
|
- Each Epic: EPIC-NNN ID, title, description, priority (Must/Should/Could)
|
||||||
|
- Group by functional domain or user journey stage
|
||||||
|
- Tag MVP Epics (minimum set for initial release)
|
||||||
|
- For each Epic, generate 2-5 Stories:
|
||||||
|
- Each Story: STORY-{EPIC}-NNN ID, title
|
||||||
|
- User story format: As a [persona], I want [action] so that [benefit]
|
||||||
|
- 2-4 acceptance criteria per story (testable)
|
||||||
|
- Relative size estimate: S/M/L/XL
|
||||||
|
- Trace to source requirement(s): REQ-NNN
|
||||||
|
- Create dependency map:
|
||||||
|
- Cross-Epic dependencies (which Epics block others)
|
||||||
|
- Mermaid graph LR format
|
||||||
|
- Recommended execution order with rationale
|
||||||
|
- Define MVP:
|
||||||
|
- Which Epics are in MVP
|
||||||
|
- MVP definition of done (3-5 criteria)
|
||||||
|
- What is explicitly deferred post-MVP
|
||||||
|
|
||||||
|
MODE: analysis
|
||||||
|
EXPECTED: Structured output with: Epic list (ID, title, priority, MVP flag), Stories per Epic (ID, user story, AC, size, trace), dependency Mermaid diagram, execution order, MVP definition
|
||||||
|
CONSTRAINTS: Every Must-have requirement must appear in at least one Story. Stories must be small enough to implement independently. Dependencies should be minimized across Epics.
|
||||||
|
" --tool gemini --mode analysis`,
|
||||||
|
run_in_background: true
|
||||||
|
})
|
||||||
|
|
||||||
|
// Wait for CLI result
|
||||||
|
|
||||||
|
// === Integrate Discussion Feedback ===
|
||||||
|
if (discussionFeedback) {
|
||||||
|
// Extract execution feedback from discuss-004-architecture.md
|
||||||
|
// Adjust Epic granularity, MVP scope
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Generate epics/ Directory ===
|
||||||
|
Bash(`mkdir -p "${sessionFolder}/spec/epics"`)
|
||||||
|
|
||||||
|
const timestamp = new Date().toISOString()
|
||||||
|
const epicsList = parseEpics(cliOutput)
|
||||||
|
|
||||||
|
// Write individual EPIC-*.md files (with stories)
|
||||||
|
epicsList.forEach(epic => {
|
||||||
|
const epicFrontmatter = `---
|
||||||
|
id: EPIC-${epic.id}
|
||||||
|
title: "${epic.title}"
|
||||||
|
priority: ${epic.priority}
|
||||||
|
mvp: ${epic.mvp}
|
||||||
|
size: ${epic.size}
|
||||||
|
requirements:
|
||||||
|
${epic.reqs.map(r => ` - ${r}`).join('\n')}
|
||||||
|
architecture:
|
||||||
|
${epic.adrs.map(a => ` - ${a}`).join('\n')}
|
||||||
|
dependencies:
|
||||||
|
${epic.deps.map(d => ` - ${d}`).join('\n')}
|
||||||
|
status: draft
|
||||||
|
---`
|
||||||
|
const storiesContent = epic.stories.map(s => `### ${s.id}: ${s.title}
|
||||||
|
|
||||||
|
**User Story**: ${s.user_story}
|
||||||
|
**Size**: ${s.size}
|
||||||
|
**Traces**: ${s.traces.join(', ')}
|
||||||
|
|
||||||
|
**Acceptance Criteria**:
|
||||||
|
${s.acceptance_criteria.map((ac, i) => `${i+1}. ${ac}`).join('\n')}
|
||||||
|
`).join('\n')
|
||||||
|
|
||||||
|
const epicContent = `${epicFrontmatter}
|
||||||
|
|
||||||
|
# EPIC-${epic.id}: ${epic.title}
|
||||||
|
|
||||||
|
## Description
|
||||||
|
${epic.description}
|
||||||
|
|
||||||
|
## Stories
|
||||||
|
${storiesContent}
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
${epic.reqs.map(r => `- [${r}](../requirements/${r}.md)`).join('\n')}
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
${epic.adrs.map(a => `- [${a}](../architecture/${a}.md)`).join('\n')}
|
||||||
|
`
|
||||||
|
Write(`${sessionFolder}/spec/epics/EPIC-${epic.id}-${epic.slug}.md`, epicContent)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write _index.md (with Mermaid dependency diagram + MVP + links)
|
||||||
|
const epicsIndexFrontmatter = `---
|
||||||
|
session_id: ${specConfig?.session_id || 'unknown'}
|
||||||
|
phase: 5
|
||||||
|
document_type: epics-index
|
||||||
|
status: draft
|
||||||
|
generated_at: ${timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- ../requirements/_index.md
|
||||||
|
- ../architecture/_index.md
|
||||||
|
---`
|
||||||
|
|
||||||
|
const epicsIndexContent = `${epicsIndexFrontmatter}
|
||||||
|
|
||||||
|
# Epics & Stories
|
||||||
|
|
||||||
|
## Epic Overview
|
||||||
|
| ID | Title | Priority | MVP | Size | Status |
|
||||||
|
|----|-------|----------|-----|------|--------|
|
||||||
|
${epicsList.map(e => `| [EPIC-${e.id}](EPIC-${e.id}-${e.slug}.md) | ${e.title} | ${e.priority} | ${e.mvp ? '✓' : ''} | ${e.size} | draft |`).join('\n')}
|
||||||
|
|
||||||
|
## Dependency Map
|
||||||
|
\`\`\`mermaid
|
||||||
|
${cliOutput.dependency_diagram}
|
||||||
|
\`\`\`
|
||||||
|
|
||||||
|
## Execution Order
|
||||||
|
${cliOutput.execution_order}
|
||||||
|
|
||||||
|
## MVP Scope
|
||||||
|
${cliOutput.mvp_definition}
|
||||||
|
|
||||||
|
### MVP Epics
|
||||||
|
${epicsList.filter(e => e.mvp).map(e => `- EPIC-${e.id}: ${e.title}`).join('\n')}
|
||||||
|
|
||||||
|
### Post-MVP
|
||||||
|
${epicsList.filter(e => !e.mvp).map(e => `- EPIC-${e.id}: ${e.title}`).join('\n')}
|
||||||
|
|
||||||
|
## Traceability Matrix
|
||||||
|
${generateTraceabilityMatrix(epicsList, priorDocs.requirementsIndex)}
|
||||||
|
`
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/spec/epics/_index.md`, epicsIndexContent)
|
||||||
|
|
||||||
|
return {
|
||||||
|
outputPath: 'spec/epics/_index.md',
|
||||||
|
documentSummary: `Epics generated: ${epicsList.length} total, ${epicsList.filter(e => e.mvp).length} in MVP`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Helper Functions
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function parseFunctionalRequirements(cliOutput) {
|
||||||
|
// Parse CLI JSON output to extract functional requirements
|
||||||
|
// Returns: [{ id, title, description, user_story, acceptance_criteria[], priority, slug }]
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseNonFunctionalRequirements(cliOutput) {
|
||||||
|
// Parse CLI JSON output to extract non-functional requirements
|
||||||
|
// Returns: [{ id, type, title, requirement, metric, target, slug }]
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseADRs(geminiOutput, codexOutput) {
|
||||||
|
// Parse architecture outputs to extract ADRs with review feedback
|
||||||
|
// Returns: [{ id, title, context, decision, alternatives[], consequences, reviewFeedback, slug }]
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseEpics(cliOutput) {
|
||||||
|
// Parse CLI JSON output to extract Epics and Stories
|
||||||
|
// Returns: [{ id, title, description, priority, mvp, size, stories[], reqs[], adrs[], deps[], slug }]
|
||||||
|
}
|
||||||
|
|
||||||
|
function fillTemplate(template, data) {
|
||||||
|
// Fill template placeholders with data
|
||||||
|
// Apply document-standards.md formatting rules
|
||||||
|
}
|
||||||
|
|
||||||
|
function generateTraceabilityMatrix(epics, requirements) {
|
||||||
|
// Generate traceability matrix showing Epic → Requirement mappings
|
||||||
|
}
|
||||||
|
```
|
||||||
257
.claude/skills/team-lifecycle-v2/roles/writer/role.md
Normal file
257
.claude/skills/team-lifecycle-v2/roles/writer/role.md
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
# Role: writer
|
||||||
|
|
||||||
|
Product Brief, Requirements/PRD, Architecture, and Epics & Stories document generation. Maps to spec-generator Phases 2-5.
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
- **Name**: `writer`
|
||||||
|
- **Task Prefix**: `DRAFT-*`
|
||||||
|
- **Output Tag**: `[writer]`
|
||||||
|
- **Responsibility**: Load Context → Generate Document → Incorporate Feedback → Report
|
||||||
|
- **Communication**: SendMessage to coordinator only
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
### MUST
|
||||||
|
- Only process DRAFT-* tasks
|
||||||
|
- Read templates before generating documents
|
||||||
|
- Follow document-standards.md formatting rules
|
||||||
|
- Integrate discussion feedback when available
|
||||||
|
- Generate proper frontmatter for all documents
|
||||||
|
|
||||||
|
### MUST NOT
|
||||||
|
- Create tasks for other roles
|
||||||
|
- Contact other workers directly
|
||||||
|
- Skip template loading
|
||||||
|
- Modify discussion records
|
||||||
|
- Generate documents without loading prior dependencies
|
||||||
|
|
||||||
|
## Message Types
|
||||||
|
|
||||||
|
| Type | Direction | Trigger | Description |
|
||||||
|
|------|-----------|---------|-------------|
|
||||||
|
| `draft_ready` | writer → coordinator | Document writing complete | With document path and type |
|
||||||
|
| `draft_revision` | writer → coordinator | Document revised and resubmitted | Describes changes made |
|
||||||
|
| `impl_progress` | writer → coordinator | Long writing progress | Multi-document stage progress |
|
||||||
|
| `error` | writer → coordinator | Unrecoverable error | Template missing, insufficient context, etc. |
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
Before every `SendMessage`, MUST call `mcp__ccw-tools__team_msg` to log:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Document ready
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "writer",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "draft_ready",
|
||||||
|
summary: "[writer] Product Brief complete",
|
||||||
|
  ref: `${sessionFolder}/spec/product-brief.md`
|
||||||
|
})
|
||||||
|
|
||||||
|
// Document revision
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "writer",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "draft_revision",
|
||||||
|
summary: "[writer] Requirements revised per discussion feedback"
|
||||||
|
})
|
||||||
|
|
||||||
|
// Error report
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log",
|
||||||
|
team: teamName,
|
||||||
|
from: "writer",
|
||||||
|
to: "coordinator",
|
||||||
|
type: "error",
|
||||||
|
summary: "[writer] Input artifact missing, cannot generate document"
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Fallback
|
||||||
|
|
||||||
|
When `mcp__ccw-tools__team_msg` MCP is unavailable:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ccw team log --team "${teamName}" --from "writer" --to "coordinator" --type "draft_ready" --summary "[writer] Brief complete" --ref "${sessionFolder}/spec/product-brief.md" --json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
### Available Commands
|
||||||
|
- `commands/generate-doc.md` - Multi-CLI document generation for 4 doc types
|
||||||
|
|
||||||
|
### Subagent Capabilities
|
||||||
|
- None
|
||||||
|
|
||||||
|
### CLI Capabilities
|
||||||
|
- `gemini`, `codex`, `claude` for multi-perspective analysis
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('DRAFT-') &&
|
||||||
|
t.owner === 'writer' &&
|
||||||
|
t.status === 'pending' &&
|
||||||
|
t.blockedBy.length === 0
|
||||||
|
)
|
||||||
|
|
||||||
|
if (myTasks.length === 0) return // idle
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Context & Discussion Loading
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Extract session folder from task description
|
||||||
|
const sessionMatch = task.description.match(/Session:\s*(.+)/)
|
||||||
|
const sessionFolder = sessionMatch ? sessionMatch[1].trim() : ''
|
||||||
|
|
||||||
|
// Load session config
|
||||||
|
let specConfig = null
|
||||||
|
try { specConfig = JSON.parse(Read(`${sessionFolder}/spec/spec-config.json`)) } catch {}
|
||||||
|
|
||||||
|
// Determine document type from task subject
|
||||||
|
const docType = task.subject.includes('Product Brief') ? 'product-brief'
|
||||||
|
: task.subject.includes('Requirements') || task.subject.includes('PRD') ? 'requirements'
|
||||||
|
: task.subject.includes('Architecture') ? 'architecture'
|
||||||
|
: task.subject.includes('Epics') ? 'epics'
|
||||||
|
: 'unknown'
|
||||||
|
|
||||||
|
// Load discussion feedback (from preceding DISCUSS task)
|
||||||
|
const discussionFiles = {
|
||||||
|
'product-brief': 'discussions/discuss-001-scope.md',
|
||||||
|
'requirements': 'discussions/discuss-002-brief.md',
|
||||||
|
'architecture': 'discussions/discuss-003-requirements.md',
|
||||||
|
'epics': 'discussions/discuss-004-architecture.md'
|
||||||
|
}
|
||||||
|
let discussionFeedback = null
|
||||||
|
try { discussionFeedback = Read(`${sessionFolder}/${discussionFiles[docType]}`) } catch {}
|
||||||
|
|
||||||
|
// Load prior documents progressively
|
||||||
|
const priorDocs = {}
|
||||||
|
if (docType !== 'product-brief') {
|
||||||
|
try { priorDocs.discoveryContext = Read(`${sessionFolder}/spec/discovery-context.json`) } catch {}
|
||||||
|
}
|
||||||
|
if (['requirements', 'architecture', 'epics'].includes(docType)) {
|
||||||
|
try { priorDocs.productBrief = Read(`${sessionFolder}/spec/product-brief.md`) } catch {}
|
||||||
|
}
|
||||||
|
if (['architecture', 'epics'].includes(docType)) {
|
||||||
|
try { priorDocs.requirementsIndex = Read(`${sessionFolder}/spec/requirements/_index.md`) } catch {}
|
||||||
|
}
|
||||||
|
if (docType === 'epics') {
|
||||||
|
try { priorDocs.architectureIndex = Read(`${sessionFolder}/spec/architecture/_index.md`) } catch {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Document Generation
|
||||||
|
|
||||||
|
**Delegate to command file**:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Load and execute document generation command
|
||||||
|
const generateDocCommand = Read('commands/generate-doc.md')
|
||||||
|
|
||||||
|
// Execute command with context:
|
||||||
|
// - docType
|
||||||
|
// - sessionFolder
|
||||||
|
// - specConfig
|
||||||
|
// - discussionFeedback
|
||||||
|
// - priorDocs
|
||||||
|
// - task
|
||||||
|
|
||||||
|
// Command will handle:
|
||||||
|
// - Loading document standards
|
||||||
|
// - Loading appropriate template
|
||||||
|
// - Building shared context
|
||||||
|
// - Routing to type-specific generation (DRAFT-001/002/003/004)
|
||||||
|
// - Integrating discussion feedback
|
||||||
|
// - Writing output files
|
||||||
|
|
||||||
|
// Returns: { outputPath, documentSummary }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Self-Validation
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const docContent = Read(`${sessionFolder}/${outputPath}`)
|
||||||
|
|
||||||
|
const validationChecks = {
|
||||||
|
has_frontmatter: /^---\n[\s\S]+?\n---/.test(docContent),
|
||||||
|
sections_complete: /* verify all required sections present */,
|
||||||
|
cross_references: docContent.includes('session_id'),
|
||||||
|
discussion_integrated: !discussionFeedback || docContent.includes('Discussion')
|
||||||
|
}
|
||||||
|
|
||||||
|
const allValid = Object.values(validationChecks).every(v => v)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report to Coordinator
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const docTypeLabel = {
|
||||||
|
'product-brief': 'Product Brief',
|
||||||
|
'requirements': 'Requirements/PRD',
|
||||||
|
'architecture': 'Architecture Document',
|
||||||
|
'epics': 'Epics & Stories'
|
||||||
|
}
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({
|
||||||
|
operation: "log", team: teamName,
|
||||||
|
from: "writer", to: "coordinator",
|
||||||
|
type: "draft_ready",
|
||||||
|
summary: `[writer] ${docTypeLabel[docType]} 完成: ${allValid ? '验证通过' : '部分验证失败'}`,
|
||||||
|
ref: `${sessionFolder}/${outputPath}`
|
||||||
|
})
|
||||||
|
|
||||||
|
SendMessage({
|
||||||
|
type: "message",
|
||||||
|
recipient: "coordinator",
|
||||||
|
content: `[writer] ## 文档撰写结果
|
||||||
|
|
||||||
|
**Task**: ${task.subject}
|
||||||
|
**文档类型**: ${docTypeLabel[docType]}
|
||||||
|
**验证状态**: ${allValid ? 'PASS' : 'PARTIAL'}
|
||||||
|
|
||||||
|
### 文档摘要
|
||||||
|
${documentSummary}
|
||||||
|
|
||||||
|
### 讨论反馈整合
|
||||||
|
${discussionFeedback ? '已整合前序讨论反馈' : '首次撰写'}
|
||||||
|
|
||||||
|
### 自验证结果
|
||||||
|
${Object.entries(validationChecks).map(([k, v]) => '- ' + k + ': ' + (v ? 'PASS' : 'FAIL')).join('\n')}
|
||||||
|
|
||||||
|
### 输出位置
|
||||||
|
${sessionFolder}/${outputPath}
|
||||||
|
|
||||||
|
文档已就绪,可进入讨论轮次。`,
|
||||||
|
summary: `[writer] ${docTypeLabel[docType]} 就绪`
|
||||||
|
})
|
||||||
|
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
|
||||||
|
// Check for next DRAFT task → back to Phase 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| No DRAFT-* tasks available | Idle, wait for coordinator assignment |
|
||||||
|
| Prior document not found | Notify coordinator, request prerequisite |
|
||||||
|
| CLI analysis failure | Retry with fallback tool, then direct generation |
|
||||||
|
| Template sections incomplete | Generate best-effort, note gaps in report |
|
||||||
|
| Discussion feedback contradicts prior docs | Note conflict in document, flag for next discussion |
|
||||||
|
| Session folder missing | Notify coordinator, request session path |
|
||||||
|
| Unexpected error | Log error via team_msg, report to coordinator |
|
||||||
192
.claude/skills/team-lifecycle-v2/specs/document-standards.md
Normal file
192
.claude/skills/team-lifecycle-v2/specs/document-standards.md
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
# Document Standards
|
||||||
|
|
||||||
|
Defines format conventions, YAML frontmatter schema, naming rules, and content structure for all spec-generator outputs.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Phase | Usage | Section |
|
||||||
|
|-------|-------|---------|
|
||||||
|
| All Phases | Frontmatter format | YAML Frontmatter Schema |
|
||||||
|
| All Phases | File naming | Naming Conventions |
|
||||||
|
| Phase 2-5 | Document structure | Content Structure |
|
||||||
|
| Phase 6 | Validation reference | All sections |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## YAML Frontmatter Schema
|
||||||
|
|
||||||
|
Every generated document MUST begin with YAML frontmatter:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
session_id: SPEC-{slug}-{YYYY-MM-DD}
|
||||||
|
phase: {1-6}
|
||||||
|
document_type: {product-brief|requirements|architecture|epics|readiness-report|spec-summary}
|
||||||
|
status: draft|review|complete
|
||||||
|
generated_at: {ISO8601 timestamp}
|
||||||
|
stepsCompleted: []
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- {list of input documents used}
|
||||||
|
---
|
||||||
|
```
|
||||||
|
|
||||||
|
### Field Definitions
|
||||||
|
|
||||||
|
| Field | Type | Required | Description |
|
||||||
|
|-------|------|----------|-------------|
|
||||||
|
| `session_id` | string | Yes | Session identifier matching spec-config.json |
|
||||||
|
| `phase` | number | Yes | Phase number that generated this document (1-6) |
|
||||||
|
| `document_type` | string | Yes | One of: product-brief, requirements, architecture, epics, readiness-report, spec-summary |
|
||||||
|
| `status` | enum | Yes | draft (initial), review (user reviewed), complete (finalized) |
|
||||||
|
| `generated_at` | string | Yes | ISO8601 timestamp of generation |
|
||||||
|
| `stepsCompleted` | array | Yes | List of step IDs completed during generation |
|
||||||
|
| `version` | number | Yes | Document version, incremented on re-generation |
|
||||||
|
| `dependencies` | array | No | List of input files this document depends on |
|
||||||
|
|
||||||
|
### Status Transitions
|
||||||
|
|
||||||
|
```
|
||||||
|
draft -> review -> complete
|
||||||
|
| ^
|
||||||
|
+-------------------+ (direct promotion in auto mode)
|
||||||
|
```
|
||||||
|
|
||||||
|
- **draft**: Initial generation, not yet user-reviewed
|
||||||
|
- **review**: User has reviewed and provided feedback
|
||||||
|
- **complete**: Finalized, ready for downstream consumption
|
||||||
|
|
||||||
|
In auto mode (`-y`), documents are promoted directly from `draft` to `complete`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
### Session ID Format
|
||||||
|
|
||||||
|
```
|
||||||
|
SPEC-{slug}-{YYYY-MM-DD}
|
||||||
|
```
|
||||||
|
|
||||||
|
- **slug**: Lowercase, alphanumeric + Chinese characters, hyphens as separators, max 40 chars
|
||||||
|
- **date**: UTC+8 date in YYYY-MM-DD format
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `SPEC-task-management-system-2026-02-11`
|
||||||
|
- `SPEC-user-auth-oauth-2026-02-11`
|
||||||
|
|
||||||
|
### Output Files
|
||||||
|
|
||||||
|
| File | Phase | Description |
|
||||||
|
|------|-------|-------------|
|
||||||
|
| `spec-config.json` | 1 | Session configuration and state |
|
||||||
|
| `discovery-context.json` | 1 | Codebase exploration results (optional) |
|
||||||
|
| `product-brief.md` | 2 | Product brief document |
|
||||||
|
| `requirements.md` | 3 | PRD document |
|
||||||
|
| `architecture.md` | 4 | Architecture decisions document |
|
||||||
|
| `epics.md` | 5 | Epic/Story breakdown document |
|
||||||
|
| `readiness-report.md` | 6 | Quality validation report |
|
||||||
|
| `spec-summary.md` | 6 | One-page executive summary |
|
||||||
|
|
||||||
|
### Output Directory
|
||||||
|
|
||||||
|
```
|
||||||
|
.workflow/.spec/{session-id}/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Content Structure
|
||||||
|
|
||||||
|
### Heading Hierarchy
|
||||||
|
|
||||||
|
- `#` (H1): Document title only (one per document)
|
||||||
|
- `##` (H2): Major sections
|
||||||
|
- `###` (H3): Subsections
|
||||||
|
- `####` (H4): Detail items (use sparingly)
|
||||||
|
|
||||||
|
Maximum depth: 4 levels. Prefer flat structures.
|
||||||
|
|
||||||
|
### Section Ordering
|
||||||
|
|
||||||
|
Every document follows this general pattern:
|
||||||
|
|
||||||
|
1. **YAML Frontmatter** (mandatory)
|
||||||
|
2. **Title** (H1)
|
||||||
|
3. **Executive Summary** (2-3 sentences)
|
||||||
|
4. **Core Content Sections** (H2, document-specific)
|
||||||
|
5. **Open Questions / Risks** (if applicable)
|
||||||
|
6. **References / Traceability** (links to upstream/downstream docs)
|
||||||
|
|
||||||
|
### Formatting Rules
|
||||||
|
|
||||||
|
| Element | Format | Example |
|
||||||
|
|---------|--------|---------|
|
||||||
|
| Requirements | `REQ-{NNN}` prefix | REQ-001: User login |
|
||||||
|
| Acceptance criteria | Checkbox list | `- [ ] User can log in with email` |
|
||||||
|
| Architecture decisions | `ADR-{NNN}` prefix | ADR-001: Use PostgreSQL |
|
||||||
|
| Epics | `EPIC-{NNN}` prefix | EPIC-001: Authentication |
|
||||||
|
| Stories | `STORY-{EPIC}-{NNN}` prefix | STORY-001-001: Login form |
|
||||||
|
| Priority tags | MoSCoW labels | `[Must]`, `[Should]`, `[Could]`, `[Won't]` |
|
||||||
|
| Mermaid diagrams | Fenced code blocks | ```` ```mermaid ... ``` ```` |
|
||||||
|
| Code examples | Language-tagged blocks | ```` ```typescript ... ``` ```` |
|
||||||
|
|
||||||
|
### Cross-Reference Format
|
||||||
|
|
||||||
|
Use relative references between documents:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
See [Product Brief](product-brief.md#section-name) for details.
|
||||||
|
Derived from [REQ-001](requirements.md#req-001).
|
||||||
|
```
|
||||||
|
|
||||||
|
### Language
|
||||||
|
|
||||||
|
- Document body: Follow user's input language (Chinese or English)
|
||||||
|
- Technical identifiers: Always English (REQ-001, ADR-001, EPIC-001)
|
||||||
|
- YAML frontmatter keys: Always English
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## spec-config.json Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"session_id": "string (required)",
|
||||||
|
"seed_input": "string (required) - original user input",
|
||||||
|
"input_type": "text|file (required)",
|
||||||
|
"timestamp": "ISO8601 (required)",
|
||||||
|
"mode": "interactive|auto (required)",
|
||||||
|
"complexity": "simple|moderate|complex (required)",
|
||||||
|
"depth": "light|standard|comprehensive (required)",
|
||||||
|
"focus_areas": ["string array"],
|
||||||
|
"seed_analysis": {
|
||||||
|
"problem_statement": "string",
|
||||||
|
"target_users": ["string array"],
|
||||||
|
"domain": "string",
|
||||||
|
"constraints": ["string array"],
|
||||||
|
"dimensions": ["string array - 3-5 exploration dimensions"]
|
||||||
|
},
|
||||||
|
"has_codebase": "boolean",
|
||||||
|
"phasesCompleted": [
|
||||||
|
{
|
||||||
|
"phase": "number (1-6)",
|
||||||
|
"name": "string (phase name)",
|
||||||
|
"output_file": "string (primary output file)",
|
||||||
|
"completed_at": "ISO8601"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Validation Checklist
|
||||||
|
|
||||||
|
- [ ] Every document starts with valid YAML frontmatter
|
||||||
|
- [ ] `session_id` matches across all documents in a session
|
||||||
|
- [ ] `status` field reflects current document state
|
||||||
|
- [ ] All cross-references resolve to valid targets
|
||||||
|
- [ ] Heading hierarchy is correct (no skipped levels)
|
||||||
|
- [ ] Technical identifiers use correct prefixes
|
||||||
|
- [ ] Output files are in the correct directory
|
||||||
207
.claude/skills/team-lifecycle-v2/specs/quality-gates.md
Normal file
207
.claude/skills/team-lifecycle-v2/specs/quality-gates.md
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
# Quality Gates
|
||||||
|
|
||||||
|
Per-phase quality gate criteria and scoring dimensions for spec-generator outputs.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
| Phase | Usage | Section |
|
||||||
|
|-------|-------|---------|
|
||||||
|
| Phase 2-5 | Post-generation self-check | Per-Phase Gates |
|
||||||
|
| Phase 6 | Cross-document validation | Cross-Document Validation |
|
||||||
|
| Phase 6 | Final scoring | Scoring Dimensions |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quality Thresholds
|
||||||
|
|
||||||
|
| Gate | Score | Action |
|
||||||
|
|------|-------|--------|
|
||||||
|
| **Pass** | >= 80% | Continue to next phase |
|
||||||
|
| **Review** | 60-79% | Log warnings, continue with caveats |
|
||||||
|
| **Fail** | < 60% | Must address issues before continuing |
|
||||||
|
|
||||||
|
In auto mode (`-y`), Review-level issues are logged but do not block progress.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Scoring Dimensions
|
||||||
|
|
||||||
|
### 1. Completeness (25%)
|
||||||
|
|
||||||
|
All required sections present with substantive content.
|
||||||
|
|
||||||
|
| Score | Criteria |
|
||||||
|
|-------|----------|
|
||||||
|
| 100% | All template sections filled with detailed content |
|
||||||
|
| 75% | All sections present, some lack detail |
|
||||||
|
| 50% | Major sections present but minor sections missing |
|
||||||
|
| 25% | Multiple major sections missing or empty |
|
||||||
|
| 0% | Document is a skeleton only |
|
||||||
|
|
||||||
|
### 2. Consistency (25%)
|
||||||
|
|
||||||
|
Terminology, formatting, and references are uniform across documents.
|
||||||
|
|
||||||
|
| Score | Criteria |
|
||||||
|
|-------|----------|
|
||||||
|
| 100% | All terms consistent, all references valid, formatting uniform |
|
||||||
|
| 75% | Minor terminology variations, all references valid |
|
||||||
|
| 50% | Some inconsistent terms, 1-2 broken references |
|
||||||
|
| 25% | Frequent inconsistencies, multiple broken references |
|
||||||
|
| 0% | Documents contradict each other |
|
||||||
|
|
||||||
|
### 3. Traceability (25%)
|
||||||
|
|
||||||
|
Requirements, architecture decisions, and stories trace back to goals.
|
||||||
|
|
||||||
|
| Score | Criteria |
|
||||||
|
|-------|----------|
|
||||||
|
| 100% | Every story traces to a requirement, every requirement traces to a goal |
|
||||||
|
| 75% | Most items traceable, few orphans |
|
||||||
|
| 50% | Partial traceability, some disconnected items |
|
||||||
|
| 25% | Weak traceability, many orphan items |
|
||||||
|
| 0% | No traceability between documents |
|
||||||
|
|
||||||
|
### 4. Depth (25%)
|
||||||
|
|
||||||
|
Content provides sufficient detail for execution teams.
|
||||||
|
|
||||||
|
| Score | Criteria |
|
||||||
|
|-------|----------|
|
||||||
|
| 100% | Acceptance criteria specific and testable, architecture decisions justified, stories estimable |
|
||||||
|
| 75% | Most items detailed enough, few vague areas |
|
||||||
|
| 50% | Mix of detailed and vague content |
|
||||||
|
| 25% | Mostly high-level, lacking actionable detail |
|
||||||
|
| 0% | Too abstract for execution |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Per-Phase Quality Gates
|
||||||
|
|
||||||
|
### Phase 1: Discovery
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| Session ID valid | Matches `SPEC-{slug}-{date}` format | Error |
|
||||||
|
| Problem statement exists | Non-empty, >= 20 characters | Error |
|
||||||
|
| Target users identified | >= 1 user group | Error |
|
||||||
|
| Dimensions generated | 3-5 exploration dimensions | Warning |
|
||||||
|
| Constraints listed | >= 0 (can be empty with justification) | Info |
|
||||||
|
|
||||||
|
### Phase 2: Product Brief
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| Vision statement | Clear, 1-3 sentences | Error |
|
||||||
|
| Problem statement | Specific and measurable | Error |
|
||||||
|
| Target users | >= 1 persona with needs described | Error |
|
||||||
|
| Goals defined | >= 2 measurable goals | Error |
|
||||||
|
| Success metrics | >= 2 quantifiable metrics | Warning |
|
||||||
|
| Scope boundaries | In-scope and out-of-scope listed | Warning |
|
||||||
|
| Multi-perspective | >= 2 CLI perspectives synthesized | Info |
|
||||||
|
|
||||||
|
### Phase 3: Requirements (PRD)
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| Functional requirements | >= 3 with REQ-NNN IDs | Error |
|
||||||
|
| Acceptance criteria | Every requirement has >= 1 criterion | Error |
|
||||||
|
| MoSCoW priority | Every requirement tagged | Error |
|
||||||
|
| Non-functional requirements | >= 1 (performance, security, etc.) | Warning |
|
||||||
|
| User stories | >= 1 per Must-have requirement | Warning |
|
||||||
|
| Traceability | Requirements trace to product brief goals | Warning |
|
||||||
|
|
||||||
|
### Phase 4: Architecture
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| Component diagram | Present (Mermaid or ASCII) | Error |
|
||||||
|
| Tech stack specified | Languages, frameworks, key libraries | Error |
|
||||||
|
| ADR present | >= 1 Architecture Decision Record | Error |
|
||||||
|
| ADR has alternatives | Each ADR lists >= 2 options considered | Warning |
|
||||||
|
| Integration points | External systems/APIs identified | Warning |
|
||||||
|
| Data model | Key entities and relationships described | Warning |
|
||||||
|
| Codebase mapping | Mapped to existing code (if has_codebase) | Info |
|
||||||
|
|
||||||
|
### Phase 5: Epics & Stories
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| Epics defined | 3-7 epics with EPIC-NNN IDs | Error |
|
||||||
|
| MVP subset | >= 1 epic tagged as MVP | Error |
|
||||||
|
| Stories per epic | 2-5 stories per epic | Error |
|
||||||
|
| Story format | "As a...I want...So that..." pattern | Warning |
|
||||||
|
| Dependency map | Cross-epic dependencies documented | Warning |
|
||||||
|
| Estimation hints | Relative sizing (S/M/L/XL) per story | Info |
|
||||||
|
| Traceability | Stories trace to requirements | Warning |
|
||||||
|
|
||||||
|
### Phase 6: Readiness Check
|
||||||
|
|
||||||
|
| Check | Criteria | Severity |
|
||||||
|
|-------|----------|----------|
|
||||||
|
| All documents exist | product-brief, requirements, architecture, epics | Error |
|
||||||
|
| Frontmatter valid | All YAML frontmatter parseable and correct | Error |
|
||||||
|
| Cross-references valid | All document links resolve | Error |
|
||||||
|
| Overall score >= 60% | Weighted average across 4 dimensions | Error |
|
||||||
|
| No unresolved Errors | All Error-severity issues addressed | Error |
|
||||||
|
| Summary generated | spec-summary.md created | Warning |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cross-Document Validation
|
||||||
|
|
||||||
|
Checks performed during Phase 6 across all documents:
|
||||||
|
|
||||||
|
### Completeness Matrix
|
||||||
|
|
||||||
|
```
|
||||||
|
Product Brief goals -> Requirements (each goal has >= 1 requirement)
|
||||||
|
Requirements -> Architecture (each Must requirement has design coverage)
|
||||||
|
Requirements -> Epics (each Must requirement appears in >= 1 story)
|
||||||
|
Architecture ADRs -> Epics (tech choices reflected in implementation stories)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Consistency Checks
|
||||||
|
|
||||||
|
| Check | Documents | Rule |
|
||||||
|
|-------|-----------|------|
|
||||||
|
| Terminology | All | Same term used consistently (no synonyms for same concept) |
|
||||||
|
| User personas | Brief + PRD + Epics | Same user names/roles throughout |
|
||||||
|
| Scope | Brief + PRD | PRD scope does not exceed brief scope |
|
||||||
|
| Tech stack | Architecture + Epics | Stories reference correct technologies |
|
||||||
|
|
||||||
|
### Traceability Matrix Format
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
| Goal | Requirements | Architecture | Epics |
|
||||||
|
|------|-------------|--------------|-------|
|
||||||
|
| G-001: ... | REQ-001, REQ-002 | ADR-001 | EPIC-001 |
|
||||||
|
| G-002: ... | REQ-003 | ADR-002 | EPIC-002, EPIC-003 |
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Issue Classification
|
||||||
|
|
||||||
|
### Error (Must Fix)
|
||||||
|
|
||||||
|
- Missing required document or section
|
||||||
|
- Broken cross-references
|
||||||
|
- Contradictory information between documents
|
||||||
|
- Empty acceptance criteria on Must-have requirements
|
||||||
|
- No MVP subset defined in epics
|
||||||
|
|
||||||
|
### Warning (Should Fix)
|
||||||
|
|
||||||
|
- Vague acceptance criteria
|
||||||
|
- Missing non-functional requirements
|
||||||
|
- No success metrics defined
|
||||||
|
- Incomplete traceability
|
||||||
|
- Missing architecture review notes
|
||||||
|
|
||||||
|
### Info (Nice to Have)
|
||||||
|
|
||||||
|
- Could add more detailed personas
|
||||||
|
- Consider additional ADR alternatives
|
||||||
|
- Story estimation hints missing
|
||||||
|
- Mermaid diagrams could be more detailed
|
||||||
82
.claude/skills/team-lifecycle-v2/specs/team-config.json
Normal file
82
.claude/skills/team-lifecycle-v2/specs/team-config.json
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
{
|
||||||
|
"team_name": "team-lifecycle",
|
||||||
|
"team_display_name": "Team Lifecycle",
|
||||||
|
"description": "Unified team skill covering spec-to-dev-to-test full lifecycle",
|
||||||
|
"version": "2.0.0",
|
||||||
|
"architecture": "folder-based",
|
||||||
|
"role_structure": "roles/{name}/role.md + roles/{name}/commands/*.md",
|
||||||
|
|
||||||
|
"roles": {
|
||||||
|
"coordinator": {
|
||||||
|
"task_prefix": null,
|
||||||
|
"responsibility": "Pipeline orchestration, requirement clarification, task chain creation, message dispatch",
|
||||||
|
"message_types": ["plan_approved", "plan_revision", "task_unblocked", "fix_required", "error", "shutdown"]
|
||||||
|
},
|
||||||
|
"analyst": {
|
||||||
|
"task_prefix": "RESEARCH",
|
||||||
|
"responsibility": "Seed analysis, codebase exploration, multi-dimensional context gathering",
|
||||||
|
"message_types": ["research_ready", "research_progress", "error"]
|
||||||
|
},
|
||||||
|
"writer": {
|
||||||
|
"task_prefix": "DRAFT",
|
||||||
|
"responsibility": "Product Brief / PRD / Architecture / Epics document generation",
|
||||||
|
"message_types": ["draft_ready", "draft_revision", "impl_progress", "error"]
|
||||||
|
},
|
||||||
|
"discussant": {
|
||||||
|
"task_prefix": "DISCUSS",
|
||||||
|
"responsibility": "Multi-perspective critique, consensus building, conflict escalation",
|
||||||
|
"message_types": ["discussion_ready", "discussion_blocked", "impl_progress", "error"]
|
||||||
|
},
|
||||||
|
"planner": {
|
||||||
|
"task_prefix": "PLAN",
|
||||||
|
"responsibility": "Multi-angle code exploration, structured implementation planning",
|
||||||
|
"message_types": ["plan_ready", "plan_revision", "impl_progress", "error"]
|
||||||
|
},
|
||||||
|
"executor": {
|
||||||
|
"task_prefix": "IMPL",
|
||||||
|
"responsibility": "Code implementation following approved plans",
|
||||||
|
"message_types": ["impl_complete", "impl_progress", "error"]
|
||||||
|
},
|
||||||
|
"tester": {
|
||||||
|
"task_prefix": "TEST",
|
||||||
|
"responsibility": "Adaptive test-fix cycles, progressive testing, quality gates",
|
||||||
|
"message_types": ["test_result", "impl_progress", "fix_required", "error"]
|
||||||
|
},
|
||||||
|
"reviewer": {
|
||||||
|
"task_prefix": "REVIEW",
|
||||||
|
"additional_prefixes": ["QUALITY"],
|
||||||
|
"responsibility": "Code review (REVIEW-*) + Spec quality validation (QUALITY-*)",
|
||||||
|
"message_types": ["review_result", "quality_result", "fix_required", "error"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"pipelines": {
|
||||||
|
"spec-only": {
|
||||||
|
"description": "Specification pipeline: research → discuss → draft → quality",
|
||||||
|
"task_chain": [
|
||||||
|
"RESEARCH-001",
|
||||||
|
"DISCUSS-001", "DRAFT-001", "DISCUSS-002",
|
||||||
|
"DRAFT-002", "DISCUSS-003", "DRAFT-003", "DISCUSS-004",
|
||||||
|
"DRAFT-004", "DISCUSS-005", "QUALITY-001", "DISCUSS-006"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"impl-only": {
|
||||||
|
"description": "Implementation pipeline: plan → implement → test + review",
|
||||||
|
"task_chain": ["PLAN-001", "IMPL-001", "TEST-001", "REVIEW-001"]
|
||||||
|
},
|
||||||
|
"full-lifecycle": {
|
||||||
|
"description": "Full lifecycle: spec pipeline → implementation pipeline",
|
||||||
|
"task_chain": "spec-only + impl-only (PLAN-001 blockedBy DISCUSS-006)"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"collaboration_patterns": ["CP-1", "CP-2", "CP-4", "CP-5", "CP-6", "CP-10"],
|
||||||
|
|
||||||
|
"session_dirs": {
|
||||||
|
"base": ".workflow/.team/TLS-{slug}-{YYYY-MM-DD}/",
|
||||||
|
"spec": "spec/",
|
||||||
|
"discussions": "discussions/",
|
||||||
|
"plan": "plan/",
|
||||||
|
"messages": ".workflow/.team-msg/{team-name}/"
|
||||||
|
}
|
||||||
|
}
|
||||||
254
.claude/skills/team-lifecycle-v2/templates/architecture-doc.md
Normal file
254
.claude/skills/team-lifecycle-v2/templates/architecture-doc.md
Normal file
@@ -0,0 +1,254 @@
|
|||||||
|
# Architecture Document Template (Directory Structure)
|
||||||
|
|
||||||
|
Template for generating architecture decision documents as a directory of individual ADR files in Phase 4.
|
||||||
|
|
||||||
|
## Usage Context
|
||||||
|
|
||||||
|
| Phase | Usage |
|
||||||
|
|-------|-------|
|
||||||
|
| Phase 4 (Architecture) | Generate `architecture/` directory from requirements analysis |
|
||||||
|
| Output Location | `{workDir}/architecture/` |
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
{workDir}/architecture/
|
||||||
|
├── _index.md # Overview, components, tech stack, data model, security
|
||||||
|
├── ADR-001-{slug}.md # Individual Architecture Decision Record
|
||||||
|
├── ADR-002-{slug}.md
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: _index.md
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
session_id: {session_id}
|
||||||
|
phase: 4
|
||||||
|
document_type: architecture-index
|
||||||
|
status: draft
|
||||||
|
generated_at: {timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- ../spec-config.json
|
||||||
|
- ../product-brief.md
|
||||||
|
- ../requirements/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
# Architecture: {product_name}
|
||||||
|
|
||||||
|
{executive_summary - high-level architecture approach and key decisions}
|
||||||
|
|
||||||
|
## System Overview
|
||||||
|
|
||||||
|
### Architecture Style
|
||||||
|
{description of chosen architecture style: microservices, monolith, serverless, etc.}
|
||||||
|
|
||||||
|
### System Context Diagram
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
C4Context
|
||||||
|
title System Context Diagram
|
||||||
|
Person(user, "User", "Primary user")
|
||||||
|
System(system, "{product_name}", "Core system")
|
||||||
|
System_Ext(ext1, "{external_system}", "{description}")
|
||||||
|
Rel(user, system, "Uses")
|
||||||
|
Rel(system, ext1, "Integrates with")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Component Architecture
|
||||||
|
|
||||||
|
### Component Diagram
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TD
|
||||||
|
subgraph "{product_name}"
|
||||||
|
A[Component A] --> B[Component B]
|
||||||
|
B --> C[Component C]
|
||||||
|
A --> D[Component D]
|
||||||
|
end
|
||||||
|
B --> E[External Service]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Component Descriptions
|
||||||
|
|
||||||
|
| Component | Responsibility | Technology | Dependencies |
|
||||||
|
|-----------|---------------|------------|--------------|
|
||||||
|
| {component_name} | {what it does} | {tech stack} | {depends on} |
|
||||||
|
|
||||||
|
## Technology Stack
|
||||||
|
|
||||||
|
### Core Technologies
|
||||||
|
|
||||||
|
| Layer | Technology | Version | Rationale |
|
||||||
|
|-------|-----------|---------|-----------|
|
||||||
|
| Frontend | {technology} | {version} | {why chosen} |
|
||||||
|
| Backend | {technology} | {version} | {why chosen} |
|
||||||
|
| Database | {technology} | {version} | {why chosen} |
|
||||||
|
| Infrastructure | {technology} | {version} | {why chosen} |
|
||||||
|
|
||||||
|
### Key Libraries & Frameworks
|
||||||
|
|
||||||
|
| Library | Purpose | License |
|
||||||
|
|---------|---------|---------|
|
||||||
|
| {library_name} | {purpose} | {license} |
|
||||||
|
|
||||||
|
## Architecture Decision Records
|
||||||
|
|
||||||
|
| ADR | Title | Status | Key Choice |
|
||||||
|
|-----|-------|--------|------------|
|
||||||
|
| [ADR-001](ADR-001-{slug}.md) | {title} | Accepted | {one-line summary} |
|
||||||
|
| [ADR-002](ADR-002-{slug}.md) | {title} | Accepted | {one-line summary} |
|
||||||
|
| [ADR-003](ADR-003-{slug}.md) | {title} | Proposed | {one-line summary} |
|
||||||
|
|
||||||
|
## Data Architecture
|
||||||
|
|
||||||
|
### Data Model
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
erDiagram
|
||||||
|
ENTITY_A ||--o{ ENTITY_B : "has many"
|
||||||
|
ENTITY_A {
|
||||||
|
string id PK
|
||||||
|
string name
|
||||||
|
datetime created_at
|
||||||
|
}
|
||||||
|
ENTITY_B {
|
||||||
|
string id PK
|
||||||
|
string entity_a_id FK
|
||||||
|
string value
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Data Storage Strategy
|
||||||
|
|
||||||
|
| Data Type | Storage | Retention | Backup |
|
||||||
|
|-----------|---------|-----------|--------|
|
||||||
|
| {type} | {storage solution} | {retention policy} | {backup strategy} |
|
||||||
|
|
||||||
|
## API Design
|
||||||
|
|
||||||
|
### API Overview
|
||||||
|
|
||||||
|
| Endpoint | Method | Purpose | Auth |
|
||||||
|
|----------|--------|---------|------|
|
||||||
|
| {/api/resource} | {GET/POST/etc} | {purpose} | {auth type} |
|
||||||
|
|
||||||
|
## Security Architecture
|
||||||
|
|
||||||
|
### Security Controls
|
||||||
|
|
||||||
|
| Control | Implementation | Requirement |
|
||||||
|
|---------|---------------|-------------|
|
||||||
|
| Authentication | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) |
|
||||||
|
| Authorization | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) |
|
||||||
|
| Data Protection | {approach} | [NFR-S-{NNN}](../requirements/NFR-S-{NNN}-{slug}.md) |
|
||||||
|
|
||||||
|
## Infrastructure & Deployment
|
||||||
|
|
||||||
|
### Deployment Architecture
|
||||||
|
|
||||||
|
{description of deployment model: containers, serverless, VMs, etc.}
|
||||||
|
|
||||||
|
### Environment Strategy
|
||||||
|
|
||||||
|
| Environment | Purpose | Configuration |
|
||||||
|
|-------------|---------|---------------|
|
||||||
|
| Development | Local development | {config} |
|
||||||
|
| Staging | Pre-production testing | {config} |
|
||||||
|
| Production | Live system | {config} |
|
||||||
|
|
||||||
|
## Codebase Integration
|
||||||
|
|
||||||
|
{if has_codebase is true:}
|
||||||
|
|
||||||
|
### Existing Code Mapping
|
||||||
|
|
||||||
|
| New Component | Existing Module | Integration Type | Notes |
|
||||||
|
|--------------|----------------|------------------|-------|
|
||||||
|
| {component} | {existing module path} | Extend/Replace/New | {notes} |
|
||||||
|
|
||||||
|
### Migration Notes
|
||||||
|
{any migration considerations for existing code}
|
||||||
|
|
||||||
|
## Quality Attributes
|
||||||
|
|
||||||
|
| Attribute | Target | Measurement | ADR Reference |
|
||||||
|
|-----------|--------|-------------|---------------|
|
||||||
|
| Performance | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) |
|
||||||
|
| Scalability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) |
|
||||||
|
| Reliability | {target} | {how measured} | [ADR-{NNN}](ADR-{NNN}-{slug}.md) |
|
||||||
|
|
||||||
|
## Risks & Mitigations
|
||||||
|
|
||||||
|
| Risk | Impact | Probability | Mitigation |
|
||||||
|
|------|--------|-------------|------------|
|
||||||
|
| {risk} | High/Medium/Low | High/Medium/Low | {mitigation approach} |
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
- [ ] {architectural question 1}
|
||||||
|
- [ ] {architectural question 2}
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Derived from: [Requirements](../requirements/_index.md), [Product Brief](../product-brief.md)
|
||||||
|
- Next: [Epics & Stories](../epics/_index.md)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: ADR-NNN-{slug}.md (Individual Architecture Decision Record)
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
id: ADR-{NNN}
|
||||||
|
status: Accepted
|
||||||
|
traces_to: [{REQ-NNN}, {NFR-X-NNN}]
|
||||||
|
date: {timestamp}
|
||||||
|
---
|
||||||
|
|
||||||
|
# ADR-{NNN}: {decision_title}
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
{what is the situation that motivates this decision}
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
{what is the chosen approach}
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
| Option | Pros | Cons |
|
||||||
|
|--------|------|------|
|
||||||
|
| {option_1 - chosen} | {pros} | {cons} |
|
||||||
|
| {option_2} | {pros} | {cons} |
|
||||||
|
| {option_3} | {pros} | {cons} |
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
|
||||||
|
- **Positive**: {positive outcomes}
|
||||||
|
- **Negative**: {tradeoffs accepted}
|
||||||
|
- **Risks**: {risks to monitor}
|
||||||
|
|
||||||
|
## Traces
|
||||||
|
|
||||||
|
- **Requirements**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md), [NFR-X-{NNN}](../requirements/NFR-X-{NNN}-{slug}.md)
|
||||||
|
- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Variable Descriptions
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `{session_id}` | spec-config.json | Session identifier |
|
||||||
|
| `{timestamp}` | Runtime | ISO8601 generation timestamp |
|
||||||
|
| `{product_name}` | product-brief.md | Product/feature name |
|
||||||
|
| `{NNN}` | Auto-increment | ADR/requirement number |
|
||||||
|
| `{slug}` | Auto-generated | Kebab-case from decision title |
|
||||||
|
| `{has_codebase}` | spec-config.json | Whether existing codebase exists |
|
||||||
196
.claude/skills/team-lifecycle-v2/templates/epics-template.md
Normal file
196
.claude/skills/team-lifecycle-v2/templates/epics-template.md
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
# Epics & Stories Template (Directory Structure)
|
||||||
|
|
||||||
|
Template for generating epic/story breakdown as a directory of individual Epic files in Phase 5.
|
||||||
|
|
||||||
|
## Usage Context
|
||||||
|
|
||||||
|
| Phase | Usage |
|
||||||
|
|-------|-------|
|
||||||
|
| Phase 5 (Epics & Stories) | Generate `epics/` directory from requirements decomposition |
|
||||||
|
| Output Location | `{workDir}/epics/` |
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
{workDir}/epics/
|
||||||
|
├── _index.md # Overview table + dependency map + MVP scope + execution order
|
||||||
|
├── EPIC-001-{slug}.md # Individual Epic with its Stories
|
||||||
|
├── EPIC-002-{slug}.md
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: _index.md
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
session_id: {session_id}
|
||||||
|
phase: 5
|
||||||
|
document_type: epics-index
|
||||||
|
status: draft
|
||||||
|
generated_at: {timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- ../spec-config.json
|
||||||
|
- ../product-brief.md
|
||||||
|
- ../requirements/_index.md
|
||||||
|
- ../architecture/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
# Epics & Stories: {product_name}
|
||||||
|
|
||||||
|
{executive_summary - overview of epic structure and MVP scope}
|
||||||
|
|
||||||
|
## Epic Overview
|
||||||
|
|
||||||
|
| Epic ID | Title | Priority | MVP | Stories | Est. Size |
|
||||||
|
|---------|-------|----------|-----|---------|-----------|
|
||||||
|
| [EPIC-001](EPIC-001-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} |
|
||||||
|
| [EPIC-002](EPIC-002-{slug}.md) | {title} | Must | Yes | {n} | {S/M/L/XL} |
|
||||||
|
| [EPIC-003](EPIC-003-{slug}.md) | {title} | Should | No | {n} | {S/M/L/XL} |
|
||||||
|
|
||||||
|
## Dependency Map
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph LR
|
||||||
|
EPIC-001 --> EPIC-002
|
||||||
|
EPIC-001 --> EPIC-003
|
||||||
|
EPIC-002 --> EPIC-004
|
||||||
|
EPIC-003 --> EPIC-005
|
||||||
|
```
|
||||||
|
|
||||||
|
### Dependency Notes
|
||||||
|
{explanation of why these dependencies exist and the suggested execution order}
|
||||||
|
|
||||||
|
### Recommended Execution Order
|
||||||
|
1. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - foundational}
|
||||||
|
2. [EPIC-{NNN}](EPIC-{NNN}-{slug}.md): {reason - depends on #1}
|
||||||
|
3. ...
|
||||||
|
|
||||||
|
## MVP Scope
|
||||||
|
|
||||||
|
### MVP Epics
|
||||||
|
{list of epics included in MVP with justification, linking to each}
|
||||||
|
|
||||||
|
### MVP Definition of Done
|
||||||
|
- [ ] {MVP completion criterion 1}
|
||||||
|
- [ ] {MVP completion criterion 2}
|
||||||
|
- [ ] {MVP completion criterion 3}
|
||||||
|
|
||||||
|
## Traceability Matrix
|
||||||
|
|
||||||
|
| Requirement | Epic | Stories | Architecture |
|
||||||
|
|-------------|------|---------|--------------|
|
||||||
|
| [REQ-001](../requirements/REQ-001-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-001, STORY-001-002 | [ADR-001](../architecture/ADR-001-{slug}.md) |
|
||||||
|
| [REQ-002](../requirements/REQ-002-{slug}.md) | [EPIC-001](EPIC-001-{slug}.md) | STORY-001-003 | Component B |
|
||||||
|
| [REQ-003](../requirements/REQ-003-{slug}.md) | [EPIC-002](EPIC-002-{slug}.md) | STORY-002-001 | [ADR-002](../architecture/ADR-002-{slug}.md) |
|
||||||
|
|
||||||
|
## Estimation Summary
|
||||||
|
|
||||||
|
| Size | Meaning | Count |
|
||||||
|
|------|---------|-------|
|
||||||
|
| S | Small - well-understood, minimal risk | {n} |
|
||||||
|
| M | Medium - some complexity, moderate risk | {n} |
|
||||||
|
| L | Large - significant complexity, should consider splitting | {n} |
|
||||||
|
| XL | Extra Large - high complexity, must split before implementation | {n} |
|
||||||
|
|
||||||
|
## Risks & Considerations
|
||||||
|
|
||||||
|
| Risk | Affected Epics | Mitigation |
|
||||||
|
|------|---------------|------------|
|
||||||
|
| {risk description} | [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) | {mitigation} |
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
- [ ] {question about scope or implementation 1}
|
||||||
|
- [ ] {question about scope or implementation 2}
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Derived from: [Requirements](../requirements/_index.md), [Architecture](../architecture/_index.md)
|
||||||
|
- Handoff to: execution workflows (lite-plan, plan, req-plan)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: EPIC-NNN-{slug}.md (Individual Epic)
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
id: EPIC-{NNN}
|
||||||
|
priority: {Must|Should|Could}
|
||||||
|
mvp: {true|false}
|
||||||
|
size: {S|M|L|XL}
|
||||||
|
requirements: [REQ-{NNN}]
|
||||||
|
architecture: [ADR-{NNN}]
|
||||||
|
dependencies: [EPIC-{NNN}]
|
||||||
|
status: draft
|
||||||
|
---
|
||||||
|
|
||||||
|
# EPIC-{NNN}: {epic_title}
|
||||||
|
|
||||||
|
**Priority**: {Must|Should|Could}
|
||||||
|
**MVP**: {Yes|No}
|
||||||
|
**Estimated Size**: {S|M|L|XL}
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
{detailed epic description}
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title}
|
||||||
|
- [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md): {title}
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
- [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md): {title}
|
||||||
|
- Component: {component_name}
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (blocking): {reason}
|
||||||
|
- [EPIC-{NNN}](EPIC-{NNN}-{slug}.md) (soft): {reason}
|
||||||
|
|
||||||
|
## Stories
|
||||||
|
|
||||||
|
### STORY-{EPIC}-001: {story_title}
|
||||||
|
|
||||||
|
**User Story**: As a {persona}, I want to {action} so that {benefit}.
|
||||||
|
|
||||||
|
**Acceptance Criteria**:
|
||||||
|
- [ ] {criterion 1}
|
||||||
|
- [ ] {criterion 2}
|
||||||
|
- [ ] {criterion 3}
|
||||||
|
|
||||||
|
**Size**: {S|M|L|XL}
|
||||||
|
**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### STORY-{EPIC}-002: {story_title}
|
||||||
|
|
||||||
|
**User Story**: As a {persona}, I want to {action} so that {benefit}.
|
||||||
|
|
||||||
|
**Acceptance Criteria**:
|
||||||
|
- [ ] {criterion 1}
|
||||||
|
- [ ] {criterion 2}
|
||||||
|
|
||||||
|
**Size**: {S|M|L|XL}
|
||||||
|
**Traces to**: [REQ-{NNN}](../requirements/REQ-{NNN}-{slug}.md)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Variable Descriptions
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `{session_id}` | spec-config.json | Session identifier |
|
||||||
|
| `{timestamp}` | Runtime | ISO8601 generation timestamp |
|
||||||
|
| `{product_name}` | product-brief.md | Product/feature name |
|
||||||
|
| `{EPIC}` | Auto-increment | Epic number (3 digits) |
|
||||||
|
| `{NNN}` | Auto-increment | Story/requirement number |
|
||||||
|
| `{slug}` | Auto-generated | Kebab-case from epic/story title |
|
||||||
|
| `{S\|M\|L\|XL}` | CLI analysis | Relative size estimate |
|
||||||
133
.claude/skills/team-lifecycle-v2/templates/product-brief.md
Normal file
133
.claude/skills/team-lifecycle-v2/templates/product-brief.md
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
# Product Brief Template
|
||||||
|
|
||||||
|
Template for generating product brief documents in Phase 2.
|
||||||
|
|
||||||
|
## Usage Context
|
||||||
|
|
||||||
|
| Phase | Usage |
|
||||||
|
|-------|-------|
|
||||||
|
| Phase 2 (Product Brief) | Generate product-brief.md from multi-CLI analysis |
|
||||||
|
| Output Location | `{workDir}/product-brief.md` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
session_id: {session_id}
|
||||||
|
phase: 2
|
||||||
|
document_type: product-brief
|
||||||
|
status: draft
|
||||||
|
generated_at: {timestamp}
|
||||||
|
stepsCompleted: []
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- spec-config.json
|
||||||
|
---
|
||||||
|
|
||||||
|
# Product Brief: {product_name}
|
||||||
|
|
||||||
|
{executive_summary - 2-3 sentences capturing the essence of the product/feature}
|
||||||
|
|
||||||
|
## Vision
|
||||||
|
|
||||||
|
{vision_statement - clear, aspirational 1-3 sentence statement of what success looks like}
|
||||||
|
|
||||||
|
## Problem Statement
|
||||||
|
|
||||||
|
### Current Situation
|
||||||
|
{description of the current state and pain points}
|
||||||
|
|
||||||
|
### Impact
|
||||||
|
{quantified impact of the problem - who is affected, how much, how often}
|
||||||
|
|
||||||
|
## Target Users
|
||||||
|
|
||||||
|
{for each user persona:}
|
||||||
|
|
||||||
|
### {Persona Name}
|
||||||
|
- **Role**: {user's role/context}
|
||||||
|
- **Needs**: {primary needs related to this product}
|
||||||
|
- **Pain Points**: {current frustrations}
|
||||||
|
- **Success Criteria**: {what success looks like for this user}
|
||||||
|
|
||||||
|
## Goals & Success Metrics
|
||||||
|
|
||||||
|
| Goal ID | Goal | Success Metric | Target |
|
||||||
|
|---------|------|----------------|--------|
|
||||||
|
| G-001 | {goal description} | {measurable metric} | {specific target} |
|
||||||
|
| G-002 | {goal description} | {measurable metric} | {specific target} |
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
### In Scope
|
||||||
|
- {feature/capability 1}
|
||||||
|
- {feature/capability 2}
|
||||||
|
- {feature/capability 3}
|
||||||
|
|
||||||
|
### Out of Scope
|
||||||
|
- {explicitly excluded item 1}
|
||||||
|
- {explicitly excluded item 2}
|
||||||
|
|
||||||
|
### Assumptions
|
||||||
|
- {key assumption 1}
|
||||||
|
- {key assumption 2}
|
||||||
|
|
||||||
|
## Competitive Landscape
|
||||||
|
|
||||||
|
| Aspect | Current State | Proposed Solution | Advantage |
|
||||||
|
|--------|--------------|-------------------|-----------|
|
||||||
|
| {aspect} | {how it's done now} | {our approach} | {differentiator} |
|
||||||
|
|
||||||
|
## Constraints & Dependencies
|
||||||
|
|
||||||
|
### Technical Constraints
|
||||||
|
- {constraint 1}
|
||||||
|
- {constraint 2}
|
||||||
|
|
||||||
|
### Business Constraints
|
||||||
|
- {constraint 1}
|
||||||
|
|
||||||
|
### Dependencies
|
||||||
|
- {external dependency 1}
|
||||||
|
- {external dependency 2}
|
||||||
|
|
||||||
|
## Multi-Perspective Synthesis
|
||||||
|
|
||||||
|
### Product Perspective
|
||||||
|
{summary of product/market analysis findings}
|
||||||
|
|
||||||
|
### Technical Perspective
|
||||||
|
{summary of technical feasibility and constraints}
|
||||||
|
|
||||||
|
### User Perspective
|
||||||
|
{summary of user journey and UX considerations}
|
||||||
|
|
||||||
|
### Convergent Themes
|
||||||
|
{themes where all perspectives agree}
|
||||||
|
|
||||||
|
### Conflicting Views
|
||||||
|
{areas where perspectives differ, with notes on resolution approach}
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
- [ ] {unresolved question 1}
|
||||||
|
- [ ] {unresolved question 2}
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Derived from: [spec-config.json](spec-config.json)
|
||||||
|
- Next: [Requirements PRD](requirements.md)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Variable Descriptions
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `{session_id}` | spec-config.json | Session identifier |
|
||||||
|
| `{timestamp}` | Runtime | ISO8601 generation timestamp |
|
||||||
|
| `{product_name}` | Seed analysis | Product/feature name |
|
||||||
|
| `{executive_summary}` | CLI synthesis | 2-3 sentence summary |
|
||||||
|
| `{vision_statement}` | CLI product perspective | Aspirational vision |
|
||||||
|
| All `{...}` fields | CLI analysis outputs | Filled from multi-perspective analysis |
|
||||||
224
.claude/skills/team-lifecycle-v2/templates/requirements-prd.md
Normal file
224
.claude/skills/team-lifecycle-v2/templates/requirements-prd.md
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
# Requirements PRD Template (Directory Structure)
|
||||||
|
|
||||||
|
Template for generating Product Requirements Document as a directory of individual requirement files in Phase 3.
|
||||||
|
|
||||||
|
## Usage Context
|
||||||
|
|
||||||
|
| Phase | Usage |
|
||||||
|
|-------|-------|
|
||||||
|
| Phase 3 (Requirements) | Generate `requirements/` directory from product brief expansion |
|
||||||
|
| Output Location | `{workDir}/requirements/` |
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
{workDir}/requirements/
|
||||||
|
├── _index.md # Summary + MoSCoW table + traceability matrix + links
|
||||||
|
├── REQ-001-{slug}.md # Individual functional requirement
|
||||||
|
├── REQ-002-{slug}.md
|
||||||
|
├── NFR-P-001-{slug}.md # Non-functional: Performance
|
||||||
|
├── NFR-S-001-{slug}.md # Non-functional: Security
|
||||||
|
├── NFR-SC-001-{slug}.md # Non-functional: Scalability
|
||||||
|
├── NFR-U-001-{slug}.md # Non-functional: Usability
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: _index.md
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
session_id: {session_id}
|
||||||
|
phase: 3
|
||||||
|
document_type: requirements-index
|
||||||
|
status: draft
|
||||||
|
generated_at: {timestamp}
|
||||||
|
version: 1
|
||||||
|
dependencies:
|
||||||
|
- ../spec-config.json
|
||||||
|
- ../product-brief.md
|
||||||
|
---
|
||||||
|
|
||||||
|
# Requirements: {product_name}
|
||||||
|
|
||||||
|
{executive_summary - brief overview of what this PRD covers and key decisions}
|
||||||
|
|
||||||
|
## Requirement Summary
|
||||||
|
|
||||||
|
| Priority | Count | Coverage |
|
||||||
|
|----------|-------|----------|
|
||||||
|
| Must Have | {n} | {description of must-have scope} |
|
||||||
|
| Should Have | {n} | {description of should-have scope} |
|
||||||
|
| Could Have | {n} | {description of could-have scope} |
|
||||||
|
| Won't Have | {n} | {description of explicitly excluded} |
|
||||||
|
|
||||||
|
## Functional Requirements
|
||||||
|
|
||||||
|
| ID | Title | Priority | Traces To |
|
||||||
|
|----|-------|----------|-----------|
|
||||||
|
| [REQ-001](REQ-001-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) |
|
||||||
|
| [REQ-002](REQ-002-{slug}.md) | {title} | Must | [G-001](../product-brief.md#goals--success-metrics) |
|
||||||
|
| [REQ-003](REQ-003-{slug}.md) | {title} | Should | [G-002](../product-brief.md#goals--success-metrics) |
|
||||||
|
|
||||||
|
## Non-Functional Requirements
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
|
||||||
|
| ID | Title | Target |
|
||||||
|
|----|-------|--------|
|
||||||
|
| [NFR-P-001](NFR-P-001-{slug}.md) | {title} | {target value} |
|
||||||
|
|
||||||
|
### Security
|
||||||
|
|
||||||
|
| ID | Title | Standard |
|
||||||
|
|----|-------|----------|
|
||||||
|
| [NFR-S-001](NFR-S-001-{slug}.md) | {title} | {standard/framework} |
|
||||||
|
|
||||||
|
### Scalability
|
||||||
|
|
||||||
|
| ID | Title | Target |
|
||||||
|
|----|-------|--------|
|
||||||
|
| [NFR-SC-001](NFR-SC-001-{slug}.md) | {title} | {target value} |
|
||||||
|
|
||||||
|
### Usability
|
||||||
|
|
||||||
|
| ID | Title | Target |
|
||||||
|
|----|-------|--------|
|
||||||
|
| [NFR-U-001](NFR-U-001-{slug}.md) | {title} | {target value} |
|
||||||
|
|
||||||
|
## Data Requirements
|
||||||
|
|
||||||
|
### Data Entities
|
||||||
|
|
||||||
|
| Entity | Description | Key Attributes |
|
||||||
|
|--------|-------------|----------------|
|
||||||
|
| {entity_name} | {description} | {attr1, attr2, attr3} |
|
||||||
|
|
||||||
|
### Data Flows
|
||||||
|
|
||||||
|
{description of key data flows, optionally with a Mermaid diagram}
|
||||||
|
|
||||||
|
## Integration Requirements
|
||||||
|
|
||||||
|
| System | Direction | Protocol | Data Format | Notes |
|
||||||
|
|--------|-----------|----------|-------------|-------|
|
||||||
|
| {system_name} | Inbound/Outbound/Both | {REST/gRPC/etc} | {JSON/XML/etc} | {notes} |
|
||||||
|
|
||||||
|
## Constraints & Assumptions
|
||||||
|
|
||||||
|
### Constraints
|
||||||
|
- {technical or business constraint 1}
|
||||||
|
- {technical or business constraint 2}
|
||||||
|
|
||||||
|
### Assumptions
|
||||||
|
- {assumption 1 - must be validated}
|
||||||
|
- {assumption 2 - must be validated}
|
||||||
|
|
||||||
|
## Priority Rationale
|
||||||
|
|
||||||
|
{explanation of MoSCoW prioritization decisions, especially for Should/Could boundaries}
|
||||||
|
|
||||||
|
## Traceability Matrix
|
||||||
|
|
||||||
|
| Goal | Requirements |
|
||||||
|
|------|-------------|
|
||||||
|
| G-001 | [REQ-001](REQ-001-{slug}.md), [REQ-002](REQ-002-{slug}.md), [NFR-P-001](NFR-P-001-{slug}.md) |
|
||||||
|
| G-002 | [REQ-003](REQ-003-{slug}.md), [NFR-S-001](NFR-S-001-{slug}.md) |
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
- [ ] {unresolved question 1}
|
||||||
|
- [ ] {unresolved question 2}
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- Derived from: [Product Brief](../product-brief.md)
|
||||||
|
- Next: [Architecture](../architecture/_index.md)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: REQ-NNN-{slug}.md (Individual Functional Requirement)
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
id: REQ-{NNN}
|
||||||
|
type: functional
|
||||||
|
priority: {Must|Should|Could|Won't}
|
||||||
|
traces_to: [G-{NNN}]
|
||||||
|
status: draft
|
||||||
|
---
|
||||||
|
|
||||||
|
# REQ-{NNN}: {requirement_title}
|
||||||
|
|
||||||
|
**Priority**: {Must|Should|Could|Won't}
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
{detailed requirement description}
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a {persona}, I want to {action} so that {benefit}.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] {specific, testable criterion 1}
|
||||||
|
- [ ] {specific, testable criterion 2}
|
||||||
|
- [ ] {specific, testable criterion 3}
|
||||||
|
|
||||||
|
## Traces
|
||||||
|
|
||||||
|
- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics)
|
||||||
|
- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable)
|
||||||
|
- **Implemented by**: [EPIC-{NNN}](../epics/EPIC-{NNN}-{slug}.md) (added in Phase 5)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template: NFR-{type}-NNN-{slug}.md (Individual Non-Functional Requirement)
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
id: NFR-{type}-{NNN}
|
||||||
|
type: non-functional
|
||||||
|
category: {Performance|Security|Scalability|Usability}
|
||||||
|
priority: {Must|Should|Could}
|
||||||
|
status: draft
|
||||||
|
---
|
||||||
|
|
||||||
|
# NFR-{type}-{NNN}: {requirement_title}
|
||||||
|
|
||||||
|
**Category**: {Performance|Security|Scalability|Usability}
|
||||||
|
**Priority**: {Must|Should|Could}
|
||||||
|
|
||||||
|
## Requirement
|
||||||
|
|
||||||
|
{detailed requirement description}
|
||||||
|
|
||||||
|
## Metric & Target
|
||||||
|
|
||||||
|
| Metric | Target | Measurement Method |
|
||||||
|
|--------|--------|--------------------|
|
||||||
|
| {metric} | {target value} | {how measured} |
|
||||||
|
|
||||||
|
## Traces
|
||||||
|
|
||||||
|
- **Goal**: [G-{NNN}](../product-brief.md#goals--success-metrics)
|
||||||
|
- **Architecture**: [ADR-{NNN}](../architecture/ADR-{NNN}-{slug}.md) (if applicable)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Variable Descriptions
|
||||||
|
|
||||||
|
| Variable | Source | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
|
| `{session_id}` | spec-config.json | Session identifier |
|
||||||
|
| `{timestamp}` | Runtime | ISO8601 generation timestamp |
|
||||||
|
| `{product_name}` | product-brief.md | Product/feature name |
|
||||||
|
| `{NNN}` | Auto-increment | Requirement number (zero-padded 3 digits) |
|
||||||
|
| `{slug}` | Auto-generated | Kebab-case from requirement title |
|
||||||
|
| `{type}` | Category | P (Performance), S (Security), SC (Scalability), U (Usability) |
|
||||||
|
| `{Must\|Should\|Could\|Won't}` | User input / auto | MoSCoW priority tag |
|
||||||
@@ -280,7 +280,7 @@ export const envVarGroupsSchema: EnvVarGroupsSchema = {
|
|||||||
key: 'CODEXLENS_CASCADE_STRATEGY',
|
key: 'CODEXLENS_CASCADE_STRATEGY',
|
||||||
labelKey: 'codexlens.envField.searchStrategy',
|
labelKey: 'codexlens.envField.searchStrategy',
|
||||||
type: 'select',
|
type: 'select',
|
||||||
options: ['binary', 'hybrid', 'binary_rerank', 'dense_rerank'],
|
options: ['binary', 'hybrid', 'binary_rerank', 'dense_rerank', 'staged'],
|
||||||
default: 'dense_rerank',
|
default: 'dense_rerank',
|
||||||
settingsPath: 'cascade.strategy',
|
settingsPath: 'cascade.strategy',
|
||||||
},
|
},
|
||||||
@@ -304,6 +304,43 @@ export const envVarGroupsSchema: EnvVarGroupsSchema = {
|
|||||||
min: 1,
|
min: 1,
|
||||||
max: 100,
|
max: 100,
|
||||||
},
|
},
|
||||||
|
CODEXLENS_STAGED_STAGE2_MODE: {
|
||||||
|
key: 'CODEXLENS_STAGED_STAGE2_MODE',
|
||||||
|
labelKey: 'codexlens.envField.stagedStage2Mode',
|
||||||
|
type: 'select',
|
||||||
|
options: ['precomputed', 'realtime', 'static_global_graph'],
|
||||||
|
default: 'precomputed',
|
||||||
|
settingsPath: 'staged.stage2_mode',
|
||||||
|
showWhen: (env) => env['CODEXLENS_CASCADE_STRATEGY'] === 'staged',
|
||||||
|
},
|
||||||
|
CODEXLENS_STAGED_CLUSTERING_STRATEGY: {
|
||||||
|
key: 'CODEXLENS_STAGED_CLUSTERING_STRATEGY',
|
||||||
|
labelKey: 'codexlens.envField.stagedClusteringStrategy',
|
||||||
|
type: 'select',
|
||||||
|
options: ['auto', 'hdbscan', 'dbscan', 'frequency', 'noop', 'score', 'dir_rr', 'path'],
|
||||||
|
default: 'auto',
|
||||||
|
settingsPath: 'staged.clustering_strategy',
|
||||||
|
showWhen: (env) => env['CODEXLENS_CASCADE_STRATEGY'] === 'staged',
|
||||||
|
},
|
||||||
|
CODEXLENS_STAGED_CLUSTERING_MIN_SIZE: {
|
||||||
|
key: 'CODEXLENS_STAGED_CLUSTERING_MIN_SIZE',
|
||||||
|
labelKey: 'codexlens.envField.stagedClusteringMinSize',
|
||||||
|
type: 'number',
|
||||||
|
placeholder: '3',
|
||||||
|
default: '3',
|
||||||
|
settingsPath: 'staged.clustering_min_size',
|
||||||
|
min: 1,
|
||||||
|
max: 50,
|
||||||
|
showWhen: (env) => env['CODEXLENS_CASCADE_STRATEGY'] === 'staged',
|
||||||
|
},
|
||||||
|
CODEXLENS_ENABLE_STAGED_RERANK: {
|
||||||
|
key: 'CODEXLENS_ENABLE_STAGED_RERANK',
|
||||||
|
labelKey: 'codexlens.envField.enableStagedRerank',
|
||||||
|
type: 'checkbox',
|
||||||
|
default: 'true',
|
||||||
|
settingsPath: 'staged.enable_rerank',
|
||||||
|
showWhen: (env) => env['CODEXLENS_CASCADE_STRATEGY'] === 'staged',
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
indexing: {
|
indexing: {
|
||||||
|
|||||||
@@ -176,6 +176,10 @@ const mockMessages: Record<Locale, Record<string, string>> = {
|
|||||||
'codexlens.envField.searchStrategy': 'Search Strategy',
|
'codexlens.envField.searchStrategy': 'Search Strategy',
|
||||||
'codexlens.envField.coarseK': 'Coarse K',
|
'codexlens.envField.coarseK': 'Coarse K',
|
||||||
'codexlens.envField.fineK': 'Fine K',
|
'codexlens.envField.fineK': 'Fine K',
|
||||||
|
'codexlens.envField.stagedStage2Mode': 'Stage-2 Mode',
|
||||||
|
'codexlens.envField.stagedClusteringStrategy': 'Clustering Strategy',
|
||||||
|
'codexlens.envField.stagedClusteringMinSize': 'Cluster Min Size',
|
||||||
|
'codexlens.envField.enableStagedRerank': 'Enable Rerank',
|
||||||
'codexlens.envField.useAstGrep': 'Use ast-grep',
|
'codexlens.envField.useAstGrep': 'Use ast-grep',
|
||||||
'codexlens.envField.staticGraphEnabled': 'Static Graph',
|
'codexlens.envField.staticGraphEnabled': 'Static Graph',
|
||||||
'codexlens.envField.staticGraphRelationshipTypes': 'Relationship Types',
|
'codexlens.envField.staticGraphRelationshipTypes': 'Relationship Types',
|
||||||
@@ -444,6 +448,10 @@ const mockMessages: Record<Locale, Record<string, string>> = {
|
|||||||
'codexlens.envField.searchStrategy': '搜索策略',
|
'codexlens.envField.searchStrategy': '搜索策略',
|
||||||
'codexlens.envField.coarseK': '粗筛 K 值',
|
'codexlens.envField.coarseK': '粗筛 K 值',
|
||||||
'codexlens.envField.fineK': '精筛 K 值',
|
'codexlens.envField.fineK': '精筛 K 值',
|
||||||
|
'codexlens.envField.stagedStage2Mode': 'Stage-2 模式',
|
||||||
|
'codexlens.envField.stagedClusteringStrategy': '聚类策略',
|
||||||
|
'codexlens.envField.stagedClusteringMinSize': '最小聚类大小',
|
||||||
|
'codexlens.envField.enableStagedRerank': '启用重排序',
|
||||||
'codexlens.envField.useAstGrep': '使用 ast-grep',
|
'codexlens.envField.useAstGrep': '使用 ast-grep',
|
||||||
'codexlens.envField.staticGraphEnabled': '启用静态图',
|
'codexlens.envField.staticGraphEnabled': '启用静态图',
|
||||||
'codexlens.envField.staticGraphRelationshipTypes': '关系类型',
|
'codexlens.envField.staticGraphRelationshipTypes': '关系类型',
|
||||||
|
|||||||
@@ -140,7 +140,7 @@ class Config:
|
|||||||
enable_cascade_search: bool = False # Enable cascade search (coarse + fine ranking)
|
enable_cascade_search: bool = False # Enable cascade search (coarse + fine ranking)
|
||||||
cascade_coarse_k: int = 100 # Number of coarse candidates from first stage
|
cascade_coarse_k: int = 100 # Number of coarse candidates from first stage
|
||||||
cascade_fine_k: int = 10 # Number of final results after reranking
|
cascade_fine_k: int = 10 # Number of final results after reranking
|
||||||
cascade_strategy: str = "binary" # "binary", "binary_rerank", "dense_rerank", or "staged"
|
cascade_strategy: str = "binary" # "binary", "binary_rerank" (alias: "hybrid"), "dense_rerank", or "staged"
|
||||||
|
|
||||||
# Staged cascade search configuration (4-stage pipeline)
|
# Staged cascade search configuration (4-stage pipeline)
|
||||||
staged_coarse_k: int = 200 # Number of coarse candidates from Stage 1 binary search
|
staged_coarse_k: int = 200 # Number of coarse candidates from Stage 1 binary search
|
||||||
@@ -190,7 +190,7 @@ class Config:
|
|||||||
chars_per_token_estimate: int = 4 # Characters per token estimation ratio
|
chars_per_token_estimate: int = 4 # Characters per token estimation ratio
|
||||||
|
|
||||||
# Parser configuration
|
# Parser configuration
|
||||||
use_astgrep: bool = False # Use ast-grep for Python relationship extraction (tree-sitter is default)
|
use_astgrep: bool = False # Use ast-grep for relationship extraction (Python/JS/TS); tree-sitter is default
|
||||||
|
|
||||||
def __post_init__(self) -> None:
|
def __post_init__(self) -> None:
|
||||||
try:
|
try:
|
||||||
@@ -408,14 +408,18 @@ class Config:
|
|||||||
# Load cascade settings
|
# Load cascade settings
|
||||||
cascade = settings.get("cascade", {})
|
cascade = settings.get("cascade", {})
|
||||||
if "strategy" in cascade:
|
if "strategy" in cascade:
|
||||||
strategy = cascade["strategy"]
|
raw_strategy = cascade["strategy"]
|
||||||
|
strategy = str(raw_strategy).strip().lower()
|
||||||
if strategy in {"binary", "binary_rerank", "dense_rerank", "staged"}:
|
if strategy in {"binary", "binary_rerank", "dense_rerank", "staged"}:
|
||||||
self.cascade_strategy = strategy
|
self.cascade_strategy = strategy
|
||||||
|
elif strategy == "hybrid":
|
||||||
|
self.cascade_strategy = "binary_rerank"
|
||||||
|
log.debug("Mapping cascade strategy 'hybrid' -> 'binary_rerank'")
|
||||||
else:
|
else:
|
||||||
log.warning(
|
log.warning(
|
||||||
"Invalid cascade strategy in %s: %r (expected 'binary', 'binary_rerank', 'dense_rerank', or 'staged')",
|
"Invalid cascade strategy in %s: %r (expected 'binary', 'binary_rerank', 'dense_rerank', or 'staged')",
|
||||||
self.settings_path,
|
self.settings_path,
|
||||||
strategy,
|
raw_strategy,
|
||||||
)
|
)
|
||||||
if "coarse_k" in cascade:
|
if "coarse_k" in cascade:
|
||||||
self.cascade_coarse_k = cascade["coarse_k"]
|
self.cascade_coarse_k = cascade["coarse_k"]
|
||||||
@@ -522,6 +526,9 @@ class Config:
|
|||||||
if strategy in {"binary", "binary_rerank", "dense_rerank", "staged"}:
|
if strategy in {"binary", "binary_rerank", "dense_rerank", "staged"}:
|
||||||
self.cascade_strategy = strategy
|
self.cascade_strategy = strategy
|
||||||
log.debug("Overriding cascade_strategy from .env: %s", self.cascade_strategy)
|
log.debug("Overriding cascade_strategy from .env: %s", self.cascade_strategy)
|
||||||
|
elif strategy == "hybrid":
|
||||||
|
self.cascade_strategy = "binary_rerank"
|
||||||
|
log.debug("Overriding cascade_strategy from .env: %s", self.cascade_strategy)
|
||||||
else:
|
else:
|
||||||
log.warning("Invalid CASCADE_STRATEGY in .env: %r", cascade_strategy)
|
log.warning("Invalid CASCADE_STRATEGY in .env: %r", cascade_strategy)
|
||||||
|
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ ENV_VARS = {
|
|||||||
"CODEXLENS_DEBUG": "Enable debug mode (true/false)",
|
"CODEXLENS_DEBUG": "Enable debug mode (true/false)",
|
||||||
# Cascade / staged pipeline configuration
|
# Cascade / staged pipeline configuration
|
||||||
"ENABLE_CASCADE_SEARCH": "Enable cascade search (true/false)",
|
"ENABLE_CASCADE_SEARCH": "Enable cascade search (true/false)",
|
||||||
"CASCADE_STRATEGY": "Cascade strategy: binary, binary_rerank, dense_rerank, staged",
|
"CASCADE_STRATEGY": "Cascade strategy: binary, binary_rerank (alias: hybrid), dense_rerank, staged",
|
||||||
"CASCADE_COARSE_K": "Cascade coarse_k candidate count (int)",
|
"CASCADE_COARSE_K": "Cascade coarse_k candidate count (int)",
|
||||||
"CASCADE_FINE_K": "Cascade fine_k result count (int)",
|
"CASCADE_FINE_K": "Cascade fine_k result count (int)",
|
||||||
"STAGED_STAGE2_MODE": "Staged Stage 2 mode: precomputed, realtime",
|
"STAGED_STAGE2_MODE": "Staged Stage 2 mode: precomputed, realtime",
|
||||||
|
|||||||
289
codex-lens/src/codexlens/parsers/astgrep_js_ts_processor.py
Normal file
289
codex-lens/src/codexlens/parsers/astgrep_js_ts_processor.py
Normal file
@@ -0,0 +1,289 @@
|
|||||||
|
"""Ast-grep processors for JavaScript/TypeScript relationship extraction.
|
||||||
|
|
||||||
|
These processors are intentionally narrower than the tree-sitter relationship
|
||||||
|
extractor: they focus on stable, high-signal edges for static graph usage:
|
||||||
|
- IMPORTS: ES module imports + CommonJS require() (string literal only)
|
||||||
|
- INHERITS: class/interface extends
|
||||||
|
|
||||||
|
They are used when Config.use_astgrep is True.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Callable, Iterable, List, Optional, Sequence, Set, Tuple
|
||||||
|
|
||||||
|
from codexlens.entities import CodeRelationship, IndexedFile, RelationshipType
|
||||||
|
from codexlens.parsers.astgrep_processor import BaseAstGrepProcessor
|
||||||
|
|
||||||
|
|
||||||
|
_IDENT_RE = re.compile(r"^[A-Za-z_$][A-Za-z0-9_$]*$")
|
||||||
|
|
||||||
|
|
||||||
|
def _strip_quotes(value: str) -> str:
    """Return *value* without one layer of matching surrounding quotes.

    Whitespace is trimmed first; single, double, and backtick quotes are
    recognised.  Unquoted (or mismatched-quote) input is returned trimmed.
    """
    text = (value or "").strip()
    is_quoted = (
        len(text) >= 2
        and text[0] == text[-1]
        and text[0] in ("'", '"', "`")
    )
    return text[1:-1] if is_quoted else text
|
||||||
|
|
||||||
|
|
||||||
|
def _module_from_literal(raw: str) -> str:
    """Extract a module name from a quoted string literal.

    Returns "" when *raw* is empty or is not a string literal — only quoted
    forms are accepted, matching the tree-sitter extractor's behavior.
    """
    text = (raw or "").strip()
    if not text:
        return ""
    # A literal must carry a matching pair of quotes around the name.
    if len(text) >= 2 and text[0] == text[-1] and text[0] in ("'", '"', "`"):
        return text[1:-1].strip()
    return ""
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_named_imports(raw: str) -> List[str]:
    """Parse a named-import clause into plain identifier names.

    Handles ``{ a, b as c, type Foo }``: ``type`` prefixes are dropped,
    ``as`` aliases resolve to the original name, and anything that is not a
    valid JS identifier is skipped.
    """
    text = (raw or "").strip()
    if not text:
        return []
    # The match may include the surrounding braces; remove them if so.
    if text.startswith("{") and text.endswith("}"):
        text = text[1:-1].strip()

    names: List[str] = []
    # Named imports never nest, so a flat comma split is sufficient.
    for entry in text.split(","):
        entry = entry.strip()
        if not entry:
            continue
        # TS type-only member: "type Foo" -> "Foo"
        if entry.startswith("type "):
            entry = entry[len("type "):].strip()
        # Alias: "foo as bar" -> keep the exported name "foo"
        if " as " in entry:
            entry = entry.split(" as ", 1)[0].strip()
        if re.match(r"^[A-Za-z_$][A-Za-z0-9_$]*$", entry):
            names.append(entry)
    return names
|
||||||
|
|
||||||
|
|
||||||
|
def _dedupe_relationships(rels: Sequence[CodeRelationship]) -> List[CodeRelationship]:
    """Drop duplicate (source, target, type) edges, keeping first occurrences.

    Input order of the surviving relationships is preserved.
    """
    unique: List[CodeRelationship] = []
    seen_keys: Set[Tuple[str, str, str]] = set()
    for rel in rels:
        key = (rel.source_symbol, rel.target_symbol, rel.relationship_type.value)
        if key not in seen_keys:
            seen_keys.add(key)
            unique.append(rel)
    return unique
|
||||||
|
|
||||||
|
|
||||||
|
class _AstGrepJsTsProcessor(BaseAstGrepProcessor):
    """Shared ast-grep relationship extractor for JavaScript/TypeScript.

    Only high-signal edges are produced: IMPORTS (ES-module forms plus
    string-literal CommonJS ``require()``) and INHERITS (``class ... extends``).
    The language-specific pattern table is injected via *get_pattern* so JS
    and TS can share one implementation.
    """

    def __init__(
        self,
        language_id: str,
        *,
        path: Optional[Path] = None,
        get_pattern: Callable[[str], str],
    ) -> None:
        super().__init__(language_id, path)
        # Kept as an attribute so subclasses can reuse it for extra patterns.
        self._get_pattern = get_pattern

    def parse(self, text: str, path: Path) -> Optional[IndexedFile]:
        """Return an IndexedFile carrying only relationships, or None on failure."""
        if not self.is_available():
            return None
        try:
            return IndexedFile(
                path=str(path.resolve()),
                language=self.language_id,
                symbols=[],
                chunks=[],
                relationships=self._extract_relationships(text, path),
            )
        except Exception:
            # Extraction is best-effort; callers fall back to tree-sitter.
            return None

    def process_matches(  # type: ignore[override]
        self,
        matches,  # SgNode list (runtime-only type)
        source_code: str,
        path: Path,
    ) -> List[CodeRelationship]:
        """Unused by the JS/TS processors; kept only for interface parity."""
        return []

    def _extract_relationships(self, source_code: str, path: Path) -> List[CodeRelationship]:
        origin = str(path.resolve())
        edges: List[CodeRelationship] = []
        edges.extend(self._extract_imports(source_code, source_file=origin))
        edges.extend(self._extract_inherits(source_code, source_file=origin))
        return _dedupe_relationships(edges)

    def _extract_imports(self, source_code: str, *, source_file: str) -> List[CodeRelationship]:
        """Collect IMPORTS edges for all supported import forms."""
        edges: List[CodeRelationship] = []

        def add_edge(module_name: str, line: int) -> None:
            if not module_name:
                return
            edges.append(
                CodeRelationship(
                    source_symbol="<module>",
                    target_symbol=module_name,
                    relationship_type=RelationshipType.IMPORTS,
                    source_file=source_file,
                    target_file=None,
                    source_line=line,
                )
            )

        # Whole-module forms: `import ... from "mod"` and side-effect `import "mod"`.
        for pattern_key in ("import_from", "import_side_effect"):
            for node in self.run_ast_grep(source_code, self._get_pattern(pattern_key)):
                module = _module_from_literal(self._get_match(node, "MODULE"))
                if module:
                    add_edge(module, self._get_line_number(node))

        # Named forms: one edge per imported name, recorded as "mod.name".
        for pattern_key in ("import_named_only", "import_default_named"):
            for node in self.run_ast_grep(source_code, self._get_pattern(pattern_key)):
                module = _module_from_literal(self._get_match(node, "MODULE"))
                if not module:
                    continue
                for name in _extract_named_imports(self._get_match(node, "NAMES")):
                    add_edge(f"{module}.{name}", self._get_line_number(node))

        # CommonJS require("mod") — string-literal arguments only.
        for node in self.run_ast_grep(source_code, self._get_pattern("require_call")):
            module = _module_from_literal(self._get_match(node, "MODULE"))
            if module:
                add_edge(module, self._get_line_number(node))

        return edges

    def _extract_inherits(self, source_code: str, *, source_file: str) -> List[CodeRelationship]:
        """Collect INHERITS edges from `class X extends Y` matches."""
        edges: List[CodeRelationship] = []
        for node in self.run_ast_grep(source_code, self._get_pattern("class_extends")):
            subclass = (self._get_match(node, "NAME") or "").strip()
            parent_raw = (self._get_match(node, "BASE") or "").strip()
            # Drop generic arguments: "Base<T>" -> "Base".
            parent = parent_raw.split("<", 1)[0].strip()
            if not subclass or not parent:
                continue
            edges.append(
                CodeRelationship(
                    source_symbol=subclass,
                    target_symbol=parent,
                    relationship_type=RelationshipType.INHERITS,
                    source_file=source_file,
                    target_file=None,
                    source_line=self._get_line_number(node),
                )
            )
        return edges
|
||||||
|
|
||||||
|
|
||||||
|
class AstGrepJavaScriptProcessor(_AstGrepJsTsProcessor):
    """Ast-grep relationship processor configured for JavaScript."""

    def __init__(self, path: Optional[Path] = None) -> None:
        # Imported lazily so the patterns module is only loaded when used.
        from codexlens.parsers.patterns.javascript import get_pattern as _js_lookup

        super().__init__("javascript", path=path, get_pattern=_js_lookup)
|
||||||
|
|
||||||
|
|
||||||
|
class AstGrepTypeScriptProcessor(_AstGrepJsTsProcessor):
    """Ast-grep relationship processor configured for TypeScript.

    Extends the shared JS/TS logic with TypeScript-only syntax:
    ``import type`` forms and ``interface ... extends ...``.
    """

    def __init__(self, path: Optional[Path] = None) -> None:
        # Imported lazily so the patterns module is only loaded when used.
        from codexlens.parsers.patterns.typescript import get_pattern as _ts_lookup

        super().__init__("typescript", path=path, get_pattern=_ts_lookup)

    def _extract_inherits(self, source_code: str, *, source_file: str) -> List[CodeRelationship]:
        """Shared `class extends` edges plus TS `interface extends` edges."""
        edges = super()._extract_inherits(source_code, source_file=source_file)

        for node in self.run_ast_grep(source_code, self._get_pattern("interface_extends")):
            child = (self._get_match(node, "NAME") or "").strip()
            parent_raw = (self._get_match(node, "BASE") or "").strip()
            # Drop generic arguments: "Base<T>" -> "Base".
            parent = parent_raw.split("<", 1)[0].strip()
            if not child or not parent:
                continue
            edges.append(
                CodeRelationship(
                    source_symbol=child,
                    target_symbol=parent,
                    relationship_type=RelationshipType.INHERITS,
                    source_file=source_file,
                    target_file=None,
                    source_line=self._get_line_number(node),
                )
            )

        return _dedupe_relationships(edges)

    def _extract_imports(self, source_code: str, *, source_file: str) -> List[CodeRelationship]:
        """Standard JS import edges plus TypeScript type-only imports."""
        edges = super()._extract_imports(source_code, source_file=source_file)

        def add_edge(module_name: str, line: int) -> None:
            if not module_name:
                return
            edges.append(
                CodeRelationship(
                    source_symbol="<module>",
                    target_symbol=module_name,
                    relationship_type=RelationshipType.IMPORTS,
                    source_file=source_file,
                    target_file=None,
                    source_line=line,
                )
            )

        # Whole-module type import: import type ... from "mod"
        for node in self.run_ast_grep(source_code, self._get_pattern("import_type_from")):
            module = _module_from_literal(self._get_match(node, "MODULE"))
            if module:
                add_edge(module, self._get_line_number(node))

        # Named type imports, recorded as "mod.name".
        for pattern_key in ("import_type_named_only", "import_type_default_named"):
            for node in self.run_ast_grep(source_code, self._get_pattern(pattern_key)):
                module = _module_from_literal(self._get_match(node, "MODULE"))
                if not module:
                    continue
                for name in _extract_named_imports(self._get_match(node, "NAMES")):
                    add_edge(f"{module}.{name}", self._get_line_number(node))

        return _dedupe_relationships(edges)
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"AstGrepJavaScriptProcessor",
|
||||||
|
"AstGrepTypeScriptProcessor",
|
||||||
|
]
|
||||||
|
|
||||||
@@ -0,0 +1,82 @@
|
|||||||
|
"""JavaScript ast-grep patterns for relationship extraction.
|
||||||
|
|
||||||
|
These patterns are used by CodexLens' optional ast-grep processors to extract:
|
||||||
|
- IMPORTS: ES module imports + CommonJS require()
|
||||||
|
- INHERITS: class extends relationships
|
||||||
|
|
||||||
|
Pattern Syntax (ast-grep-py 0.40+):
|
||||||
|
$VAR - Single metavariable (matches one AST node)
|
||||||
|
$$$VAR - Multiple metavariable (matches zero or more nodes)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Dict, List
|
||||||
|
|
||||||
|
|
||||||
|
# Pattern table consumed by the ast-grep JS processor.  Key order matters:
# it is reflected in get_pattern()'s error message.
JAVASCRIPT_PATTERNS: Dict[str, str] = {
    # ES module import forms, e.g.:
    #   import React from "react"
    #   import React, { useEffect } from "react"
    #   import { useEffect } from "react"
    #   import * as fs from "fs"
    "import_from": "import $$$IMPORTS from $MODULE",
    "import_named_only": "import {$$$NAMES} from $MODULE",
    "import_default_named": "import $DEFAULT, {$$$NAMES} from $MODULE",
    # Side-effect import: import "./styles.css"
    "import_side_effect": "import $MODULE",
    # CommonJS require(): const fs = require("fs")
    "require_call": "require($MODULE)",
    # Class inheritance: class Child extends Base {}
    "class_extends": "class $NAME extends $BASE $$$BODY",
}

# Canonical metavariable names used when reading match captures.
METAVARS = {
    "module": "MODULE",
    "import_names": "NAMES",
    "import_default": "DEFAULT",
    "class_name": "NAME",
    "class_base": "BASE",
}

# Which patterns feed each relationship type.
RELATIONSHIP_PATTERNS: Dict[str, List[str]] = {
    "imports": [
        "import_from",
        "import_named_only",
        "import_default_named",
        "import_side_effect",
        "require_call",
    ],
    "inheritance": ["class_extends"],
}


def get_pattern(pattern_name: str) -> str:
    """Return the ast-grep pattern registered under *pattern_name*.

    Raises:
        KeyError: if *pattern_name* is not a known JavaScript pattern.
    """
    if pattern_name in JAVASCRIPT_PATTERNS:
        return JAVASCRIPT_PATTERNS[pattern_name]
    raise KeyError(
        f"Unknown JS pattern: {pattern_name}. Available: {list(JAVASCRIPT_PATTERNS.keys())}"
    )


def get_patterns_for_relationship(rel_type: str) -> List[str]:
    """Return the pattern names producing *rel_type* edges ([] if unknown)."""
    return RELATIONSHIP_PATTERNS.get(rel_type, [])


def get_metavar(name: str) -> str:
    """Map a logical capture name to its metavariable (default: upper-cased)."""
    return METAVARS.get(name, name.upper())


__all__ = [
    "JAVASCRIPT_PATTERNS",
    "METAVARS",
    "RELATIONSHIP_PATTERNS",
    "get_pattern",
    "get_patterns_for_relationship",
    "get_metavar",
]
|
||||||
|
|
||||||
@@ -0,0 +1,68 @@
|
|||||||
|
"""TypeScript ast-grep patterns for relationship extraction.
|
||||||
|
|
||||||
|
This module extends the JavaScript patterns with TypeScript-specific syntax
|
||||||
|
such as `import type` and `interface ... extends ...`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Dict, List
|
||||||
|
|
||||||
|
from codexlens.parsers.patterns.javascript import (
|
||||||
|
METAVARS,
|
||||||
|
RELATIONSHIP_PATTERNS as _JS_RELATIONSHIP_PATTERNS,
|
||||||
|
JAVASCRIPT_PATTERNS,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# TypeScript = every JavaScript pattern plus TS-only syntax.  Key order
# matters: it is reflected in get_pattern()'s error message.
TYPESCRIPT_PATTERNS: Dict[str, str] = {
    **JAVASCRIPT_PATTERNS,
    # Type-only imports
    "import_type_from": "import type $$$IMPORTS from $MODULE",
    "import_type_named_only": "import type {$$$NAMES} from $MODULE",
    "import_type_default_named": "import type $DEFAULT, {$$$NAMES} from $MODULE",
    # Interface inheritance: interface Foo extends Bar {}
    "interface_extends": "interface $NAME extends $BASE $$$BODY",
}

# Extend the JS relationship map with the TS-only patterns.
RELATIONSHIP_PATTERNS: Dict[str, List[str]] = {
    **_JS_RELATIONSHIP_PATTERNS,
    "imports": [
        *_JS_RELATIONSHIP_PATTERNS.get("imports", []),
        "import_type_from",
        "import_type_named_only",
        "import_type_default_named",
    ],
    "inheritance": [
        *_JS_RELATIONSHIP_PATTERNS.get("inheritance", []),
        "interface_extends",
    ],
}


def get_pattern(pattern_name: str) -> str:
    """Return the ast-grep pattern registered under *pattern_name*.

    Raises:
        KeyError: if *pattern_name* is not a known TypeScript pattern.
    """
    if pattern_name in TYPESCRIPT_PATTERNS:
        return TYPESCRIPT_PATTERNS[pattern_name]
    raise KeyError(
        f"Unknown TS pattern: {pattern_name}. Available: {list(TYPESCRIPT_PATTERNS.keys())}"
    )


def get_patterns_for_relationship(rel_type: str) -> List[str]:
    """Return the pattern names producing *rel_type* edges ([] if unknown)."""
    return RELATIONSHIP_PATTERNS.get(rel_type, [])


def get_metavar(name: str) -> str:
    """Map a logical capture name to its metavariable (default: upper-cased)."""
    return METAVARS.get(name, name.upper())


__all__ = [
    "TYPESCRIPT_PATTERNS",
    "METAVARS",
    "RELATIONSHIP_PATTERNS",
    "get_pattern",
    "get_patterns_for_relationship",
    "get_metavar",
]
|
||||||
|
|
||||||
@@ -34,8 +34,9 @@ if TYPE_CHECKING:
|
|||||||
class TreeSitterSymbolParser:
|
class TreeSitterSymbolParser:
|
||||||
"""Parser using tree-sitter for AST-level symbol extraction.
|
"""Parser using tree-sitter for AST-level symbol extraction.
|
||||||
|
|
||||||
Supports optional ast-grep integration for Python relationship extraction
|
Supports optional ast-grep integration for relationship extraction
|
||||||
when config.use_astgrep is True and ast-grep-py is available.
|
(Python/JavaScript/TypeScript) when config.use_astgrep is True and
|
||||||
|
ast-grep-py is available.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
@@ -105,24 +106,33 @@ class TreeSitterSymbolParser:
|
|||||||
"""Check if ast-grep should be used for relationship extraction.
|
"""Check if ast-grep should be used for relationship extraction.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
True if config.use_astgrep is True and language is Python
|
True if config.use_astgrep is True and language is supported
|
||||||
"""
|
"""
|
||||||
if self._config is None:
|
if self._config is None:
|
||||||
return False
|
return False
|
||||||
if not getattr(self._config, "use_astgrep", False):
|
if not getattr(self._config, "use_astgrep", False):
|
||||||
return False
|
return False
|
||||||
return self.language_id == "python"
|
return self.language_id in {"python", "javascript", "typescript"}
|
||||||
|
|
||||||
def _initialize_astgrep_processor(self) -> None:
|
def _initialize_astgrep_processor(self) -> None:
|
||||||
"""Initialize ast-grep processor for Python relationship extraction."""
|
"""Initialize ast-grep processor for relationship extraction."""
|
||||||
try:
|
try:
|
||||||
from codexlens.parsers.astgrep_processor import (
|
from codexlens.parsers.astgrep_processor import (
|
||||||
AstGrepPythonProcessor,
|
AstGrepPythonProcessor,
|
||||||
is_astgrep_processor_available,
|
is_astgrep_processor_available,
|
||||||
)
|
)
|
||||||
|
from codexlens.parsers.astgrep_js_ts_processor import (
|
||||||
|
AstGrepJavaScriptProcessor,
|
||||||
|
AstGrepTypeScriptProcessor,
|
||||||
|
)
|
||||||
|
|
||||||
if is_astgrep_processor_available():
|
if is_astgrep_processor_available():
|
||||||
self._astgrep_processor = AstGrepPythonProcessor(self.path)
|
if self.language_id == "python":
|
||||||
|
self._astgrep_processor = AstGrepPythonProcessor(self.path)
|
||||||
|
elif self.language_id == "javascript":
|
||||||
|
self._astgrep_processor = AstGrepJavaScriptProcessor(self.path)
|
||||||
|
elif self.language_id == "typescript":
|
||||||
|
self._astgrep_processor = AstGrepTypeScriptProcessor(self.path)
|
||||||
except ImportError:
|
except ImportError:
|
||||||
self._astgrep_processor = None
|
self._astgrep_processor = None
|
||||||
|
|
||||||
@@ -222,9 +232,9 @@ class TreeSitterSymbolParser:
|
|||||||
path: Path,
|
path: Path,
|
||||||
source_code: Optional[str] = None,
|
source_code: Optional[str] = None,
|
||||||
) -> List[CodeRelationship]:
|
) -> List[CodeRelationship]:
|
||||||
"""Extract relationships, optionally using ast-grep for Python.
|
"""Extract relationships, optionally using ast-grep.
|
||||||
|
|
||||||
When config.use_astgrep is True and ast-grep is available for Python,
|
When config.use_astgrep is True and an ast-grep processor is available,
|
||||||
uses ast-grep for relationship extraction. Otherwise, uses tree-sitter.
|
uses ast-grep for relationship extraction. Otherwise, uses tree-sitter.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -236,32 +246,31 @@ class TreeSitterSymbolParser:
|
|||||||
Returns:
|
Returns:
|
||||||
List of extracted relationships
|
List of extracted relationships
|
||||||
"""
|
"""
|
||||||
|
# Try ast-grep first if configured and available for this language.
|
||||||
|
if self._astgrep_processor is not None and source_code is not None:
|
||||||
|
try:
|
||||||
|
astgrep_rels = self._extract_relationships_astgrep(source_code, path)
|
||||||
|
if astgrep_rels is not None:
|
||||||
|
return astgrep_rels
|
||||||
|
except Exception:
|
||||||
|
# Fall back to tree-sitter on ast-grep failure
|
||||||
|
pass
|
||||||
|
|
||||||
if self.language_id == "python":
|
if self.language_id == "python":
|
||||||
# Try ast-grep first if configured and available
|
|
||||||
if self._astgrep_processor is not None and source_code is not None:
|
|
||||||
try:
|
|
||||||
astgrep_rels = self._extract_python_relationships_astgrep(
|
|
||||||
source_code, path
|
|
||||||
)
|
|
||||||
if astgrep_rels is not None:
|
|
||||||
return astgrep_rels
|
|
||||||
except Exception:
|
|
||||||
# Fall back to tree-sitter on ast-grep failure
|
|
||||||
pass
|
|
||||||
return self._extract_python_relationships(source_bytes, root, path)
|
return self._extract_python_relationships(source_bytes, root, path)
|
||||||
if self.language_id in {"javascript", "typescript"}:
|
if self.language_id in {"javascript", "typescript"}:
|
||||||
return self._extract_js_ts_relationships(source_bytes, root, path)
|
return self._extract_js_ts_relationships(source_bytes, root, path)
|
||||||
return []
|
return []
|
||||||
|
|
||||||
def _extract_python_relationships_astgrep(
|
def _extract_relationships_astgrep(
|
||||||
self,
|
self,
|
||||||
source_code: str,
|
source_code: str,
|
||||||
path: Path,
|
path: Path,
|
||||||
) -> Optional[List[CodeRelationship]]:
|
) -> Optional[List[CodeRelationship]]:
|
||||||
"""Extract Python relationships using ast-grep processor.
|
"""Extract relationships using ast-grep processor.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
source_code: Python source code text
|
source_code: Source code text
|
||||||
path: File path
|
path: File path
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
|
|||||||
@@ -694,13 +694,14 @@ class ChainSearchEngine:
|
|||||||
k: int = 10,
|
k: int = 10,
|
||||||
coarse_k: int = 100,
|
coarse_k: int = 100,
|
||||||
options: Optional[SearchOptions] = None,
|
options: Optional[SearchOptions] = None,
|
||||||
strategy: Optional[Literal["binary", "binary_rerank", "dense_rerank", "staged"]] = None,
|
strategy: Optional[Literal["binary", "binary_rerank", "dense_rerank", "staged", "hybrid"]] = None,
|
||||||
) -> ChainSearchResult:
|
) -> ChainSearchResult:
|
||||||
"""Unified cascade search entry point with strategy selection.
|
"""Unified cascade search entry point with strategy selection.
|
||||||
|
|
||||||
Provides a single interface for cascade search with configurable strategy:
|
Provides a single interface for cascade search with configurable strategy:
|
||||||
- "binary": Uses binary vector coarse ranking + dense fine ranking (fastest)
|
- "binary": Uses binary vector coarse ranking + dense fine ranking (fastest)
|
||||||
- "binary_rerank": Uses binary vector coarse ranking + cross-encoder reranking (best balance)
|
- "binary_rerank": Uses binary vector coarse ranking + cross-encoder reranking (best balance)
|
||||||
|
- "hybrid": Alias for "binary_rerank" (backward compat)
|
||||||
- "dense_rerank": Uses dense vector coarse ranking + cross-encoder reranking
|
- "dense_rerank": Uses dense vector coarse ranking + cross-encoder reranking
|
||||||
- "staged": 4-stage pipeline: binary -> LSP expand -> clustering -> optional rerank
|
- "staged": 4-stage pipeline: binary -> LSP expand -> clustering -> optional rerank
|
||||||
|
|
||||||
@@ -731,7 +732,7 @@ class ChainSearchEngine:
|
|||||||
"""
|
"""
|
||||||
# Strategy priority: parameter > config > default
|
# Strategy priority: parameter > config > default
|
||||||
effective_strategy = strategy
|
effective_strategy = strategy
|
||||||
valid_strategies = ("binary", "binary_rerank", "dense_rerank", "staged")
|
valid_strategies = ("binary", "binary_rerank", "dense_rerank", "staged", "hybrid")
|
||||||
if effective_strategy is None:
|
if effective_strategy is None:
|
||||||
# Not passed via parameter, check config
|
# Not passed via parameter, check config
|
||||||
if self._config is not None:
|
if self._config is not None:
|
||||||
@@ -743,6 +744,10 @@ class ChainSearchEngine:
|
|||||||
if effective_strategy not in valid_strategies:
|
if effective_strategy not in valid_strategies:
|
||||||
effective_strategy = "binary"
|
effective_strategy = "binary"
|
||||||
|
|
||||||
|
# Normalize backward-compat alias
|
||||||
|
if effective_strategy == "hybrid":
|
||||||
|
effective_strategy = "binary_rerank"
|
||||||
|
|
||||||
if effective_strategy == "binary":
|
if effective_strategy == "binary":
|
||||||
return self.binary_cascade_search(query, source_path, k, coarse_k, options)
|
return self.binary_cascade_search(query, source_path, k, coarse_k, options)
|
||||||
elif effective_strategy == "binary_rerank":
|
elif effective_strategy == "binary_rerank":
|
||||||
|
|||||||
140
codex-lens/tests/parsers/test_comparison_js_ts.py
Normal file
140
codex-lens/tests/parsers/test_comparison_js_ts.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
"""Comparison tests for tree-sitter vs ast-grep JS/TS relationship extraction.
|
||||||
|
|
||||||
|
These tests focus on stable, high-signal relationship types used by the
|
||||||
|
static graph pipeline:
|
||||||
|
- IMPORTS
|
||||||
|
- INHERITS
|
||||||
|
|
||||||
|
If ast-grep-py is not installed, tests are skipped.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Set, Tuple
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from codexlens.config import Config
|
||||||
|
from codexlens.entities import CodeRelationship, RelationshipType
|
||||||
|
from codexlens.parsers.treesitter_parser import TreeSitterSymbolParser
|
||||||
|
|
||||||
|
|
||||||
|
SAMPLE_JS_CODE = """
|
||||||
|
import React, { useEffect as useEf } from "react";
|
||||||
|
import { foo } from "./foo";
|
||||||
|
import "./styles.css";
|
||||||
|
const fs = require("fs");
|
||||||
|
|
||||||
|
class Base {}
|
||||||
|
class Child extends Base {
|
||||||
|
method() {
|
||||||
|
console.log("hi");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
SAMPLE_TS_CODE = """
|
||||||
|
import type { Foo } from "pkg";
|
||||||
|
import { bar as baz } from "./bar";
|
||||||
|
|
||||||
|
interface MyInterface extends Foo {}
|
||||||
|
|
||||||
|
class Base {}
|
||||||
|
class Child extends Base {}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def extract_relationship_tuples(
|
||||||
|
relationships: List[CodeRelationship],
|
||||||
|
*,
|
||||||
|
only_types: Set[RelationshipType],
|
||||||
|
) -> Set[Tuple[str, str, str]]:
|
||||||
|
return {
|
||||||
|
(rel.source_symbol, rel.target_symbol, rel.relationship_type.value)
|
||||||
|
for rel in relationships
|
||||||
|
if rel.relationship_type in only_types
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _skip_if_astgrep_unavailable(parser: TreeSitterSymbolParser) -> None:
    """Skip the current test unless an ast-grep processor is ready to run."""
    processor = parser._astgrep_processor  # type: ignore[attr-defined]
    if processor is None or not processor.is_available():
        pytest.skip("ast-grep-py not installed or language not supported")
|
||||||
|
|
||||||
|
|
||||||
|
def test_js_imports_and_inherits_match(tmp_path: Path) -> None:
    """Tree-sitter and ast-grep must report identical JS import/inherit edges."""
    js_file = tmp_path / "sample.js"
    js_file.write_text(SAMPLE_JS_CODE, encoding="utf-8")
    source = js_file.read_text(encoding="utf-8")

    # Baseline backend: plain tree-sitter relationship extraction.
    baseline_config = Config()
    baseline_config.use_astgrep = False
    baseline_parser = TreeSitterSymbolParser("javascript", js_file, config=baseline_config)

    # Candidate backend: ast-grep based extraction (skip when unavailable).
    astgrep_config = Config()
    astgrep_config.use_astgrep = True
    astgrep_parser = TreeSitterSymbolParser("javascript", js_file, config=astgrep_config)
    _skip_if_astgrep_unavailable(astgrep_parser)

    baseline_result = baseline_parser.parse(source, js_file)
    astgrep_result = astgrep_parser.parse(source, js_file)
    assert baseline_result is not None
    assert astgrep_result is not None

    compared_types = {RelationshipType.IMPORTS, RelationshipType.INHERITS}
    baseline_edges = extract_relationship_tuples(
        baseline_result.relationships, only_types=compared_types
    )
    astgrep_edges = extract_relationship_tuples(
        astgrep_result.relationships, only_types=compared_types
    )
    assert astgrep_edges == baseline_edges
|
||||||
|
|
||||||
|
|
||||||
|
def test_ts_imports_match_and_inherits_superset(tmp_path: Path) -> None:
    """TS import edges must match exactly; ast-grep may add inherit edges."""
    ts_file = tmp_path / "sample.ts"
    ts_file.write_text(SAMPLE_TS_CODE, encoding="utf-8")
    source = ts_file.read_text(encoding="utf-8")

    # Baseline backend: plain tree-sitter relationship extraction.
    baseline_config = Config()
    baseline_config.use_astgrep = False
    baseline_parser = TreeSitterSymbolParser("typescript", ts_file, config=baseline_config)

    # Candidate backend: ast-grep based extraction (skip when unavailable).
    astgrep_config = Config()
    astgrep_config.use_astgrep = True
    astgrep_parser = TreeSitterSymbolParser("typescript", ts_file, config=astgrep_config)
    _skip_if_astgrep_unavailable(astgrep_parser)

    baseline_result = baseline_parser.parse(source, ts_file)
    astgrep_result = astgrep_parser.parse(source, ts_file)
    assert baseline_result is not None
    assert astgrep_result is not None

    # Import edges must agree exactly between both backends.
    baseline_imports = extract_relationship_tuples(
        baseline_result.relationships, only_types={RelationshipType.IMPORTS}
    )
    astgrep_imports = extract_relationship_tuples(
        astgrep_result.relationships, only_types={RelationshipType.IMPORTS}
    )
    assert astgrep_imports == baseline_imports

    # Ast-grep may include additional TypeScript inheritance edges (e.g.,
    # interface extends), so tree-sitter's edges are only required to be a
    # subset — but class inheritance must always be present.
    baseline_inherits = extract_relationship_tuples(
        baseline_result.relationships, only_types={RelationshipType.INHERITS}
    )
    astgrep_inherits = extract_relationship_tuples(
        astgrep_result.relationships, only_types={RelationshipType.INHERITS}
    )
    assert baseline_inherits.issubset(astgrep_inherits)
    assert ("Child", "Base", "inherits") in astgrep_inherits
|
||||||
|
|
||||||
@@ -84,6 +84,21 @@ class TestConfigCascadeDefaults:
|
|||||||
# Should keep the default "binary" strategy
|
# Should keep the default "binary" strategy
|
||||||
assert config.cascade_strategy == "binary"
|
assert config.cascade_strategy == "binary"
|
||||||
|
|
||||||
|
def test_hybrid_cascade_strategy_alias_maps_to_binary_rerank(self, temp_config_dir):
|
||||||
|
"""Hybrid is a backward-compat alias for binary_rerank."""
|
||||||
|
config = Config(data_dir=temp_config_dir)
|
||||||
|
settings = {"cascade": {"strategy": "hybrid"}}
|
||||||
|
|
||||||
|
settings_path = config.settings_path
|
||||||
|
settings_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
with open(settings_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(settings, f)
|
||||||
|
|
||||||
|
with patch.object(config, "_apply_env_overrides"):
|
||||||
|
config.load_settings()
|
||||||
|
|
||||||
|
assert config.cascade_strategy == "binary_rerank"
|
||||||
|
|
||||||
def test_staged_config_defaults(self, temp_config_dir):
|
def test_staged_config_defaults(self, temp_config_dir):
|
||||||
"""Staged cascade settings should have correct defaults."""
|
"""Staged cascade settings should have correct defaults."""
|
||||||
config = Config(data_dir=temp_config_dir)
|
config = Config(data_dir=temp_config_dir)
|
||||||
|
|||||||
@@ -115,3 +115,22 @@ def test_staged_env_overrides_invalid_ignored(temp_config_dir: Path) -> None:
|
|||||||
assert config.staged_stage2_mode == "precomputed"
|
assert config.staged_stage2_mode == "precomputed"
|
||||||
assert config.staged_clustering_strategy == "auto"
|
assert config.staged_clustering_strategy == "auto"
|
||||||
assert config.staged_realtime_lsp_timeout_s == 30.0
|
assert config.staged_realtime_lsp_timeout_s == 30.0
|
||||||
|
|
||||||
|
|
||||||
|
def test_cascade_strategy_hybrid_alias_env_override(temp_config_dir: Path) -> None:
    """CASCADE_STRATEGY=hybrid supplied via .env must map to binary_rerank."""
    config = Config(data_dir=temp_config_dir)

    # Provide the legacy alias through the .env file read by load_settings().
    env_path = temp_config_dir / ".env"
    env_path.write_text("CASCADE_STRATEGY=hybrid\n", encoding="utf-8")

    config.load_settings()

    assert config.cascade_strategy == "binary_rerank"
|
||||||
|
|||||||
Reference in New Issue
Block a user