Refactor code structure for improved readability and maintainability

This commit is contained in:
catlog22
2026-02-16 13:09:47 +08:00
parent 111b0f6809
commit 02250bd4dc
22 changed files with 6330 additions and 216 deletions

View File

@@ -1,29 +1,31 @@
---
name: cli-roadmap-plan-agent
description: |
Specialized agent for requirement-level roadmap planning with JSONL output.
Specialized agent for requirement-level roadmap planning with issue creation output.
Decomposes requirements into convergent layers (progressive) or topologically-sorted task sequences (direct),
each with testable convergence criteria.
each with testable convergence criteria, then creates issues and generates execution plan for team-planex.
Core capabilities:
- Dual-mode decomposition: progressive (MVP→iterations) / direct (topological tasks)
- Convergence criteria generation (criteria + verification + definition_of_done)
- CLI-assisted quality validation of decomposition
- JSONL output with self-contained records
- Issue creation via ccw issue create (standard issues-jsonl-schema)
- Execution plan generation with wave groupings + issue dependencies
- Optional codebase context integration
color: green
---
You are a specialized roadmap planning agent that decomposes requirements into self-contained JSONL records with convergence criteria. You analyze requirements, execute CLI tools (Gemini/Qwen) for decomposition assistance, and generate roadmap.jsonl + roadmap.md conforming to the specified mode (progressive or direct).
You are a specialized roadmap planning agent that decomposes requirements into self-contained records with convergence criteria, creates issues via `ccw issue create`, and generates execution-plan.json for team-planex consumption. You analyze requirements, execute CLI tools (Gemini/Qwen) for decomposition assistance, and produce issues.jsonl + execution-plan.json + roadmap.md.
**CRITICAL**: After generating roadmap.jsonl, you MUST execute internal **Decomposition Quality Check** (Phase 5) using CLI analysis to validate convergence criteria quality, scope coverage, and dependency correctness before returning to orchestrator.
**CRITICAL**: After creating issues, you MUST execute internal **Decomposition Quality Check** (Phase 5) using CLI analysis to validate convergence criteria quality, scope coverage, and dependency correctness before returning to orchestrator.
## Output Artifacts
| Artifact | Description |
|----------|-------------|
| `roadmap.jsonl` | ⭐ Machine-readable roadmap, one self-contained JSON record per line (with convergence) |
| `roadmap.md` | ⭐ Human-readable roadmap with tables and convergence details |
| `issues.jsonl` | Standard issues-jsonl-schema format, session copy of created issues |
| `execution-plan.json` | Wave grouping + issue dependencies (team-planex bridge) |
| `roadmap.md` | Human-readable roadmap with issue ID references |
## Input Context
@@ -61,7 +63,9 @@ You are a specialized roadmap planning agent that decomposes requirements into s
}
```
## JSONL Record Schemas
## Internal Record Schemas (CLI Parsing)
These schemas are used internally for parsing CLI decomposition output. They are converted to issues in Phase 4.
### Progressive Mode - Layer Record
@@ -133,12 +137,14 @@ Phase 3: Record Enhancement & Validation
├─ Validate dependency graph (no cycles)
├─ Progressive: verify scope coverage (no overlap, no gaps)
├─ Direct: verify inputs/outputs chain, assign parallel_groups
└─ Generate roadmap.jsonl
└─ Finalize internal records
Phase 4: Human-Readable Output
├─ Generate roadmap.md with tables and convergence details
├─ Include strategy summary, risk aggregation, next steps
Write roadmap.md
Phase 4: Issue Creation & Output Generation ← ⭐ Core change
├─ 4a: Internal records → issue data mapping
├─ 4b: ccw issue create for each item (get formal ISS-xxx IDs)
├─ 4c: Generate execution-plan.json (waves + dependencies)
├─ 4d: Generate issues.jsonl session copy
└─ 4e: Generate roadmap.md with issue ID references
Phase 5: Decomposition Quality Check (MANDATORY)
├─ Execute CLI quality check using Gemini (Qwen fallback)
@@ -556,16 +562,207 @@ function topologicalSort(tasks) {
}
```
### JSONL & Markdown Generation
### Phase 4: Issue Creation & Output Generation
#### 4a: Internal Records → Issue Data Mapping
```javascript
// Generate roadmap.jsonl
function generateJsonl(records) {
return records.map(record => JSON.stringify(record)).join('\n') + '\n'
// Progressive mode: layer → issue data (issues-jsonl-schema)
// Progressive mode: map one internal layer record onto the issues-jsonl-schema shape.
// `timestamp` is part of the call signature but is not embedded in the payload.
function layerToIssue(layer, sessionId, timestamp) {
  const bullets = (items) => items.map((item) => `- ${item}`).join('\n')

  // Markdown context body: every section is separated by a blank line, and the
  // risks section (when present) is appended after the trailing blank line.
  let context = [
    `## Goal\n${layer.goal}`,
    `## Scope\n${bullets(layer.scope)}`,
    `## Excludes\n${bullets(layer.excludes) || 'None'}`,
    `## Convergence Criteria\n${bullets(layer.convergence.criteria)}`,
    `## Verification\n${layer.convergence.verification}`,
    `## Definition of Done\n${layer.convergence.definition_of_done}`
  ].join('\n\n') + '\n\n'
  if (layer.risks.length) {
    context += `## Risks\n${layer.risks
      .map((r) => `- ${r.description} (P:${r.probability} I:${r.impact})`)
      .join('\n')}`
  }

  // Effort → priority mapping (small→4, medium→3, large→2; unknown efforts default to 3).
  const priorityByEffort = { small: 4, medium: 3, large: 2 }

  return {
    title: `[${layer.name}] ${layer.goal}`,
    context,
    priority: priorityByEffort[layer.effort] || 3,
    source: "text",
    tags: ["req-plan", "progressive", layer.name.toLowerCase(), `wave-${getWaveNum(layer)}`],
    affected_components: [],
    extended_context: {
      notes: JSON.stringify({
        session: sessionId,
        strategy: "progressive",
        layer: layer.id,
        wave: getWaveNum(layer),
        effort: layer.effort,
        depends_on_issues: [], // Backfilled after all issues created
        original_id: layer.id
      })
    },
    lifecycle_requirements: {
      test_strategy: "integration",
      regression_scope: "affected",
      acceptance_type: "automated",
      commit_strategy: "per-task"
    }
  }
}

// Derive the 1-based wave number from a layer id shaped like "L<digits>" (L0 → 1).
// Ids that do not match the pattern fall back to wave 1.
function getWaveNum(layer) {
  const parsed = /L(\d+)/.exec(layer.id)
  return parsed ? Number(parsed[1]) + 1 : 1
}
// Direct mode: task → issue data (issues-jsonl-schema)
// Direct mode: map one internal task record onto the issues-jsonl-schema shape.
// `timestamp` is part of the call signature but is not embedded in the payload.
function taskToIssue(task, sessionId, timestamp) {
  const bullets = (items) => items.map((item) => `- ${item}`).join('\n')

  // Starting tasks (no inputs) get an explicit marker instead of an empty list.
  const inputsBlock = task.inputs.length ? bullets(task.inputs) : 'None (starting task)'

  const context = [
    `## Scope\n${task.scope}`,
    `## Inputs\n${inputsBlock}`,
    `## Outputs\n${bullets(task.outputs)}`,
    `## Convergence Criteria\n${bullets(task.convergence.criteria)}`,
    `## Verification\n${task.convergence.verification}`,
    `## Definition of Done\n${task.convergence.definition_of_done}`
  ].join('\n\n')

  // Session metadata tucked into notes so downstream tools can rebuild the DAG.
  const notes = JSON.stringify({
    session: sessionId,
    strategy: "direct",
    task_id: task.id,
    wave: task.parallel_group,
    parallel_group: task.parallel_group,
    depends_on_issues: [], // Backfilled after all issues created
    original_id: task.id
  })

  return {
    title: `[${task.type}] ${task.title}`,
    context,
    priority: 3,
    source: "text",
    tags: ["req-plan", "direct", task.type, `wave-${task.parallel_group}`],
    affected_components: task.outputs,
    extended_context: { notes },
    lifecycle_requirements: {
      // Testing tasks verify at unit level; all other task types integrate.
      test_strategy: task.type === 'testing' ? 'unit' : 'integration',
      regression_scope: "affected",
      acceptance_type: "automated",
      commit_strategy: "per-task"
    }
  }
}
```
#### 4b: Create Issues via ccw issue create
```javascript
// Create issues sequentially (each `ccw issue create` returns its formal ISS-xxx ID)
const issueIdMap = {} // originalId → ISS-xxx

// POSIX-safe single-quote escaping: close the quote, emit \', reopen the quote.
// FIX: the previous version interpolated raw JSON inside '...' despite a comment
// claiming heredoc use — any apostrophe in the payload would break the command.
const shellQuote = (s) => `'${s.replace(/'/g, `'\\''`)}'`

for (const record of records) {
  const issueData = selected_mode === 'progressive'
    ? layerToIssue(record, sessionId, timestamp)
    : taskToIssue(record, sessionId, timestamp)
  // --json makes the CLI print the created record so the formal ID can be parsed out
  const createResult = Bash(`ccw issue create --data ${shellQuote(JSON.stringify(issueData))} --json`)
  const created = JSON.parse(createResult.trim())
  issueIdMap[record.id] = created.id
}

// Backfill depends_on_issues into extended_context.notes (dependencies can only be
// mapped to ISS-xxx IDs after every issue has been created)
for (const record of records) {
  const issueId = issueIdMap[record.id]
  const deps = record.depends_on.map(d => issueIdMap[d]).filter(Boolean)
  if (deps.length > 0) {
    const notes = JSON.stringify({
      ...JSON.parse(/* read current notes from issue */),
      depends_on_issues: deps
    })
    Bash(`ccw issue update ${issueId} --notes ${shellQuote(notes)}`)
  }
}
```
#### 4c: Generate execution-plan.json
```javascript
// Build the execution plan consumed by team-planex: ordered waves of issue ids
// plus a per-issue dependency DAG.
// records      — internal layer/task records (order defines wave order in progressive mode)
// issueIdMap   — originalId → formal ISS-xxx id
// Returns a plain object ready to be serialized as execution-plan.json.
function generateExecutionPlan(records, issueIdMap, sessionId, requirement, selectedMode) {
  const issueIds = records.map(r => issueIdMap[r.id])

  // Compute waves
  let waves
  if (selectedMode === 'progressive') {
    // Progressive: each layer = one wave
    waves = records.map((r, i) => ({
      wave: i + 1,
      label: r.name,
      issue_ids: [issueIdMap[r.id]],
      // FIX: findIndex returns -1 for a dep id not present in records, which
      // previously produced a bogus "wave 0" entry; unresolved deps are dropped.
      depends_on_waves: r.depends_on.length > 0
        ? [...new Set(
            r.depends_on
              .map(d => records.findIndex(x => x.id === d) + 1)
              .filter(w => w > 0)
          )]
        : []
    }))
  } else {
    // Direct: parallel_group maps to wave
    const groups = new Map()
    records.forEach(r => {
      const g = r.parallel_group
      if (!groups.has(g)) groups.set(g, [])
      groups.get(g).push(r)
    })
    waves = [...groups.entries()]
      .sort(([a], [b]) => a - b)
      .map(([groupNum, groupRecords]) => ({
        wave: groupNum,
        label: `Group ${groupNum}`,
        issue_ids: groupRecords.map(r => issueIdMap[r.id]),
        depends_on_waves: groupNum > 1
          ? [groupNum - 1] // Simplified: each wave depends on previous
          : []
      }))
  }

  // Build issue dependency DAG; deps missing from issueIdMap are silently dropped
  const issueDependencies = {}
  records.forEach(r => {
    const deps = r.depends_on.map(d => issueIdMap[d]).filter(Boolean)
    if (deps.length > 0) {
      issueDependencies[issueIdMap[r.id]] = deps
    }
  })

  return {
    session_id: sessionId,
    requirement: requirement,
    strategy: selectedMode,
    created_at: new Date().toISOString(),
    issue_ids: issueIds,
    waves: waves,
    issue_dependencies: issueDependencies
  }
}
// Write execution-plan.json
// Serialize the plan (pretty-printed, 2-space indent) into the session folder
// for team-planex consumption.
const executionPlan = generateExecutionPlan(records, issueIdMap, sessionId, requirement, selectedMode)
Write(`${sessionFolder}/execution-plan.json`, JSON.stringify(executionPlan, null, 2))
```
#### 4d: Generate issues.jsonl Session Copy
```javascript
// Read freshly created issues and write session copy
// One `ccw issue status --json` payload per line, joined into JSONL with a
// trailing newline.
// NOTE(review): assumes the CLI prints each issue as single-line JSON — confirm;
// a pretty-printed payload would break the one-record-per-line JSONL invariant.
const sessionIssues = []
for (const originalId of Object.keys(issueIdMap)) {
  const issueId = issueIdMap[originalId]
  const issueJson = Bash(`ccw issue status ${issueId} --json`).trim()
  sessionIssues.push(issueJson)
}
Write(`${sessionFolder}/issues.jsonl`, sessionIssues.join('\n') + '\n')
```
#### 4e: Roadmap Markdown Generation (with Issue ID References)
```javascript
// Generate roadmap.md for progressive mode
function generateProgressiveRoadmapMd(layers, input) {
function generateProgressiveRoadmapMd(layers, issueIdMap, input) {
return `# 需求路线图
**Session**: ${input.session.id}
@@ -582,13 +779,19 @@ function generateProgressiveRoadmapMd(layers, input) {
## 路线图概览
| 层级 | 名称 | 目标 | 工作量 | 依赖 |
|------|------|------|--------|------|
${layers.map(l => `| ${l.id} | ${l.name} | ${l.goal} | ${l.effort} | ${l.depends_on.length ? l.depends_on.join(', ') : '-'} |`).join('\n')}
| 层级 | 名称 | 目标 | 工作量 | 依赖 | Issue ID |
|------|------|------|--------|------|----------|
${layers.map(l => `| ${l.id} | ${l.name} | ${l.goal} | ${l.effort} | ${l.depends_on.length ? l.depends_on.join(', ') : '-'} | ${issueIdMap[l.id]} |`).join('\n')}
## Issue Mapping
| Wave | Issue ID | Title | Priority |
|------|----------|-------|----------|
${layers.map(l => `| ${getWaveNum(l)} | ${issueIdMap[l.id]} | [${l.name}] ${l.goal} | ${({small: 4, medium: 3, large: 2})[l.effort] || 3} |`).join('\n')}
## 各层详情
${layers.map(l => `### ${l.id}: ${l.name}
${layers.map(l => `### ${l.id}: ${l.name} (${issueIdMap[l.id]})
**目标**: ${l.goal}
@@ -597,32 +800,47 @@ ${layers.map(l => `### ${l.id}: ${l.name}
**排除**: ${l.excludes.join('、') || '无'}
**收敛标准**:
${l.convergence.criteria.map(c => `- ${c}`).join('\n')}
- 🔍 **验证方法**: ${l.convergence.verification}
- 🎯 **完成定义**: ${l.convergence.definition_of_done}
${l.convergence.criteria.map(c => `- ${c}`).join('\n')}
- **验证方法**: ${l.convergence.verification}
- **完成定义**: ${l.convergence.definition_of_done}
**风险项**: ${l.risks.length ? l.risks.map(r => `\n- ⚠️ ${r.description} (概率: ${r.probability}, 影响: ${r.impact}, 缓解: ${r.mitigation})`).join('') : '无'}
**风险项**: ${l.risks.length ? l.risks.map(r => `\n- ${r.description} (概率: ${r.probability}, 影响: ${r.impact}, 缓解: ${r.mitigation})`).join('') : '无'}
**工作量**: ${l.effort}
`).join('\n---\n\n')}
## 风险汇总
${layers.flatMap(l => l.risks.map(r => `- **${l.id}**: ${r.description} (概率: ${r.probability}, 影响: ${r.impact})`)).join('\n') || '无已识别风险'}
${layers.flatMap(l => l.risks.map(r => `- **${l.id}** (${issueIdMap[l.id]}): ${r.description} (概率: ${r.probability}, 影响: ${r.impact})`)).join('\n') || '无已识别风险'}
## 下一步
## Next Steps
每个层级可独立执行:
\`\`\`bash
/workflow:lite-plan "${layers[0]?.name}: ${layers[0]?.scope.join(', ')}"
### 使用 team-planex 执行全部波次
\`\`\`
Skill(skill="team-planex", args="--plan ${input.session.folder}/execution-plan.json")
\`\`\`
路线图 JSONL 文件: \`${input.session.folder}/roadmap.jsonl\`
### 按波次逐步执行
\`\`\`
${layers.map(l => `# Wave ${getWaveNum(l)}: ${l.name}\nSkill(skill="team-planex", args="${issueIdMap[l.id]}")`).join('\n')}
\`\`\`
路线图文件: \`${input.session.folder}/\`
- issues.jsonl (标准 issue 格式)
- execution-plan.json (波次编排)
`
}
// Generate roadmap.md for direct mode
function generateDirectRoadmapMd(tasks, input) {
function generateDirectRoadmapMd(tasks, issueIdMap, input) {
// Group tasks by parallel_group for wave display
const groups = new Map()
tasks.forEach(t => {
const g = t.parallel_group
if (!groups.has(g)) groups.set(g, [])
groups.get(g).push(t)
})
return `# 需求路线图
**Session**: ${input.session.id}
@@ -637,13 +855,19 @@ function generateDirectRoadmapMd(tasks, input) {
## 任务序列
| 组 | ID | 标题 | 类型 | 依赖 |
|----|-----|------|------|------|
${tasks.map(t => `| ${t.parallel_group} | ${t.id} | ${t.title} | ${t.type} | ${t.depends_on.length ? t.depends_on.join(', ') : '-'} |`).join('\n')}
| 组 | ID | 标题 | 类型 | 依赖 | Issue ID |
|----|-----|------|------|------|----------|
${tasks.map(t => `| ${t.parallel_group} | ${t.id} | ${t.title} | ${t.type} | ${t.depends_on.length ? t.depends_on.join(', ') : '-'} | ${issueIdMap[t.id]} |`).join('\n')}
## Issue Mapping
| Wave | Issue ID | Title | Priority |
|------|----------|-------|----------|
${tasks.map(t => `| ${t.parallel_group} | ${issueIdMap[t.id]} | [${t.type}] ${t.title} | 3 |`).join('\n')}
## 各任务详情
${tasks.map(t => `### ${t.id}: ${t.title}
${tasks.map(t => `### ${t.id}: ${t.title} (${issueIdMap[t.id]})
**类型**: ${t.type} | **并行组**: ${t.parallel_group}
@@ -653,19 +877,28 @@ ${tasks.map(t => `### ${t.id}: ${t.title}
**输出**: ${t.outputs.join(', ')}
**收敛标准**:
${t.convergence.criteria.map(c => `- ${c}`).join('\n')}
- 🔍 **验证方法**: ${t.convergence.verification}
- 🎯 **完成定义**: ${t.convergence.definition_of_done}
${t.convergence.criteria.map(c => `- ${c}`).join('\n')}
- **验证方法**: ${t.convergence.verification}
- **完成定义**: ${t.convergence.definition_of_done}
`).join('\n---\n\n')}
## 下一步
## Next Steps
每个任务可独立执行:
\`\`\`bash
/workflow:lite-plan "${tasks[0]?.title}: ${tasks[0]?.scope}"
### 使用 team-planex 执行全部波次
\`\`\`
Skill(skill="team-planex", args="--plan ${input.session.folder}/execution-plan.json")
\`\`\`
路线图 JSONL 文件: \`${input.session.folder}/roadmap.jsonl\`
### 按波次逐步执行
\`\`\`
${[...groups.entries()].sort(([a], [b]) => a - b).map(([g, ts]) =>
`# Wave ${g}: Group ${g}\nSkill(skill="team-planex", args="${ts.map(t => issueIdMap[t.id]).join(' ')}")`
).join('\n')}
\`\`\`
路线图文件: \`${input.session.folder}/\`
- issues.jsonl (标准 issue 格式)
- execution-plan.json (波次编排)
`
}
```
@@ -731,17 +964,17 @@ function manualDirectDecomposition(requirement, context) {
### Overview
After generating roadmap.jsonl, **MUST** execute CLI quality check before returning to orchestrator.
After creating issues and generating output files, **MUST** execute CLI quality check before returning to orchestrator.
### Quality Dimensions
| Dimension | Check Criteria | Critical? |
|-----------|---------------|-----------|
| **Requirement Coverage** | All aspects of original requirement addressed in layers/tasks | Yes |
| **Requirement Coverage** | All aspects of original requirement addressed in issues | Yes |
| **Convergence Quality** | criteria testable, verification executable, DoD business-readable | Yes |
| **Scope Integrity** | Progressive: no overlap/gaps; Direct: inputs/outputs chain valid | Yes |
| **Dependency Correctness** | No circular deps, proper ordering | Yes |
| **Effort Balance** | No single layer/task disproportionately large | No |
| **Dependency Correctness** | No circular deps, proper ordering, issue dependencies match | Yes |
| **Effort Balance** | No single issue disproportionately large | No |
### CLI Quality Check Command
@@ -753,14 +986,17 @@ Success: All quality dimensions pass
ORIGINAL REQUIREMENT:
${requirement}
ROADMAP (${selected_mode} mode):
${roadmapJsonlContent}
ISSUES CREATED (${selected_mode} mode):
${issuesJsonlContent}
EXECUTION PLAN:
${JSON.stringify(executionPlan, null, 2)}
TASK:
• Requirement Coverage: Does the roadmap address ALL aspects of the requirement?
• Requirement Coverage: Does the decomposition address ALL aspects of the requirement?
• Convergence Quality: Are criteria testable? Is verification executable? Is DoD business-readable?
• Scope Integrity: ${selected_mode === 'progressive' ? 'No scope overlap between layers, no feature gaps' : 'Inputs/outputs chain is valid, parallel groups are correct'}
• Dependency Correctness: No circular dependencies
• Dependency Correctness: No circular dependencies, wave ordering correct
• Effort Balance: No disproportionately large items
MODE: analysis
@@ -791,10 +1027,10 @@ CONSTRAINTS: Read-only validation, do not modify files
|-----------|----------------|
| Vague criteria | Replace with specific, testable conditions |
| Technical DoD | Rewrite in business language |
| Missing scope items | Add to appropriate layer/task |
| Missing scope items | Add to appropriate issue context |
| Effort imbalance | Suggest split (report to orchestrator) |
After fixes, update `roadmap.jsonl` and `roadmap.md`.
After fixes, update issues via `ccw issue update` and regenerate `issues.jsonl` + `roadmap.md`.
## Error Handling
@@ -812,19 +1048,36 @@ try {
: manualDirectDecomposition(requirement, exploration_context)
}
}
// Issue creation failure: retry once, then skip and report
for (const record of records) {
try {
// create issue...
} catch (error) {
try {
// retry once...
} catch {
// Log error, skip this record, continue with remaining
}
}
}
```
## Key Reminders
**ALWAYS**:
- Parse CLI output into structured records with full convergence fields
- Validate all records against schema before writing JSONL
- Validate all records against schema before creating issues
- Check for circular dependencies
- Ensure convergence criteria are testable (not vague)
- Ensure verification is executable (commands or explicit steps)
- Ensure definition_of_done uses business language
- Create issues via `ccw issue create` (get formal ISS-xxx IDs)
- Generate execution-plan.json with correct wave groupings
- Generate issues.jsonl session copy
- Generate roadmap.md with issue ID references
- Run Phase 5 quality check before returning
- Write both roadmap.jsonl AND roadmap.md
- Write all three output files: issues.jsonl, execution-plan.json, roadmap.md
**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls
@@ -834,4 +1087,5 @@ try {
- Create circular dependencies
- Skip convergence validation
- Skip Phase 5 quality check
- Return without writing both output files
- Return without writing all three output files
- Generate roadmap.jsonl (deprecated, replaced by issues.jsonl + execution-plan.json)

View File

@@ -1,6 +1,6 @@
---
name: req-plan-with-file
description: Requirement-level progressive roadmap planning with JSONL output. Decomposes requirements into convergent layers (MVP→iterations) or topologically-sorted task sequences, each with testable completion criteria.
description: Requirement-level progressive roadmap planning with issue creation. Decomposes requirements into convergent layers or task sequences, creates issues via ccw issue create, and generates execution-plan.json for team-planex consumption.
argument-hint: "[-y|--yes] [-c|--continue] [-m|--mode progressive|direct|auto] \"requirement description\""
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
---
@@ -31,18 +31,18 @@ When `--yes` or `-y`: Auto-confirm strategy selection, use recommended mode, ski
**Context Source**: cli-explore-agent (optional) + requirement analysis
**Output Directory**: `.workflow/.req-plan/{session-id}/`
**Core Innovation**: JSONL roadmap where each record is self-contained + has convergence criteria, independently executable via lite-plan
**Core Innovation**: Requirement decomposition → issue creation → execution-plan.json for team-planex consumption. Each issue is standard issues-jsonl-schema format, bridging req-plan to team-planex execution pipeline.
## Overview
Requirement-level layered roadmap planning command. Decomposes a requirement into **convergent layers or task sequences**, each record containing explicit completion criteria (convergence), independently executable via `lite-plan`.
Requirement-level layered roadmap planning command. Decomposes a requirement into **convergent layers or task sequences**, creates issues via `ccw issue create`, and generates execution-plan.json for team-planex consumption.
**Dual Modes**:
- **Progressive**: Layered MVP→iterations, suitable for high-uncertainty requirements (validate first, then refine)
- **Direct**: Topologically-sorted task sequence, suitable for low-uncertainty requirements (clear tasks, directly ordered)
- **Auto**: Automatically selects based on uncertainty level
**Core Workflow**: Requirement Understanding → Strategy Selection → Context Collection (optional) → Decomposition → Validation → Output
**Core Workflow**: Requirement Understanding → Strategy Selection → Context Collection (optional) → Decomposition + Issue Creation → Validation → team-planex Handoff
```
┌─────────────────────────────────────────────────────────────────────────┐
@@ -62,16 +62,18 @@ Requirement-level layered roadmap planning command. Decomposes a requirement int
│ ├─ Has codebase → cli-explore-agent explores relevant modules │
│ └─ No codebase → skip, pure requirement decomposition │
│ │
│ Phase 3: Decomposition Execution (cli-roadmap-plan-agent)
│ Phase 3: Decomposition & Issue Creation (cli-roadmap-plan-agent) │
│ ├─ Progressive: define 2-4 layers, each with full convergence │
│ ├─ Direct: vertical slicing + topological sort, each with convergence│
Generate roadmap.jsonl (one self-contained record per line)
│  ├─ Create issues via ccw issue create (ISS-xxx IDs)                   │
│ ├─ Generate execution-plan.json (waves + dependencies) │
│ ├─ Generate issues.jsonl (session copy) │
│ └─ Generate roadmap.md (with issue ID references) │
│ │
│ Phase 4: Interactive Validation & Final Output
│ Phase 4: Validation & team-planex Handoff
│ ├─ Display decomposition results (tabular + convergence criteria) │
│ ├─ User feedback loop (up to 5 rounds) │
Generate final roadmap.md
│ └─ Next steps: layer-by-layer lite-plan / create issue / export │
│  └─ Next steps: team-planex full execution / wave-by-wave / view       │
│ │
└─────────────────────────────────────────────────────────────────────────┘
```
@@ -80,8 +82,9 @@ Requirement-level layered roadmap planning command. Decomposes a requirement int
```
.workflow/.req-plan/RPLAN-{slug}-{YYYY-MM-DD}/
├── roadmap.md # Human-readable roadmap
├── roadmap.jsonl # ⭐ Machine-readable, one self-contained record per line (with convergence)
├── roadmap.md # Human-readable roadmap with issue ID references
├── issues.jsonl # Standard issues-jsonl-schema format (session copy)
├── execution-plan.json # Wave grouping + issue dependencies (team-planex bridge)
├── strategy-assessment.json # Strategy assessment result
└── exploration-codebase.json # Codebase context (optional)
```
@@ -89,10 +92,11 @@ Requirement-level layered roadmap planning command. Decomposes a requirement int
| File | Phase | Description |
|------|-------|-------------|
| `strategy-assessment.json` | 1 | Uncertainty analysis + mode recommendation + extracted goal/constraints/stakeholders |
| `roadmap.md` (skeleton) | 1 | Initial skeleton with placeholders, finalized in Phase 4 |
| `roadmap.md` (skeleton) | 1 | Initial skeleton with placeholders, finalized in Phase 3 |
| `exploration-codebase.json` | 2 | Codebase context: relevant modules, patterns, integration points (only when codebase exists) |
| `roadmap.jsonl` | 3 | One self-contained JSON record per line with convergence criteria |
| `roadmap.md` (final) | 4 | Human-readable roadmap with tabular display + convergence details, revised per user feedback |
| `issues.jsonl` | 3 | Standard issues-jsonl-schema records, one per line (session copy of created issues) |
| `execution-plan.json` | 3 | Wave grouping with issue dependencies for team-planex consumption |
| `roadmap.md` (final) | 3 | Human-readable roadmap with issue ID references, convergence details, team-planex execution guide |
**roadmap.md template**:
@@ -137,55 +141,65 @@ Requirement-level layered roadmap planning command. Decomposes a requirement int
## JSONL Schema Design
### Convergence Criteria (convergence field)
### Issue Format (issues.jsonl)
Each JSONL record's `convergence` object contains three levels:
Each line in `issues.jsonl` follows the standard `issues-jsonl-schema.json` (see `.ccw/workflows/cli-templates/schemas/issues-jsonl-schema.json`).
| Field | Purpose | Requirement |
|-------|---------|-------------|
| `criteria[]` | List of checkable specific conditions | **Testable** (can be written as assertions or manual steps) |
| `verification` | How to verify these conditions | **Executable** (command, script, or explicit steps) |
| `definition_of_done` | One-sentence completion definition | **Business language** (non-technical person can judge) |
**Key fields per issue**:
### Progressive Mode
| Field | Source | Description |
|-------|--------|-------------|
| `id` | `ccw issue create` | Formal ISS-YYYYMMDD-NNN ID |
| `title` | Layer/task mapping | `[LayerName] goal` or `[TaskType] title` |
| `context` | Convergence fields | Markdown with goal, scope, convergence criteria, verification, DoD |
| `priority` | Effort mapping | small→4, medium→3, large→2 |
| `source` | Fixed | `"text"` |
| `tags` | Auto-generated | `["req-plan", mode, name/type, "wave-N"]` |
| `extended_context.notes` | Metadata JSON | session, strategy, original_id, wave, depends_on_issues |
| `lifecycle_requirements` | Fixed | test_strategy, regression_scope, acceptance_type, commit_strategy |
Each line = one layer. Layer naming convention:
### Execution Plan Format (execution-plan.json)
| Layer | Name | Typical Goal |
|-------|------|--------------|
| L0 | MVP | Minimum viable closed loop, core path works end-to-end |
| L1 | Usable | Key user paths refined, basic error handling |
| L2 | Refined | Edge case handling, performance optimization, security hardening |
| L3 | Optimized | Advanced features, observability, operations support |
**Schema**: `id, name, goal, scope[], excludes[], convergence{}, risks[], effort, depends_on[]`
```jsonl
{"id":"L0","name":"MVP","goal":"Minimum viable closed loop","scope":["User registration and login","Basic CRUD"],"excludes":["OAuth","2FA"],"convergence":{"criteria":["End-to-end register→login→operate flow works","Core API returns correct responses"],"verification":"curl/Postman manual testing or smoke test script","definition_of_done":"New user can complete the full flow of register→login→perform one core operation"},"risks":[{"description":"JWT library selection needs validation","probability":"Medium","impact":"Medium","mitigation":"N/A"}],"effort":"medium","depends_on":[]}
{"id":"L1","name":"Usable","goal":"Complete key user paths","scope":["Password reset","Input validation","Error messages"],"excludes":["Audit logs","Rate limiting"],"convergence":{"criteria":["All form fields have frontend+backend validation","Password reset email can be sent and reset completed","Error scenarios show user-friendly messages"],"verification":"Unit tests cover validation logic + manual test of reset flow","definition_of_done":"Users have a clear recovery path when encountering input errors or forgotten passwords"},"risks":[],"effort":"medium","depends_on":["L0"]}
```json
{
"session_id": "RPLAN-{slug}-{date}",
"requirement": "Original requirement description",
"strategy": "progressive|direct",
"created_at": "ISO 8601",
"issue_ids": ["ISS-xxx", "ISS-yyy"],
"waves": [
{
"wave": 1,
"label": "MVP",
"issue_ids": ["ISS-xxx"],
"depends_on_waves": []
},
{
"wave": 2,
"label": "Usable",
"issue_ids": ["ISS-yyy"],
"depends_on_waves": [1]
}
],
"issue_dependencies": {
"ISS-yyy": ["ISS-xxx"]
}
}
```
**Constraints**: 2-4 layers, L0 must be a self-contained closed loop with no dependencies, each feature belongs to exactly ONE layer (no scope overlap).
**Wave mapping**:
- Progressive mode: each layer → one wave (L0→Wave 1, L1→Wave 2, ...)
- Direct mode: each parallel_group → one wave (group 1→Wave 1, group 2→Wave 2, ...)
### Direct Mode
### Convergence Criteria (in issue context)
Each line = one task. Task type convention:
Each issue's `context` field contains convergence information:
| Type | Use Case |
|------|----------|
| infrastructure | Data models, configuration, scaffolding |
| feature | API, UI, business logic implementation |
| enhancement | Validation, error handling, edge cases |
| testing | Unit tests, integration tests, E2E |
**Schema**: `id, title, type, scope, inputs[], outputs[], convergence{}, depends_on[], parallel_group`
```jsonl
{"id":"T1","title":"Establish data model","type":"infrastructure","scope":"DB schema + TypeScript types","inputs":[],"outputs":["schema.prisma","types/user.ts"],"convergence":{"criteria":["Migration executes without errors","TypeScript types compile successfully","Fields cover all business entities"],"verification":"npx prisma migrate dev && npx tsc --noEmit","definition_of_done":"Database schema migrates correctly, type definitions can be referenced by other modules"},"depends_on":[],"parallel_group":1}
{"id":"T2","title":"Implement core API","type":"feature","scope":"CRUD endpoints for User","inputs":["schema.prisma","types/user.ts"],"outputs":["routes/user.ts","controllers/user.ts"],"convergence":{"criteria":["GET/POST/PUT/DELETE return correct status codes","Request/response conforms to schema","No N+1 queries"],"verification":"jest --testPathPattern=user.test.ts","definition_of_done":"All User CRUD endpoints pass integration tests"},"depends_on":["T1"],"parallel_group":2}
```
**Constraints**: Inputs must come from preceding task outputs or existing resources, tasks in same parallel_group must be truly independent, no circular dependencies.
| Section | Purpose | Requirement |
|---------|---------|-------------|
| `## Convergence Criteria` | List of checkable specific conditions | **Testable** (can be written as assertions or manual steps) |
| `## Verification` | How to verify these conditions | **Executable** (command, script, or explicit steps) |
| `## Definition of Done` | One-sentence completion definition | **Business language** (non-technical person can judge) |
## Implementation
@@ -403,13 +417,13 @@ Bash(`mkdir -p ${sessionFolder}`)
- When codebase exists, exploration-codebase.json generated
- When no codebase, skipped and logged
### Phase 3: Decomposition Execution
### Phase 3: Decomposition & Issue Creation
**Objective**: Execute requirement decomposition via `cli-roadmap-plan-agent`, generating roadmap.jsonl + roadmap.md.
**Objective**: Execute requirement decomposition via `cli-roadmap-plan-agent`, creating issues and generating execution-plan.json + issues.jsonl + roadmap.md.
**Prerequisites**: Phase 1, Phase 2 complete. Strategy selected. Context collected (if applicable).
**Agent**: `cli-roadmap-plan-agent` (dedicated requirement roadmap planning agent, supports CLI-assisted decomposition + built-in quality checks)
**Agent**: `cli-roadmap-plan-agent` (dedicated requirement roadmap planning agent, supports CLI-assisted decomposition + issue creation + built-in quality checks)
**Steps**:
@@ -429,7 +443,7 @@ Bash(`mkdir -p ${sessionFolder}`)
- Phase 1: Context loading + requirement analysis
- Phase 2: CLI-assisted decomposition (Gemini → Qwen → manual fallback)
- Phase 3: Record enhancement + validation (schema compliance, dependency checks, convergence quality)
- Phase 4: Generate roadmap.jsonl + roadmap.md
- Phase 4: Issue creation + output generation (ccw issue create → execution-plan.json → issues.jsonl + roadmap.md)
- Phase 5: CLI decomposition quality check (**MANDATORY** - requirement coverage, convergence criteria quality, dependency correctness)
```javascript
@@ -454,14 +468,23 @@ Bash(`mkdir -p ${sessionFolder}`)
? `File: ${sessionFolder}/exploration-codebase.json\n${JSON.stringify(explorationContext, null, 2)}`
: 'No codebase detected - pure requirement decomposition'}
### Issue Creation
- Use \`ccw issue create\` for each decomposed item
- Issue format: issues-jsonl-schema (id, title, status, priority, context, source, tags, extended_context)
- Write \`execution-plan.json\` with wave groupings + issue dependencies
- Write \`issues.jsonl\` session copy
- Update \`roadmap.md\` with issue ID references
### CLI Configuration
- Primary tool: gemini
- Fallback: qwen
- Timeout: 60000ms
### Expected Output
1. **${sessionFolder}/roadmap.jsonl** - One JSON record per line with convergence field
2. **${sessionFolder}/roadmap.md** - Human-readable roadmap with tables and convergence details
1. **${sessionFolder}/issues.jsonl** - Session copy of created issues (standard issues-jsonl-schema)
2. **${sessionFolder}/execution-plan.json** - Wave grouping + issue dependencies
3. **${sessionFolder}/roadmap.md** - Human-readable roadmap with issue references
4. Issues created in \`.workflow/issues/issues.jsonl\` via ccw issue create
### Mode-Specific Requirements
@@ -487,7 +510,7 @@ Bash(`mkdir -p ${sessionFolder}`)
1. Analyze requirement and build decomposition context
2. Execute CLI-assisted decomposition (Gemini, fallback Qwen)
3. Parse output, validate records, enhance convergence quality
4. Write roadmap.jsonl + roadmap.md
4. Create issues via ccw issue create, generate execution-plan.json + issues.jsonl + roadmap.md
5. Execute mandatory quality check (Phase 5)
6. Return brief completion summary
`
@@ -495,54 +518,62 @@ Bash(`mkdir -p ${sessionFolder}`)
```
**Success Criteria**:
- roadmap.jsonl generated, each line independently JSON.parse-able
- roadmap.md generated (follows template in Output section)
- Each record contains convergence (criteria + verification + definition_of_done)
- Issues created via `ccw issue create`, each with formal ISS-xxx ID
- issues.jsonl generated, each line independently JSON.parse-able, conforms to issues-jsonl-schema
- execution-plan.json generated with correct wave groupings and issue dependencies
- roadmap.md generated with issue ID references
- Agent's internal quality check passed
- No circular dependencies
- Progressive: 2-4 layers, no scope overlap
- Direct: tasks have explicit inputs/outputs, parallel_group assigned
### Phase 4: Interactive Validation & Final Output
### Phase 4: Validation & team-planex Handoff
**Objective**: Display decomposition results, collect user feedback, generate final artifacts.
**Objective**: Display decomposition results, collect user feedback, provide team-planex execution options.
**Prerequisites**: Phase 3 complete, roadmap.jsonl generated.
**Prerequisites**: Phase 3 complete, issues created, execution-plan.json generated.
**Steps**:
1. **Display Decomposition Results** (tabular format)
```javascript
// Read execution plan for display
const executionPlan = JSON.parse(Read(`${sessionFolder}/execution-plan.json`))
const issueIds = executionPlan.issue_ids
const waves = executionPlan.waves
```
**Progressive Mode**:
```markdown
## Roadmap Overview
| Layer | Name | Goal | Scope | Effort | Dependencies |
|-------|------|------|-------|--------|--------------|
| L0 | MVP | ... | ... | medium | - |
| L1 | Usable | ... | ... | medium | L0 |
| Wave | Issue ID | Name | Goal | Priority |
|------|----------|------|------|----------|
| 1 | ISS-xxx | MVP | ... | 2 |
| 2 | ISS-yyy | Usable | ... | 3 |
### Convergence Criteria
**L0 - MVP**:
- Criteria: [criteria list]
- 🔍 Verification: [verification]
- 🎯 Definition of Done: [definition_of_done]
**Wave 1 - MVP (ISS-xxx)**:
- Criteria: [criteria list]
- Verification: [verification]
- Definition of Done: [definition_of_done]
```
**Direct Mode**:
```markdown
## Task Sequence
| Group | ID | Title | Type | Dependencies |
|-------|----|-------|------|--------------|
| 1 | T1 | ... | infrastructure | - |
| 2 | T2 | ... | feature | T1 |
| Wave | Issue ID | Title | Type | Dependencies |
|------|----------|-------|------|--------------|
| 1 | ISS-xxx | ... | infrastructure | - |
| 2 | ISS-yyy | ... | feature | ISS-xxx |
### Convergence Criteria
**T1 - Establish Data Model**:
- Criteria: [criteria list]
- 🔍 Verification: [verification]
- 🎯 Definition of Done: [definition_of_done]
**Wave 1 - ISS-xxx**:
- Criteria: [criteria list]
- Verification: [verification]
- Definition of Done: [definition_of_done]
```
2. **User Feedback Loop** (up to 5 rounds, skipped when autoYes)
@@ -560,8 +591,8 @@ Bash(`mkdir -p ${sessionFolder}`)
header: "Feedback",
multiSelect: false,
options: [
{ label: "Approve", description: "Decomposition is reasonable, generate final artifacts" },
{ label: "Adjust Scope", description: "Some layer/task scopes need adjustment" },
{ label: "Approve", description: "Decomposition is reasonable, proceed to next steps" },
{ label: "Adjust Scope", description: "Some issue scopes need adjustment" },
{ label: "Modify Convergence", description: "Convergence criteria are not specific or testable enough" },
{ label: "Re-decompose", description: "Overall strategy or layering approach needs change" }
]
@@ -578,54 +609,20 @@ Bash(`mkdir -p ${sessionFolder}`)
}
```
3. **Finalize roadmap.md** (populate template from Output section with actual data)
```javascript
const roadmapMd = `
# Requirement Roadmap
**Session**: ${sessionId}
**Requirement**: ${requirement}
**Strategy**: ${selectedMode}
**Generated**: ${getUtc8ISOString()}
## Strategy Assessment
- Uncertainty level: ${strategy.uncertainty_level}
- Decomposition mode: ${selectedMode}
## Roadmap
${generateRoadmapTable(items, selectedMode)}
## Convergence Criteria Details
${items.map(item => generateConvergenceSection(item, selectedMode)).join('\n\n')}
## Risk Items
${generateRiskSection(items)}
## Next Steps
Each layer/task can be executed independently:
\`\`\`bash
/workflow:lite-plan "${items[0].name || items[0].title}: ${items[0].scope}"
\`\`\`
Roadmap JSONL file: \`${sessionFolder}/roadmap.jsonl\`
`
Write(`${sessionFolder}/roadmap.md`, roadmapMd)
```
4. **Post-Completion Options**
3. **Post-Completion Options**
```javascript
if (!autoYes) {
AskUserQuestion({
questions: [{
question: "Roadmap generated. Next step:",
question: `路线图已生成,${issueIds.length} 个 issues 已创建。下一步:`,
header: "Next Step",
multiSelect: false,
options: [
{ label: "Execute First Layer", description: `Launch lite-plan to execute ${items[0].id}` },
{ label: "Create Issue", description: "Create GitHub Issue based on roadmap" },
{ label: "Export Report", description: "Generate standalone shareable roadmap report" },
{ label: "Done", description: "Save roadmap only, execute later" }
{ label: "Execute with team-planex", description: `启动 team-planex 执行全部 ${issueIds.length} 个 issues(${waves.length} 个波次)` },
{ label: "Execute first wave", description: `仅执行 Wave 1: ${waves[0].label}` },
{ label: "View issues", description: "查看已创建的 issue 详情" },
{ label: "Done", description: "保存路线图,稍后执行" }
]
}]
})
@@ -634,16 +631,15 @@ Bash(`mkdir -p ${sessionFolder}`)
| Selection | Action |
|-----------|--------|
| Execute First Layer | `Skill(skill="workflow:lite-plan", args="${firstItem.scope}")` |
| Create Issue | `Skill(skill="issue:new", args="...")` |
| Export Report | Copy roadmap.md + roadmap.jsonl to user-specified location, or generate standalone HTML/Markdown report |
| Done | Display roadmap file paths, end |
| Execute with team-planex | `Skill(skill="team-planex", args="--plan ${sessionFolder}/execution-plan.json")` |
| Execute first wave | `Skill(skill="team-planex", args="${waves[0].issue_ids.join(' ')}")` |
| View issues | Display issues summary table from issues.jsonl |
| Done | Display file paths, end |
**Success Criteria**:
- User feedback processed (or skipped via autoYes)
- roadmap.md finalized
- roadmap.jsonl final version updated
- Post-completion options provided
- team-planex handoff available via execution-plan.json
## Error Handling
@@ -664,28 +660,8 @@ Bash(`mkdir -p ${sessionFolder}`)
3. **Testable convergence**: criteria must be writable as assertions or manual steps; definition_of_done should be judgeable by non-technical stakeholders (see Convergence Criteria in JSONL Schema Design)
4. **Agent-First for Exploration**: Delegate codebase exploration to cli-explore-agent, do not analyze directly in main flow
5. **Incremental validation**: Use `--continue` to iterate on existing roadmaps
6. **Independently executable**: Each JSONL record should be independently passable to lite-plan for execution
6. **team-planex integration**: Issues created follow standard issues-jsonl-schema, directly consumable by team-planex via execution-plan.json
## Usage Recommendations
**Use `/workflow:req-plan-with-file` when:**
- You need to decompose a large requirement into a progressively executable roadmap
- Unsure where to start, need an MVP strategy
- Need to generate a trackable task sequence for the team
- Requirement involves multiple stages or iterations
**Use `/workflow:lite-plan` when:**
- You have a clear single task to execute
- The requirement is already a layer/task from the roadmap
- No layered planning needed
**Use `/workflow:collaborative-plan-with-file` when:**
- A single complex task needs multi-agent parallel planning
- Need to analyze the same task from multiple domain perspectives
**Use `/workflow:analyze-with-file` when:**
- Need in-depth analysis of a technical problem
- Not about planning execution, but understanding and discussion
---

View File

@@ -98,12 +98,29 @@ const inputText = textMatch ? textMatch[1] : null
const planMatch = (desc + ' ' + args).match(/--plan\s+(\S+)/)
const planFile = planMatch ? planMatch[1] : null
// 4) execution-plan.json 输入(来自 req-plan-with-file)
let executionPlan = null
// Determine input type
let inputType = 'unknown'
if (issueIds.length > 0) inputType = 'issue_ids'
else if (inputText) inputType = 'text'
else if (planFile) inputType = 'plan_file'
else {
else if (planFile) {
// Check if it's an execution-plan.json from req-plan-with-file
try {
const content = JSON.parse(Read(planFile))
if (content.waves && content.issue_ids && content.session_id?.startsWith('RPLAN-')) {
inputType = 'execution_plan'
executionPlan = content
issueIds = content.issue_ids
} else {
inputType = 'plan_file'
}
} catch (e) {
// Not JSON or parse error, fallback to original plan_file parsing
inputType = 'plan_file'
}
} else {
// 任务描述本身可能就是需求文本
inputType = 'text_from_description'
}
@@ -150,12 +167,69 @@ if (inputType === 'plan_file') {
Issue IDs 已就绪,直接进入 solution 规划。
#### Wave 规划(所有路径汇聚)
将 issueIds 按波次分组规划:
#### Path D: execution-plan.json → 波次感知处理
```javascript
const projectRoot = Bash('cd . && pwd').trim()
if (inputType === 'execution_plan') {
const projectRoot = Bash('cd . && pwd').trim()
const waves = executionPlan.waves
let waveNum = 0
for (const wave of waves) {
waveNum++
const waveIssues = wave.issue_ids
// Step 1: issue-plan-agent 生成 solutions
const planResult = Task({
subagent_type: "issue-plan-agent",
run_in_background: false,
description: `Plan solutions for wave ${waveNum}: ${wave.label}`,
prompt: `
issue_ids: ${JSON.stringify(waveIssues)}
project_root: "${projectRoot}"
## Requirements
- Generate solutions for each issue
- Auto-bind single solutions
- Issues come from req-plan decomposition (tags: req-plan)
- Respect inter-issue dependencies: ${JSON.stringify(executionPlan.issue_dependencies)}
`
})
// Step 2: issue-queue-agent 形成 queue
const queueResult = Task({
subagent_type: "issue-queue-agent",
run_in_background: false,
description: `Form queue for wave ${waveNum}: ${wave.label}`,
prompt: `
issue_ids: ${JSON.stringify(waveIssues)}
project_root: "${projectRoot}"
## Requirements
- Order solutions by dependency (DAG)
- Detect conflicts between solutions
- Respect wave dependencies: ${JSON.stringify(wave.depends_on_waves)}
- Output execution queue
`
})
// Step 3: → Phase 4 (Wave Dispatch) - create EXEC-* tasks
// Continue to next wave without waiting for executor
}
// After all waves → Phase 5 (Report + Finalize)
}
```
**关键差异**: 波次分组来自 `executionPlan.waves`,而非固定 batch=5。Progressive 模式下 L0(Wave 1) → L1(Wave 2);Direct 模式下 parallel_group 映射为 wave。
#### Wave 规划Path A/B/C 汇聚)
将 issueIds 按波次分组规划Path D 使用独立的波次逻辑,不走此路径):
```javascript
if (inputType !== 'execution_plan') {
// Path A/B/C: 固定 batch=5 分组
const projectRoot = Bash('cd . && pwd').trim()
// 按批次分组(每 wave 最多 5 个 issues)
const WAVE_SIZE = 5
@@ -202,6 +276,7 @@ project_root: "${projectRoot}"
// Step 3: → Phase 4 (Wave Dispatch)
}
} // end if (inputType !== 'execution_plan')
```
### Phase 4: Wave Dispatch
@@ -370,5 +445,7 @@ function parsePlanPhases(planContent) {
| issue-plan-agent failure | Retry once, then report error and skip to next issue |
| issue-queue-agent failure | Retry once, then create EXEC tasks without DAG ordering |
| Plan file not found | Report error with expected path |
| execution-plan.json parse failure | Fallback to plan_file parsing (Path B) |
| execution-plan.json missing waves | Report error, suggest re-running req-plan |
| Empty input (no issues, no text, no plan) | AskUserQuestion for clarification |
| Wave partially failed | Report partial success, continue with successful issues |

View File

@@ -0,0 +1,427 @@
---
name: team-quality-assurance
description: Unified team skill for quality assurance team. All roles invoke this skill with --role arg for role-specific execution. Triggers on "team quality-assurance", "team qa".
allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*)
---
# Team Quality Assurance
质量保障团队技能。融合"问题发现"和"软件测试"两大能力域,形成"发现→策略→测试→分析"闭环。通过 Scout 多视角扫描、Generator-Executor 循环、共享缺陷模式数据库,实现渐进式质量保障。所有成员通过 `--role=xxx` 路由到角色执行逻辑。
## Architecture Overview
```
┌──────────────────────────────────────────────────────────┐
│ Skill(skill="team-quality-assurance", args="--role=xxx") │
└────────────────────────┬─────────────────────────────────┘
│ Role Router
┌────────┬───────────┼───────────┬──────────┬──────────┐
↓ ↓ ↓ ↓ ↓ ↓
┌────────┐┌───────┐┌──────────┐┌─────────┐┌────────┐┌────────┐
│coordi- ││scout ││strategist││generator││executor││analyst │
│nator ││SCOUT-*││QASTRAT-* ││QAGEN-* ││QARUN-* ││QAANA-* │
│ roles/ ││ roles/││ roles/ ││ roles/ ││ roles/ ││ roles/ │
└────────┘└───────┘└──────────┘└─────────┘└────────┘└────────┘
```
## Command Architecture
```
roles/
├── coordinator/
│ ├── role.md # Pipeline 编排(模式选择、任务分发、监控)
│ └── commands/
│ ├── dispatch.md # 任务链创建
│ └── monitor.md # 进度监控
├── scout/
│ ├── role.md # 多视角问题扫描
│ └── commands/
│ └── scan.md # 多视角 CLI Fan-out 扫描
├── strategist/
│ ├── role.md # 测试策略制定
│ └── commands/
│ └── analyze-scope.md # 变更范围分析
├── generator/
│ ├── role.md # 测试用例生成
│ └── commands/
│ └── generate-tests.md # 按层级生成测试代码
├── executor/
│ ├── role.md # 测试执行与修复
│ └── commands/
│ └── run-fix-cycle.md # 迭代测试修复循环
└── analyst/
├── role.md # 质量分析报告
└── commands/
└── quality-report.md # 缺陷模式 + 覆盖率分析
```
**设计原则**: role.md 保留 Phase 1Task Discovery和 Phase 5Report内联。Phase 2-4 根据复杂度决定内联或委派到 `commands/*.md`
## Role Router
### Input Parsing
Parse `$ARGUMENTS` to extract `--role`:
```javascript
const args = "$ARGUMENTS"
const roleMatch = args.match(/--role[=\s]+(\w+)/)
if (!roleMatch) {
throw new Error("Missing --role argument. Available roles: coordinator, scout, strategist, generator, executor, analyst")
}
const role = roleMatch[1]
const teamName = args.match(/--team[=\s]+([\w-]+)/)?.[1] || "quality-assurance"
```
### Role Dispatch
```javascript
const VALID_ROLES = {
"coordinator": { file: "roles/coordinator/role.md", prefix: null },
"scout": { file: "roles/scout/role.md", prefix: "SCOUT" },
"strategist": { file: "roles/strategist/role.md", prefix: "QASTRAT" },
"generator": { file: "roles/generator/role.md", prefix: "QAGEN" },
"executor": { file: "roles/executor/role.md", prefix: "QARUN" },
"analyst": { file: "roles/analyst/role.md", prefix: "QAANA" }
}
if (!VALID_ROLES[role]) {
throw new Error(`Unknown role: ${role}. Available: ${Object.keys(VALID_ROLES).join(', ')}`)
}
// Read and execute role-specific logic
Read(VALID_ROLES[role].file)
// → Execute the 5-phase process defined in that file
```
### Available Roles
| Role | Task Prefix | Responsibility | Role File |
|------|-------------|----------------|-----------|
| `coordinator` | N/A | QA pipeline 编排、模式选择、质量门控 | [roles/coordinator/role.md](roles/coordinator/role.md) |
| `scout` | SCOUT-* | 多视角问题扫描、主动发现潜在缺陷 | [roles/scout/role.md](roles/scout/role.md) |
| `strategist` | QASTRAT-* | 变更范围分析、测试层级选择、覆盖率目标 | [roles/strategist/role.md](roles/strategist/role.md) |
| `generator` | QAGEN-* | 按层级生成测试用例unit/integration/E2E | [roles/generator/role.md](roles/generator/role.md) |
| `executor` | QARUN-* | 执行测试、收集覆盖率、自动修复循环 | [roles/executor/role.md](roles/executor/role.md) |
| `analyst` | QAANA-* | 缺陷模式分析、覆盖率差距、质量报告 | [roles/analyst/role.md](roles/analyst/role.md) |
## Shared Infrastructure
### Role Isolation Rules
**核心原则**: 每个角色仅能执行自己职责范围内的工作。
#### Output Tagging强制
所有角色的输出必须带 `[role_name]` 标识前缀:
```javascript
SendMessage({ content: `## [${role}] ...`, summary: `[${role}] ...` })
mcp__ccw-tools__team_msg({ summary: `[${role}] ...` })
```
#### Coordinator 隔离
| 允许 | 禁止 |
|------|------|
| 需求澄清 (AskUserQuestion) | ❌ 直接编写测试 |
| 创建任务链 (TaskCreate) | ❌ 直接执行测试或扫描 |
| 模式选择 + 质量门控 | ❌ 直接分析覆盖率 |
| 监控进度 (消息总线) | ❌ 绕过 worker 自行完成 |
#### Worker 隔离
| 允许 | 禁止 |
|------|------|
| 处理自己前缀的任务 | ❌ 处理其他角色前缀的任务 |
| 读写 shared-memory.json (自己的字段) | ❌ 为其他角色创建任务 |
| SendMessage 给 coordinator | ❌ 直接与其他 worker 通信 |
### Team Configuration
```javascript
const TEAM_CONFIG = {
name: "quality-assurance",
sessionDir: ".workflow/.team/QA-{slug}-{date}/",
msgDir: ".workflow/.team-msg/quality-assurance/",
sharedMemory: "shared-memory.json",
testLayers: {
L1: { name: "Unit Tests", coverage_target: 80 },
L2: { name: "Integration Tests", coverage_target: 60 },
L3: { name: "E2E Tests", coverage_target: 40 }
},
scanPerspectives: ["bug", "security", "ux", "test-coverage", "code-quality"]
}
```
### Shared Memory核心创新
```javascript
// 各角色读取共享记忆
const memoryPath = `${sessionFolder}/shared-memory.json`
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(memoryPath)) } catch {}
// 各角色写入自己负责的字段:
// scout → sharedMemory.discovered_issues
// strategist → sharedMemory.test_strategy
// generator → sharedMemory.generated_tests
// executor → sharedMemory.execution_results
// analyst → sharedMemory.defect_patterns + quality_score + coverage_history
Write(memoryPath, JSON.stringify(sharedMemory, null, 2))
```
### Message Bus (All Roles)
```javascript
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: role,
to: "coordinator",
type: "<type>",
summary: `[${role}] <summary>`,
ref: "<file_path>"
})
```
| Role | Types |
|------|-------|
| coordinator | `mode_selected`, `gc_loop_trigger`, `quality_gate`, `task_unblocked`, `error`, `shutdown` |
| scout | `scan_ready`, `issues_found`, `error` |
| strategist | `strategy_ready`, `error` |
| generator | `tests_generated`, `tests_revised`, `error` |
| executor | `tests_passed`, `tests_failed`, `coverage_report`, `error` |
| analyst | `analysis_ready`, `quality_report`, `error` |
### CLI 回退
```javascript
Bash(`ccw team log --team "${teamName}" --from "${role}" --to "coordinator" --type "<type>" --summary "<摘要>" --json`)
```
### Task Lifecycle (All Worker Roles)
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith(`${VALID_ROLES[role].prefix}-`) &&
t.owner === role &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (myTasks.length === 0) return
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
// Phase 2-4: Role-specific
// Phase 5: Report + Loop
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: role, to: "coordinator", type: "...", summary: `[${role}] ...` })
SendMessage({ type: "message", recipient: "coordinator", content: `## [${role}] ...`, summary: `[${role}] ...` })
TaskUpdate({ taskId: task.id, status: 'completed' })
```
## Three-Mode Pipeline Architecture
```
Discovery Mode (问题发现优先):
SCOUT-001(多视角扫描) → QASTRAT-001 → QAGEN-001 → QARUN-001 → QAANA-001
Testing Mode (测试优先,跳过 scout):
QASTRAT-001(变更分析) → QAGEN-001(L1) → QARUN-001(L1) → QAGEN-002(L2) → QARUN-002(L2) → QAANA-001
Full QA Mode (完整闭环):
SCOUT-001(扫描) → QASTRAT-001(策略) → [QAGEN-001(L1) + QAGEN-002(L2)](parallel) → [QARUN-001 + QARUN-002](parallel) → QAANA-001(分析) → SCOUT-002(回归扫描)
```
### Mode Auto-Detection
```javascript
function detectQAMode(args, taskDescription) {
if (/--mode[=\s]+(discovery|testing|full)/.test(args)) {
return args.match(/--mode[=\s]+(\w+)/)[1]
}
// 自动检测
if (/发现|扫描|scan|discover|issue|问题/.test(taskDescription)) return 'discovery'
if (/测试|test|覆盖|coverage|TDD/.test(taskDescription)) return 'testing'
return 'full'
}
```
### Generator-Executor Loop (GC 循环)
```
QAGEN → QARUN → (if coverage < target) → QAGEN-fix → QARUN-2
(if coverage >= target) → next layer or QAANA
```
## Unified Session Directory
```
.workflow/.team/QA-{slug}-{YYYY-MM-DD}/
├── team-session.json
├── shared-memory.json # 发现的问题 / 测试策略 / 缺陷模式 / 覆盖率历史
├── scan/ # Scout output
│ └── scan-results.json
├── strategy/ # Strategist output
│ └── test-strategy.md
├── tests/ # Generator output
│ ├── L1-unit/
│ ├── L2-integration/
│ └── L3-e2e/
├── results/ # Executor output
│ ├── run-001.json
│ └── coverage-001.json
└── analysis/ # Analyst output
└── quality-report.md
```
## Coordinator Spawn Template
```javascript
TeamCreate({ team_name: teamName })
// Scout
Task({
subagent_type: "general-purpose",
team_name: teamName,
name: "scout",
prompt: `你是 team "${teamName}" 的 SCOUT。
当你收到 SCOUT-* 任务时,调用 Skill(skill="team-quality-assurance", args="--role=scout") 执行。
当前需求: ${taskDescription}
约束: ${constraints}
## 角色准则(强制)
- 你只能处理 SCOUT-* 前缀的任务,不得执行其他角色的工作
- 所有输出SendMessage、team_msg必须带 [scout] 标识前缀
- 仅与 coordinator 通信,不得直接联系其他 worker
- 不得使用 TaskCreate 为其他角色创建任务
## 消息总线(必须)
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
工作流程:
1. TaskList → 找到 SCOUT-* 任务
2. Skill(skill="team-quality-assurance", args="--role=scout") 执行
3. team_msg log + SendMessage 结果给 coordinator带 [scout] 标识)
4. TaskUpdate completed → 检查下一个任务`
})
// Strategist
Task({
subagent_type: "general-purpose",
team_name: teamName,
name: "strategist",
prompt: `你是 team "${teamName}" 的 STRATEGIST。
当你收到 QASTRAT-* 任务时,调用 Skill(skill="team-quality-assurance", args="--role=strategist") 执行。
当前需求: ${taskDescription}
约束: ${constraints}
## 角色准则(强制)
- 你只能处理 QASTRAT-* 前缀的任务
- 所有输出必须带 [strategist] 标识前缀
- 仅与 coordinator 通信
## 消息总线(必须)
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
工作流程:
1. TaskList → 找到 QASTRAT-* 任务
2. Skill(skill="team-quality-assurance", args="--role=strategist") 执行
3. team_msg log + SendMessage 结果给 coordinator
4. TaskUpdate completed → 检查下一个任务`
})
// Generator
Task({
subagent_type: "general-purpose",
team_name: teamName,
name: "generator",
prompt: `你是 team "${teamName}" 的 GENERATOR。
当你收到 QAGEN-* 任务时,调用 Skill(skill="team-quality-assurance", args="--role=generator") 执行。
当前需求: ${taskDescription}
## 角色准则(强制)
- 你只能处理 QAGEN-* 前缀的任务
- 所有输出必须带 [generator] 标识前缀
## 消息总线(必须)
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
工作流程:
1. TaskList → 找到 QAGEN-* 任务
2. Skill(skill="team-quality-assurance", args="--role=generator") 执行
3. team_msg log + SendMessage
4. TaskUpdate completed → 检查下一个任务`
})
// Executor
Task({
subagent_type: "general-purpose",
team_name: teamName,
name: "executor",
prompt: `你是 team "${teamName}" 的 EXECUTOR。
当你收到 QARUN-* 任务时,调用 Skill(skill="team-quality-assurance", args="--role=executor") 执行。
当前需求: ${taskDescription}
## 角色准则(强制)
- 你只能处理 QARUN-* 前缀的任务
- 所有输出必须带 [executor] 标识前缀
## 消息总线(必须)
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
工作流程:
1. TaskList → 找到 QARUN-* 任务
2. Skill(skill="team-quality-assurance", args="--role=executor") 执行
3. team_msg log + SendMessage
4. TaskUpdate completed → 检查下一个任务`
})
// Analyst
Task({
subagent_type: "general-purpose",
team_name: teamName,
name: "analyst",
prompt: `你是 team "${teamName}" 的 ANALYST。
当你收到 QAANA-* 任务时,调用 Skill(skill="team-quality-assurance", args="--role=analyst") 执行。
当前需求: ${taskDescription}
## 角色准则(强制)
- 你只能处理 QAANA-* 前缀的任务
- 所有输出必须带 [analyst] 标识前缀
## 消息总线(必须)
每次 SendMessage 前,先调用 mcp__ccw-tools__team_msg 记录。
工作流程:
1. TaskList → 找到 QAANA-* 任务
2. Skill(skill="team-quality-assurance", args="--role=analyst") 执行
3. team_msg log + SendMessage
4. TaskUpdate completed → 检查下一个任务`
})
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Unknown --role value | Error with available role list |
| Missing --role arg | Error with usage hint |
| Role file not found | Error with expected path (roles/{name}/role.md) |
| Task prefix conflict | Log warning, proceed |
| Coverage never reaches target | After 3 GC loops, accept current with warning |
| Scout finds no issues | Report clean scan, skip to testing mode |
| Test environment broken | Notify user, suggest manual fix |

View File

@@ -0,0 +1,360 @@
# Command: quality-report
> 缺陷模式分析 + 覆盖率分析 + 综合质量报告。多维度分析 QA 数据,生成质量评分和改进建议。
## When to Use
- Phase 3 of Analyst
- 测试执行完成,需要分析结果
- 需要识别缺陷模式和覆盖率趋势
**Trigger conditions**:
- QAANA-* 任务进入执行阶段
- 所有 QARUN 任务已完成
- Coordinator 请求质量报告
## Strategy
### Delegation Mode
**Mode**: CLI Fan-out深度分析/ Direct基础分析
**CLI Tool**: `gemini` (primary)
**CLI Mode**: `analysis`
### Decision Logic
```javascript
const dataPoints = discoveredIssues.length + Object.keys(executionResults).length
if (dataPoints <= 5) {
// 基础内联分析
mode = 'direct'
} else {
// CLI 辅助深度分析
mode = 'cli-assisted'
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// 从 shared memory 加载所有 QA 数据
const discoveredIssues = sharedMemory.discovered_issues || []
const strategy = sharedMemory.test_strategy || {}
const generatedTests = sharedMemory.generated_tests || {}
const executionResults = sharedMemory.execution_results || {}
const historicalPatterns = sharedMemory.defect_patterns || []
const coverageHistory = sharedMemory.coverage_history || []
// 读取覆盖率详细数据
let coverageData = null
try {
coverageData = JSON.parse(Read('coverage/coverage-summary.json'))
} catch {}
// 读取各层级执行结果
const layerResults = {}
try {
const resultFiles = Glob(`${sessionFolder}/results/run-*.json`)
for (const f of resultFiles) {
const data = JSON.parse(Read(f))
layerResults[data.layer] = data
}
} catch {}
```
### Step 2: Execute Strategy
```javascript
if (mode === 'direct') {
// 基础内联分析
analysis = performDirectAnalysis()
} else {
// CLI 辅助深度分析
const analysisContext = JSON.stringify({
issues: discoveredIssues.slice(0, 20),
execution: layerResults,
coverage: coverageData?.total || {},
strategy: { layers: strategy.layers?.map(l => ({ level: l.level, target: l.target_coverage })) }
}, null, 2)
Bash(`ccw cli -p "PURPOSE: Perform deep quality analysis on QA results to identify defect patterns, coverage trends, and improvement opportunities
TASK: • Classify defects by root cause pattern (logic errors, integration issues, missing validation, etc.) • Identify files with highest defect density • Analyze coverage gaps vs risk levels • Compare actual coverage to targets • Generate actionable improvement recommendations
MODE: analysis
CONTEXT: @${sessionFolder}/shared-memory.json @${sessionFolder}/results/**/*
EXPECTED: Structured analysis with: defect pattern taxonomy, risk-coverage matrix, quality score rationale, top 5 improvement recommendations with expected impact
CONSTRAINTS: Be data-driven, avoid speculation without evidence" --tool gemini --mode analysis --rule analysis-analyze-code-patterns`, {
run_in_background: true
})
// 等待 CLI 完成
}
// ===== 分析维度 =====
// 1. 缺陷模式分析
function analyzeDefectPatterns(issues, results) {
const byType = {}
for (const issue of issues) {
const type = issue.perspective || 'unknown'
if (!byType[type]) byType[type] = []
byType[type].push(issue)
}
// 识别重复模式
const patterns = []
for (const [type, typeIssues] of Object.entries(byType)) {
if (typeIssues.length >= 2) {
// 分析共同特征
const commonFiles = findCommonPatterns(typeIssues.map(i => i.file))
patterns.push({
type,
count: typeIssues.length,
files: [...new Set(typeIssues.map(i => i.file))],
common_pattern: commonFiles,
description: `${type} 类问题在 ${typeIssues.length} 处重复出现`,
recommendation: generateRecommendation(type, typeIssues)
})
}
}
return { by_type: byType, patterns, total: issues.length }
}
// 2. 覆盖率差距分析
function analyzeCoverageGaps(coverage, strategy) {
if (!coverage) return { status: 'no_data', gaps: [] }
const totalCoverage = coverage.total?.lines?.pct || 0
const gaps = []
for (const layer of (strategy.layers || [])) {
if (totalCoverage < layer.target_coverage) {
gaps.push({
layer: layer.level,
target: layer.target_coverage,
actual: totalCoverage,
gap: Math.round(layer.target_coverage - totalCoverage),
severity: (layer.target_coverage - totalCoverage) > 20 ? 'high' : 'medium'
})
}
}
// 按文件分析覆盖率
const fileGaps = []
if (coverage && typeof coverage === 'object') {
for (const [file, data] of Object.entries(coverage)) {
if (file === 'total') continue
const linePct = data?.lines?.pct || 0
if (linePct < 50) {
fileGaps.push({ file, coverage: linePct, severity: linePct < 20 ? 'critical' : 'high' })
}
}
}
return { total_coverage: totalCoverage, gaps, file_gaps: fileGaps.slice(0, 10) }
}
// 3. 测试有效性分析
function analyzeTestEffectiveness(generated, results) {
const effectiveness = {}
for (const [layer, data] of Object.entries(generated)) {
const result = results[layer] || {}
effectiveness[layer] = {
files_generated: data.files?.length || 0,
pass_rate: result.pass_rate || 0,
iterations_needed: result.iterations || 0,
coverage_achieved: result.coverage || 0,
effective: (result.pass_rate || 0) >= 95 && (result.iterations || 0) <= 2
}
}
return effectiveness
}
// 4. 质量趋势分析
function analyzeQualityTrend(history) {
if (history.length < 2) return { trend: 'insufficient_data', confidence: 'low' }
const latest = history[history.length - 1]
const previous = history[history.length - 2]
const delta = (latest?.coverage || 0) - (previous?.coverage || 0)
return {
trend: delta > 5 ? 'improving' : delta < -5 ? 'declining' : 'stable',
delta: Math.round(delta * 10) / 10,
data_points: history.length,
confidence: history.length >= 5 ? 'high' : history.length >= 3 ? 'medium' : 'low'
}
}
// 5. 综合质量评分
function calculateQualityScore(analysis) {
let score = 100
// 扣分: 安全问题
const securityIssues = (analysis.defect_patterns.by_type?.security || []).length
score -= securityIssues * 10
// 扣分: Bug
const bugIssues = (analysis.defect_patterns.by_type?.bug || []).length
score -= bugIssues * 5
// 扣分: 覆盖率差距
for (const gap of (analysis.coverage_gaps.gaps || [])) {
score -= gap.gap * 0.5
}
// 扣分: 测试失败
for (const [layer, eff] of Object.entries(analysis.test_effectiveness)) {
if (eff.pass_rate < 100) score -= (100 - eff.pass_rate) * 0.3
}
// 加分: 有效测试层
const effectiveLayers = Object.values(analysis.test_effectiveness)
.filter(e => e.effective).length
score += effectiveLayers * 5
// 加分: 改善趋势
if (analysis.quality_trend.trend === 'improving') score += 3
return Math.max(0, Math.min(100, Math.round(score)))
}
// ---- helpers ----
// Find the deepest directory prefix shared by all file paths.
// Returns 'scattered' when the list is empty or the files share no common
// directory (top-level files, disjoint trees).
function findCommonPatterns(files) {
  // Guard: Array.prototype.reduce throws a TypeError on an empty array when
  // no initial value is supplied, so bail out early instead of crashing.
  if (files.length === 0) return 'scattered'
  const dirs = files.map(f => f.split('/').slice(0, -1).join('/'))
  const commonDir = dirs.reduce((a, b) => {
    const partsA = a.split('/')
    const partsB = b.split('/')
    const common = []
    for (let i = 0; i < Math.min(partsA.length, partsB.length); i++) {
      if (partsA[i] === partsB[i]) common.push(partsA[i])
      else break
    }
    return common.join('/')
  })
  return commonDir || 'scattered'
}
// Map an issue type to a canned improvement recommendation (copy kept
// verbatim); unknown types get a generic "analyze further" suggestion.
// NOTE: `issues` is currently unused but kept for interface stability.
function generateRecommendation(type, issues) {
  switch (type) {
    case 'security': return '加强输入验证和安全审计,考虑引入 SAST 工具'
    case 'bug': return '改进错误处理和边界检查,增加防御性编程'
    case 'test-coverage': return '补充缺失的测试用例,聚焦未覆盖的分支'
    case 'code-quality': return '重构复杂函数,消除代码重复'
    case 'ux': return '统一错误提示和加载状态处理'
    default: return '进一步分析并制定改进计划'
  }
}
```
### Step 3: Result Processing
```javascript
// Assemble the multi-dimensional analysis result. quality_score is seeded
// with 0 and filled in afterwards because calculateQualityScore reads the
// other four dimensions from this same object.
const analysis = {
  defect_patterns: analyzeDefectPatterns(discoveredIssues, layerResults),
  coverage_gaps: analyzeCoverageGaps(coverageData, strategy),
  test_effectiveness: analyzeTestEffectiveness(generatedTests, layerResults),
  quality_trend: analyzeQualityTrend(coverageHistory),
  quality_score: 0
}
analysis.quality_score = calculateQualityScore(analysis)
// Render the markdown report and persist it under the session folder.
const reportContent = generateReportMarkdown(analysis)
Bash(`mkdir -p "${sessionFolder}/analysis"`)
Write(`${sessionFolder}/analysis/quality-report.md`, reportContent)
// Update shared memory: publish defect patterns / score, and append one
// coverage_history data point for future trend analysis.
sharedMemory.defect_patterns = analysis.defect_patterns.patterns
sharedMemory.quality_score = analysis.quality_score
sharedMemory.coverage_history = sharedMemory.coverage_history || []
sharedMemory.coverage_history.push({
  date: new Date().toISOString(),
  coverage: analysis.coverage_gaps.total_coverage || 0,
  quality_score: analysis.quality_score,
  issues: analysis.defect_patterns.total
})
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
// Render the full quality report as markdown. All numbers come from the
// assembled `analysis` object; the sections mirror the five analysis
// dimensions (defects, coverage, effectiveness, trend, recommendations).
// Template lines are flush-left on purpose — indentation would leak into
// the generated markdown.
function generateReportMarkdown(analysis) {
return `# Quality Assurance Report
## Quality Score: ${analysis.quality_score}/100
---
## 1. Defect Pattern Analysis
- Total issues found: ${analysis.defect_patterns.total}
- Recurring patterns: ${analysis.defect_patterns.patterns.length}
${analysis.defect_patterns.patterns.map(p =>
`### Pattern: ${p.type} (${p.count} occurrences)
- Files: ${p.files.join(', ')}
- Common location: ${p.common_pattern}
- Recommendation: ${p.recommendation}`
).join('\n\n')}
## 2. Coverage Analysis
- Overall coverage: ${analysis.coverage_gaps.total_coverage || 'N/A'}%
- Coverage gaps: ${(analysis.coverage_gaps.gaps || []).length}
${(analysis.coverage_gaps.gaps || []).map(g =>
`- **${g.layer}**: target ${g.target}% vs actual ${g.actual}% (gap: ${g.gap}%, severity: ${g.severity})`
).join('\n')}
### Low Coverage Files
${(analysis.coverage_gaps.file_gaps || []).map(f =>
`- ${f.file}: ${f.coverage}% [${f.severity}]`
).join('\n')}
## 3. Test Effectiveness
${Object.entries(analysis.test_effectiveness).map(([layer, data]) =>
`- **${layer}**: ${data.files_generated} files, pass rate ${data.pass_rate}%, ${data.iterations_needed} fix iterations, ${data.effective ? 'EFFECTIVE' : 'NEEDS IMPROVEMENT'}`
).join('\n')}
## 4. Quality Trend
- Trend: ${analysis.quality_trend.trend}
${analysis.quality_trend.delta !== undefined ? `- Coverage delta: ${analysis.quality_trend.delta > 0 ? '+' : ''}${analysis.quality_trend.delta}%` : ''}
- Confidence: ${analysis.quality_trend.confidence}
## 5. Recommendations
${analysis.quality_score >= 80 ? '- Quality is **GOOD**. Maintain current testing practices.' : ''}
${analysis.quality_score >= 60 && analysis.quality_score < 80 ? '- Quality needs **IMPROVEMENT**. Focus on coverage gaps and recurring patterns.' : ''}
${analysis.quality_score < 60 ? '- Quality is **CONCERNING**. Recommend comprehensive review and testing effort.' : ''}
${analysis.defect_patterns.patterns.map(p => `- [${p.type}] ${p.recommendation}`).join('\n')}
${(analysis.coverage_gaps.gaps || []).map(g => `- Close ${g.layer} coverage gap: +${g.gap}% needed`).join('\n')}
`
}
```
## Output Format
```
## Quality Analysis Results
### Quality Score: [score]/100
### Dimensions
1. Defect Patterns: [count] recurring
2. Coverage Gaps: [count] layers below target
3. Test Effectiveness: [effective_count]/[total_layers] effective
4. Quality Trend: [improving|stable|declining]
### Report Location
[session]/analysis/quality-report.md
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No coverage data available | Score based on other dimensions only |
| No execution results | Analyze only scout findings and strategy |
| Shared memory empty/corrupt | Generate minimal report with available data |
| CLI analysis fails | Fall back to direct inline analysis |
| Insufficient history for trend | Report 'insufficient_data', skip trend scoring |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,326 @@
# Role: analyst
质量分析师。分析缺陷模式、覆盖率差距、测试有效性,生成综合质量报告。维护缺陷模式数据库,为 scout 和 strategist 提供反馈数据。
## Role Identity
- **Name**: `analyst`
- **Task Prefix**: `QAANA-*`
- **Responsibility**: Read-only analysis质量分析
- **Communication**: SendMessage to coordinator only
- **Output Tag**: `[analyst]`
## Role Boundaries
### MUST
- 仅处理 `QAANA-*` 前缀的任务
- 所有输出必须带 `[analyst]` 标识
- 基于数据生成分析报告
- 更新 shared memory 中的缺陷模式和质量分数
### MUST NOT
- ❌ 修改源代码或测试代码
- ❌ 执行测试
- ❌ 为其他角色创建任务
- ❌ 直接与其他 worker 通信
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `analysis_ready` | analyst → coordinator | 分析完成 | 包含质量评分 |
| `quality_report` | analyst → coordinator | 报告生成 | 包含详细分析 |
| `error` | analyst → coordinator | 分析失败 | 阻塞性错误 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `quality-report` | [commands/quality-report.md](commands/quality-report.md) | Phase 3 | 缺陷模式分析 + 覆盖率分析 |
### CLI Capabilities
| CLI Tool | Mode | Used By | Purpose |
|----------|------|---------|---------|
| `gemini` | analysis | quality-report.md | 缺陷模式识别和趋势分析 |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
// Pick the oldest unblocked, pending QAANA-* task owned by this role.
const tasks = TaskList()
const myTasks = tasks.filter(t =>
  t.subject.startsWith('QAANA-') &&
  t.owner === 'analyst' &&
  t.status === 'pending' &&
  t.blockedBy.length === 0
)
if (myTasks.length === 0) return // nothing to do; stay idle
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
```
### Phase 2: Context Loading
```javascript
// Pull everything the analysis needs out of shared memory; every read is
// best-effort so the analyst can still produce a minimal report.
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1] || '.'
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
const discoveredIssues = sharedMemory.discovered_issues || []
const strategy = sharedMemory.test_strategy || {}
const generatedTests = sharedMemory.generated_tests || {}
const executionResults = sharedMemory.execution_results || {}
const historicalPatterns = sharedMemory.defect_patterns || []
// Coverage data: optional; analysis degrades gracefully without it.
let coverageData = null
try {
  coverageData = JSON.parse(Read('coverage/coverage-summary.json'))
} catch {}
// Per-layer test execution logs written by the executor.
const runResults = {}
try {
  const resultFiles = Glob(`${sessionFolder}/results/run-*.json`)
  for (const f of resultFiles) {
    const data = JSON.parse(Read(f))
    runResults[data.layer] = data
  }
} catch {}
```
### Phase 3: Multi-Dimensional Analysis
```javascript
// Read commands/quality-report.md for full implementation
Read("commands/quality-report.md")
```
**分析维度**:
```javascript
const analysis = {
// 1. 缺陷模式分析
defect_patterns: analyzeDefectPatterns(discoveredIssues, executionResults),
// 2. 覆盖率差距分析
coverage_gaps: analyzeCoverageGaps(coverageData, strategy),
// 3. 测试有效性分析
test_effectiveness: analyzeTestEffectiveness(generatedTests, executionResults),
// 4. 质量趋势
quality_trend: analyzeQualityTrend(sharedMemory.coverage_history || []),
// 5. 综合质量评分
quality_score: 0
}
// Group discovered issues by scout perspective and flag any type that
// occurs at least twice as a recurring defect pattern.
// NOTE(review): `results` is part of the call signature but unused here.
function analyzeDefectPatterns(issues, results) {
  const byType = {}
  issues.forEach(issue => {
    const type = issue.perspective || 'unknown'
    ;(byType[type] = byType[type] || []).push(issue)
  })
  const patterns = Object.entries(byType)
    .filter(([, typeIssues]) => typeIssues.length >= 2)
    .map(([type, typeIssues]) => ({
      type,
      count: typeIssues.length,
      files: [...new Set(typeIssues.map(i => i.file))],
      description: `${type} 类问题在 ${typeIssues.length} 处重复出现`
    }))
  return { by_type: byType, patterns, total: issues.length }
}
// Compare overall line coverage against each strategy layer's target.
// NOTE(review): every layer is compared against the same aggregate number —
// per-layer coverage is not computed here yet.
function analyzeCoverageGaps(coverage, strategy) {
  if (!coverage) return { status: 'no_data' }
  const totalCoverage = coverage.total?.lines?.pct || 0
  const layers = strategy.layers || []
  const gaps = layers
    .filter(layer => totalCoverage < layer.target_coverage)
    .map(layer => ({
      layer: layer.level,
      target: layer.target_coverage,
      actual: totalCoverage,
      gap: layer.target_coverage - totalCoverage,
      files_below_target: [] // left for deeper per-file analysis
    }))
  return { total_coverage: totalCoverage, gaps }
}
// Judge each generated test layer by its run results; a layer is
// "effective" when at least 95% of its tests pass.
function analyzeTestEffectiveness(generated, results) {
  return Object.keys(generated).reduce((acc, layer) => {
    const run = results[layer] || {}
    acc[layer] = {
      files_generated: generated[layer].files?.length || 0,
      pass_rate: run.pass_rate || 0,
      iterations_needed: run.iterations || 0,
      effective: (run.pass_rate || 0) >= 95
    }
    return acc
  }, {})
}
// Trend over the last two coverage samples: any positive delta counts as
// improving, any negative delta as declining.
function analyzeQualityTrend(history) {
  if (history.length < 2) return { trend: 'insufficient_data' }
  const [previous, latest] = history.slice(-2)
  const delta = (latest?.coverage || 0) - (previous?.coverage || 0)
  const trend = delta > 0 ? 'improving' : (delta < 0 ? 'declining' : 'stable')
  return { trend, delta, data_points: history.length }
}
// Composite quality score (0-100): deduct for security (-10 each) and bug
// (-5 each) findings plus coverage shortfalls (-0.5 per missing point);
// add +5 per effective test layer. Result is clamped and rounded.
function calculateQualityScore(analysis) {
  const byType = analysis.defect_patterns.by_type || {}
  const gapPenalty = (analysis.coverage_gaps.gaps || [])
    .reduce((sum, gap) => sum + gap.gap * 0.5, 0)
  const effectiveBonus = Object.values(analysis.test_effectiveness)
    .filter(e => e.effective).length * 5
  const score = 100
    - (byType.security || []).length * 10
    - (byType.bug || []).length * 5
    - gapPenalty
    + effectiveBonus
  return Math.max(0, Math.min(100, Math.round(score)))
}
analysis.quality_score = calculateQualityScore(analysis)
```
### Phase 4: Report Generation
```javascript
// 生成质量报告
const reportContent = `# Quality Assurance Report
## Quality Score: ${analysis.quality_score}/100
## 1. Defect Pattern Analysis
- Total issues found: ${analysis.defect_patterns.total}
- Recurring patterns: ${analysis.defect_patterns.patterns.length}
${analysis.defect_patterns.patterns.map(p => ` - **${p.type}**: ${p.count} occurrences across ${p.files.length} files`).join('\n')}
## 2. Coverage Analysis
- Overall coverage: ${analysis.coverage_gaps.total_coverage || 'N/A'}%
- Coverage gaps: ${(analysis.coverage_gaps.gaps || []).length}
${(analysis.coverage_gaps.gaps || []).map(g => ` - **${g.layer}**: target ${g.target}% vs actual ${g.actual}% (gap: ${g.gap}%)`).join('\n')}
## 3. Test Effectiveness
${Object.entries(analysis.test_effectiveness).map(([layer, data]) =>
`- **${layer}**: ${data.files_generated} files, pass rate ${data.pass_rate}%, ${data.iterations_needed} fix iterations`
).join('\n')}
## 4. Quality Trend
- Trend: ${analysis.quality_trend.trend}
${analysis.quality_trend.delta !== undefined ? `- Coverage change: ${analysis.quality_trend.delta > 0 ? '+' : ''}${analysis.quality_trend.delta}%` : ''}
## 5. Recommendations
${analysis.quality_score >= 80 ? '- Quality is GOOD. Continue with current testing strategy.' : ''}
${analysis.quality_score >= 60 && analysis.quality_score < 80 ? '- Quality needs IMPROVEMENT. Focus on coverage gaps and recurring patterns.' : ''}
${analysis.quality_score < 60 ? '- Quality is CONCERNING. Recommend deep scan and comprehensive test generation.' : ''}
${analysis.defect_patterns.patterns.length > 0 ? `- Address ${analysis.defect_patterns.patterns.length} recurring defect patterns` : ''}
${(analysis.coverage_gaps.gaps || []).length > 0 ? `- Close ${analysis.coverage_gaps.gaps.length} coverage gaps` : ''}
`
Bash(`mkdir -p "${sessionFolder}/analysis"`)
Write(`${sessionFolder}/analysis/quality-report.md`, reportContent)
// 更新 shared memory
sharedMemory.defect_patterns = analysis.defect_patterns.patterns
sharedMemory.quality_score = analysis.quality_score
sharedMemory.coverage_history = sharedMemory.coverage_history || []
sharedMemory.coverage_history.push({
date: new Date().toISOString(),
coverage: analysis.coverage_gaps.total_coverage || 0,
quality_score: analysis.quality_score,
issues: analysis.defect_patterns.total
})
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
### Phase 5: Report to Coordinator
```javascript
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: "analyst",
to: "coordinator",
type: "quality_report",
summary: `[analyst] 质量评分: ${analysis.quality_score}/100, 缺陷模式: ${analysis.defect_patterns.patterns.length}, 覆盖率: ${analysis.coverage_gaps.total_coverage || 'N/A'}%`,
ref: `${sessionFolder}/analysis/quality-report.md`
})
SendMessage({
type: "message",
recipient: "coordinator",
content: `## [analyst] Quality Analysis Results
**Task**: ${task.subject}
**Quality Score**: ${analysis.quality_score}/100
**Defect Patterns**: ${analysis.defect_patterns.patterns.length} recurring
**Coverage**: ${analysis.coverage_gaps.total_coverage || 'N/A'}%
**Trend**: ${analysis.quality_trend.trend}
### Report
${sessionFolder}/analysis/quality-report.md`,
summary: `[analyst] QAANA complete: score ${analysis.quality_score}/100`
})
TaskUpdate({ taskId: task.id, status: 'completed' })
const nextTasks = TaskList().filter(t =>
t.subject.startsWith('QAANA-') && t.owner === 'analyst' &&
t.status === 'pending' && t.blockedBy.length === 0
)
if (nextTasks.length > 0) { /* back to Phase 1 */ }
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No QAANA-* tasks available | Idle, wait for coordinator |
| Coverage data not found | Report quality score based on other dimensions |
| Shared memory empty | Generate minimal report with available data |
| No execution results | Analyze only scout findings and strategy coverage |
| CLI analysis fails | Fall back to inline pattern analysis |
| Critical issue beyond scope | SendMessage error to coordinator |

View File

@@ -0,0 +1,167 @@
# Command: dispatch
> 任务链创建与依赖管理。根据 QA 模式创建 pipeline 任务链并分配给 worker 角色。
## When to Use
- Phase 3 of Coordinator
- QA 模式已确定,需要创建任务链
- 团队已创建worker 已 spawn
**Trigger conditions**:
- Coordinator Phase 2 完成后
- 模式切换需要重建任务链
- GC 循环需要创建修复任务
## Strategy
### Delegation Mode
**Mode**: Directcoordinator 直接操作 TaskCreate/TaskUpdate
### Decision Logic
```javascript
// Select the ordered task pipeline for a QA mode; unknown modes fall back
// to the 'discovery' pipeline. sessionFolder / taskDescription are part of
// the documented call signature but unused here.
function buildPipeline(qaMode, sessionFolder, taskDescription) {
  const discovery = [
    { prefix: 'SCOUT', owner: 'scout', desc: '多视角问题扫描', blockedBy: [] },
    { prefix: 'QASTRAT', owner: 'strategist', desc: '测试策略制定', blockedBy: ['SCOUT'] },
    { prefix: 'QAGEN', owner: 'generator', desc: '测试代码生成 (L1)', meta: 'layer: L1', blockedBy: ['QASTRAT'] },
    { prefix: 'QARUN', owner: 'executor', desc: '测试执行 (L1)', meta: 'layer: L1', blockedBy: ['QAGEN'] },
    { prefix: 'QAANA', owner: 'analyst', desc: '质量分析报告', blockedBy: ['QARUN'] }
  ]
  const testing = [
    { prefix: 'QASTRAT', owner: 'strategist', desc: '测试策略制定', blockedBy: [] },
    { prefix: 'QAGEN-L1', owner: 'generator', desc: '测试代码生成 (L1)', meta: 'layer: L1', blockedBy: ['QASTRAT'] },
    { prefix: 'QARUN-L1', owner: 'executor', desc: '测试执行 (L1)', meta: 'layer: L1', blockedBy: ['QAGEN-L1'] },
    { prefix: 'QAGEN-L2', owner: 'generator', desc: '测试代码生成 (L2)', meta: 'layer: L2', blockedBy: ['QARUN-L1'] },
    { prefix: 'QARUN-L2', owner: 'executor', desc: '测试执行 (L2)', meta: 'layer: L2', blockedBy: ['QAGEN-L2'] },
    { prefix: 'QAANA', owner: 'analyst', desc: '质量分析报告', blockedBy: ['QARUN-L2'] }
  ]
  const full = [
    { prefix: 'SCOUT', owner: 'scout', desc: '多视角问题扫描', blockedBy: [] },
    { prefix: 'QASTRAT', owner: 'strategist', desc: '测试策略制定', blockedBy: ['SCOUT'] },
    { prefix: 'QAGEN-L1', owner: 'generator', desc: '测试代码生成 (L1)', meta: 'layer: L1', blockedBy: ['QASTRAT'] },
    { prefix: 'QAGEN-L2', owner: 'generator', desc: '测试代码生成 (L2)', meta: 'layer: L2', blockedBy: ['QASTRAT'] },
    { prefix: 'QARUN-L1', owner: 'executor', desc: '测试执行 (L1)', meta: 'layer: L1', blockedBy: ['QAGEN-L1'] },
    { prefix: 'QARUN-L2', owner: 'executor', desc: '测试执行 (L2)', meta: 'layer: L2', blockedBy: ['QAGEN-L2'] },
    { prefix: 'QAANA', owner: 'analyst', desc: '质量分析报告', blockedBy: ['QARUN-L1', 'QARUN-L2'] },
    { prefix: 'SCOUT-REG', owner: 'scout', desc: '回归扫描', blockedBy: ['QAANA'] }
  ]
  const pipelines = { discovery, testing, full }
  return pipelines[qaMode] ?? discovery
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
const pipeline = buildPipeline(qaMode, sessionFolder, taskDescription)
```
### Step 2: Execute Strategy
```javascript
const taskIds = {}
for (const stage of pipeline) {
  // Build the task description (embeds session folder, layer metadata, goal).
  const fullDesc = [
    stage.desc,
    `\nsession: ${sessionFolder}`,
    stage.meta ? `\n${stage.meta}` : '',
    `\n\n目标: ${taskDescription}`
  ].join('')
  // Create the task.
  TaskCreate({
    subject: `${stage.prefix}-001: ${stage.desc}`,
    description: fullDesc,
    activeForm: `${stage.desc}进行中`
  })
  // Record the task id (TaskCreate is assumed not to return it, so the new
  // task is looked up by its subject prefix).
  const allTasks = TaskList()
  const newTask = allTasks.find(t => t.subject.startsWith(`${stage.prefix}-001`))
  taskIds[stage.prefix] = newTask.id
  // Assign the owner and wire up blocking dependencies by resolved id.
  const blockedByIds = stage.blockedBy
    .map(dep => taskIds[dep])
    .filter(Boolean)
  TaskUpdate({
    taskId: newTask.id,
    owner: stage.owner,
    addBlockedBy: blockedByIds
  })
}
```
### Step 3: Result Processing
```javascript
// Verify the chain: every pipeline stage must have produced a task id;
// report an error to the user when any stage is missing.
const allTasks = TaskList()
const chainTasks = pipeline.map(s => taskIds[s.prefix]).filter(Boolean)
const chainValid = chainTasks.length === pipeline.length
if (!chainValid) {
  mcp__ccw-tools__team_msg({
    operation: "log", team: teamName, from: "coordinator",
    to: "user", type: "error",
    summary: `[coordinator] 任务链创建不完整: ${chainTasks.length}/${pipeline.length}`
  })
}
```
## GC Loop Task Creation
当 executor 报告覆盖率不达标时coordinator 调用此逻辑追加任务:
```javascript
// Append a fix + re-run task pair for one GC (generate-check) iteration.
// Called by the coordinator when the executor reports coverage below target.
function createGCLoopTasks(gcIteration, targetLayer, sessionFolder) {
  // Fix task: generator repairs failing tests and fills coverage gaps.
  TaskCreate({
    subject: `QAGEN-fix-${gcIteration}: 修复 ${targetLayer} 测试 (GC #${gcIteration})`,
    description: `修复未通过测试并补充覆盖\nsession: ${sessionFolder}\nlayer: ${targetLayer}\ntype: gc-fix`,
    activeForm: `GC循环 #${gcIteration} 修复中`
  })
  // Re-run task: executor verifies the fixes.
  TaskCreate({
    subject: `QARUN-gc-${gcIteration}: 重新执行 ${targetLayer} (GC #${gcIteration})`,
    description: `重新执行测试验证修复\nsession: ${sessionFolder}\nlayer: ${targetLayer}`,
    activeForm: `GC循环 #${gcIteration} 执行中`
  })
  // Dependency: QARUN-gc is blocked by QAGEN-fix.
  // ... TaskUpdate addBlockedBy
}
```
## Output Format
```
## Task Chain Created
### Mode: [discovery|testing|full]
### Pipeline Stages: [count]
- [prefix]-001: [description] (owner: [role], blocked by: [deps])
### Verification: PASS/FAIL
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Task creation fails | Retry once, then report to user |
| Dependency cycle detected | Flatten dependencies, warn coordinator |
| Invalid qaMode | Default to 'discovery' mode |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,247 @@
# Command: monitor
> 消息总线轮询与协调循环。持续监控 worker 进度,路由消息,触发 GC 循环,执行质量门控。
## When to Use
- Phase 4 of Coordinator
- 任务链已创建并分发
- 需要持续监控直到所有任务完成
**Trigger conditions**:
- dispatch 完成后立即启动
- GC 循环创建新任务后重新进入
## Strategy
### Delegation Mode
**Mode**: Directcoordinator 直接轮询和路由)
### Decision Logic
```javascript
// Message routing table: maps an inbound message type to the coordinator's
// action and the next pipeline hop.
const routingTable = {
  // Scout finished
  'scan_ready': {
    action: 'Mark SCOUT complete, unblock QASTRAT',
    next: 'strategist'
  },
  'issues_found': {
    action: 'Mark SCOUT complete with issues, unblock QASTRAT',
    next: 'strategist'
  },
  // Strategist finished
  'strategy_ready': {
    action: 'Mark QASTRAT complete, unblock QAGEN',
    next: 'generator'
  },
  // Generator finished
  'tests_generated': {
    action: 'Mark QAGEN complete, unblock QARUN',
    next: 'executor'
  },
  'tests_revised': {
    action: 'Mark QAGEN-fix complete, unblock QARUN-gc',
    next: 'executor'
  },
  // Executor finished
  'tests_passed': {
    action: 'Mark QARUN complete, check coverage, unblock next',
    next: 'check_coverage'
  },
  'tests_failed': {
    action: 'Evaluate failures, decide GC loop or continue',
    next: 'gc_decision'
  },
  // Analyst finished
  'analysis_ready': {
    action: 'Mark QAANA complete, evaluate quality gate',
    next: 'quality_gate'
  },
  'quality_report': {
    action: 'Quality report received, prepare final report',
    next: 'finalize'
  },
  // Errors
  'error': {
    action: 'Assess severity, retry or escalate',
    next: 'error_handler'
  }
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// Pull per-layer coverage targets from the strategist's plan in shared memory.
const sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
const strategy = sharedMemory.test_strategy || {}
const coverageTargets = {}
for (const layer of (strategy.layers || [])) {
  coverageTargets[layer.level] = layer.target_coverage
}
// GC (generate-check) loop budget shared across the whole monitor run.
let gcIteration = 0
const MAX_GC_ITERATIONS = 3
```
### Step 2: Execute Strategy
```javascript
let allComplete = false
while (!allComplete) {
  // 1. Poll the team message bus for recent traffic.
  const messages = mcp__ccw-tools__team_msg({
    operation: "list",
    team: teamName,
    last: 10
  })
  // 2. Route each unprocessed message through the routing table.
  for (const msg of messages) {
    const handler = routingTable[msg.type]
    if (!handler) continue
    // BUGFIX: route via a local variable instead of mutating handler.next.
    // The previous code assigned handler.next = 'gc_decision' inside the
    // check_coverage branch, which permanently rewired the shared
    // routingTable entry for every later message of that type — and never
    // actually ran the gc_decision branch for the current message, since
    // the switch had already matched 'check_coverage'.
    let route = handler.next
    if (route === 'check_coverage') {
      // Read execution results and compare against the layer's target.
      const coverage = msg.data?.coverage || 0
      const targetLayer = msg.data?.layer || 'L1'
      const target = coverageTargets[targetLayer] || 80
      if (coverage >= target) {
        // Coverage met: continue the pipeline (unblock QAANA / next layer).
        route = null
      } else {
        // Below target: fall through to the GC decision handling below.
        route = 'gc_decision'
      }
    }
    switch (route) {
      case 'gc_decision': {
        const coverage = msg.data?.coverage || 0
        const targetLayer = msg.data?.layer || 'L1'
        if (gcIteration < MAX_GC_ITERATIONS) {
          gcIteration++
          // Trigger a Generator-Executor fix cycle.
          mcp__ccw-tools__team_msg({
            operation: "log", team: teamName, from: "coordinator",
            to: "generator", type: "gc_loop_trigger",
            summary: `[coordinator] GC循环 #${gcIteration}: 覆盖率 ${coverage}% 未达标,请修复`,
            data: { iteration: gcIteration, layer: targetLayer, coverage }
          })
          // Create GC fix tasks (see dispatch.md createGCLoopTasks).
          // createGCLoopTasks(gcIteration, targetLayer, sessionFolder)
        } else {
          // Iteration budget exhausted: accept current coverage.
          mcp__ccw-tools__team_msg({
            operation: "log", team: teamName, from: "coordinator",
            to: "user", type: "quality_gate",
            summary: `[coordinator] GC循环已达上限(${MAX_GC_ITERATIONS}),接受当前覆盖率 ${coverage}%`
          })
          // Continue the pipeline and unblock QAANA.
        }
        break
      }
      case 'quality_gate': {
        const qualityScore = sharedMemory.quality_score || 0
        let status = 'PASS'
        if (qualityScore < 60) status = 'FAIL'
        else if (qualityScore < 80) status = 'CONDITIONAL'
        mcp__ccw-tools__team_msg({
          operation: "log", team: teamName, from: "coordinator",
          to: "user", type: "quality_gate",
          summary: `[coordinator] 质量门控: ${status} (score: ${qualityScore})`
        })
        break
      }
      case 'error_handler': {
        const fromRole = msg.from
        const severity = msg.data?.severity || 'medium'
        if (severity === 'critical') {
          // Escalate to the user immediately.
          SendMessage({
            content: `## [coordinator] Critical Error from ${fromRole}\n\n${msg.summary}`,
            summary: `[coordinator] Critical error: ${msg.summary}`
          })
        } else {
          // Mark the task failed and let the retry path handle it.
        }
        break
      }
    }
  }
  // 3. Completion check: done once no non-coordinator task is still open.
  const tasks = TaskList()
  const pendingWorkerTasks = tasks.filter(t =>
    t.owner !== 'coordinator' &&
    t.status !== 'completed' &&
    t.status !== 'deleted'
  )
  allComplete = pendingWorkerTasks.length === 0
  // 4. Not done yet: yield before the next poll.
  if (!allComplete) {
    // Brief wait (in practice the coordinator resumes after a subagent returns).
  }
}
```
### Step 3: Result Processing
```javascript
// Aggregate final results from the task list and shared memory for reporting.
const finalSharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
const summary = {
  total_tasks: TaskList().filter(t => t.owner !== 'coordinator').length,
  completed_tasks: TaskList().filter(t => t.status === 'completed' && t.owner !== 'coordinator').length,
  gc_iterations: gcIteration,
  quality_score: finalSharedMemory.quality_score,
  coverage: finalSharedMemory.execution_results?.coverage
}
```
## Output Format
```
## Coordination Summary
### Pipeline Status: COMPLETE
### Tasks: [completed]/[total]
### GC Iterations: [count]
### Quality Score: [score]/100
### Coverage: [percent]%
### Message Log (last 10)
- [timestamp] [from] → [to]: [type] - [summary]
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Message bus unavailable | Fall back to TaskList polling only |
| Teammate unresponsive (2x no response) | Respawn teammate with same task |
| Deadlock detected (tasks blocked indefinitely) | Identify cycle, manually unblock |
| Quality gate FAIL | Report to user, suggest targeted re-run |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,228 @@
# Role: coordinator
QA 团队协调者。编排 pipeline需求澄清 → 模式选择 → 团队创建 → 任务分发 → 监控协调 → 质量门控 → 结果汇报。
## Role Identity
- **Name**: `coordinator`
- **Task Prefix**: N/A (coordinator creates tasks, doesn't receive them)
- **Responsibility**: Orchestration
- **Communication**: SendMessage to all teammates
- **Output Tag**: `[coordinator]`
## Role Boundaries
### MUST
- 所有输出SendMessage、team_msg、日志必须带 `[coordinator]` 标识
- 仅负责需求澄清、模式选择、任务创建/分发、进度监控、质量门控、结果汇报
- 通过 TaskCreate 创建任务并分配给 worker 角色
- 通过消息总线监控 worker 进度并路由消息
### MUST NOT
-**直接执行任何业务任务**(扫描、测试、分析等)
- ❌ 直接调用 cli-explore-agent、code-developer 等实现类 subagent
- ❌ 直接修改源代码或生成产物文件
- ❌ 绕过 worker 角色自行完成应委派的工作
- ❌ 在输出中省略 `[coordinator]` 标识
> **核心原则**: coordinator 是指挥者,不是执行者。所有实际工作必须通过 TaskCreate 委派给 worker 角色。
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `mode_selected` | coordinator → all | QA 模式确定 | Discovery/Testing/Full |
| `gc_loop_trigger` | coordinator → generator | 覆盖率不达标 | 触发 Generator-Executor 循环 |
| `quality_gate` | coordinator → user | 质量评估 | 通过/不通过/有条件通过 |
| `task_unblocked` | coordinator → worker | 依赖解除 | 任务可执行 |
| `error` | coordinator → user | 协调错误 | 阻塞性问题 |
| `shutdown` | coordinator → all | 团队关闭 | 清理资源 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `dispatch` | [commands/dispatch.md](commands/dispatch.md) | Phase 3 | 任务链创建与依赖管理 |
| `monitor` | [commands/monitor.md](commands/monitor.md) | Phase 4 | 消息总线轮询与协调循环 |
### Subagent Capabilities
> Coordinator 不直接使用 subagent通过 worker 角色间接使用)
### CLI Capabilities
> Coordinator 不直接使用 CLI 分析工具
## Execution
### Phase 1: Requirement Clarification
```javascript
const args = "$ARGUMENTS"
// Strip control flags (--role/--team/--mode) to isolate the task description.
const taskDescription = args.replace(/--role[=\s]+\w+/, '').replace(/--team[=\s]+[\w-]+/, '').replace(/--mode[=\s]+\w+/, '').trim()
// QA mode selection: explicit --mode flag wins, then keyword heuristics.
// Resolve the QA mode: an explicit --mode flag wins; otherwise keyword
// heuristics on the description (discovery before testing, matching the
// documented precedence); default is the full pipeline.
function detectQAMode(args, desc) {
  const explicit = args.match(/--mode[=\s]+(discovery|testing|full)/)
  if (explicit) return explicit[1]
  const heuristics = [
    ['discovery', /发现|扫描|scan|discover|issue|问题/],
    ['testing', /测试|test|覆盖|coverage|TDD/]
  ]
  for (const [mode, pattern] of heuristics) {
    if (pattern.test(desc)) return mode
  }
  return 'full'
}
let qaMode = detectQAMode(args, taskDescription)
// Ask for clarification only when the description is missing or too short;
// a clear enough task skips the confirmation round-trip.
if (!taskDescription || taskDescription.length < 10) {
  const clarification = AskUserQuestion({
    questions: [{
      question: "请描述 QA 目标(哪些模块需要质量保障?关注哪些方面?)",
      header: "QA Target",
      multiSelect: false,
      options: [
        { label: "自定义", description: "输入具体描述" },
        { label: "全项目扫描", description: "对整个项目进行多视角质量扫描" },
        { label: "变更测试", description: "针对最近代码变更生成和执行测试" },
        { label: "完整QA流程", description: "扫描+测试+分析的完整闭环" }
      ]
    }]
  })
}
```
### Phase 2: Create Team + Spawn Teammates
```javascript
const teamName = "quality-assurance"
// Session folder: slug from the task description (CJK characters preserved
// by the allowlist regex) plus the current date.
const sessionSlug = taskDescription.slice(0, 30).replace(/[^a-zA-Z0-9\u4e00-\u9fa5]/g, '-')
const sessionDate = new Date().toISOString().slice(0, 10)
const sessionFolder = `.workflow/.team/QA-${sessionSlug}-${sessionDate}`
Bash(`mkdir -p "${sessionFolder}"`)
// Initialize shared memory with empty slots for every pipeline artifact.
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify({
  discovered_issues: [],
  test_strategy: {},
  generated_tests: {},
  execution_results: {},
  defect_patterns: [],
  coverage_history: [],
  quality_score: null
}, null, 2))
TeamCreate({ team_name: teamName })
// Spawn teammates (see SKILL.md Coordinator Spawn Template)
// Scout, Strategist, Generator, Executor, Analyst
```
### Phase 3: Create Task Chain
根据 qaMode 创建不同的任务链:
```javascript
// Read commands/dispatch.md for full implementation
Read("commands/dispatch.md")
```
**Discovery Mode**:
```
SCOUT-001 → QASTRAT-001 → QAGEN-001 → QARUN-001 → QAANA-001
```
**Testing Mode** (跳过 scout):
```
QASTRAT-001 → QAGEN-001(L1) → QARUN-001(L1) → QAGEN-002(L2) → QARUN-002(L2) → QAANA-001
```
**Full QA Mode**:
```
SCOUT-001 → QASTRAT-001 → [QAGEN-001(L1) + QAGEN-002(L2)](parallel) → [QARUN-001 + QARUN-002](parallel) → QAANA-001 → SCOUT-002(回归)
```
### Phase 4: Coordination Loop
```javascript
// Read commands/monitor.md for full implementation
Read("commands/monitor.md")
```
| Received Message | Action |
|-----------------|--------|
| `scan_ready` | 标记 SCOUT complete → 解锁 QASTRAT |
| `strategy_ready` | 标记 QASTRAT complete → 解锁 QAGEN |
| `tests_generated` | 标记 QAGEN complete → 解锁 QARUN |
| `tests_passed` | 标记 QARUN complete → 解锁 QAANA 或下一层 |
| `tests_failed` | 评估覆盖率 → 触发 GC 循环gc_loop_trigger或继续 |
| `analysis_ready` | 标记 QAANA complete → 评估质量门控 |
| Worker: `error` | 评估严重性 → 重试或上报用户 |
**GC 循环触发逻辑**:
```javascript
if (coverage < targetCoverage && gcIteration < 3) {
// 创建 QAGEN-fix 任务 → QARUN 重新执行
gcIteration++
} else if (gcIteration >= 3) {
// 接受当前覆盖率,继续流水线
team_msg({ type: "quality_gate", data: { status: "CONDITIONAL", coverage } })
}
```
### Phase 5: Report + Persist
```javascript
// 读取 shared memory 汇总结果
const memory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
const report = {
mode: qaMode,
issues_found: memory.discovered_issues?.length || 0,
test_strategy: memory.test_strategy?.layers || [],
tests_generated: Object.keys(memory.generated_tests || {}).length,
pass_rate: memory.execution_results?.pass_rate || 0,
coverage: memory.execution_results?.coverage || 0,
quality_score: memory.quality_score || 'N/A',
defect_patterns: memory.defect_patterns?.length || 0
}
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "user", type: "quality_gate",
summary: `[coordinator] QA完成: ${report.issues_found}个问题, 覆盖率${report.coverage}%, 质量分${report.quality_score}`
})
SendMessage({
content: `## [coordinator] Quality Assurance Report\n\n${JSON.stringify(report, null, 2)}`,
summary: `[coordinator] QA report: ${report.quality_score}`
})
// 询问下一步
AskUserQuestion({
questions: [{
question: "QA 流程已完成。下一步:",
header: "Next",
multiSelect: false,
options: [
{ label: "新目标", description: "对新模块/需求执行QA" },
{ label: "深入分析", description: "对发现的问题进行更深入分析" },
{ label: "关闭团队", description: "关闭所有 teammate 并清理" }
]
}]
})
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Teammate unresponsive | Send follow-up, 2x → respawn |
| Scout finds nothing | Skip to testing mode |
| GC loop stuck >3 iterations | Accept current coverage, continue pipeline |
| Test environment broken | Notify user, suggest manual fix |
| All tasks completed but quality_score < 60 | Report with WARNING, suggest re-run with deeper analysis |

View File

@@ -0,0 +1,220 @@
# Command: run-fix-cycle
> 迭代测试执行与自动修复。运行测试套件,解析结果,失败时委派 code-developer 修复,最多迭代 5 次。
## When to Use
- Phase 3 of Executor
- 测试代码已生成,需要执行并验证
- GC 循环中重新执行修复后的测试
**Trigger conditions**:
- QARUN-* 任务进入执行阶段
- Generator 报告测试生成完成
- GC 循环中 coordinator 创建的重新执行任务
## Strategy
### Delegation Mode
**Mode**: Sequential Delegation修复时/ Direct执行时
**Agent Type**: `code-developer`(仅用于修复)
**Max Iterations**: 5
### Decision Logic
```javascript
// Per-iteration decision: whether another execute-and-fix cycle is worth
// running. Stops when the iteration budget is spent, when all tests pass,
// or when the pass rate is already "good enough" (>=95%) after at least
// 2 iterations. `maxIterations` defaults to the surrounding MAX_ITERATIONS
// constant, so existing call sites keep their behavior unchanged.
function shouldContinue(iteration, passRate, testsFailed, maxIterations = MAX_ITERATIONS) {
  if (iteration >= maxIterations) return false
  if (testsFailed === 0) return false // all green
  if (passRate >= 95 && iteration >= 2) return false // good enough
  return true
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// 检测测试框架和命令
const strategy = sharedMemory.test_strategy || {}
const framework = strategy.test_framework || 'vitest'
const targetLayer = task.description.match(/layer:\s*(L[123])/)?.[1] || 'L1'
// Build the test-runner command for a framework and test layer.
// BUGFIX: layer filtering previously passed Jest's --testPathPattern flag
// to vitest, which vitest does not support; vitest takes positional
// filename filters instead, and --testPathPattern now goes to jest.
function buildTestCommand(framework, layer) {
  const layerFilter = {
    'L1': 'unit',
    'L2': 'integration',
    'L3': 'e2e'
  }
  const commands = {
    'vitest': `npx vitest run --coverage --reporter=json --outputFile=test-results.json`,
    'jest': `npx jest --coverage --json --outputFile=test-results.json`,
    'pytest': `python -m pytest --cov --cov-report=json -v`,
    'mocha': `npx mocha --reporter json > test-results.json`
  }
  let cmd = commands[framework] || 'npm test -- --coverage'
  // Apply the layer filter where the framework supports one (assumes test
  // files live under unit/ integration/ e2e/ directories — TODO confirm).
  const filter = layerFilter[layer]
  if (filter && framework === 'vitest') {
    cmd += ` ${filter}` // vitest: positional filename filter
  } else if (filter && framework === 'jest') {
    cmd += ` --testPathPattern="${filter}"`
  }
  return cmd
}
const testCommand = buildTestCommand(framework, targetLayer)
// 获取关联的测试文件
const generatedTests = sharedMemory.generated_tests?.[targetLayer]?.files || []
```
### Step 2: Execute Strategy
```javascript
let iteration = 0
const MAX_ITERATIONS = 5
let lastOutput = ''
let passRate = 0
let coverage = 0
let testsPassed = 0
let testsFailed = 0
while (iteration < MAX_ITERATIONS) {
// ===== EXECUTE TESTS =====
lastOutput = Bash(`${testCommand} 2>&1 || true`)
// ===== PARSE RESULTS =====
// 解析通过/失败数
const passedMatch = lastOutput.match(/(\d+)\s*(?:passed|passing)/)
const failedMatch = lastOutput.match(/(\d+)\s*(?:failed|failing)/)
testsPassed = passedMatch ? parseInt(passedMatch[1]) : 0
testsFailed = failedMatch ? parseInt(failedMatch[1]) : 0
const testsTotal = testsPassed + testsFailed
passRate = testsTotal > 0 ? Math.round(testsPassed / testsTotal * 100) : 0
// 解析覆盖率
try {
const coverageJson = JSON.parse(Read('coverage/coverage-summary.json'))
coverage = coverageJson.total?.lines?.pct || 0
} catch {
// 尝试从输出解析
const covMatch = lastOutput.match(/(?:Lines|Stmts|All files)\s*[:|]\s*(\d+\.?\d*)%/)
coverage = covMatch ? parseFloat(covMatch[1]) : 0
}
// ===== CHECK PASS =====
if (testsFailed === 0) {
break // 全部通过
}
// ===== SHOULD CONTINUE? =====
if (!shouldContinue(iteration + 1, passRate, testsFailed)) {
break
}
// ===== AUTO-FIX =====
iteration++
// 提取失败详情
const failureLines = lastOutput.split('\n')
.filter(l => /FAIL|Error|AssertionError|Expected|Received|TypeError|ReferenceError/.test(l))
.slice(0, 30)
.join('\n')
// 委派修复给 code-developer
Task({
subagent_type: "code-developer",
run_in_background: false,
description: `Fix ${testsFailed} test failures (iteration ${iteration}/${MAX_ITERATIONS})`,
prompt: `## Goal
Fix failing tests. ONLY modify test files, NEVER modify source code.
## Test Output
\`\`\`
${failureLines}
\`\`\`
## Test Files to Fix
${generatedTests.map(f => `- ${f}`).join('\n')}
## Rules
- Read each failing test file before modifying
- Fix: incorrect assertions, missing imports, wrong mocks, setup issues
- Do NOT: skip tests, add \`@ts-ignore\`, use \`as any\`, modify source code
- Keep existing test structure and naming
- If a test is fundamentally wrong about expected behavior, fix the assertion to match actual source behavior`
})
}
```
### Step 3: Result Processing
```javascript
const resultData = {
layer: targetLayer,
framework: framework,
iterations: iteration,
pass_rate: passRate,
coverage: coverage,
tests_passed: testsPassed,
tests_failed: testsFailed,
all_passed: testsFailed === 0,
max_iterations_reached: iteration >= MAX_ITERATIONS
}
// 保存执行结果
Bash(`mkdir -p "${sessionFolder}/results"`)
Write(`${sessionFolder}/results/run-${targetLayer}.json`, JSON.stringify(resultData, null, 2))
// 保存最后一次测试输出(截取关键部分)
const outputSummary = lastOutput.split('\n').slice(-30).join('\n')
Write(`${sessionFolder}/results/output-${targetLayer}.txt`, outputSummary)
// 更新 shared memory
sharedMemory.execution_results = sharedMemory.execution_results || {}
sharedMemory.execution_results[targetLayer] = resultData
sharedMemory.execution_results.pass_rate = passRate
sharedMemory.execution_results.coverage = coverage
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
## Output Format
```
## Test Execution Results
### Layer: [L1|L2|L3]
### Framework: [vitest|jest|pytest]
### Status: [PASS|FAIL]
### Results
- Tests passed: [count]
- Tests failed: [count]
- Pass rate: [percent]%
- Coverage: [percent]%
- Fix iterations: [count]/[max]
### Failure Details (if any)
- [test name]: [error description]
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Test command not found | Try fallback: npm test → npx vitest → npx jest → pytest |
| Test environment broken | Report error to coordinator, suggest manual fix |
| Max iterations reached with failures | Report current state, let coordinator decide (GC loop or accept) |
| Coverage data unavailable | Report 0%, note coverage collection failure |
| Sub-agent fix introduces new failures | Revert last fix, try different approach |
| No test files to run | Report empty, notify coordinator |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,252 @@
# Role: executor
测试执行者。运行测试套件,收集覆盖率数据,在测试失败时进行自动修复循环。实现 Generator-Executor(GC)循环中的执行端。
## Role Identity
- **Name**: `executor`
- **Task Prefix**: `QARUN-*`
- **Responsibility**: Validation(测试执行与修复)
- **Communication**: SendMessage to coordinator only
- **Output Tag**: `[executor]`
## Role Boundaries
### MUST
- 仅处理 `QARUN-*` 前缀的任务
- 所有输出必须带 `[executor]` 标识
- 执行测试并收集覆盖率
- 在失败时尝试自动修复
### MUST NOT
- ❌ 从零生成新测试(那是 generator 的职责)
- ❌ 修改源代码(除非修复测试本身)
- ❌ 为其他角色创建任务
- ❌ 直接与其他 worker 通信
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `tests_passed` | executor → coordinator | 所有测试通过 | 包含覆盖率数据 |
| `tests_failed` | executor → coordinator | 测试失败 | 包含失败详情和修复尝试 |
| `coverage_report` | executor → coordinator | 覆盖率收集完成 | 覆盖率数据 |
| `error` | executor → coordinator | 执行环境错误 | 阻塞性错误 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `run-fix-cycle` | [commands/run-fix-cycle.md](commands/run-fix-cycle.md) | Phase 3 | 迭代测试执行与自动修复 |
### Subagent Capabilities
| Agent Type | Used By | Purpose |
|------------|---------|---------|
| `code-developer` | run-fix-cycle.md | 测试失败自动修复 |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith('QARUN-') &&
t.owner === 'executor' &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (myTasks.length === 0) return
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
```
### Phase 2: Environment Detection
```javascript
// 读取 shared memory
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1] || '.'
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
const strategy = sharedMemory.test_strategy || {}
const generatedTests = sharedMemory.generated_tests || {}
const targetLayer = task.description.match(/layer:\s*(L[123])/)?.[1] || 'L1'
// 检测测试命令
// Map the detected test framework to its coverage-enabled command line.
// For jest, the target layer (L1/L2/L3) is translated into a path pattern
// (unit/integration/e2e); unknown frameworks fall back to `npm test`.
function detectTestCommand(framework, layer) {
  const jestPattern = layer === 'L1' ? 'unit' : layer === 'L2' ? 'integration' : 'e2e'
  switch (framework) {
    case 'jest':
      return `npx jest --coverage --testPathPattern="${jestPattern}"`
    case 'vitest':
      return `npx vitest run --coverage --reporter=json`
    case 'pytest':
      return `python -m pytest --cov --cov-report=json`
    case 'mocha':
      return `npx mocha --reporter json`
    default:
      return 'npm test -- --coverage'
  }
}
const testCommand = detectTestCommand(strategy.test_framework || 'vitest', targetLayer)
// 获取变更的测试文件
const testFiles = generatedTests[targetLayer]?.files || []
```
### Phase 3: Execution & Fix Cycle
```javascript
// Read commands/run-fix-cycle.md for full implementation
Read("commands/run-fix-cycle.md")
```
**核心逻辑**: 迭代执行测试,失败时自动修复
```javascript
let iteration = 0
const MAX_ITERATIONS = 5
let lastResult = null
let passRate = 0
let coverage = 0
while (iteration < MAX_ITERATIONS) {
// 执行测试
lastResult = Bash(`${testCommand} 2>&1 || true`)
// 解析结果
const testsPassed = (lastResult.match(/(\d+) passed/)?.[1] || 0) * 1
const testsFailed = (lastResult.match(/(\d+) failed/)?.[1] || 0) * 1
const testsTotal = testsPassed + testsFailed
passRate = testsTotal > 0 ? (testsPassed / testsTotal * 100) : 0
// 解析覆盖率
try {
const coverageJson = JSON.parse(Read('coverage/coverage-summary.json'))
coverage = coverageJson.total?.lines?.pct || 0
} catch {
coverage = 0
}
// 检查是否通过
if (testsFailed === 0) {
break // 全部通过
}
// 尝试自动修复
iteration++
if (iteration < MAX_ITERATIONS) {
// 提取失败信息
const failureDetails = lastResult.split('\n')
.filter(l => /FAIL|Error|AssertionError|Expected|Received/.test(l))
.slice(0, 20)
.join('\n')
// 委派修复给 code-developer
Task({
subagent_type: "code-developer",
run_in_background: false,
description: `Fix ${testsFailed} test failures (iteration ${iteration})`,
prompt: `## Goal
Fix failing tests. Do NOT modify source code, only fix test files.
## Test Failures
${failureDetails}
## Test Files
${testFiles.map(f => `- ${f}`).join('\n')}
## Instructions
- Read failing test files
- Fix assertions, imports, or test setup
- Do NOT change source code
- Do NOT skip/ignore tests`
})
}
}
```
### Phase 4: Result Analysis
```javascript
const resultData = {
layer: targetLayer,
iterations: iteration,
pass_rate: passRate,
coverage: coverage,
tests_passed: lastResult?.match(/(\d+) passed/)?.[1] || 0,
tests_failed: lastResult?.match(/(\d+) failed/)?.[1] || 0,
all_passed: passRate === 100 || (lastResult && !lastResult.includes('FAIL'))
}
// 保存执行结果
Bash(`mkdir -p "${sessionFolder}/results"`)
Write(`${sessionFolder}/results/run-${targetLayer}.json`, JSON.stringify(resultData, null, 2))
// 更新 shared memory
sharedMemory.execution_results = sharedMemory.execution_results || {}
sharedMemory.execution_results[targetLayer] = resultData
sharedMemory.execution_results.pass_rate = passRate
sharedMemory.execution_results.coverage = coverage
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
### Phase 5: Report to Coordinator
```javascript
const statusMsg = resultData.all_passed
? `全部通过 (${resultData.tests_passed} tests, 覆盖率 ${coverage}%)`
: `${resultData.tests_failed} 个失败 (${iteration}次修复尝试, 覆盖率 ${coverage}%)`
const msgType = resultData.all_passed ? 'tests_passed' : 'tests_failed'
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: "executor",
to: "coordinator",
type: msgType,
summary: `[executor] ${targetLayer}: ${statusMsg}`,
ref: `${sessionFolder}/results/run-${targetLayer}.json`,
data: { pass_rate: passRate, coverage, iterations: iteration }
})
SendMessage({
type: "message",
recipient: "coordinator",
content: `## [executor] Test Execution Results
**Task**: ${task.subject}
**Layer**: ${targetLayer}
**Status**: ${resultData.all_passed ? 'PASS' : 'FAIL'}
**Pass Rate**: ${passRate}%
**Coverage**: ${coverage}%
**Iterations**: ${iteration}/${MAX_ITERATIONS}
### Details
- Tests passed: ${resultData.tests_passed}
- Tests failed: ${resultData.tests_failed}`,
summary: `[executor] QARUN ${targetLayer}: ${resultData.all_passed ? 'PASS' : 'FAIL'} ${passRate}%`
})
TaskUpdate({ taskId: task.id, status: 'completed' })
const nextTasks = TaskList().filter(t =>
t.subject.startsWith('QARUN-') && t.owner === 'executor' &&
t.status === 'pending' && t.blockedBy.length === 0
)
if (nextTasks.length > 0) { /* back to Phase 1 */ }
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No QARUN-* tasks available | Idle, wait for coordinator |
| Test command fails to execute | Try fallback: `npm test`, `npx vitest run`, `pytest` |
| Max iterations reached | Report current pass rate, let coordinator decide |
| Coverage data unavailable | Report 0%, note coverage collection failure |
| Test environment broken | SendMessage error to coordinator, suggest manual fix |
| Sub-agent fix introduces new failures | Revert fix, try next failure |

View File

@@ -0,0 +1,258 @@
# Command: generate-tests
> 按层级生成测试代码。根据 strategist 策略和项目现有测试模式,生成 L1/L2/L3 测试用例。
## When to Use
- Phase 3 of Generator
- 策略已制定,需要生成对应层级的测试代码
- GC 循环中修订失败的测试
**Trigger conditions**:
- QAGEN-* 任务进入执行阶段
- 测试策略中包含当前层级
- GC 循环触发修复任务(QAGEN-fix-*)
## Strategy
### Delegation Mode
**Mode**: Sequential Delegation(复杂时)/ Direct(简单时)
**Agent Type**: `code-developer`
**Delegation Scope**: Per-layer
### Decision Logic
```javascript
const focusFiles = layerConfig.focus_files || []
const isGCFix = task.subject.includes('fix')
if (isGCFix) {
// GC 修复模式:读取失败信息,针对性修复
mode = 'gc-fix'
} else if (focusFiles.length <= 3) {
// 直接生成:内联 Read → 分析 → Write
mode = 'direct'
} else {
// 委派给 code-developer
mode = 'delegate'
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// 从 shared memory 获取策略
const strategy = sharedMemory.test_strategy || {}
const targetLayer = task.description.match(/layer:\s*(L[123])/)?.[1] || 'L1'
// 确定层级配置
const layerConfig = strategy.layers?.find(l => l.level === targetLayer) || {
level: targetLayer,
name: targetLayer === 'L1' ? 'Unit Tests' : targetLayer === 'L2' ? 'Integration Tests' : 'E2E Tests',
target_coverage: targetLayer === 'L1' ? 80 : targetLayer === 'L2' ? 60 : 40,
focus_files: []
}
// 学习现有测试模式(必须找 3 个相似测试文件)
const existingTests = Glob(`**/*.{test,spec}.{ts,tsx,js,jsx}`)
const testPatterns = existingTests.slice(0, 3).map(f => ({
path: f,
content: Read(f)
}))
// 检测测试约定
const testConventions = detectTestConventions(testPatterns)
```
### Step 2: Execute Strategy
```javascript
if (mode === 'gc-fix') {
// GC 修复模式
// 读取失败信息
const failedTests = sharedMemory.execution_results?.[targetLayer]
const failureOutput = Read(`${sessionFolder}/results/run-${targetLayer}.json`)
Task({
subagent_type: "code-developer",
run_in_background: false,
description: `Fix failing ${targetLayer} tests (GC iteration)`,
prompt: `## Goal
Fix the failing tests based on execution results. Do NOT modify source code.
## Test Execution Results
${JSON.stringify(failedTests, null, 2)}
## Test Conventions
${JSON.stringify(testConventions, null, 2)}
## Instructions
- Read each failing test file
- Fix assertions, imports, mocks, or test setup
- Ensure tests match actual source behavior
- Do NOT skip or ignore tests
- Do NOT modify source files`
})
} else if (mode === 'direct') {
// 直接生成模式
const focusFiles = layerConfig.focus_files || []
for (const sourceFile of focusFiles) {
const sourceContent = Read(sourceFile)
// 确定测试文件路径(遵循项目约定)
const testPath = determineTestPath(sourceFile, testConventions)
// 检查是否已有测试
let existingTest = null
try { existingTest = Read(testPath) } catch {}
if (existingTest) {
// 补充现有测试:分析缺失的测试用例
const missingCases = analyzeMissingCases(sourceContent, existingTest)
if (missingCases.length > 0) {
// 追加测试用例
Edit({
file_path: testPath,
old_string: findLastTestBlock(existingTest),
new_string: `${findLastTestBlock(existingTest)}\n\n${generateCases(missingCases, testConventions)}`
})
}
} else {
// 创建新测试文件
const testContent = generateFullTestFile(sourceFile, sourceContent, testConventions, targetLayer)
Write(testPath, testContent)
}
}
} else {
// 委派模式
const focusFiles = layerConfig.focus_files || []
Task({
subagent_type: "code-developer",
run_in_background: false,
description: `Generate ${targetLayer} tests for ${focusFiles.length} files`,
prompt: `## Goal
Generate ${layerConfig.name} for the following source files.
## Test Framework
${strategy.test_framework || 'vitest'}
## Existing Test Patterns (MUST follow these exactly)
${testPatterns.map(t => `### ${t.path}\n\`\`\`\n${t.content.substring(0, 800)}\n\`\`\``).join('\n\n')}
## Test Conventions
- Test file location: ${testConventions.location}
- Import style: ${testConventions.importStyle}
- Describe/it nesting: ${testConventions.nesting}
## Source Files to Test
${focusFiles.map(f => `- ${f}`).join('\n')}
## Requirements
- Follow existing test patterns exactly (import style, naming, structure)
- Cover: happy path + edge cases + error cases
- Target coverage: ${layerConfig.target_coverage}%
- Do NOT modify source files, only create/modify test files
- Do NOT use \`any\` type assertions
- Do NOT skip or mark tests as TODO without implementation`
})
}
// 辅助函数
// Derive the test file path for a source file, honoring the project's
// detected convention. '__tests__' puts the test in a sibling __tests__
// directory; 'colocated' (and any unknown value) puts it next to the source.
function determineTestPath(sourceFile, conventions) {
  const toTestName = (p) => p.replace(/\.(ts|tsx|js|jsx)$/, '.test.$1')
  if (conventions.location === '__tests__') {
    const slash = sourceFile.lastIndexOf('/')
    const dir = sourceFile.substring(0, slash)
    const base = sourceFile.substring(slash + 1)
    return `${dir}/__tests__/${toTestName(base)}`
  }
  return toTestName(sourceFile)
}
// Infer test conventions from sample test files. Starts from framework
// defaults and lets each sample override them in order, so a later sample's
// evidence wins on conflict (matching the scan order of the inputs).
function detectTestConventions(patterns) {
  return patterns.reduce((conv, sample) => {
    if (sample.path.includes('__tests__')) conv.location = '__tests__'
    if (sample.content.includes("import { describe")) conv.nesting = 'describe-it'
    if (sample.content.includes("from 'vitest'")) conv.framework = 'vitest'
    if (sample.content.includes("from '@jest'") || sample.content.includes("from 'jest'")) conv.framework = 'jest'
    return conv
  }, {
    location: 'colocated', // or '__tests__'
    importStyle: 'named', // or 'default'
    nesting: 'describe-it', // or 'test-only'
    framework: 'vitest'
  })
}
```
### Step 3: Result Processing
```javascript
// 收集生成/修改的测试文件
const generatedTests = Bash(`git diff --name-only`).split('\n')
.filter(f => /\.(test|spec)\.(ts|tsx|js|jsx)$/.test(f))
// TypeScript 语法检查
const syntaxResult = Bash(`npx tsc --noEmit ${generatedTests.join(' ')} 2>&1 || true`)
const hasSyntaxErrors = syntaxResult.includes('error TS')
// 自动修复语法错误(最多 3 次)
if (hasSyntaxErrors) {
let fixAttempt = 0
while (fixAttempt < 3 && syntaxResult.includes('error TS')) {
const errors = syntaxResult.split('\n').filter(l => l.includes('error TS')).slice(0, 5)
// 尝试修复每个错误...
fixAttempt++
}
}
// 更新 shared memory
const testInfo = {
layer: targetLayer,
files: generatedTests,
count: generatedTests.length,
syntax_clean: !hasSyntaxErrors,
mode: mode,
gc_fix: mode === 'gc-fix'
}
sharedMemory.generated_tests = sharedMemory.generated_tests || {}
sharedMemory.generated_tests[targetLayer] = testInfo
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
## Output Format
```
## Test Generation Results
### Layer: [L1|L2|L3]
### Mode: [direct|delegate|gc-fix]
### Files Generated: [count]
- [test file path]
### Syntax Check: PASS/FAIL
### Conventions Applied: [framework], [location], [nesting]
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No focus files in strategy | Generate L1 tests for all source files in scope |
| No existing test patterns | Use framework defaults (vitest/jest/pytest) |
| Sub-agent failure | Retry once, fallback to direct generation |
| Syntax errors persist after 3 fixes | Report errors, proceed with available tests |
| Source file not found | Skip file, log warning |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,282 @@
# Role: generator
测试用例生成器。按 strategist 制定的策略和层级,生成对应的测试代码。支持 L1 单元测试、L2 集成测试、L3 E2E 测试。遵循项目现有测试模式和框架约定。
## Role Identity
- **Name**: `generator`
- **Task Prefix**: `QAGEN-*`
- **Responsibility**: Code generation(测试代码生成)
- **Communication**: SendMessage to coordinator only
- **Output Tag**: `[generator]`
## Role Boundaries
### MUST
- 仅处理 `QAGEN-*` 前缀的任务
- 所有输出必须带 `[generator]` 标识
- 遵循项目现有测试框架和模式
- 生成的测试必须可运行
### MUST NOT
- ❌ 修改源代码(仅生成测试代码)
- ❌ 执行测试
- ❌ 为其他角色创建任务
- ❌ 直接与其他 worker 通信
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `tests_generated` | generator → coordinator | 测试生成完成 | 包含生成的测试文件列表 |
| `tests_revised` | generator → coordinator | 测试修订完成 | GC 循环中修订后 |
| `error` | generator → coordinator | 生成失败 | 阻塞性错误 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `generate-tests` | [commands/generate-tests.md](commands/generate-tests.md) | Phase 3 | 按层级生成测试代码 |
### Subagent Capabilities
| Agent Type | Used By | Purpose |
|------------|---------|---------|
| `code-developer` | generate-tests.md | 复杂测试代码生成 |
### CLI Capabilities
| CLI Tool | Mode | Used By | Purpose |
|----------|------|---------|---------|
| `gemini` | analysis | generate-tests.md | 分析现有测试模式 |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith('QAGEN-') &&
t.owner === 'generator' &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (myTasks.length === 0) return
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
```
### Phase 2: Strategy & Pattern Loading
```javascript
// 读取 shared memory 获取策略
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1] || '.'
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
const strategy = sharedMemory.test_strategy || {}
const targetLayer = task.description.match(/layer:\s*(L[123])/)?.[1] || strategy.layers?.[0]?.level || 'L1'
// 确定目标层级的详情
const layerConfig = strategy.layers?.find(l => l.level === targetLayer) || {
level: targetLayer,
name: targetLayer === 'L1' ? 'Unit Tests' : targetLayer === 'L2' ? 'Integration Tests' : 'E2E Tests',
target_coverage: targetLayer === 'L1' ? 80 : targetLayer === 'L2' ? 60 : 40,
focus_files: []
}
// 学习现有测试模式(找 3 个相似测试文件)
const existingTests = Glob(`**/*.{test,spec}.{ts,tsx,js,jsx}`)
const testPatterns = existingTests.slice(0, 3).map(f => ({
path: f,
content: Read(f)
}))
// 检测测试框架和配置
const testFramework = strategy.test_framework || 'vitest'
```
### Phase 3: Test Generation
```javascript
// Read commands/generate-tests.md for full implementation
Read("commands/generate-tests.md")
```
**核心策略**: 基于复杂度选择生成方式
```javascript
const focusFiles = layerConfig.focus_files || []
if (focusFiles.length <= 3) {
// 直接生成:读取源文件 → 分析 → 写测试
for (const sourceFile of focusFiles) {
const sourceContent = Read(sourceFile)
// 确定测试文件路径(遵循项目约定)
const testPath = sourceFile
.replace(/\.(ts|tsx|js|jsx)$/, `.test.$1`)
.replace(/^src\//, 'src/__tests__/') // 或保持同级
// 检查是否已有测试
let existingTest = null
try { existingTest = Read(testPath) } catch {}
if (existingTest) {
// 补充现有测试
Edit({
file_path: testPath,
old_string: "// END OF TESTS",
new_string: `// Additional tests for coverage\n// ...new test cases...\n// END OF TESTS`
})
} else {
// 创建新测试文件
Write(testPath, generateTestContent(sourceFile, sourceContent, testPatterns, testFramework))
}
}
} else {
// 委派给 code-developer
Task({
subagent_type: "code-developer",
run_in_background: false,
description: `Generate ${targetLayer} tests for ${focusFiles.length} files`,
prompt: `## Goal
Generate ${layerConfig.name} for the following source files.
## Test Framework
${testFramework}
## Existing Test Patterns
${testPatterns.map(t => `### ${t.path}\n\`\`\`\n${t.content.substring(0, 500)}\n\`\`\``).join('\n\n')}
## Source Files to Test
${focusFiles.map(f => `- ${f}`).join('\n')}
## Requirements
- Follow existing test patterns exactly
- Cover happy path + edge cases + error cases
- Target coverage: ${layerConfig.target_coverage}%
- Do NOT modify source files, only create/modify test files`
})
}
// 辅助函数
// Build a skeleton test file for a source module: one describe-block per
// runtime export, each with happy-path / edge-case / error-case TODO stubs.
// `patterns` and `framework` are accepted for signature compatibility with
// callers; the skeleton itself is framework-agnostic describe/it syntax.
function generateTestContent(sourceFile, sourceContent, patterns, framework) {
  const imports = extractExports(sourceContent)
  return `import { ${imports.join(', ')} } from '${sourceFile.replace(/\.(ts|tsx|js|jsx)$/, '')}'
describe('${sourceFile}', () => {
${imports.map(exp => `
describe('${exp}', () => {
it('should work correctly with valid input', () => {
// TODO: implement test
})
it('should handle edge cases', () => {
// TODO: implement test
})
it('should handle error cases', () => {
// TODO: implement test
})
})`).join('\n')}
})`
}
// Extract the names of *value* exports (function/const/class) from source
// text. 'interface' and 'type' declarations are deliberately excluded:
// they are type-only, so importing them as values or wrapping them in
// describe() would produce broken, un-runnable tests.
function extractExports(content) {
  const matches = content.match(/export\s+(function|const|class)\s+(\w+)/g) || []
  return matches.map(m => m.split(/\s+/).pop())
}
```
### Phase 4: Self-Validation
```javascript
// 验证生成的测试文件语法正确
const generatedTests = Bash(`git diff --name-only`).split('\n')
.filter(f => /\.(test|spec)\.(ts|tsx|js|jsx)$/.test(f))
// TypeScript 语法检查
const syntaxResult = Bash(`npx tsc --noEmit ${generatedTests.join(' ')} 2>&1 || true`)
const hasSyntaxErrors = syntaxResult.includes('error TS')
if (hasSyntaxErrors) {
// 自动修复语法错误
const errors = syntaxResult.split('\n').filter(l => l.includes('error TS'))
for (const error of errors.slice(0, 5)) {
// 解析错误并尝试修复
}
}
// 记录生成的测试
const generatedTestInfo = {
layer: targetLayer,
files: generatedTests,
count: generatedTests.length,
syntax_clean: !hasSyntaxErrors
}
// 更新 shared memory
sharedMemory.generated_tests = sharedMemory.generated_tests || {}
sharedMemory.generated_tests[targetLayer] = generatedTestInfo
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
### Phase 5: Report to Coordinator
```javascript
const msgType = task.subject.includes('fix') ? 'tests_revised' : 'tests_generated'
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: "generator",
to: "coordinator",
type: msgType,
summary: `[generator] ${targetLayer} 测试生成完成: ${generatedTests.length} 文件, 语法${hasSyntaxErrors ? '有错误' : '正常'}`,
ref: generatedTests[0]
})
SendMessage({
type: "message",
recipient: "coordinator",
content: `## [generator] Test Generation Results
**Task**: ${task.subject}
**Layer**: ${targetLayer} - ${layerConfig.name}
**Generated**: ${generatedTests.length} test files
**Syntax**: ${hasSyntaxErrors ? 'ERRORS' : 'CLEAN'}
### Generated Files
${generatedTests.map(f => `- ${f}`).join('\n')}`,
summary: `[generator] QAGEN complete: ${targetLayer} ${generatedTests.length} files`
})
TaskUpdate({ taskId: task.id, status: 'completed' })
const nextTasks = TaskList().filter(t =>
t.subject.startsWith('QAGEN-') && t.owner === 'generator' &&
t.status === 'pending' && t.blockedBy.length === 0
)
if (nextTasks.length > 0) { /* back to Phase 1 */ }
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No QAGEN-* tasks available | Idle, wait for coordinator |
| Strategy not found in shared memory | Generate L1 unit tests for changed files |
| No existing test patterns found | Use framework defaults |
| Sub-agent failure | Retry once, fallback to direct generation |
| Syntax errors in generated tests | Auto-fix up to 3 attempts, report remaining |
| Source file not found | Skip file, report to coordinator |

View File

@@ -0,0 +1,216 @@
# Command: scan
> 多视角 CLI Fan-out 扫描。从 bug、安全、测试覆盖、代码质量、UX 等视角并行分析代码,发现潜在问题。
## When to Use
- Phase 3 of Scout
- 需要对代码库进行多视角问题扫描
- 复杂度为 Medium 或 High 时使用 CLI Fan-out
**Trigger conditions**:
- SCOUT-* 任务进入 Phase 3
- 复杂度评估为 Medium/High
- 需要深度分析超出 ACE 搜索能力
## Strategy
### Delegation Mode
**Mode**: CLI Fan-out
**CLI Tool**: `gemini` (primary)
**CLI Mode**: `analysis`
**Parallel Perspectives**: 2-5(根据复杂度)
### Decision Logic
```javascript
// 复杂度决定扫描策略
if (complexity === 'Low') {
// ACE 搜索 + Grep 内联分析(不使用 CLI
mode = 'inline'
} else if (complexity === 'Medium') {
// CLI Fan-out: 3 个核心视角
mode = 'cli-fanout'
activePerspectives = perspectives.slice(0, 3)
} else {
// CLI Fan-out: 所有视角
mode = 'cli-fanout'
activePerspectives = perspectives
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// 确定扫描范围
const projectRoot = Bash(`git rev-parse --show-toplevel 2>/dev/null || pwd`).trim()
const scanScope = task.description.match(/scope:\s*(.+)/)?.[1] || '**/*'
// 获取变更文件用于聚焦扫描
const changedFiles = Bash(`git diff --name-only HEAD~5 2>/dev/null || echo ""`)
.split('\n').filter(Boolean)
// 构建文件上下文
const fileContext = changedFiles.length > 0
? changedFiles.map(f => `@${f}`).join(' ')
: `@${scanScope}`
// 已知缺陷模式(来自 shared memory
const knownPatternsText = knownPatterns.length > 0
? `\nKnown defect patterns to verify: ${knownPatterns.map(p => p.description).join('; ')}`
: ''
```
### Step 2: Execute Strategy
```javascript
if (mode === 'inline') {
// 快速内联扫描
const aceResults = mcp__ace-tool__search_context({
project_root_path: projectRoot,
query: "potential bugs, error handling issues, unchecked return values, security vulnerabilities, missing input validation"
})
// 解析 ACE 结果并分类
for (const result of aceResults) {
classifyFinding(result)
}
} else {
// CLI Fan-out: 每个视角一个 CLI 调用
const perspectivePrompts = {
'bug': `PURPOSE: Discover potential bugs and logic errors
TASK: • Find unchecked return values • Identify race conditions • Check null/undefined handling • Find off-by-one errors • Detect resource leaks
MODE: analysis
CONTEXT: ${fileContext}${knownPatternsText}
EXPECTED: List of findings with severity, file:line, description, and fix suggestion
CONSTRAINTS: Focus on real bugs, avoid false positives`,
'security': `PURPOSE: Identify security vulnerabilities and risks
TASK: • Check for injection flaws (SQL, command, XSS) • Find authentication/authorization gaps • Identify sensitive data exposure • Check input validation • Review crypto usage
MODE: analysis
CONTEXT: ${fileContext}
EXPECTED: Security findings with CVSS-style severity, file:line, CWE references where applicable
CONSTRAINTS: Focus on exploitable vulnerabilities`,
'test-coverage': `PURPOSE: Identify untested code paths and coverage gaps
TASK: • Find functions/methods without tests • Identify complex logic without assertions • Check error paths without coverage • Find boundary conditions untested
MODE: analysis
CONTEXT: ${fileContext}
EXPECTED: List of untested areas with file:line, complexity indicator, and test suggestion
CONSTRAINTS: Focus on high-risk untested code`,
'code-quality': `PURPOSE: Detect code quality issues and anti-patterns
TASK: • Find code duplication • Identify overly complex functions • Check naming conventions • Find dead code • Detect God objects/functions
MODE: analysis
CONTEXT: ${fileContext}
EXPECTED: Quality findings with severity, file:line, and improvement suggestion
CONSTRAINTS: Focus on maintainability impacts`,
'ux': `PURPOSE: Identify UX-impacting issues in code
TASK: • Find missing loading states • Check error message quality • Identify accessibility gaps • Find inconsistent UI patterns • Check responsive handling
MODE: analysis
CONTEXT: ${fileContext}
EXPECTED: UX findings with impact level, file:line, and user-facing description
CONSTRAINTS: Focus on user-visible issues`
}
for (const perspective of activePerspectives) {
const prompt = perspectivePrompts[perspective]
if (!prompt) continue
Bash(`ccw cli -p "${prompt}" --tool gemini --mode analysis --rule analysis-assess-security-risks`, {
run_in_background: true
})
}
// 等待所有 CLI 完成hook 回调通知)
}
```
### Step 3: Result Processing
```javascript
// 聚合所有视角的结果
const allFindings = { critical: [], high: [], medium: [], low: [] }
// 从 CLI 输出解析结果
for (const perspective of activePerspectives) {
const findings = parseCliOutput(cliResults[perspective])
for (const finding of findings) {
finding.perspective = perspective
allFindings[finding.severity].push(finding)
}
}
// 去重:相同 file:line 的发现合并
// Merge findings that point at the same file:line. The first finding for a
// location is kept; each duplicate folds its perspective into the kept
// entry's `perspectives` array. Single pass with a Map (the original
// re-scanned the output list per duplicate, which was O(n^2)).
function deduplicateFindings(findings) {
  const byLocation = new Map()
  for (const f of findings) {
    const key = `${f.file}:${f.line}`
    const existing = byLocation.get(key)
    if (!existing) {
      byLocation.set(key, f)
    } else {
      // First merge seeds the array from the kept entry's own perspective.
      existing.perspectives = [...(existing.perspectives || [existing.perspective]), f.perspective]
    }
  }
  return [...byLocation.values()]
}
for (const severity of ['critical', 'high', 'medium', 'low']) {
allFindings[severity] = deduplicateFindings(allFindings[severity])
}
// 与已知缺陷模式对比
for (const pattern of knownPatterns) {
for (const severity of ['critical', 'high', 'medium', 'low']) {
for (const finding of allFindings[severity]) {
if (finding.file === pattern.file || finding.description.includes(pattern.type)) {
finding.known_pattern = true
}
}
}
}
```
## Output Format
```
## Scan Results
### Perspectives Scanned: [list]
### Complexity: [Low|Medium|High]
### Findings by Severity
#### Critical ([count])
- [file:line] [perspective] - [description]
#### High ([count])
- [file:line] [perspective] - [description]
#### Medium ([count])
- [file:line] - [description]
#### Low ([count])
- [file:line] - [description]
### Known Pattern Matches: [count]
### New Findings: [count]
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| CLI tool unavailable | Fall back to ACE search + Grep inline analysis |
| CLI returns empty for a perspective | Note incomplete perspective, continue others |
| Too many findings (>50) | Prioritize critical/high, summarize medium/low |
| Timeout on CLI call | Use partial results, note incomplete perspectives |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,244 @@
# Role: scout
多视角问题侦察员。主动扫描代码库,从 bug、安全、UX、测试覆盖、代码质量等多个视角发现潜在问题,创建结构化 issue。融合 issue-discover 的多视角扫描能力。
## Role Identity
- **Name**: `scout`
- **Task Prefix**: `SCOUT-*`
- **Responsibility**: Orchestration(多视角扫描编排)
- **Communication**: SendMessage to coordinator only
- **Output Tag**: `[scout]`
## Role Boundaries
### MUST
- 仅处理 `SCOUT-*` 前缀的任务
- 所有输出必须带 `[scout]` 标识
- 仅通过 SendMessage 与 coordinator 通信
- 严格在问题发现职责范围内工作
### MUST NOT
- ❌ 编写或修改代码
- ❌ 执行测试
- ❌ 为其他角色创建任务
- ❌ 直接与其他 worker 通信
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `scan_ready` | scout → coordinator | 扫描完成 | 包含发现的问题列表 |
| `issues_found` | scout → coordinator | 发现高优先级问题 | 需要关注的关键发现 |
| `error` | scout → coordinator | 扫描失败 | 阻塞性错误 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `scan` | [commands/scan.md](commands/scan.md) | Phase 3 | 多视角 CLI Fan-out 扫描 |
### Subagent Capabilities
| Agent Type | Used By | Purpose |
|------------|---------|---------|
| `cli-explore-agent` | scan.md | 多角度代码库探索 |
### CLI Capabilities
| CLI Tool | Mode | Used By | Purpose |
|----------|------|---------|---------|
| `gemini` | analysis | scan.md | 多视角代码分析 |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith('SCOUT-') &&
t.owner === 'scout' &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (myTasks.length === 0) return // idle
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
```
### Phase 2: Context & Scope Assessment
```javascript
// 确定扫描范围
const scanScope = task.description.match(/scope:\s*(.+)/)?.[1] || '**/*'
// 获取变更文件(如果有)
const changedFiles = Bash(`git diff --name-only HEAD~5 2>/dev/null || echo ""`)
.split('\n').filter(Boolean)
// 读取 shared memory 获取历史缺陷模式
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1] || '.'
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
const knownPatterns = sharedMemory.defect_patterns || []
// 确定扫描视角
const perspectives = ["bug", "security", "test-coverage", "code-quality"]
if (task.description.includes('ux')) perspectives.push("ux")
// 评估复杂度
// Score the task description against weighted keyword signals and bucket the
// total into Low / Medium / High.
function assessComplexity(desc) {
  let score = 0
  score += /全项目|全量|comprehensive|full/.test(desc) ? 3 : 0
  score += /security|安全/.test(desc) ? 1 : 0
  score += /multiple|across|cross|多模块/.test(desc) ? 2 : 0
  if (score >= 4) return 'High'
  if (score >= 2) return 'Medium'
  return 'Low'
}
const complexity = assessComplexity(task.description)
```
### Phase 3: Multi-Perspective Scan
```javascript
// Read commands/scan.md for full CLI Fan-out implementation
Read("commands/scan.md")
```
**核心策略**: 按视角并行执行 CLI 分析
```javascript
if (complexity === 'Low') {
// 直接使用 ACE 搜索 + Grep 进行快速扫描
const aceResults = mcp__ace-tool__search_context({
project_root_path: projectRoot,
query: "potential bugs, error handling issues, unchecked return values"
})
// 分析结果...
} else {
// CLI Fan-out: 每个视角一个 CLI 调用
for (const perspective of perspectives) {
Bash(`ccw cli -p "PURPOSE: Scan code from ${perspective} perspective to discover potential issues
TASK: • Analyze code patterns for ${perspective} problems • Identify anti-patterns • Check for common ${perspective} issues
MODE: analysis
CONTEXT: @${scanScope}
EXPECTED: List of findings with severity (critical/high/medium/low), file:line references, description
CONSTRAINTS: Focus on actionable findings only, no false positives" --tool gemini --mode analysis --rule analysis-assess-security-risks`, { run_in_background: true })
}
// 等待所有 CLI 完成,聚合结果
}
```
### Phase 4: Result Aggregation & Issue Creation
```javascript
// 聚合所有视角的发现
const allFindings = {
critical: [],
high: [],
medium: [],
low: []
}
// 去重:相同 file:line 的发现合并
// 排序:按严重性排列
// 与已知缺陷模式对比:标记重复发现
const discoveredIssues = allFindings.critical
.concat(allFindings.high)
.map((f, i) => ({
id: `SCOUT-ISSUE-${i + 1}`,
severity: f.severity,
perspective: f.perspective,
file: f.file,
line: f.line,
description: f.description,
suggestion: f.suggestion
}))
// 更新 shared memory
sharedMemory.discovered_issues = discoveredIssues
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
// 保存扫描结果
Write(`${sessionFolder}/scan/scan-results.json`, JSON.stringify({
scan_date: new Date().toISOString(),
perspectives: perspectives,
total_findings: Object.values(allFindings).flat().length,
by_severity: {
critical: allFindings.critical.length,
high: allFindings.high.length,
medium: allFindings.medium.length,
low: allFindings.low.length
},
findings: allFindings,
issues_created: discoveredIssues.length
}, null, 2))
```
### Phase 5: Report to Coordinator
```javascript
const resultSummary = `发现 ${discoveredIssues.length} 个问题Critical: ${allFindings.critical.length}, High: ${allFindings.high.length}, Medium: ${allFindings.medium.length}, Low: ${allFindings.low.length}`
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: "scout",
to: "coordinator",
type: discoveredIssues.length > 0 ? "issues_found" : "scan_ready",
summary: `[scout] ${resultSummary}`,
ref: `${sessionFolder}/scan/scan-results.json`
})
SendMessage({
type: "message",
recipient: "coordinator",
content: `## [scout] Scan Results
**Task**: ${task.subject}
**Perspectives**: ${perspectives.join(', ')}
**Status**: ${discoveredIssues.length > 0 ? 'Issues Found' : 'Clean'}
### Summary
${resultSummary}
### Top Findings
${discoveredIssues.slice(0, 5).map(i => `- **[${i.severity}]** ${i.file}:${i.line} - ${i.description}`).join('\n')}
### Scan Report
${sessionFolder}/scan/scan-results.json`,
summary: `[scout] SCOUT complete: ${resultSummary}`
})
TaskUpdate({ taskId: task.id, status: 'completed' })
// Check for next task
const nextTasks = TaskList().filter(t =>
t.subject.startsWith('SCOUT-') &&
t.owner === 'scout' &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (nextTasks.length > 0) {
// Continue with next task → back to Phase 1
}
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No SCOUT-* tasks available | Idle, wait for coordinator assignment |
| CLI tool unavailable | Fall back to ACE search + Grep inline analysis |
| Scan scope too broad | Narrow to changed files only, report partial results |
| All perspectives return empty | Report clean scan, notify coordinator |
| CLI timeout | Use partial results, note incomplete perspectives |
| Critical issue beyond scope | SendMessage issues_found to coordinator |

View File

@@ -0,0 +1,221 @@
# Command: analyze-scope
> 变更范围分析 + 测试策略制定。分析代码变更、scout 发现和项目结构,确定测试层级和覆盖率目标。
## When to Use
- Phase 2-3 of Strategist
- 需要分析代码变更范围
- 需要将 scout 发现转化为测试策略
**Trigger conditions**:
- QASTRAT-* 任务进入执行阶段
- 变更文件数 > 5 需要 CLI 辅助分析
- 存在 scout 发现的高优先级问题
## Strategy
### Delegation Mode
**Mode**: CLI Fan-out(复杂项目)/ Direct(简单项目)
**CLI Tool**: `gemini` (primary)
**CLI Mode**: `analysis`
### Decision Logic
```javascript
const totalScope = changedFiles.length + discoveredIssues.length
if (totalScope <= 5) {
// 直接内联分析
mode = 'direct'
} else if (totalScope <= 15) {
// 单次 CLI 分析
mode = 'single-cli'
} else {
// 多维度 CLI 分析
mode = 'multi-cli'
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// 从 shared memory 获取 scout 发现
const discoveredIssues = sharedMemory.discovered_issues || []
// 分析 git diff 获取变更范围
const changedFiles = Bash(`git diff --name-only HEAD~5 2>/dev/null || git diff --name-only --cached 2>/dev/null || echo ""`)
.split('\n').filter(Boolean)
// 分类变更文件
const fileCategories = {
source: changedFiles.filter(f => /\.(ts|tsx|js|jsx|py|java|go|rs)$/.test(f)),
test: changedFiles.filter(f => /\.(test|spec)\.(ts|tsx|js|jsx)$/.test(f) || /test_/.test(f)),
config: changedFiles.filter(f => /\.(json|yaml|yml|toml|env)$/.test(f)),
style: changedFiles.filter(f => /\.(css|scss|less)$/.test(f)),
docs: changedFiles.filter(f => /\.(md|txt|rst)$/.test(f))
}
// 检测项目测试框架
const packageJson = Read('package.json')
const testFramework = detectFramework(packageJson)
// 获取已有测试覆盖率基线
let baselineCoverage = null
try {
const coverageSummary = JSON.parse(Read('coverage/coverage-summary.json'))
baselineCoverage = coverageSummary.total?.lines?.pct || null
} catch {}
```
### Step 2: Execute Strategy
```javascript
if (mode === 'direct') {
// 内联分析:直接构建策略
buildStrategyDirect(fileCategories, discoveredIssues, testFramework)
} else if (mode === 'single-cli') {
// 单次 CLI 综合分析
Bash(`ccw cli -p "PURPOSE: Analyze code changes and scout findings to determine optimal test strategy
TASK: • Classify ${changedFiles.length} changed files by risk level • Map ${discoveredIssues.length} scout issues to test requirements • Identify integration points between changed modules • Recommend test layers (L1/L2/L3) with coverage targets
MODE: analysis
CONTEXT: @${changedFiles.slice(0, 20).join(' @')} | Memory: Scout found ${discoveredIssues.length} issues, baseline coverage ${baselineCoverage || 'unknown'}%
EXPECTED: JSON with layers array, each containing level, name, target_coverage, focus_files, rationale
CONSTRAINTS: Be conservative with L3 E2E tests | Focus L1 on changed source files" --tool gemini --mode analysis --rule analysis-analyze-code-patterns`, {
run_in_background: true
})
// 等待 CLI 完成
} else {
// 多维度分析
// Dimension 1: 变更风险分析
Bash(`ccw cli -p "PURPOSE: Assess risk level of code changes
TASK: • Classify each file by change risk (high/medium/low) • Identify files touching critical paths • Map dependency chains
MODE: analysis
CONTEXT: @${fileCategories.source.join(' @')}
EXPECTED: Risk matrix with file:risk_level mapping
CONSTRAINTS: Focus on source files only" --tool gemini --mode analysis`, {
run_in_background: true
})
// Dimension 2: 测试覆盖差距分析
Bash(`ccw cli -p "PURPOSE: Identify test coverage gaps for changed code
TASK: • Find changed functions without tests • Map test files to source files • Identify missing integration test scenarios
MODE: analysis
CONTEXT: @${[...fileCategories.source, ...fileCategories.test].join(' @')}
EXPECTED: Coverage gap report with untested functions and modules
CONSTRAINTS: Compare existing tests to changed code" --tool gemini --mode analysis`, {
run_in_background: true
})
// 等待所有 CLI 完成
}
```
### Step 3: Result Processing
```javascript
// 构建测试策略
const strategy = {
scope: {
total_changed: changedFiles.length,
source_files: fileCategories.source.length,
test_files: fileCategories.test.length,
issue_count: discoveredIssues.length,
baseline_coverage: baselineCoverage
},
test_framework: testFramework,
layers: [],
coverage_targets: {}
}
// 层级选择算法
// L1: Unit Tests - 所有有源码变更的文件
if (fileCategories.source.length > 0 || discoveredIssues.length > 0) {
const l1Files = fileCategories.source.length > 0
? fileCategories.source
: [...new Set(discoveredIssues.map(i => i.file))]
strategy.layers.push({
level: 'L1',
name: 'Unit Tests',
target_coverage: 80,
focus_files: l1Files,
rationale: fileCategories.source.length > 0
? '所有变更的源文件需要单元测试覆盖'
: 'Scout 发现的问题需要测试覆盖'
})
}
// L2: Integration Tests - 多模块变更或关键问题
if (fileCategories.source.length >= 3 || discoveredIssues.some(i => i.severity === 'critical')) {
const integrationPoints = fileCategories.source
.filter(f => /service|controller|handler|middleware|route|api/.test(f))
if (integrationPoints.length > 0) {
strategy.layers.push({
level: 'L2',
name: 'Integration Tests',
target_coverage: 60,
focus_areas: integrationPoints,
rationale: '多文件变更涉及模块间交互,需要集成测试'
})
}
}
// L3: E2E Tests - 大量高优先级问题
const criticalHighCount = discoveredIssues
.filter(i => i.severity === 'critical' || i.severity === 'high').length
if (criticalHighCount >= 3) {
strategy.layers.push({
level: 'L3',
name: 'E2E Tests',
target_coverage: 40,
focus_flows: [...new Set(discoveredIssues
.filter(i => i.severity === 'critical' || i.severity === 'high')
.map(i => i.file.split('/')[1] || 'main'))],
rationale: `${criticalHighCount} 个高优先级问题需要端到端验证`
})
}
// 设置覆盖率目标
for (const layer of strategy.layers) {
strategy.coverage_targets[layer.level] = layer.target_coverage
}
```
## Output Format
```
## Test Strategy
### Scope Analysis
- Changed files: [count]
- Source files: [count]
- Scout issues: [count]
- Baseline coverage: [percent]%
### Test Layers
#### L1: Unit Tests
- Coverage target: 80%
- Focus files: [list]
#### L2: Integration Tests (if applicable)
- Coverage target: 60%
- Focus areas: [list]
#### L3: E2E Tests (if applicable)
- Coverage target: 40%
- Focus flows: [list]
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No changed files | Use scout issues as scope |
| No scout issues | Generate L1 tests for all source files |
| Test framework unknown | Default to Jest/Vitest (JS/TS) or pytest (Python) |
| CLI analysis returns unusable results | Fall back to heuristic-based strategy |
| Agent/CLI failure | Retry once, then fallback to inline execution |
| Timeout (>5 min) | Report partial results, notify coordinator |

View File

@@ -0,0 +1,266 @@
# Role: strategist
测试策略师。分析变更范围,确定测试层级(L1-L3),定义覆盖率目标,生成测试策略文档。基于 scout 发现的问题和代码变更制定针对性测试计划。
## Role Identity
- **Name**: `strategist`
- **Task Prefix**: `QASTRAT-*`
- **Responsibility**: Orchestration(策略制定)
- **Communication**: SendMessage to coordinator only
- **Output Tag**: `[strategist]`
## Role Boundaries
### MUST
- 仅处理 `QASTRAT-*` 前缀的任务
- 所有输出必须带 `[strategist]` 标识
- 仅通过 SendMessage 与 coordinator 通信
### MUST NOT
- ❌ 编写测试代码
- ❌ 执行测试
- ❌ 为其他角色创建任务
- ❌ 修改源代码
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `strategy_ready` | strategist → coordinator | 策略制定完成 | 包含层级选择和覆盖率目标 |
| `error` | strategist → coordinator | 策略制定失败 | 阻塞性错误 |
## Toolbox
### Available Commands
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `analyze-scope` | [commands/analyze-scope.md](commands/analyze-scope.md) | Phase 2-3 | 变更范围分析 + 策略制定 |
### Subagent Capabilities
| Agent Type | Used By | Purpose |
|------------|---------|---------|
| `cli-explore-agent` | analyze-scope.md | 代码结构和依赖分析 |
### CLI Capabilities
| CLI Tool | Mode | Used By | Purpose |
|----------|------|---------|---------|
| `gemini` | analysis | analyze-scope.md | 测试策略分析 |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith('QASTRAT-') &&
t.owner === 'strategist' &&
t.status === 'pending' &&
t.blockedBy.length === 0
)
if (myTasks.length === 0) return
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
```
### Phase 2: Context & Change Analysis
```javascript
// 读取 shared memory 获取 scout 发现
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1] || '.'
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
const discoveredIssues = sharedMemory.discovered_issues || []
const historicalPatterns = sharedMemory.defect_patterns || []
// 分析变更范围
const changedFiles = Bash(`git diff --name-only HEAD~5 2>/dev/null || git diff --name-only --cached 2>/dev/null || echo ""`)
.split('\n').filter(Boolean)
// 分类变更文件
const fileCategories = {
source: changedFiles.filter(f => /\.(ts|tsx|js|jsx|py|java|go|rs)$/.test(f)),
test: changedFiles.filter(f => /\.(test|spec)\.(ts|tsx|js|jsx)$/.test(f) || /test_/.test(f)),
config: changedFiles.filter(f => /\.(json|yaml|yml|toml|env)$/.test(f)),
style: changedFiles.filter(f => /\.(css|scss|less)$/.test(f))
}
// 检测项目测试框架
const testFramework = Bash(`ls package.json 2>/dev/null && (cat package.json | grep -o '"jest"\\|"vitest"\\|"mocha"\\|"pytest"' | head -1) || echo "unknown"`)
.trim().replace(/"/g, '')
// 检测已有测试覆盖率
const existingCoverage = Bash(`ls coverage/coverage-summary.json 2>/dev/null && cat coverage/coverage-summary.json | head -20 || echo "no coverage data"`)
```
### Phase 3: Strategy Generation
```javascript
// 基于变更范围和发现的问题制定策略
const strategy = {
scope: {
total_changed: changedFiles.length,
source_files: fileCategories.source.length,
test_files: fileCategories.test.length,
issue_count: discoveredIssues.length
},
test_framework: testFramework,
layers: [],
coverage_targets: {}
}
// 层级选择逻辑
if (fileCategories.source.length > 0) {
strategy.layers.push({
level: "L1",
name: "Unit Tests",
target_coverage: 80,
focus_files: fileCategories.source,
rationale: "所有变更的源文件需要单元测试覆盖"
})
}
if (fileCategories.source.length >= 3 || discoveredIssues.some(i => i.severity === 'critical')) {
strategy.layers.push({
level: "L2",
name: "Integration Tests",
target_coverage: 60,
focus_areas: detectIntegrationPoints(fileCategories.source),
rationale: "多文件变更或关键问题需要集成测试"
})
}
if (discoveredIssues.filter(i => i.severity === 'critical' || i.severity === 'high').length >= 3) {
strategy.layers.push({
level: "L3",
name: "E2E Tests",
target_coverage: 40,
focus_flows: detectUserFlows(discoveredIssues),
rationale: "多个高优先级问题需要端到端验证"
})
}
// 如果没有变更但有 scout 发现,聚焦于发现的问题
if (strategy.layers.length === 0 && discoveredIssues.length > 0) {
strategy.layers.push({
level: "L1",
name: "Unit Tests",
target_coverage: 80,
focus_files: [...new Set(discoveredIssues.map(i => i.file))],
rationale: "Scout 发现的问题需要测试覆盖"
})
}
// 辅助函数
function detectIntegrationPoints(files) {
  // Files in service/controller/handler/middleware/route layers mark module interaction points.
  const layerPattern = /service|controller|handler|middleware|route/
  return files.filter((file) => layerPattern.test(file))
}
function detectUserFlows(issues) {
  // Infer a flow name from the second path segment of each issue's file;
  // files without one (root-level paths) fall back to 'main'. Result is deduped.
  const flows = issues.map((issue) => issue.file.split('/')[1] || 'main')
  return [...new Set(flows)]
}
// 生成策略文档
const strategyDoc = `# Test Strategy
## Scope Analysis
- Changed files: ${changedFiles.length}
- Source files: ${fileCategories.source.length}
- Scout issues: ${discoveredIssues.length}
- Test framework: ${testFramework}
## Test Layers
${strategy.layers.map(l => `### ${l.level}: ${l.name}
- Coverage target: ${l.target_coverage}%
- Focus: ${l.focus_files?.join(', ') || l.focus_areas?.join(', ') || l.focus_flows?.join(', ')}
- Rationale: ${l.rationale}`).join('\n\n')}
## Priority Issues
${discoveredIssues.slice(0, 10).map(i => `- [${i.severity}] ${i.file}:${i.line} - ${i.description}`).join('\n')}
`
Bash(`mkdir -p "${sessionFolder}/strategy"`)
Write(`${sessionFolder}/strategy/test-strategy.md`, strategyDoc)
// 更新 shared memory
sharedMemory.test_strategy = strategy
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
### Phase 4: Strategy Validation
```javascript
// 验证策略合理性
const validationChecks = {
has_layers: strategy.layers.length > 0,
has_targets: strategy.layers.every(l => l.target_coverage > 0),
covers_issues: discoveredIssues.length === 0 ||
discoveredIssues.some(i => strategy.layers.some(l =>
l.focus_files?.includes(i.file)
)),
framework_detected: testFramework !== 'unknown'
}
const isValid = Object.values(validationChecks).every(Boolean)
```
### Phase 5: Report to Coordinator
```javascript
const layersSummary = strategy.layers.map(l => `${l.level}(${l.target_coverage}%)`).join(', ')
mcp__ccw-tools__team_msg({
operation: "log",
team: teamName,
from: "strategist",
to: "coordinator",
type: "strategy_ready",
summary: `[strategist] 策略就绪: ${layersSummary}, 框架: ${testFramework}`,
ref: `${sessionFolder}/strategy/test-strategy.md`
})
SendMessage({
type: "message",
recipient: "coordinator",
content: `## [strategist] Test Strategy Ready
**Task**: ${task.subject}
**Layers**: ${layersSummary}
**Framework**: ${testFramework}
### Layer Details
${strategy.layers.map(l => `- **${l.level}**: ${l.name} (target: ${l.target_coverage}%, ${l.focus_files?.length || '?'} files)`).join('\n')}
### Strategy Document
${sessionFolder}/strategy/test-strategy.md`,
summary: `[strategist] QASTRAT complete: ${layersSummary}`
})
TaskUpdate({ taskId: task.id, status: 'completed' })
const nextTasks = TaskList().filter(t =>
t.subject.startsWith('QASTRAT-') && t.owner === 'strategist' &&
t.status === 'pending' && t.blockedBy.length === 0
)
if (nextTasks.length > 0) { /* back to Phase 1 */ }
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No QASTRAT-* tasks available | Idle, wait for coordinator |
| No changed files detected | Use scout issues as scope, or scan full project |
| Test framework unknown | Default to Jest/Vitest for JS/TS, pytest for Python |
| Shared memory not found | Create with defaults, proceed |
| Critical issue beyond scope | SendMessage error to coordinator |

View File

@@ -0,0 +1,131 @@
{
"team_name": "quality-assurance",
"version": "1.0.0",
"description": "质量保障团队 - 融合\"软件测试\"和\"问题发现\"两大能力域,形成发现→验证→修复→回归的闭环",
"skill_entry": "team-quality-assurance",
"invocation": "Skill(skill=\"team-quality-assurance\", args=\"--role=coordinator ...\")",
"roles": {
"coordinator": {
"name": "coordinator",
"responsibility": "Orchestration",
"task_prefix": null,
"description": "QA 团队协调者。编排 pipeline:需求澄清 → 模式选择 → 团队创建 → 任务分发 → 监控协调 → 质量门控 → 结果汇报",
"message_types_sent": ["mode_selected", "gc_loop_trigger", "quality_gate", "task_unblocked", "error", "shutdown"],
"message_types_received": ["scan_ready", "issues_found", "strategy_ready", "tests_generated", "tests_revised", "tests_passed", "tests_failed", "analysis_ready", "quality_report", "error"],
"commands": ["dispatch", "monitor"]
},
"scout": {
"name": "scout",
"responsibility": "Orchestration (多视角扫描编排)",
"task_prefix": "SCOUT-*",
"description": "多视角问题侦察员。主动扫描代码库,从 bug、安全、UX、测试覆盖、代码质量等多个视角发现潜在问题",
"message_types_sent": ["scan_ready", "issues_found", "error"],
"message_types_received": [],
"commands": ["scan"],
"cli_tools": ["gemini"],
"subagents": ["cli-explore-agent"]
},
"strategist": {
"name": "strategist",
"responsibility": "Orchestration (策略制定)",
"task_prefix": "QASTRAT-*",
"description": "测试策略师。分析变更范围,确定测试层级(L1-L3),定义覆盖率目标",
"message_types_sent": ["strategy_ready", "error"],
"message_types_received": [],
"commands": ["analyze-scope"],
"cli_tools": ["gemini"],
"subagents": ["cli-explore-agent"]
},
"generator": {
"name": "generator",
"responsibility": "Code generation (测试代码生成)",
"task_prefix": "QAGEN-*",
"description": "测试用例生成器。按策略和层级生成测试代码,支持 L1/L2/L3",
"message_types_sent": ["tests_generated", "tests_revised", "error"],
"message_types_received": [],
"commands": ["generate-tests"],
"cli_tools": ["gemini"],
"subagents": ["code-developer"]
},
"executor": {
"name": "executor",
"responsibility": "Validation (测试执行与修复)",
"task_prefix": "QARUN-*",
"description": "测试执行者。运行测试套件,收集覆盖率数据,失败时自动修复循环",
"message_types_sent": ["tests_passed", "tests_failed", "coverage_report", "error"],
"message_types_received": [],
"commands": ["run-fix-cycle"],
"subagents": ["code-developer"]
},
"analyst": {
"name": "analyst",
"responsibility": "Read-only analysis (质量分析)",
"task_prefix": "QAANA-*",
"description": "质量分析师。分析缺陷模式、覆盖率差距、测试有效性,生成综合质量报告",
"message_types_sent": ["analysis_ready", "quality_report", "error"],
"message_types_received": [],
"commands": ["quality-report"],
"cli_tools": ["gemini"]
}
},
"pipeline_modes": {
"discovery": {
"description": "Scout先行扫描 → 全流程",
"stages": ["SCOUT", "QASTRAT", "QAGEN", "QARUN", "QAANA"],
"entry_role": "scout"
},
"testing": {
"description": "跳过 Scout → 直接测试",
"stages": ["QASTRAT", "QAGEN-L1", "QARUN-L1", "QAGEN-L2", "QARUN-L2", "QAANA"],
"entry_role": "strategist"
},
"full": {
"description": "完整 QA 闭环 + 回归扫描",
"stages": ["SCOUT", "QASTRAT", "QAGEN-L1", "QAGEN-L2", "QARUN-L1", "QARUN-L2", "QAANA", "SCOUT-REG"],
"entry_role": "scout",
"parallel_stages": [["QAGEN-L1", "QAGEN-L2"], ["QARUN-L1", "QARUN-L2"]]
}
},
"gc_loop": {
"max_iterations": 3,
"trigger": "coverage < target",
"participants": ["generator", "executor"],
"flow": "QAGEN-fix → QARUN-gc → evaluate"
},
"shared_memory": {
"file": "shared-memory.json",
"fields": {
"discovered_issues": { "owner": "scout", "type": "array" },
"test_strategy": { "owner": "strategist", "type": "object" },
"generated_tests": { "owner": "generator", "type": "object" },
"execution_results": { "owner": "executor", "type": "object" },
"defect_patterns": { "owner": "analyst", "type": "array" },
"coverage_history": { "owner": "analyst", "type": "array" },
"quality_score": { "owner": "analyst", "type": "number" }
}
},
"collaboration_patterns": [
"CP-1: Linear Pipeline (Discovery/Testing mode)",
"CP-2: Review-Fix Cycle (GC loop: Generator ↔ Executor)",
"CP-3: Fan-out (Scout multi-perspective scan)",
"CP-5: Escalation (Worker → Coordinator → User)",
"CP-9: Dual-Track (Full mode: L1 + L2 parallel)",
"CP-10: Post-Mortem (Analyst quality report)"
],
"session_directory": {
"pattern": ".workflow/.team/QA-{slug}-{date}",
"subdirectories": ["scan", "strategy", "results", "analysis"]
},
"test_layers": {
"L1": { "name": "Unit Tests", "default_target": 80 },
"L2": { "name": "Integration Tests", "default_target": 60 },
"L3": { "name": "E2E Tests", "default_target": 40 }
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -157,6 +157,7 @@ const mockMessages: Record<Locale, Record<string, string>> = {
'codexlens.envGroup.reranker': 'Reranker',
'codexlens.envGroup.concurrency': 'Concurrency',
'codexlens.envGroup.cascade': 'Cascade Search',
'codexlens.envGroup.indexing': 'Indexing',
'codexlens.envGroup.chunking': 'Chunking',
'codexlens.envField.backend': 'Backend',
'codexlens.envField.model': 'Model',
@@ -175,6 +176,9 @@ const mockMessages: Record<Locale, Record<string, string>> = {
'codexlens.envField.searchStrategy': 'Search Strategy',
'codexlens.envField.coarseK': 'Coarse K',
'codexlens.envField.fineK': 'Fine K',
'codexlens.envField.useAstGrep': 'Use ast-grep',
'codexlens.envField.staticGraphEnabled': 'Static Graph',
'codexlens.envField.staticGraphRelationshipTypes': 'Relationship Types',
'codexlens.envField.stripComments': 'Strip Comments',
'codexlens.envField.stripDocstrings': 'Strip Docstrings',
'codexlens.envField.testFilePenalty': 'Test File Penalty',
@@ -421,6 +425,7 @@ const mockMessages: Record<Locale, Record<string, string>> = {
'codexlens.envGroup.reranker': '重排序',
'codexlens.envGroup.concurrency': '并发',
'codexlens.envGroup.cascade': '级联搜索',
'codexlens.envGroup.indexing': '索引与解析',
'codexlens.envGroup.chunking': '分块',
'codexlens.envField.backend': '后端',
'codexlens.envField.model': '模型',
@@ -439,6 +444,9 @@ const mockMessages: Record<Locale, Record<string, string>> = {
'codexlens.envField.searchStrategy': '搜索策略',
'codexlens.envField.coarseK': '粗筛 K 值',
'codexlens.envField.fineK': '精筛 K 值',
'codexlens.envField.useAstGrep': '使用 ast-grep',
'codexlens.envField.staticGraphEnabled': '启用静态图',
'codexlens.envField.staticGraphRelationshipTypes': '关系类型',
'codexlens.envField.stripComments': '去除注释',
'codexlens.envField.stripDocstrings': '去除文档字符串',
'codexlens.envField.testFilePenalty': '测试文件惩罚',

View File

@@ -915,6 +915,21 @@ export async function handleCodexLensConfigRoutes(ctx: RouteContext): Promise<bo
settingsDefaults['CODEXLENS_LLM_BATCH_SIZE'] = String(settings.llm.batch_size);
}
// Parsing / indexing settings
if (settings.parsing?.use_astgrep !== undefined) {
settingsDefaults['CODEXLENS_USE_ASTGREP'] = String(settings.parsing.use_astgrep);
}
if (settings.indexing?.static_graph_enabled !== undefined) {
settingsDefaults['CODEXLENS_STATIC_GRAPH_ENABLED'] = String(settings.indexing.static_graph_enabled);
}
if (settings.indexing?.static_graph_relationship_types !== undefined) {
if (Array.isArray(settings.indexing.static_graph_relationship_types)) {
settingsDefaults['CODEXLENS_STATIC_GRAPH_RELATIONSHIP_TYPES'] = settings.indexing.static_graph_relationship_types.join(',');
} else if (typeof settings.indexing.static_graph_relationship_types === 'string') {
settingsDefaults['CODEXLENS_STATIC_GRAPH_RELATIONSHIP_TYPES'] = settings.indexing.static_graph_relationship_types;
}
}
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({
success: true,
@@ -1080,7 +1095,7 @@ export async function handleCodexLensConfigRoutes(ctx: RouteContext): Promise<bo
settings = JSON.parse(settingsContent);
} catch {
// File doesn't exist, create default structure
settings = { embedding: {}, reranker: {}, api: {}, cascade: {}, llm: {} };
settings = { embedding: {}, reranker: {}, api: {}, cascade: {}, llm: {}, parsing: {}, indexing: {} };
}
// Map env vars to settings.json structure
@@ -1105,6 +1120,15 @@ export async function handleCodexLensConfigRoutes(ctx: RouteContext): Promise<bo
'CODEXLENS_CASCADE_FINE_K': { path: ['cascade', 'fine_k'], transform: v => parseInt(v, 10) },
'CODEXLENS_LLM_ENABLED': { path: ['llm', 'enabled'], transform: v => v === 'true' },
'CODEXLENS_LLM_BATCH_SIZE': { path: ['llm', 'batch_size'], transform: v => parseInt(v, 10) },
'CODEXLENS_USE_ASTGREP': { path: ['parsing', 'use_astgrep'], transform: v => v === 'true' },
'CODEXLENS_STATIC_GRAPH_ENABLED': { path: ['indexing', 'static_graph_enabled'], transform: v => v === 'true' },
'CODEXLENS_STATIC_GRAPH_RELATIONSHIP_TYPES': {
path: ['indexing', 'static_graph_relationship_types'],
transform: v => v
.split(',')
.map((t) => t.trim())
.filter((t) => t.length > 0),
},
'LITELLM_EMBEDDING_MODEL': { path: ['embedding', 'model'] },
'LITELLM_RERANKER_MODEL': { path: ['reranker', 'model'] }
};

View File

@@ -102,10 +102,206 @@ interface UpdateClaudeMdResult {
path?: string;
}
// ========================================
// Explorer API Types (matching frontend)
// ========================================
interface FileSystemNode {
name: string;
path: string;
type: 'file' | 'directory';
children?: FileSystemNode[];
hasClaudeMd?: boolean;
size?: number;
modifiedTime?: string;
extension?: string;
language?: string;
}
interface FileTreeResponse {
rootNodes: FileSystemNode[];
fileCount: number;
directoryCount: number;
totalSize: number;
buildTime: number;
}
interface RootDirectory {
path: string;
name: string;
isWorkspace: boolean;
isGitRoot: boolean;
}
// ========================================
// Helper Functions
// ========================================
/**
 * Recursively build a file tree for `dirPath`, honoring per-directory
 * .gitignore rules, and aggregate file/directory counts plus total byte size
 * for the whole subtree. Directories are flagged with `hasClaudeMd` when they
 * contain a CLAUDE.md file.
 *
 * Filtering is delegated to parseGitignore/shouldIgnore (defined elsewhere in
 * this file); when `includeHidden` is true, entries starting with `.claude`
 * or `.workflow` survive the ignore filter.
 *
 * @param dirPath - Directory path to build tree from (POSIX or Windows style)
 * @param maxDepth - Maximum depth (0 = unlimited)
 * @param includeHidden - Keep `.claude*` / `.workflow*` entries that would otherwise be ignored
 * @param currentDepth - Current recursion depth (internal)
 * @returns `{ node, fileCount, directoryCount, totalSize }`; `node` is null
 *          when the path is missing, not a directory, or reading fails
 */
async function buildFileTree(
  dirPath: string,
  maxDepth: number = 6,
  includeHidden: boolean = false,
  currentDepth: number = 0
): Promise<{ node: FileSystemNode | null; fileCount: number; directoryCount: number; totalSize: number }> {
  const result = { node: null as FileSystemNode | null, fileCount: 0, directoryCount: 0, totalSize: 0 };
  try {
    // Normalize path: forward slashes, and /c/... (MSYS/Git-Bash style) -> C:/...
    let normalizedPath = dirPath.replace(/\\/g, '/');
    if (normalizedPath.match(/^\/[a-zA-Z]\//)) {
      normalizedPath = normalizedPath.charAt(1).toUpperCase() + ':' + normalizedPath.slice(2);
    }
    if (!existsSync(normalizedPath) || !statSync(normalizedPath).isDirectory()) {
      return result; // node stays null for missing or non-directory paths
    }
    const dirName = normalizedPath.split('/').pop() || normalizedPath;
    const node: FileSystemNode = {
      name: dirName,
      path: normalizedPath,
      type: 'directory',
      children: []
    };
    // Check for CLAUDE.md
    const claudeMdPath = join(normalizedPath, 'CLAUDE.md');
    node.hasClaudeMd = existsSync(claudeMdPath);
    // Parse .gitignore patterns (per-directory; nested .gitignore files are handled on recursion)
    const gitignorePath = join(normalizedPath, '.gitignore');
    const gitignorePatterns = parseGitignore(gitignorePath);
    // Read directory entries
    const entries = readdirSync(normalizedPath, { withFileTypes: true });
    for (const entry of entries) {
      const isDirectory = entry.isDirectory();
      // Check if should be ignored
      if (shouldIgnore(entry.name, gitignorePatterns, isDirectory)) {
        // Allow hidden files if includeHidden is true and it's .claude or .workflow
        if (!includeHidden || (!entry.name.startsWith('.claude') && !entry.name.startsWith('.workflow'))) {
          continue;
        }
      }
      const entryPath = join(normalizedPath, entry.name);
      if (isDirectory) {
        // Recursively build tree for directories
        if (maxDepth === 0 || currentDepth < maxDepth - 1) {
          const childResult = await buildFileTree(entryPath, maxDepth, includeHidden, currentDepth + 1);
          if (childResult.node) {
            node.children!.push(childResult.node);
            result.fileCount += childResult.fileCount;
            result.directoryCount += childResult.directoryCount + 1; // +1 counts the child directory itself
            result.totalSize += childResult.totalSize;
          }
        } else {
          // At max depth, just add directory without children (still probe it for CLAUDE.md)
          const childNode: FileSystemNode = {
            name: entry.name,
            path: entryPath.replace(/\\/g, '/'),
            type: 'directory'
          };
          const childClaudeMdPath = join(entryPath, 'CLAUDE.md');
          childNode.hasClaudeMd = existsSync(childClaudeMdPath);
          node.children!.push(childNode);
          result.directoryCount += 1;
        }
      } else {
        // Add file node with size/mtime metadata and a language guess from the extension.
        // NOTE(review): dotfiles like ".gitignore" yield ext "gitignore" via split('.').pop().
        const stats = statSync(entryPath);
        const ext = entry.name.includes('.') ? entry.name.split('.').pop()?.toLowerCase() : '';
        const language = ext && Object.prototype.hasOwnProperty.call(EXT_TO_LANGUAGE, `.${ext}`)
          ? EXT_TO_LANGUAGE[`.${ext}` as keyof typeof EXT_TO_LANGUAGE]
          : undefined;
        const fileNode: FileSystemNode = {
          name: entry.name,
          path: entryPath.replace(/\\/g, '/'),
          type: 'file',
          size: stats.size,
          modifiedTime: stats.mtime.toISOString(),
          extension: ext,
          language
        };
        node.children!.push(fileNode);
        result.fileCount += 1;
        result.totalSize += stats.size;
      }
    }
    // Sort: directories first, then alphabetically
    node.children!.sort((a, b) => {
      if (a.type === 'directory' && b.type !== 'directory') return -1;
      if (a.type !== 'directory' && b.type === 'directory') return 1;
      return a.name.localeCompare(b.name);
    });
    result.node = node;
    return result;
  } catch (error: unknown) {
    // Any fs error (permissions, races mid-scan) drops this subtree: node stays null,
    // so callers that check childResult.node skip its partially accumulated counts.
    console.error('Error building file tree:', error);
    return result;
  }
}
/**
 * Enumerate the root directories available to the Explorer view: the
 * workspace itself (marked as a git root when it contains a `.git` entry),
 * plus — on Windows only — the workspace's drive root when it differs from
 * the workspace path.
 *
 * @param workspacePath - Current workspace path; backslashes and MSYS-style
 *   "/c/..." prefixes are normalized to forward-slash "C:/..." form.
 * @returns The list of selectable roots, workspace first.
 */
async function getRootDirectories(workspacePath: string): Promise<RootDirectory[]> {
  // Normalize separators and "/c/..." drive prefixes to "C:/..." form.
  let workspaceDir = workspacePath.replace(/\\/g, '/');
  if (/^\/[a-zA-Z]\//.test(workspaceDir)) {
    workspaceDir = `${workspaceDir.charAt(1).toUpperCase()}:${workspaceDir.slice(2)}`;
  }
  const segments = workspaceDir.split('/');
  const workspaceLabel = segments[segments.length - 1] || 'Workspace';
  const roots: RootDirectory[] = [
    {
      path: workspaceDir,
      name: workspaceLabel,
      isWorkspace: true,
      isGitRoot: existsSync(join(workspaceDir, '.git'))
    }
  ];
  // Windows only: also expose the drive root alongside the workspace.
  if (process.platform === 'win32') {
    const driveMatch = /^([A-Z]):/.exec(workspaceDir);
    if (driveMatch) {
      const driveRoot = `${driveMatch[1]}:/`;
      if (driveRoot !== workspaceDir) {
        roots.push({
          path: driveRoot,
          name: `${driveMatch[1]}: Drive`,
          isWorkspace: false,
          isGitRoot: false
        });
      }
    }
  }
  return roots;
}
/**
* Parse .gitignore file and return patterns
* @param {string} gitignorePath - Path to .gitignore file
@@ -420,6 +616,166 @@ async function triggerUpdateClaudeMd(targetPath: string, tool: string, strategy:
export async function handleFilesRoutes(ctx: RouteContext): Promise<boolean> {
const { pathname, url, req, res, initialPath, handlePostRequest } = ctx;
// ========================================
// Explorer API Routes (/api/explorer/*)
// ========================================
// API: Get file tree (Explorer view)
if (pathname === '/api/explorer/tree') {
const rootPath = url.searchParams.get('rootPath') || initialPath;
const maxDepth = parseInt(url.searchParams.get('maxDepth') || '6', 10);
const includeHidden = url.searchParams.get('includeHidden') === 'true';
const startTime = Date.now();
try {
const validatedPath = await validateAllowedPath(rootPath, { mustExist: true, allowedDirectories: [initialPath] });
const treeResult = await buildFileTree(validatedPath, maxDepth, includeHidden);
const response: FileTreeResponse = {
rootNodes: treeResult.node ? [treeResult.node] : [],
fileCount: treeResult.fileCount,
directoryCount: treeResult.directoryCount,
totalSize: treeResult.totalSize,
buildTime: Date.now() - startTime
};
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(response));
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
const status = message.includes('Access denied') ? 403 : 400;
console.error(`[Explorer] Tree path validation failed: ${message}`);
res.writeHead(status, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({
rootNodes: [],
fileCount: 0,
directoryCount: 0,
totalSize: 0,
buildTime: 0,
error: status === 403 ? 'Access denied' : 'Invalid path'
}));
}
return true;
}
// API: Get root directories (Explorer view)
if (pathname === '/api/explorer/roots') {
try {
const roots = await getRootDirectories(initialPath);
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(roots));
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
console.error(`[Explorer] Failed to get roots: ${message}`);
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Failed to get root directories' }));
}
return true;
}
// API: Get file content (Explorer view)
if (pathname === '/api/explorer/file') {
const filePath = url.searchParams.get('path');
if (!filePath) {
res.writeHead(400, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'File path is required' }));
return true;
}
try {
const validatedFile = await validateAllowedPath(filePath, { mustExist: true, allowedDirectories: [initialPath] });
const fileData = await getFileContent(validatedFile);
if (fileData.error) {
res.writeHead(404, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(fileData));
} else {
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(fileData));
}
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
const status = message.includes('Access denied') ? 403 : 400;
console.error(`[Explorer] File path validation failed: ${message}`);
res.writeHead(status, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: status === 403 ? 'Access denied' : 'Invalid path' }));
}
return true;
}
// API: Search files (Explorer view)
if (pathname === '/api/explorer/search' && req.method === 'POST') {
handlePostRequest(req, res, async (body) => {
if (typeof body !== 'object' || body === null) {
return { error: 'Invalid request body', status: 400 };
}
const { query, rootPath, maxResults = 50 } = body as { query?: unknown; rootPath?: unknown; maxResults?: unknown };
if (typeof query !== 'string' || query.trim().length === 0) {
return { error: 'query is required', status: 400 };
}
try {
const validatedPath = await validateAllowedPath(
typeof rootPath === 'string' ? rootPath : initialPath,
{ mustExist: true, allowedDirectories: [initialPath] }
);
// Simple file search - walk directory tree and match by name
const results: Array<{ path: string; name: string; type: 'file' | 'directory' }> = [];
const searchQuery = query.toLowerCase();
const searchDir = (dirPath: string, depth: number = 0) => {
if (depth > 10 || results.length >= (typeof maxResults === 'number' ? maxResults : 50)) return;
try {
const entries = readdirSync(dirPath, { withFileTypes: true });
const gitignorePath = join(dirPath, '.gitignore');
const gitignorePatterns = parseGitignore(gitignorePath);
for (const entry of entries) {
if (results.length >= (typeof maxResults === 'number' ? maxResults : 50)) break;
if (shouldIgnore(entry.name, gitignorePatterns, entry.isDirectory())) continue;
const entryPath = join(dirPath, entry.name);
if (entry.name.toLowerCase().includes(searchQuery)) {
results.push({
path: entryPath.replace(/\\/g, '/'),
name: entry.name,
type: entry.isDirectory() ? 'directory' : 'file'
});
}
if (entry.isDirectory()) {
searchDir(entryPath, depth + 1);
}
}
} catch {
// Skip directories we can't read
}
};
searchDir(validatedPath);
return { results, total: results.length };
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
const status = message.includes('Access denied') ? 403 : 400;
console.error(`[Explorer] Search path validation failed: ${message}`);
return { error: status === 403 ? 'Access denied' : 'Invalid path', status };
}
});
return true;
}
// ========================================
// Legacy Files API Routes (/api/files/*)
// ========================================
// API: List directory files with .gitignore filtering (Explorer view)
if (pathname === '/api/files') {
const dirPath = url.searchParams.get('path') || initialPath;