mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-28 09:23:08 +08:00
chore: move 3 skills to ccw-skill-hub repository
Migrated to D:/ccw-skill-hub/skills/: - project-analyze - copyright-docs - software-manual
This commit is contained in:
163
.claude/skills/team-review/roles/fixer/commands/execute-fixes.md
Normal file
163
.claude/skills/team-review/roles/fixer/commands/execute-fixes.md
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
# Command: execute-fixes
|
||||||
|
|
||||||
|
> Applies fixes from fix-plan.json via code-developer subagents. Quick path = 1 agent; standard = 1 agent per group.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Phase 3B of Fixer, after plan-fixes
|
||||||
|
- Requires: `${sessionFolder}/fix/fix-plan.json`, `sessionFolder`, `projectRoot`
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
**Mode**: Sequential Delegation (code-developer agents via Task)
|
||||||
|
|
||||||
|
```
|
||||||
|
quick_path=true -> 1 agent, all findings sequentially
|
||||||
|
quick_path=false -> 1 agent per group, groups in execution_order
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load Plan + Helpers
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const fixPlan = JSON.parse(Read(`${sessionFolder}/fix/fix-plan.json`))
|
||||||
|
const { groups, execution_order, quick_path: isQuickPath } = fixPlan
|
||||||
|
const results = { fixed: [], failed: [], skipped: [] }
|
||||||
|
|
||||||
|
// --- Agent prompt builder ---
|
||||||
|
// Builds the full instruction prompt for one code-developer fix agent.
// `findings` is the dependency-ordered finding list for a single group;
// `files` is the list of files that group touches. Returns one large
// prompt string embedding finding descriptions plus truncated file contents.
function buildAgentPrompt(findings, files) {
|
||||||
|
// Snapshot of current (pre-fix) file contents, keyed by path.
const fileContents = {}
|
||||||
|
// Best-effort read: files that cannot be read are simply omitted from context.
for (const file of files) { try { fileContents[file] = Read(file) } catch {} }
|
||||||
|
|
||||||
|
// One markdown section per finding, numbered in execution order.
const fDesc = findings.map((f, i) => {
|
||||||
|
const fix = f.suggested_fix || f.optimization?.approach || '(no suggestion)'
|
||||||
|
const deps = (f.fix_dependencies||[]).length ? `\nDepends on: ${f.fix_dependencies.join(', ')}` : ''
|
||||||
|
return `### ${i+1}. ${f.id} [${f.severity}]\n**File**: ${f.location?.file}:${f.location?.line}\n**Title**: ${f.title}\n**Desc**: ${f.description}\n**Strategy**: ${f.fix_strategy||'minimal'}\n**Fix**: ${fix}${deps}`
|
||||||
|
}).join('\n\n')
|
||||||
|
|
||||||
|
// File-context blocks; each file body is capped at 8000 chars to bound prompt size.
const fContent = Object.entries(fileContents)
|
||||||
|
.filter(([,c]) => c).map(([f,c]) => `### ${f}\n\`\`\`\n${String(c).slice(0,8000)}\n\`\`\``).join('\n\n')
|
||||||
|
|
||||||
|
// NOTE(review): the template below is a runtime string -- kept byte-identical.
return `You are a code fixer agent. Apply fixes to the codebase.
|
||||||
|
|
||||||
|
## CRITICAL RULES
|
||||||
|
1. Apply each fix using Edit tool, in the order given (dependency-sorted)
|
||||||
|
2. After each fix, run related tests: tests/**/(unknown).test.* or *_test.*
|
||||||
|
3. Tests PASS -> finding is "fixed"
|
||||||
|
4. Tests FAIL -> revert: Bash("git checkout -- {file}") -> mark "failed" -> continue
|
||||||
|
5. Do NOT retry failed fixes with different strategy. Rollback and move on.
|
||||||
|
6. If a finding depends on a previously failed finding, mark "skipped"
|
||||||
|
|
||||||
|
## Findings (in order)
|
||||||
|
${fDesc}
|
||||||
|
|
||||||
|
## File Contents
|
||||||
|
${fContent}
|
||||||
|
|
||||||
|
## Required Output
|
||||||
|
After ALL findings, output JSON:
|
||||||
|
\`\`\`json
|
||||||
|
{"results":[{"id":"SEC-001","status":"fixed","file":"src/a.ts"},{"id":"COR-002","status":"failed","file":"src/b.ts","error":"reason"}]}
|
||||||
|
\`\`\`
|
||||||
|
Process each finding now. Rollback on failure, never retry.`
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Result parser ---
|
||||||
|
// Merge one agent's reported outcomes into the shared `results` accumulator
// ({ fixed, failed, skipped } arrays in the enclosing scope). Prefers the
// agent's fenced-JSON report; falls back to git-diff detection when no
// structured output came back. Findings the agent never mentioned are
// skipped (if a dependency failed) or marked failed, so nothing is lost.
function parseAgentResults(output, findings) {
  const failedIds = new Set()

  // Attempt to extract the ```json ... ``` block from the agent output.
  let reported = []
  try {
    const fenced = (output || '').match(/```json\s*\n?([\s\S]*?)\n?```/)
    if (fenced) {
      const payload = JSON.parse(fenced[1])
      reported = payload.results || payload || []
    }
  } catch {
    // Malformed JSON -> treat as "no structured output" and fall through.
  }

  if (reported.length > 0) {
    // Structured path: trust the agent's per-finding status report.
    for (const entry of reported) {
      const finding = findings.find((x) => x.id === entry.id)
      if (!finding) continue
      if (entry.status === 'fixed') {
        results.fixed.push({ ...finding })
      } else if (entry.status === 'failed') {
        results.failed.push({ ...finding, error: entry.error || 'unknown' })
        failedIds.add(entry.id)
      } else if (entry.status === 'skipped') {
        results.skipped.push({ ...finding, error: entry.error || 'dep failed' })
        failedIds.add(entry.id)
      }
    }
  } else {
    // Fallback: infer outcome from whether each target file was modified.
    for (const finding of findings) {
      const file = finding.location?.file
      if (!file) {
        results.skipped.push({ ...finding, error: 'no file' })
        continue
      }
      const diff = Bash(`git diff --name-only -- "${file}" 2>/dev/null`).trim()
      if (diff) {
        results.fixed.push({ ...finding })
      } else {
        results.failed.push({ ...finding, error: 'no changes detected' })
        failedIds.add(finding.id)
      }
    }
  }

  // Sweep up findings that appear in no bucket yet.
  const seen = new Set(
    [...results.fixed, ...results.failed, ...results.skipped].map((x) => x.id)
  )
  for (const finding of findings) {
    if (seen.has(finding.id)) continue
    if ((finding.fix_dependencies || []).some((d) => failedIds.has(d))) {
      results.skipped.push({ ...finding, error: 'dependency failed' })
    } else {
      results.failed.push({ ...finding, error: 'not processed' })
    }
  }
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Execute
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
if (isQuickPath) {
|
||||||
|
// Single agent for all findings
|
||||||
|
const group = groups[0]
|
||||||
|
const prompt = buildAgentPrompt(group.findings, group.files)
|
||||||
|
const out = Task({ subagent_type:"code-developer", prompt, run_in_background:false })
|
||||||
|
parseAgentResults(out, group.findings)
|
||||||
|
} else {
|
||||||
|
// One agent per group in execution_order
|
||||||
|
const completedGroups = new Set()
|
||||||
|
|
||||||
|
// Build group dependency map
|
||||||
|
const groupDeps = {}
|
||||||
|
for (const g of groups) {
|
||||||
|
groupDeps[g.id] = new Set()
|
||||||
|
for (const f of g.findings) {
|
||||||
|
for (const depId of (f.fix_dependencies||[])) {
|
||||||
|
const dg = groups.find(x => x.findings.some(fx => fx.id === depId))
|
||||||
|
if (dg && dg.id !== g.id) groupDeps[g.id].add(dg.id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const gid of execution_order) {
|
||||||
|
const group = groups.find(g => g.id === gid)
|
||||||
|
if (!group) continue
|
||||||
|
|
||||||
|
const prompt = buildAgentPrompt(group.findings, group.files)
|
||||||
|
const out = Task({ subagent_type:"code-developer", prompt, run_in_background:false })
|
||||||
|
parseAgentResults(out, group.findings)
|
||||||
|
completedGroups.add(gid)
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/fix/fix-progress.json`, JSON.stringify({
|
||||||
|
completed_groups:[...completedGroups],
|
||||||
|
results_so_far:{fixed:results.fixed.length, failed:results.failed.length}
|
||||||
|
}, null, 2))
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer",
|
||||||
|
to:"coordinator", type:"fix_progress",
|
||||||
|
summary:`[fixer] Group ${gid}: ${results.fixed.length} fixed, ${results.failed.length} failed` })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Write Results
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Write(`${sessionFolder}/fix/execution-results.json`, JSON.stringify(results, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| Agent crashes | Mark group findings as failed, continue next group |
|
||||||
|
| Test failure after fix | Rollback (`git checkout -- {file}`), mark failed, continue |
|
||||||
|
| No structured output | Fallback to git diff detection |
|
||||||
|
| Dependency failed | Skip dependent findings automatically |
|
||||||
|
| fix-plan.json missing | Report error, write empty results |
|
||||||
187
.claude/skills/team-review/roles/fixer/commands/plan-fixes.md
Normal file
187
.claude/skills/team-review/roles/fixer/commands/plan-fixes.md
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
# Command: plan-fixes
|
||||||
|
|
||||||
|
> Deterministic grouping algorithm. Groups findings by file, merges dependent groups, topological sorts within groups, writes fix-plan.json.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Phase 3A of Fixer, after context resolution
|
||||||
|
- Requires: `fixableFindings[]`, `sessionFolder`, `quickPath` from Phase 2
|
||||||
|
|
||||||
|
**Trigger conditions**:
|
||||||
|
- FIX-* task in Phase 3 with at least 1 fixable finding
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
**Mode**: Direct (inline execution, deterministic algorithm, no CLI needed)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Group Findings by Primary File
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const fileGroups = {}
|
||||||
|
for (const f of fixableFindings) {
|
||||||
|
const file = f.location?.file || '_unknown'
|
||||||
|
if (!fileGroups[file]) fileGroups[file] = []
|
||||||
|
fileGroups[file].push(f)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Merge Groups with Cross-File Dependencies
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Build adjacency: if finding A (group X) depends on finding B (group Y), merge X into Y
|
||||||
|
const findingFileMap = {}
|
||||||
|
for (const f of fixableFindings) {
|
||||||
|
findingFileMap[f.id] = f.location?.file || '_unknown'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Union-Find for group merging
|
||||||
|
const parent = {}
|
||||||
|
const find = (x) => parent[x] === x ? x : (parent[x] = find(parent[x]))
|
||||||
|
const union = (a, b) => { parent[find(a)] = find(b) }
|
||||||
|
|
||||||
|
const allFiles = Object.keys(fileGroups)
|
||||||
|
for (const file of allFiles) parent[file] = file
|
||||||
|
|
||||||
|
for (const f of fixableFindings) {
|
||||||
|
const myFile = f.location?.file || '_unknown'
|
||||||
|
for (const depId of (f.fix_dependencies || [])) {
|
||||||
|
const depFile = findingFileMap[depId]
|
||||||
|
if (depFile && depFile !== myFile) {
|
||||||
|
union(myFile, depFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect merged groups
|
||||||
|
const mergedGroupMap = {}
|
||||||
|
for (const file of allFiles) {
|
||||||
|
const root = find(file)
|
||||||
|
if (!mergedGroupMap[root]) mergedGroupMap[root] = { files: [], findings: [] }
|
||||||
|
mergedGroupMap[root].files.push(file)
|
||||||
|
mergedGroupMap[root].findings.push(...fileGroups[file])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deduplicate files
|
||||||
|
for (const g of Object.values(mergedGroupMap)) {
|
||||||
|
g.files = [...new Set(g.files)]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Topological Sort Within Each Group
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Orders a group's findings so that each one comes after the findings it
// depends on (Kahn's algorithm). Dependency IDs that are not part of this
// group are ignored. Members of a dependency cycle never reach in-degree
// zero; they are appended at the end in original order so nothing is dropped.
// Ties are broken by original finding order, making the output deterministic.
function topoSort(findings) {
  const knownIds = new Set(findings.map((f) => f.id))
  const dependents = new Map() // id -> ids that must come after it
  const pending = new Map()    // id -> count of unmet in-group dependencies
  for (const f of findings) {
    pending.set(f.id, 0)
    dependents.set(f.id, [])
  }
  for (const f of findings) {
    for (const depId of f.fix_dependencies || []) {
      if (!knownIds.has(depId)) continue // unknown dependency: drop the edge
      dependents.get(depId).push(f.id)
      pending.set(f.id, pending.get(f.id) + 1)
    }
  }

  // FIFO queue seeded in original finding order.
  const ready = findings.filter((f) => pending.get(f.id) === 0).map((f) => f.id)
  const ordered = []
  while (ready.length > 0) {
    const id = ready.shift()
    ordered.push(id)
    for (const nextId of dependents.get(id)) {
      const remaining = pending.get(nextId) - 1
      pending.set(nextId, remaining)
      if (remaining === 0) ready.push(nextId)
    }
  }

  // Cycle fallback: append anything the sort could not place.
  const placed = new Set(ordered)
  for (const f of findings) {
    if (!placed.has(f.id)) ordered.push(f.id)
  }

  const byId = Object.fromEntries(findings.map((f) => [f.id, f]))
  return ordered.map((id) => byId[id])
}
|
||||||
|
|
||||||
|
const groups = Object.entries(mergedGroupMap).map(([root, g], i) => {
|
||||||
|
const sorted = topoSort(g.findings)
|
||||||
|
const maxSev = sorted.reduce((max, f) => {
|
||||||
|
const ord = { critical: 0, high: 1, medium: 2, low: 3 }
|
||||||
|
return (ord[f.severity] ?? 4) < (ord[max] ?? 4) ? f.severity : max
|
||||||
|
}, 'low')
|
||||||
|
return {
|
||||||
|
id: `G${i + 1}`,
|
||||||
|
files: g.files,
|
||||||
|
findings: sorted,
|
||||||
|
max_severity: maxSev
|
||||||
|
}
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Sort Groups by Max Severity
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const SEV_ORDER = { critical: 0, high: 1, medium: 2, low: 3 }
|
||||||
|
groups.sort((a, b) => (SEV_ORDER[a.max_severity] ?? 4) - (SEV_ORDER[b.max_severity] ?? 4))
|
||||||
|
|
||||||
|
// Re-assign IDs after sort
|
||||||
|
groups.forEach((g, i) => { g.id = `G${i + 1}` })
|
||||||
|
|
||||||
|
const execution_order = groups.map(g => g.id)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Determine Execution Path
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const totalFindings = fixableFindings.length
|
||||||
|
const totalGroups = groups.length
|
||||||
|
const isQuickPath = totalFindings <= 5 && totalGroups <= 1
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Write fix-plan.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const fixPlan = {
|
||||||
|
plan_id: `fix-plan-${Date.now()}`,
|
||||||
|
quick_path: isQuickPath,
|
||||||
|
groups: groups.map(g => ({
|
||||||
|
id: g.id,
|
||||||
|
files: g.files,
|
||||||
|
findings: g.findings.map(f => ({
|
||||||
|
id: f.id, severity: f.severity, dimension: f.dimension,
|
||||||
|
title: f.title, description: f.description,
|
||||||
|
location: f.location, suggested_fix: f.suggested_fix,
|
||||||
|
fix_strategy: f.fix_strategy, fix_complexity: f.fix_complexity,
|
||||||
|
fix_dependencies: f.fix_dependencies,
|
||||||
|
root_cause: f.root_cause, optimization: f.optimization
|
||||||
|
})),
|
||||||
|
max_severity: g.max_severity
|
||||||
|
})),
|
||||||
|
execution_order: execution_order,
|
||||||
|
total_findings: totalFindings,
|
||||||
|
total_groups: totalGroups
|
||||||
|
}
|
||||||
|
|
||||||
|
Bash(`mkdir -p "${sessionFolder}/fix"`)
|
||||||
|
Write(`${sessionFolder}/fix/fix-plan.json`, JSON.stringify(fixPlan, null, 2))
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer",
|
||||||
|
to:"coordinator", type:"fix_progress",
|
||||||
|
summary:`[fixer] Fix plan: ${totalGroups} groups, ${totalFindings} findings, path=${isQuickPath ? 'quick' : 'standard'}` })
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| All findings share one file | Single group, likely quick path |
|
||||||
|
| Dependency cycle detected | Topo sort appends cycle members at end |
|
||||||
|
| Finding references unknown dependency | Ignore that dependency edge |
|
||||||
|
| Empty fixableFindings | Should not reach this command (checked in Phase 2) |
|
||||||
156
.claude/skills/team-review/roles/fixer/role.md
Normal file
156
.claude/skills/team-review/roles/fixer/role.md
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
# Role: fixer
|
||||||
|
|
||||||
|
Fix code based on reviewed findings. Load manifest, group, apply with rollback-on-failure, verify.
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
| Field | Value |
|
||||||
|
|-------|-------|
|
||||||
|
| Name | `fixer` |
|
||||||
|
| Task Prefix | `FIX-*` |
|
||||||
|
| Type | code-generation |
|
||||||
|
| Output Tag | `[fixer]` |
|
||||||
|
| Communication | coordinator only |
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
**MUST**: Only `FIX-*` tasks. `[fixer]`-prefixed output. Session fix dir. Rollback on test failure -- never self-retry.
|
||||||
|
|
||||||
|
**MUST NOT**: Create tasks for others. Contact scanner/reviewer. Retry failed fixes. Modify outside scope.
|
||||||
|
|
||||||
|
## Messages: `fix_progress`, `fix_complete`, `fix_failed`, `error`
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer", to:"coordinator", type:"fix_complete", summary:"[fixer] ..." })
|
||||||
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
| Command | File | Phase |
|
||||||
|
|---------|------|-------|
|
||||||
|
| `plan-fixes` | [commands/plan-fixes.md](commands/plan-fixes.md) | 3A: Group + sort findings |
|
||||||
|
| `execute-fixes` | [commands/execute-fixes.md](commands/execute-fixes.md) | 3B: Apply fixes per plan |
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('FIX-') && t.status !== 'completed' && (t.blockedBy||[]).length === 0
|
||||||
|
)
|
||||||
|
if (myTasks.length === 0) return
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
|
||||||
|
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1]?.trim()
|
||||||
|
const inputPath = task.description.match(/input:\s*(.+)/)?.[1]?.trim() || `${sessionFolder}/fix/fix-manifest.json`
|
||||||
|
|
||||||
|
let manifest, reviewReport
|
||||||
|
try { manifest = JSON.parse(Read(inputPath)) } catch {
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer", to:"coordinator", type:"error", summary:`[fixer] No manifest: ${inputPath}` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' }); return
|
||||||
|
}
|
||||||
|
try { reviewReport = JSON.parse(Read(manifest.source)) } catch {
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer", to:"coordinator", type:"error", summary:`[fixer] No report: ${manifest.source}` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' }); return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Context Resolution
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const allFindings = reviewReport.findings || []
|
||||||
|
const scopeSevs = manifest.scope === 'all' ? ['critical','high','medium','low'] : manifest.scope.split(',').map(s=>s.trim())
|
||||||
|
const fixableFindings = allFindings.filter(f => scopeSevs.includes(f.severity) && f.fix_strategy !== 'skip')
|
||||||
|
|
||||||
|
if (fixableFindings.length === 0) {
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer", to:"coordinator", type:"fix_complete", summary:`[fixer] 0 fixable findings.` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' }); return
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasCrossDeps = fixableFindings.some(f => (f.fix_dependencies||[]).some(d => {
|
||||||
|
const dep = fixableFindings.find(x=>x.id===d); return dep && dep.location?.file !== f.location?.file }))
|
||||||
|
const quickPath = fixableFindings.length <= 5 && !hasCrossDeps
|
||||||
|
|
||||||
|
const projectRoot = Bash('git rev-parse --show-toplevel 2>/dev/null || pwd').trim()
|
||||||
|
const has = (c) => Bash(c).trim()==='y'
|
||||||
|
const VT = {tsc:has(`test -f "${projectRoot}/tsconfig.json" && echo y || echo n`),
|
||||||
|
eslint:has(`grep -q eslint "${projectRoot}/package.json" 2>/dev/null && echo y || echo n`),
|
||||||
|
jest:has(`grep -q jest "${projectRoot}/package.json" 2>/dev/null && echo y || echo n`),
|
||||||
|
pytest:has(`command -v pytest >/dev/null 2>&1 && test -f "${projectRoot}/pyproject.toml" && echo y || echo n`),
|
||||||
|
semgrep:has(`command -v semgrep >/dev/null 2>&1 && echo y || echo n`)}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Plan + Execute (Delegate)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
Read("commands/plan-fixes.md") // -> fix-plan.json
|
||||||
|
Read("commands/execute-fixes.md") // -> execution-results.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Post-Fix Verification
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
let execResults
|
||||||
|
try { execResults = JSON.parse(Read(`${sessionFolder}/fix/execution-results.json`)) }
|
||||||
|
catch { execResults = { fixed:[], failed:[], skipped:[] } }
|
||||||
|
|
||||||
|
const fixedFiles = [...new Set(execResults.fixed.map(f=>f.location?.file).filter(Boolean))]
|
||||||
|
const V = {tsc:null,eslint:null,tests:null,semgrep:null}
|
||||||
|
const run = (cmd,t=120000) => Bash(`cd "${projectRoot}" && ${cmd} 2>&1 || true`,{timeout:t})
|
||||||
|
|
||||||
|
if (VT.tsc) { const o=run('npx tsc --noEmit'); const e=(o.match(/error TS/g)||[]).length; V.tsc={pass:e===0,errors:e} }
|
||||||
|
if (VT.eslint && fixedFiles.length) { const o=run(`npx eslint ${fixedFiles.join(' ')}`); const e=Number((o.match(/(\d+) error/)?.[1])||0); V.eslint={pass:e===0,errors:e} }
|
||||||
|
if (VT.jest) { const o=run('npx jest --passWithNoTests',300000); V.tests={pass:/Tests:.*passed/.test(o)&&!/failed/.test(o)} }
|
||||||
|
else if (VT.pytest) { const o=run('pytest --tb=short',300000); V.tests={pass:/passed/.test(o)&&!/failed|error/.test(o)} }
|
||||||
|
if (VT.semgrep && execResults.fixed.some(f=>f.dimension==='security')) {
|
||||||
|
const sf=[...new Set(execResults.fixed.filter(f=>f.dimension==='security').map(f=>f.location?.file).filter(Boolean))]
|
||||||
|
try { const j=JSON.parse(run(`semgrep --config auto ${sf.join(' ')} --json 2>/dev/null`)); V.semgrep={pass:!j.results?.length} } catch { V.semgrep={pass:true} }
|
||||||
|
}
|
||||||
|
|
||||||
|
const fixRate = fixableFindings.length ? Math.round((execResults.fixed.length/fixableFindings.length)*100) : 0
|
||||||
|
Write(`${sessionFolder}/fix/verify-results.json`, JSON.stringify({fix_rate:fixRate, verification:V}, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Report & Complete
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const R = execResults, n = fixableFindings.length
|
||||||
|
const S = { fix_id:`fix-${Date.now()}`, fix_date:new Date().toISOString(), scope:manifest.scope, quick_path:quickPath,
|
||||||
|
total:n, fixed:R.fixed.length, failed:R.failed.length, skipped:R.skipped.length, fix_rate:fixRate, verification:V,
|
||||||
|
fixed_ids:R.fixed.map(f=>f.id), failed_ids:R.failed.map(f=>({id:f.id,error:f.error})), skipped_ids:R.skipped.map(f=>f.id) }
|
||||||
|
Write(`${sessionFolder}/fix/fix-summary.json`, JSON.stringify(S, null, 2))
|
||||||
|
|
||||||
|
const fL = R.fixed.map(f=>`- [${f.id}] ${f.severity} ${f.location?.file}:${f.location?.line} - ${f.title}`).join('\n')||'(none)'
|
||||||
|
const xL = R.failed.map(f=>`- [${f.id}] ${f.location?.file} - ${f.error}`).join('\n')||'(none)'
|
||||||
|
const vR = Object.entries(V).filter(([,v])=>v!==null).map(([k,v])=>`- **${k}**: ${v.pass?'PASS':'FAIL'}${v.errors?` (${v.errors})`:''}`).join('\n')
|
||||||
|
Write(`${sessionFolder}/fix/fix-summary.md`, `# Fix Summary\n**${S.fix_id}** | ${S.scope} | ${fixRate}%\n## ${S.fixed}/${n} fixed, ${S.failed} failed, ${S.skipped} skipped\n### Fixed\n${fL}\n### Failed\n${xL}\n### Verify\n${vR||'(none)'}`)
|
||||||
|
|
||||||
|
let mem = {}; try { mem = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
|
||||||
|
mem.fix_results = { file:`${sessionFolder}/fix/fix-summary.json`, total:n, fixed:S.fixed, failed:S.failed, fix_rate:fixRate }
|
||||||
|
mem.fixed_count = S.fixed
|
||||||
|
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(mem, null, 2))
|
||||||
|
|
||||||
|
const sv = Object.entries(R.fixed.reduce((a,f)=>({...a,[f.severity]:(a[f.severity]||0)+1}),{})).map(([k,v])=>`${k}:${v}`).join(' ')
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"fixer", to:"coordinator", type:"fix_complete",
|
||||||
|
summary:`[fixer] ${S.fixed}/${n} (${fixRate}%)`, ref:`${sessionFolder}/fix/fix-summary.json` })
|
||||||
|
SendMessage({ type:"message", recipient:"coordinator",
|
||||||
|
content:`## [fixer] Fix: ${S.fixed}/${n} (${fixRate}%)\nScope: ${S.scope} | ${sv||'-'} | Failed: ${S.failed} | Skipped: ${S.skipped}`,
|
||||||
|
summary:`[fixer] FIX: ${S.fixed}/${n} (${fixRate}%)` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| Manifest/report missing | Error, complete task |
|
||||||
|
| 0 fixable findings | Complete immediately |
|
||||||
|
| Test failure after fix | Rollback, mark failed, continue |
|
||||||
|
| Tool unavailable | Skip that check |
|
||||||
|
| All findings fail | Report 0%, complete |
|
||||||
@@ -0,0 +1,186 @@
|
|||||||
|
# Command: deep-analyze
|
||||||
|
|
||||||
|
> CLI Fan-out deep analysis. Splits findings into 2 domain groups, runs parallel CLI agents for root cause / impact / optimization enrichment.
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Phase 3 of Reviewer, when `deep_analysis.length > 0`
|
||||||
|
- Requires `deep_analysis[]` array and `sessionFolder` from Phase 2
|
||||||
|
|
||||||
|
**Trigger conditions**:
|
||||||
|
- REV-* task in Phase 3 with at least 1 finding triaged for deep analysis
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
### Delegation Mode
|
||||||
|
|
||||||
|
**Mode**: CLI Fan-out (max 2 parallel agents, analysis only)
|
||||||
|
|
||||||
|
### Tool Fallback Chain
|
||||||
|
|
||||||
|
```
|
||||||
|
gemini (primary) -> qwen (fallback) -> codex (fallback)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Group Split
|
||||||
|
|
||||||
|
```
|
||||||
|
Group A: Security + Correctness findings -> 1 CLI agent
|
||||||
|
Group B: Performance + Maintainability findings -> 1 CLI agent
|
||||||
|
If either group empty -> skip that agent (run single agent only)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Split Findings into Groups
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const groupA = deep_analysis.filter(f =>
|
||||||
|
f.dimension === 'security' || f.dimension === 'correctness'
|
||||||
|
)
|
||||||
|
const groupB = deep_analysis.filter(f =>
|
||||||
|
f.dimension === 'performance' || f.dimension === 'maintainability'
|
||||||
|
)
|
||||||
|
|
||||||
|
// Collect all affected files for CLI context
|
||||||
|
const collectFiles = (group) => [...new Set(
|
||||||
|
group.map(f => f.location?.file).filter(Boolean)
|
||||||
|
)]
|
||||||
|
const filesA = collectFiles(groupA)
|
||||||
|
const filesB = collectFiles(groupB)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Build CLI Prompts
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Builds the deep-analysis CLI prompt for one dimension group.
// `group` is the findings array, `groupLabel` a human label (e.g.
// "Security + Correctness"), `affectedFiles` the deduped file list.
// Returns a single prompt string in the ccw PURPOSE/TASK/MODE format.
function buildPrompt(group, groupLabel, affectedFiles) {
|
||||||
|
const findingsJson = JSON.stringify(group, null, 2)
|
||||||
|
// Small file sets are referenced individually; large ones fall back to a
// broad glob so the CLI context stays bounded.
const filePattern = affectedFiles.length <= 20
|
||||||
|
? affectedFiles.map(f => `@${f}`).join(' ')
|
||||||
|
: '@**/*.{ts,tsx,js,jsx,py,go,java,rs}'
|
||||||
|
|
||||||
|
// NOTE(review): the template below is a runtime string -- kept byte-identical.
return `PURPOSE: Deep analysis of ${groupLabel} code findings -- root cause, impact, optimization suggestions.
|
||||||
|
TASK:
|
||||||
|
- For each finding: trace root cause (independent issue or symptom of another finding?)
|
||||||
|
- Identify findings sharing the same root cause -> mark related_findings with their IDs
|
||||||
|
- Assess impact scope and affected files (blast_radius: function/module/system)
|
||||||
|
- Propose fix strategy (minimal fix vs refactor) with tradeoff analysis
|
||||||
|
- Identify fix dependencies (which findings must be fixed first?)
|
||||||
|
- For each finding add these enrichment fields:
|
||||||
|
root_cause: { description: string, related_findings: string[], is_symptom: boolean }
|
||||||
|
impact: { scope: "low"|"medium"|"high", affected_files: string[], blast_radius: string }
|
||||||
|
optimization: { approach: string, alternative: string, tradeoff: string }
|
||||||
|
fix_strategy: "minimal" | "refactor" | "skip"
|
||||||
|
fix_complexity: "low" | "medium" | "high"
|
||||||
|
fix_dependencies: string[] (finding IDs that must be fixed first)
|
||||||
|
MODE: analysis
|
||||||
|
CONTEXT: ${filePattern}
|
||||||
|
Findings to analyze:
|
||||||
|
${findingsJson}
|
||||||
|
EXPECTED: Respond with ONLY a JSON array. Each element is the original finding object with the 6 enrichment fields added. Preserve ALL original fields exactly.
|
||||||
|
CONSTRAINTS: Preserve original finding fields | Only add enrichment fields | Return raw JSON array only | No markdown wrapping`
|
||||||
|
}
|
||||||
|
|
||||||
|
const promptA = groupA.length > 0
|
||||||
|
? buildPrompt(groupA, 'Security + Correctness', filesA) : null
|
||||||
|
const promptB = groupB.length > 0
|
||||||
|
? buildPrompt(groupB, 'Performance + Maintainability', filesB) : null
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Execute CLI Agents (Parallel)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Runs the analysis prompt through the CLI backends in preference order
// (gemini -> qwen -> codex); the first backend that succeeds wins.
// Returns the raw CLI output, or null when every backend fails.
function runCli(prompt) {
  // Escape embedded double quotes so the prompt survives shell interpolation.
  const escaped = prompt.replace(/"/g, '\\"')
  for (const tool of ['gemini', 'qwen', 'codex']) {
    try {
      return Bash(
        `ccw cli -p "${escaped}" --tool ${tool} --mode analysis --rule analysis-diagnose-bug-root-cause`,
        { timeout: 300000 } // 5-minute cap per backend
      )
    } catch {
      // Backend unavailable or errored -- fall through to the next one.
    }
  }
  return null // All tools failed
}
|
||||||
|
|
||||||
|
// Run both groups -- if both present, execute via Bash run_in_background for parallelism
|
||||||
|
let resultA = null, resultB = null
|
||||||
|
|
||||||
|
if (promptA && promptB) {
|
||||||
|
// Both groups: run in parallel
|
||||||
|
// Group A in background
|
||||||
|
Bash(`ccw cli -p "${promptA.replace(/"/g, '\\"')}" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause > "${sessionFolder}/review/_groupA.txt" 2>&1`,
|
||||||
|
{ run_in_background: true, timeout: 300000 })
|
||||||
|
// Group B synchronous (blocks until done)
|
||||||
|
resultB = runCli(promptB)
|
||||||
|
// Wait for Group A to finish, then read output
|
||||||
|
Bash(`sleep 5`) // Brief wait if B finished faster
|
||||||
|
try { resultA = Read(`${sessionFolder}/review/_groupA.txt`) } catch {}
|
||||||
|
// If background failed, try synchronous fallback
|
||||||
|
if (!resultA) resultA = runCli(promptA)
|
||||||
|
} else if (promptA) {
|
||||||
|
resultA = runCli(promptA)
|
||||||
|
} else if (promptB) {
|
||||||
|
resultB = runCli(promptB)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Parse & Merge Results
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Parses a CLI agent's text output into validated enriched findings.
// Extracts the span from the first '[' to the last ']', parses it as JSON,
// drops elements without id/dimension, and normalizes the six enrichment
// fields (defaults for anything missing or invalid). Any failure along the
// way -- no output, no bracketed span, bad JSON -- degrades to [].
function parseCliOutput(output) {
  if (!output) return []
  try {
    const span = output.match(/\[[\s\S]*\]/)
    if (!span) return []
    const findings = JSON.parse(span[0])

    // Fill in defaults so downstream consumers never see missing fields.
    const withDefaults = (f) => ({
      ...f,
      root_cause: f.root_cause || { description: 'Unknown', related_findings: [], is_symptom: false },
      impact: f.impact || { scope: 'medium', affected_files: [f.location?.file].filter(Boolean), blast_radius: 'module' },
      optimization: f.optimization || { approach: f.suggested_fix || '', alternative: '', tradeoff: '' },
      fix_strategy: ['minimal', 'refactor', 'skip'].includes(f.fix_strategy) ? f.fix_strategy : 'minimal',
      fix_complexity: ['low', 'medium', 'high'].includes(f.fix_complexity) ? f.fix_complexity : 'medium',
      fix_dependencies: Array.isArray(f.fix_dependencies) ? f.fix_dependencies : []
    })

    return findings.filter((f) => f.id && f.dimension).map(withDefaults)
  } catch {
    return []
  }
}
|
||||||
|
|
||||||
|
const enrichedA = parseCliOutput(resultA)
|
||||||
|
const enrichedB = parseCliOutput(resultB)
|
||||||
|
|
||||||
|
// Merge: CLI-enriched findings replace originals, unenriched originals kept as fallback
|
||||||
|
const enrichedMap = new Map()
|
||||||
|
for (const f of [...enrichedA, ...enrichedB]) enrichedMap.set(f.id, f)
|
||||||
|
|
||||||
|
const enrichedFindings = deep_analysis.map(f =>
|
||||||
|
enrichedMap.get(f.id) || {
|
||||||
|
...f,
|
||||||
|
root_cause: { description: 'Analysis unavailable', related_findings: [], is_symptom: false },
|
||||||
|
impact: { scope: 'medium', affected_files: [f.location?.file].filter(Boolean), blast_radius: 'unknown' },
|
||||||
|
optimization: { approach: f.suggested_fix || '', alternative: '', tradeoff: '' },
|
||||||
|
fix_strategy: 'minimal',
|
||||||
|
fix_complexity: 'medium',
|
||||||
|
fix_dependencies: []
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Write output
|
||||||
|
Write(`${sessionFolder}/review/enriched-findings.json`, JSON.stringify(enrichedFindings, null, 2))
|
||||||
|
|
||||||
|
// Cleanup temp files
|
||||||
|
Bash(`rm -f "${sessionFolder}/review/_groupA.txt" "${sessionFolder}/review/_groupB.txt"`)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| gemini CLI fails | Fallback to qwen, then codex |
|
||||||
|
| All CLI tools fail for a group | Use original findings with default enrichment |
|
||||||
|
| CLI output not valid JSON | Attempt regex extraction, else use defaults |
|
||||||
|
| Background task hangs | Synchronous fallback after timeout |
|
||||||
|
| One group fails, other succeeds | Merge partial results with defaults |
|
||||||
|
| Invalid enrichment fields | Apply defaults for missing/invalid fields |
|
||||||
@@ -0,0 +1,174 @@
|
|||||||
|
# Command: generate-report
|
||||||
|
|
||||||
|
> Cross-correlate enriched + pass-through findings, compute metrics, write review-report.json (for fixer) and review-report.md (for humans).
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Phase 4 of Reviewer, after deep analysis (or directly if deep_analysis was empty)
|
||||||
|
- Requires: `enrichedFindings[]` (from Phase 3 or empty), `pass_through[]` (from Phase 2), `sessionFolder`
|
||||||
|
|
||||||
|
## Strategy
|
||||||
|
|
||||||
|
**Mode**: Direct (inline execution, no CLI needed)
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### Step 1: Load & Combine Findings
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
let enrichedFindings = []
|
||||||
|
try { enrichedFindings = JSON.parse(Read(`${sessionFolder}/review/enriched-findings.json`)) } catch {}
|
||||||
|
const allFindings = [...enrichedFindings, ...pass_through]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Cross-Correlate
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// 2a: Critical files (file appears in >=2 dimensions)
|
||||||
|
const fileDimMap = {}
|
||||||
|
for (const f of allFindings) {
|
||||||
|
const file = f.location?.file; if (!file) continue
|
||||||
|
if (!fileDimMap[file]) fileDimMap[file] = new Set()
|
||||||
|
fileDimMap[file].add(f.dimension)
|
||||||
|
}
|
||||||
|
const critical_files = Object.entries(fileDimMap)
|
||||||
|
.filter(([, dims]) => dims.size >= 2)
|
||||||
|
.map(([file, dims]) => ({
|
||||||
|
file, dimensions: [...dims],
|
||||||
|
finding_count: allFindings.filter(f => f.location?.file === file).length,
|
||||||
|
severities: [...new Set(allFindings.filter(f => f.location?.file === file).map(f => f.severity))]
|
||||||
|
})).sort((a, b) => b.finding_count - a.finding_count)
|
||||||
|
|
||||||
|
// 2b: Group by shared root cause
|
||||||
|
const rootCauseGroups = [], grouped = new Set()
|
||||||
|
for (const f of allFindings) {
|
||||||
|
if (grouped.has(f.id)) continue
|
||||||
|
const related = (f.root_cause?.related_findings || []).filter(rid => !grouped.has(rid))
|
||||||
|
if (related.length > 0) {
|
||||||
|
const ids = [f.id, ...related]; ids.forEach(id => grouped.add(id))
|
||||||
|
rootCauseGroups.push({ root_cause: f.root_cause?.description || f.title,
|
||||||
|
finding_ids: ids, primary_id: f.id, dimension: f.dimension, severity: f.severity })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2c: Optimization suggestions from root cause groups + standalone enriched
|
||||||
|
const optimization_suggestions = []
|
||||||
|
for (const group of rootCauseGroups) {
|
||||||
|
const p = allFindings.find(f => f.id === group.primary_id)
|
||||||
|
if (p?.optimization?.approach) {
|
||||||
|
optimization_suggestions.push({ title: `Fix root cause: ${group.root_cause}`,
|
||||||
|
approach: p.optimization.approach, alternative: p.optimization.alternative || '',
|
||||||
|
tradeoff: p.optimization.tradeoff || '', affected_findings: group.finding_ids,
|
||||||
|
fix_strategy: p.fix_strategy || 'minimal', fix_complexity: p.fix_complexity || 'medium',
|
||||||
|
estimated_impact: `Resolves ${group.finding_ids.length} findings` })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (const f of enrichedFindings) {
|
||||||
|
if (grouped.has(f.id) || !f.optimization?.approach || f.severity === 'low' || f.severity === 'info') continue
|
||||||
|
optimization_suggestions.push({ title: `${f.id}: ${f.title}`,
|
||||||
|
approach: f.optimization.approach, alternative: f.optimization.alternative || '',
|
||||||
|
tradeoff: f.optimization.tradeoff || '', affected_findings: [f.id],
|
||||||
|
fix_strategy: f.fix_strategy || 'minimal', fix_complexity: f.fix_complexity || 'medium',
|
||||||
|
estimated_impact: 'Resolves 1 finding' })
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2d: Metrics
|
||||||
|
const by_dimension = {}, by_severity = {}, dimension_severity_matrix = {}
|
||||||
|
for (const f of allFindings) {
|
||||||
|
by_dimension[f.dimension] = (by_dimension[f.dimension] || 0) + 1
|
||||||
|
by_severity[f.severity] = (by_severity[f.severity] || 0) + 1
|
||||||
|
if (!dimension_severity_matrix[f.dimension]) dimension_severity_matrix[f.dimension] = {}
|
||||||
|
dimension_severity_matrix[f.dimension][f.severity] = (dimension_severity_matrix[f.dimension][f.severity] || 0) + 1
|
||||||
|
}
|
||||||
|
const fixable = allFindings.filter(f => f.fix_strategy !== 'skip')
|
||||||
|
const autoFixable = fixable.filter(f => f.fix_complexity === 'low' && f.fix_strategy === 'minimal')
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Write review-report.json
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const reviewReport = {
|
||||||
|
review_id: `rev-${Date.now()}`, review_date: new Date().toISOString(),
|
||||||
|
findings: allFindings, critical_files, optimization_suggestions, root_cause_groups: rootCauseGroups,
|
||||||
|
summary: { total: allFindings.length, deep_analyzed: enrichedFindings.length,
|
||||||
|
pass_through: pass_through.length, by_dimension, by_severity, dimension_severity_matrix,
|
||||||
|
fixable_count: fixable.length, auto_fixable_count: autoFixable.length,
|
||||||
|
critical_file_count: critical_files.length, optimization_count: optimization_suggestions.length }
|
||||||
|
}
|
||||||
|
Bash(`mkdir -p "${sessionFolder}/review"`)
|
||||||
|
Write(`${sessionFolder}/review/review-report.json`, JSON.stringify(reviewReport, null, 2))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Write review-report.md
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const dims = ['security','correctness','performance','maintainability']
|
||||||
|
const sevs = ['critical','high','medium','low','info']
|
||||||
|
const S = reviewReport.summary
|
||||||
|
|
||||||
|
// Dimension x Severity matrix
|
||||||
|
let mx = '| Dimension | Critical | High | Medium | Low | Info | Total |\n|---|---|---|---|---|---|---|\n'
|
||||||
|
for (const d of dims) {
|
||||||
|
mx += `| ${d} | ${sevs.map(s => dimension_severity_matrix[d]?.[s]||0).join(' | ')} | ${by_dimension[d]||0} |\n`
|
||||||
|
}
|
||||||
|
mx += `| **Total** | ${sevs.map(s => by_severity[s]||0).join(' | ')} | **${S.total}** |\n`
|
||||||
|
|
||||||
|
// Critical+High findings table
|
||||||
|
const ch = allFindings.filter(f => f.severity==='critical'||f.severity==='high')
|
||||||
|
.sort((a,b) => (a.severity==='critical'?0:1)-(b.severity==='critical'?0:1))
|
||||||
|
let ft = '| ID | Sev | Dim | File:Line | Title | Fix |\n|---|---|---|---|---|---|\n'
|
||||||
|
if (ch.length) ch.forEach(f => { ft += `| ${f.id} | ${f.severity} | ${f.dimension} | ${f.location?.file}:${f.location?.line} | ${f.title} | ${f.fix_strategy||'-'} |\n` })
|
||||||
|
else ft += '| - | - | - | - | No critical/high findings | - |\n'
|
||||||
|
|
||||||
|
// Optimization suggestions
|
||||||
|
let os = optimization_suggestions.map((o,i) =>
|
||||||
|
`### ${i+1}. ${o.title}\n- **Approach**: ${o.approach}\n${o.tradeoff?`- **Tradeoff**: ${o.tradeoff}\n`:''}- **Strategy**: ${o.fix_strategy} | **Complexity**: ${o.fix_complexity} | ${o.estimated_impact}`
|
||||||
|
).join('\n\n') || '_No optimization suggestions._'
|
||||||
|
|
||||||
|
// Critical files
|
||||||
|
const cf = critical_files.slice(0,10).map(c =>
|
||||||
|
`- **${c.file}** (${c.finding_count} findings, dims: ${c.dimensions.join(', ')})`
|
||||||
|
).join('\n') || '_No critical files._'
|
||||||
|
|
||||||
|
// Fix scope
|
||||||
|
const fs = [
|
||||||
|
by_severity.critical ? `${by_severity.critical} critical (must fix)` : '',
|
||||||
|
by_severity.high ? `${by_severity.high} high (should fix)` : '',
|
||||||
|
autoFixable.length ? `${autoFixable.length} auto-fixable (low effort)` : ''
|
||||||
|
].filter(Boolean).map(s => `- ${s}`).join('\n') || '- No actionable findings.'
|
||||||
|
|
||||||
|
Write(`${sessionFolder}/review/review-report.md`,
|
||||||
|
`# Review Report
|
||||||
|
|
||||||
|
**ID**: ${reviewReport.review_id} | **Date**: ${reviewReport.review_date}
|
||||||
|
**Findings**: ${S.total} | **Fixable**: ${S.fixable_count} | **Auto-fixable**: ${S.auto_fixable_count}
|
||||||
|
|
||||||
|
## Executive Summary
|
||||||
|
- Deep analyzed: ${S.deep_analyzed} | Pass-through: ${S.pass_through}
|
||||||
|
- Critical files: ${S.critical_file_count} | Optimizations: ${S.optimization_count}
|
||||||
|
|
||||||
|
## Metrics Matrix
|
||||||
|
${mx}
|
||||||
|
## Critical & High Findings
|
||||||
|
${ft}
|
||||||
|
## Critical Files
|
||||||
|
${cf}
|
||||||
|
|
||||||
|
## Optimization Suggestions
|
||||||
|
${os}
|
||||||
|
|
||||||
|
## Recommended Fix Scope
|
||||||
|
${fs}
|
||||||
|
|
||||||
|
**Total fixable**: ${S.fixable_count} / ${S.total}
|
||||||
|
`)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| Enriched findings missing | Use empty array, report pass_through only |
|
||||||
|
| JSON parse failure | Log warning, use raw findings |
|
||||||
|
| Session folder missing | Create review subdir via mkdir |
|
||||||
|
| Empty allFindings | Write minimal "clean" report |
|
||||||
202
.claude/skills/team-review/roles/reviewer/role.md
Normal file
202
.claude/skills/team-review/roles/reviewer/role.md
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
# Role: reviewer
|
||||||
|
|
||||||
|
Deep analysis on scan findings, enrichment with root cause / impact / optimization, and structured review report generation. Read-only -- never modifies source code.
|
||||||
|
|
||||||
|
## Role Identity
|
||||||
|
|
||||||
|
| Field | Value |
|
||||||
|
|-------|-------|
|
||||||
|
| Name | `reviewer` |
|
||||||
|
| Task Prefix | `REV-*` |
|
||||||
|
| Type | read-only-analysis |
|
||||||
|
| Output Tag | `[reviewer]` |
|
||||||
|
| Communication | coordinator only |
|
||||||
|
|
||||||
|
## Role Boundaries
|
||||||
|
|
||||||
|
**MUST**: Only `REV-*` tasks. All output `[reviewer]`-prefixed. Write only to session review dir. Triage findings before deep analysis. Cap deep analysis at 15.
|
||||||
|
|
||||||
|
**MUST NOT**: Modify source code files. Fix issues. Create tasks for other roles. Contact scanner/fixer directly. Run any write-mode CLI commands.
|
||||||
|
|
||||||
|
## Messages: `review_progress` (milestone), `review_complete` (Phase 5), `error`
|
||||||
|
|
||||||
|
## Message Bus
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer", to:"coordinator", type:"review_complete", summary:"[reviewer] ..." })
|
||||||
|
// Fallback: Bash(echo JSON >> "${sessionFolder}/message-log.jsonl")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Toolbox
|
||||||
|
|
||||||
|
| Command | File | Phase |
|
||||||
|
|---------|------|-------|
|
||||||
|
| `deep-analyze` | [commands/deep-analyze.md](commands/deep-analyze.md) | 3: CLI Fan-out root cause analysis |
|
||||||
|
| `generate-report` | [commands/generate-report.md](commands/generate-report.md) | 4: Cross-correlate + report generation |
|
||||||
|
|
||||||
|
## Execution (5-Phase)
|
||||||
|
|
||||||
|
### Phase 1: Task Discovery
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const tasks = TaskList()
|
||||||
|
const myTasks = tasks.filter(t =>
|
||||||
|
t.subject.startsWith('REV-') &&
|
||||||
|
t.status !== 'completed' &&
|
||||||
|
(t.blockedBy || []).length === 0
|
||||||
|
)
|
||||||
|
if (myTasks.length === 0) return
|
||||||
|
|
||||||
|
const task = TaskGet({ taskId: myTasks[0].id })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'in_progress' })
|
||||||
|
|
||||||
|
// Extract from task description
|
||||||
|
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1]?.trim()
|
||||||
|
const inputPath = task.description.match(/input:\s*(.+)/)?.[1]?.trim()
|
||||||
|
|| `${sessionFolder}/scan/scan-results.json`
|
||||||
|
const dimStr = task.description.match(/dimensions:\s*(.+)/)?.[1]?.trim() || 'sec,cor,perf,maint'
|
||||||
|
const dimensions = dimStr.split(',').map(d => d.trim())
|
||||||
|
|
||||||
|
// Load scan results
|
||||||
|
let scanResults
|
||||||
|
try {
|
||||||
|
scanResults = JSON.parse(Read(inputPath))
|
||||||
|
} catch {
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer",
|
||||||
|
to:"coordinator", type:"error", summary:`[reviewer] Cannot load scan results: ${inputPath}` })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const findings = scanResults.findings || []
|
||||||
|
if (findings.length === 0) {
|
||||||
|
// No findings to review -- complete immediately
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer",
|
||||||
|
to:"coordinator", type:"review_complete", summary:"[reviewer] 0 findings. Nothing to review." })
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Triage Findings
|
||||||
|
|
||||||
|
Split findings into deep analysis vs pass-through buckets.
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const DEEP_SEVERITIES = ['critical', 'high', 'medium']
|
||||||
|
const MAX_DEEP = 15
|
||||||
|
|
||||||
|
// Partition: deep_analysis gets Critical + High + Medium (capped at MAX_DEEP)
|
||||||
|
const candidates = findings
|
||||||
|
.filter(f => DEEP_SEVERITIES.includes(f.severity))
|
||||||
|
.sort((a, b) => {
|
||||||
|
const ord = { critical: 0, high: 1, medium: 2 }
|
||||||
|
return (ord[a.severity] ?? 3) - (ord[b.severity] ?? 3)
|
||||||
|
})
|
||||||
|
|
||||||
|
const deep_analysis = candidates.slice(0, MAX_DEEP)
|
||||||
|
const deepIds = new Set(deep_analysis.map(f => f.id))
|
||||||
|
|
||||||
|
// Everything not selected for deep analysis is pass-through
|
||||||
|
const pass_through = findings.filter(f => !deepIds.has(f.id))
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer",
|
||||||
|
to:"coordinator", type:"review_progress",
|
||||||
|
summary:`[reviewer] Triage: ${deep_analysis.length} deep analysis, ${pass_through.length} pass-through` })
|
||||||
|
|
||||||
|
// If nothing qualifies for deep analysis, skip Phase 3
|
||||||
|
if (deep_analysis.length === 0) {
|
||||||
|
goto Phase4 // pass_through only
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Deep Analysis (Delegate)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// CLI Fan-out: up to 2 parallel agents for root cause analysis
|
||||||
|
Read("commands/deep-analyze.md")
|
||||||
|
// Produces: ${sessionFolder}/review/enriched-findings.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Load enriched results:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
let enrichedFindings = []
|
||||||
|
try {
|
||||||
|
enrichedFindings = JSON.parse(Read(`${sessionFolder}/review/enriched-findings.json`))
|
||||||
|
} catch {
|
||||||
|
// Fallback: use original deep_analysis findings without enrichment
|
||||||
|
enrichedFindings = deep_analysis
|
||||||
|
}
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer",
|
||||||
|
to:"coordinator", type:"review_progress",
|
||||||
|
summary:`[reviewer] Deep analysis complete: ${enrichedFindings.length} findings enriched` })
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Generate Report (Delegate)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Cross-correlate enriched + pass_through, write review-report.json + .md
|
||||||
|
Read("commands/generate-report.md")
|
||||||
|
// Produces: ${sessionFolder}/review/review-report.json
|
||||||
|
// ${sessionFolder}/review/review-report.md
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 5: Update Shared Memory & Report
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Load report summary
|
||||||
|
let reportJson
|
||||||
|
try {
|
||||||
|
reportJson = JSON.parse(Read(`${sessionFolder}/review/review-report.json`))
|
||||||
|
} catch {
|
||||||
|
reportJson = { summary: { total: findings.length, fixable_count: 0 } }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update shared-memory.json
|
||||||
|
let sharedMemory = {}
|
||||||
|
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
|
||||||
|
sharedMemory.review_results = {
|
||||||
|
file: `${sessionFolder}/review/review-report.json`,
|
||||||
|
total: reportJson.summary?.total || findings.length,
|
||||||
|
by_severity: reportJson.summary?.by_severity || {},
|
||||||
|
by_dimension: reportJson.summary?.by_dimension || {},
|
||||||
|
critical_files: reportJson.critical_files || [],
|
||||||
|
fixable_count: reportJson.summary?.fixable_count || 0,
|
||||||
|
auto_fixable_count: reportJson.summary?.auto_fixable_count || 0
|
||||||
|
}
|
||||||
|
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
|
||||||
|
|
||||||
|
// Build top findings summary for message
|
||||||
|
const topFindings = (reportJson.findings || findings)
|
||||||
|
.filter(f => f.severity === 'critical' || f.severity === 'high')
|
||||||
|
.slice(0, 8)
|
||||||
|
.map(f => `- **[${f.id}]** [${f.severity}] ${f.location?.file}:${f.location?.line} - ${f.title}`)
|
||||||
|
.join('\n')
|
||||||
|
|
||||||
|
const sevSum = Object.entries(reportJson.summary?.by_severity || {})
|
||||||
|
.filter(([,v]) => v > 0).map(([k,v]) => `${k}:${v}`).join(' ')
|
||||||
|
|
||||||
|
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"reviewer",
|
||||||
|
to:"coordinator", type:"review_complete",
|
||||||
|
summary:`[reviewer] Review complete: ${reportJson.summary?.total || findings.length} findings (${sevSum})`,
|
||||||
|
ref:`${sessionFolder}/review/review-report.json` })
|
||||||
|
|
||||||
|
// Final human-readable report to the coordinator. critical_files entries are
// objects ({ file, dimensions, finding_count, severities }) per the report's
// cross-correlation step, so project out the path before joining — joining the
// raw objects would render as "[object Object]" in the message.
SendMessage({ type:"message", recipient:"coordinator",
  content:`## [reviewer] Review Report\n**Findings**: ${reportJson.summary?.total} total | Fixable: ${reportJson.summary?.fixable_count}\n### Critical & High\n${topFindings || '(none)'}\n**Critical files**: ${(reportJson.critical_files || []).slice(0,5).map(c => c.file).join(', ') || '(none)'}\nOutput: ${sessionFolder}/review/review-report.json`,
  summary:`[reviewer] REV complete: ${reportJson.summary?.total} findings, ${reportJson.summary?.fixable_count} fixable` })
|
||||||
|
|
||||||
|
TaskUpdate({ taskId: task.id, status: 'completed' })
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
| Scenario | Resolution |
|
||||||
|
|----------|------------|
|
||||||
|
| Scan results file missing | Report error, complete task cleanly |
|
||||||
|
| 0 findings in scan | Report clean, complete immediately |
|
||||||
|
| CLI deep analysis fails | Use original findings without enrichment |
|
||||||
|
| Report generation fails | Write minimal report with raw findings |
|
||||||
|
| Session folder missing | Re-create review subdirectory |
|
||||||
|
| JSON parse failures | Log warning, use fallback data |
|
||||||
Reference in New Issue
Block a user