chore: move ccw-skill-hub to standalone repository

Migrated ccw-skill-hub to D:/ccw-skill-hub as independent git project.
Removed nested git repos (ccw/frontend/ccw-skill-hub, skill-hub-repo, skill-hub-temp).
This commit is contained in:
catlog22
2026-02-24 11:57:26 +08:00
parent 6f0bbe84ea
commit 61e313a0c1
35 changed files with 3189 additions and 362 deletions

1
.claude/skills/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.ace-tool/

View File

@@ -113,7 +113,6 @@ mcp__ccw-tools__team_msg({ summary: `[${role}] ...` })
const TEAM_CONFIG = {
name: "planex",
sessionDir: ".workflow/.team/PEX-{slug}-{date}/",
msgDir: ".workflow/.team-msg/planex/",
issueDataDir: ".workflow/issues/"
}
```

View File

@@ -0,0 +1,131 @@
---
name: team-review
description: "Unified team skill for code scanning, vulnerability review, optimization suggestions, and automated fix. 4-role team: coordinator, scanner, reviewer, fixer. Triggers on team-review."
allowed-tools: Task, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, TaskGet, Read, Write, Edit, Bash, Glob, Grep, Skill, mcp__ace-tool__search_context
---
# Team Review — Role Router
Single entry point for code scanning, review, and fix. Parses `$ARGUMENTS`, extracts role, and dispatches to the corresponding `role.md`.
## Architecture
```
┌─────────────────────────────────────────────────────────────┐
│ SKILL.md (Role Router) │
│ Parse $ARGUMENTS → Extract --role → Dispatch to role.md │
│ No --role → Dispatch to coordinator │
└──────────────────────────┬──────────────────────────────────┘
┌───────────┬───────────┼───────────┐
↓ ↓ ↓ ↓
┌────────┐ ┌────────┐ ┌────────┐ ┌────────┐
│ coord │ │scanner │ │reviewer│ │ fixer │
│ (RC-*) │ │(SCAN-*)│ │(REV-*) │ │(FIX-*) │
└────────┘ └────────┘ └────────┘ └────────┘
```
## Pipeline (CP-1 Linear)
```
coordinator dispatch
→ SCAN-* (scanner: toolchain + LLM scan)
→ REV-* (reviewer: deep analysis + report)
→ [user confirm]
→ FIX-* (fixer: plan + execute + verify)
```
## Available Roles
| Role | Prefix | Type | File |
|------|--------|------|------|
| coordinator | RC | orchestration | roles/coordinator/role.md |
| scanner | SCAN | read-only-analysis | roles/scanner/role.md |
| reviewer | REV | read-only-analysis | roles/reviewer/role.md |
| fixer | FIX | code-generation | roles/fixer/role.md |
## Role Router
```javascript
const VALID_ROLES = {
"coordinator": "roles/coordinator/role.md",
"scanner": "roles/scanner/role.md",
"reviewer": "roles/reviewer/role.md",
"fixer": "roles/fixer/role.md"
}
// 1. Auto mode detection
const autoYes = /\b(-y|--yes)\b/.test($ARGUMENTS)
// 2. Extract role
const roleMatch = $ARGUMENTS.match(/--role[=\s]+(\w+)/)
const role = roleMatch ? roleMatch[1] : null
if (role && VALID_ROLES[role]) {
// Explicit role → dispatch directly
Read(VALID_ROLES[role]) Execute with $ARGUMENTS
} else if (!role) {
// No --role → coordinator handles all routing
Read("roles/coordinator/role.md") Execute with $ARGUMENTS
} else {
Error(`Unknown role "${role}". Available: ${Object.keys(VALID_ROLES).join(", ")}`)
}
```
## Usage
```bash
# Via coordinator (auto pipeline)
Skill(skill="team-review", args="src/auth/**") # scan + review
Skill(skill="team-review", args="--full src/auth/**") # scan + review + fix
Skill(skill="team-review", args="--fix .review/review-*.json") # fix only
Skill(skill="team-review", args="-q src/auth/**") # quick scan only
# Direct role invocation
Skill(skill="team-review", args="--role=scanner src/auth/**")
Skill(skill="team-review", args="--role=reviewer --input scan-result.json")
Skill(skill="team-review", args="--role=fixer --input fix-manifest.json")
# Flags (all modes)
--dimensions=sec,cor,perf,maint # custom dimensions (default: all 4)
-y / --yes # skip confirmations
-q / --quick # quick scan mode
--full # full pipeline (scan → review → fix)
--fix # fix mode only
```
## Coordinator Spawn Template
```javascript
// Coordinator spawns worker roles via Skill
Skill(skill="team-review", args="--role=scanner ${target} ${flags}")
Skill(skill="team-review", args="--role=reviewer --input ${scan_output} ${flags}")
Skill(skill="team-review", args="--role=fixer --input ${fix_manifest} ${flags}")
```
## Shared Infrastructure
| Component | Location |
|-----------|----------|
| Session directory | `.workflow/.team-review/{workflow_id}/` |
| Shared memory | `shared-memory.json` in session dir |
| Team config | `specs/team-config.json` |
| Finding schema | `specs/finding-schema.json` |
| Dimensions | `specs/dimensions.md` |
## Error Handling
| Error | Action |
|-------|--------|
| Unknown --role value | Error with available roles list |
| Role file not found | Error with expected file path |
| Invalid flags | Warn and continue with defaults |
| No target specified (no --role) | AskUserQuestion to clarify |
## Execution Rules
1. **Parse first**: Extract --role and flags from $ARGUMENTS before anything else
2. **Progressive loading**: Read ONLY the matched role.md, not all four
3. **Full delegation**: Role.md owns entire execution — do not add logic here
4. **Self-contained**: Each role.md includes its own message bus, task lifecycle, toolbox
5. **DO NOT STOP**: Continuous execution until role completes all 5 phases

View File

@@ -0,0 +1,145 @@
# Command: dispatch
> Task chain creation based on pipeline mode. Creates SCAN/REV/FIX tasks with dependencies.
## When to Use
- Phase 3 of Coordinator
- Pipeline mode detected, need to create task chain
- Session initialized
**Trigger conditions**:
- Coordinator Phase 2 complete
- Mode switch requires chain rebuild
## Strategy
### Delegation Mode
**Mode**: Direct (coordinator operates TaskCreate/TaskUpdate directly)
### Decision Logic
```javascript
// Build pipeline based on mode
/**
 * Map a pipeline mode to its ordered list of stage descriptors.
 * Unknown modes fall back to the 'default' (scan + review) chain.
 * @param {string} pipelineMode - 'default' | 'full' | 'fix-only' | 'quick'
 * @returns {Array<{prefix:string,suffix:string,owner:string,desc:string,blockedBy:string[],meta:Object}>}
 */
function buildPipeline(pipelineMode) {
  // Small factory so every stage gets fresh blockedBy/meta objects per call.
  const stage = (prefix, owner, desc, blockedBy = [], meta = {}) => ({
    prefix, suffix: '001', owner, desc, blockedBy, meta
  })
  const scanStage = () => stage('SCAN', 'scanner', 'Multi-dimension code scan')
  const reviewStage = () => stage('REV', 'reviewer', 'Deep finding analysis and review', ['SCAN-001'])
  switch (pipelineMode) {
    case 'full':
      return [
        scanStage(),
        reviewStage(),
        stage('FIX', 'fixer', 'Plan and execute fixes', ['REV-001'])
      ]
    case 'fix-only':
      return [stage('FIX', 'fixer', 'Execute fixes from manifest')]
    case 'quick':
      return [stage('SCAN', 'scanner', 'Quick scan (fast mode)', [], { quick: true })]
    default:
      // 'default' and any unrecognized mode: scan + review.
      return [scanStage(), reviewStage()]
  }
}
```
## Execution Steps
### Step 1: Session Initialization
```javascript
// Session directory already created in Phase 2
// Write pipeline config to shared memory
const sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
sharedMemory.pipeline_mode = pipelineMode
sharedMemory.pipeline_stages = buildPipeline(pipelineMode).map(s => `${s.prefix}-${s.suffix}`)
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
```
### Step 2: Create Task Chain
```javascript
const pipeline = buildPipeline(pipelineMode)
const taskIds = {}
for (const stage of pipeline) {
const taskSubject = `${stage.prefix}-${stage.suffix}: ${stage.desc}`
// Build task description with session context
const fullDesc = [
stage.desc,
`\nsession: ${sessionFolder}`,
`\ntarget: ${target}`,
`\ndimensions: ${dimensions.join(',')}`,
stage.meta?.quick ? `\nquick: true` : '',
`\n\nGoal: ${taskDescription || target}`
].join('')
// Create task
TaskCreate({
subject: taskSubject,
description: fullDesc,
activeForm: `${stage.desc} in progress`
})
// Record task ID
const allTasks = TaskList()
const newTask = allTasks.find(t => t.subject.startsWith(`${stage.prefix}-${stage.suffix}`))
taskIds[`${stage.prefix}-${stage.suffix}`] = newTask.id
// Set owner and dependencies
const blockedByIds = stage.blockedBy
.map(dep => taskIds[dep])
.filter(Boolean)
TaskUpdate({
taskId: newTask.id,
owner: stage.owner,
addBlockedBy: blockedByIds
})
}
```
### Step 3: Verify Chain
```javascript
const allTasks = TaskList()
const chainTasks = pipeline.map(s => taskIds[`${s.prefix}-${s.suffix}`]).filter(Boolean)
const chainValid = chainTasks.length === pipeline.length
if (!chainValid) {
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "user", type: "error",
summary: `[coordinator] Task chain incomplete: ${chainTasks.length}/${pipeline.length}`
})
}
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "all", type: "dispatch_ready",
summary: `[coordinator] Task chain created: ${pipeline.map(s => `${s.prefix}-${s.suffix}`).join(' -> ')} (mode: ${pipelineMode})`
})
```
## Output Format
```
## Task Chain Created
### Mode: [default|full|fix-only|quick]
### Pipeline Stages: [count]
- [prefix]-[suffix]: [description] (owner: [role], blocked by: [deps])
### Verification: PASS/FAIL
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Task creation fails | Retry once, then report to user |
| Dependency cycle | Flatten dependencies, warn coordinator |
| Invalid pipelineMode | Default to 'default' mode |
| Missing session folder | Re-create, log warning |

View File

@@ -0,0 +1,218 @@
# Command: monitor
> Stop-Wait stage execution. Spawns each worker via Skill(), blocks until return, drives transitions.
## When to Use
- Phase 4 of Coordinator, after dispatch complete
## Strategy
**Mode**: Stop-Wait (synchronous Skill call, not polling)
> **No polling. Synchronous Skill() call IS the wait mechanism.**
>
> - FORBIDDEN: `while` + `sleep` + check status
> - REQUIRED: `Skill()` blocking call = worker return = stage done
### Stage-Worker Map
```javascript
const STAGE_WORKER_MAP = {
'SCAN': { role: 'scanner', skillArgs: '--role=scanner' },
'REV': { role: 'reviewer', skillArgs: '--role=reviewer' },
'FIX': { role: 'fixer', skillArgs: '--role=fixer' }
}
```
## Execution Steps
### Step 1: Context Preparation
```javascript
// Load coordinator session state written during Phases 2-3.
const sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
// Get pipeline tasks in creation order (= dependency order)
const allTasks = TaskList()
const pipelineTasks = allTasks
  .filter(t => t.owner && t.owner !== 'coordinator')
  .sort((a, b) => Number(a.id) - Number(b.id))
// Auto mode detection
// FIX: /\b(-y|--yes)\b/ never matched — \b requires a word character adjacent
// to the '-', so a normal " -y" flag was missed. Anchor on whitespace/edges.
const autoYes = /(^|\s)(-y|--yes)(?=\s|$)/.test(args)
```
### Step 2: Sequential Stage Execution (Stop-Wait)
> **Core**: Spawn one worker per stage, block until return.
> Worker return = stage complete. No sleep, no polling.
```javascript
for (const stageTask of pipelineTasks) {
// 1. Extract stage prefix -> determine worker role
const stagePrefix = stageTask.subject.match(/^(\w+)-/)?.[1]
const workerConfig = STAGE_WORKER_MAP[stagePrefix]
if (!workerConfig) {
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "user", type: "error",
summary: `[coordinator] Unknown stage prefix: ${stagePrefix}, skipping`
})
continue
}
// 2. Mark task in progress
TaskUpdate({ taskId: stageTask.id, status: 'in_progress' })
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: workerConfig.role, type: "stage_transition",
summary: `[coordinator] Starting stage: ${stageTask.subject} -> ${workerConfig.role}`
})
// 3. Build worker arguments
const workerArgs = buildWorkerArgs(stageTask, workerConfig)
// 4. Spawn worker via Skill — blocks until return (Stop-Wait core)
Skill(skill="team-review", args=workerArgs)
// 5. Worker returned — check result
const taskState = TaskGet({ taskId: stageTask.id })
if (taskState.status !== 'completed') {
const action = handleStageFailure(stageTask, taskState, workerConfig, autoYes)
if (action === 'abort') break
if (action === 'skip') continue
} else {
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "user", type: "stage_transition",
summary: `[coordinator] Stage complete: ${stageTask.subject}`
})
}
// 6. Post-stage: After SCAN check findings
if (stagePrefix === 'SCAN') {
const mem = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
if ((mem.findings_count || 0) === 0) {
mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "coordinator",
to: "user", type: "pipeline_complete",
summary: `[coordinator] 0 findings. Code is clean. Skipping review/fix.` })
for (const r of pipelineTasks.slice(pipelineTasks.indexOf(stageTask) + 1))
TaskUpdate({ taskId: r.id, status: 'deleted' })
break
}
}
// 7. Post-stage: After REV confirm fix scope
if (stagePrefix === 'REV' && pipelineMode === 'full') {
const mem = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
if (!autoYes) {
const conf = AskUserQuestion({ questions: [{
question: `${mem.findings_count || 0} findings reviewed. Proceed with fix?`,
header: "Fix Confirmation", multiSelect: false,
options: [
{ label: "Fix all", description: "All actionable findings" },
{ label: "Fix critical/high only", description: "Severity filter" },
{ label: "Skip fix", description: "No code changes" }
]
}] })
if (conf["Fix Confirmation"] === "Skip fix") {
pipelineTasks.filter(t => t.subject.startsWith('FIX-'))
.forEach(ft => TaskUpdate({ taskId: ft.id, status: 'deleted' }))
break
}
mem.fix_scope = conf["Fix Confirmation"] === "Fix critical/high only" ? 'critical,high' : 'all'
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(mem, null, 2))
}
Write(`${sessionFolder}/fix/fix-manifest.json`, JSON.stringify({
source: `${sessionFolder}/review/review-report.json`,
scope: mem.fix_scope || 'all', session: sessionFolder
}, null, 2))
}
}
```
### Step 2.1: Worker Argument Builder
```javascript
/**
 * Build the argument string passed to a worker role via Skill().
 * Reads coordinator-scope variables (sessionFolder, target, dimensions,
 * autoYes) as closures — they are established in earlier phases, not passed in.
 * @param stageTask    Task whose subject encodes the stage prefix, e.g. "SCAN-001: ...".
 * @param workerConfig Entry from STAGE_WORKER_MAP ({ role, skillArgs }).
 * @returns {string} CLI-style argument string for the worker Skill call.
 */
function buildWorkerArgs(stageTask, workerConfig) {
  // Stage prefix ("SCAN" | "REV" | "FIX") parsed from the task subject.
  const stagePrefix = stageTask.subject.match(/^(\w+)-/)?.[1]
  // Every worker receives the shared session folder.
  let workerArgs = `${workerConfig.skillArgs} --session ${sessionFolder}`
  if (stagePrefix === 'SCAN') {
    // Scanner gets the raw target plus the dimension filter.
    workerArgs += ` ${target} --dimensions ${dimensions.join(',')}`
    // Quick mode is encoded as "quick: true" in the task description by dispatch.
    if (stageTask.description?.includes('quick: true')) workerArgs += ' -q'
  } else if (stagePrefix === 'REV') {
    // Reviewer consumes the scanner's output file from the session dir.
    workerArgs += ` --input ${sessionFolder}/scan/scan-results.json --dimensions ${dimensions.join(',')}`
  } else if (stagePrefix === 'FIX') {
    // Fixer consumes the manifest written after the review-confirmation step.
    workerArgs += ` --input ${sessionFolder}/fix/fix-manifest.json`
  }
  // Propagate auto-confirm so the worker skips its own prompts.
  if (autoYes) workerArgs += ' -y'
  return workerArgs
}
```
### Step 2.2: Stage Failure Handler
```javascript
/**
 * Decide how to proceed when a stage task is not 'completed' after its worker returns.
 * Auto mode (-y): log, mark the task deleted, and skip — never block on a prompt.
 * Interactive mode: ask the user to Retry / Skip / Abort.
 * @returns {'skip'|'retried'|'abort'} control verdict consumed by the caller loop
 *          (anything other than 'abort'/'skip' lets the pipeline continue).
 */
function handleStageFailure(stageTask, taskState, workerConfig, autoYes) {
  if (autoYes) {
    // Unattended run: drop the stage and move on.
    mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "coordinator",
      to: "user", type: "error",
      summary: `[coordinator] [auto] ${stageTask.subject} incomplete, skipping` })
    TaskUpdate({ taskId: stageTask.id, status: 'deleted' })
    return 'skip'
  }
  const decision = AskUserQuestion({ questions: [{
    question: `Stage "${stageTask.subject}" incomplete (${taskState.status}). Action?`,
    header: "Stage Failure", multiSelect: false,
    options: [
      { label: "Retry", description: "Re-spawn worker" },
      { label: "Skip", description: "Continue pipeline" },
      { label: "Abort", description: "Stop pipeline" }
    ]
  }] })
  const answer = decision["Stage Failure"]
  if (answer === "Retry") {
    // Exactly one synchronous (Stop-Wait) re-spawn; if the task is still not
    // completed afterwards it is dropped — there is no second retry round.
    TaskUpdate({ taskId: stageTask.id, status: 'in_progress' })
    Skill(skill="team-review", args=buildWorkerArgs(stageTask, workerConfig))
    if (TaskGet({ taskId: stageTask.id }).status !== 'completed')
      TaskUpdate({ taskId: stageTask.id, status: 'deleted' })
    return 'retried'
  } else if (answer === "Skip") {
    TaskUpdate({ taskId: stageTask.id, status: 'deleted' })
    return 'skip'
  } else {
    // "Abort" (or any other answer): halt the pipeline loop in the caller.
    mcp__ccw-tools__team_msg({ operation: "log", team: teamName, from: "coordinator",
      to: "user", type: "error",
      summary: `[coordinator] User aborted at: ${stageTask.subject}` })
    return 'abort'
  }
}
```
### Step 3: Finalize
```javascript
// Persist completion state so Phase 5 (aggregate & report) and any external
// observer can read the final pipeline status from shared memory.
const finalMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
finalMemory.pipeline_status = 'complete'
finalMemory.completed_at = new Date().toISOString()  // ISO-8601 UTC timestamp
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(finalMemory, null, 2))
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Worker incomplete (interactive) | AskUser: Retry / Skip / Abort |
| Worker incomplete (auto) | Auto-skip, log warning |
| 0 findings after scan | Skip remaining stages |
| User declines fix | Delete FIX tasks, report review-only |

View File

@@ -0,0 +1,218 @@
# Role: coordinator
Code review team coordinator. Orchestrates the scan-review-fix pipeline (CP-1 Linear): parse target, detect mode, dispatch task chain, drive sequential stage execution via Stop-Wait, aggregate results.
## Role Identity
- **Name**: `coordinator`
- **Task Prefix**: RC (coordinator creates tasks, doesn't receive them)
- **Responsibility**: Orchestration
- **Communication**: SendMessage to all teammates
- **Output Tag**: `[coordinator]`
## Role Boundaries
### MUST
- All output (SendMessage, team_msg, logs) prefixed with `[coordinator]`
- Only: target parsing, mode detection, task creation/dispatch, stage monitoring, result aggregation
- Create tasks via TaskCreate and assign to worker roles
- Drive pipeline stages via Stop-Wait (synchronous Skill() calls)
### MUST NOT
- Run analysis tools directly (semgrep, eslint, tsc, etc.)
- Modify source code files
- Perform code review analysis
- Bypass worker roles to do delegated work
- Omit `[coordinator]` prefix on any output
> **Core principle**: coordinator is the orchestrator, not the executor. All actual work delegated to scanner/reviewer/fixer via task chain.
## Message Types
| Type | Direction | Trigger | Description |
|------|-----------|---------|-------------|
| `dispatch_ready` | coordinator -> all | Phase 3 done | Task chain created, pipeline ready |
| `stage_transition` | coordinator -> worker | Stage unblocked | Next stage starting |
| `pipeline_complete` | coordinator -> user | All stages done | Pipeline finished, summary ready |
| `error` | coordinator -> user | Stage failure | Blocking issue requiring attention |
## Message Bus
Before every SendMessage, call `mcp__ccw-tools__team_msg` to log:
```javascript
mcp__ccw-tools__team_msg({
operation: "log", team: "team-review", from: "coordinator",
to: "user", type: "dispatch_ready",
summary: "[coordinator] Task chain created, pipeline ready"
})
```
**CLI Fallback**: If unavailable, `Bash(echo JSON >> "${sessionFolder}/message-log.jsonl")`
## Toolbox
| Command | File | Phase | Description |
|---------|------|-------|-------------|
| `dispatch` | [commands/dispatch.md](commands/dispatch.md) | Phase 3 | Task chain creation based on mode |
| `monitor` | [commands/monitor.md](commands/monitor.md) | Phase 4 | Stop-Wait stage execution loop |
## Execution (5-Phase)
### Phase 1: Parse Arguments & Detect Mode
```javascript
// Raw invocation arguments ($ARGUMENTS is substituted by the skill runtime).
const args = "$ARGUMENTS"
// Extract task description (strip all flags).
// FIX: the old /--\w+[=\s]+\S+/ also swallowed the token AFTER boolean flags
// ("--full src/**" stripped both the flag and the target, leaving ''), and the
// old /\b(-y|...)\b/ never matched (\b needs a word char next to '-').
// Strip only value-taking flags together with their value, then bare boolean
// flags, then collapse leftover whitespace.
const taskDescription = args
  .replace(/--(role|input|dimensions)[=\s]+\S+/g, '')
  .replace(/(^|\s)(-y|--yes|-q|--quick|--full|--fix)(?=\s|$)/g, '$1')
  .replace(/\s+/g, ' ')
  .trim()
// Mode detection
/**
 * Detect pipeline mode from the raw argument string.
 * FIX: the previous /\b--fix\b/-style checks could never match, because \b
 * only matches adjacent to a word character and '-' is not one — so a normal
 * " --fix" / " --full" / " -q" flag was invisible and every invocation fell
 * through to 'default'. Flags are now anchored on whitespace / string edges.
 * @param {string} args - raw $ARGUMENTS string
 * @returns {'fix-only'|'full'|'quick'|'default'}
 */
function detectMode(args) {
  const hasFlag = (...flags) =>
    flags.some(f => new RegExp(`(^|\\s)${f}(?=\\s|$)`).test(args))
  if (hasFlag('--fix')) return 'fix-only'
  if (hasFlag('--full')) return 'full'
  if (hasFlag('-q', '--quick')) return 'quick'
  return 'default' // scan + review
}
const pipelineMode = detectMode(args)
// Auto mode (skip confirmations)
// FIX: /\b(-y|--yes)\b/ never matched — \b requires a word character adjacent
// to the '-', so a normal " -y" flag was missed. Anchor on whitespace/edges.
const autoYes = /(^|\s)(-y|--yes)(?=\s|$)/.test(args)
// Dimension filter (default: all 4)
const dimMatch = args.match(/--dimensions[=\s]+([\w,]+)/)
const dimensions = dimMatch ? dimMatch[1].split(',') : ['sec', 'cor', 'perf', 'maint']
// Target extraction (file patterns or git changes)
const target = taskDescription || '.'
// Check for existing RC-* tasks (when invoked by another coordinator)
const existingTasks = TaskList()
if (!autoYes && !taskDescription) {
  AskUserQuestion({
    questions: [{
      question: "What code should be reviewed?",
      header: "Review Target",
      multiSelect: false,
      options: [
        { label: "Custom", description: "Enter file patterns or paths" },
        { label: "Uncommitted changes", description: "Review git diff" },
        { label: "Full project scan", description: "Scan entire project" }
      ]
    }]
  })
}
```
### Phase 2: Initialize Session
```javascript
const teamName = "team-review"
const sessionSlug = target.slice(0, 30).replace(/[^a-zA-Z0-9]/g, '-').replace(/-+/g, '-')
const sessionDate = new Date().toISOString().slice(0, 10)
const workflowId = `RC-${sessionSlug}-${sessionDate}`
const sessionFolder = `.workflow/.team-review/${workflowId}`
Bash(`mkdir -p "${sessionFolder}/scan" "${sessionFolder}/review" "${sessionFolder}/fix"`)
// Initialize shared memory
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify({
workflow_id: workflowId, mode: pipelineMode, target, dimensions, auto: autoYes,
scan_results: null, review_results: null, fix_results: null,
findings_count: 0, fixed_count: 0
}, null, 2))
// Workers spawned per-stage in Phase 4 via Stop-Wait Skill()
goto Phase3
```
### Phase 3: Create Task Chain
```javascript
Output("[coordinator] Phase 3: Task Dispatching")
Read("commands/dispatch.md") // Full task chain creation logic
goto Phase4
```
**Default** (scan+review): `SCAN-001 -> REV-001`
**Full** (scan+review+fix): `SCAN-001 -> REV-001 -> FIX-001`
**Fix-Only**: `FIX-001`
**Quick**: `SCAN-001 (quick=true)`
### Phase 4: Sequential Stage Execution (Stop-Wait)
```javascript
// Read commands/monitor.md for full implementation
Read("commands/monitor.md")
```
> **Strategy**: Spawn workers sequentially via Skill(), synchronous blocking until return. Worker return = stage complete. No polling.
>
> - FORBIDDEN: `while` loop + `sleep` + check status
> - REQUIRED: Synchronous `Skill()` call = natural callback
**Stage Flow**:
| Stage | Worker | On Complete |
|-------|--------|-------------|
| SCAN-001 | scanner | Check findings count -> start REV |
| REV-001 | reviewer | Generate review report -> [user confirm] -> start FIX |
| FIX-001 | fixer | Execute fixes -> verify |
### Phase 5: Aggregate Results & Report
```javascript
const memory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`))
const fixRate = memory.findings_count > 0
? Math.round((memory.fixed_count / memory.findings_count) * 100) : 0
const report = {
mode: pipelineMode, target, dimensions,
findings_total: memory.findings_count || 0,
by_severity: memory.review_results?.by_severity || {},
by_dimension: memory.review_results?.by_dimension || {},
fixed_count: memory.fixed_count || 0,
fix_rate: fixRate
}
mcp__ccw-tools__team_msg({
operation: "log", team: teamName, from: "coordinator",
to: "user", type: "pipeline_complete",
summary: `[coordinator] Complete: ${report.findings_total} findings, ${report.fixed_count} fixed (${fixRate}%)`
})
SendMessage({
content: `## [coordinator] Review Report\n\n${JSON.stringify(report, null, 2)}`,
summary: `[coordinator] ${report.findings_total} findings, ${report.fixed_count} fixed`
})
if (!autoYes) {
AskUserQuestion({
questions: [{
question: "Pipeline complete. Next:",
header: "Next",
multiSelect: false,
options: [
{ label: "New target", description: "Review different files" },
{ label: "Deep review", description: "Re-review stricter" },
{ label: "Done", description: "Close session" }
]
}]
})
}
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Scanner finds 0 findings | Report clean, skip review + fix stages |
| Worker returns incomplete | Ask user: retry / skip / abort |
| Fix verification fails | Log warning, report partial results |
| Session folder missing | Re-create and log warning |
| Target path invalid | AskUserQuestion for corrected path |

View File

@@ -0,0 +1,186 @@
# Command: semantic-scan
> LLM-based semantic analysis via CLI. Supplements toolchain findings with issues that static tools cannot detect: business logic flaws, architectural problems, complex security patterns.
## When to Use
- Phase 3 of Scanner, Standard mode, Step B
- Runs AFTER toolchain-scan completes (needs its output to avoid duplication)
- Quick mode does NOT use this command
**Trigger conditions**:
- SCAN-* task in Phase 3 with `quickMode === false`
- toolchain-scan.md has completed (toolchain-findings.json exists or empty)
## Strategy
### Delegation Mode
**Mode**: CLI Fan-out (single gemini agent, analysis only)
### Tool Fallback Chain
```
gemini (primary) -> qwen (fallback) -> codex (fallback)
```
## Execution Steps
### Step 1: Prepare Context
Build the CLI prompt with target files and a summary of toolchain findings to avoid duplication.
```javascript
// Read toolchain findings for dedup context
let toolFindings = []
try {
toolFindings = JSON.parse(Read(`${sessionFolder}/scan/toolchain-findings.json`))
} catch { /* no toolchain findings */ }
// Build toolchain summary for dedup (compact: file:line:rule per line)
const toolSummary = toolFindings.length > 0
? toolFindings.slice(0, 50).map(f =>
`${f.location?.file}:${f.location?.line} [${f.source}] ${f.title}`
).join('\n')
: '(no toolchain findings)'
// Build target file list for CLI context
// Limit to reasonable size for CLI prompt
const fileList = targetFiles.slice(0, 100)
const targetPattern = fileList.length <= 20
? fileList.join(' ')
: `${target}/**/*.{ts,tsx,js,jsx,py,go,java,rs}`
// Map requested dimensions to scan focus areas
const DIM_FOCUS = {
sec: 'Security: business logic vulnerabilities, privilege escalation, sensitive data flow, auth bypass, injection beyond simple patterns',
cor: 'Correctness: logic errors, unhandled exception paths, state management bugs, race conditions, incorrect algorithm implementation',
perf: 'Performance: algorithm complexity (O(n^2)+), N+1 queries, unnecessary sync operations, memory leaks, missing caching opportunities',
maint: 'Maintainability: architectural coupling, abstraction leaks, project convention violations, dead code paths, excessive complexity'
}
const focusAreas = dimensions
.map(d => DIM_FOCUS[d])
.filter(Boolean)
.map((desc, i) => `${i + 1}. ${desc}`)
.join('\n')
```
### Step 2: Execute CLI Scan
```javascript
const maxPerDimension = 5
const minSeverity = 'medium'
const cliPrompt = `PURPOSE: Supplement toolchain scan with semantic analysis that static tools cannot detect. Find logic errors, architectural issues, and complex vulnerability patterns.
TASK:
${focusAreas}
MODE: analysis
CONTEXT: @${targetPattern}
Toolchain already detected these issues (DO NOT repeat them):
${toolSummary}
EXPECTED: Respond with ONLY a JSON array (no markdown, no explanation). Each element:
{"dimension":"security|correctness|performance|maintainability","category":"<sub-category>","severity":"critical|high|medium","title":"<concise title>","description":"<detailed explanation>","location":{"file":"<path>","line":<number>,"end_line":<number>,"code_snippet":"<relevant code>"},"source":"llm","suggested_fix":"<how to fix>","effort":"low|medium|high","confidence":"high|medium|low"}
CONSTRAINTS: Max ${maxPerDimension} findings per dimension | Only ${minSeverity} severity and above | Do not duplicate toolchain findings | Focus on issues tools CANNOT detect | Return raw JSON array only`
let cliOutput = null
let cliTool = 'gemini'
// Try primary tool
try {
cliOutput = Bash(
`ccw cli -p "${cliPrompt.replace(/"/g, '\\"')}" --tool gemini --mode analysis --rule analysis-review-code-quality`,
{ timeout: 300000 }
)
} catch {
// Fallback to qwen
try {
cliTool = 'qwen'
cliOutput = Bash(
`ccw cli -p "${cliPrompt.replace(/"/g, '\\"')}" --tool qwen --mode analysis`,
{ timeout: 300000 }
)
} catch {
// Fallback to codex
try {
cliTool = 'codex'
cliOutput = Bash(
`ccw cli -p "${cliPrompt.replace(/"/g, '\\"')}" --tool codex --mode analysis`,
{ timeout: 300000 }
)
} catch {
// All CLI tools failed
cliOutput = null
}
}
}
```
### Step 3: Parse & Validate Output
```javascript
let semanticFindings = []
if (cliOutput) {
try {
// Extract JSON array from CLI output (may have surrounding text)
const jsonMatch = cliOutput.match(/\[[\s\S]*\]/)
if (jsonMatch) {
const parsed = JSON.parse(jsonMatch[0])
// Validate each finding against schema
semanticFindings = parsed.filter(f => {
// Required fields check
if (!f.dimension || !f.title || !f.location?.file) return false
// Dimension must be valid
if (!['security', 'correctness', 'performance', 'maintainability'].includes(f.dimension)) return false
// Severity must be valid and meet minimum
const validSev = ['critical', 'high', 'medium']
if (!validSev.includes(f.severity)) return false
return true
}).map(f => ({
dimension: f.dimension,
category: f.category || 'general',
severity: f.severity,
title: f.title,
description: f.description || f.title,
location: {
file: f.location.file,
line: f.location.line || 1,
end_line: f.location.end_line || f.location.line || 1,
code_snippet: f.location.code_snippet || ''
},
source: 'llm',
tool_rule: null,
suggested_fix: f.suggested_fix || '',
effort: ['low', 'medium', 'high'].includes(f.effort) ? f.effort : 'medium',
confidence: ['high', 'medium', 'low'].includes(f.confidence) ? f.confidence : 'medium'
}))
}
} catch {
// JSON parse failed - log and continue with empty
}
}
// Enforce per-dimension limits
const dimCounts = {}
semanticFindings = semanticFindings.filter(f => {
dimCounts[f.dimension] = (dimCounts[f.dimension] || 0) + 1
return dimCounts[f.dimension] <= maxPerDimension
})
// Write output
Write(`${sessionFolder}/scan/semantic-findings.json`,
JSON.stringify(semanticFindings, null, 2))
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| gemini CLI fails | Fallback to qwen, then codex |
| All CLI tools fail | Log warning, write empty findings array (toolchain results still valid) |
| CLI output not valid JSON | Attempt regex extraction, else empty findings |
| Findings exceed per-dimension limit | Truncate to max per dimension |
| Invalid dimension/severity in output | Filter out invalid entries |
| CLI timeout (>5 min) | Kill, log warning, return empty findings |

View File

@@ -0,0 +1,187 @@
# Command: toolchain-scan
> Parallel static analysis tool execution. Detects available tools, runs concurrently, normalizes output into standardized findings.
## When to Use
- Phase 3 of Scanner, Standard mode, Step A
- At least one tool detected in Phase 2
- Quick mode does NOT use this command
## Strategy
### Delegation Mode
**Mode**: Direct (Bash parallel execution)
## Execution Steps
### Step 1: Build Tool Commands
```javascript
if (!Object.values(toolchain).some(Boolean)) {
Write(`${sessionFolder}/scan/toolchain-findings.json`, '[]')
return
}
const tmpDir = `${sessionFolder}/scan/tmp`
Bash(`mkdir -p "${tmpDir}"`)
const cmds = []
if (toolchain.tsc)
cmds.push(`(cd "${projectRoot}" && npx tsc --noEmit --pretty false 2>&1 | head -500 > "${tmpDir}/tsc.txt") &`)
if (toolchain.eslint)
cmds.push(`(cd "${projectRoot}" && npx eslint "${target}" --format json --no-error-on-unmatched-pattern 2>/dev/null | head -5000 > "${tmpDir}/eslint.json") &`)
if (toolchain.semgrep)
cmds.push(`(cd "${projectRoot}" && semgrep --config auto --json "${target}" 2>/dev/null | head -5000 > "${tmpDir}/semgrep.json") &`)
if (toolchain.ruff)
cmds.push(`(cd "${projectRoot}" && ruff check "${target}" --output-format json 2>/dev/null | head -5000 > "${tmpDir}/ruff.json") &`)
if (toolchain.mypy)
cmds.push(`(cd "${projectRoot}" && mypy "${target}" --output json 2>/dev/null | head -2000 > "${tmpDir}/mypy.txt") &`)
if (toolchain.npmAudit)
cmds.push(`(cd "${projectRoot}" && npm audit --json 2>/dev/null | head -5000 > "${tmpDir}/audit.json") &`)
```
### Step 2: Parallel Execution
```javascript
Bash(cmds.join('\n') + '\nwait', { timeout: 300000 })
```
### Step 3: Parse Tool Outputs
Each parser normalizes to: `{ dimension, category, severity, title, description, location:{file,line,end_line,code_snippet}, source, tool_rule, suggested_fix, effort, confidence }`
```javascript
const findings = []
// --- tsc: file(line,col): error TSxxxx: message ---
if (toolchain.tsc) {
  try {
    const tscOutput = Read(`${tmpDir}/tsc.txt`)
    const tscLine = /^(.+)\((\d+),\d+\):\s+(error|warning)\s+(TS\d+):\s+(.+)$/gm
    for (let hit = tscLine.exec(tscOutput); hit !== null; hit = tscLine.exec(tscOutput)) {
      const [, file, line, level, rule, message] = hit
      findings.push({
        dimension: 'correctness',
        category: 'type-safety',
        severity: level === 'error' ? 'high' : 'medium',
        title: `tsc ${rule}: ${message.slice(0,80)}`,
        description: message,
        location: { file, line: +line },
        source: 'tool:tsc',
        tool_rule: rule,
        suggested_fix: '',
        effort: 'low',
        confidence: 'high'
      })
    }
  } catch {}
}
// --- eslint: JSON array of {filePath, messages[{severity,ruleId,message,line}]} ---
if (toolchain.eslint) {
  try {
    const files = JSON.parse(Read(`${tmpDir}/eslint.json`))
    for (const file of files) {
      for (const report of (file.messages || [])) {
        const isError = report.severity === 2
        const hasFix = Boolean(report.fix)
        findings.push({
          dimension: isError ? 'correctness' : 'maintainability',
          category: isError ? 'bug' : 'code-smell',
          severity: isError ? 'high' : 'medium',
          title: `eslint ${report.ruleId || '?'}: ${(report.message || '').slice(0,80)}`,
          description: report.message || '',
          location: {
            file: file.filePath,
            line: report.line || 1,
            end_line: report.endLine,
            code_snippet: report.source || ''
          },
          source: 'tool:eslint',
          tool_rule: report.ruleId || null,
          suggested_fix: hasFix ? 'Auto-fixable' : '',
          effort: hasFix ? 'low' : 'medium',
          confidence: 'high'
        })
      }
    }
  } catch {}
}
// --- semgrep: {results[{path,start:{line},end:{line},check_id,extra:{severity,message,fix,lines}}]} ---
if (toolchain.semgrep) {
  try {
    const report = JSON.parse(Read(`${tmpDir}/semgrep.json`))
    const severityOf = { ERROR: 'high', WARNING: 'medium', INFO: 'low' }
    for (const result of (report.results || [])) {
      const extra = result.extra || {}
      const mapped = severityOf[extra.severity]
      findings.push({
        dimension: 'security',
        category: result.check_id?.split('.').pop() || 'generic',
        severity: mapped || 'medium',
        title: `semgrep: ${(extra.message || result.check_id || '').slice(0,80)}`,
        description: extra.message || '',
        location: {
          file: result.path,
          line: result.start?.line || 1,
          end_line: result.end?.line,
          code_snippet: extra.lines || ''
        },
        source: 'tool:semgrep',
        tool_rule: result.check_id || null,
        suggested_fix: extra.fix || '',
        effort: 'medium',
        confidence: mapped === 'high' ? 'high' : 'medium'
      })
    }
  } catch {}
}
// --- ruff: [{code,message,filename,location:{row},end_location:{row},fix}] ---
if (toolchain.ruff) {
  try {
    const diagnostics = JSON.parse(Read(`${tmpDir}/ruff.json`))
    for (const diag of diagnostics) {
      const ruleCode = diag.code || ''
      // S* = security (bandit) rules; F*/B* = pyflakes/bugbear correctness rules
      let dimension = 'maintainability'
      if (ruleCode.startsWith('S')) dimension = 'security'
      else if (ruleCode.startsWith('F') || ruleCode.startsWith('B')) dimension = 'correctness'
      const category =
        dimension === 'security' ? 'input-validation'
        : dimension === 'correctness' ? 'bug'
        : 'code-smell'
      findings.push({
        dimension,
        category,
        severity: (ruleCode.startsWith('S') || ruleCode.startsWith('F')) ? 'high' : 'medium',
        title: `ruff ${ruleCode}: ${(diag.message || '').slice(0,80)}`,
        description: diag.message || '',
        location: { file: diag.filename, line: diag.location?.row || 1, end_line: diag.end_location?.row },
        source: 'tool:ruff',
        tool_rule: ruleCode,
        suggested_fix: diag.fix?.message || '',
        effort: diag.fix ? 'low' : 'medium',
        confidence: 'high'
      })
    }
  } catch {}
}
// --- npm audit: {vulnerabilities:{name:{severity,title,fixAvailable,via}}} ---
if (toolchain.npmAudit) {
  try {
    const audit = JSON.parse(Read(`${tmpDir}/audit.json`))
    // npm's "moderate" maps to our "medium"; other levels pass through unchanged
    const levelMap = { critical: 'critical', high: 'high', moderate: 'medium', low: 'low', info: 'info' }
    for (const vuln of Object.values(audit.vulnerabilities || {})) {
      const fixable = Boolean(vuln.fixAvailable)
      findings.push({
        dimension: 'security',
        category: 'dependency',
        severity: levelMap[vuln.severity] || 'medium',
        title: `npm audit: ${vuln.name} - ${(vuln.title || '').slice(0,80)}`,
        description: vuln.title || `Vulnerable: ${vuln.name}`,
        location: { file: 'package.json', line: 1 },
        source: 'tool:npm-audit',
        tool_rule: null,
        suggested_fix: fixable ? 'npm audit fix' : 'Manual resolution',
        effort: fixable ? 'low' : 'high',
        confidence: 'high'
      })
    }
  } catch {}
}
// --- mypy: file:line: error: message [code] ---
if (toolchain.mypy) {
  try {
    const out = Read(`${tmpDir}/mypy.txt`)
    // Matches "file:line: <level>: message [error-code]". "note" is included in
    // the alternation so the skip below actually fires — previously the guard
    // was dead code because (error|warning) could never capture "note".
    const re = /^(.+):(\d+):\s+(error|warning|note):\s+(.+?)(?:\s+\[(\w[\w-]*)\])?$/gm
    let m
    while ((m = re.exec(out)) !== null) {
      if (m[3] === 'note') continue // notes are context lines, not findings
      findings.push({
        dimension: 'correctness', category: 'type-safety',
        severity: m[3] === 'error' ? 'high' : 'medium',
        // m[5] is the optional bracketed mypy error code, e.g. [arg-type]
        title: `mypy${m[5] ? ` [${m[5]}]` : ''}: ${m[4].slice(0,80)}`, description: m[4],
        location: { file: m[1], line: +m[2] },
        source: 'tool:mypy', tool_rule: m[5] || null, suggested_fix: '',
        effort: 'low', confidence: 'high'
      })
    }
  } catch {}
}
```
### Step 4: Write Output
```javascript
// Persist normalized findings for the scanner role, then drop raw tool output.
Write(`${sessionFolder}/scan/toolchain-findings.json`, JSON.stringify(findings, null, 2))
Bash(`rm -rf "${tmpDir}"`)
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| Tool not found at runtime | Skip gracefully, continue with others |
| Tool times out (>5 min) | Killed when the Bash call hits its 300s timeout; partial output used |
| Tool output unparseable | try/catch skips that tool's findings |
| All tools fail | Empty array written, semantic-scan covers all dimensions |

View File

@@ -0,0 +1,160 @@
# Role: scanner
Toolchain + LLM semantic scan producing structured findings. Static analysis tools in parallel, then LLM for issues tools miss.
## Role Identity
| Field | Value |
|-------|-------|
| Name | `scanner` |
| Task Prefix | `SCAN-*` |
| Type | read-only-analysis |
| Output Tag | `[scanner]` |
| Communication | coordinator only |
## Role Boundaries
**MUST**: Only `SCAN-*` tasks. All output `[scanner]`-prefixed. Write only to session scan dir. IDs: SEC-001, COR-001, PRF-001, MNT-001.
**MUST NOT**: Modify source files. Fix issues. Create tasks for other roles. Contact reviewer/fixer directly.
## Messages: `scan_progress` (milestone), `scan_complete` (Phase 5), `error`
## Message Bus
```javascript
// Primary channel: MCP team-msg bus; every scanner message carries the [scanner] tag
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"scanner", to:"coordinator", type:"scan_complete", summary:"[scanner] ..." })
// Fallback: Bash(echo JSON >> "${sessionFolder}/message-log.jsonl")
```
## Toolbox
| Command | File | Phase |
|---------|------|-------|
| `toolchain-scan` | [commands/toolchain-scan.md](commands/toolchain-scan.md) | 3A: Parallel static analysis |
| `semantic-scan` | [commands/semantic-scan.md](commands/semantic-scan.md) | 3B: LLM analysis via CLI (gemini/qwen/codex fallback) |
## Execution (5-Phase)
### Phase 1: Task Discovery
```javascript
const tasks = TaskList()
const myTasks = tasks.filter(t =>
t.subject.startsWith('SCAN-') &&
t.status !== 'completed' &&
(t.blockedBy || []).length === 0
)
if (myTasks.length === 0) return
const task = TaskGet({ taskId: myTasks[0].id })
TaskUpdate({ taskId: task.id, status: 'in_progress' })
// Extract from task description
const target = task.description.match(/target:\s*(.+)/)?.[1]?.trim() || '.'
const dimStr = task.description.match(/dimensions:\s*(.+)/)?.[1]?.trim() || 'sec,cor,perf,maint'
const dimensions = dimStr.split(',').map(d => d.trim())
const quickMode = /quick:\s*true/.test(task.description)
const sessionFolder = task.description.match(/session:\s*(.+)/)?.[1]?.trim()
```
### Phase 2: Context Resolution
```javascript
// Expand the target into concrete source files; glob patterns pass through as-is.
// NOTE(review): a plain single-file target gets "/**/*" appended — presumably
// targets are directories or globs; confirm file targets are handled upstream.
const targetFiles = Glob(target.includes('*') ? target : `${target}/**/*`)
  .filter(f => /\.(ts|tsx|js|jsx|py|go|java|rs)$/.test(f))
if (targetFiles.length === 0) { /* report error, complete task */ return }
// Detect toolchain: check config files + tool availability
const projectRoot = Bash('git rev-parse --show-toplevel 2>/dev/null || pwd').trim()
// chk: run a shell probe that echoes "y"/"n" and convert to a boolean
const chk = (c) => Bash(c).trim() === 'y'
const toolchain = {
  tsc: chk(`test -f "${projectRoot}/tsconfig.json" && echo y || echo n`),
  eslint: chk(`(ls "${projectRoot}"/.eslintrc* "${projectRoot}"/eslint.config.* 2>/dev/null | head -1 >/dev/null && echo y) || (grep -q eslint "${projectRoot}/package.json" 2>/dev/null && echo y) || echo n`),
  semgrep: chk(`test -f "${projectRoot}/.semgrep.yml" && echo y || echo n`),
  ruff: chk(`test -f "${projectRoot}/pyproject.toml" && command -v ruff >/dev/null 2>&1 && echo y || echo n`),
  mypy: chk(`command -v mypy >/dev/null 2>&1 && test -f "${projectRoot}/pyproject.toml" && echo y || echo n`),
  npmAudit: chk(`test -f "${projectRoot}/package-lock.json" && echo y || echo n`)
}
```
### Phase 3: Scan Execution
```javascript
let toolchainFindings = [], semanticFindings = []
if (quickMode) {
  // Quick Mode: Single inline CLI, max 20 findings
  const qr = Bash(`ccw cli -p "Quick scan ${target}. Dims: ${dimensions.join(',')}. Return JSON array max 20 critical/high findings. Schema: {dimension,category,severity,title,description,location:{file,line},source:'llm',suggested_fix,effort,confidence}" --tool gemini --mode analysis --rule analysis-review-code-quality`, { timeout: 300000 })
  // Extract the first JSON array from the CLI response; anything else is discarded
  try { const m = qr.match(/\[[\s\S]*\]/); if (m) semanticFindings = JSON.parse(m[0]) } catch {}
} else {
  // Standard Mode: Sequential A -> B; each command file writes its own JSON output
  Read("commands/toolchain-scan.md") // writes toolchain-findings.json
  try { toolchainFindings = JSON.parse(Read(`${sessionFolder}/scan/toolchain-findings.json`)) } catch {}
  Read("commands/semantic-scan.md") // writes semantic-findings.json (uses toolchain output for dedup)
  try { semanticFindings = JSON.parse(Read(`${sessionFolder}/scan/semantic-findings.json`)) } catch {}
}
```
### Phase 4: Aggregate & Deduplicate
```javascript
// Dedup: same file + line + dimension = duplicate (first occurrence wins;
// toolchain findings precede semantic findings in the merge order)
const seen = new Set()
const unique = []
for (const f of [...toolchainFindings, ...semanticFindings]) {
  const key = `${f.location?.file}:${f.location?.line}:${f.dimension}`
  if (seen.has(key)) continue
  seen.add(key)
  unique.push(f)
}
// Assign dimension-prefixed IDs (SEC-001, COR-001, PRF-001, MNT-001)
const DIM_PREFIX = { security:'SEC', correctness:'COR', performance:'PRF', maintainability:'MNT' }
const dimCounters = { SEC:0, COR:0, PRF:0, MNT:0 }
const findings = []
for (const f of unique) {
  const pfx = DIM_PREFIX[f.dimension] || 'MNT'
  dimCounters[pfx] += 1
  findings.push({
    ...f,
    id: `${pfx}-${String(dimCounters[pfx]).padStart(3,'0')}`,
    // Backfill defaults so every finding satisfies the schema's required fields
    severity: f.severity || 'medium',
    confidence: f.confidence || 'medium',
    effort: f.effort || 'medium',
    source: f.source || 'llm',
    // Reviewer-owned fields start empty; populated downstream by REV-* tasks
    root_cause: null, impact: null, optimization: null,
    fix_strategy: null, fix_complexity: null, fix_dependencies: []
  })
}
// Write scan-results.json (schema: scan_date, target, total_findings, by_severity, by_dimension, findings[])
const by_severity = {}
for (const f of findings) by_severity[f.severity] = (by_severity[f.severity] || 0) + 1
const scanResult = {
  scan_date: new Date().toISOString(),
  target, dimensions, quick_mode: quickMode,
  total_findings: findings.length,
  by_severity,
  by_dimension: Object.fromEntries(Object.entries(DIM_PREFIX).map(([k, v]) => [k, dimCounters[v]])),
  findings
}
Write(`${sessionFolder}/scan/scan-results.json`, JSON.stringify(scanResult, null, 2))
```
### Phase 5: Update Shared Memory & Report
```javascript
// Merge scan summary into session shared memory (tolerate missing/corrupt file)
let sharedMemory = {}
try { sharedMemory = JSON.parse(Read(`${sessionFolder}/shared-memory.json`)) } catch {}
sharedMemory.scan_results = { file: `${sessionFolder}/scan/scan-results.json`, total: findings.length, by_severity: scanResult.by_severity, by_dimension: scanResult.by_dimension }
sharedMemory.findings_count = findings.length
Write(`${sessionFolder}/shared-memory.json`, JSON.stringify(sharedMemory, null, 2))
// Human-readable summaries: non-zero per-dimension counts, top-10 critical/high findings
const dimSum = Object.entries(dimCounters).filter(([,v]) => v > 0).map(([k,v]) => `${k}:${v}`).join(' ')
const top = findings.filter(f => f.severity==='critical'||f.severity==='high').slice(0,10)
  .map(f => `- **[${f.id}]** [${f.severity}] ${f.location.file}:${f.location.line} - ${f.title}`).join('\n')
// Notify coordinator via message bus, then direct message, then close the task
mcp__ccw-tools__team_msg({ operation:"log", team:"team-review", from:"scanner", to:"coordinator", type:"scan_complete",
  summary:`[scanner] Scan complete: ${findings.length} findings (${dimSum})`, ref:`${sessionFolder}/scan/scan-results.json` })
SendMessage({ type:"message", recipient:"coordinator",
  content:`## [scanner] Scan Results\n**Target**: ${target} | **Mode**: ${quickMode?'quick':'standard'}\n### ${findings.length} findings (${dimSum})\n${top||'(clean)'}\nOutput: ${sessionFolder}/scan/scan-results.json`,
  summary:`[scanner] SCAN complete: ${findings.length} findings` })
TaskUpdate({ taskId: task.id, status: 'completed' })
```
## Error Handling
| Scenario | Resolution |
|----------|------------|
| No source files match target | Report empty, complete task cleanly |
| All toolchain tools unavailable | Skip toolchain, run semantic-only |
| CLI semantic scan fails | Log warning, use toolchain results only |
| Quick mode CLI timeout | Return partial or empty findings |
| Toolchain tool crashes | Skip that tool, continue with others |
| Session folder missing | Re-create scan subdirectory |

View File

@@ -0,0 +1,82 @@
# Review Dimensions (4-Dimension System)
## Security (SEC)
Vulnerabilities, attack surfaces, and data protection issues.
**Categories**: injection, authentication, authorization, data-exposure, encryption, input-validation, access-control
**Tool Support**: Semgrep (`--config auto`), npm audit, tsc strict mode
**LLM Focus**: Business logic vulnerabilities, privilege escalation paths, sensitive data flows
**Severity Mapping**:
- Critical: RCE, SQL injection, auth bypass, data breach
- High: XSS, CSRF, insecure deserialization, weak crypto
- Medium: Missing input validation, overly permissive CORS
- Low: Informational headers, minor config issues
---
## Correctness (COR)
Bugs, logic errors, and type safety issues.
**Categories**: bug, error-handling, edge-case, type-safety, race-condition, null-reference
**Tool Support**: tsc `--noEmit`, ESLint error-level rules
**LLM Focus**: Logic errors, unhandled exception paths, state management bugs, race conditions
**Severity Mapping**:
- Critical: Data corruption, crash in production path
- High: Incorrect business logic, unhandled error in common path
- Medium: Edge case not handled, missing null check
- Low: Minor type inconsistency, unused variable
---
## Performance (PRF)
Inefficiencies, resource waste, and scalability issues.
**Categories**: n-plus-one, memory-leak, blocking-operation, complexity, resource-usage, caching
**Tool Support**: None (LLM-only dimension)
**LLM Focus**: Algorithm complexity, N+1 queries, unnecessary sync operations, memory leaks, missing caching
**Severity Mapping**:
- Critical: Memory leak in long-running process, O(n³) on user data
- High: N+1 query in hot path, blocking I/O in async context
- Medium: Suboptimal algorithm, missing obvious cache
- Low: Minor inefficiency, premature optimization opportunity
---
## Maintainability (MNT)
Code quality, readability, and structural health.
**Categories**: code-smell, naming, complexity, duplication, dead-code, pattern-violation, coupling
**Tool Support**: ESLint warning-level rules, complexity metrics
**LLM Focus**: Architectural coupling, abstraction leaks, project convention violations
**Severity Mapping**:
- High: God class, circular dependency, copy-paste across modules
- Medium: Long method, magic numbers, unclear naming
- Low: Minor style inconsistency, commented-out code
- Info: Pattern observation, refactoring suggestion
---
## Why 4 Dimensions (Not 7)
The original review-cycle used 7 dimensions with significant overlap:
| Original | Problem | Merged Into |
|----------|---------|-------------|
| Quality | Overlaps Maintainability + Best-Practices | **Maintainability** |
| Best-Practices | Overlaps Quality + Maintainability | **Maintainability** |
| Architecture | Overlaps Maintainability (coupling/layering) | **Maintainability** (structure) + **Security** (security architecture) |
| Action-Items | Not a dimension — it's a report format | Standard field on every finding |
4 dimensions = clear ownership, no overlap, each maps to distinct tooling.

View File

@@ -0,0 +1,82 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Finding",
"description": "Standardized finding format for team-review pipeline",
"type": "object",
"required": ["id", "dimension", "category", "severity", "title", "description", "location", "source", "effort", "confidence"],
"properties": {
"id": {
"type": "string",
"pattern": "^(SEC|COR|PRF|MNT)-\\d{3}$",
"description": "{DIM_PREFIX}-{SEQ}"
},
"dimension": {
"type": "string",
"enum": ["security", "correctness", "performance", "maintainability"]
},
"category": {
"type": "string",
"description": "Sub-category within the dimension"
},
"severity": {
"type": "string",
"enum": ["critical", "high", "medium", "low", "info"]
},
"title": { "type": "string" },
"description": { "type": "string" },
"location": {
"type": "object",
"required": ["file", "line"],
"properties": {
"file": { "type": "string" },
"line": { "type": "integer" },
"end_line": { "type": "integer" },
"code_snippet": { "type": "string" }
}
},
"source": {
"type": "string",
"description": "tool:eslint | tool:tsc | tool:semgrep | llm | tool+llm"
},
"tool_rule": { "type": ["string", "null"] },
"suggested_fix": { "type": "string" },
"references": {
"type": "array",
"items": { "type": "string" }
},
"effort": { "type": "string", "enum": ["low", "medium", "high"] },
"confidence": { "type": "string", "enum": ["high", "medium", "low"] },
"root_cause": {
"type": ["object", "null"],
"description": "Populated by reviewer role",
"properties": {
"description": { "type": "string" },
"related_findings": { "type": "array", "items": { "type": "string" } },
"is_symptom": { "type": "boolean" }
}
},
"impact": {
"type": ["object", "null"],
"properties": {
"scope": { "type": "string", "enum": ["low", "medium", "high"] },
"affected_files": { "type": "array", "items": { "type": "string" } },
"blast_radius": { "type": "string" }
}
},
"optimization": {
"type": ["object", "null"],
"properties": {
"approach": { "type": "string" },
"alternative": { "type": "string" },
"tradeoff": { "type": "string" }
}
},
"fix_strategy": { "type": ["string", "null"], "enum": ["minimal", "refactor", "skip", null] },
"fix_complexity": { "type": ["string", "null"], "enum": ["low", "medium", "high", null] },
"fix_dependencies": {
"type": "array",
"items": { "type": "string" },
"default": []
}
}
}

View File

@@ -0,0 +1,27 @@
{
"name": "team-review",
"description": "Code scanning, vulnerability review, optimization suggestions, and automated fix",
"sessionDir": ".workflow/.team-review/",
"msgDir": ".workflow/.team-msg/team-review/",
"roles": {
"coordinator": { "prefix": "RC", "type": "orchestration", "file": "roles/coordinator/role.md" },
"scanner": { "prefix": "SCAN", "type": "read-only-analysis", "file": "roles/scanner/role.md" },
"reviewer": { "prefix": "REV", "type": "read-only-analysis", "file": "roles/reviewer/role.md" },
"fixer": { "prefix": "FIX", "type": "code-generation", "file": "roles/fixer/role.md" }
},
"collaboration_pattern": "CP-1",
"pipeline": ["scanner", "reviewer", "fixer"],
"dimensions": {
"security": { "prefix": "SEC", "tools": ["semgrep", "npm-audit"] },
"correctness": { "prefix": "COR", "tools": ["tsc", "eslint-error"] },
"performance": { "prefix": "PRF", "tools": [] },
"maintainability": { "prefix": "MNT", "tools": ["eslint-warning"] }
},
"severity_levels": ["critical", "high", "medium", "low", "info"],
"defaults": {
"max_deep_analysis": 15,
"max_quick_findings": 20,
"max_parallel_fixers": 3,
"quick_fix_threshold": 5
}
}