mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-15 02:42:45 +08:00
feat: add Terminal Dashboard components and state management
- Implemented DashboardToolbar for managing panel toggles and layout presets.
- Created FloatingPanel for a generic sliding panel interface.
- Developed TerminalGrid for rendering a recursive layout of terminal panes.
- Added TerminalPane to encapsulate individual terminal instances with toolbar actions.
- Introduced layout utilities for managing Allotment layout trees.
- Established Zustand store for terminal grid state management, supporting pane operations and layout resets.
@@ -1,337 +1,118 @@
---
name: review-cycle
description: Unified multi-dimensional code review with automated fix orchestration. Routes to session-based (git changes), module-based (path patterns), or fix mode. Triggers on "workflow:review-cycle", "workflow:review-session-cycle", "workflow:review-module-cycle", "workflow:review-cycle-fix".
allowed-tools: Task, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep, Skill
---

# Review Cycle

Unified code review orchestrator with mode-based routing: it detects the input type and dispatches to the appropriate phase document. The review pipeline performs dual-mode (session/module) file discovery and 7-dimension parallel analysis with iterative deep-dive on critical findings; an optional fix pipeline adds intelligent batching and parallel planning.

## Architecture Overview

```
┌──────────────────────────────────────────────────────────┐
│          Review Cycle Orchestrator (SKILL.md)            │
│  → Parse input → Detect mode → Read phase doc → Execute  │
└───────────────────────────┬──────────────────────────────┘
                            │
          ┌─────────────────┼─────────────────┐
          ↓                 ↓                 ↓
   ┌─────────────┐   ┌─────────────┐   ┌─────────────┐
   │   session   │   │   module    │   │     fix     │
   │ (git changes│   │(path pattern│   │(export file │
   │   review)   │   │   review)   │   │  auto-fix)  │
   └─────────────┘   └─────────────┘   └─────────────┘
     phases/           phases/           phases/
  review-session.md  review-module.md   review-fix.md
```

Internally, the review phase docs share one pipeline (Phases 1-5) and the fix phase doc runs its own (Phases 6-9):

```
┌──────────────────────────────────────────────────────────────────┐
│                   Review Pipeline (Phases 1-5)                   │
│                                                                  │
│ ┌─────────┐  ┌─────────┐  ┌─────────┐  ┌─────────┐  ┌─────────┐  │
│ │ Phase 1 │→ │ Phase 2 │→ │ Phase 3 │→ │ Phase 4 │→ │ Phase 5 │  │
│ │Discovery│  │Parallel │  │Aggregate│  │Deep-Dive│  │Complete │  │
│ │  Init   │  │ Review  │  │         │  │ (cond.) │  │         │  │
│ └─────────┘  └─────────┘  └─────────┘  └─────────┘  └─────────┘  │
│  session|     7 agents     severity     N agents     finalize    │
│  module       ×cli-explore  calc        ×cli-explore  state      │
│                               ↕ loop                             │
└──────────────────────────────────────────────────────────────────┘
                                │
                         (optional --fix)
                                │
┌──────────────────────────────────────────────────────────────────┐
│                    Fix Pipeline (Phases 6-9)                     │
│                                                                  │
│ ┌─────────┐  ┌─────────┐  ┌─────────┐  ┌─────────┐               │
│ │ Phase 6 │→ │ Phase 7 │→ │ Phase 8 │→ │ Phase 9 │               │
│ │Discovery│  │Parallel │  │Execution│  │Complete │               │
│ │Batching │  │Planning │  │Orchestr.│  │         │               │
│ └─────────┘  └─────────┘  └─────────┘  └─────────┘               │
│  grouping     N agents     M agents     aggregate                │
│  + batch      ×cli-plan    ×cli-exec    + summary                │
└──────────────────────────────────────────────────────────────────┘
```

## Key Design Principles

1. **Dual-Mode Review**: Session-based (git changes) and module-based (path patterns) reviews share the same pipeline (Phases 2-5), differing only in file discovery (Phase 1)
2. **Pure Orchestrator**: Execute phases in sequence, parse outputs, and pass context between them
3. **Progressive Phase Loading**: Phase docs are read on-demand when that phase executes, not all at once
4. **Auto-Continue**: All phases run autonomously, without user intervention between phases
5. **Task Attachment Model**: Sub-tasks are attached and collapsed dynamically via TaskCreate/TaskUpdate
6. **Optional Fix Pipeline**: Phases 6-9 are triggered only by an explicit `--fix` flag or user confirmation after Phase 5
7. **Content Preservation**: All agent prompts, code, and schemas are preserved verbatim from the source commands

## Mode Detection

```javascript
// Input parsing logic (orchestrator responsibility)
function detectMode(args) {
  if (args.includes('--fix')) return 'fix';
  // Glob/path patterns → module mode
  if (args.match(/\*|\.ts|\.js|\.py|\.vue|\.jsx|\.tsx|src\/|lib\//)) return 'module';
  // Session ID or empty input → session mode
  if (args.match(/^WFS-/) || args.trim() === '') return 'session';
  return 'session'; // default
}
```

| Input Pattern | Detected Mode | Phase Doc |
|---------------|---------------|-----------|
| `src/auth/**` | `module` | phases/review-module.md |
| `src/auth/**,src/payment/**` | `module` | phases/review-module.md |
| `WFS-payment-integration` | `session` | phases/review-session.md |
| _(empty)_ | `session` | phases/review-session.md (auto-detect) |
| `--fix .review/` | `fix` | phases/review-fix.md |
| `--fix --resume` | `fix` | phases/review-fix.md (resume) |

## Usage

```
Skill(skill="review-cycle", args="src/auth/**")                                     # Module mode
Skill(skill="review-cycle", args="src/auth/**,src/payment/**")                      # Module: multiple paths
Skill(skill="review-cycle", args="src/auth/** --dimensions=security,architecture")  # Module + custom dims
Skill(skill="review-cycle", args="WFS-payment-integration")                         # Session mode
Skill(skill="review-cycle", args="")                                                # Session: auto-detect
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-123/.review/")         # Fix mode
Skill(skill="review-cycle", args="--fix --resume")                                  # Fix: resume

# Common flags (all modes):
--dimensions=dim1,dim2,...   Custom dimensions (default: all 7)
--max-iterations=N           Max deep-dive iterations (default: 3)

# Fix-only flags:
--fix                        Enter fix pipeline (after review or standalone)
--resume                     Resume interrupted fix session
--batch-size=N               Findings per planning batch (default: 5)
--max-iterations=N           In fix mode: max retry attempts per finding (default: 3)
```
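
A minimal sketch of how the orchestrator might parse these flags from the raw `args` string. The `parseFlags` helper, its return shape, and the hard-coded defaults are illustrative assumptions, not part of the skill's defined API:

```javascript
// Hypothetical flag parser: splits raw args into a target plus typed flags.
// Flag names and defaults mirror the table above; the function is illustrative.
function parseFlags(args) {
  const flags = { fix: false, resume: false, dimensions: null, maxIterations: 3, batchSize: 5 };
  const target = [];
  for (const token of args.trim().split(/\s+/).filter(Boolean)) {
    if (token === '--fix') flags.fix = true;
    else if (token === '--resume') flags.resume = true;
    else if (token.startsWith('--dimensions=')) flags.dimensions = token.slice(13).split(',');
    else if (token.startsWith('--max-iterations=')) flags.maxIterations = Number(token.slice(17));
    else if (token.startsWith('--batch-size=')) flags.batchSize = Number(token.slice(13));
    else target.push(token); // path pattern, session ID, or review dir
  }
  return { target: target.join(' '), flags };
}

// Example: parseFlags("src/auth/** --dimensions=security,architecture")
// → { target: "src/auth/**", flags: { ..., dimensions: ["security", "architecture"] } }
```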

## Execution Flow

```
Orchestrator Routing:
  1. Parse $ARGUMENTS → extract mode + flags
  2. Detect mode (session | module | fix)
  3. Read corresponding phase doc:
     - session → Read phases/review-session.md → execute
     - module  → Read phases/review-module.md  → execute
     - fix     → Read phases/review-fix.md     → execute
  4. Phase doc contains the full execution detail (Phases 1-5 for review, Phases 6-9 for fix)

Review Pipeline (session or module mode):

Phase 1: Discovery & Initialization
└─ Ref: phases/01-discovery-initialization.md
   ├─ Session mode: session discovery → git changed files → resolve
   ├─ Module mode: path patterns → glob expand → resolve
   └─ Common: create session, output dirs, review-state.json, review-progress.json

Phase 2: Parallel Review Coordination
└─ Ref: phases/02-parallel-review.md
   ├─ Launch 7 cli-explore-agent instances (Deep Scan mode)
   ├─ Each produces dimensions/{dimension}.json + reports/{dimension}-analysis.md
   └─ CLI fallback: Gemini → Qwen → Codex

Phase 3: Aggregation
└─ Ref: phases/03-aggregation.md
   ├─ Load dimension JSONs, calculate severity distribution
   ├─ Identify cross-cutting concerns (files in 3+ dimensions)
   └─ Decision: critical > 0 OR high > 5 OR critical files → Phase 4
                Else → Phase 5

Phase 4: Iterative Deep-Dive (conditional)
└─ Ref: phases/04-iterative-deep-dive.md
   ├─ Select critical findings (max 5 per iteration)
   ├─ Launch deep-dive agents for root cause analysis
   ├─ Re-assess severity → loop back to Phase 3 aggregation
   └─ Exit when: no critical findings OR max iterations reached

Phase 5: Review Completion
└─ Ref: phases/05-review-completion.md
   ├─ Finalize review-state.json + review-progress.json
   ├─ Prompt user: "Run automated fixes? [Y/n]"
   └─ If yes → Continue to Phase 6

Fix Pipeline (--fix mode or after Phase 5):

Phase 6: Fix Discovery & Batching
└─ Ref: phases/06-fix-discovery-batching.md
   ├─ Validate export file, create fix session
   └─ Intelligent grouping by file+dimension similarity → batches

Phase 7: Fix Parallel Planning
└─ Ref: phases/07-fix-parallel-planning.md
   ├─ Launch N cli-planning-agent instances (≤10 parallel)
   ├─ Each outputs partial-plan-{batch-id}.json
   └─ Orchestrator aggregates → fix-plan.json

Phase 8: Fix Execution
└─ Ref: phases/08-fix-execution.md
   ├─ Stage-based execution per aggregated timeline
   ├─ Each group: analyze → fix → test → commit/rollback
   └─ 100% test pass rate required

Phase 9: Fix Completion
└─ Ref: phases/09-fix-completion.md
   ├─ Aggregate results → fix-summary.md
   └─ Optional: complete workflow session if all fixes successful

Complete: Review reports + optional fix results
```

**Phase Reference Documents** (read on-demand based on detected mode):

| Mode | Document | Source | Description |
|------|----------|--------|-------------|
| session | [phases/review-session.md](phases/review-session.md) | review-session-cycle.md | Session-based review: git changes → 7-dimension parallel analysis → aggregation → deep-dive → completion |
| module | [phases/review-module.md](phases/review-module.md) | review-module-cycle.md | Module-based review: path patterns → 7-dimension parallel analysis → aggregation → deep-dive → completion |
| fix | [phases/review-fix.md](phases/review-fix.md) | review-cycle-fix.md | Automated fix: export file → intelligent batching → parallel planning → execution → completion |

**Per-Phase Reference Documents** (read on-demand when that phase executes):

| Phase | Document | Load When | Source |
|-------|----------|-----------|--------|
| 1 | [phases/01-discovery-initialization.md](phases/01-discovery-initialization.md) | Review/Fix start | review-session-cycle + review-module-cycle Phase 1 (fused) |
| 2 | [phases/02-parallel-review.md](phases/02-parallel-review.md) | Phase 1 complete | Shared from both review commands Phase 2 |
| 3 | [phases/03-aggregation.md](phases/03-aggregation.md) | Phase 2 complete | Shared from both review commands Phase 3 |
| 4 | [phases/04-iterative-deep-dive.md](phases/04-iterative-deep-dive.md) | Aggregation triggers iteration | Shared from both review commands Phase 4 |
| 5 | [phases/05-review-completion.md](phases/05-review-completion.md) | No more iterations needed | Shared from both review commands Phase 5 |
| 6 | [phases/06-fix-discovery-batching.md](phases/06-fix-discovery-batching.md) | Fix mode entry | review-cycle-fix Phase 1 + 1.5 |
| 7 | [phases/07-fix-parallel-planning.md](phases/07-fix-parallel-planning.md) | Phase 6 complete | review-cycle-fix Phase 2 |
| 8 | [phases/08-fix-execution.md](phases/08-fix-execution.md) | Phase 7 complete | review-cycle-fix Phase 3 |
| 9 | [phases/09-fix-completion.md](phases/09-fix-completion.md) | Phase 8 complete | review-cycle-fix Phase 4 + 5 |

## Core Rules

1. **Mode Detection First**: Parse input to determine session/module/fix mode before anything else
2. **Start Immediately**: First action is TaskCreate initialization, second action is Phase 1 execution
3. **Progressive Phase Loading**: Read ONLY the phase doc for the detected mode, and read each phase doc only when that phase is about to execute
4. **Full Delegation**: Once the mode is detected, the phase doc owns the entire execution flow
5. **Parse Every Output**: Extract required data from each phase for the next phase
6. **Auto-Continue**: Check TaskList status to execute the next pending phase automatically
7. **DO NOT STOP**: Continuous multi-phase workflow until all applicable phases complete
8. **Conditional Phase 4**: Only execute if aggregation triggers iteration (critical > 0 OR high > 5 OR critical files)
9. **Fix Pipeline Optional**: Phases 6-9 only execute with an explicit --fix flag or user confirmation after Phase 5

## Data Flow

```
User Input (path-pattern | session-id | --fix export-file)
        ↓
[Mode Detection: session | module | fix]
        ↓
Phase 1: Discovery & Initialization
        ↓ Output: sessionId, reviewId, resolvedFiles, reviewMode, outputDir
        ↓         review-state.json, review-progress.json
Phase 2: Parallel Review Coordination
        ↓ Output: dimensions/*.json, reports/*-analysis.md
Phase 3: Aggregation
        ↓ Output: severityDistribution, criticalFiles, deepDiveFindings
        ↓ Decision: iterate? → Phase 4 : Phase 5
Phase 4: Iterative Deep-Dive (conditional, loops with Phase 3)
        ↓ Output: iterations/*.json, reports/deep-dive-*.md
        ↓ Loop: re-aggregate → check criteria → iterate or exit
Phase 5: Review Completion
        ↓ Output: final review-state.json, review-progress.json
        ↓ Decision: fix? → Phase 6 : END
Phase 6: Fix Discovery & Batching
        ↓ Output: finding batches (in-memory)
Phase 7: Fix Parallel Planning
        ↓ Output: partial-plan-*.json → fix-plan.json (aggregated)
Phase 8: Fix Execution
        ↓ Output: fix-progress-*.json, git commits
Phase 9: Fix Completion
        ↓ Output: fix-summary.md, fix-history.json
```

## TaskCreate/TaskUpdate Pattern

**Review Pipeline Initialization**:
```javascript
TaskCreate({ subject: "Phase 1: Discovery & Initialization", activeForm: "Initializing review" });
TaskCreate({ subject: "Phase 2: Parallel Reviews (7 dimensions)", activeForm: "Reviewing" });
TaskCreate({ subject: "Phase 3: Aggregation", activeForm: "Aggregating findings" });
TaskCreate({ subject: "Phase 4: Deep-dive (conditional)", activeForm: "Deep-diving" });
TaskCreate({ subject: "Phase 5: Review Completion", activeForm: "Completing review" });
```

**During Phase 2 (sub-tasks for each dimension)**:
```javascript
// Attach dimension sub-tasks
TaskCreate({ subject: " → Security review", activeForm: "Analyzing security" });
TaskCreate({ subject: " → Architecture review", activeForm: "Analyzing architecture" });
TaskCreate({ subject: " → Quality review", activeForm: "Analyzing quality" });
// ... other dimensions

// Collapse: mark all dimension tasks completed when Phase 2 finishes
```

**Fix Pipeline (added after Phase 5 if triggered)**:
```javascript
TaskCreate({ subject: "Phase 6: Fix Discovery & Batching", activeForm: "Batching findings" });
TaskCreate({ subject: "Phase 7: Parallel Planning", activeForm: "Planning fixes" });
TaskCreate({ subject: "Phase 8: Execution", activeForm: "Executing fixes" });
TaskCreate({ subject: "Phase 9: Fix Completion", activeForm: "Completing fixes" });
```

## Error Handling

### Orchestrator Errors

| Error | Action |
|-------|--------|
| Cannot determine mode from input | AskUserQuestion to clarify intent |
| Phase doc not found | Error and exit with file path |
| Invalid flags for mode | Warn and continue with defaults |

### Review Pipeline Errors

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Session not found (session mode) | Yes | Error and exit |
| Phase 1 | No changed files (session mode) | Yes | Error and exit |
| Phase 1 | Invalid path pattern (module mode) | Yes | Error and exit |
| Phase 1 | No files matched (module mode) | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |

### Fix Pipeline Errors

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 6 | Invalid export file | Yes | Abort with error |
| Phase 6 | Empty batches | No | Warn and skip empty |
| Phase 7 | Planning agent timeout | No | Mark batch failed, continue others |
| Phase 7 | All agents fail | Yes | Abort fix session |
| Phase 8 | Test failure after fix | No | Rollback, retry up to max_iterations |
| Phase 8 | Git operations fail | Yes | Abort, preserve state |
| Phase 9 | Aggregation error | No | Generate partial summary |

### CLI Fallback Chain

Gemini → Qwen → Codex → degraded mode

**Fallback Triggers**: HTTP 429/5xx, connection timeout, invalid JSON output, confidence score < 0.4, analysis too brief (< 100 words)
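
A sketch of the loop these triggers imply. The `runCli` callback and its result shape (`json`, `confidence`, `report`) are assumptions standing in for the actual agent integration:

```javascript
// Illustrative fallback chain: try each CLI tool in priority order and
// fall through to degraded mode when every tool trips a fallback trigger.
const TOOL_CHAIN = ['gemini', 'qwen', 'codex'];

async function analyzeWithFallback(runCli, prompt) {
  for (const tool of TOOL_CHAIN) {
    try {
      const result = await runCli(tool, prompt); // assumed: { json, confidence, report }
      const tooBrief = result.report.split(/\s+/).length < 100;
      if (result.confidence < 0.4 || tooBrief) continue; // fallback trigger → next tool
      return { tool, result };
    } catch (err) {
      continue; // HTTP 429/5xx, timeout, invalid JSON → next tool
    }
  }
  return { tool: 'degraded', result: null }; // skip analysis, log error, keep workflow going
}
```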

## Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json              # Orchestrator state machine
├── review-progress.json           # Real-time progress
├── dimensions/                    # Per-dimension results (Phase 2)
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                    # Deep-dive results (Phase 4)
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
├── reports/                       # Human-readable reports
│   ├── security-analysis.md
│   ├── security-cli-output.txt
│   ├── deep-dive-1-{uuid}.md
│   └── ...
└── fixes/{fix-session-id}/        # Fix results (Phases 6-9)
    ├── partial-plan-*.json
    ├── fix-plan.json
    ├── fix-progress-*.json
    ├── fix-summary.md
    ├── active-fix-session.json
    └── fix-history.json
```

## Related Commands

### View Progress

```bash
# View review/fix progress dashboard
ccw view
```

### Workflow Pipeline

```bash
# Step 1: Review (this skill)
Skill(skill="review-cycle", args="src/auth/**")

# Step 2: Fix (continue after review, or standalone)
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-{session-id}/.review/")
```

@@ -1,334 +0,0 @@

# Phase 1: Discovery & Initialization

> Source: Fused from `commands/workflow/review-session-cycle.md` Phase 1 + `commands/workflow/review-module-cycle.md` Phase 1

## Overview

Detect the review mode (session or module), resolve target files, create a workflow session, and initialize the output directory structure and state files.

## Mode Detection

The review mode is determined by the input arguments:

- **Session mode**: No path pattern provided, OR a `WFS-*` session ID is provided. Reviews all changes within an existing workflow session (git-based change detection).
- **Module mode**: Glob/path patterns are provided (e.g., `src/auth/**`, `src/payment/processor.ts`). Reviews specific files/directories regardless of session history.

---

## Session Mode (review-session-cycle)

### Step 1.1: Session Discovery

```javascript
// If a session ID is not provided, auto-detect
if (!providedSessionId) {
  // Check for active sessions
  const activeSessions = Glob('.workflow/active/WFS-*');
  if (activeSessions.length === 1) {
    sessionId = activeSessions[0].match(/WFS-[^/]+/)[0];
  } else if (activeSessions.length > 1) {
    // Ambiguous: require an explicit session ID
    error("Multiple active sessions found. Please specify session ID.");
  } else {
    error("No active session found. Create session first with /workflow:session:start");
  }
} else {
  sessionId = providedSessionId;
}

// Validate session exists
Bash(`test -d .workflow/active/${sessionId} && echo "EXISTS"`);
```

### Step 1.2: Session Validation

- Ensure the session has implementation artifacts (check the `.summaries/` or `.task/` directory)
- Extract the session creation timestamp from `workflow-session.json`
- Use the timestamp for git log filtering: `git log --since="${sessionCreatedAt}"`

### Step 1.3: Changed Files Detection

```bash
# Get files changed since session creation
git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u
```
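
Note that `git log --name-only` also reports files deleted in later commits. A sketch of the resolution step using Node's child_process (the wrapper function and variable names are illustrative):

```javascript
const { execSync } = require('child_process');
const { existsSync } = require('fs');

// Collect files changed since session creation, dropping files that were
// deleted in later commits (git log still reports them, but they no longer exist).
function changedFilesSince(sessionCreatedAt) {
  const out = execSync(
    `git log --since="${sessionCreatedAt}" --name-only --pretty=format:`,
    { encoding: 'utf8' }
  );
  const unique = [...new Set(out.split('\n').map(l => l.trim()).filter(Boolean))];
  return unique.filter(existsSync);
}
```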

---

## Module Mode (review-module-cycle)

### Step 1.1: Session Creation

```javascript
// Create workflow session for this review (type: review)
Skill(skill="workflow:session:start", args="--type review \"Code review for [target_pattern]\"")

// Parse output
const sessionId = output.match(/SESSION_ID: (WFS-[^\s]+)/)[1];
```

### Step 1.2: Path Resolution & Validation

```bash
# Expand glob pattern to file list (relative paths from project root)
find . -path "./src/auth/**" -type f | sed 's|^\./||'

# Validate files exist and are readable
for file in "${resolvedFiles[@]}"; do
  test -r "$file" || error "File not readable: $file"
done
```

- Parse and expand file patterns (glob support): `src/auth/**` -> actual file list
- Validation: Ensure all specified files exist and are readable
- Store as **relative paths** from project root (e.g., `src/auth/service.ts`)
- Agents construct absolute paths dynamically during execution

**Syntax Rules**:
- All paths are **relative** from project root (e.g., `src/auth/**` not `/src/auth/**`)
- Multiple patterns: comma-separated, **no spaces** (e.g., `src/auth/**,src/payment/**`)
- Glob and specific files can be mixed (e.g., `src/auth/**,src/config.ts`)

**Supported Patterns**:

| Pattern Type | Example | Description |
|--------------|---------|-------------|
| Glob directory | `src/auth/**` | All files under src/auth/ |
| Glob with extension | `src/**/*.ts` | All .ts files under src/ |
| Specific file | `src/payment/processor.ts` | Single file |
| Multiple patterns | `src/auth/**,src/payment/**` | Comma-separated (no spaces) |

**Resolution Process**:
1. Parse input pattern (split by comma, trim whitespace)
2. Expand glob patterns to file list via `find` command
3. Validate all files exist and are readable
4. Error if pattern matches 0 files
5. Store resolved file list in review-state.json
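
A sketch of that resolution process. `Glob` and `error` stand in for the orchestrator tools used elsewhere in this document; the de-duplication step is an assumption for overlapping patterns:

```javascript
// Illustrative resolution of comma-separated patterns into a validated file list.
function resolvePatterns(input) {
  const patterns = input.split(',').map(p => p.trim()).filter(Boolean);
  const resolved = [];
  for (const pattern of patterns) {
    const matches = Glob(pattern); // assumed: returns relative paths from project root
    if (matches.length === 0) error(`Pattern matched 0 files: ${pattern}`);
    resolved.push(...matches);
  }
  return [...new Set(resolved)]; // de-duplicate files matched by overlapping patterns
}
```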

---

## Common Steps (Both Modes)

### Step 1.4: Output Directory Setup

- Output directory: `.workflow/active/${sessionId}/.review/`
- Create directory structure:
```bash
mkdir -p ${sessionDir}/.review/{dimensions,iterations,reports}
```

### Step 1.5: Initialize Review State

- Create `review-state.json` with metadata, dimensions, and max_iterations (merged metadata + state)
- Session mode includes `git_changes` in metadata
- Module mode includes `target_pattern` and `resolved_files` in metadata

### Step 1.6: Initialize Review Progress

- Create `review-progress.json` for real-time dashboard updates via polling
- See [Review Progress JSON](#review-progress-json) schema below

### Step 1.7: TaskCreate Initialization

- Set up progress tracking with hierarchical structure
- Mark Phase 1 completed, Phase 2 in_progress
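
A minimal sketch of Steps 1.5-1.6 combined, using the Phase 1 variables (`sessionId`, `reviewId`, `reviewMode`, `outputDir`, etc.); `Write` is the orchestrator's file tool, and the field subset shown here is abbreviated relative to the full schemas below:

```javascript
// Abbreviated state initialization (see the full schemas below for all fields).
const now = new Date().toISOString();

Write(`${outputDir}/review-state.json`, JSON.stringify({
  session_id: sessionId,
  review_id: reviewId,
  review_type: reviewMode,   // "session" | "module"
  metadata: { created_at: now, dimensions, max_iterations: maxIterations },
  phase: "parallel",
  current_iteration: 0,
  next_action: "execute_parallel_reviews"
}, null, 2));

Write(`${outputDir}/review-progress.json`, JSON.stringify({
  review_id: reviewId,
  last_update: now,
  phase: "parallel",
  progress: {
    parallel_review: { total_dimensions: dimensions.length, completed: 0, in_progress: 0, percent_complete: 0 }
  }
}, null, 2));
```
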
---

## Review State JSON (Session Mode)

**Purpose**: Unified state machine and metadata (merged from metadata + state)

```json
{
  "session_id": "WFS-payment-integration",
  "review_id": "review-20250125-143022",
  "review_type": "session",
  "metadata": {
    "created_at": "2025-01-25T14:30:22Z",
    "git_changes": {
      "commit_range": "abc123..def456",
      "files_changed": 15,
      "insertions": 342,
      "deletions": 128
    },
    "dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
    "max_iterations": 3
  },
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
  "selected_strategy": "comprehensive",
  "next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
  "severity_distribution": {
    "critical": 2,
    "high": 5,
    "medium": 12,
    "low": 8
  },
  "critical_files": [
    {
      "file": "src/payment/processor.ts",
      "finding_count": 5,
      "dimensions": ["security", "architecture", "quality"]
    }
  ],
  "iterations": [
    {
      "iteration": 1,
      "findings_analyzed": ["uuid-1", "uuid-2"],
      "findings_resolved": 1,
      "findings_escalated": 1,
      "severity_change": {
        "before": {"critical": 2, "high": 5, "medium": 12, "low": 8},
        "after": {"critical": 1, "high": 6, "medium": 12, "low": 8}
      },
      "timestamp": "2025-01-25T14:30:00Z"
    }
  ],
  "completion_criteria": {
    "target": "no_critical_findings_and_high_under_5",
    "current_status": "in_progress",
    "estimated_completion": "2 iterations remaining"
  }
}
```

**Field Descriptions**:
- `phase`: Current execution phase (state machine pointer)
- `current_iteration`: Iteration counter (used for max check)
- `next_action`: Next step the orchestrator should execute
- `severity_distribution`: Aggregated counts across all dimensions
- `critical_files`: Files appearing in 3+ dimensions, with metadata
- `iterations[]`: Historical log for trend analysis
|
||||
|
||||
**Purpose**: Unified state machine and metadata (merged from metadata + state)
|
||||
|
||||
```json
|
||||
{
|
||||
"review_id": "review-20250125-143022",
|
||||
"review_type": "module",
|
||||
"session_id": "WFS-auth-system",
|
||||
"metadata": {
|
||||
"created_at": "2025-01-25T14:30:22Z",
|
||||
"target_pattern": "src/auth/**",
|
||||
"resolved_files": [
|
||||
"src/auth/service.ts",
|
||||
"src/auth/validator.ts",
|
||||
"src/auth/middleware.ts"
|
||||
],
|
||||
"dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
|
||||
"max_iterations": 3
|
||||
},
|
||||
"phase": "parallel|aggregate|iterate|complete",
|
||||
"current_iteration": 1,
|
||||
"dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
|
||||
"selected_strategy": "comprehensive",
|
||||
"next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
|
||||
"severity_distribution": {
|
||||
"critical": 2,
|
||||
"high": 5,
|
||||
"medium": 12,
|
||||
"low": 8
|
||||
},
|
||||
"critical_files": [...],
|
||||
"iterations": [...],
|
||||
"completion_criteria": {...}
|
||||
}
|
||||
```
|
||||
|
||||

## Review Progress JSON

**Purpose**: Real-time dashboard updates via polling

```json
{
  "review_id": "review-20250125-143022",
  "last_update": "2025-01-25T14:35:10Z",
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "progress": {
    "parallel_review": {
      "total_dimensions": 7,
      "completed": 5,
      "in_progress": 2,
      "percent_complete": 71
    },
    "deep_dive": {
      "total_findings": 6,
      "analyzed": 2,
      "in_progress": 1,
      "percent_complete": 33
    }
  },
  "agent_status": [
    {
      "agent_type": "review-agent",
      "dimension": "security",
      "status": "completed",
      "started_at": "2025-01-25T14:30:00Z",
      "completed_at": "2025-01-25T15:15:00Z",
      "duration_ms": 2700000
    },
    {
      "agent_type": "deep-dive-agent",
      "finding_id": "sec-001-uuid",
      "status": "in_progress",
      "started_at": "2025-01-25T14:32:00Z"
    }
  ],
  "estimated_completion": "2025-01-25T16:00:00Z"
}
```

---

## Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json              # Orchestrator state machine (includes metadata)
├── review-progress.json           # Real-time progress for dashboard
├── dimensions/                    # Per-dimension results
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                    # Deep-dive results
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
└── reports/                       # Human-readable reports
    ├── security-analysis.md
    ├── security-cli-output.txt
    ├── deep-dive-1-{uuid}.md
    └── ...
```

## Session Context

```
.workflow/active/WFS-{session-id}/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
├── .summaries/
└── .review/                       # Review results (this command)
    └── (structure above)
```

---

## Output

- **Variables**: `sessionId`, `reviewId`, `resolvedFiles`, `reviewMode`, `outputDir`
- **Files**: `review-state.json`, `review-progress.json`

## Next Phase

Return to orchestrator, then auto-continue to [Phase 2: Parallel Review](02-parallel-review.md).

@@ -1,74 +0,0 @@

# Phase 3: Aggregation

> Source: Shared from `commands/workflow/review-session-cycle.md` + `commands/workflow/review-module-cycle.md` Phase 3

## Overview

Load all dimension results, calculate the severity distribution, identify cross-cutting concerns, and decide whether to enter iterative deep-dive (Phase 4) or proceed to completion (Phase 5).

## Execution Steps

### Step 3.1: Load Dimension Results

- Load all dimension JSON files from `{outputDir}/dimensions/`
- Parse each file following review-dimension-results-schema.json
- Handle missing files gracefully (log warning, skip)

### Step 3.2: Calculate Severity Distribution

- Count findings by severity level: critical, high, medium, low
- Store in the review-state.json `severity_distribution` field

### Step 3.3: Cross-Cutting Concern Detection

1. Files appearing in 3+ dimensions = **Critical Files**
2. Same issue pattern across dimensions = **Systemic Issue**
3. Severity clustering in specific files = **Hotspots**

### Step 3.4: Deep-Dive Selection

- All critical severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (prevent overwhelm)

### Step 3.5: Decision Logic

**Iteration Trigger**:
- Critical findings > 0 OR
- High findings > 5 OR
- Critical files count > 0

If any trigger condition is met, proceed to Phase 4 (Iterative Deep-Dive). Otherwise, skip to Phase 5 (Completion).

### Step 3.6: Update State

- Update review-state.json with aggregation results
- Update review-progress.json

**Phase 3 Orchestrator Responsibilities**:
- Load all dimension JSON files from dimensions/
- Calculate severity distribution: count by critical/high/medium/low
- Identify cross-cutting concerns: files in 3+ dimensions
- Select deep-dive findings: critical + high in critical files (max 5)
- Decision logic: iterate if critical > 0 OR high > 5 OR critical files exist
- Update review-state.json with aggregation results
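
A sketch of Steps 3.2-3.5 as one aggregation pass, assuming each dimension JSON exposes a `findings` array with `severity` and `file` fields (the exact shape lives in review-dimension-results-schema.json):

```javascript
// Illustrative aggregation: severity counts, critical files (3+ dimensions),
// and the Phase 4 iteration decision.
function aggregate(dimensionResults) {
  const severity = { critical: 0, high: 0, medium: 0, low: 0 };
  const fileDimensions = new Map(); // file → Set of dimensions it appears in

  for (const { dimension, findings } of dimensionResults) {
    for (const f of findings) {
      severity[f.severity] += 1;
      if (!fileDimensions.has(f.file)) fileDimensions.set(f.file, new Set());
      fileDimensions.get(f.file).add(dimension);
    }
  }

  const criticalFiles = [...fileDimensions.entries()]
    .filter(([, dims]) => dims.size >= 3)
    .map(([file, dims]) => ({ file, dimensions: [...dims] }));

  const shouldIterate =
    severity.critical > 0 || severity.high > 5 || criticalFiles.length > 0;

  return { severity, criticalFiles, shouldIterate };
}
```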

## Severity Assessment Reference

**Severity Levels**:
- **Critical**: Security vulnerabilities, data corruption risks, system-wide failures, authentication/authorization bypass
- **High**: Feature degradation, performance bottlenecks, architecture violations, significant technical debt
- **Medium**: Code smells, minor performance issues, style inconsistencies, maintainability concerns
- **Low**: Documentation gaps, minor refactoring opportunities, cosmetic issues

## Output

- Variables: severityDistribution, criticalFiles, deepDiveFindings, shouldIterate (boolean)
- State: review-state.json updated with aggregation results

## Next Phase

- If shouldIterate: [Phase 4: Iterative Deep-Dive](04-iterative-deep-dive.md)
- Else: [Phase 5: Review Completion](05-review-completion.md)

@@ -1,278 +0,0 @@

# Phase 4: Iterative Deep-Dive

> Source: Shared from `commands/workflow/review-session-cycle.md` + `commands/workflow/review-module-cycle.md` Phase 4

## Overview

Perform focused root cause analysis on critical findings. Select up to 5 findings per iteration, launch deep-dive agents, re-assess severity, and loop back to aggregation if needed.

## Prerequisites

- Phase 3 determined shouldIterate = true
- Available: severityDistribution, criticalFiles, deepDiveFindings

## Execution Steps

### Step 4.1: Check Iteration Limit

- Check `current_iteration` < `max_iterations` (default 3)
- If exceeded: log that the iteration limit was reached, skip to Phase 5
- Use `--max-iterations=0` to skip deep-dive entirely

### Step 4.2: Select Findings for Deep-Dive

**Selection Criteria**:
- All critical severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (prevent overwhelm)

**Selection algorithm**:
1. Collect all findings with severity = critical -> add to selection
2. If selection < 5: add high-severity findings from critical files (files in 3+ dimensions), sorted by dimension count descending
3. Cap at 5 total findings
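
The selection algorithm above as a sketch; finding objects are assumed to carry `severity` and `file` fields, and `criticalFiles` follows the Phase 3 aggregation output:

```javascript
// Illustrative deep-dive selection: all criticals first, then high-severity
// findings from critical files, capped at 5 per iteration.
function selectDeepDiveFindings(findings, criticalFiles) {
  const selection = findings.filter(f => f.severity === 'critical');
  const byDimCount = new Map(criticalFiles.map(c => [c.file, c.dimensions.length]));

  const highInCritical = findings
    .filter(f => f.severity === 'high' && byDimCount.has(f.file))
    .sort((a, b) => byDimCount.get(b.file) - byDimCount.get(a.file))
    .slice(0, 3); // top 3 high-severity findings in critical files

  for (const f of highInCritical) {
    if (selection.length >= 5) break;
    selection.push(f);
  }
  return selection.slice(0, 5); // hard cap at 5 findings per iteration
}
```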

### Step 4.3: Launch Deep-Dive Agents

- Launch cli-explore-agent for each selected finding
- Use Dependency Map + Deep Scan mode
- Each agent runs independently (can be launched in parallel)
- Tool priority: gemini -> qwen -> codex (fallback on error/timeout)

### Step 4.4: Collect Results

- Parse iteration JSON files from `{outputDir}/iterations/iteration-{N}-finding-{uuid}.json`
- Extract reassessed severities from each result
- Collect remediation plans and impact assessments
- Handle agent failures gracefully (log warning, mark finding as unanalyzed)

### Step 4.5: Re-Aggregate

- Update severity distribution based on reassessments
- Record the iteration in the review-state.json `iterations[]` array:

```json
{
  "iteration": 1,
  "findings_analyzed": ["uuid-1", "uuid-2"],
  "findings_resolved": 1,
  "findings_escalated": 1,
  "severity_change": {
    "before": {"critical": 2, "high": 5, "medium": 12, "low": 8},
    "after": {"critical": 1, "high": 6, "medium": 12, "low": 8}
  },
  "timestamp": "2025-01-25T14:30:00Z"
}
```

- Increment `current_iteration` in review-state.json
- Re-evaluate decision logic: iterate if critical > 0 OR high > 5 OR critical files exist
- Loop back to the Phase 3 aggregation check if conditions are still met

## Deep-Dive Agent Invocation Template

### Module Mode

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Deep-dive analysis for critical finding: ${findingTitle} via Dependency Map + Deep Scan`,
  prompt=`
## Task Objective
Perform focused root cause analysis using Dependency Map mode (for impact analysis) + Deep Scan mode (for semantic understanding) to generate comprehensive remediation plan for critical ${dimension} issue

## Analysis Mode Selection
Use **Dependency Map mode** first to understand dependencies:
- Build dependency graph around ${file} to identify affected components
- Detect circular dependencies or tight coupling related to this finding
- Calculate change risk scores for remediation impact

Then apply **Deep Scan mode** for semantic analysis:
- Understand design intent and architectural context
- Identify non-standard patterns or implicit dependencies
- Extract remediation insights from code structure

## Finding Context
- Finding ID: ${findingId}
- Original Dimension: ${dimension}
- Title: ${findingTitle}
- File: ${file}:${line}
- Severity: ${severity}
- Category: ${category}
- Original Description: ${description}
- Iteration: ${iteration}

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read original finding: ${dimensionJsonPath}
2. Read affected file: ${file}
3. Identify related code: bash(grep -r "import.*${basename(file)}" ${projectDir}/src --include="*.ts")
4. Read test files: bash(find ${projectDir}/tests -name "*${basename(file, '.ts')}*" -type f)
5. Execute: cat ~/.ccw/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)

## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Template: ~/.ccw/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5, follow schema exactly

1. Deep-Dive Results JSON: ${outputDir}/iterations/iteration-${iteration}-finding-${findingId}.json

**⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

Root structure MUST be array: \`[{ ... }]\` NOT \`{ ... }\`

Required top-level fields:
- finding_id, dimension, iteration, analysis_timestamp
- cli_tool_used, model, analysis_duration_ms
- original_finding, root_cause, remediation_plan
- impact_assessment, reassessed_severity, confidence_score, cross_references

All nested objects must follow schema exactly - read schema for field names

2. Analysis Report: ${outputDir}/reports/deep-dive-${iteration}-${findingId}.md
   - Detailed root cause analysis
   - Step-by-step remediation plan
   - Impact assessment and rollback strategy

## Success Criteria
- [ ] Schema obtained via cat review-deep-dive-results-schema.json
- [ ] Root cause clearly identified with supporting evidence
- [ ] Remediation plan is step-by-step actionable with exact file:line references
- [ ] Each step includes specific commands and validation tests
- [ ] Impact fully assessed (files, tests, breaking changes, dependencies)
- [ ] Severity re-evaluation justified with evidence
- [ ] Confidence score accurately reflects certainty of analysis
- [ ] JSON output follows schema exactly
- [ ] References include project-specific and external documentation
`
)
```

### Session Mode

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Deep-dive analysis for critical finding: ${findingTitle} via Dependency Map + Deep Scan`,
  prompt=`
## Task Objective
Perform focused root cause analysis using Dependency Map mode (for impact analysis) + Deep Scan mode (for semantic understanding) to generate comprehensive remediation plan for critical ${dimension} issue

## Analysis Mode Selection
Use **Dependency Map mode** first to understand dependencies:
- Build dependency graph around ${file} to identify affected components
- Detect circular dependencies or tight coupling related to this finding
- Calculate change risk scores for remediation impact

Then apply **Deep Scan mode** for semantic analysis:
- Understand design intent and architectural context
- Identify non-standard patterns or implicit dependencies
- Extract remediation insights from code structure

## Finding Context
- Finding ID: ${findingId}
- Original Dimension: ${dimension}
- Title: ${findingTitle}
- File: ${file}:${line}
- Severity: ${severity}
- Category: ${category}
- Original Description: ${description}
- Iteration: ${iteration}

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read original finding: ${dimensionJsonPath}
2. Read affected file: ${file}
3. Identify related code: bash(grep -r "import.*${basename(file)}" ${workflowDir}/src --include="*.ts")
4. Read test files: bash(find ${workflowDir}/tests -name "*${basename(file, '.ts')}*" -type f)
5. Execute: cat ~/.ccw/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)

## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Template: ~/.ccw/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt
- Timeout: 2400000ms (40 minutes)
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5, follow schema exactly

1. Deep-Dive Results JSON: ${outputDir}/iterations/iteration-${iteration}-finding-${findingId}.json

**⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

Root structure MUST be array: \`[{ ... }]\` NOT \`{ ... }\`

Required top-level fields:
- finding_id, dimension, iteration, analysis_timestamp
- cli_tool_used, model, analysis_duration_ms
- original_finding, root_cause, remediation_plan
- impact_assessment, reassessed_severity, confidence_score, cross_references

All nested objects must follow schema exactly - read schema for field names

2. Analysis Report: ${outputDir}/reports/deep-dive-${iteration}-${findingId}.md
   - Detailed root cause analysis
   - Step-by-step remediation plan
   - Impact assessment and rollback strategy

## Success Criteria
- [ ] Schema obtained via cat review-deep-dive-results-schema.json
- [ ] Root cause clearly identified with supporting evidence
- [ ] Remediation plan is step-by-step actionable with exact file:line references
- [ ] Each step includes specific commands and validation tests
- [ ] Impact fully assessed (files, tests, breaking changes, dependencies)
- [ ] Severity re-evaluation justified with evidence
- [ ] Confidence score accurately reflects certainty of analysis
- [ ] JSON output follows schema exactly
- [ ] References include project-specific and external documentation
`
)
```

## Key Differences Between Modes

| Aspect | Module Mode | Session Mode |
|--------|-------------|--------------|
| MANDATORY STEP 3 | `${projectDir}/src` | `${workflowDir}/src` |
| MANDATORY STEP 4 | `${projectDir}/tests` | `${workflowDir}/tests` |
| CLI Timeout | (not specified) | 2400000ms (40 minutes) |

## Iteration Control

**Phase 4 Orchestrator Responsibilities**:
- Check iteration count < max_iterations (default 3)
- Launch deep-dive agents for selected findings
- Collect remediation plans and re-assessed severities
- Update severity distribution based on re-assessments
- Record iteration in review-state.json
- Loop back to aggregation if critical/high findings remain

**Termination Conditions** (any one stops iteration):
1. `current_iteration` >= `max_iterations`
2. No critical findings remaining AND high findings <= 5 AND no critical files
3. No findings selected for deep-dive (all resolved or downgraded)

**State Updates Per Iteration**:
- `review-state.json`: Increment `current_iteration`, append to `iterations[]`, update `severity_distribution`, set `next_action`
- `review-progress.json`: Update `deep_dive.analyzed` count, `deep_dive.percent_complete`, `phase`
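
A sketch of the control loop these conditions imply. `aggregate` and `selectDeepDiveFindings` refer to the Phase 3/4 sketches earlier in this document, while `runDeepDive` and `allFindings` are placeholders for the agent launches and a findings flattener:

```javascript
// Illustrative Phase 3 ↔ Phase 4 loop with the three termination conditions.
async function deepDiveLoop(state, dimensionResults, runDeepDive) {
  while (state.current_iteration < state.metadata.max_iterations) { // condition 1
    const { criticalFiles, shouldIterate } = aggregate(dimensionResults);
    if (!shouldIterate) break; // condition 2: no criticals, high ≤ 5, no critical files

    const selected = selectDeepDiveFindings(allFindings(dimensionResults), criticalFiles);
    if (selected.length === 0) break; // condition 3: nothing left to analyze

    await runDeepDive(selected, state.current_iteration + 1);
    state.current_iteration += 1;
  }
}
```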

## Output

- Files: `iterations/iteration-{N}-finding-{uuid}.json`, `reports/deep-dive-{N}-{uuid}.md`
- State: review-state.json `iterations[]` updated
- Decision: re-enter Phase 3 aggregation or proceed to Phase 5

## Next Phase

- If critical findings remain AND iterations < max: loop to [Phase 3: Aggregation](03-aggregation.md)
- Else: [Phase 5: Review Completion](05-review-completion.md)

@@ -1,176 +0,0 @@

# Phase 5: Review Completion

> Source: Shared from `commands/workflow/review-session-cycle.md` + `commands/workflow/review-module-cycle.md` Phase 5

## Overview

Finalize review state, generate completion statistics, and optionally prompt for the automated fix pipeline.

## Execution Steps

### Step 5.1: Finalize State

**Phase 5 Orchestrator Responsibilities**:
- Finalize review-progress.json with completion statistics
- Update review-state.json with completion_time and phase=complete
- TaskUpdate completion: mark all tasks done

**review-state.json updates**:
```json
{
  "phase": "complete",
  "completion_time": "2025-01-25T15:00:00Z",
  "next_action": "none"
}
```

**review-progress.json updates**:
```json
{
  "phase": "complete",
  "overall_percent": 100,
  "completion_time": "2025-01-25T15:00:00Z",
  "final_severity_distribution": {
    "critical": 0,
    "high": 3,
    "medium": 12,
    "low": 8
  }
}
```

### Step 5.2: Evaluate Completion Status

**Full Success**:
- All dimensions reviewed
- Critical findings = 0
- High findings <= 5
- Action: Generate final report, mark phase=complete

**Partial Success**:
- All dimensions reviewed
- Max iterations reached
- Critical/high findings remain
- Action: Generate report with warnings, recommend follow-up
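
A sketch of that evaluation; the thresholds mirror the criteria above, and the returned status strings are illustrative:

```javascript
// Illustrative completion check: full success vs. partial success.
function completionStatus(severity, currentIteration, maxIterations) {
  const fullSuccess = severity.critical === 0 && severity.high <= 5;
  if (fullSuccess) return 'full_success';
  if (currentIteration >= maxIterations) return 'partial_success'; // report with warnings
  return 'in_progress';
}
```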

### Step 5.3: TaskUpdate Completion

```javascript
TodoWrite({
  todos: [
    { content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Initializing" },
    { content: "Phase 2: Parallel Reviews (7 dimensions)", status: "completed", activeForm: "Reviewing" },
    { content: " -> Security review", status: "completed", activeForm: "Analyzing security" },
    // ... other dimensions as sub-items
    { content: "Phase 3: Aggregation", status: "completed", activeForm: "Aggregating" },
    { content: "Phase 4: Deep-dive", status: "completed", activeForm: "Deep-diving" },
    { content: "Phase 5: Completion", status: "completed", activeForm: "Completing" }
  ]
});
```

### Step 5.4: Fix Pipeline Prompt

- Ask the user: "Run automated fixes on findings? [Y/n]"
- If confirmed (or the `--fix` flag was passed): continue to Phase 6
- Display a summary of findings by severity:

```
Review Complete - Summary:
Critical: 0  High: 3  Medium: 12  Low: 8
Total findings: 23
Dimensions reviewed: 7/7
Iterations completed: 2/3

Run automated fixes on findings? [Y/n]
```

## Error Handling Reference

### Phase-Level Error Matrix

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Invalid path pattern / Session not found | Yes | Error and exit |
| Phase 1 | No files matched / No completed tasks | Yes | Error and exit |
| Phase 1 | Files not readable / No changed files | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |

### CLI Fallback Chain

Gemini -> Qwen -> Codex -> degraded mode

### Fallback Triggers

1. HTTP 429, 5xx errors, connection timeout
2. Invalid JSON output (parse error, missing required fields)
3. Confidence score < 0.4
4. Analysis too brief (< 100 words in report)

### Fallback Behavior

- On trigger: retry with the next tool in the chain
- After Codex fails: enter degraded mode (skip analysis, log error)
- Degraded mode: continue workflow with available results

## Best Practices

1. **Start Specific**: Begin with focused module patterns for faster results
2. **Expand Gradually**: Add more modules based on initial findings
3. **Use Glob Wisely**: `src/auth/**` is more efficient than `src/**`, which pulls in many irrelevant files
4. **Trust Aggregation Logic**: Auto-selection is based on proven heuristics
5. **Monitor Logs**: Check the reports/ directory for CLI analysis insights

## Related Commands

### View Review Progress

Use `ccw view` to open the review dashboard in a browser:

```bash
ccw view
```

### Automated Fix Workflow

After completing a review, use the generated findings JSON for automated fixing:

```bash
# Step 1: Complete review (this command)
/workflow:review-module-cycle src/auth/**
# OR
/workflow:review-session-cycle

# Step 2: Run automated fixes using dimension findings
/workflow:review-cycle-fix .workflow/active/WFS-{session-id}/.review/
```

See `/workflow:review-cycle-fix` for automated fixing with smart grouping, parallel execution, and test verification.

## Output

- State: review-state.json (phase=complete), review-progress.json (final)
- Decision: fix pipeline or end

## Next Phase

- If fix requested: [Phase 6: Fix Discovery & Batching](06-fix-discovery-batching.md)
- Else: Workflow complete

@@ -1,238 +0,0 @@
# Phase 6: Fix Discovery & Batching

> Source: `commands/workflow/review-cycle-fix.md` Phase 1 + Phase 1.5

## Overview

Validate the fix input source, create the fix session structure, and perform intelligent grouping of findings into batches for parallel planning.

## Quick Start

```bash
# Fix from exported findings file (session-based path)
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-123/.review/fix-export-1706184622000.json")

# Fix from review directory (auto-discovers latest export)
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-123/.review/")

# Resume interrupted fix session
Skill(skill="review-cycle", args="--fix --resume")

# Custom max retry attempts per finding
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-123/.review/ --max-iterations=5")

# Custom batch size for parallel planning (default: 5 findings per batch)
Skill(skill="review-cycle", args="--fix .workflow/active/WFS-123/.review/ --batch-size=3")
```

**Fix Source**: Exported findings from review cycle dashboard
**Output Directory**: `{review-dir}/fixes/{fix-session-id}/` (within session .review/)
**Default Max Iterations**: 3 (per finding, adjustable)
**Default Batch Size**: 5 (findings per planning batch, adjustable)
**Max Parallel Agents**: 10 (concurrent planning agents)
**CLI Tools**: @cli-planning-agent (planning), @cli-execute-agent (fixing)
## Core Concept

Automated fix orchestrator with a **parallel planning architecture**: multiple AI agents analyze findings concurrently in batches, then coordinate parallel/serial execution. It generates a fix timeline with intelligent grouping and dependency analysis, and executes fixes with conservative test verification.

**Fix Process**:
- **Batching Phase (1.5)**: Orchestrator groups findings by file+dimension similarity, creates batches
- **Planning Phase (2)**: Up to 10 agents plan batches in parallel and generate partial plans; the orchestrator aggregates them
- **Execution Phase (3)**: Main orchestrator coordinates agents per aggregated timeline stages
- **Parallel Efficiency**: Customizable batch size (default: 5), MAX_PARALLEL=10 agents
- **No rigid structure**: Adapts to task requirements, not bound to a fixed JSON format

**vs Manual Fixing**:
- **Manual**: Developer reviews findings one by one, fixes sequentially
- **Automated**: AI groups related issues, multiple agents plan in parallel, and fixes execute in optimal parallel/serial order with automatic test verification

### Value Proposition

1. **Parallel Planning**: Multiple agents analyze findings concurrently, reducing planning time for large batches (10+ findings)
2. **Intelligent Batching**: Semantic similarity grouping ensures related findings are analyzed together
3. **Multi-stage Coordination**: Supports complex parallel + serial execution with cross-batch dependency management
4. **Conservative Safety**: Mandatory test verification with automatic rollback on failure
5. **Resume Support**: Checkpoint-based recovery for interrupted sessions

### Orchestrator Boundary (CRITICAL)

- **ONLY command** for automated review finding fixes
- Manages: Intelligent batching (Phase 1.5), parallel planning coordination (launch N agents), plan aggregation (merge partial plans, resolve cross-batch dependencies), stage-based execution scheduling, agent scheduling, progress tracking
- Delegates: Batch planning to @cli-planning-agent, fix execution to @cli-execute-agent
## Fix Process Overview

```
Phase 1: Discovery & Initialization
  └─ Validate export file, create fix session structure, initialize state files

Phase 1.5: Intelligent Grouping & Batching
  ├─ Analyze findings metadata (file, dimension, severity)
  ├─ Group by semantic similarity (file proximity + dimension affinity)
  ├─ Create batches respecting --batch-size (default: 5)
  └─ Output: Finding batches for parallel planning

Phase 2: Parallel Planning Coordination (@cli-planning-agent × N)
  ├─ Launch MAX_PARALLEL planning agents concurrently (default: 10)
  ├─ Each agent processes one batch:
  │   ├─ Analyze findings for patterns and dependencies
  │   ├─ Group by file + dimension + root cause similarity
  │   ├─ Determine execution strategy (parallel/serial/hybrid)
  │   ├─ Generate fix timeline with stages
  │   └─ Output: partial-plan-{batch-id}.json
  ├─ Collect results from all agents
  └─ Aggregate: Merge partial plans → fix-plan.json (resolve cross-batch dependencies)

Phase 3: Execution Orchestration (Stage-based)
  For each timeline stage:
  ├─ Load groups for this stage
  ├─ If parallel: Launch all group agents simultaneously
  ├─ If serial: Execute groups sequentially
  ├─ Each agent:
  │   ├─ Analyze code context
  │   ├─ Apply fix per strategy
  │   ├─ Run affected tests
  │   ├─ On test failure: Rollback, retry up to max_iterations
  │   └─ On success: Commit, update fix-progress-{N}.json
  └─ Advance to next stage

Phase 4: Completion & Aggregation
  └─ Aggregate results → Generate fix-summary.md → Update history → Output summary

Phase 5: Session Completion (Optional)
  └─ If all fixes successful → Prompt to complete workflow session
```
## Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Input validation, session management, intelligent batching (Phase 1.5), parallel planning coordination (launch N agents), plan aggregation (merge partial plans, resolve cross-batch dependencies), stage-based execution scheduling, progress tracking, result aggregation |
| **@cli-planning-agent** | Batch findings analysis, intelligent grouping (file+dimension+root cause), execution strategy determination (parallel/serial/hybrid), timeline generation with dependency mapping, partial plan output |
| **@cli-execute-agent** | Fix execution per group, code context analysis, Edit tool operations, test verification, git rollback on failure, completion JSON generation |
## Parallel Planning Architecture

**Batch Processing Strategy**:

| Phase | Agent Count | Input | Output | Purpose |
|-------|-------------|-------|--------|---------|
| **Batching (1.5)** | Orchestrator | All findings | Finding batches | Semantic grouping by file+dimension, respecting --batch-size |
| **Planning (2)** | N agents (≤10) | 1 batch each | partial-plan-{batch-id}.json | Analyze batch in parallel, generate execution groups and timeline |
| **Aggregation (2)** | Orchestrator | All partial plans | fix-plan.json | Merge timelines, resolve cross-batch dependencies |
| **Execution (3)** | M agents (dynamic) | 1 group each | fix-progress-{N}.json | Execute fixes per aggregated plan with test verification |

**Benefits**:
- **Speed**: N agents plan concurrently, reducing planning time for large batches
- **Scalability**: MAX_PARALLEL=10 prevents resource exhaustion
- **Flexibility**: Batch size customizable via --batch-size (default: 5)
- **Isolation**: Each planning agent focuses on related findings (semantic grouping)
- **Reusable**: Aggregated plan can be re-executed without re-planning
## Intelligent Grouping Strategy

**Three-Level Grouping**:

```javascript
// Level 1: Primary grouping by file + dimension
{file: "auth.ts", dimension: "security"}          → Group A
{file: "auth.ts", dimension: "quality"}           → Group B
{file: "query-builder.ts", dimension: "security"} → Group C

// Level 2: Secondary grouping by root cause similarity
Group A findings → Semantic similarity analysis (threshold 0.7)
  → Sub-group A1: "missing-input-validation" (findings 1, 2)
  → Sub-group A2: "insecure-crypto" (finding 3)

// Level 3: Dependency analysis
Sub-group A1 creates validation utilities
Sub-group C4 depends on those utilities
  → A1 must execute before C4 (serial stage dependency)
```

**Similarity Computation**:
- Combine: `description + recommendation + category`
- Vectorize: TF-IDF or LLM embedding
- Cluster: Greedy algorithm with cosine similarity > 0.7 (sketched below)
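A minimal sketch of that greedy clustering step; `vectorize` is assumed to map text to a numeric vector (TF-IDF or embedding, per the list above) and is not defined by this skill:

```javascript
// Hedged sketch: greedy clustering by cosine similarity, per the strategy above.
function cosine(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) { dot += a[i] * b[i]; na += a[i] ** 2; nb += b[i] ** 2; }
  return dot / (Math.sqrt(na) * Math.sqrt(nb) || 1);
}

function clusterFindings(findings, threshold = 0.7) {
  const clusters = []; // each: { centroid, members }
  for (const f of findings) {
    // Combine description + recommendation + category, then vectorize (assumed helper)
    const vec = vectorize(`${f.description} ${f.recommendation} ${f.category}`);
    const home = clusters.find(c => cosine(c.centroid, vec) > threshold);
    if (home) home.members.push(f);  // greedy: first cluster over threshold wins
    else clusters.push({ centroid: vec, members: [f] });
  }
  return clusters.map(c => c.members);
}
```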
## Phase 1: Discovery & Initialization (Orchestrator)

**Phase 1 Orchestrator Responsibilities**:
- Input validation: Check that the export file exists and is valid JSON
- Auto-discovery: If a review-dir is provided, find the latest `*-fix-export.json`
- Session creation: Generate fix-session-id (`fix-{timestamp}`)
- Directory structure: Create `{review-dir}/fixes/{fix-session-id}/` with subdirectories
- State files: Initialize active-fix-session.json (session marker)
- TodoWrite initialization: Set up 5-phase tracking (including Phase 1.5)

A sketch of the discovery and resume checks follows this list.
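A minimal sketch, assuming Glob returns a path array and using the `fix-export-{timestamp}.json` naming from the file structure below; the sorting and marker logic is illustrative, not the command's actual implementation:

```javascript
// Hedged sketch: discover the latest export and detect a resumable session.
// Glob/Read are this skill's tools; the usage shape here is an assumption.
function discoverExport(input) {
  if (input.endsWith('.json')) return input;            // explicit export file
  // Review directory: pick the newest fix-export-*.json by embedded timestamp
  const candidates = Glob(`${input}/fix-export-*.json`);
  if (candidates.length === 0) throw new Error('No fix export found in review dir');
  return candidates.sort().pop();                       // timestamps sort lexically
}

function resumeOrCreateSession(reviewDir) {
  // --resume: an active-fix-session.json marker means a session was interrupted
  const marker = Glob(`${reviewDir}/fixes/*/active-fix-session.json`)[0];
  if (marker) return JSON.parse(Read(marker)).fix_session_id;
  return `fix-${Date.now()}`;                           // fresh fix-session-id
}
```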
## Phase 1.5: Intelligent Grouping & Batching (Orchestrator)

- Load all findings metadata (id, file, dimension, severity, title)
- Semantic similarity analysis:
  - Primary: Group by file proximity (same file or related modules)
  - Secondary: Group by dimension affinity (same review dimension)
  - Tertiary: Analyze title/description similarity (root cause clustering)
- Create batches respecting --batch-size (default: 5 findings per batch)
- Balance workload: Distribute high-severity findings across batches
- Output: Array of finding batches for parallel planning
```javascript
// Load findings
const findings = JSON.parse(Read(exportFile));
const batchSize = flags.batchSize || 5;

// Semantic similarity analysis: group by file+dimension
// (this sketch covers the primary grouping only; root-cause clustering and
// severity balancing described above are refined per batch by planning agents)
const batches = [];
const grouped = new Map(); // key: "${file}:${dimension}"

for (const finding of findings) {
  const key = `${finding.file || 'unknown'}:${finding.dimension || 'general'}`;
  if (!grouped.has(key)) grouped.set(key, []);
  grouped.get(key).push(finding);
}

// Create batches respecting batchSize
for (const [key, group] of grouped) {
  while (group.length > 0) {
    const batch = group.splice(0, batchSize);
    batches.push({
      batch_id: batches.length + 1,
      findings: batch,
      metadata: { primary_file: batch[0].file, primary_dimension: batch[0].dimension }
    });
  }
}

console.log(`Created ${batches.length} batches (up to ${batchSize} findings per batch)`);
```
## Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── fix-export-{timestamp}.json        # Exported findings (input)
└── fixes/{fix-session-id}/
    ├── partial-plan-1.json            # Batch 1 partial plan (planning agent 1 output)
    ├── partial-plan-2.json            # Batch 2 partial plan (planning agent 2 output)
    ├── partial-plan-N.json            # Batch N partial plan (planning agent N output)
    ├── fix-plan.json                  # Aggregated execution plan (orchestrator merges partials)
    ├── fix-progress-1.json            # Group 1 progress (planning agent init → agent updates)
    ├── fix-progress-2.json            # Group 2 progress (planning agent init → agent updates)
    ├── fix-progress-3.json            # Group 3 progress (planning agent init → agent updates)
    ├── fix-summary.md                 # Final report (orchestrator generates)
    ├── active-fix-session.json        # Active session marker
    └── fix-history.json               # All sessions history
```

**File Producers**:
- **Orchestrator**: Batches findings (Phase 1.5), aggregates partial plans → `fix-plan.json` (Phase 2), launches parallel planning agents
- **Planning Agents (N)**: Each outputs `partial-plan-{batch-id}.json` + initializes `fix-progress-*.json` for assigned groups
- **Execution Agents (M)**: Update assigned `fix-progress-{N}.json` in real-time
## Output

- Variables: batches (array), fixSessionId, sessionDir
- Files: active-fix-session.json, directory structure created

## Next Phase

Return to orchestrator, then auto-continue to [Phase 7: Fix Parallel Planning](07-fix-parallel-planning.md).
@@ -1,199 +0,0 @@
# Phase 7: Fix Parallel Planning

> Source: `commands/workflow/review-cycle-fix.md` Phase 2

## Overview

Launch N planning agents (up to MAX_PARALLEL=10) to analyze finding batches concurrently. Each agent outputs a partial plan. The orchestrator aggregates the partial plans into a unified fix-plan.json.

## Execution Strategy Determination

**Strategy Types**:

| Strategy | When to Use | Stage Structure |
|----------|-------------|-----------------|
| **Parallel** | All groups independent, different files | Single stage, all groups in parallel |
| **Serial** | Strong dependencies, shared resources | Multiple stages, one group per stage |
| **Hybrid** | Mixed dependencies | Multiple stages, parallel within stages |

**Dependency Detection** (a sketch of the shared-file check follows this list):
- Shared file modifications
- Utility creation + usage patterns
- Test dependency chains
- Risk level clustering (high-risk groups isolated)
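A minimal sketch of the first check. It assumes each group lists the files it will modify in a `files` array, which is an assumption about the plan schema, not a documented field:

```javascript
// Hedged sketch: detect shared-file conflicts between groups so the planner
// can serialize them into separate stages.
function detectConflicts(groups) {
  const conflicts = []; // pairs of group_ids that must not run in parallel
  for (let i = 0; i < groups.length; i++) {
    for (let j = i + 1; j < groups.length; j++) {
      const shared = groups[i].files.filter(f => groups[j].files.includes(f));
      if (shared.length > 0) {
        conflicts.push({ a: groups[i].group_id, b: groups[j].group_id, shared });
      }
    }
  }
  return conflicts; // any conflicting pair → place in different (serial) stages
}
```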
## Phase 2: Parallel Planning Coordination (Orchestrator)

```javascript
const MAX_PARALLEL = 10;
const partialPlans = [];

// Process batches in chunks of MAX_PARALLEL
for (let i = 0; i < batches.length; i += MAX_PARALLEL) {
  const chunk = batches.slice(i, i + MAX_PARALLEL);
  const taskIds = [];

  // Launch agents in parallel (run_in_background=true)
  for (const batch of chunk) {
    const taskId = Task({
      subagent_type: "cli-planning-agent",
      run_in_background: true,
      description: `Plan batch ${batch.batch_id}: ${batch.findings.length} findings`,
      prompt: planningPrompt(batch) // See Planning Agent template below
    });
    taskIds.push({ taskId, batch });
  }

  console.log(`Launched ${taskIds.length} planning agents...`);

  // Collect results from this chunk (blocking)
  for (const { taskId, batch } of taskIds) {
    const result = TaskOutput({ task_id: taskId, block: true });
    const partialPlan = JSON.parse(Read(`${sessionDir}/partial-plan-${batch.batch_id}.json`));
    partialPlans.push(partialPlan);
    updateTodo(`Batch ${batch.batch_id}`, 'completed');
  }
}

// Aggregate partial plans → fix-plan.json
const aggregatedPlan = { groups: [], timeline: [] }; // accumulated plan across all batches
let groupCounter = 1;
const groupIdMap = new Map();

for (const partial of partialPlans) {
  for (const group of partial.groups) {
    const newGroupId = `G${groupCounter}`;
    groupIdMap.set(`${partial.batch_id}:${group.group_id}`, newGroupId);
    aggregatedPlan.groups.push({ ...group, group_id: newGroupId, progress_file: `fix-progress-${groupCounter}.json` });
    groupCounter++;
  }
}

// Merge timelines, resolve cross-batch conflicts (shared files → serialize)
let stageCounter = 1;
for (const partial of partialPlans) {
  for (const stage of partial.timeline) {
    aggregatedPlan.timeline.push({
      ...stage, stage_id: stageCounter,
      groups: stage.groups.map(gid => groupIdMap.get(`${partial.batch_id}:${gid}`))
    });
    stageCounter++;
  }
}

// Write aggregated plan + initialize progress files
Write(`${sessionDir}/fix-plan.json`, JSON.stringify(aggregatedPlan, null, 2));
for (let i = 1; i <= aggregatedPlan.groups.length; i++) {
  Write(`${sessionDir}/fix-progress-${i}.json`, JSON.stringify(initProgressFile(aggregatedPlan.groups[i-1]), null, 2));
}
```
## Planning Agent Template (Batch Mode)

```javascript
Task({
  subagent_type: "cli-planning-agent",
  run_in_background: true,
  description: `Plan batch ${batch.batch_id}: ${batch.findings.length} findings`,
  prompt: `
## Task Objective
Analyze code review findings in batch ${batch.batch_id} and generate a **partial** execution plan.

## Input Data
Review Session: ${reviewId}
Fix Session ID: ${fixSessionId}
Batch ID: ${batch.batch_id}
Batch Findings: ${batch.findings.length}

Findings:
${JSON.stringify(batch.findings, null, 2)}

Project Context:
- Structure: ${projectStructure}
- Test Framework: ${testFramework}
- Git Status: ${gitStatus}

## Output Requirements

### 1. partial-plan-${batch.batch_id}.json
Generate a partial execution plan with this structure:
{
  "batch_id": ${batch.batch_id},
  "groups": [...],    // Groups created from batch findings (use local IDs: G1, G2, ...)
  "timeline": [...],  // Local timeline for this batch only
  "metadata": {
    "findings_count": ${batch.findings.length},
    "groups_count": N,
    "created_at": "ISO-8601-timestamp"
  }
}

**Key Generation Rules**:
- **Groups**: Create groups with local IDs (G1, G2, ...) using intelligent grouping (file+dimension+root cause)
- **Timeline**: Define stages for this batch only (local dependencies within batch)
- **Progress Files**: DO NOT generate fix-progress-*.json here (orchestrator handles after aggregation)

## Analysis Requirements

### Intelligent Grouping Strategy
Group findings using these criteria (in priority order):

1. **File Proximity**: Findings in same file or related files
2. **Dimension Affinity**: Same dimension (security, performance, etc.)
3. **Root Cause Similarity**: Similar underlying issues
4. **Fix Approach Commonality**: Can be fixed with similar approach

**Grouping Guidelines**:
- Optimal group size: 2-5 findings per group
- Avoid cross-cutting concerns in same group
- Consider test isolation (different test suites → different groups)
- Balance workload across groups for parallel execution

### Execution Strategy Determination (Local Only)

**Parallel Mode**: Use when groups are independent, no shared files
**Serial Mode**: Use when groups have dependencies or shared resources
**Hybrid Mode**: Use for mixed dependency graphs (recommended for most cases)

**Dependency Analysis**:
- Identify shared files between groups
- Detect test dependency chains
- Evaluate risk of concurrent modifications

### Risk Assessment

For each group, evaluate:
- **Complexity**: Based on code structure, file size, existing tests
- **Impact Scope**: Number of files affected, API surface changes
- **Rollback Feasibility**: Ease of reverting changes if tests fail

### Test Strategy

For each group, determine:
- **Test Pattern**: Glob pattern matching affected tests
- **Pass Criteria**: All tests must pass (100% pass rate)
- **Test Command**: Infer from project (package.json, pytest.ini, etc.)

## Output Files

Write to ${sessionDir}:
- ./partial-plan-${batch.batch_id}.json

## Quality Checklist

Before finalizing outputs:
- All batch findings assigned to exactly one group
- Group dependencies (within batch) correctly identified
- Timeline stages respect local dependencies
- Test patterns are valid and specific
- Risk assessments are realistic
`
})
```
## Output

- Files: `partial-plan-{batch-id}.json` (per agent), `fix-plan.json` (aggregated), `fix-progress-*.json` (initialized)
- TaskUpdate: Mark Phase 7 completed, Phase 8 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase 8: Fix Execution](08-fix-execution.md).
@@ -1,221 +0,0 @@
# Phase 8: Fix Execution

> Source: `commands/workflow/review-cycle-fix.md` Phase 3

## Overview

Stage-based execution using the aggregated fix-plan.json timeline. Each group gets a cli-execute-agent that applies fixes, runs tests, and commits on success or rolls back on failure.

## Conservative Test Verification

**Test Strategy** (per fix):
```javascript
// 1. Identify affected tests
const testPattern = identifyTestPattern(finding.file);
// e.g., "tests/auth/**/*.test.*" for src/auth/service.ts

// 2. Run tests
const result = await runTests(testPattern);

// 3. Evaluate (passRate expressed as a percentage, 0-100)
if (result.passRate < 100) {
  // Rollback
  await gitCheckout(finding.file);

  // Retry with failure context
  if (attempts < maxIterations) {
    const fixContext = analyzeFailure(result.stderr);
    regenerateFix(finding, fixContext);
    retry();
  } else {
    markFailed(finding.id);
  }
} else {
  // Commit
  await gitCommit(`Fix: ${finding.title} [${finding.id}]`);
  markFixed(finding.id);
}
```

**Pass Criteria**: 100% test pass rate (no partial fixes)

A sketch of `identifyTestPattern` follows.
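`identifyTestPattern` is referenced above but never defined in this skill; a minimal sketch under the src-to-tests mirroring convention hinted at in the comment, purely illustrative:

```javascript
// Hedged sketch: map a source path to a test glob by mirroring src/ under tests/.
// Real projects vary; the planning agent's fix_strategy.test_pattern is authoritative.
function identifyTestPattern(sourceFile) {
  // "src/auth/service.ts" → ["src", "auth", "service.ts"]
  const parts = sourceFile.split('/');
  if (parts[0] === 'src' && parts.length > 2) {
    const moduleDir = parts.slice(1, -1).join('/'); // "auth"
    return `tests/${moduleDir}/**/*.test.*`;        // "tests/auth/**/*.test.*"
  }
  return 'tests/**/*.test.*'; // fallback: run the whole suite
}
```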
## Phase 3: Execution Orchestration (Orchestrator)

- Load fix-plan.json timeline stages
- For each stage:
  - If parallel mode: Launch all group agents via `Promise.all()`
  - If serial mode: Execute groups sequentially with `await`
  - Assign agent IDs (agents update their fix-progress-{N}.json)
- Handle agent failures gracefully (mark group as failed, continue)
- Advance to the next stage only when the current stage is complete

A sketch of this stage loop follows; the full per-group prompt is in the template below.
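A minimal sketch, assuming each timeline stage carries a `mode` field and that `runGroupAgent`/`markGroupFailed` wrap the Task invocation and failure bookkeeping (all three are assumptions, not documented schema):

```javascript
// Hedged sketch: stage-based scheduling over the aggregated timeline.
const plan = JSON.parse(Read(`${sessionDir}/fix-plan.json`));

for (const stage of plan.timeline) {
  const groups = plan.groups.filter(g => stage.groups.includes(g.group_id));
  if (stage.mode === 'parallel') {
    // Launch every group in this stage at once; failures are caught per group
    await Promise.all(groups.map(g =>
      runGroupAgent(g).catch(err => markGroupFailed(g, err))
    ));
  } else {
    // Serial stage: one group at a time
    for (const g of groups) {
      await runGroupAgent(g).catch(err => markGroupFailed(g, err));
    }
  }
  // Only advance once every group in the current stage has finished
}
```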
## Execution Agent Template (Per Group)

```javascript
Task({
  subagent_type: "cli-execute-agent",
  description: `Fix ${group.findings.length} issues: ${group.group_name}`,
  prompt: `
## Task Objective
Execute fixes for code review findings in group ${group.group_id}. Update the progress file in real-time with flow control tracking.

## Assignment
- Group ID: ${group.group_id}
- Group Name: ${group.group_name}
- Progress File: ${sessionDir}/${group.progress_file}
- Findings Count: ${group.findings.length}
- Max Iterations: ${maxIterations} (per finding)

## Fix Strategy
${JSON.stringify(group.fix_strategy, null, 2)}

## Risk Assessment
${JSON.stringify(group.risk_assessment, null, 2)}

## Execution Flow

### Initialization (Before Starting)

1. Read ${group.progress_file} to load initial state
2. Update progress file:
   - assigned_agent: "${agentId}"
   - status: "in-progress"
   - started_at: Current ISO 8601 timestamp
   - last_update: Current ISO 8601 timestamp
3. Write updated state back to ${group.progress_file}

### Main Execution Loop

For EACH finding in ${group.progress_file}.findings:

#### Step 1: Analyze Context

**Before Step**:
- Update finding: status→"in-progress", started_at→now()
- Update current_finding: Populate with finding details, status→"analyzing", action→"Reading file and understanding code structure"
- Update phase→"analyzing"
- Update flow_control: Add "analyze_context" step to implementation_approach (status→"in-progress"), set current_step→"analyze_context"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Read file: finding.file
- Understand code structure around line: finding.line
- Analyze surrounding context (imports, dependencies, related functions)
- Review recommendations: finding.recommendations

**After Step**:
- Update flow_control: Mark "analyze_context" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 2: Apply Fix

**Before Step**:
- Update current_finding: status→"fixing", action→"Applying code changes per recommendations"
- Update phase→"fixing"
- Update flow_control: Add "apply_fix" step to implementation_approach (status→"in-progress"), set current_step→"apply_fix"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Use Edit tool to implement code changes per finding.recommendations
- Follow fix_strategy.approach
- Maintain code style and existing patterns

**After Step**:
- Update flow_control: Mark "apply_fix" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 3: Test Verification

**Before Step**:
- Update current_finding: status→"testing", action→"Running test suite to verify fix"
- Update phase→"testing"
- Update flow_control: Add "run_tests" step to implementation_approach (status→"in-progress"), set current_step→"run_tests"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Run tests using fix_strategy.test_pattern
- Require 100% pass rate
- Capture test output

**On Test Failure**:
- Git rollback: \`git checkout -- \${finding.file}\`
- Increment finding.attempts
- Update flow_control: Mark "run_tests" step as "failed" with completed_at→now()
- Update errors: Add entry (finding_id, error_type→"test_failure", message, timestamp)
- If finding.attempts < ${maxIterations}:
  - Reset flow_control: implementation_approach→[], current_step→null
  - Retry from Step 1
- Else:
  - Update finding: status→"completed", result→"failed", error_message→"Max iterations reached", completed_at→now()
  - Update summary counts, move to next finding

**On Test Success**:
- Update flow_control: Mark "run_tests" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}
- Proceed to Step 4

#### Step 4: Commit Changes

**Before Step**:
- Update current_finding: status→"committing", action→"Creating git commit for successful fix"
- Update phase→"committing"
- Update flow_control: Add "commit_changes" step to implementation_approach (status→"in-progress"), set current_step→"commit_changes"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Git commit: \`git commit -m "fix(\${finding.dimension}): \${finding.title} [\${finding.id}]"\`
- Capture commit hash

**After Step**:
- Update finding: status→"completed", result→"fixed", commit_hash→<captured>, test_passed→true, completed_at→now()
- Update flow_control: Mark "commit_changes" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### After Each Finding

- Update summary: Recalculate counts (pending/in_progress/fixed/failed) and percent_complete
- If all findings completed: Clear current_finding, reset flow_control
- Update last_update→now(), write to ${group.progress_file}

### Final Completion

When all findings processed:
- Update status→"completed", phase→"done", summary.percent_complete→100.0
- Update last_update→now(), write final state to ${group.progress_file}

## Critical Requirements

### Progress File Updates
- **MUST update after every significant action** (before/after each step)
- **Always maintain complete structure** - never write partial updates
- **Use ISO 8601 timestamps** - e.g., "2025-01-25T14:36:00Z"

### Flow Control Format
Follow the action-planning-agent flow_control.implementation_approach format:
- step: Identifier (e.g., "analyze_context", "apply_fix")
- action: Human-readable description
- status: "pending" | "in-progress" | "completed" | "failed"
- started_at: ISO 8601 timestamp or null
- completed_at: ISO 8601 timestamp or null

### Error Handling
- Capture all errors in the errors[] array
- Never leave the progress file in an invalid state
- Always write complete updates, never partial
- On unrecoverable error: Mark group as failed, preserve state

## Test Patterns
Use fix_strategy.test_pattern to run affected tests:
- Pattern: ${group.fix_strategy.test_pattern}
- Command: Infer from project (npm test, pytest, etc.)
- Pass Criteria: 100% pass rate required
`
})
```

The progress-file shape these updates assume is sketched below.
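The prompt above reads and writes fix-progress-{N}.json constantly, but the file's shape is never shown. A hedged snapshot assembled only from the fields the prompt mentions; any field beyond those is an assumption, and the actual schema is owned by the planning agents:

```javascript
// Hedged sketch of a fix-progress-{N}.json snapshot mid-run (2 findings, 1 fixed).
const exampleProgress = {
  group_id: "G1",
  assigned_agent: "agent-1",
  status: "in-progress",            // "pending" | "in-progress" | "completed"
  phase: "testing",                 // analyzing | fixing | testing | committing | done
  started_at: "2025-01-25T14:30:00Z",
  last_update: "2025-01-25T14:36:00Z",
  findings: [
    { id: "SEC-001", status: "completed", result: "fixed",
      attempts: 1, test_passed: true, commit_hash: "abc1234",
      completed_at: "2025-01-25T14:35:00Z" },
    { id: "SEC-002", status: "in-progress", result: null, attempts: 0 }
  ],
  current_finding: { id: "SEC-002", status: "testing",
                     action: "Running test suite to verify fix" },
  flow_control: {
    current_step: "run_tests",
    implementation_approach: [
      { step: "analyze_context", action: "Reading file and understanding code structure",
        status: "completed", started_at: "2025-01-25T14:35:10Z", completed_at: "2025-01-25T14:35:30Z" },
      { step: "apply_fix", action: "Applying code changes per recommendations",
        status: "completed", started_at: "2025-01-25T14:35:30Z", completed_at: "2025-01-25T14:35:55Z" },
      { step: "run_tests", action: "Running test suite to verify fix",
        status: "in-progress", started_at: "2025-01-25T14:36:00Z", completed_at: null }
    ]
  },
  summary: { pending: 0, in_progress: 1, fixed: 1, failed: 0, percent_complete: 50.0 },
  errors: []
};
```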
## Output

- Files: fix-progress-{N}.json (updated per group), git commits
- TaskUpdate: Mark Phase 8 completed, Phase 9 in_progress

## Next Phase

Return to orchestrator, then auto-continue to [Phase 9: Fix Completion](09-fix-completion.md).
@@ -1,153 +0,0 @@
# Phase 9: Fix Completion

> Source: `commands/workflow/review-cycle-fix.md` Phase 4 + Phase 5

## Overview

Aggregate fix results, generate the summary report, update history, and optionally complete the workflow session.

## Phase 4: Completion & Aggregation (Orchestrator)

- Collect final status from all fix-progress-{N}.json files
- Generate fix-summary.md with timeline and results
- Update fix-history.json with new session entry
- Remove active-fix-session.json
- TodoWrite completion: Mark all phases done
- Output summary to user

## Phase 5: Session Completion (Orchestrator)

- If all findings fixed successfully (no failures):
  - Prompt user: "All fixes complete. Complete workflow session? [Y/n]"
  - If confirmed: Execute `Skill(skill="workflow:session:complete")` to archive the session with lessons learned
- If partial success (some failures):
  - Output: "Some findings failed. Review fix-summary.md before completing session."
  - Do NOT auto-complete the session

A sketch of this decision follows.
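A minimal sketch of the Phase 5 decision; the summary shape reuses the progress-file sketch above, and the AskUserQuestion call shape is illustrative, not the tool's exact signature:

```javascript
// Hedged sketch: complete the session only when no finding failed.
const failed = progressFiles.reduce((n, p) => n + p.summary.failed, 0);

if (failed === 0) {
  const answer = AskUserQuestion({
    question: "All fixes complete. Complete workflow session?",
    options: ["Yes", "No"]
  });
  if (answer === "Yes") Skill({ skill: "workflow:session:complete" });
} else {
  console.log("Some findings failed. Review fix-summary.md before completing session.");
  // Do NOT auto-complete the session
}
```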
## Error Handling

### Batching Failures (Phase 1.5)

- Invalid findings data -> Abort with error message
- Empty batches after grouping -> Warn and skip empty batches

### Planning Failures (Phase 2)

- Planning agent timeout -> Mark batch as failed, continue with other batches
- Partial plan missing -> Skip batch, warn user
- Agent crash -> Collect available partial plans, proceed with aggregation
- All agents fail -> Abort entire fix session with error
- Aggregation conflicts -> Apply conflict resolution (serialize conflicting groups)

### Execution Failures (Phase 3)

- Agent crash -> Mark group as failed, continue with other groups
- Test command not found -> Skip test verification, warn user
- Git operations fail -> Abort with error, preserve state

### Rollback Scenarios

- Test failure after fix -> Automatic `git checkout` rollback
- Max iterations reached -> Leave file unchanged, mark as failed
- Unrecoverable error -> Rollback entire group, save checkpoint
## TodoWrite Structures

### Initialization (after Phase 1.5 batching)

```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning", status: "in_progress", activeForm: "Planning"},
    {content: " → Batch 1: 4 findings (auth.ts:security)", status: "pending", activeForm: "Planning batch 1"},
    {content: " → Batch 2: 3 findings (query.ts:security)", status: "pending", activeForm: "Planning batch 2"},
    {content: " → Batch 3: 2 findings (config.ts:quality)", status: "pending", activeForm: "Planning batch 3"},
    {content: "Phase 3: Execution", status: "pending", activeForm: "Executing"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```

### During Planning (parallel agents running)

```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning", status: "in_progress", activeForm: "Planning"},
    {content: " → Batch 1: 4 findings (auth.ts:security)", status: "completed", activeForm: "Planning batch 1"},
    {content: " → Batch 2: 3 findings (query.ts:security)", status: "in_progress", activeForm: "Planning batch 2"},
    {content: " → Batch 3: 2 findings (config.ts:quality)", status: "in_progress", activeForm: "Planning batch 3"},
    {content: "Phase 3: Execution", status: "pending", activeForm: "Executing"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```

### During Execution

```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning (3 batches → 5 groups)", status: "completed", activeForm: "Planning"},
    {content: "Phase 3: Execution", status: "in_progress", activeForm: "Executing"},
    {content: " → Stage 1: Parallel execution (3 groups)", status: "completed", activeForm: "Executing stage 1"},
    {content: "  • Group G1: Auth validation (2 findings)", status: "completed", activeForm: "Fixing G1"},
    {content: "  • Group G2: Query security (3 findings)", status: "completed", activeForm: "Fixing G2"},
    {content: "  • Group G3: Config quality (1 finding)", status: "completed", activeForm: "Fixing G3"},
    {content: " → Stage 2: Serial execution (1 group)", status: "in_progress", activeForm: "Executing stage 2"},
    {content: "  • Group G4: Dependent fixes (2 findings)", status: "in_progress", activeForm: "Fixing G4"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```
### Update Rules

- Add batch items dynamically during Phase 1.5
- Mark batch items completed as parallel agents return results
- Add stage/group items dynamically after Phase 2 plan aggregation
- Mark items completed immediately after each group finishes
- Update the parent phase status when all child items complete

## Post-Completion Expansion

After completion, ask the user whether to expand findings into issues (test/enhance/refactor/doc). For each selected item, invoke `Skill(skill="issue:new", args="{summary} - {dimension}")`, as sketched below.
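A minimal sketch; the selection shape is illustrative, while the Skill invocation string follows the format quoted above:

```javascript
// Hedged sketch: expand user-selected findings into issues.
for (const item of selectedItems) {  // e.g. [{summary, dimension}, ...] (assumed shape)
  Skill({ skill: "issue:new", args: `${item.summary} - ${item.dimension}` });
}
```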
## Best Practices

1. **Leverage Parallel Planning**: For 10+ findings, parallel batching significantly reduces planning time
2. **Tune Batch Size**: Use `--batch-size` to control granularity (smaller batches = more parallelism, larger batches = better grouping context)
3. **Conservative Approach**: Test verification is mandatory - no fix is kept without passing tests
4. **Parallel Efficiency**: MAX_PARALLEL=10 for planning agents, 3 concurrent execution agents per stage
5. **Resume Support**: Fix sessions can resume from checkpoints after interruption
6. **Manual Review**: Always review failed fixes manually - they may require architectural changes
7. **Incremental Fixing**: Start with small batches (5-10 findings) before large-scale fixes

## Related Commands

### View Fix Progress

Use `ccw view` to open the workflow dashboard in the browser:

```bash
ccw view
```

### Re-run Fix Pipeline

```
Skill(skill="review-cycle", args="--fix ...")
```

## Output

- Files: fix-summary.md, fix-history.json
- State: active-fix-session.json removed
- Optional: workflow session completed via `Skill(skill="workflow:session:complete")`

## Completion

Review Cycle fix pipeline complete. Review fix-summary.md for results.
.claude/skills/review-cycle/phases/review-fix.md (new file, 758 lines)
@@ -0,0 +1,758 @@
# Workflow Review-Cycle-Fix Command

## Quick Start

```bash
# Fix from exported findings file (session-based path)
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/fix-export-1706184622000.json

# Fix from review directory (auto-discovers latest export)
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/

# Resume interrupted fix session
/workflow:review-cycle-fix --resume

# Custom max retry attempts per finding
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/ --max-iterations=5

# Custom batch size for parallel planning (default: 5 findings per batch)
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/ --batch-size=3
```

**Fix Source**: Exported findings from review cycle dashboard
**Output Directory**: `{review-dir}/fixes/{fix-session-id}/` (within session .review/)
**Default Max Iterations**: 3 (per finding, adjustable)
**Default Batch Size**: 5 (findings per planning batch, adjustable)
**Max Parallel Agents**: 10 (concurrent planning agents)
**CLI Tools**: @cli-planning-agent (planning), @cli-execute-agent (fixing)
## What & Why

### Core Concept
Automated fix orchestrator with a **parallel planning architecture**: multiple AI agents analyze findings concurrently in batches, then coordinate parallel/serial execution. It generates a fix timeline with intelligent grouping and dependency analysis, and executes fixes with conservative test verification.

**Fix Process**:
- **Batching Phase (1.5)**: Orchestrator groups findings by file+dimension similarity, creates batches
- **Planning Phase (2)**: Up to 10 agents plan batches in parallel and generate partial plans; the orchestrator aggregates them
- **Execution Phase (3)**: Main orchestrator coordinates agents per aggregated timeline stages
- **Parallel Efficiency**: Customizable batch size (default: 5), MAX_PARALLEL=10 agents
- **No rigid structure**: Adapts to task requirements, not bound to a fixed JSON format

**vs Manual Fixing**:
- **Manual**: Developer reviews findings one by one, fixes sequentially
- **Automated**: AI groups related issues, multiple agents plan in parallel, and fixes execute in optimal parallel/serial order with automatic test verification

### Value Proposition
1. **Parallel Planning**: Multiple agents analyze findings concurrently, reducing planning time for large batches (10+ findings)
2. **Intelligent Batching**: Semantic similarity grouping ensures related findings are analyzed together
3. **Multi-stage Coordination**: Supports complex parallel + serial execution with cross-batch dependency management
4. **Conservative Safety**: Mandatory test verification with automatic rollback on failure
5. **Resume Support**: Checkpoint-based recovery for interrupted sessions

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for automated review finding fixes
- Manages: Intelligent batching (Phase 1.5), parallel planning coordination (launch N agents), plan aggregation, stage-based execution, agent scheduling, progress tracking
- Delegates: Batch planning to @cli-planning-agent, fix execution to @cli-execute-agent
### Execution Flow

```
Phase 1: Discovery & Initialization
  └─ Validate export file, create fix session structure, initialize state files

Phase 1.5: Intelligent Grouping & Batching
  ├─ Analyze findings metadata (file, dimension, severity)
  ├─ Group by semantic similarity (file proximity + dimension affinity)
  ├─ Create batches respecting --batch-size (default: 5)
  └─ Output: Finding batches for parallel planning

Phase 2: Parallel Planning Coordination (@cli-planning-agent × N)
  ├─ Launch MAX_PARALLEL planning agents concurrently (default: 10)
  ├─ Each agent processes one batch:
  │   ├─ Analyze findings for patterns and dependencies
  │   ├─ Group by file + dimension + root cause similarity
  │   ├─ Determine execution strategy (parallel/serial/hybrid)
  │   ├─ Generate fix timeline with stages
  │   └─ Output: partial-plan-{batch-id}.json
  ├─ Collect results from all agents
  └─ Aggregate: Merge partial plans → fix-plan.json (resolve cross-batch dependencies)

Phase 3: Execution Orchestration (Stage-based)
  For each timeline stage:
  ├─ Load groups for this stage
  ├─ If parallel: Launch all group agents simultaneously
  ├─ If serial: Execute groups sequentially
  ├─ Each agent:
  │   ├─ Analyze code context
  │   ├─ Apply fix per strategy
  │   ├─ Run affected tests
  │   ├─ On test failure: Rollback, retry up to max_iterations
  │   └─ On success: Commit, update fix-progress-{N}.json
  └─ Advance to next stage

Phase 4: Completion & Aggregation
  └─ Aggregate results → Generate fix-summary.md → Update history → Output summary

Phase 5: Session Completion (Optional)
  └─ If all fixes successful → Prompt to complete workflow session
```
### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Input validation, session management, intelligent batching (Phase 1.5), parallel planning coordination (launch N agents), plan aggregation (merge partial plans, resolve cross-batch dependencies), stage-based execution scheduling, progress tracking, result aggregation |
| **@cli-planning-agent** | Batch findings analysis, intelligent grouping (file+dimension+root cause), execution strategy determination (parallel/serial/hybrid), timeline generation with dependency mapping, partial plan output |
| **@cli-execute-agent** | Fix execution per group, code context analysis, Edit tool operations, test verification, git rollback on failure, completion JSON generation |
## Enhanced Features

### 1. Parallel Planning Architecture

**Batch Processing Strategy**:

| Phase | Agent Count | Input | Output | Purpose |
|-------|-------------|-------|--------|---------|
| **Batching (1.5)** | Orchestrator | All findings | Finding batches | Semantic grouping by file+dimension, respecting --batch-size |
| **Planning (2)** | N agents (≤10) | 1 batch each | partial-plan-{batch-id}.json | Analyze batch in parallel, generate execution groups and timeline |
| **Aggregation (2)** | Orchestrator | All partial plans | fix-plan.json | Merge timelines, resolve cross-batch dependencies |
| **Execution (3)** | M agents (dynamic) | 1 group each | fix-progress-{N}.json | Execute fixes per aggregated plan with test verification |

**Benefits**:
- **Speed**: N agents plan concurrently, reducing planning time for large batches
- **Scalability**: MAX_PARALLEL=10 prevents resource exhaustion
- **Flexibility**: Batch size customizable via --batch-size (default: 5)
- **Isolation**: Each planning agent focuses on related findings (semantic grouping)
- **Reusable**: Aggregated plan can be re-executed without re-planning
### 2. Intelligent Grouping Strategy

**Three-Level Grouping**:

```javascript
// Level 1: Primary grouping by file + dimension
{file: "auth.ts", dimension: "security"}          → Group A
{file: "auth.ts", dimension: "quality"}           → Group B
{file: "query-builder.ts", dimension: "security"} → Group C

// Level 2: Secondary grouping by root cause similarity
Group A findings → Semantic similarity analysis (threshold 0.7)
  → Sub-group A1: "missing-input-validation" (findings 1, 2)
  → Sub-group A2: "insecure-crypto" (finding 3)

// Level 3: Dependency analysis
Sub-group A1 creates validation utilities
Sub-group C4 depends on those utilities
  → A1 must execute before C4 (serial stage dependency)
```

**Similarity Computation**:
- Combine: `description + recommendation + category`
- Vectorize: TF-IDF or LLM embedding
- Cluster: Greedy algorithm with cosine similarity > 0.7
### 3. Execution Strategy Determination

**Strategy Types**:

| Strategy | When to Use | Stage Structure |
|----------|-------------|-----------------|
| **Parallel** | All groups independent, different files | Single stage, all groups in parallel |
| **Serial** | Strong dependencies, shared resources | Multiple stages, one group per stage |
| **Hybrid** | Mixed dependencies | Multiple stages, parallel within stages |

**Dependency Detection**:
- Shared file modifications
- Utility creation + usage patterns
- Test dependency chains
- Risk level clustering (high-risk groups isolated)
### 4. Conservative Test Verification

**Test Strategy** (per fix):

```javascript
// 1. Identify affected tests
const testPattern = identifyTestPattern(finding.file);
// e.g., "tests/auth/**/*.test.*" for src/auth/service.ts

// 2. Run tests
const result = await runTests(testPattern);

// 3. Evaluate (passRate expressed as a percentage, 0-100)
if (result.passRate < 100) {
  // Rollback
  await gitCheckout(finding.file);

  // Retry with failure context
  if (attempts < maxIterations) {
    const fixContext = analyzeFailure(result.stderr);
    regenerateFix(finding, fixContext);
    retry();
  } else {
    markFailed(finding.id);
  }
} else {
  // Commit
  await gitCommit(`Fix: ${finding.title} [${finding.id}]`);
  markFixed(finding.id);
}
```

**Pass Criteria**: 100% test pass rate (no partial fixes)
## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**
- Input validation: Check that the export file exists and is valid JSON
- Auto-discovery: If a review-dir is provided, find the latest `*-fix-export.json`
- Session creation: Generate fix-session-id (`fix-{timestamp}`)
- Directory structure: Create `{review-dir}/fixes/{fix-session-id}/` with subdirectories
- State files: Initialize active-fix-session.json (session marker)
- TodoWrite initialization: Set up 5-phase tracking (including Phase 1.5)

**Phase 1.5: Intelligent Grouping & Batching**
- Load all findings metadata (id, file, dimension, severity, title)
- Semantic similarity analysis:
  - Primary: Group by file proximity (same file or related modules)
  - Secondary: Group by dimension affinity (same review dimension)
  - Tertiary: Analyze title/description similarity (root cause clustering)
- Create batches respecting --batch-size (default: 5 findings per batch)
- Balance workload: Distribute high-severity findings across batches
- Output: Array of finding batches for parallel planning

**Phase 2: Parallel Planning Coordination**
- Determine concurrency: MIN(batch_count, MAX_PARALLEL=10)
- For each batch chunk (≤10 batches):
  - Launch all agents in parallel with run_in_background=true
  - Pass batch findings + project context + batch_id to each agent
  - Each agent outputs: partial-plan-{batch-id}.json
  - Collect results via TaskOutput (blocking until all complete)
- Aggregate partial plans:
  - Merge execution groups (renumber group_ids sequentially: G1, G2, ...)
  - Merge timelines (detect cross-batch dependencies, adjust stages)
  - Resolve conflicts (same file in multiple batches → serialize)
  - Generate final fix-plan.json with aggregated metadata
- TodoWrite update: Mark planning complete, start execution

**Phase 3: Execution Orchestration**
- Load fix-plan.json timeline stages
- For each stage:
  - If parallel mode: Launch all group agents via `Promise.all()`
  - If serial mode: Execute groups sequentially with `await`
  - Assign agent IDs (agents update their fix-progress-{N}.json)
- Handle agent failures gracefully (mark group as failed, continue)
- Advance to the next stage only when the current stage is complete

**Phase 4: Completion & Aggregation**
- Collect final status from all fix-progress-{N}.json files
- Generate fix-summary.md with timeline and results
- Update fix-history.json with new session entry
- Remove active-fix-session.json
- TodoWrite completion: Mark all phases done
- Output summary to user

**Phase 5: Session Completion (Optional)**
- If all findings fixed successfully (no failures):
  - Prompt user: "All fixes complete. Complete workflow session? [Y/n]"
  - If confirmed: Execute `/workflow:session:complete` to archive the session with lessons learned
- If partial success (some failures):
  - Output: "Some findings failed. Review fix-summary.md before completing session."
  - Do NOT auto-complete the session
### Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── fix-export-{timestamp}.json        # Exported findings (input)
└── fixes/{fix-session-id}/
    ├── partial-plan-1.json            # Batch 1 partial plan (planning agent 1 output)
    ├── partial-plan-2.json            # Batch 2 partial plan (planning agent 2 output)
    ├── partial-plan-N.json            # Batch N partial plan (planning agent N output)
    ├── fix-plan.json                  # Aggregated execution plan (orchestrator merges partials)
    ├── fix-progress-1.json            # Group 1 progress (planning agent init → agent updates)
    ├── fix-progress-2.json            # Group 2 progress (planning agent init → agent updates)
    ├── fix-progress-3.json            # Group 3 progress (planning agent init → agent updates)
    ├── fix-summary.md                 # Final report (orchestrator generates)
    ├── active-fix-session.json        # Active session marker
    └── fix-history.json               # All sessions history
```

**File Producers**:
- **Orchestrator**: Batches findings (Phase 1.5), aggregates partial plans → `fix-plan.json` (Phase 2), launches parallel planning agents
- **Planning Agents (N)**: Each outputs `partial-plan-{batch-id}.json` + initializes `fix-progress-*.json` for assigned groups
- **Execution Agents (M)**: Update assigned `fix-progress-{N}.json` in real-time
### Agent Invocation Template

**Phase 1.5: Intelligent Batching** (Orchestrator):
```javascript
// Load findings
const findings = JSON.parse(Read(exportFile));
const batchSize = flags.batchSize || 5;

// Semantic similarity analysis: group by file+dimension
const batches = [];
const grouped = new Map(); // key: "${file}:${dimension}"

for (const finding of findings) {
  const key = `${finding.file || 'unknown'}:${finding.dimension || 'general'}`;
  if (!grouped.has(key)) grouped.set(key, []);
  grouped.get(key).push(finding);
}

// Create batches respecting batchSize
for (const [key, group] of grouped) {
  while (group.length > 0) {
    const batch = group.splice(0, batchSize);
    batches.push({
      batch_id: batches.length + 1,
      findings: batch,
      metadata: { primary_file: batch[0].file, primary_dimension: batch[0].dimension }
    });
  }
}

console.log(`Created ${batches.length} batches (up to ${batchSize} findings per batch)`);
```
**Phase 2: Parallel Planning** (Orchestrator launches N agents):
```javascript
const MAX_PARALLEL = 10;
const partialPlans = [];

// Process batches in chunks of MAX_PARALLEL
for (let i = 0; i < batches.length; i += MAX_PARALLEL) {
  const chunk = batches.slice(i, i + MAX_PARALLEL);
  const taskIds = [];

  // Launch agents in parallel (run_in_background=true)
  for (const batch of chunk) {
    const taskId = Task({
      subagent_type: "cli-planning-agent",
      run_in_background: true,
      description: `Plan batch ${batch.batch_id}: ${batch.findings.length} findings`,
      prompt: planningPrompt(batch) // See Planning Agent template below
    });
    taskIds.push({ taskId, batch });
  }

  console.log(`Launched ${taskIds.length} planning agents...`);

  // Collect results from this chunk (blocking)
  for (const { taskId, batch } of taskIds) {
    const result = TaskOutput({ task_id: taskId, block: true });
    const partialPlan = JSON.parse(Read(`${sessionDir}/partial-plan-${batch.batch_id}.json`));
    partialPlans.push(partialPlan);
    updateTodo(`Batch ${batch.batch_id}`, 'completed');
  }
}

// Aggregate partial plans → fix-plan.json
const aggregatedPlan = { groups: [], timeline: [] }; // accumulated plan across all batches
let groupCounter = 1;
const groupIdMap = new Map();

for (const partial of partialPlans) {
  for (const group of partial.groups) {
    const newGroupId = `G${groupCounter}`;
    groupIdMap.set(`${partial.batch_id}:${group.group_id}`, newGroupId);
    aggregatedPlan.groups.push({ ...group, group_id: newGroupId, progress_file: `fix-progress-${groupCounter}.json` });
    groupCounter++;
  }
}

// Merge timelines, resolve cross-batch conflicts (shared files → serialize)
let stageCounter = 1;
for (const partial of partialPlans) {
  for (const stage of partial.timeline) {
    aggregatedPlan.timeline.push({
      ...stage, stage_id: stageCounter,
      groups: stage.groups.map(gid => groupIdMap.get(`${partial.batch_id}:${gid}`))
    });
    stageCounter++;
  }
}

// Write aggregated plan + initialize progress files
Write(`${sessionDir}/fix-plan.json`, JSON.stringify(aggregatedPlan, null, 2));
for (let i = 1; i <= aggregatedPlan.groups.length; i++) {
  Write(`${sessionDir}/fix-progress-${i}.json`, JSON.stringify(initProgressFile(aggregatedPlan.groups[i-1]), null, 2));
}
```
**Planning Agent (Batch Mode - Partial Plan Only)**:
|
||||
```javascript
|
||||
Task({
|
||||
subagent_type: "cli-planning-agent",
|
||||
run_in_background: true,
|
||||
description: `Plan batch ${batch.batch_id}: ${batch.findings.length} findings`,
|
||||
prompt: `
|
||||
## Task Objective
|
||||
Analyze code review findings in batch ${batch.batch_id} and generate **partial** execution plan.
|
||||
|
||||
## Input Data
|
||||
Review Session: ${reviewId}
|
||||
Fix Session ID: ${fixSessionId}
|
||||
Batch ID: ${batch.batch_id}
|
||||
Batch Findings: ${batch.findings.length}
|
||||
|
||||
Findings:
|
||||
${JSON.stringify(batch.findings, null, 2)}
|
||||
|
||||
Project Context:
|
||||
- Structure: ${projectStructure}
|
||||
- Test Framework: ${testFramework}
|
||||
- Git Status: ${gitStatus}
|
||||
|
||||
## Output Requirements
|
||||
|
||||
### 1. partial-plan-${batch.batch_id}.json
|
||||
Generate partial execution plan with structure:
|
||||
{
|
||||
"batch_id": ${batch.batch_id},
|
||||
"groups": [...], // Groups created from batch findings (use local IDs: G1, G2, ...)
|
||||
"timeline": [...], // Local timeline for this batch only
|
||||
"metadata": {
|
||||
"findings_count": ${batch.findings.length},
|
||||
"groups_count": N,
|
||||
"created_at": "ISO-8601-timestamp"
|
||||
}
|
||||
}
|
||||
|
||||
**Key Generation Rules**:
|
||||
- **Groups**: Create groups with local IDs (G1, G2, ...) using intelligent grouping (file+dimension+root cause)
|
||||
- **Timeline**: Define stages for this batch only (local dependencies within batch)
|
||||
- **Progress Files**: DO NOT generate fix-progress-*.json here (orchestrator handles after aggregation)
|
||||
|
||||
## Analysis Requirements
|
||||
|
||||
### Intelligent Grouping Strategy
|
||||
Group findings using these criteria (in priority order):
|
||||
|
||||
1. **File Proximity**: Findings in same file or related files
|
||||
2. **Dimension Affinity**: Same dimension (security, performance, etc.)
|
||||
3. **Root Cause Similarity**: Similar underlying issues
|
||||
4. **Fix Approach Commonality**: Can be fixed with similar approach
|
||||
|
||||
**Grouping Guidelines**:
|
||||
- Optimal group size: 2-5 findings per group
|
||||
- Avoid cross-cutting concerns in same group
|
||||
- Consider test isolation (different test suites → different groups)
|
||||
- Balance workload across groups for parallel execution
|
||||
|
||||
### Execution Strategy Determination (Local Only)
|
||||
|
||||
**Parallel Mode**: Use when groups are independent, no shared files
|
||||
**Serial Mode**: Use when groups have dependencies or shared resources
|
||||
**Hybrid Mode**: Use for mixed dependency graphs (recommended for most cases)
|
||||
|
||||
**Dependency Analysis**:
|
||||
- Identify shared files between groups
|
||||
- Detect test dependency chains
|
||||
- Evaluate risk of concurrent modifications
|
||||
|
||||
### Risk Assessment
|
||||
|
||||
For each group, evaluate:
|
||||
- **Complexity**: Based on code structure, file size, existing tests
|
||||
- **Impact Scope**: Number of files affected, API surface changes
|
||||
- **Rollback Feasibility**: Ease of reverting changes if tests fail
|
||||
|
||||
### Test Strategy
|
||||
|
||||
For each group, determine:
|
||||
- **Test Pattern**: Glob pattern matching affected tests
|
||||
- **Pass Criteria**: All tests must pass (100% pass rate)
|
||||
- **Test Command**: Infer from project (package.json, pytest.ini, etc.)
|
||||
|
||||
## Output Files
|
||||
|
||||
Write to ${sessionDir}:
|
||||
- ./partial-plan-${batch.batch_id}.json
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
Before finalizing outputs:
|
||||
- ✅ All batch findings assigned to exactly one group
|
||||
- ✅ Group dependencies (within batch) correctly identified
|
||||
- ✅ Timeline stages respect local dependencies
|
||||
- ✅ Test patterns are valid and specific
|
||||
- ✅ Risk assessments are realistic
|
||||
`
|
||||
})
|
||||
```

**Execution Agent** (per group):
```javascript
Task({
  subagent_type: "cli-execute-agent",
  description: `Fix ${group.findings.length} issues: ${group.group_name}`,
  prompt: `
## Task Objective
Execute fixes for code review findings in group ${group.group_id}. Update progress file in real-time with flow control tracking.

## Assignment
- Group ID: ${group.group_id}
- Group Name: ${group.group_name}
- Progress File: ${sessionDir}/${group.progress_file}
- Findings Count: ${group.findings.length}
- Max Iterations: ${maxIterations} (per finding)

## Fix Strategy
${JSON.stringify(group.fix_strategy, null, 2)}

## Risk Assessment
${JSON.stringify(group.risk_assessment, null, 2)}

## Execution Flow

### Initialization (Before Starting)

1. Read ${group.progress_file} to load initial state
2. Update progress file:
   - assigned_agent: "${agentId}"
   - status: "in-progress"
   - started_at: Current ISO 8601 timestamp
   - last_update: Current ISO 8601 timestamp
3. Write updated state back to ${group.progress_file}

### Main Execution Loop

For EACH finding in ${group.progress_file}.findings:

#### Step 1: Analyze Context

**Before Step**:
- Update finding: status→"in-progress", started_at→now()
- Update current_finding: Populate with finding details, status→"analyzing", action→"Reading file and understanding code structure"
- Update phase→"analyzing"
- Update flow_control: Add "analyze_context" step to implementation_approach (status→"in-progress"), set current_step→"analyze_context"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Read file: finding.file
- Understand code structure around line: finding.line
- Analyze surrounding context (imports, dependencies, related functions)
- Review recommendations: finding.recommendations

**After Step**:
- Update flow_control: Mark "analyze_context" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 2: Apply Fix

**Before Step**:
- Update current_finding: status→"fixing", action→"Applying code changes per recommendations"
- Update phase→"fixing"
- Update flow_control: Add "apply_fix" step to implementation_approach (status→"in-progress"), set current_step→"apply_fix"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Use Edit tool to implement code changes per finding.recommendations
- Follow fix_strategy.approach
- Maintain code style and existing patterns

**After Step**:
- Update flow_control: Mark "apply_fix" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 3: Test Verification

**Before Step**:
- Update current_finding: status→"testing", action→"Running test suite to verify fix"
- Update phase→"testing"
- Update flow_control: Add "run_tests" step to implementation_approach (status→"in-progress"), set current_step→"run_tests"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Run tests using fix_strategy.test_pattern
- Require 100% pass rate
- Capture test output

**On Test Failure**:
- Git rollback: \`git checkout -- \${finding.file}\`
- Increment finding.attempts
- Update flow_control: Mark "run_tests" step as "failed" with completed_at→now()
- Update errors: Add entry (finding_id, error_type→"test_failure", message, timestamp)
- If finding.attempts < ${maxIterations}:
  - Reset flow_control: implementation_approach→[], current_step→null
  - Retry from Step 1
- Else:
  - Update finding: status→"completed", result→"failed", error_message→"Max iterations reached", completed_at→now()
  - Update summary counts, move to next finding

**On Test Success**:
- Update flow_control: Mark "run_tests" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}
- Proceed to Step 4

#### Step 4: Commit Changes

**Before Step**:
- Update current_finding: status→"committing", action→"Creating git commit for successful fix"
- Update phase→"committing"
- Update flow_control: Add "commit_changes" step to implementation_approach (status→"in-progress"), set current_step→"commit_changes"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Git commit: \`git commit -m "fix(\${finding.dimension}): \${finding.title} [\${finding.id}]"\`
- Capture commit hash

**After Step**:
- Update finding: status→"completed", result→"fixed", commit_hash→<captured>, test_passed→true, completed_at→now()
- Update flow_control: Mark "commit_changes" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### After Each Finding

- Update summary: Recalculate counts (pending/in_progress/fixed/failed) and percent_complete
- If all findings completed: Clear current_finding, reset flow_control
- Update last_update→now(), write to ${group.progress_file}

### Final Completion

When all findings processed:
- Update status→"completed", phase→"done", summary.percent_complete→100.0
- Update last_update→now(), write final state to ${group.progress_file}

## Critical Requirements

### Progress File Updates
- **MUST update after every significant action** (before/after each step)
- **Always maintain complete structure** - never write partial updates
- **Use ISO 8601 timestamps** - e.g., "2025-01-25T14:36:00Z"

### Flow Control Format
Follow action-planning-agent flow_control.implementation_approach format:
- step: Identifier (e.g., "analyze_context", "apply_fix")
- action: Human-readable description
- status: "pending" | "in-progress" | "completed" | "failed"
- started_at: ISO 8601 timestamp or null
- completed_at: ISO 8601 timestamp or null

### Error Handling
- Capture all errors in errors[] array
- Never leave progress file in invalid state
- Always write complete updates, never partial
- On unrecoverable error: Mark group as failed, preserve state

## Test Patterns
Use fix_strategy.test_pattern to run affected tests:
- Pattern: ${group.fix_strategy.test_pattern}
- Command: Infer from project (npm test, pytest, etc.)
- Pass Criteria: 100% pass rate required
`
})
```
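
The prompt above repeatedly instructs the agent to read, modify, and rewrite the progress file around each step. One way that convention could be expressed, as a minimal sketch; the helper name is an assumption, and `Read`/`Write` are the tool wrappers used elsewhere in this document:

```javascript
// Hypothetical convenience wrapper: load the full progress file, apply a
// mutation, refresh last_update, and write the complete structure back.
// Writing the whole object (never a partial patch) matches the
// "always maintain complete structure" requirement above.
function updateProgressFile(path, mutate) {
  const state = JSON.parse(Read(path));   // assumes Read returns the file text
  mutate(state);                          // e.g., s => { s.phase = "fixing"; }
  state.last_update = new Date().toISOString();
  Write(path, JSON.stringify(state, null, 2));
  return state;
}
```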

### Error Handling

**Batching Failures (Phase 1.5)**:
- Invalid findings data → Abort with error message
- Empty batches after grouping → Warn and skip empty batches

**Planning Failures (Phase 2)**:
- Planning agent timeout → Mark batch as failed, continue with other batches
- Partial plan missing → Skip batch, warn user
- Agent crash → Collect available partial plans, proceed with aggregation
- All agents fail → Abort entire fix session with error
- Aggregation conflicts → Apply conflict resolution (serialize conflicting groups)

**Execution Failures (Phase 3)**:
- Agent crash → Mark group as failed, continue with other groups
- Test command not found → Skip test verification, warn user
- Git operations fail → Abort with error, preserve state

**Rollback Scenarios**:
- Test failure after fix → Automatic `git checkout` rollback (see the sketch below)
- Max iterations reached → Leave file unchanged, mark as failed
- Unrecoverable error → Rollback entire group, save checkpoint
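
A minimal sketch of the test-failure rollback loop described above, assuming the Bash tool wrapper and the per-finding `attempts` counter from the progress file; the function and callback names are illustrative:

```javascript
// Hypothetical retry loop: re-attempt the fix, roll the file back on test
// failure, and give up after maxIterations attempts.
function fixWithRollback(finding, attemptFix, runTests, maxIterations) {
  while (finding.attempts < maxIterations) {
    attemptFix(finding);                        // Steps 1-2: analyze + apply fix
    if (runTests(finding)) {
      return { result: "fixed", test_passed: true };
    }
    Bash(`git checkout -- "${finding.file}"`);  // revert the failed attempt
    finding.attempts++;
  }
  return { result: "failed", error_message: "Max iterations reached" };
}
```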

### TodoWrite Structure

**Initialization (after Phase 1.5 batching)**:
```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning", status: "in_progress", activeForm: "Planning"},
    {content: " → Batch 1: 4 findings (auth.ts:security)", status: "pending", activeForm: "Planning batch 1"},
    {content: " → Batch 2: 3 findings (query.ts:security)", status: "pending", activeForm: "Planning batch 2"},
    {content: " → Batch 3: 2 findings (config.ts:quality)", status: "pending", activeForm: "Planning batch 3"},
    {content: "Phase 3: Execution", status: "pending", activeForm: "Executing"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```

**During Planning (parallel agents running)**:
```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning", status: "in_progress", activeForm: "Planning"},
    {content: " → Batch 1: 4 findings (auth.ts:security)", status: "completed", activeForm: "Planning batch 1"},
    {content: " → Batch 2: 3 findings (query.ts:security)", status: "in_progress", activeForm: "Planning batch 2"},
    {content: " → Batch 3: 2 findings (config.ts:quality)", status: "in_progress", activeForm: "Planning batch 3"},
    {content: "Phase 3: Execution", status: "pending", activeForm: "Executing"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```

**During Execution**:
```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Discovering"},
    {content: "Phase 1.5: Intelligent Batching", status: "completed", activeForm: "Batching"},
    {content: "Phase 2: Parallel Planning (3 batches → 5 groups)", status: "completed", activeForm: "Planning"},
    {content: "Phase 3: Execution", status: "in_progress", activeForm: "Executing"},
    {content: " → Stage 1: Parallel execution (3 groups)", status: "completed", activeForm: "Executing stage 1"},
    {content: " • Group G1: Auth validation (2 findings)", status: "completed", activeForm: "Fixing G1"},
    {content: " • Group G2: Query security (3 findings)", status: "completed", activeForm: "Fixing G2"},
    {content: " • Group G3: Config quality (1 finding)", status: "completed", activeForm: "Fixing G3"},
    {content: " → Stage 2: Serial execution (1 group)", status: "in_progress", activeForm: "Executing stage 2"},
    {content: " • Group G4: Dependent fixes (2 findings)", status: "in_progress", activeForm: "Fixing G4"},
    {content: "Phase 4: Completion", status: "pending", activeForm: "Completing"}
  ]
});
```

**Update Rules**:
- Add batch items dynamically during Phase 1.5
- Mark batch items completed as parallel agents return results
- Add stage/group items dynamically after Phase 2 plan aggregation
- Mark completed immediately after each group finishes
- Update parent phase status when all child items complete

## Post-Completion Expansion

After completion, ask the user whether to expand remaining findings into issues (test/enhance/refactor/doc); for each selected item, invoke `/issue:new "{summary} - {dimension}"`.

## Best Practices

1. **Leverage Parallel Planning**: For 10+ findings, parallel batching significantly reduces planning time
2. **Tune Batch Size**: Use `--batch-size` to control granularity (smaller batches = more parallelism, larger = better grouping context)
3. **Conservative Approach**: Test verification is mandatory - no fixes kept without passing tests
4. **Parallel Efficiency**: MAX_PARALLEL=10 for planning agents, 3 concurrent execution agents per stage
5. **Resume Support**: Fix sessions can resume from checkpoints after interruption
6. **Manual Review**: Always review failed fixes manually - may require architectural changes
7. **Incremental Fixing**: Start with small batches (5-10 findings) before large-scale fixes

## Related Commands

### View Fix Progress
Use `ccw view` to open the workflow dashboard in browser:

```bash
ccw view
```

# Workflow Review-Module-Cycle Command

## Quick Start

```bash
# Review specific module (all 7 dimensions)
/workflow:review-module-cycle src/auth/**

# Review multiple modules
/workflow:review-module-cycle src/auth/**,src/payment/**

# Review with custom dimensions
/workflow:review-module-cycle src/payment/** --dimensions=security,architecture,quality

# Review specific files
/workflow:review-module-cycle src/payment/processor.ts,src/payment/validator.ts
```

**Review Scope**: Specified modules/files only (independent of git history)
**Session Requirement**: Auto-creates workflow session via `/workflow:session:start`
**Output Directory**: `.workflow/active/WFS-{session-id}/.review/` (session-based)
**Default Dimensions**: Security, Architecture, Quality, Action-Items, Performance, Maintainability, Best-Practices
**Max Iterations**: 3 (adjustable via --max-iterations)
**Default Iterations**: 1 (deep-dive runs once; use --max-iterations=0 to skip)
**CLI Tools**: Gemini → Qwen → Codex (fallback chain)

## What & Why

### Core Concept
Independent multi-dimensional code review orchestrator with **hybrid parallel-iterative execution** for comprehensive quality assessment of **specific modules or files**.

**Review Scope**:
- **Module-based**: Reviews specified file patterns (e.g., `src/auth/**`, `*.ts`)
- **Session-integrated**: Runs within workflow session context for unified tracking
- **Output location**: `.review/` subdirectory within active session

**vs Session Review**:
- **Session Review** (`review-session-cycle`): Reviews git changes within a workflow session
- **Module Review** (`review-module-cycle`): Reviews any specified code paths, regardless of git history
- **Common output**: Both use same `.review/` directory structure within session

### Value Proposition
1. **Module-Focused Review**: Target specific code areas independent of git history
2. **Session-Integrated**: Review results tracked within workflow session for unified management
3. **Comprehensive Coverage**: Same 7 specialized dimensions as session review
4. **Intelligent Prioritization**: Automatic identification of critical issues and cross-cutting concerns
5. **Unified Archive**: Review results archived with session for historical reference

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for independent multi-dimensional module review
- Manages: dimension coordination, aggregation, iteration control, progress tracking
- Delegates: Code exploration and analysis to @cli-explore-agent, dimension-specific reviews via Deep Scan mode

## How It Works

### Execution Flow

```
Phase 1: Discovery & Initialization
  └─ Resolve file patterns, validate paths, initialize state, create output structure

Phase 2: Parallel Reviews (for each dimension)
  ├─ Launch 7 review agents simultaneously
  ├─ Each executes CLI analysis via Gemini/Qwen on specified files
  ├─ Generate dimension JSON + markdown reports
  └─ Update review-progress.json

Phase 3: Aggregation
  ├─ Load all dimension JSON files
  ├─ Calculate severity distribution (critical/high/medium/low)
  ├─ Identify cross-cutting concerns (files in 3+ dimensions)
  └─ Decision:
      ├─ Critical findings OR high > 5 OR critical files → Phase 4 (Iterate)
      └─ Else → Phase 5 (Complete)

Phase 4: Iterative Deep-Dive (optional)
  ├─ Select critical findings (max 5 per iteration)
  ├─ Launch deep-dive agents for root cause analysis
  ├─ Generate remediation plans with impact assessment
  ├─ Re-assess severity based on analysis
  └─ Loop until no critical findings OR max iterations

Phase 5: Completion
  └─ Finalize review-progress.json
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Phase control, path resolution, state management, aggregation logic, iteration control |
| **@cli-explore-agent** (Review) | Execute dimension-specific code analysis via Deep Scan mode, generate findings JSON with dual-source strategy (Bash + Gemini), create structured analysis reports |
| **@cli-explore-agent** (Deep-dive) | Focused root cause analysis using dependency mapping, remediation planning with architectural insights, impact assessment, severity re-assessment |

## Enhanced Features

### 1. Review Dimensions Configuration

**7 Specialized Dimensions** with priority-based allocation:

| Dimension | Template | Priority | Timeout |
|-----------|----------|----------|---------|
| **Security** | 03-assess-security-risks.txt | 1 (Critical) | 60min |
| **Architecture** | 02-review-architecture.txt | 2 (High) | 60min |
| **Quality** | 02-review-code-quality.txt | 3 (Medium) | 40min |
| **Action-Items** | 02-analyze-code-patterns.txt | 2 (High) | 40min |
| **Performance** | 03-analyze-performance.txt | 3 (Medium) | 60min |
| **Maintainability** | 02-review-code-quality.txt* | 3 (Medium) | 40min |
| **Best-Practices** | 03-review-quality-standards.txt | 3 (Medium) | 40min |

*Custom focus: "Assess technical debt and maintainability"

**Category Definitions by Dimension**:

```javascript
const CATEGORIES = {
  security: ['injection', 'authentication', 'authorization', 'encryption', 'input-validation', 'access-control', 'data-exposure'],
  architecture: ['coupling', 'cohesion', 'layering', 'dependency', 'pattern-violation', 'scalability', 'separation-of-concerns'],
  quality: ['code-smell', 'duplication', 'complexity', 'naming', 'error-handling', 'testability', 'readability'],
  'action-items': ['requirement-coverage', 'acceptance-criteria', 'documentation', 'deployment-readiness', 'missing-functionality'],
  performance: ['n-plus-one', 'inefficient-query', 'memory-leak', 'blocking-operation', 'caching', 'resource-usage'],
  maintainability: ['technical-debt', 'magic-number', 'long-method', 'large-class', 'dead-code', 'commented-code'],
  'best-practices': ['convention-violation', 'anti-pattern', 'deprecated-api', 'missing-validation', 'inconsistent-style']
};
```

### 2. Path Pattern Resolution

**Syntax Rules**:
- All paths are **relative** from project root (e.g., `src/auth/**` not `/src/auth/**`)
- Multiple patterns: comma-separated, **no spaces** (e.g., `src/auth/**,src/payment/**`)
- Glob and specific files can be mixed (e.g., `src/auth/**,src/config.ts`)

**Supported Patterns**:

| Pattern Type | Example | Description |
|--------------|---------|-------------|
| Glob directory | `src/auth/**` | All files under src/auth/ |
| Glob with extension | `src/**/*.ts` | All .ts files under src/ |
| Specific file | `src/payment/processor.ts` | Single file |
| Multiple patterns | `src/auth/**,src/payment/**` | Comma-separated (no spaces) |

**Resolution Process** (sketched below):
1. Parse input pattern (split by comma, trim whitespace)
2. Expand glob patterns to file list via `find` command
3. Validate all files exist and are readable
4. Error if pattern matches 0 files
5. Store resolved file list in review-state.json
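
A minimal sketch of that resolution flow, assuming the orchestrator's Bash tool returns command output as a string; the function name is illustrative:

```javascript
// Hypothetical resolver: comma-separated patterns → validated relative file list.
function resolvePatterns(input) {
  const patterns = input.split(',').map(p => p.trim()).filter(Boolean);
  const files = [];
  for (const pattern of patterns) {
    // find's -path glob crosses directory separators, so src/auth/** expands recursively
    const out = Bash(`find . -path "./${pattern}" -type f | sed 's|^\\./||'`);
    const matched = out.split('\n').filter(Boolean);
    if (matched.length === 0) throw new Error(`Pattern matched 0 files: ${pattern}`);
    files.push(...matched);
  }
  return [...new Set(files)]; // de-duplicate before storing in review-state.json
}
```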

### 3. Aggregation Logic

**Cross-Cutting Concern Detection**:
1. Files appearing in 3+ dimensions = **Critical Files**
2. Same issue pattern across dimensions = **Systemic Issue**
3. Severity clustering in specific files = **Hotspots**

**Deep-Dive Selection Criteria** (selection sketch below):
- All critical severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (prevent overwhelm)
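
Expressed as code, a hedged sketch of that selection rule, assuming findings carry the `severity` and `file` fields from the dimension schema; the function name is illustrative:

```javascript
// Hypothetical selector: all criticals first, then up to 3 highs from
// critical files, capped at 5 findings per iteration.
function selectDeepDiveFindings(findings, criticalFiles) {
  const criticalSet = new Set(criticalFiles.map(cf => cf.file));
  const criticals = findings.filter(f => f.severity === 'critical');
  const highsInCriticalFiles = findings
    .filter(f => f.severity === 'high' && criticalSet.has(f.file))
    .slice(0, 3);
  return [...criticals, ...highsInCriticalFiles].slice(0, 5);
}
```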

### 4. Severity Assessment

**Severity Levels**:
- **Critical**: Security vulnerabilities, data corruption risks, system-wide failures, authentication/authorization bypass
- **High**: Feature degradation, performance bottlenecks, architecture violations, significant technical debt
- **Medium**: Code smells, minor performance issues, style inconsistencies, maintainability concerns
- **Low**: Documentation gaps, minor refactoring opportunities, cosmetic issues

**Iteration Trigger**:
- Critical findings > 0 OR
- High findings > 5 OR
- Critical files count > 0
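
As a predicate, a minimal sketch; the function name is an assumption, and the state shape mirrors the Review State JSON shown later:

```javascript
// Hypothetical iteration check over the aggregated review state.
function shouldIterate(state) {
  const { critical, high } = state.severity_distribution;
  return critical > 0 || high > 5 || state.critical_files.length > 0;
}
```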

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**

**Step 1: Session Creation**
```javascript
// Create workflow session for this review (type: review)
Skill(skill="workflow:session:start", args="--type review \"Code review for [target_pattern]\"")

// Parse output
const sessionId = output.match(/SESSION_ID: (WFS-[^\s]+)/)[1];
```

**Step 2: Path Resolution & Validation**
```bash
# Expand glob pattern to file list (relative paths from project root)
find . -path "./src/auth/**" -type f | sed 's|^\./||'

# Validate files exist and are readable
for file in "${resolvedFiles[@]}"; do
  test -r "$file" || error "File not readable: $file"
done
```
- Parse and expand file patterns (glob support): `src/auth/**` → actual file list
- Validation: Ensure all specified files exist and are readable
- Store as **relative paths** from project root (e.g., `src/auth/service.ts`)
- Agents construct absolute paths dynamically during execution

**Step 3: Output Directory Setup**
- Output directory: `.workflow/active/${sessionId}/.review/`
- Create directory structure:
```bash
mkdir -p ${sessionDir}/.review/{dimensions,iterations,reports}
```

**Step 4: Initialize Review State**
- State initialization: Create `review-state.json` with metadata, dimensions, max_iterations, resolved_files (merged metadata + state)
- Progress tracking: Create `review-progress.json` for progress tracking
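
A minimal initializer sketch for these two files; the field names mirror the Review State and Review Progress JSON examples below, while the helper itself is an assumption:

```javascript
// Hypothetical state bootstrap for a module review.
function initReviewState(sessionDir, reviewId, sessionId, pattern, files, dimensions, maxIterations) {
  const createdAt = new Date().toISOString();
  const state = {
    review_id: reviewId,
    review_type: "module",
    session_id: sessionId,
    metadata: {
      created_at: createdAt,
      target_pattern: pattern,
      resolved_files: files,
      dimensions,
      max_iterations: maxIterations
    },
    phase: "parallel",
    current_iteration: 0, // incremented when deep-dive iterations start
    severity_distribution: { critical: 0, high: 0, medium: 0, low: 0 },
    critical_files: [],
    iterations: []
  };
  Write(`${sessionDir}/.review/review-state.json`, JSON.stringify(state, null, 2));
  Write(`${sessionDir}/.review/review-progress.json`, JSON.stringify({
    review_id: reviewId,
    last_update: createdAt,
    phase: "parallel",
    progress: {
      parallel_review: { total_dimensions: dimensions.length, completed: 0, in_progress: 0, percent_complete: 0 }
    },
    agent_status: []
  }, null, 2));
}
```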

**Step 5: TodoWrite Initialization**
- Set up progress tracking with hierarchical structure
- Mark Phase 1 completed, Phase 2 in_progress

**Phase 2: Parallel Review Coordination**
- Launch 7 @cli-explore-agent instances simultaneously (Deep Scan mode)
- Pass dimension-specific context (template, timeout, custom focus, **target files**)
- Monitor completion via review-progress.json updates
- TodoWrite updates: Mark dimensions as completed
- CLI tool fallback: Gemini → Qwen → Codex (on error/timeout)

**Phase 3: Aggregation**
- Load all dimension JSON files from dimensions/
- Calculate severity distribution: Count by critical/high/medium/low
- Identify cross-cutting concerns: Files in 3+ dimensions
- Select deep-dive findings: Critical + high in critical files (max 5)
- Decision logic: Iterate if critical > 0 OR high > 5 OR critical files exist
- Update review-state.json with aggregation results (aggregation sketch below)
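
A hedged sketch of the aggregation pass, assuming each `dimensions/{dimension}.json` follows the dimension-results schema described later (array root, `findings` entries with lowercase `severity` and a `file` field):

```javascript
// Hypothetical aggregation over per-dimension result files.
function aggregateDimensions(reviewDir, dimensions) {
  const distribution = { critical: 0, high: 0, medium: 0, low: 0 };
  const fileDimensions = new Map(); // file → set of dimensions that flagged it

  for (const dim of dimensions) {
    const [result] = JSON.parse(Read(`${reviewDir}/dimensions/${dim}.json`)); // array root
    for (const finding of result.findings) {
      distribution[finding.severity]++;
      if (!fileDimensions.has(finding.file)) fileDimensions.set(finding.file, new Set());
      fileDimensions.get(finding.file).add(dim);
    }
  }

  // Files flagged by 3+ dimensions are cross-cutting "critical files"
  const criticalFiles = [...fileDimensions.entries()]
    .filter(([, dims]) => dims.size >= 3)
    .map(([file, dims]) => ({ file, dimensions: [...dims] }));

  return { distribution, criticalFiles };
}
```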

**Phase 4: Iteration Control**
- Check iteration count < max_iterations (default 3)
- Launch deep-dive agents for selected findings
- Collect remediation plans and re-assessed severities
- Update severity distribution based on re-assessments
- Record iteration in review-state.json
- Loop back to aggregation if critical/high findings remain

**Phase 5: Completion**
- Finalize review-progress.json with completion statistics
- Update review-state.json with completion_time and phase=complete
- TodoWrite completion: Mark all tasks done

### Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json          # Orchestrator state machine (includes metadata)
├── review-progress.json       # Real-time progress for dashboard
├── dimensions/                # Per-dimension results
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                # Deep-dive results
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
└── reports/                   # Human-readable reports
    ├── security-analysis.md
    ├── security-cli-output.txt
    ├── deep-dive-1-{uuid}.md
    └── ...
```

**Session Context**:
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
├── .summaries/
└── .review/                   # Review results (this command)
    └── (structure above)
```

### Review State JSON

**Purpose**: Unified state machine and metadata (merged from metadata + state)

```json
{
  "review_id": "review-20250125-143022",
  "review_type": "module",
  "session_id": "WFS-auth-system",
  "metadata": {
    "created_at": "2025-01-25T14:30:22Z",
    "target_pattern": "src/auth/**",
    "resolved_files": [
      "src/auth/service.ts",
      "src/auth/validator.ts",
      "src/auth/middleware.ts"
    ],
    "dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
    "max_iterations": 3
  },
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
  "selected_strategy": "comprehensive",
  "next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
  "severity_distribution": {
    "critical": 2,
    "high": 5,
    "medium": 12,
    "low": 8
  },
  "critical_files": [...],
  "iterations": [...],
  "completion_criteria": {...}
}
```

### Review Progress JSON

**Purpose**: Real-time dashboard updates via polling

```json
{
  "review_id": "review-20250125-143022",
  "last_update": "2025-01-25T14:35:10Z",
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "progress": {
    "parallel_review": {
      "total_dimensions": 7,
      "completed": 5,
      "in_progress": 2,
      "percent_complete": 71
    },
    "deep_dive": {
      "total_findings": 6,
      "analyzed": 2,
      "in_progress": 1,
      "percent_complete": 33
    }
  },
  "agent_status": [
    {
      "agent_type": "review-agent",
      "dimension": "security",
      "status": "completed",
      "started_at": "2025-01-25T14:30:00Z",
      "completed_at": "2025-01-25T15:15:00Z",
      "duration_ms": 2700000
    },
    {
      "agent_type": "deep-dive-agent",
      "finding_id": "sec-001-uuid",
      "status": "in_progress",
      "started_at": "2025-01-25T14:32:00Z"
    }
  ],
  "estimated_completion": "2025-01-25T16:00:00Z"
}
```

### Agent Output Schemas

**Agent-produced JSON files follow standardized schemas**:

1. **Dimension Results** (cli-explore-agent output from parallel reviews)
   - Schema: `~/.ccw/workflows/cli-templates/schemas/review-dimension-results-schema.json`
   - Output: `{output-dir}/dimensions/{dimension}.json`
   - Contains: findings array, summary statistics, cross_references

2. **Deep-Dive Results** (cli-explore-agent output from iterations)
   - Schema: `~/.ccw/workflows/cli-templates/schemas/review-deep-dive-results-schema.json`
   - Output: `{output-dir}/iterations/iteration-{N}-finding-{uuid}.json`
   - Contains: root_cause, remediation_plan, impact_assessment, reassessed_severity

### Agent Invocation Template

**Review Agent** (parallel execution, 7 instances):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Execute ${dimension} review analysis via Deep Scan`,
  prompt=`
## Task Objective
Conduct comprehensive ${dimension} code exploration and analysis using Deep Scan mode (Bash + Gemini dual-source strategy) for completed implementation in session ${sessionId}

## Analysis Mode Selection
Use **Deep Scan mode** for this review:
- Phase 1: Bash structural scan for standard patterns (classes, functions, imports)
- Phase 2: Gemini semantic analysis for design intent, non-standard patterns, ${dimension}-specific concerns
- Phase 3: Synthesis with attribution (bash-discovered vs gemini-discovered findings)

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read session metadata: ${sessionMetadataPath}
2. Read completed task summaries: bash(find ${summariesDir} -name "IMPL-*.md" -type f)
3. Get changed files: bash(cd ${workflowDir} && git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u)
4. Read review state: ${reviewStateJsonPath}
5. Execute: cat ~/.ccw/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)

## Session Context
- Session ID: ${sessionId}
- Review Dimension: ${dimension}
- Review ID: ${reviewId}
- Implementation Phase: Complete (all tests passing)
- Output Directory: ${outputDir}

## CLI Configuration
- Tool Priority: gemini → qwen → codex (fallback chain)
- Template: ~/.ccw/workflows/cli-templates/prompts/analysis/${dimensionTemplate}
- Custom Focus: ${customFocus || 'Standard dimension analysis'}
- Timeout: ${timeout}ms
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5, follow schema exactly

1. Dimension Results JSON: ${outputDir}/dimensions/${dimension}.json

**⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

Root structure MUST be array: \`[{ ... }]\` NOT \`{ ... }\`

Required top-level fields:
- dimension, review_id, analysis_timestamp (NOT timestamp/analyzed_at)
- cli_tool_used (gemini|qwen|codex), model, analysis_duration_ms
- summary (FLAT structure), findings, cross_references

Summary MUST be FLAT (NOT nested by_severity):
\`{ "total_findings": N, "critical": N, "high": N, "medium": N, "low": N, "files_analyzed": N, "lines_reviewed": N }\`

Finding required fields:
- id: format \`{dim}-{seq}-{uuid8}\` e.g., \`sec-001-a1b2c3d4\` (lowercase)
- severity: lowercase only (critical|high|medium|low)
- snippet (NOT code_snippet), impact (NOT exploit_scenario)
- metadata, iteration (0), status (pending_remediation), cross_references

2. Analysis Report: ${outputDir}/reports/${dimension}-analysis.md
   - Human-readable summary with recommendations
   - Grouped by severity: critical → high → medium → low
   - Include file:line references for all findings

3. CLI Output Log: ${outputDir}/reports/${dimension}-cli-output.txt
   - Raw CLI tool output for debugging
   - Include full analysis text

## Dimension-Specific Guidance
${getDimensionGuidance(dimension)}

## Success Criteria
- [ ] Schema obtained via cat review-dimension-results-schema.json
- [ ] All changed files analyzed for ${dimension} concerns
- [ ] All findings include file:line references with code snippets
- [ ] Severity assessment follows established criteria (see reference)
- [ ] Recommendations are actionable with code examples
- [ ] JSON output follows schema exactly
- [ ] Report is comprehensive and well-organized
`
)
```

## Deep-Dive Agent Invocation Template

**Deep-Dive Agent** (iteration execution):

```javascript
Task(
  // ... deep-dive invocation (root cause analysis and remediation planning per finding) ...
)
```

### Dimension Guidance Reference

```javascript
function getDimensionGuidance(dimension) {
  // ... guidance entries for the other dimensions ...

  performance: `
Focus Areas:
- N+1 query problems
- Inefficient algorithms (O(n²) where O(n log n) possible)
- Memory leaks
- Blocking operations on main thread
- Missing caching opportunities
- Database query optimization

Severity Criteria:
- Critical: Memory leaks, O(n²) in hot path, blocking main thread
- High: N+1 queries, missing indexes, inefficient algorithms
- Medium: Suboptimal caching, unnecessary computations, lazy loading issues
- Low: Minor optimization opportunities, redundant operations
`,

  // ... remaining dimension entries ...
}
```

### Completion Conditions

**Full Success**:
- All dimensions reviewed
- Critical findings = 0
- High findings ≤ 5
- Action: Generate final report, mark phase=complete

**Partial Success**:
- All dimensions reviewed
- Max iterations reached
- Critical/high findings remain
- Action: Generate report with warnings, recommend follow-up

### Error Handling

**Phase-Level Error Matrix**:

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Invalid path pattern | Yes | Error and exit |
| Phase 1 | No files matched | Yes | Error and exit |
| Phase 1 | Files not readable | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |

**CLI Fallback Chain**: Gemini → Qwen → Codex → degraded mode

**Fallback Triggers**:
1. HTTP 429, 5xx errors, connection timeout
2. Invalid JSON output (parse error, missing required fields)
3. Low confidence score < 0.4
4. Analysis too brief (< 100 words in report)

**Fallback Behavior** (sketched below):
- On trigger: Retry with next tool in chain
- After Codex fails: Enter degraded mode (skip analysis, log error)
- Degraded mode: Continue workflow with available results
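
A minimal sketch of that chain; `runCli(tool, prompt)` and `isDegradedResult(result)` are assumed wrappers for invoking a CLI tool and checking the trigger conditions above:

```javascript
// Hypothetical fallback runner: walk the tool chain, degrade gracefully at the end.
const CLI_CHAIN = ['gemini', 'qwen', 'codex'];

function runWithFallback(prompt) {
  for (const tool of CLI_CHAIN) {
    try {
      const result = runCli(tool, prompt);  // may throw on 429/5xx/timeout
      if (!isDegradedResult(result)) {      // parse errors, low confidence, too brief
        return { tool, result };
      }
    } catch (err) {
      console.warn(`${tool} failed (${err.message}), trying next tool...`);
    }
  }
  // Degraded mode: skip this analysis, let the workflow continue with what it has
  return { tool: null, result: null, degraded: true };
}
```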

### TodoWrite Structure

```javascript
TodoWrite({
  todos: [
    { content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Initializing" },
    { content: "Phase 2: Parallel Reviews (7 dimensions)", status: "in_progress", activeForm: "Reviewing" },
    { content: " → Security review", status: "in_progress", activeForm: "Analyzing security" },
    // ... other dimensions as sub-items
    { content: "Phase 3: Aggregation", status: "pending", activeForm: "Aggregating" },
    { content: "Phase 4: Deep-dive", status: "pending", activeForm: "Deep-diving" },
    { content: "Phase 5: Completion", status: "pending", activeForm: "Completing" }
  ]
});
```

## Best Practices

1. **Start Specific**: Begin with focused module patterns for faster results
2. **Expand Gradually**: Add more modules based on initial findings
3. **Use Glob Wisely**: `src/auth/**` is more efficient than `src/**`, which pulls in many irrelevant files
4. **Trust Aggregation Logic**: Auto-selection based on proven heuristics
5. **Monitor Logs**: Check reports/ directory for CLI analysis insights

## Related Commands

### View Review Progress
Use `ccw view` to open the review dashboard in browser:

```bash
ccw view
```

### Automated Fix Workflow
After completing a module review, use the generated findings JSON for automated fixing:

```bash
# Step 1: Complete review (this command)
/workflow:review-module-cycle src/auth/**

# Step 2: Run automated fixes using dimension findings
/workflow:review-cycle-fix .workflow/active/WFS-{session-id}/.review/
```

See `/workflow:review-cycle-fix` for automated fixing with smart grouping, parallel execution, and test verification.

Return to orchestrator, then auto-continue to [Phase 3: Aggregation](03-aggregation.md).

# Workflow Review-Session-Cycle Command

## Quick Start

```bash
# Execute comprehensive session review (all 7 dimensions)
/workflow:review-session-cycle

# Review specific session with custom dimensions
/workflow:review-session-cycle WFS-payment-integration --dimensions=security,architecture,quality

# Specify session and iteration limit
/workflow:review-session-cycle WFS-payment-integration --max-iterations=5
```

**Review Scope**: Git changes from session creation to present (via `git log --since`)
**Session Requirement**: Requires active or completed workflow session
**Output Directory**: `.workflow/active/WFS-{session-id}/.review/` (session-based)
**Default Dimensions**: Security, Architecture, Quality, Action-Items, Performance, Maintainability, Best-Practices
**Max Iterations**: 3 (adjustable via --max-iterations)
**Default Iterations**: 1 (deep-dive runs once; use --max-iterations=0 to skip)
**CLI Tools**: Gemini → Qwen → Codex (fallback chain)

## What & Why

### Core Concept
Session-based multi-dimensional code review orchestrator with **hybrid parallel-iterative execution** for comprehensive quality assessment of **git changes within a workflow session**.

**Review Scope**:
- **Session-based**: Reviews only files changed during the workflow session (via `git log --since="${sessionCreatedAt}"`)
- **For independent module review**: Use `/workflow:review-module-cycle` command instead

**vs Standard Review**:
- **Standard**: Sequential manual reviews → Inconsistent coverage → Missed cross-cutting concerns
- **Review-Session-Cycle**: **Parallel automated analysis → Aggregate findings → Deep-dive critical issues** → Comprehensive coverage

### Value Proposition
1. **Comprehensive Coverage**: 7 specialized dimensions analyze all quality aspects simultaneously
2. **Intelligent Prioritization**: Automatic identification of critical issues and cross-cutting concerns
3. **Actionable Insights**: Deep-dive iterations provide step-by-step remediation plans

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for comprehensive multi-dimensional review
- Manages: dimension coordination, aggregation, iteration control, progress tracking
- Delegates: Code exploration and analysis to @cli-explore-agent, dimension-specific reviews via Deep Scan mode

## How It Works

### Execution Flow (Simplified)

```
Phase 1: Discovery & Initialization
  └─ Validate session, initialize state, create output structure

Phase 2: Parallel Reviews (for each dimension)
  ├─ Launch 7 review agents simultaneously
  ├─ Each executes CLI analysis via Gemini/Qwen
  ├─ Generate dimension JSON + markdown reports
  └─ Update review-progress.json

Phase 3: Aggregation
  ├─ Load all dimension JSON files
  ├─ Calculate severity distribution (critical/high/medium/low)
  ├─ Identify cross-cutting concerns (files in 3+ dimensions)
  └─ Decision:
      ├─ Critical findings OR high > 5 OR critical files → Phase 4 (Iterate)
      └─ Else → Phase 5 (Complete)

Phase 4: Iterative Deep-Dive (optional)
  ├─ Select critical findings (max 5 per iteration)
  ├─ Launch deep-dive agents for root cause analysis
  ├─ Generate remediation plans with impact assessment
  ├─ Re-assess severity based on analysis
  └─ Loop until no critical findings OR max iterations

Phase 5: Completion
  └─ Finalize review-progress.json
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Phase control, session discovery, state management, aggregation logic, iteration control |
| **@cli-explore-agent** (Review) | Execute dimension-specific code analysis via Deep Scan mode, generate findings JSON with dual-source strategy (Bash + Gemini), create structured analysis reports |
| **@cli-explore-agent** (Deep-dive) | Focused root cause analysis using dependency mapping, remediation planning with architectural insights, impact assessment, severity re-assessment |

## Enhanced Features

### 1. Review Dimensions Configuration

**7 Specialized Dimensions** with priority-based allocation:

| Dimension | Template | Priority | Timeout |
|-----------|----------|----------|---------|
| **Security** | 03-assess-security-risks.txt | 1 (Critical) | 60min |
| **Architecture** | 02-review-architecture.txt | 2 (High) | 60min |
| **Quality** | 02-review-code-quality.txt | 3 (Medium) | 40min |
| **Action-Items** | 02-analyze-code-patterns.txt | 2 (High) | 40min |
| **Performance** | 03-analyze-performance.txt | 3 (Medium) | 60min |
| **Maintainability** | 02-review-code-quality.txt* | 3 (Medium) | 40min |
| **Best-Practices** | 03-review-quality-standards.txt | 3 (Medium) | 40min |

*Custom focus: "Assess technical debt and maintainability"

**Category Definitions by Dimension**:

```javascript
const CATEGORIES = {
  security: ['injection', 'authentication', 'authorization', 'encryption', 'input-validation', 'access-control', 'data-exposure'],
  architecture: ['coupling', 'cohesion', 'layering', 'dependency', 'pattern-violation', 'scalability', 'separation-of-concerns'],
  quality: ['code-smell', 'duplication', 'complexity', 'naming', 'error-handling', 'testability', 'readability'],
  'action-items': ['requirement-coverage', 'acceptance-criteria', 'documentation', 'deployment-readiness', 'missing-functionality'],
  performance: ['n-plus-one', 'inefficient-query', 'memory-leak', 'blocking-operation', 'caching', 'resource-usage'],
  maintainability: ['technical-debt', 'magic-number', 'long-method', 'large-class', 'dead-code', 'commented-code'],
  'best-practices': ['convention-violation', 'anti-pattern', 'deprecated-api', 'missing-validation', 'inconsistent-style']
};
```

### 2. Aggregation Logic

**Cross-Cutting Concern Detection**:
1. Files appearing in 3+ dimensions = **Critical Files**
2. Same issue pattern across dimensions = **Systemic Issue**
3. Severity clustering in specific files = **Hotspots**

**Deep-Dive Selection Criteria**:
- All critical severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (prevent overwhelm)

### 3. Severity Assessment

**Severity Levels**:
- **Critical**: Security vulnerabilities, data corruption risks, system-wide failures, authentication/authorization bypass
- **High**: Feature degradation, performance bottlenecks, architecture violations, significant technical debt
- **Medium**: Code smells, minor performance issues, style inconsistencies, maintainability concerns
- **Low**: Documentation gaps, minor refactoring opportunities, cosmetic issues

**Iteration Trigger**:
- Critical findings > 0 OR
- High findings > 5 OR
- Critical files count > 0

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**

**Step 1: Session Discovery**
```javascript
// If session ID not provided, auto-detect
if (!providedSessionId) {
  // Check for active sessions
  const activeSessions = Glob('.workflow/active/WFS-*');
  if (activeSessions.length === 1) {
    sessionId = activeSessions[0].match(/WFS-[^/]+/)[0];
  } else if (activeSessions.length > 1) {
    // List sessions and prompt user
    error("Multiple active sessions found. Please specify session ID.");
  } else {
    error("No active session found. Create session first with /workflow:session:start");
  }
} else {
  sessionId = providedSessionId;
}

// Validate session exists
Bash(`test -d .workflow/active/${sessionId} && echo "EXISTS"`);
```

**Step 2: Session Validation**
- Ensure session has implementation artifacts (check `.summaries/` or `.task/` directory)
- Extract session creation timestamp from `workflow-session.json`
- Use timestamp for git log filtering: `git log --since="${sessionCreatedAt}"`

**Step 3: Changed Files Detection**
```bash
# Get files changed since session creation
git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u
```

**Step 4: Output Directory Setup**
- Output directory: `.workflow/active/${sessionId}/.review/`
- Create directory structure:
```bash
mkdir -p ${sessionDir}/.review/{dimensions,iterations,reports}
```

**Step 5: Initialize Review State**
- State initialization: Create `review-state.json` with metadata, dimensions, max_iterations (merged metadata + state)
- Progress tracking: Create `review-progress.json` for progress tracking

**Step 6: TodoWrite Initialization**
- Set up progress tracking with hierarchical structure
- Mark Phase 1 completed, Phase 2 in_progress

**Phase 2: Parallel Review Coordination**
- Launch 7 @cli-explore-agent instances simultaneously (Deep Scan mode)
- Pass dimension-specific context (template, timeout, custom focus)
- Monitor completion via review-progress.json updates
- TodoWrite updates: Mark dimensions as completed
- CLI tool fallback: Gemini → Qwen → Codex (on error/timeout)

**Phase 3: Aggregation**
- Load all dimension JSON files from dimensions/
- Calculate severity distribution: Count by critical/high/medium/low
- Identify cross-cutting concerns: Files in 3+ dimensions
- Select deep-dive findings: Critical + high in critical files (max 5)
- Decision logic: Iterate if critical > 0 OR high > 5 OR critical files exist
- Update review-state.json with aggregation results

**Phase 4: Iteration Control**
- Check iteration count < max_iterations (default 3)
- Launch deep-dive agents for selected findings
- Collect remediation plans and re-assessed severities
- Update severity distribution based on re-assessments
- Record iteration in review-state.json
- Loop back to aggregation if critical/high findings remain

**Phase 5: Completion**
- Finalize review-progress.json with completion statistics
- Update review-state.json with completion_time and phase=complete
- TodoWrite completion: Mark all tasks done

### Session File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json          # Orchestrator state machine (includes metadata)
├── review-progress.json       # Real-time progress for dashboard
├── dimensions/                # Per-dimension results
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                # Deep-dive results
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
└── reports/                   # Human-readable reports
    ├── security-analysis.md
    ├── security-cli-output.txt
    ├── deep-dive-1-{uuid}.md
    └── ...
```

**Session Context**:
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
├── .summaries/
└── .review/                   # Review results (this command)
    └── (structure above)
```

### Review State JSON

**Purpose**: Unified state machine and metadata (merged from metadata + state)

```json
{
  "session_id": "WFS-payment-integration",
  "review_id": "review-20250125-143022",
  "review_type": "session",
  "metadata": {
    "created_at": "2025-01-25T14:30:22Z",
    "git_changes": {
      "commit_range": "abc123..def456",
      "files_changed": 15,
      "insertions": 342,
      "deletions": 128
    },
    "dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
    "max_iterations": 3
  },
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
  "selected_strategy": "comprehensive",
  "next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
  "severity_distribution": {
    "critical": 2,
    "high": 5,
    "medium": 12,
    "low": 8
  },
  "critical_files": [
    {
      "file": "src/payment/processor.ts",
      "finding_count": 5,
      "dimensions": ["security", "architecture", "quality"]
    }
  ],
  "iterations": [
    {
      "iteration": 1,
      "findings_analyzed": ["uuid-1", "uuid-2"],
      "findings_resolved": 1,
      "findings_escalated": 1,
      "severity_change": {
        "before": {"critical": 2, "high": 5, "medium": 12, "low": 8},
        "after": {"critical": 1, "high": 6, "medium": 12, "low": 8}
      },
      "timestamp": "2025-01-25T14:30:00Z"
    }
  ],
  "completion_criteria": {
    "target": "no_critical_findings_and_high_under_5",
    "current_status": "in_progress",
    "estimated_completion": "2 iterations remaining"
  }
}
```

**Field Descriptions**:
- `phase`: Current execution phase (state machine pointer)
- `current_iteration`: Iteration counter (used for max check)
- `next_action`: Next step orchestrator should execute
- `severity_distribution`: Aggregated counts across all dimensions
- `critical_files`: Files appearing in 3+ dimensions with metadata
- `iterations[]`: Historical log for trend analysis
### Review Progress JSON
|
||||
|
||||
**Purpose**: Real-time dashboard updates via polling
|
||||
|
||||
```json
|
||||
{
|
||||
"review_id": "review-20250125-143022",
|
||||
"last_update": "2025-01-25T14:35:10Z",
|
||||
"phase": "parallel|aggregate|iterate|complete",
|
||||
"current_iteration": 1,
|
||||
"progress": {
|
||||
"parallel_review": {
|
||||
"total_dimensions": 7,
|
||||
"completed": 5,
|
||||
"in_progress": 2,
|
||||
"percent_complete": 71
|
||||
},
|
||||
"deep_dive": {
|
||||
"total_findings": 6,
|
||||
"analyzed": 2,
|
||||
"in_progress": 1,
|
||||
"percent_complete": 33
|
||||
}
|
||||
},
|
||||
"agent_status": [
|
||||
{
|
||||
"agent_type": "review-agent",
|
||||
"dimension": "security",
|
||||
"status": "completed",
|
||||
"started_at": "2025-01-25T14:30:00Z",
|
||||
"completed_at": "2025-01-25T15:15:00Z",
|
||||
"duration_ms": 2700000
|
||||
},
|
||||
{
|
||||
"agent_type": "deep-dive-agent",
|
||||
"finding_id": "sec-001-uuid",
|
||||
"status": "in_progress",
|
||||
"started_at": "2025-01-25T14:32:00Z"
|
||||
}
|
||||
],
|
||||
"estimated_completion": "2025-01-25T16:00:00Z"
|
||||
}
|
||||
```
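
A dashboard can consume this file with plain polling. A minimal Node sketch; the file path and interval are illustrative assumptions, not fixed by the workflow:

```javascript
// Sketch: poll the progress JSON and print overall completion.
const fs = require('fs');

const progressPath = '.review-cycle/review-progress.json'; // illustrative path

setInterval(() => {
  let progress;
  try {
    progress = JSON.parse(fs.readFileSync(progressPath, 'utf8'));
  } catch {
    return; // file missing or mid-write; try again on the next tick
  }
  const { parallel_review, deep_dive } = progress.progress;
  console.log(
    `[${progress.phase}] reviews ${parallel_review.percent_complete}% | ` +
    `deep-dive ${deep_dive.percent_complete}% (updated ${progress.last_update})`
  );
}, 2000);
```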

### Agent Output Schemas

**Agent-produced JSON files follow standardized schemas**:

1. **Dimension Results** (cli-explore-agent output from parallel reviews)
   - Schema: `~/.ccw/workflows/cli-templates/schemas/review-dimension-results-schema.json`
   - Output: `.review-cycle/dimensions/{dimension}.json`
   - Contains: findings array, summary statistics, cross_references

2. **Deep-Dive Results** (cli-explore-agent output from iterations)
   - Schema: `~/.ccw/workflows/cli-templates/schemas/review-deep-dive-results-schema.json`
   - Output: `.review-cycle/iterations/iteration-{N}-finding-{uuid}.json`
   - Contains: root_cause, remediation_plan, impact_assessment, reassessed_severity

### Agent Invocation Template

**Review Agent** (parallel execution, 7 instances):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Execute ${dimension} review analysis via Deep Scan`,
  prompt=`
## Task Objective
Conduct comprehensive ${dimension} code exploration and analysis using Deep Scan mode (Bash + Gemini dual-source strategy) for completed implementation in session ${sessionId}

## Analysis Mode Selection
Use **Deep Scan mode** for this review:
- Phase 1: Bash structural scan for standard patterns (classes, functions, imports)
- Phase 2: Gemini semantic analysis for design intent, non-standard patterns, ${dimension}-specific concerns
- Phase 3: Synthesis with attribution (bash-discovered vs gemini-discovered findings)

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read session metadata: ${sessionMetadataPath}
2. Read completed task summaries: bash(find ${summariesDir} -name "IMPL-*.md" -type f)
3. Get changed files: bash(cd ${workflowDir} && git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u)
4. Read review state: ${reviewStateJsonPath}
5. Execute: cat ~/.ccw/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)

## Session Context
- Session ID: ${sessionId}
- Review Dimension: ${dimension}
- Review ID: ${reviewId}
- Implementation Phase: Complete (all tests passing)
- Output Directory: ${outputDir}

## CLI Configuration
- Tool Priority: gemini → qwen → codex (fallback chain)
- Template: ~/.ccw/workflows/cli-templates/prompts/analysis/${dimensionTemplate}
- Custom Focus: ${customFocus || 'Standard dimension analysis'}
- Timeout: ${timeout}ms
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5; follow schema exactly

1. Dimension Results JSON: ${outputDir}/dimensions/${dimension}.json

**⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

Root structure MUST be array: \`[{ ... }]\` NOT \`{ ... }\`

Required top-level fields:
- dimension, review_id, analysis_timestamp (NOT timestamp/analyzed_at)
- cli_tool_used (gemini|qwen|codex), model, analysis_duration_ms
- summary (FLAT structure), findings, cross_references

Summary MUST be FLAT (NOT nested by_severity):
\`{ "total_findings": N, "critical": N, "high": N, "medium": N, "low": N, "files_analyzed": N, "lines_reviewed": N }\`

Finding required fields:
- id: format \`{dim}-{seq}-{uuid8}\`, e.g. \`sec-001-a1b2c3d4\` (lowercase)
- severity: lowercase only (critical|high|medium|low)
- snippet (NOT code_snippet), impact (NOT exploit_scenario)
- metadata, iteration (0), status (pending_remediation), cross_references

2. Analysis Report: ${outputDir}/reports/${dimension}-analysis.md
   - Human-readable summary with recommendations
   - Grouped by severity: critical → high → medium → low
   - Include file:line references for all findings

3. CLI Output Log: ${outputDir}/reports/${dimension}-cli-output.txt
   - Raw CLI tool output for debugging
   - Include full analysis text

## Dimension-Specific Guidance
${getDimensionGuidance(dimension)}

## Success Criteria
- [ ] Schema obtained via cat review-dimension-results-schema.json
- [ ] All changed files analyzed for ${dimension} concerns
- [ ] All findings include file:line references with code snippets
- [ ] Severity assessment follows established criteria (see reference)
- [ ] Recommendations are actionable with code examples
- [ ] JSON output follows schema exactly
- [ ] Report is comprehensive and well-organized
`
)
```
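
Because agents occasionally drift from the schema, the orchestrator can cheaply pre-check the structural requirements above before aggregation. A minimal sketch covering only the constraints spelled out in the prompt, not the full schema:

```javascript
// Sketch: structural pre-check of a parsed dimension results file.
function validateDimensionResults(parsed) {
  const errors = [];
  if (!Array.isArray(parsed)) errors.push('root must be an array: [{ ... }]');
  const result = Array.isArray(parsed) ? parsed[0] : undefined;
  if (result) {
    for (const key of ['dimension', 'review_id', 'analysis_timestamp',
                       'cli_tool_used', 'summary', 'findings', 'cross_references']) {
      if (!(key in result)) errors.push(`missing top-level field: ${key}`);
    }
    if (result.summary && 'by_severity' in result.summary) {
      errors.push('summary must be FLAT, not nested by_severity');
    }
    for (const f of result.findings ?? []) {
      // id format: {dim}-{seq}-{uuid8}, e.g. sec-001-a1b2c3d4
      if (!/^[a-z]+-\d{3}-[0-9a-f]{8}$/.test(f.id ?? '')) errors.push(`bad finding id: ${f.id}`);
      if (!['critical', 'high', 'medium', 'low'].includes(f.severity)) errors.push(`bad severity: ${f.severity}`);
      if ('code_snippet' in f) errors.push('use "snippet", not "code_snippet"');
    }
  }
  return errors; // empty array = structurally sound
}
```

A non-empty error list would be a natural trigger for the CLI fallback chain described under Error Handling below.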

**Deep-Dive Agent** (iteration execution):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Deep-dive analysis for critical finding: ${findingTitle} via Dependency Map + Deep Scan`,
  prompt=`
## Task Objective
Perform focused root cause analysis using Dependency Map mode (for impact analysis) + Deep Scan mode (for semantic understanding) to generate a comprehensive remediation plan for this critical ${dimension} issue

## Analysis Mode Selection
Use **Dependency Map mode** first to understand dependencies:
- Build dependency graph around ${file} to identify affected components
- Detect circular dependencies or tight coupling related to this finding
- Calculate change risk scores for remediation impact

Then apply **Deep Scan mode** for semantic analysis:
- Understand design intent and architectural context
- Identify non-standard patterns or implicit dependencies
- Extract remediation insights from code structure

## Finding Context
- Finding ID: ${findingId}
- Original Dimension: ${dimension}
- Title: ${findingTitle}
- File: ${file}:${line}
- Severity: ${severity}
- Category: ${category}
- Original Description: ${description}
- Iteration: ${iteration}

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read original finding: ${dimensionJsonPath}
2. Read affected file: ${file}
3. Identify related code: bash(grep -r "import.*${basename(file)}" ${workflowDir}/src --include="*.ts")
4. Read test files: bash(find ${workflowDir}/tests -name "*${basename(file, '.ts')}*" -type f)
5. Execute: cat ~/.ccw/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)

## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Template: ~/.ccw/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt
- Timeout: 2400000ms (40 minutes)
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5; follow schema exactly

1. Deep-Dive Results JSON: ${outputDir}/iterations/iteration-${iteration}-finding-${findingId}.json

**⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

Root structure MUST be array: \`[{ ... }]\` NOT \`{ ... }\`

Required top-level fields:
- finding_id, dimension, iteration, analysis_timestamp
- cli_tool_used, model, analysis_duration_ms
- original_finding, root_cause, remediation_plan
- impact_assessment, reassessed_severity, confidence_score, cross_references

All nested objects must follow the schema exactly - read the schema for field names

2. Analysis Report: ${outputDir}/reports/deep-dive-${iteration}-${findingId}.md
   - Detailed root cause analysis
   - Step-by-step remediation plan
   - Impact assessment and rollback strategy

## Success Criteria
- [ ] Schema obtained via cat review-deep-dive-results-schema.json
- [ ] Root cause clearly identified with supporting evidence
- [ ] Remediation plan is step-by-step actionable with exact file:line references
- [ ] Each step includes specific commands and validation tests
- [ ] Impact fully assessed (files, tests, breaking changes, dependencies)
- [ ] Severity re-evaluation justified with evidence
- [ ] Confidence score accurately reflects certainty of analysis
- [ ] JSON output follows schema exactly
- [ ] References include project-specific and external documentation
`
)
```
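
The templates leave target selection to the orchestrator. A plausible sketch, assuming `state` is the Review State JSON above and `findings` is the merged findings list; selecting critical findings and hot files first while respecting `max_iterations`:

```javascript
// Sketch: choose which findings get a deep-dive agent this iteration.
function selectDeepDiveTargets(state, findings, limit = 5) {
  if (state.current_iteration >= state.metadata.max_iterations) return [];

  const hotFiles = new Set(state.critical_files.map((f) => f.file));
  return findings
    .filter((f) => f.status === 'pending_remediation')
    .filter((f) => f.severity === 'critical' || hotFiles.has(f.file))
    // Critical findings ahead of merely "hot file" findings
    .sort((a, b) => Number(b.severity === 'critical') - Number(a.severity === 'critical'))
    .slice(0, limit); // bound the number of parallel deep-dive agents
}
```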

### Dimension Guidance Reference

```javascript
function getDimensionGuidance(dimension) {
  const guidance = {
    security: `
Focus Areas:
- Input validation and sanitization
- Authentication and authorization mechanisms
- Data encryption (at-rest and in-transit)
- SQL/NoSQL injection vulnerabilities
- XSS, CSRF, and other web vulnerabilities
- Sensitive data exposure
- Access control and privilege escalation

Severity Criteria:
- Critical: Authentication bypass, SQL injection, RCE, sensitive data exposure
- High: Missing authorization checks, weak encryption, exposed secrets
- Medium: Missing input validation, insecure defaults, weak password policies
- Low: Security headers missing, verbose error messages, outdated dependencies
`,
    architecture: `
Focus Areas:
- Layering and separation of concerns
- Coupling and cohesion
- Design pattern adherence
- Dependency management
- Scalability and extensibility
- Module boundaries
- API design consistency

Severity Criteria:
- Critical: Circular dependencies, god objects, tight coupling across layers
- High: Violated architectural principles, scalability bottlenecks
- Medium: Missing abstractions, inconsistent patterns, suboptimal design
- Low: Minor coupling issues, documentation gaps, naming inconsistencies
`,
    quality: `
Focus Areas:
- Code duplication
- Complexity (cyclomatic, cognitive)
- Naming conventions
- Error handling patterns
- Code readability
- Comment quality
- Dead code

Severity Criteria:
- Critical: Severe complexity (CC > 20), massive duplication (>50 lines)
- High: High complexity (CC > 10), significant duplication, poor error handling
- Medium: Moderate complexity (CC > 5), naming issues, code smells
- Low: Minor duplication, documentation gaps, cosmetic issues
`,
    'action-items': `
Focus Areas:
- Requirements coverage verification
- Acceptance criteria met
- Documentation completeness
- Deployment readiness
- Missing functionality
- Test coverage gaps
- Configuration management

Severity Criteria:
- Critical: Core requirements not met, deployment blockers
- High: Significant functionality missing, acceptance criteria not met
- Medium: Minor requirements gaps, documentation incomplete
- Low: Nice-to-have features missing, minor documentation gaps
`,
    performance: `
Focus Areas:
- N+1 query problems
- Inefficient algorithms (O(n²) where O(n log n) possible)
- Memory leaks
- Blocking operations on main thread
- Missing caching opportunities
- Resource usage (CPU, memory, network)
- Database query optimization

Severity Criteria:
- Critical: Memory leaks, O(n²) in hot path, blocking main thread
- High: N+1 queries, missing indexes, inefficient algorithms
- Medium: Suboptimal caching, unnecessary computations, lazy loading issues
- Low: Minor optimization opportunities, redundant operations
`,
    maintainability: `
Focus Areas:
- Technical debt indicators
- Magic numbers and hardcoded values
- Long methods (>50 lines)
- Large classes (>500 lines)
- Dead code and commented code
- Code documentation
- Test coverage

Severity Criteria:
- Critical: Massive methods (>200 lines), severe technical debt blocking changes
- High: Large methods (>100 lines), significant dead code, undocumented complex logic
- Medium: Magic numbers, moderate technical debt, missing tests
- Low: Minor refactoring opportunities, cosmetic improvements
`,
    'best-practices': `
Focus Areas:
- Framework conventions adherence
- Language idioms
- Anti-patterns
- Deprecated API usage
- Coding standards compliance
- Error handling patterns
- Logging and monitoring

Severity Criteria:
- Critical: Severe anti-patterns, deprecated APIs with security risks
- High: Major convention violations, poor error handling, missing logging
- Medium: Minor anti-patterns, style inconsistencies, suboptimal patterns
- Low: Cosmetic style issues, minor convention deviations
`
  };

  return guidance[dimension] || 'Standard code review analysis';
}
```

### Completion Conditions

**Full Success**:
- All dimensions reviewed
- Critical findings = 0
- High findings ≤ 5
- Action: Generate final report, mark phase=complete

**Partial Success**:
- All dimensions reviewed
- Max iterations reached
- Still have critical/high findings
- Action: Generate report with warnings, recommend follow-up
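
Both outcomes reduce to a small decision function over the severity distribution. A sketch consistent with the criteria above:

```javascript
// Sketch: decide the next step from the current review state.
function evaluateCompletion(state) {
  const { critical, high } = state.severity_distribution;
  if (critical === 0 && high <= 5) return 'complete'; // full success
  if (state.current_iteration >= state.metadata.max_iterations) {
    return 'partial'; // generate report with warnings, recommend follow-up
  }
  return 'iterate'; // schedule another deep-dive round
}
```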

### Error Handling

**Phase-Level Error Matrix**:

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Session not found | Yes | Error and exit |
| Phase 1 | No completed tasks | Yes | Error and exit |
| Phase 1 | No changed files | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |

**CLI Fallback Chain**: Gemini → Qwen → Codex → degraded mode

**Fallback Triggers**:
1. HTTP 429, 5xx errors, connection timeout
2. Invalid JSON output (parse error, missing required fields)
3. Confidence score below 0.4
4. Analysis too brief (< 100 words in report)

**Fallback Behavior**:
- On trigger: Retry with the next tool in the chain
- After Codex fails: Enter degraded mode (skip analysis, log error)
- Degraded mode: Continue workflow with available results
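
A sketch of the retry chain, assuming a hypothetical `runCli(tool, prompt)` helper and treating each listed trigger as a thrown error; the report-length check is approximated on a `report` field for illustration:

```javascript
// Sketch: walk the fallback chain until one tool produces usable output.
async function runWithFallback(prompt, tools = ['gemini', 'qwen', 'codex']) {
  for (const tool of tools) {
    try {
      const output = await runCli(tool, prompt); // hypothetical helper; trigger 1 throws here
      const result = JSON.parse(output);         // trigger 2: invalid JSON throws
      if ((result.confidence_score ?? 1) < 0.4) throw new Error('low confidence');               // trigger 3
      if ((result.report ?? '').split(/\s+/).length < 100) throw new Error('analysis too brief'); // trigger 4
      return { tool, result };
    } catch (err) {
      console.warn(`${tool} failed (${err.message}); trying next tool`);
    }
  }
  return { tool: null, result: null }; // degraded mode: continue with available results
}
```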

### TodoWrite Structure

```javascript
TodoWrite({
  todos: [
    { content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Initializing" },
    { content: "Phase 2: Parallel Reviews (7 dimensions)", status: "in_progress", activeForm: "Reviewing" },
    { content: " → Security review", status: "in_progress", activeForm: "Analyzing security" },
    // ... other dimensions as sub-items
    { content: "Phase 3: Aggregation", status: "pending", activeForm: "Aggregating" },
    { content: "Phase 4: Deep-dive", status: "pending", activeForm: "Deep-diving" },
    { content: "Phase 5: Completion", status: "pending", activeForm: "Completing" }
  ]
});
```

## Best Practices

1. **Default Settings Work**: 7 dimensions + 3 iterations are sufficient for most cases
2. **Parallel Execution**: Expect ~60 minutes for a full initial review (7 dimensions)
3. **Trust the Aggregation Logic**: Auto-selection is based on proven heuristics
4. **Monitor Logs**: Check the reports/ directory for CLI analysis insights

## Related Commands

### View Review Progress
Use `ccw view` to open the review dashboard in the browser:

```bash
ccw view
```

### Automated Fix Workflow
After completing a review, use the generated findings JSON for automated fixing:

```bash
# Step 1: Complete review (this command)
/workflow:review-session-cycle

# Step 2: Run automated fixes using dimension findings
/workflow:review-cycle-fix .workflow/active/WFS-{session-id}/.review/
```

See `/workflow:review-cycle-fix` for automated fixing with smart grouping, parallel execution, and test verification.

@@ -169,7 +169,7 @@ TaskCreate({ subject: "DISCUSS-005: 执行计划与MVP范围讨论", description
 TaskUpdate({ taskId: discuss5Id, owner: "discussant", addBlockedBy: [draft4Id] })
 
 // QUALITY-001: Readiness Check (blockedBy DISCUSS-005)
-TaskCreate({ subject: "QUALITY-001: 规格就绪度检查", description: `全文档交叉验证和质量评分\n\nSession: ${specSessionFolder}\n输入: 全部文档\n输出: ${specSessionFolder}/readiness-report.md + spec-summary.md\n\n评分维度: 完整性(25%) + 一致性(25%) + 可追溯性(25%) + 深度(25%)`, activeForm: "质量检查中" })
+TaskCreate({ subject: "QUALITY-001: 规格就绪度检查", description: `全文档交叉验证和质量评分\n\nSession: ${specSessionFolder}\n输入: 全部文档\n输出: ${specSessionFolder}/readiness-report.md + spec-summary.md\n\n评分维度: 完整性(20%) + 一致性(20%) + 可追溯性(20%) + 深度(20%) + 需求覆盖率(20%)`, activeForm: "质量检查中" })
 TaskUpdate({ taskId: qualityId, owner: "reviewer", addBlockedBy: [discuss5Id] })
 
 // DISCUSS-006: 最终签收 (blockedBy QUALITY-001)

@@ -217,7 +217,7 @@ Receive teammate messages and make dispatch decisions. **Before each decision: `
 
 | Received Message | Action |
 |-----------------|--------|
-| Analyst: research_ready | Read discovery-context.json → team_msg log → TaskUpdate RESEARCH completed (auto-unblocks DISCUSS-001) |
+| Analyst: research_ready | Read discovery-context.json → **用户确认检查点** → team_msg log → TaskUpdate RESEARCH completed (auto-unblocks DISCUSS-001) |
 | Discussant: discussion_ready | Read discussion.md → judge if revision needed → unblock next DRAFT task |
 | Discussant: discussion_blocked | Intervene → AskUserQuestion for user decision → write decision to discussion record → manually unblock |
 | Writer: draft_ready | Read document summary → team_msg log → TaskUpdate DRAFT completed (auto-unblocks next DISCUSS) |

@@ -242,6 +242,46 @@ Receive teammate messages and make dispatch decisions. **Before each decision: `
 
 When DISCUSS-006 completes in full-lifecycle mode, PLAN-001 is auto-unblocked via the dependency chain.
 
+#### Research Confirmation Checkpoint
+
+When receiving `research_ready` from analyst, confirm extracted requirements with user before unblocking:
+
+```javascript
+if (msgType === 'research_ready') {
+  const discoveryContext = JSON.parse(Read(`${specSessionFolder}/discovery-context.json`))
+  const dimensions = discoveryContext.seed_analysis?.exploration_dimensions || []
+  const constraints = discoveryContext.seed_analysis?.constraints || []
+  const problemStatement = discoveryContext.seed_analysis?.problem_statement || ''
+
+  // Present extracted requirements for user confirmation
+  AskUserQuestion({
+    questions: [{
+      question: `研究阶段提取到以下需求,请确认是否完整:\n\n**问题定义**: ${problemStatement}\n**探索维度**: ${dimensions.join('、')}\n**约束条件**: ${constraints.join('、')}\n\n是否有遗漏?`,
+      header: "需求确认",
+      multiSelect: false,
+      options: [
+        { label: "确认完整", description: "提取的需求已覆盖所有关键点,继续推进" },
+        { label: "需要补充", description: "有遗漏的需求,我来补充" },
+        { label: "需要重新研究", description: "提取方向有偏差,重新执行研究" }
+      ]
+    }]
+  })
+
+  if (userChoice === '需要补充') {
+    // User provides additional requirements via free text
+    // Merge into discovery-context.json, then unblock DISCUSS-001
+    discoveryContext.seed_analysis.user_supplements = userInput
+    Write(`${specSessionFolder}/discovery-context.json`, JSON.stringify(discoveryContext, null, 2))
+  } else if (userChoice === '需要重新研究') {
+    // Reset RESEARCH-001 to pending, notify analyst
+    TaskUpdate({ taskId: researchId, status: 'pending' })
+    team_msg({ type: 'fix_required', summary: 'User requests re-research with revised scope' })
+    return // Do not unblock DISCUSS-001
+  }
+  // '确认完整' → proceed normally: TaskUpdate RESEARCH completed
+}
+```
+
 #### Discussion Blocked Handling
 
 ```javascript

@@ -51,17 +51,18 @@ Each discussion round analyzes from 4 perspectives:
 | **Technical** | Feasibility, tech debt, performance, security, maintainability | Tech Lead |
 | **Quality** | Completeness, testability, consistency, standards compliance | QA Lead |
 | **Risk** | Risk identification, dependency analysis, assumption validation, failure modes | Risk Analyst |
+| **Coverage** | Requirement completeness vs original intent, scope drift, gap detection | Requirements Analyst |
 
 ## Discussion Round Configuration
 
 | Round | Artifact | Key Perspectives | Focus |
 |-------|----------|-----------------|-------|
-| DISCUSS-001 | discovery-context | product + risk | Scope confirmation, direction |
-| DISCUSS-002 | product-brief | product + technical + quality | Positioning, feasibility |
-| DISCUSS-003 | requirements | quality + product | Completeness, priority |
+| DISCUSS-001 | discovery-context | product + risk + **coverage** | Scope confirmation, direction, initial coverage check |
+| DISCUSS-002 | product-brief | product + technical + quality + **coverage** | Positioning, feasibility, requirement coverage |
+| DISCUSS-003 | requirements | quality + product + **coverage** | Completeness, priority, gap detection |
 | DISCUSS-004 | architecture | technical + risk | Tech choices, security |
-| DISCUSS-005 | epics | product + technical + quality | MVP scope, estimation |
-| DISCUSS-006 | readiness-report | all 4 perspectives | Final sign-off |
+| DISCUSS-005 | epics | product + technical + quality + **coverage** | MVP scope, estimation, requirement tracing |
+| DISCUSS-006 | readiness-report | all 5 perspectives | Final sign-off |
 
 ## Execution (5-Phase)
 
@@ -91,12 +92,12 @@ const roundMatch = task.subject.match(/DISCUSS-(\d+)/)
 const roundNumber = roundMatch ? parseInt(roundMatch[1]) : 0
 
 const roundConfig = {
-  1: { artifact: 'discovery-context.json', type: 'json', outputFile: 'discuss-001-scope.md', perspectives: ['product', 'risk'], label: '范围讨论' },
-  2: { artifact: 'product-brief.md', type: 'md', outputFile: 'discuss-002-brief.md', perspectives: ['product', 'technical', 'quality'], label: 'Brief评审' },
-  3: { artifact: 'requirements/_index.md', type: 'md', outputFile: 'discuss-003-requirements.md', perspectives: ['quality', 'product'], label: '需求讨论' },
+  1: { artifact: 'discovery-context.json', type: 'json', outputFile: 'discuss-001-scope.md', perspectives: ['product', 'risk', 'coverage'], label: '范围讨论' },
+  2: { artifact: 'product-brief.md', type: 'md', outputFile: 'discuss-002-brief.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Brief评审' },
+  3: { artifact: 'requirements/_index.md', type: 'md', outputFile: 'discuss-003-requirements.md', perspectives: ['quality', 'product', 'coverage'], label: '需求讨论' },
   4: { artifact: 'architecture/_index.md', type: 'md', outputFile: 'discuss-004-architecture.md', perspectives: ['technical', 'risk'], label: '架构讨论' },
-  5: { artifact: 'epics/_index.md', type: 'md', outputFile: 'discuss-005-epics.md', perspectives: ['product', 'technical', 'quality'], label: 'Epics讨论' },
-  6: { artifact: 'readiness-report.md', type: 'md', outputFile: 'discuss-006-final.md', perspectives: ['product', 'technical', 'quality', 'risk'], label: '最终签收' }
+  5: { artifact: 'epics/_index.md', type: 'md', outputFile: 'discuss-005-epics.md', perspectives: ['product', 'technical', 'quality', 'coverage'], label: 'Epics讨论' },
+  6: { artifact: 'readiness-report.md', type: 'md', outputFile: 'discuss-006-final.md', perspectives: ['product', 'technical', 'quality', 'risk', 'coverage'], label: '最终签收' }
 }
 
 const config = roundConfig[roundNumber]
@@ -112,8 +113,9 @@ Launch parallel CLI analyses for each required perspective:
 - **Technical Perspective** (codex): Feasibility, complexity, architecture decisions, tech debt risks. Rate 1-5.
 - **Quality Perspective** (claude): Completeness, testability, consistency, ambiguity detection. Rate 1-5.
 - **Risk Perspective** (gemini): Risk identification, dependency analysis, assumption validation, failure modes. Rate risk level.
+- **Coverage Perspective** (gemini): Compare current artifact against original requirements in discovery-context.json. Identify covered_requirements[], partial_requirements[], missing_requirements[], scope_creep[]. Rate coverage 1-5. **If missing_requirements is non-empty, flag as critical divergence.**
 
-Each CLI call produces structured critique with: strengths[], weaknesses[], suggestions[], rating.
+Each CLI call produces structured critique with: strengths[], weaknesses[], suggestions[], rating. Coverage perspective additionally outputs: covered_requirements[], missing_requirements[], scope_creep[].
 
 ### Phase 4: Consensus Synthesis
 

@@ -131,6 +133,17 @@ const synthesis = {
 
 // Extract convergent themes (items mentioned positively by 2+ perspectives)
 // Extract divergent views (items where perspectives conflict)
+// Check coverage gaps from coverage perspective (if present)
+const coverageResult = perspectiveResults.find(p => p.perspective === 'coverage')
+if (coverageResult?.missing_requirements?.length > 0) {
+  synthesis.coverage_gaps = coverageResult.missing_requirements
+  synthesis.divergent_views.push({
+    topic: 'requirement_coverage_gap',
+    description: `${coverageResult.missing_requirements.length} requirements from discovery-context not covered: ${coverageResult.missing_requirements.join(', ')}`,
+    severity: 'high',
+    source: 'coverage'
+  })
+}
 // Check for unresolvable conflicts
 const criticalDivergences = synthesis.divergent_views.filter(d => d.severity === 'high')
 if (criticalDivergences.length > 0) synthesis.consensus_reached = false

@@ -222,7 +222,7 @@ function verifyRequirements(plan, fileContents) {
 
 ```javascript
 if (reviewMode === 'spec') {
-  const scores = { completeness: 0, consistency: 0, traceability: 0, depth: 0 }
+  const scores = { completeness: 0, consistency: 0, traceability: 0, depth: 0, requirementCoverage: 0 }
 
   // Completeness (25%): all sections present with content
   function scoreCompleteness(docs) {

@@ -297,19 +297,55 @@ if (reviewMode === 'spec') {
     return { score: Math.max(0, score), issues }
   }
 
+  // Requirement Coverage (20%): original requirements → document mapping
+  function scoreRequirementCoverage(docs) {
+    let score = 100
+    const issues = []
+    if (!docs.discoveryContext) {
+      return { score: 0, issues: ['discovery-context.json missing, cannot verify requirement coverage'] }
+    }
+    const context = typeof docs.discoveryContext === 'string' ? JSON.parse(docs.discoveryContext) : docs.discoveryContext
+    const dimensions = context.seed_analysis?.exploration_dimensions || []
+    const constraints = context.seed_analysis?.constraints || []
+    const userSupplements = context.seed_analysis?.user_supplements || ''
+    const allRequirements = [...dimensions, ...constraints]
+    if (userSupplements) allRequirements.push(userSupplements)
+
+    if (allRequirements.length === 0) {
+      return { score: 100, issues: [] } // No requirements to check
+    }
+
+    const allDocContent = [docs.productBrief, docs.requirementsIndex, docs.architectureIndex, docs.epicsIndex,
+      ...docs.requirements, ...docs.adrs, ...docs.epics].filter(Boolean).join('\n').toLowerCase()
+
+    let covered = 0
+    for (const req of allRequirements) {
+      const keywords = req.toLowerCase().split(/[\s,;]+/).filter(w => w.length > 2)
+      const isCovered = keywords.some(kw => allDocContent.includes(kw))
+      if (isCovered) { covered++ }
+      else { issues.push(`Requirement not covered in documents: "${req}"`) }
+    }
+
+    score = Math.round((covered / allRequirements.length) * 100)
+    return { score, issues }
+  }
+
   const completenessResult = scoreCompleteness(documents)
   const consistencyResult = scoreConsistency(documents)
   const traceabilityResult = scoreTraceability(documents)
   const depthResult = scoreDepth(documents)
+  const coverageResult = scoreRequirementCoverage(documents)
 
   scores.completeness = completenessResult.score
   scores.consistency = consistencyResult.score
   scores.traceability = traceabilityResult.score
   scores.depth = depthResult.score
+  scores.requirementCoverage = coverageResult.score
 
-  const overallScore = (scores.completeness + scores.consistency + scores.traceability + scores.depth) / 4
-  const qualityGate = overallScore >= 80 ? 'PASS' : overallScore >= 60 ? 'REVIEW' : 'FAIL'
-  const allSpecIssues = [...completenessResult.issues, ...consistencyResult.issues, ...traceabilityResult.issues, ...depthResult.issues]
+  const overallScore = (scores.completeness + scores.consistency + scores.traceability + scores.depth + scores.requirementCoverage) / 5
+  const qualityGate = (overallScore >= 80 && scores.requirementCoverage >= 70) ? 'PASS' :
+                      (overallScore < 60 || scores.requirementCoverage < 50) ? 'FAIL' : 'REVIEW'
+  const allSpecIssues = [...completenessResult.issues, ...consistencyResult.issues, ...traceabilityResult.issues, ...depthResult.issues, ...coverageResult.issues]
 }
 ```
 
@@ -346,10 +382,11 @@ version: 1
 ## Quality Scores
 | Dimension | Score | Weight |
 |-----------|-------|--------|
-| Completeness | ${scores.completeness}% | 25% |
-| Consistency | ${scores.consistency}% | 25% |
-| Traceability | ${scores.traceability}% | 25% |
-| Depth | ${scores.depth}% | 25% |
+| Completeness | ${scores.completeness}% | 20% |
+| Consistency | ${scores.consistency}% | 20% |
+| Traceability | ${scores.traceability}% | 20% |
+| Depth | ${scores.depth}% | 20% |
+| Requirement Coverage | ${scores.requirementCoverage}% | 20% |
 | **Overall** | **${overallScore.toFixed(1)}%** | **100%** |
 
 ## Quality Gate: ${qualityGate}

@@ -448,7 +485,7 @@ if (reviewMode === 'spec') {
   operation: "log", team: teamName,
   from: "reviewer", to: "coordinator",
   type: qualityGate === 'FAIL' ? "fix_required" : "quality_result",
-  summary: `质量检查 ${qualityGate}: ${overallScore.toFixed(1)}分 (完整性${scores.completeness}/一致性${scores.consistency}/追溯${scores.traceability}/深度${scores.depth})`,
+  summary: `质量检查 ${qualityGate}: ${overallScore.toFixed(1)}分 (完整性${scores.completeness}/一致性${scores.consistency}/追溯${scores.traceability}/深度${scores.depth}/覆盖率${scores.requirementCoverage})`,
   data: { gate: qualityGate, score: overallScore, issues: allSpecIssues }
 })

@@ -468,6 +505,7 @@ if (reviewMode === 'spec') {
 | 一致性 | ${scores.consistency}% |
 | 可追溯性 | ${scores.traceability}% |
 | 深度 | ${scores.depth}% |
+| 需求覆盖率 | ${scores.requirementCoverage}% |
 
 ### 问题列表 (${allSpecIssues.length})
 ${allSpecIssues.map(i => '- ' + i).join('\n') || '无问题'}

@@ -1,136 +0,0 @@
// ========================================
// BottomPanel Component
// ========================================
// Full-width collapsible bottom panel with Queue + Inspector tabs.
// Replaces the separate BottomInspector + middle-column QueuePanel layout.
// Queue tab shows inline count badge; Inspector tab shows chain indicator.

import { useState, useCallback, useMemo } from 'react';
import { useIntl } from 'react-intl';
import { ListChecks, Info, ChevronDown, ChevronUp } from 'lucide-react';
import { cn } from '@/lib/utils';
import { Badge } from '@/components/ui/Badge';
import { QueuePanel } from './QueuePanel';
import { InspectorContent } from './BottomInspector';
import { useIssueQueue } from '@/hooks/useIssues';
import {
  useIssueQueueIntegrationStore,
  selectAssociationChain,
} from '@/stores/issueQueueIntegrationStore';

// ========== Types ==========

type TabId = 'queue' | 'inspector';

// ========== Component ==========

export function BottomPanel() {
  const { formatMessage } = useIntl();
  const [isOpen, setIsOpen] = useState(false);
  const [activeTab, setActiveTab] = useState<TabId>('queue');

  const queueQuery = useIssueQueue();
  const associationChain = useIssueQueueIntegrationStore(selectAssociationChain);

  // Count queue items for badge
  const queueCount = useMemo(() => {
    if (!queueQuery.data) return 0;
    const grouped = queueQuery.data.grouped_items ?? {};
    let count = 0;
    for (const items of Object.values(grouped)) {
      count += items.length;
    }
    return count;
  }, [queueQuery.data]);

  const hasChain = associationChain !== null;

  const toggle = useCallback(() => {
    setIsOpen((prev) => !prev);
  }, []);

  const handleTabClick = useCallback((tab: TabId) => {
    setActiveTab(tab);
    setIsOpen(true);
  }, []);

  return (
    <div
      className={cn(
        'border-t border-border bg-muted/30 shrink-0 transition-all duration-200',
      )}
    >
      {/* Tab bar (always visible, ~36px) */}
      <div className="flex items-center gap-0 shrink-0">
        {/* Queue tab */}
        <button
          onClick={() => handleTabClick('queue')}
          className={cn(
            'flex items-center gap-1.5 px-3 py-1.5 text-xs transition-colors border-b-2',
            activeTab === 'queue' && isOpen
              ? 'border-b-primary text-foreground font-medium'
              : 'border-b-transparent text-muted-foreground hover:text-foreground',
          )}
        >
          <ListChecks className="w-3.5 h-3.5" />
          {formatMessage({ id: 'terminalDashboard.bottomPanel.queueTab' })}
          {queueCount > 0 && (
            <Badge variant="info" className="text-[10px] px-1.5 py-0 ml-0.5">
              {queueCount}
            </Badge>
          )}
        </button>

        {/* Inspector tab */}
        <button
          onClick={() => handleTabClick('inspector')}
          className={cn(
            'flex items-center gap-1.5 px-3 py-1.5 text-xs transition-colors border-b-2',
            activeTab === 'inspector' && isOpen
              ? 'border-b-primary text-foreground font-medium'
              : 'border-b-transparent text-muted-foreground hover:text-foreground',
          )}
        >
          <Info className="w-3.5 h-3.5" />
          {formatMessage({ id: 'terminalDashboard.bottomPanel.inspectorTab' })}
          {hasChain && (
            <span className="ml-1 w-2 h-2 rounded-full bg-primary shrink-0" />
          )}
        </button>

        {/* Collapse/expand toggle at right */}
        <button
          onClick={toggle}
          className="ml-auto px-3 py-1.5 text-muted-foreground hover:text-foreground transition-colors"
          title={formatMessage({
            id: isOpen
              ? 'terminalDashboard.bottomPanel.collapse'
              : 'terminalDashboard.bottomPanel.expand',
          })}
        >
          {isOpen ? (
            <ChevronDown className="w-3.5 h-3.5" />
          ) : (
            <ChevronUp className="w-3.5 h-3.5" />
          )}
        </button>
      </div>

      {/* Collapsible content area */}
      <div
        className={cn(
          'overflow-hidden transition-all duration-200',
          isOpen ? 'max-h-[280px] opacity-100' : 'max-h-0 opacity-0',
        )}
      >
        <div className="h-[280px] border-t border-border/50">
          {activeTab === 'queue' ? (
            <QueuePanel embedded />
          ) : (
            <InspectorContent />
          )}
        </div>
      </div>
    </div>
  );
}

@@ -0,0 +1,197 @@
// ========================================
// DashboardToolbar Component
// ========================================
// Top toolbar for Terminal Dashboard V2.
// Provides toggle buttons for floating panels (Sessions/Issues/Queue/Inspector)
// and layout preset controls.

import { useCallback, useMemo } from 'react';
import { useIntl } from 'react-intl';
import {
  FolderTree,
  AlertCircle,
  ListChecks,
  Info,
  LayoutGrid,
  Columns2,
  Rows2,
  Square,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import { Badge } from '@/components/ui/Badge';
import {
  useSessionManagerStore,
  selectGroups,
  selectTerminalMetas,
} from '@/stores/sessionManagerStore';
import {
  useIssueQueueIntegrationStore,
  selectAssociationChain,
} from '@/stores/issueQueueIntegrationStore';
import { useIssues, useIssueQueue } from '@/hooks/useIssues';
import { useTerminalGridStore } from '@/stores/terminalGridStore';
import type { TerminalStatus } from '@/types/terminal-dashboard';

// ========== Types ==========

export type PanelId = 'sessions' | 'issues' | 'queue' | 'inspector';

interface DashboardToolbarProps {
  activePanel: PanelId | null;
  onTogglePanel: (panelId: PanelId) => void;
}

// ========== Layout Presets ==========

const LAYOUT_PRESETS = [
  { id: 'single' as const, icon: Square, labelId: 'terminalDashboard.toolbar.layoutSingle' },
  { id: 'split-h' as const, icon: Columns2, labelId: 'terminalDashboard.toolbar.layoutSplitH' },
  { id: 'split-v' as const, icon: Rows2, labelId: 'terminalDashboard.toolbar.layoutSplitV' },
  { id: 'grid-2x2' as const, icon: LayoutGrid, labelId: 'terminalDashboard.toolbar.layoutGrid' },
];

// ========== Component ==========

export function DashboardToolbar({ activePanel, onTogglePanel }: DashboardToolbarProps) {
  const { formatMessage } = useIntl();

  // Session count
  const groups = useSessionManagerStore(selectGroups);
  const terminalMetas = useSessionManagerStore(selectTerminalMetas);
  const sessionCount = useMemo(() => {
    const allSessionIds = groups.flatMap((g) => g.sessionIds);
    let activeCount = 0;
    for (const sid of allSessionIds) {
      const meta = terminalMetas[sid];
      const status: TerminalStatus = meta?.status ?? 'idle';
      if (status === 'active') activeCount++;
    }
    return activeCount > 0 ? activeCount : allSessionIds.length;
  }, [groups, terminalMetas]);

  // Issues count
  const { openCount } = useIssues();

  // Queue count
  const queueQuery = useIssueQueue();
  const queueCount = useMemo(() => {
    if (!queueQuery.data) return 0;
    const grouped = queueQuery.data.grouped_items ?? {};
    let count = 0;
    for (const items of Object.values(grouped)) {
      count += items.length;
    }
    return count;
  }, [queueQuery.data]);

  // Inspector chain indicator
  const associationChain = useIssueQueueIntegrationStore(selectAssociationChain);
  const hasChain = associationChain !== null;

  // Layout preset handler
  const resetLayout = useTerminalGridStore((s) => s.resetLayout);
  const handlePreset = useCallback(
    (preset: 'single' | 'split-h' | 'split-v' | 'grid-2x2') => {
      resetLayout(preset);
    },
    [resetLayout]
  );

  return (
    <div className="flex items-center gap-1 px-2 h-[40px] border-b border-border bg-muted/30 shrink-0">
      {/* Panel toggle buttons */}
      <ToolbarButton
        icon={FolderTree}
        label={formatMessage({ id: 'terminalDashboard.toolbar.sessions' })}
        isActive={activePanel === 'sessions'}
        onClick={() => onTogglePanel('sessions')}
        badge={sessionCount > 0 ? sessionCount : undefined}
      />
      <ToolbarButton
        icon={AlertCircle}
        label={formatMessage({ id: 'terminalDashboard.toolbar.issues' })}
        isActive={activePanel === 'issues'}
        onClick={() => onTogglePanel('issues')}
        badge={openCount > 0 ? openCount : undefined}
      />
      <ToolbarButton
        icon={ListChecks}
        label={formatMessage({ id: 'terminalDashboard.toolbar.queue' })}
        isActive={activePanel === 'queue'}
        onClick={() => onTogglePanel('queue')}
        badge={queueCount > 0 ? queueCount : undefined}
      />
      <ToolbarButton
        icon={Info}
        label={formatMessage({ id: 'terminalDashboard.toolbar.inspector' })}
        isActive={activePanel === 'inspector'}
        onClick={() => onTogglePanel('inspector')}
        dot={hasChain}
      />

      {/* Separator */}
      <div className="w-px h-5 bg-border mx-1" />

      {/* Layout presets */}
      {LAYOUT_PRESETS.map((preset) => (
        <button
          key={preset.id}
          onClick={() => handlePreset(preset.id)}
          className={cn(
            'p-1.5 rounded transition-colors',
            'text-muted-foreground hover:text-foreground hover:bg-muted'
          )}
          title={formatMessage({ id: preset.labelId })}
        >
          <preset.icon className="w-3.5 h-3.5" />
        </button>
      ))}

      {/* Right-aligned title */}
      <span className="ml-auto text-xs text-muted-foreground font-medium">
        {formatMessage({ id: 'terminalDashboard.page.title' })}
      </span>
    </div>
  );
}

// ========== Toolbar Button ==========

function ToolbarButton({
  icon: Icon,
  label,
  isActive,
  onClick,
  badge,
  dot,
}: {
  icon: React.ComponentType<{ className?: string }>;
  label: string;
  isActive: boolean;
  onClick: () => void;
  badge?: number;
  dot?: boolean;
}) {
  return (
    <button
      onClick={onClick}
      className={cn(
        'flex items-center gap-1.5 px-2.5 py-1.5 rounded-md text-xs transition-colors',
        isActive
          ? 'bg-primary/10 text-primary font-medium'
          : 'text-muted-foreground hover:text-foreground hover:bg-muted'
      )}
    >
      <Icon className="w-3.5 h-3.5" />
      <span>{label}</span>
      {badge !== undefined && badge > 0 && (
        <Badge variant="secondary" className="text-[10px] px-1.5 py-0 ml-0.5">
          {badge}
        </Badge>
      )}
      {dot && (
        <span className="ml-0.5 w-2 h-2 rounded-full bg-primary shrink-0" />
      )}
    </button>
  );
}

ccw/frontend/src/components/terminal-dashboard/FloatingPanel.tsx (new file)

@@ -0,0 +1,101 @@
// ========================================
// FloatingPanel Component
// ========================================
// Generic floating panel container (Drawer style).
// Slides in from left or right side, overlaying the terminal grid.

import { useCallback, useEffect, useRef } from 'react';
import { X } from 'lucide-react';
import { cn } from '@/lib/utils';

// ========== Types ==========

interface FloatingPanelProps {
  isOpen: boolean;
  onClose: () => void;
  title: string;
  side?: 'left' | 'right';
  width?: number;
  children: React.ReactNode;
}

// ========== Component ==========

export function FloatingPanel({
  isOpen,
  onClose,
  title,
  side = 'left',
  width = 320,
  children,
}: FloatingPanelProps) {
  const panelRef = useRef<HTMLDivElement>(null);

  // Close on Escape key
  useEffect(() => {
    if (!isOpen) return;
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.key === 'Escape') onClose();
    };
    document.addEventListener('keydown', handleKeyDown);
    return () => document.removeEventListener('keydown', handleKeyDown);
  }, [isOpen, onClose]);

  const handleBackdropClick = useCallback(
    (e: React.MouseEvent) => {
      if (e.target === e.currentTarget) onClose();
    },
    [onClose]
  );

  return (
    <>
      {/* Backdrop */}
      <div
        className={cn(
          'fixed inset-0 z-40 transition-opacity duration-200',
          isOpen ? 'opacity-100 pointer-events-auto' : 'opacity-0 pointer-events-none'
        )}
        style={{ top: '40px' }} // Below toolbar
        onClick={handleBackdropClick}
      >
        <div className="absolute inset-0 bg-black/20" />
      </div>

      {/* Panel */}
      <div
        ref={panelRef}
        className={cn(
          'fixed z-50 flex flex-col bg-background border-border shadow-lg',
          'transition-transform duration-200 ease-out',
          side === 'left' && 'left-0 border-r',
          side === 'right' && 'right-0 border-l',
          // Transform based on open state and side
          side === 'left' && (isOpen ? 'translate-x-0' : '-translate-x-full'),
          side === 'right' && (isOpen ? 'translate-x-0' : 'translate-x-full'),
        )}
        style={{
          top: '40px', // Below toolbar
          height: 'calc(100vh - 56px - 40px)', // Subtract both app header and toolbar
          width: `${width}px`,
        }}
      >
        {/* Panel header */}
        <div className="flex items-center justify-between px-3 py-2 border-b border-border shrink-0">
          <h2 className="text-sm font-semibold">{title}</h2>
          <button
            onClick={onClose}
            className="p-1 rounded hover:bg-muted transition-colors text-muted-foreground hover:text-foreground"
          >
            <X className="w-4 h-4" />
          </button>
        </div>

        {/* Panel content */}
        <div className="flex-1 min-h-0 overflow-hidden">
          {children}
        </div>
      </div>
    </>
  );
}

ccw/frontend/src/components/terminal-dashboard/TerminalGrid.tsx (new file)

@@ -0,0 +1,118 @@
// ========================================
// TerminalGrid Component
// ========================================
// Recursive Allotment renderer for the terminal split pane layout.
// Mirrors the LayoutContainer pattern from cli-viewer but renders
// TerminalPane components as leaf nodes.

import { useCallback, useMemo } from 'react';
import { Allotment } from 'allotment';
import 'allotment/dist/style.css';
import { cn } from '@/lib/utils';
import { isPaneId } from '@/lib/layout-utils';
import {
  useTerminalGridStore,
  selectTerminalGridLayout,
  selectTerminalGridPanes,
} from '@/stores/terminalGridStore';
import type { AllotmentLayoutGroup } from '@/stores/viewerStore';
import { TerminalPane } from './TerminalPane';

// ========== Types ==========

interface GridGroupRendererProps {
  group: AllotmentLayoutGroup;
  minSize: number;
  onSizeChange: (sizes: number[]) => void;
}

// ========== Recursive Group Renderer ==========

function GridGroupRenderer({ group, minSize, onSizeChange }: GridGroupRendererProps) {
  const panes = useTerminalGridStore(selectTerminalGridPanes);

  const handleChange = useCallback(
    (sizes: number[]) => {
      onSizeChange(sizes);
    },
    [onSizeChange]
  );

  const validChildren = useMemo(() => {
    return group.children.filter((child) => {
      if (isPaneId(child)) {
        return panes[child] !== undefined;
      }
      return true;
    });
  }, [group.children, panes]);

  if (validChildren.length === 0) {
    return null;
  }

  return (
    <Allotment
      vertical={group.direction === 'vertical'}
      defaultSizes={group.sizes}
      onChange={handleChange}
      className="h-full"
    >
      {validChildren.map((child, index) => (
        <Allotment.Pane key={isPaneId(child) ? child : `group-${index}`} minSize={minSize}>
          {isPaneId(child) ? (
            <TerminalPane paneId={child} />
          ) : (
            <GridGroupRenderer
              group={child}
              minSize={minSize}
              onSizeChange={onSizeChange}
            />
          )}
        </Allotment.Pane>
      ))}
    </Allotment>
  );
}

// ========== Main Component ==========

export function TerminalGrid({ className }: { className?: string }) {
  const layout = useTerminalGridStore(selectTerminalGridLayout);
  const panes = useTerminalGridStore(selectTerminalGridPanes);
  const setLayout = useTerminalGridStore((s) => s.setLayout);

  const handleSizeChange = useCallback(
    (sizes: number[]) => {
      setLayout({ ...layout, sizes });
    },
    [layout, setLayout]
  );

  const content = useMemo(() => {
    if (!layout.children || layout.children.length === 0) {
      return null;
    }

    // Single pane: render directly without Allotment wrapper
    if (layout.children.length === 1 && isPaneId(layout.children[0])) {
      const paneId = layout.children[0];
      if (!panes[paneId]) return null;
      return <TerminalPane paneId={paneId} />;
    }

    return (
      <GridGroupRenderer
        group={layout}
        minSize={150}
        onSizeChange={handleSizeChange}
      />
    );
  }, [layout, panes, handleSizeChange]);

  return (
    <div className={cn('h-full w-full overflow-hidden bg-background', className)}>
      {content}
    </div>
  );
}

ccw/frontend/src/components/terminal-dashboard/TerminalPane.tsx (new file)

@@ -0,0 +1,246 @@
// ========================================
// TerminalPane Component
// ========================================
// Single terminal pane = PaneToolbar + TerminalInstance.
// Renders within the TerminalGrid recursive layout.

import { useCallback, useMemo } from 'react';
import { useIntl } from 'react-intl';
import {
  SplitSquareHorizontal,
  SplitSquareVertical,
  Eraser,
  AlertTriangle,
  X,
  Terminal,
  ChevronDown,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import { TerminalInstance } from './TerminalInstance';
import {
  useTerminalGridStore,
  selectTerminalGridPanes,
  selectTerminalGridFocusedPaneId,
} from '@/stores/terminalGridStore';
import {
  useSessionManagerStore,
  selectGroups,
  selectTerminalMetas,
} from '@/stores/sessionManagerStore';
import {
  useIssueQueueIntegrationStore,
  selectAssociationChain,
} from '@/stores/issueQueueIntegrationStore';
import { useCliSessionStore } from '@/stores/cliSessionStore';
import { getAllPaneIds } from '@/lib/layout-utils';
import type { PaneId } from '@/stores/viewerStore';
import type { TerminalStatus } from '@/types/terminal-dashboard';

// ========== Status Styles ==========

const statusDotStyles: Record<TerminalStatus, string> = {
  active: 'bg-green-500',
  idle: 'bg-gray-400',
  error: 'bg-red-500',
};

// ========== Props ==========

interface TerminalPaneProps {
  paneId: PaneId;
}

// ========== Component ==========

export function TerminalPane({ paneId }: TerminalPaneProps) {
  const { formatMessage } = useIntl();

  // Grid store
  const panes = useTerminalGridStore(selectTerminalGridPanes);
  const focusedPaneId = useTerminalGridStore(selectTerminalGridFocusedPaneId);
  const layout = useTerminalGridStore((s) => s.layout);
  const splitPane = useTerminalGridStore((s) => s.splitPane);
  const closePane = useTerminalGridStore((s) => s.closePane);
  const assignSession = useTerminalGridStore((s) => s.assignSession);
  const setFocused = useTerminalGridStore((s) => s.setFocused);

  const pane = panes[paneId];
  const sessionId = pane?.sessionId ?? null;
  const isFocused = focusedPaneId === paneId;
  const canClose = getAllPaneIds(layout).length > 1;

  // Session data
  const groups = useSessionManagerStore(selectGroups);
  const terminalMetas = useSessionManagerStore(selectTerminalMetas);
  const sessions = useCliSessionStore((s) => s.sessions);

  // Association chain for linked issue badge
  const associationChain = useIssueQueueIntegrationStore(selectAssociationChain);
  const linkedIssueId = useMemo(() => {
    if (!sessionId || !associationChain) return null;
    if (associationChain.sessionId === sessionId) return associationChain.issueId;
    return null;
  }, [sessionId, associationChain]);

  // Terminal metadata
  const meta = sessionId ? terminalMetas[sessionId] : null;
  const status: TerminalStatus = meta?.status ?? 'idle';
  const alertCount = meta?.alertCount ?? 0;

  // Build session options for dropdown
  const sessionOptions = useMemo(() => {
    const allSessionIds = groups.flatMap((g) => g.sessionIds);
    return allSessionIds.map((sid) => {
      const s = sessions[sid];
      const name = s ? (s.tool ? `${s.tool} - ${s.shellKind}` : s.shellKind) : sid;
      return { id: sid, name };
    });
  }, [groups, sessions]);

  // Handlers
  const handleFocus = useCallback(() => {
    setFocused(paneId);
  }, [paneId, setFocused]);

  const handleSplitH = useCallback(() => {
    splitPane(paneId, 'horizontal');
  }, [paneId, splitPane]);

  const handleSplitV = useCallback(() => {
    splitPane(paneId, 'vertical');
  }, [paneId, splitPane]);

  const handleClose = useCallback(() => {
    closePane(paneId);
  }, [paneId, closePane]);

  const handleSessionChange = useCallback(
    (e: React.ChangeEvent<HTMLSelectElement>) => {
      const value = e.target.value;
      assignSession(paneId, value || null);
    },
    [paneId, assignSession]
  );

  const handleClear = useCallback(() => {
    // Clear is handled by re-assigning the same session (triggers reset in TerminalInstance)
    if (sessionId) {
      assignSession(paneId, null);
      // Use microtask to re-assign after clearing
      queueMicrotask(() => assignSession(paneId, sessionId));
    }
  }, [paneId, sessionId, assignSession]);

  return (
    <div
      className={cn(
        'flex flex-col h-full border border-border/50 rounded-sm overflow-hidden',
        isFocused && 'ring-1 ring-primary/40'
      )}
      onClick={handleFocus}
    >
      {/* PaneToolbar */}
      <div className="flex items-center gap-1 px-2 py-1 border-b border-border bg-muted/30 shrink-0">
        {/* Left: Session selector + status */}
        <div className="flex items-center gap-1.5 min-w-0 flex-1">
          {sessionId && (
            <span
              className={cn('w-2 h-2 rounded-full shrink-0', statusDotStyles[status])}
            />
          )}
          <div className="relative min-w-0 flex-1 max-w-[180px]">
            <select
              value={sessionId ?? ''}
              onChange={handleSessionChange}
              className={cn(
                'w-full text-xs bg-transparent border-none outline-none cursor-pointer',
                'appearance-none pr-5 truncate',
                !sessionId && 'text-muted-foreground'
              )}
            >
              <option value="">
                {formatMessage({ id: 'terminalDashboard.pane.selectSession' })}
              </option>
              {sessionOptions.map((opt) => (
                <option key={opt.id} value={opt.id}>
                  {opt.name}
                </option>
              ))}
            </select>
            <ChevronDown className="absolute right-0 top-1/2 -translate-y-1/2 w-3 h-3 text-muted-foreground pointer-events-none" />
          </div>
        </div>

        {/* Center: Linked issue badge */}
        {linkedIssueId && (
          <span className="text-[10px] font-mono px-1.5 py-0.5 rounded bg-primary/10 text-primary shrink-0">
            {linkedIssueId}
          </span>
        )}

        {/* Right: Action buttons */}
        <div className="flex items-center gap-0.5 shrink-0">
          <button
            onClick={handleSplitH}
            className="p-1 rounded hover:bg-muted transition-colors text-muted-foreground hover:text-foreground"
            title={formatMessage({ id: 'terminalDashboard.pane.splitHorizontal' })}
          >
            <SplitSquareHorizontal className="w-3.5 h-3.5" />
          </button>
          <button
            onClick={handleSplitV}
            className="p-1 rounded hover:bg-muted transition-colors text-muted-foreground hover:text-foreground"
            title={formatMessage({ id: 'terminalDashboard.pane.splitVertical' })}
          >
            <SplitSquareVertical className="w-3.5 h-3.5" />
          </button>
          {sessionId && (
            <button
              onClick={handleClear}
              className="p-1 rounded hover:bg-muted transition-colors text-muted-foreground hover:text-foreground"
              title={formatMessage({ id: 'terminalDashboard.pane.clearTerminal' })}
            >
              <Eraser className="w-3.5 h-3.5" />
            </button>
          )}
          {alertCount > 0 && (
            <span className="flex items-center gap-0.5 px-1 text-destructive">
              <AlertTriangle className="w-3 h-3" />
              <span className="text-[10px] font-semibold tabular-nums">
                {alertCount > 99 ? '99+' : alertCount}
              </span>
            </span>
          )}
          {canClose && (
            <button
              onClick={handleClose}
              className="p-1 rounded hover:bg-destructive/10 transition-colors text-muted-foreground hover:text-destructive"
|
||||
title={formatMessage({ id: 'terminalDashboard.pane.closePane' })}
|
||||
>
|
||||
<X className="w-3.5 h-3.5" />
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Terminal content */}
|
||||
{sessionId ? (
|
||||
<div className="flex-1 min-h-0">
|
||||
<TerminalInstance sessionId={sessionId} />
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex-1 flex items-center justify-center text-muted-foreground">
|
||||
<div className="text-center">
|
||||
<Terminal className="h-6 w-6 mx-auto mb-1.5 opacity-30" />
|
||||
<p className="text-sm">
|
||||
{formatMessage({ id: 'terminalDashboard.pane.selectSession' })}
|
||||
</p>
|
||||
<p className="text-xs mt-1 opacity-70">
|
||||
{formatMessage({ id: 'terminalDashboard.pane.selectSessionHint' })}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
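Note that `handleClear` clears a terminal without any xterm-level API: it unbinds the session so `TerminalInstance` unmounts, then re-binds on the next microtask so the remounted instance starts with a fresh buffer. A minimal sketch of the same pattern in isolation, assuming the grid store defined later in this diff (`'sess-1'` and the helper name are made up for illustration):

```ts
// Hypothetical standalone version of the clear-via-reassign trick (not in the commit).
import { useTerminalGridStore } from '@/stores/terminalGridStore';

function clearPane(paneId: string, sessionId: string) {
  const { assignSession } = useTerminalGridStore.getState();
  assignSession(paneId, null); // pane renders its empty state; TerminalInstance unmounts
  // Re-bind on the next microtask so React commits the unmount first;
  // the remounted instance comes up with a clean buffer.
  queueMicrotask(() => assignSession(paneId, sessionId));
}

clearPane('tpane-1', 'sess-1');
```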
@@ -1,113 +0,0 @@
// ========================================
// TerminalTabBar Component
// ========================================
// Horizontal tab strip for terminal sessions in the Terminal Dashboard.
// Renders tabs from sessionManagerStore groups with status indicators and alert badges.

import { useMemo } from 'react';
import { useIntl } from 'react-intl';
import { Terminal, AlertTriangle } from 'lucide-react';
import { cn } from '@/lib/utils';
import {
  useSessionManagerStore,
  selectGroups,
  selectSessionManagerActiveTerminalId,
  selectTerminalMetas,
} from '@/stores/sessionManagerStore';
import type { TerminalStatus } from '@/types/terminal-dashboard';

// ========== Status Styles ==========

const statusDotStyles: Record<TerminalStatus, string> = {
  active: 'bg-green-500',
  idle: 'bg-gray-400',
  error: 'bg-red-500',
};

// ========== Component ==========

export function TerminalTabBar() {
  const { formatMessage } = useIntl();
  const groups = useSessionManagerStore(selectGroups);
  const activeTerminalId = useSessionManagerStore(selectSessionManagerActiveTerminalId);
  const terminalMetas = useSessionManagerStore(selectTerminalMetas);
  const setActiveTerminal = useSessionManagerStore((s) => s.setActiveTerminal);

  // Flatten all sessionIds from all groups
  const allSessionIds = groups.flatMap((g) => g.sessionIds);

  // Total alerts across all terminals
  const totalAlerts = useMemo(() => {
    let count = 0;
    for (const meta of Object.values(terminalMetas)) {
      count += meta.alertCount;
    }
    return count;
  }, [terminalMetas]);

  if (allSessionIds.length === 0) {
    return (
      <div className="flex items-center gap-2 px-3 py-2 border-b border-border bg-muted/30 min-h-[40px]">
        <Terminal className="w-3.5 h-3.5 text-muted-foreground" />
        <span className="text-xs text-muted-foreground">
          {formatMessage({ id: 'terminalDashboard.tabBar.noTabs' })}
        </span>
      </div>
    );
  }

  return (
    <div className="flex items-center border-b border-border bg-muted/30 overflow-x-auto shrink-0">
      {allSessionIds.map((sessionId) => {
        const meta = terminalMetas[sessionId];
        const title = meta?.title ?? sessionId;
        const status: TerminalStatus = meta?.status ?? 'idle';
        const alertCount = meta?.alertCount ?? 0;
        const isActive = activeTerminalId === sessionId;

        return (
          <button
            key={sessionId}
            className={cn(
              'flex items-center gap-1.5 px-3 py-2 text-xs border-r border-border',
              'whitespace-nowrap transition-colors hover:bg-accent/50',
              isActive
                ? 'bg-background text-foreground border-b-2 border-b-primary'
                : 'text-muted-foreground'
            )}
            onClick={() => setActiveTerminal(sessionId)}
            title={title}
          >
            {/* Status dot */}
            <span
              className={cn(
                'w-2 h-2 rounded-full shrink-0',
                statusDotStyles[status]
              )}
            />

            {/* Title */}
            <span className="truncate max-w-[120px]">{title}</span>

            {/* Alert badge */}
            {alertCount > 0 && (
              <span className="ml-1 px-1.5 py-0.5 text-[10px] font-medium leading-none rounded-full bg-destructive text-destructive-foreground shrink-0">
                {alertCount > 99 ? '99+' : alertCount}
              </span>
            )}
          </button>
        );
      })}

      {/* Total alerts indicator at right end */}
      {totalAlerts > 0 && (
        <div className="ml-auto flex items-center gap-1 px-3 py-2 shrink-0 text-destructive">
          <AlertTriangle className="w-3.5 h-3.5" />
          <span className="text-[10px] font-semibold tabular-nums">
            {totalAlerts > 99 ? '99+' : totalAlerts}
          </span>
        </div>
      )}
    </div>
  );
}
@@ -1,168 +0,0 @@
// ========================================
// TerminalWorkbench Component
// ========================================
// Container for the right panel of the Terminal Dashboard.
// Combines TerminalTabBar (tab switching) and TerminalInstance (xterm.js).
// When no terminal is active, shows selected issue detail preview
// or a compact empty state with action hints.

import { useMemo } from 'react';
import { useIntl } from 'react-intl';
import {
  Terminal,
  CircleDot,
  Tag,
  Clock,
  User,
} from 'lucide-react';
import { Badge } from '@/components/ui/Badge';
import { cn } from '@/lib/utils';
import {
  useSessionManagerStore,
  selectSessionManagerActiveTerminalId,
} from '@/stores/sessionManagerStore';
import {
  useIssueQueueIntegrationStore,
  selectSelectedIssueId,
} from '@/stores/issueQueueIntegrationStore';
import { useIssues } from '@/hooks/useIssues';
import type { Issue } from '@/lib/api';
import { TerminalTabBar } from './TerminalTabBar';
import { TerminalInstance } from './TerminalInstance';

// ========== Priority Styles ==========

const PRIORITY_VARIANT: Record<Issue['priority'], 'destructive' | 'warning' | 'info' | 'secondary'> = {
  critical: 'destructive',
  high: 'warning',
  medium: 'info',
  low: 'secondary',
};

const STATUS_COLORS: Record<Issue['status'], string> = {
  open: 'text-info',
  in_progress: 'text-warning',
  resolved: 'text-success',
  closed: 'text-muted-foreground',
  completed: 'text-success',
};

// ========== Issue Detail Preview ==========

function IssueDetailPreview({ issue }: { issue: Issue }) {
  const { formatMessage } = useIntl();

  return (
    <div className="flex-1 overflow-y-auto p-6">
      <div className="max-w-lg mx-auto space-y-4">
        {/* Header hint */}
        <p className="text-[10px] uppercase tracking-wider text-muted-foreground">
          {formatMessage({ id: 'terminalDashboard.workbench.issuePreview' })}
        </p>

        {/* Title + Status */}
        <div className="space-y-2">
          <div className="flex items-start gap-2">
            <CircleDot className={cn('w-4 h-4 shrink-0 mt-0.5', STATUS_COLORS[issue.status] ?? 'text-muted-foreground')} />
            <h3 className="text-base font-semibold text-foreground leading-snug">
              {issue.title}
            </h3>
          </div>
          <div className="flex items-center gap-2 pl-6">
            <Badge variant={PRIORITY_VARIANT[issue.priority]} className="text-[10px] px-1.5 py-0">
              {issue.priority}
            </Badge>
            <span className="text-[10px] text-muted-foreground font-mono">{issue.id}</span>
          </div>
        </div>

        {/* Context / Description */}
        {issue.context && (
          <div className="rounded-md border border-border bg-muted/20 p-3">
            <p className="text-xs text-foreground/80 leading-relaxed whitespace-pre-wrap">
              {issue.context}
            </p>
          </div>
        )}

        {/* Metadata rows */}
        <div className="space-y-1.5 text-xs text-muted-foreground">
          {issue.labels && issue.labels.length > 0 && (
            <div className="flex items-center gap-2">
              <Tag className="w-3.5 h-3.5 shrink-0" />
              <div className="flex items-center gap-1 flex-wrap">
                {issue.labels.map((label) => (
                  <span key={label} className="px-1.5 py-0.5 rounded bg-muted text-[10px]">
                    {label}
                  </span>
                ))}
              </div>
            </div>
          )}
          {issue.assignee && (
            <div className="flex items-center gap-2">
              <User className="w-3.5 h-3.5 shrink-0" />
              <span>{issue.assignee}</span>
            </div>
          )}
          {issue.createdAt && (
            <div className="flex items-center gap-2">
              <Clock className="w-3.5 h-3.5 shrink-0" />
              <span>{new Date(issue.createdAt).toLocaleString()}</span>
            </div>
          )}
        </div>

        {/* Hint */}
        <p className="text-[10px] text-muted-foreground/60 pt-2">
          {formatMessage({ id: 'terminalDashboard.workbench.issuePreviewHint' })}
        </p>
      </div>
    </div>
  );
}

// ========== Component ==========

export function TerminalWorkbench() {
  const { formatMessage } = useIntl();
  const activeTerminalId = useSessionManagerStore(selectSessionManagerActiveTerminalId);
  const selectedIssueId = useIssueQueueIntegrationStore(selectSelectedIssueId);
  const { issues } = useIssues();

  // Find selected issue for preview
  const selectedIssue = useMemo(() => {
    if (!selectedIssueId) return null;
    return issues.find((i) => i.id === selectedIssueId) ?? null;
  }, [selectedIssueId, issues]);

  return (
    <div className="flex flex-col h-full">
      {/* Tab strip (fixed height) */}
      <TerminalTabBar />

      {/* Terminal content (flex-1, takes remaining space) */}
      {activeTerminalId ? (
        <div className="flex-1 min-h-0">
          <TerminalInstance sessionId={activeTerminalId} />
        </div>
      ) : selectedIssue ? (
        /* Issue detail preview when no terminal but issue is selected */
        <IssueDetailPreview issue={selectedIssue} />
      ) : (
        /* Compact empty state */
        <div className="flex-1 flex items-center justify-center text-muted-foreground">
          <div className="text-center">
            <Terminal className="h-6 w-6 mx-auto mb-1.5 opacity-30" />
            <p className="text-sm font-medium">
              {formatMessage({ id: 'terminalDashboard.workbench.noTerminal' })}
            </p>
            <p className="text-xs mt-1 opacity-70">
              {formatMessage({ id: 'terminalDashboard.workbench.noTerminalHint' })}
            </p>
          </div>
        </div>
      )}
    </div>
  );
}
ccw/frontend/src/lib/layout-utils.ts (new file, 214 lines)
@@ -0,0 +1,214 @@
// ========================================
// Layout Utilities
// ========================================
// Pure functions for manipulating Allotment layout trees.
// Extracted from viewerStore for reuse across terminal grid and CLI viewer.

import type { AllotmentLayoutGroup, PaneId } from '@/stores/viewerStore';

/**
 * Check if a layout child is a PaneId (string) or a nested group
 */
export function isPaneId(value: PaneId | AllotmentLayoutGroup): value is PaneId {
  return typeof value === 'string';
}

/**
 * Find a pane ID in the layout tree
 */
export function findPaneInLayout(
  layout: AllotmentLayoutGroup,
  paneId: PaneId
): { found: boolean; parent: AllotmentLayoutGroup | null; index: number } {
  const search = (
    group: AllotmentLayoutGroup,
    _parent: AllotmentLayoutGroup | null
  ): { found: boolean; parent: AllotmentLayoutGroup | null; index: number } => {
    for (let i = 0; i < group.children.length; i++) {
      const child = group.children[i];
      if (isPaneId(child)) {
        if (child === paneId) {
          return { found: true, parent: group, index: i };
        }
      } else {
        const result = search(child, group);
        if (result.found) {
          return result;
        }
      }
    }
    return { found: false, parent: null, index: -1 };
  };

  return search(layout, null);
}

/**
 * Remove a pane from layout and clean up empty groups
 */
export function removePaneFromLayout(
  layout: AllotmentLayoutGroup,
  paneId: PaneId
): AllotmentLayoutGroup {
  const removeFromGroup = (group: AllotmentLayoutGroup): AllotmentLayoutGroup | null => {
    const newChildren: (PaneId | AllotmentLayoutGroup)[] = [];

    for (const child of group.children) {
      if (isPaneId(child)) {
        if (child !== paneId) {
          newChildren.push(child);
        }
      } else {
        const cleanedChild = removeFromGroup(child);
        if (cleanedChild && cleanedChild.children.length > 0) {
          if (cleanedChild.children.length === 1) {
            newChildren.push(cleanedChild.children[0]);
          } else {
            newChildren.push(cleanedChild);
          }
        }
      }
    }

    if (newChildren.length === 0) {
      return null;
    }

    const newSizes = group.sizes
      ? group.sizes.filter((_, i) => {
          const child = group.children[i];
          return !isPaneId(child) || child !== paneId;
        })
      : undefined;

    const normalizedSizes = newSizes
      ? (() => {
          const total = newSizes.reduce((sum, s) => sum + s, 0);
          return total > 0 ? newSizes.map((s) => (s / total) * 100) : undefined;
        })()
      : undefined;

    return {
      direction: group.direction,
      sizes: normalizedSizes,
      children: newChildren,
    };
  };

  const result = removeFromGroup(layout);
  return result || { direction: 'horizontal', children: [] };
}

/**
 * Add a pane to the layout at a specific position
 */
export function addPaneToLayout(
  layout: AllotmentLayoutGroup,
  newPaneId: PaneId,
  parentPaneId?: PaneId,
  direction: 'horizontal' | 'vertical' = 'horizontal'
): AllotmentLayoutGroup {
  if (!parentPaneId) {
    if (layout.children.length === 0) {
      return {
        ...layout,
        children: [newPaneId],
        sizes: [100],
      };
    }

    if (layout.direction === direction) {
      const currentSizes = layout.sizes || layout.children.map(() => 100 / layout.children.length);
      const totalSize = currentSizes.reduce((sum, s) => sum + s, 0);
      const newSize = totalSize / (layout.children.length + 1);
      const scaleFactor = (totalSize - newSize) / totalSize;

      return {
        ...layout,
        children: [...layout.children, newPaneId],
        sizes: [...currentSizes.map((s) => s * scaleFactor), newSize],
      };
    }

    return {
      direction,
      sizes: [50, 50],
      children: [layout, newPaneId],
    };
  }

  const addRelativeTo = (group: AllotmentLayoutGroup): AllotmentLayoutGroup => {
    const newChildren: (PaneId | AllotmentLayoutGroup)[] = [];
    let newSizes: number[] | undefined = group.sizes ? [] : undefined;

    for (let i = 0; i < group.children.length; i++) {
      const child = group.children[i];
      const childSize = group.sizes ? group.sizes[i] : undefined;

      if (isPaneId(child)) {
        if (child === parentPaneId) {
          if (group.direction === direction) {
            const halfSize = (childSize || 50) / 2;
            newChildren.push(child, newPaneId);
            if (newSizes) {
              newSizes.push(halfSize, halfSize);
            }
          } else {
            const newGroup: AllotmentLayoutGroup = {
              direction,
              sizes: [50, 50],
              children: [child, newPaneId],
            };
            newChildren.push(newGroup);
            if (newSizes && childSize !== undefined) {
              newSizes.push(childSize);
            }
          }
        } else {
          newChildren.push(child);
          if (newSizes && childSize !== undefined) {
            newSizes.push(childSize);
          }
        }
      } else {
        const result = findPaneInLayout(child, parentPaneId);
        if (result.found) {
          newChildren.push(addRelativeTo(child));
        } else {
          newChildren.push(child);
        }
        if (newSizes && childSize !== undefined) {
          newSizes.push(childSize);
        }
      }
    }

    return {
      ...group,
      children: newChildren,
      sizes: newSizes,
    };
  };

  return addRelativeTo(layout);
}

/**
 * Get all pane IDs from layout
 */
export function getAllPaneIds(layout: AllotmentLayoutGroup): PaneId[] {
  const paneIds: PaneId[] = [];

  const traverse = (group: AllotmentLayoutGroup) => {
    for (const child of group.children) {
      if (isPaneId(child)) {
        paneIds.push(child);
      } else {
        traverse(child);
      }
    }
  };

  traverse(layout);
  return paneIds;
}
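Because these helpers are pure, they compose without touching any store. A quick round-trip sketch (hypothetical pane ids, not part of the commit) showing the size math in `addPaneToLayout` and the single-child collapse in `removePaneFromLayout`:

```ts
// Hypothetical round trip over the helpers above.
import { addPaneToLayout, removePaneFromLayout, getAllPaneIds, isPaneId } from '@/lib/layout-utils';
import type { AllotmentLayoutGroup } from '@/stores/viewerStore';

let layout: AllotmentLayoutGroup = { direction: 'horizontal', sizes: [100], children: ['a'] };

// Splitting 'a' against the group direction wraps it in a nested vertical group with 'b'.
layout = addPaneToLayout(layout, 'b', 'a', 'vertical');
console.assert(getAllPaneIds(layout).join(',') === 'a,b');

// Appending at the root along the same direction rescales proportionally:
// newSize = total / (n + 1); existing sizes shrink by (total - newSize) / total.
layout = addPaneToLayout(layout, 'c');

// Removing 'b' leaves its group with one child, which collapses back to a bare pane id.
layout = removePaneFromLayout(layout, 'b');
console.assert(layout.children.every(isPaneId)); // ['a', 'c'], no nested group left
```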
@@ -68,6 +68,25 @@
      "blocked": "Blocked"
    }
  },
  "toolbar": {
    "sessions": "Sessions",
    "issues": "Issues",
    "queue": "Queue",
    "inspector": "Inspector",
    "layoutSingle": "Single",
    "layoutSplitH": "Split Horizontal",
    "layoutSplitV": "Split Vertical",
    "layoutGrid": "Grid 2×2"
  },
  "pane": {
    "selectSession": "Select a session",
    "selectSessionHint": "Choose a terminal session from the dropdown",
    "splitHorizontal": "Split Right",
    "splitVertical": "Split Down",
    "clearTerminal": "Clear Terminal",
    "closePane": "Close Pane",
    "linkedIssue": "Linked Issue"
  },
  "tabBar": {
    "noTabs": "No terminal sessions"
  },
@@ -68,6 +68,25 @@
      "blocked": "已阻塞"
    }
  },
  "toolbar": {
    "sessions": "会话",
    "issues": "问题",
    "queue": "队列",
    "inspector": "检查器",
    "layoutSingle": "单窗格",
    "layoutSplitH": "左右分割",
    "layoutSplitV": "上下分割",
    "layoutGrid": "2×2 网格"
  },
  "pane": {
    "selectSession": "选择会话",
    "selectSessionHint": "从下拉菜单中选择终端会话",
    "splitHorizontal": "向右分割",
    "splitVertical": "向下分割",
    "clearTerminal": "清屏",
    "closePane": "关闭窗格",
    "linkedIssue": "关联问题"
  },
  "tabBar": {
    "noTabs": "暂无终端会话"
  },
@@ -1,104 +1,98 @@
// ========================================
// Terminal Dashboard Page
// Terminal Dashboard Page (V2)
// ========================================
// Three-column Allotment layout for terminal execution management.
// Left: session groups + agent list (with active session count badge)
// Middle: full-height IssuePanel
// Right: terminal workbench (or issue detail preview)
// Bottom: collapsible BottomPanel (Queue + Inspector tabs)
// Cross-cutting: AssociationHighlightProvider wraps the layout
// Terminal-first layout with floating panels.
// Main area: TerminalGrid (tmux-style split panes)
// Top: DashboardToolbar with panel toggles and layout presets
// Floating panels: Sessions, Issues, Queue, Inspector (overlay, mutually exclusive)

import { useMemo } from 'react';
import { useState, useCallback } from 'react';
import { useIntl } from 'react-intl';
import { Allotment } from 'allotment';
import 'allotment/dist/style.css';
import { FolderTree, Activity } from 'lucide-react';
import { AssociationHighlightProvider } from '@/components/terminal-dashboard/AssociationHighlight';
import { DashboardToolbar, type PanelId } from '@/components/terminal-dashboard/DashboardToolbar';
import { TerminalGrid } from '@/components/terminal-dashboard/TerminalGrid';
import { FloatingPanel } from '@/components/terminal-dashboard/FloatingPanel';
import { SessionGroupTree } from '@/components/terminal-dashboard/SessionGroupTree';
import { AgentList } from '@/components/terminal-dashboard/AgentList';
import { IssuePanel } from '@/components/terminal-dashboard/IssuePanel';
import { TerminalWorkbench } from '@/components/terminal-dashboard/TerminalWorkbench';
import { BottomPanel } from '@/components/terminal-dashboard/BottomPanel';
import { AssociationHighlightProvider } from '@/components/terminal-dashboard/AssociationHighlight';
import { Badge } from '@/components/ui/Badge';
import {
  useSessionManagerStore,
  selectGroups,
  selectTerminalMetas,
} from '@/stores/sessionManagerStore';
import type { TerminalStatus } from '@/types/terminal-dashboard';
import { QueuePanel } from '@/components/terminal-dashboard/QueuePanel';
import { InspectorContent } from '@/components/terminal-dashboard/BottomInspector';

// ========== Main Page Component ==========

export function TerminalDashboardPage() {
  const { formatMessage } = useIntl();
  const groups = useSessionManagerStore(selectGroups);
  const terminalMetas = useSessionManagerStore(selectTerminalMetas);
  const [activePanel, setActivePanel] = useState<PanelId | null>(null);

  // Active session count for left column header badge
  const sessionCount = useMemo(() => {
    const allSessionIds = groups.flatMap((g) => g.sessionIds);
    let activeCount = 0;
    for (const sid of allSessionIds) {
      const meta = terminalMetas[sid];
      const status: TerminalStatus = meta?.status ?? 'idle';
      if (status === 'active') {
        activeCount++;
      }
    }
    return activeCount > 0 ? activeCount : allSessionIds.length;
  }, [groups, terminalMetas]);
  const togglePanel = useCallback((panelId: PanelId) => {
    setActivePanel((prev) => (prev === panelId ? null : panelId));
  }, []);

  const closePanel = useCallback(() => {
    setActivePanel(null);
  }, []);

  return (
    <div className="flex flex-col h-[calc(100vh-56px)] overflow-hidden">
      {/* AssociationHighlightProvider wraps the three-column layout + bottom panel */}
      <AssociationHighlightProvider>
        {/* Three-column Allotment layout (flex-1) */}
        {/* Global toolbar */}
        <DashboardToolbar
          activePanel={activePanel}
          onTogglePanel={togglePanel}
        />

        {/* Terminal grid (flex-1, takes all remaining space) */}
        <div className="flex-1 min-h-0">
          <Allotment proportionalLayout={true}>
            {/* Left column: Sessions + Agents */}
            <Allotment.Pane preferredSize={220} minSize={180} maxSize={320}>
              <div className="h-full border-r border-border bg-background flex flex-col">
                <div className="px-3 py-2 border-b border-border shrink-0 flex items-center justify-between">
                  <h2 className="text-sm font-semibold flex items-center gap-2">
                    <FolderTree className="w-4 h-4" />
                    {formatMessage({ id: 'terminalDashboard.columns.sessions' })}
                  </h2>
                  {sessionCount > 0 && (
                    <Badge variant="secondary" className="text-[10px] px-1.5 py-0 flex items-center gap-1">
                      <Activity className="w-3 h-3" />
                      {sessionCount}
                    </Badge>
                  )}
                </div>
                {/* SessionGroupTree takes remaining space */}
                <div className="flex-1 min-h-0 overflow-y-auto">
                  <SessionGroupTree />
                </div>
                {/* AgentList at bottom with max height */}
                <div className="shrink-0">
                  <AgentList />
                </div>
              </div>
            </Allotment.Pane>

            {/* Middle column: Full-height IssuePanel */}
            <Allotment.Pane minSize={280}>
              <div className="h-full border-r border-border bg-background overflow-hidden">
                <IssuePanel />
              </div>
            </Allotment.Pane>

            {/* Right column: Terminal Workbench */}
            <Allotment.Pane minSize={300}>
              <div className="h-full bg-background overflow-hidden">
                <TerminalWorkbench />
              </div>
            </Allotment.Pane>
          </Allotment>
          <TerminalGrid />
        </div>

        {/* BottomPanel: collapsible Queue + Inspector tabs (full-width) */}
        <BottomPanel />
        {/* Floating panels (conditional, overlay) */}
        <FloatingPanel
          isOpen={activePanel === 'sessions'}
          onClose={closePanel}
          title={formatMessage({ id: 'terminalDashboard.toolbar.sessions' })}
          side="left"
          width={280}
        >
          <div className="flex flex-col h-full">
            <div className="flex-1 min-h-0 overflow-y-auto">
              <SessionGroupTree />
            </div>
            <div className="shrink-0">
              <AgentList />
            </div>
          </div>
        </FloatingPanel>

        <FloatingPanel
          isOpen={activePanel === 'issues'}
          onClose={closePanel}
          title={formatMessage({ id: 'terminalDashboard.toolbar.issues' })}
          side="left"
          width={380}
        >
          <IssuePanel />
        </FloatingPanel>

        <FloatingPanel
          isOpen={activePanel === 'queue'}
          onClose={closePanel}
          title={formatMessage({ id: 'terminalDashboard.toolbar.queue' })}
          side="right"
          width={400}
        >
          <QueuePanel />
        </FloatingPanel>

        <FloatingPanel
          isOpen={activePanel === 'inspector'}
          onClose={closePanel}
          title={formatMessage({ id: 'terminalDashboard.toolbar.inspector' })}
          side="right"
          width={360}
        >
          <InspectorContent />
        </FloatingPanel>
      </AssociationHighlightProvider>
    </div>
  );
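The `togglePanel` callback above keeps at most one floating panel open: re-clicking the active toggle closes it, and clicking another replaces it. A tiny sketch of that state machine in isolation (hypothetical, it just mirrors `togglePanel`):

```ts
// Hypothetical distillation of the mutually-exclusive panel toggle.
type PanelId = 'sessions' | 'issues' | 'queue' | 'inspector';

const toggle = (prev: PanelId | null, id: PanelId): PanelId | null =>
  prev === id ? null : id;

console.assert(toggle(null, 'issues') === 'issues');   // open
console.assert(toggle('issues', 'issues') === null);   // same toggle closes
console.assert(toggle('issues', 'queue') === 'queue'); // different toggle replaces, never stacks
```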
@@ -275,6 +275,22 @@ export type {
  MonitorAlert,
} from '../types/terminal-dashboard';

// Terminal Grid Store
export {
  useTerminalGridStore,
  selectTerminalGridLayout,
  selectTerminalGridPanes,
  selectTerminalGridFocusedPaneId,
  selectTerminalPane,
} from './terminalGridStore';

export type {
  TerminalPaneState,
  TerminalGridState,
  TerminalGridActions,
  TerminalGridStore,
} from './terminalGridStore';

// Issue Queue Integration Store Types
export type {
  AssociationChain,
ccw/frontend/src/stores/terminalGridStore.ts (new file, 247 lines)
@@ -0,0 +1,247 @@
// ========================================
// Terminal Grid Store
// ========================================
// Zustand store for terminal grid layout state.
// Manages tmux-style split pane layout where each pane holds a terminal session.
// Reuses AllotmentLayoutGroup tree structure and pure layout functions from layout-utils.

import { create } from 'zustand';
import { devtools, persist } from 'zustand/middleware';
import type { AllotmentLayoutGroup, PaneId } from './viewerStore';
import {
  addPaneToLayout,
  removePaneFromLayout,
  getAllPaneIds,
} from '@/lib/layout-utils';

// ========== Types ==========

export interface TerminalPaneState {
  id: PaneId;
  /** Bound terminal session key (null = empty pane awaiting assignment) */
  sessionId: string | null;
}

export interface TerminalGridState {
  layout: AllotmentLayoutGroup;
  panes: Record<PaneId, TerminalPaneState>;
  focusedPaneId: PaneId | null;
  nextPaneIdCounter: number;
}

export interface TerminalGridActions {
  setLayout: (layout: AllotmentLayoutGroup) => void;
  splitPane: (paneId: PaneId, direction: 'horizontal' | 'vertical') => PaneId;
  closePane: (paneId: PaneId) => void;
  assignSession: (paneId: PaneId, sessionId: string | null) => void;
  setFocused: (paneId: PaneId) => void;
  resetLayout: (preset: 'single' | 'split-h' | 'split-v' | 'grid-2x2') => void;
}

export type TerminalGridStore = TerminalGridState & TerminalGridActions;

// ========== Constants ==========

const GRID_STORAGE_KEY = 'terminal-grid-storage';
const GRID_STORAGE_VERSION = 1;

// ========== Helpers ==========

const generatePaneId = (counter: number): PaneId => `tpane-${counter}`;

// ========== Initial State ==========

function createInitialLayout(): {
  layout: AllotmentLayoutGroup;
  panes: Record<PaneId, TerminalPaneState>;
  focusedPaneId: PaneId;
  nextPaneIdCounter: number;
} {
  const paneId = generatePaneId(1);
  return {
    layout: { direction: 'horizontal', sizes: [100], children: [paneId] },
    panes: { [paneId]: { id: paneId, sessionId: null } },
    focusedPaneId: paneId,
    nextPaneIdCounter: 2,
  };
}

const initial = createInitialLayout();

const initialState: TerminalGridState = {
  layout: initial.layout,
  panes: initial.panes,
  focusedPaneId: initial.focusedPaneId,
  nextPaneIdCounter: initial.nextPaneIdCounter,
};

// ========== Store ==========

export const useTerminalGridStore = create<TerminalGridStore>()(
  persist(
    devtools(
      (set, get) => ({
        ...initialState,

        setLayout: (layout) => {
          set({ layout }, false, 'terminalGrid/setLayout');
        },

        splitPane: (paneId, direction) => {
          const state = get();
          const newPaneId = generatePaneId(state.nextPaneIdCounter);
          const newLayout = addPaneToLayout(state.layout, newPaneId, paneId, direction);

          set(
            {
              layout: newLayout,
              panes: {
                ...state.panes,
                [newPaneId]: { id: newPaneId, sessionId: null },
              },
              focusedPaneId: newPaneId,
              nextPaneIdCounter: state.nextPaneIdCounter + 1,
            },
            false,
            'terminalGrid/splitPane'
          );

          return newPaneId;
        },

        closePane: (paneId) => {
          const state = get();
          const allPaneIds = getAllPaneIds(state.layout);
          if (allPaneIds.length <= 1) return;

          const newLayout = removePaneFromLayout(state.layout, paneId);
          const newPanes = { ...state.panes };
          delete newPanes[paneId];

          let newFocused = state.focusedPaneId;
          if (newFocused === paneId) {
            const remaining = getAllPaneIds(newLayout);
            newFocused = remaining.length > 0 ? remaining[0] : null;
          }

          set(
            {
              layout: newLayout,
              panes: newPanes,
              focusedPaneId: newFocused,
            },
            false,
            'terminalGrid/closePane'
          );
        },

        assignSession: (paneId, sessionId) => {
          const state = get();
          const pane = state.panes[paneId];
          if (!pane) return;

          set(
            {
              panes: {
                ...state.panes,
                [paneId]: { ...pane, sessionId },
              },
            },
            false,
            'terminalGrid/assignSession'
          );
        },

        setFocused: (paneId) => {
          const state = get();
          if (!state.panes[paneId]) return;
          set({ focusedPaneId: paneId }, false, 'terminalGrid/setFocused');
        },

        resetLayout: (preset) => {
          let counter = get().nextPaneIdCounter;

          const createPane = (): TerminalPaneState => {
            const id = generatePaneId(counter++);
            return { id, sessionId: null };
          };

          let layout: AllotmentLayoutGroup;
          const panes: Record<PaneId, TerminalPaneState> = {};

          switch (preset) {
            case 'single': {
              const p = createPane();
              panes[p.id] = p;
              layout = { direction: 'horizontal', sizes: [100], children: [p.id] };
              break;
            }
            case 'split-h': {
              const p1 = createPane();
              const p2 = createPane();
              panes[p1.id] = p1;
              panes[p2.id] = p2;
              layout = { direction: 'horizontal', sizes: [50, 50], children: [p1.id, p2.id] };
              break;
            }
            case 'split-v': {
              const p1 = createPane();
              const p2 = createPane();
              panes[p1.id] = p1;
              panes[p2.id] = p2;
              layout = { direction: 'vertical', sizes: [50, 50], children: [p1.id, p2.id] };
              break;
            }
            case 'grid-2x2': {
              const p1 = createPane();
              const p2 = createPane();
              const p3 = createPane();
              const p4 = createPane();
              panes[p1.id] = p1;
              panes[p2.id] = p2;
              panes[p3.id] = p3;
              panes[p4.id] = p4;
              layout = {
                direction: 'vertical',
                sizes: [50, 50],
                children: [
                  { direction: 'horizontal', sizes: [50, 50], children: [p1.id, p2.id] },
                  { direction: 'horizontal', sizes: [50, 50], children: [p3.id, p4.id] },
                ],
              };
              break;
            }
            default:
              return;
          }

          const firstPaneId = Object.keys(panes)[0] || null;
          set(
            {
              layout,
              panes,
              focusedPaneId: firstPaneId,
              nextPaneIdCounter: counter,
            },
            false,
            'terminalGrid/resetLayout'
          );
        },
      }),
      { name: 'TerminalGridStore' }
    ),
    {
      name: GRID_STORAGE_KEY,
      version: GRID_STORAGE_VERSION,
      partialize: (state) => ({
        layout: state.layout,
        panes: state.panes,
        focusedPaneId: state.focusedPaneId,
        nextPaneIdCounter: state.nextPaneIdCounter,
      }),
    }
  )
);

// ========== Selectors ==========

export const selectTerminalGridLayout = (state: TerminalGridStore) => state.layout;
export const selectTerminalGridPanes = (state: TerminalGridStore) => state.panes;
export const selectTerminalGridFocusedPaneId = (state: TerminalGridStore) => state.focusedPaneId;
export const selectTerminalPane = (paneId: PaneId) => (state: TerminalGridStore) =>
  state.panes[paneId];
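Since the actions close over `set`/`get`, the store can also be driven outside React, which is handy for tests and keyboard shortcuts. A minimal sketch assuming only the behavior defined above (the session key is invented):

```ts
// Hypothetical imperative driver for the grid store (not in the commit).
import { useTerminalGridStore, selectTerminalGridFocusedPaneId } from '@/stores/terminalGridStore';

const { splitPane, assignSession, closePane } = useTerminalGridStore.getState();

// Split the initial pane to the right; the new pane id is returned and focused.
const first = useTerminalGridStore.getState().focusedPaneId!;
const second = splitPane(first, 'horizontal');
assignSession(second, 'cli-session-42'); // 'cli-session-42' is a made-up session key

console.assert(selectTerminalGridFocusedPaneId(useTerminalGridStore.getState()) === second);

// closePane refuses to remove the last pane, so the grid never ends up empty.
closePane(second);
closePane(first); // no-op: only one pane remains
```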