mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-07 16:41:06 +08:00
feat: add DDD scan, sync, and update commands for document indexing
- Implemented `/ddd:scan` command to analyze existing codebases and generate document indices without specifications. This includes phases for project structure analysis, component discovery, feature inference, and requirement extraction. - Introduced `/ddd:sync` command for post-task synchronization, updating document indices, generating action logs, and refreshing feature/component documentation after development tasks. - Added `/ddd:update` command for lightweight incremental updates to the document index, allowing for quick impact checks during development and pre-commit validation. - Created `execute.md` for the coordinator role in the team lifecycle, detailing the spawning of executor team-workers for IMPL tasks. - Added `useHasHydrated` hook to determine if the Zustand workflow store has been rehydrated from localStorage, improving state management reliability.
This commit is contained in:
359
.claude/commands/ddd/auto.md
Normal file
359
.claude/commands/ddd/auto.md
Normal file
@@ -0,0 +1,359 @@
|
||||
---
|
||||
name: auto
|
||||
description: Chain command - automated document-driven development flow. Detects project state and runs the appropriate chain for new or existing projects.
|
||||
argument-hint: "[-y|--yes] [--skip-spec] [--skip-build] [--spec <session-id>] [--resume] \"project idea or task description\""
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: All sub-commands run in auto mode. Minimal human intervention.
|
||||
|
||||
# DDD Auto Command (/ddd:auto)
|
||||
|
||||
## Purpose
|
||||
|
||||
Orchestrate the full document-driven development lifecycle. **Adapts to project state** — works for both new projects and existing codebases.
|
||||
|
||||
## Flow Variants
|
||||
|
||||
### Variant 1: New Project (no code, no spec)
|
||||
```
|
||||
spec-generator → ddd:index-build → ddd:plan → ddd:execute → verify → ddd:sync
|
||||
```
|
||||
|
||||
### Variant 2: Existing Project (has code, no spec)
|
||||
```
|
||||
ddd:scan → ddd:plan → ddd:execute → verify → ddd:sync
|
||||
```
|
||||
|
||||
### Variant 3: Existing Project with Spec (has code + spec)
|
||||
```
|
||||
ddd:index-build → ddd:plan → ddd:execute → verify → ddd:sync
|
||||
```
|
||||
|
||||
### Variant 4: Index Exists (has doc-index.json)
|
||||
```
|
||||
ddd:plan → ddd:execute → verify → ddd:sync
|
||||
```
|
||||
|
||||
## Flow Diagram
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────┐
|
||||
│ /ddd:auto │
|
||||
│ │
|
||||
│ Stage 0: Detect Project State │
|
||||
│ ┌───────────────────────────────────┐ │
|
||||
│ │ has_codebase? has_spec? has_index?│ │
|
||||
│ └────────────┬──────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌──────────┼──────────────┐ │
|
||||
│ ▼ ▼ ▼ │
|
||||
│ No Code Code Only Code + Spec Index Exists │
|
||||
│ │ │ │ │ │
|
||||
│ ▼ │ │ │ │
|
||||
│ Stage 1 │ │ │ │
|
||||
│ Spec Gen │ │ │ │
|
||||
│ │ │ │ │ │
|
||||
│ ▼ │ ▼ │ │
|
||||
│ Stage 2a Stage 2b Stage 2a │ │
|
||||
│ index-build ddd:scan index-build │ │
|
||||
│ (Path A or Path B auto-detected) │ │
|
||||
│ │ │ │
|
||||
│ └───────────────────┬───────────────────┘ │
|
||||
│ ▼ │
|
||||
│ Stage 3: DDD Plan (enhanced) │
|
||||
│ (doc-index query + exploration + │
|
||||
│ clarification + task planning) │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ Stage 4: Execute │
|
||||
│ (ddd:execute = doc-aware execution) │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ Stage 4.5: Verify Gate │
|
||||
│ (convergence + build + lint + tests │
|
||||
│ → execution-manifest.json) │
|
||||
│ │ │
|
||||
│ PASS / WARN → continue │
|
||||
│ FAIL → ask user │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ Stage 5: Doc Sync │
|
||||
│ (auto-triggered with --from-manifest, │
|
||||
│ or manual /ddd:sync) │
|
||||
└────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Stage 0: Project State Detection
|
||||
|
||||
Automatically detect project state to determine which stages to run:
|
||||
|
||||
```
|
||||
Check 1: doc-index.json exists? → has_index
|
||||
Check 2: SPEC-* directories exist? → has_spec
|
||||
Check 3: Source code directories? → has_codebase
|
||||
Check 4: project-tech.json exists? → has_tech_analysis
|
||||
```
|
||||
|
||||
### Decision Matrix
|
||||
|
||||
| has_codebase | has_spec | has_index | Action |
|
||||
|:---:|:---:|:---:|--------|
|
||||
| No | No | No | Stage 1 (spec-gen) → Stage 2a (index-build) → Stage 3-5 |
|
||||
| No | Yes | No | Stage 2a (index-build) → Stage 3-5 |
|
||||
| Yes | No | No | **Stage 2b (ddd:scan)** → Stage 3-5 |
|
||||
| Yes | Yes | No | Stage 2a (index-build) → Stage 3-5 |
|
||||
| Yes | * | Yes | **Skip to Stage 3** (index exists) |
|
||||
|
||||
### Override Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `--skip-spec` | Never run spec-generator |
|
||||
| `--skip-build` | Never run index-build |
|
||||
| `--spec <id>` | Use specific spec session, force Path A |
|
||||
| `--from-scratch` | Rebuild index even if exists |
|
||||
|
||||
## Stage 1: Specification (conditional)
|
||||
|
||||
### Run When
|
||||
- No codebase AND no spec AND `--skip-spec` not set
|
||||
- User provides a new project idea (not an existing task description)
|
||||
|
||||
### Skip When
|
||||
- `--skip-spec` flag
|
||||
- Codebase already exists (existing project)
|
||||
- `--spec <id>` pointing to existing session
|
||||
|
||||
### Execution
|
||||
```
|
||||
Invoke /spec-generator with user input
|
||||
→ Output: .workflow/.doc-index/specs/SPEC-{slug}-{date}/
|
||||
```
|
||||
|
||||
## Stage 2: Index Construction (conditional)
|
||||
|
||||
### Run When
|
||||
- `doc-index.json` does not exist
|
||||
- OR `--from-scratch` flag
|
||||
|
||||
### Route Selection
|
||||
|
||||
```
|
||||
Has spec outputs → Stage 2a: /ddd:index-build (spec-first)
|
||||
No spec, has code → Stage 2b: /ddd:scan (code-first)
|
||||
```
|
||||
|
||||
### Stage 2a: /ddd:index-build (has spec)
|
||||
```
|
||||
Invoke /ddd:index-build [-y] [-s <spec-id>]
|
||||
→ Output: doc-index.json from spec entities + code mapping
|
||||
```
|
||||
|
||||
### Stage 2b: /ddd:scan (no spec, has code)
|
||||
```
|
||||
Invoke /ddd:scan [-y]
|
||||
→ Output: doc-index.json from code analysis + inferred features
|
||||
```
|
||||
|
||||
### Skip When
|
||||
- `--skip-build` flag
|
||||
- `doc-index.json` exists AND NOT `--from-scratch`
|
||||
- In this case, suggest `/ddd:update` for incremental refresh
|
||||
|
||||
## Stage 3: Planning (always runs)
|
||||
|
||||
### Execution
|
||||
|
||||
```
|
||||
Invoke /ddd:plan [-y] "task description"
|
||||
```
|
||||
|
||||
The enhanced `/ddd:plan` now performs:
|
||||
1. Doc-index query (instant context from features, requirements, components, ADRs)
|
||||
2. Doc-index-guided exploration (1-4 angles based on affected features)
|
||||
3. Clarification (aggregate ambiguities from exploration + doc-index gaps)
|
||||
4. Task planning (plan.json + TASK-*.json with doc_context traceability)
|
||||
5. Handoff selection
|
||||
|
||||
Output:
|
||||
- `plan.json` — plan overview with doc_context
|
||||
- `.task/TASK-*.json` — individual tasks with doc_context
|
||||
- `exploration-{angle}.json` — exploration results (if Phase 2 ran)
|
||||
- `planning-context.md` — legacy context package
|
||||
|
||||
### Handoff Decision
|
||||
|
||||
After planning, `/ddd:plan` presents execution options:
|
||||
|
||||
| Option | Description | Auto-Select When |
|
||||
|--------|-------------|-----------------|
|
||||
| **ddd:execute** | Document-aware execution (recommended) | Default in ddd workflow |
|
||||
| **lite-execute** | Standard execution (no doc awareness) | When doc traceability not needed |
|
||||
| **direct** | Start coding with context | User prefers manual |
|
||||
| **stop** | Just the plan context | Planning/research only |
|
||||
|
||||
With `-y`: Auto-select `ddd:execute`.
|
||||
|
||||
## Stage 4: Execution
|
||||
|
||||
Based on Stage 3 handoff decision:
|
||||
|
||||
| Mode | Delegates To |
|
||||
|------|-------------|
|
||||
| **ddd:execute** | `/ddd:execute --in-memory` with plan.json + doc-index enrichment |
|
||||
| lite-execute | `/workflow:lite-execute` with plan.json path |
|
||||
| direct | Output context package, developer works manually |
|
||||
| stop | End here, no execution |
|
||||
|
||||
### ddd:execute Features (when selected)
|
||||
- Doc-enriched task prompts (feature context + component docs + ADR constraints)
|
||||
- Per-batch impact verification (changes stay within planned scope)
|
||||
- Result persistence (`TASK-*.result.json` per task, `execution-manifest.json` per session)
|
||||
- Post-execution verify gate (Stage 4.5, unless `--skip-verify`)
|
||||
- Post-completion auto-sync with manifest (Stage 5 triggered automatically)
|
||||
|
||||
**Note**: When using `ddd:execute`, Stage 4.5 and Stage 5 are auto-triggered. For other modes, run Stage 5 manually.
|
||||
|
||||
## Stage 4.5: Verify Gate
|
||||
|
||||
Embedded within `ddd:execute` (Step 4.5). Runs after all batches complete, before doc sync.
|
||||
|
||||
### Purpose
|
||||
|
||||
Quality gate ensuring execution output is correct before committing to documentation updates. Prevents bad code from being "blessed" into the doc-index.
|
||||
|
||||
### Checks Performed
|
||||
|
||||
| Check | Description | Gate Behavior |
|
||||
|-------|-------------|---------------|
|
||||
| **Convergence** | Run `task.convergence.verification` for each task | FAIL if any critical task fails |
|
||||
| **Build** | Run project build command (`tsc --noEmit`, etc.) | FAIL on build errors |
|
||||
| **Lint** | Run project linter (`eslint`, etc.) | WARN only (non-blocking) |
|
||||
| **Regression** | Run full test suite, compare to baseline | FAIL on new test failures |
|
||||
|
||||
### Gate Results
|
||||
|
||||
| Result | Action |
|
||||
|--------|--------|
|
||||
| **PASS** | All checks passed → proceed to Stage 5 |
|
||||
| **WARN** | Non-critical issues (lint warnings) → proceed with warnings logged |
|
||||
| **FAIL** | Critical issues → ask user: fix now / skip sync / abort |
|
||||
| **FAIL + `-y`** | Log failures, set `error_state` in session, stop |
|
||||
|
||||
### Output
|
||||
|
||||
- `execution-manifest.json` — persisted to session folder, consumed by Stage 5
|
||||
- Contains: task results, files_modified (with task attribution), verify gate results
|
||||
|
||||
## Stage 5: Post-Task Sync
|
||||
|
||||
### Trigger
|
||||
- **Auto**: `/ddd:execute` triggers `/ddd:sync --from-manifest` automatically after verify gate passes
|
||||
- **Manual**: User runs `/ddd:sync` after completing work in direct/lite-execute mode
|
||||
- **Resume**: `/ddd:auto --resume` after task completion
|
||||
|
||||
### Execution
|
||||
```
|
||||
# Auto mode (from ddd:execute): uses manifest for precise change tracking
|
||||
Invoke /ddd:sync [-y] --task-id <id> --from-manifest {session}/execution-manifest.json "task summary"
|
||||
|
||||
# Manual mode (from direct/lite-execute): falls back to git diff
|
||||
Invoke /ddd:sync [-y] [--task-id <id>] "task summary"
|
||||
|
||||
→ Updates: doc-index.json, feature-maps/, tech-registry/, action-logs/
|
||||
```
|
||||
|
||||
## State Tracking
|
||||
|
||||
### Session File: `.workflow/.doc-index/.auto-session.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"session_id": "DAUTO-{timestamp}",
|
||||
"input": "user's original input",
|
||||
"detected_state": {
|
||||
"has_codebase": true,
|
||||
"has_spec": false,
|
||||
"has_index": false,
|
||||
"build_path": "code-first"
|
||||
},
|
||||
"stages_completed": ["detect", "index-build", "plan"],
|
||||
"current_stage": "execute",
|
||||
"spec_session": "SPEC-{slug}-{date}|null",
|
||||
"plan_session": "planning/{task-slug}-{date}/",
|
||||
"plan_context": "planning/{task-slug}-{date}/plan.json",
|
||||
"execution_mode": "ddd:execute|lite-execute|direct|stop",
|
||||
"execution_manifest": "planning/{task-slug}-{date}/execution-manifest.json|null",
|
||||
"verify_gate": "PASS|WARN|FAIL|null",
|
||||
"error_state": null,
|
||||
"last_error": {
|
||||
"stage": "execute",
|
||||
"message": "Task TASK-002 failed: compilation error",
|
||||
"timestamp": "ISO8601",
|
||||
"recoverable": true
|
||||
},
|
||||
"created_at": "ISO8601",
|
||||
"last_updated": "ISO8601"
|
||||
}
|
||||
```
|
||||
|
||||
### Resume
|
||||
```
|
||||
/ddd:auto --resume → Resume from current_stage in .auto-session.json
|
||||
```
|
||||
|
||||
### Error Recovery
|
||||
```
|
||||
/ddd:auto --resume
|
||||
IF error_state is set:
|
||||
Display last error context
|
||||
Ask: retry current stage / skip to next / abort
|
||||
ELSE:
|
||||
Resume from current_stage normally
|
||||
```
|
||||
|
||||
## Example Workflows
|
||||
|
||||
### New Project (Full Flow)
|
||||
```
|
||||
/ddd:auto "Build a task management API with user auth and team features"
|
||||
→ Stage 0: No code, no spec → need spec-gen
|
||||
→ Stage 1: spec-generator produces full spec
|
||||
→ Stage 2: index-build creates index from spec + empty codebase
|
||||
→ Stage 3: ddd:plan produces plan.json + TASK-*.json with doc_context
|
||||
→ Stage 4: ddd:execute runs tasks with feature context enrichment
|
||||
→ Stage 4.5: verify gate — convergence ✓, build ✓, tests ✓ → PASS
|
||||
→ Stage 5: ddd:sync --from-manifest auto-triggered, updates index
|
||||
```
|
||||
|
||||
### Existing Project, No Spec (Code-First)
|
||||
```
|
||||
/ddd:auto "Add rate limiting to API endpoints"
|
||||
→ Stage 0: Has code, no spec, no index
|
||||
→ Stage 2b: ddd:scan analyzes code, infers features from codebase
|
||||
→ Stage 3: ddd:plan queries index, explores with security + patterns angles
|
||||
→ Stage 4: ddd:execute runs with rate-limit component docs as context
|
||||
→ Stage 4.5: verify gate — convergence ✓, build ✓, lint 2 warnings → WARN
|
||||
→ Stage 5: ddd:sync --from-manifest, registers new rate-limit component
|
||||
```
|
||||
|
||||
### Existing Project with Index (Incremental)
|
||||
```
|
||||
/ddd:auto "Fix auth token expiration bug"
|
||||
→ Stage 0: Has code, has index → skip to plan
|
||||
→ Stage 3: ddd:plan finds feat-auth, REQ-002, tech-auth-service (Low complexity, skip exploration)
|
||||
→ Stage 4: ddd:execute runs single task with auth feature context
|
||||
→ Stage 4.5: verify gate — convergence ✓, build ✓, tests ✓ → PASS
|
||||
→ Stage 5: ddd:sync --from-manifest, updates tech-auth-service code locations
|
||||
```
|
||||
|
||||
### Planning Only
|
||||
```
|
||||
/ddd:auto "Investigate payment module architecture"
|
||||
→ Stage 0-2: (as needed)
|
||||
→ Stage 3: ddd:plan shows full context with exploration results
|
||||
→ Stage 4: user selects "stop" → gets plan.json + context package only
|
||||
```
|
||||
405
.claude/commands/ddd/execute.md
Normal file
405
.claude/commands/ddd/execute.md
Normal file
@@ -0,0 +1,405 @@
|
||||
---
|
||||
name: execute
|
||||
description: Document-aware execution engine — executes plan.json + TASK-*.json with doc-index context enrichment, per-batch impact verification, and post-completion doc sync.
|
||||
argument-hint: "[-y|--yes] [--skip-sync] [--skip-verify] [--plan <path>] [--in-memory] \"optional task description\""
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-confirm all decisions, auto-sync after completion.
|
||||
|
||||
# DDD Execute Command (/ddd:execute)
|
||||
|
||||
## Purpose
|
||||
|
||||
Same execution engine model as lite-execute, but each step is **doc-index-aware**:
|
||||
- Tasks are enriched with feature context, component docs, and architecture constraints
|
||||
- Per-batch impact verification ensures changes stay within planned scope
|
||||
- Post-completion automatically syncs the document index
|
||||
|
||||
### Core Differentiator
|
||||
Unlike generic execution engines, ddd:execute leverages the document architecture:
|
||||
- Feature-maps provide business context for each task
|
||||
- Tech-registry provides implementation patterns to follow
|
||||
- ADRs surface as hard constraints during execution
|
||||
- Requirement acceptance criteria inform convergence verification
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- `plan.json` + `.task/TASK-*.json` files from `/ddd:plan`
|
||||
- `doc-index.json` at `.workflow/.doc-index/doc-index.json`
|
||||
- If `--in-memory`: receives executionContext from `/ddd:plan` handoff
|
||||
|
||||
---
|
||||
|
||||
## Step 1: Initialize & Load Context
|
||||
|
||||
### 1.1 Locate Plan
|
||||
|
||||
```
|
||||
IF --in-memory:
|
||||
Load executionContext from ddd:plan handoff
|
||||
plan_path = executionContext.plan_path
|
||||
task_dir = executionContext.task_dir
|
||||
ELIF --plan <path>:
|
||||
plan_path = <path>
|
||||
task_dir = {dirname(path)}/.task/
|
||||
ELSE:
|
||||
Scan .workflow/.doc-index/planning/ for most recent session
|
||||
plan_path = {latest_session}/plan.json
|
||||
task_dir = {latest_session}/.task/
|
||||
```
|
||||
|
||||
### 1.2 Load Plan & Tasks
|
||||
|
||||
- Read `plan.json` — validate against plan-overview-base-schema
|
||||
- Read all `TASK-*.json` from `.task/` directory — validate against task-schema
|
||||
- Read `doc-index.json` from `.workflow/.doc-index/`
|
||||
|
||||
### 1.3 Pre-Load Doc Context
|
||||
|
||||
For each task with `doc_context`:
|
||||
- Load referenced `feature_docs` (feature-maps/{slug}.md)
|
||||
- Load referenced `component_docs` (tech-registry/{slug}.md)
|
||||
- Load ADR excerpts from doc-index `architectureDecisions[]`
|
||||
- Extract requirement acceptance criteria from doc-index `requirements[]`
|
||||
|
||||
### 1.4 Echo Strategy
|
||||
|
||||
Display execution summary:
|
||||
|
||||
```
|
||||
DDD Execute: {plan.summary}
|
||||
Complexity: {plan.complexity}
|
||||
Tasks: {plan.task_count}
|
||||
|
||||
Doc-Index Impact:
|
||||
Features: {doc_context.affected_features}
|
||||
Requirements: {doc_context.affected_requirements}
|
||||
Components: {doc_context.affected_components}
|
||||
Constraints: {doc_context.architecture_constraints}
|
||||
|
||||
Execution plan: {batch count} batches, {parallel tasks} parallel where possible
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 2: Task Grouping & Batch Creation
|
||||
|
||||
### 2.1 Extract Dependencies
|
||||
|
||||
From each `TASK-*.json`, read `depends_on[]` to build dependency graph.
|
||||
|
||||
### 2.2 Group into Batches
|
||||
|
||||
```
|
||||
Batch 1: Tasks with no dependencies (depends_on: [])
|
||||
Batch 2: Tasks depending only on Batch 1 tasks
|
||||
Batch 3: Tasks depending on Batch 1 + 2 tasks
|
||||
...
|
||||
```
|
||||
|
||||
Within each batch, tasks with the same `parallel_group` can run concurrently.
|
||||
|
||||
### 2.3 Assign Executor per Task
|
||||
|
||||
| Signal | Executor |
|
||||
|--------|----------|
|
||||
| `meta.execution_config.method == "cli"` | CLI tool (gemini/codex/qwen) |
|
||||
| `meta.execution_config.method == "agent"` | Agent (code-developer/universal-executor) |
|
||||
| Default | Agent (code-developer) |
|
||||
|
||||
---
|
||||
|
||||
## Step 3: Doc-Enriched Execution
|
||||
|
||||
For each task in batch order, build an enriched prompt:
|
||||
|
||||
### 3.1 Task Prompt Template
|
||||
|
||||
```markdown
|
||||
## Goal
|
||||
${plan.summary} — specifically: ${task.title}
|
||||
|
||||
## Document Context
|
||||
|
||||
### Feature: ${feature.name} (${feature.id})
|
||||
${feature-map content excerpt — overview + requirements section}
|
||||
|
||||
### Components
|
||||
${for each component in task.doc_context.component_ids:
|
||||
tech-registry excerpt — responsibility + code locations + key patterns}
|
||||
|
||||
### Architecture Constraints
|
||||
${for each ADR in task.doc_context.adr_ids:
|
||||
ADR title + decision + rationale from doc-index}
|
||||
|
||||
### Requirement Acceptance Criteria
|
||||
${for each requirement in task.doc_context.requirement_ids:
|
||||
requirement title + priority + success criteria from doc-index}
|
||||
|
||||
## Task Details
|
||||
${task.description}
|
||||
|
||||
### Files to Modify
|
||||
${task.files[] — path, action, changes}
|
||||
|
||||
### Implementation Steps
|
||||
${task.implementation[] — step-by-step guide}
|
||||
|
||||
## Done When
|
||||
${task.convergence.criteria[]}
|
||||
${task.convergence.verification}
|
||||
```
|
||||
|
||||
### 3.2 Execute Task
|
||||
|
||||
**Agent execution**:
|
||||
```
|
||||
Agent(subagent_type="code-developer", prompt="{enriched prompt}")
|
||||
```
|
||||
|
||||
**CLI execution**:
|
||||
```bash
|
||||
ccw cli -p "{enriched prompt}" --tool {cli_tool} --mode write
|
||||
```
|
||||
|
||||
### 3.3 Record & Persist Result
|
||||
|
||||
After each task completes:
|
||||
- Update `TASK-*.json` with `status`, `executed_at`, `result`
|
||||
- Track `result.files_modified` for impact verification
|
||||
- **Persist** result to `TASK-{id}.result.json` alongside the task file:
|
||||
|
||||
```json
|
||||
{
|
||||
"task_id": "TASK-001",
|
||||
"status": "completed|failed",
|
||||
"executed_at": "ISO8601",
|
||||
"executor": "code-developer|gemini|codex",
|
||||
"files_modified": [
|
||||
{ "path": "src/services/auth.ts", "action": "modified", "symbols_changed": ["AuthService.validate"] },
|
||||
{ "path": "src/routes/login.ts", "action": "created", "symbols_changed": ["loginRoute"] }
|
||||
],
|
||||
"convergence_result": {
|
||||
"criteria_met": ["Rate limiter middleware exists"],
|
||||
"criteria_unmet": [],
|
||||
"verification_output": "test output snippet..."
|
||||
},
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
This file serves as the durable handoff between execute and sync — survives process interruptions.
|
||||
|
||||
---
|
||||
|
||||
## Step 4: Per-Batch Impact Verification
|
||||
|
||||
After each batch completes (unless `--skip-verify`):
|
||||
|
||||
### 4.1 Trace Changed Files
|
||||
|
||||
For each file modified in the batch:
|
||||
```
|
||||
changed_file → match to doc-index.technicalComponents[].codeLocations[].path
|
||||
→ component_ids → featureIds → requirementIds
|
||||
```
|
||||
|
||||
### 4.2 Scope Verification
|
||||
|
||||
Compare actual impact to planned impact:
|
||||
|
||||
```
|
||||
Planned scope:
|
||||
Features: [feat-auth]
|
||||
Components: [tech-auth-service, tech-user-model]
|
||||
|
||||
Actual impact:
|
||||
Features: [feat-auth] ← OK, within scope
|
||||
Components: [tech-auth-service, tech-user-model, tech-email-service]
|
||||
← WARNING: tech-email-service not in plan
|
||||
```
|
||||
|
||||
### 4.3 Flag Unexpected Impact
|
||||
|
||||
If changes affect features/components NOT in `plan.doc_context`:
|
||||
- **Warning**: Display unexpected impact
|
||||
- **No -y**: Ask user to confirm continuation
|
||||
- **With -y**: Log warning, continue execution
|
||||
|
||||
### 4.4 Skip Conditions
|
||||
|
||||
Skip verification when:
|
||||
- `--skip-verify` flag is set
|
||||
- Only 1 batch (no intermediate verification needed for simple plans)
|
||||
|
||||
---
|
||||
|
||||
## Step 4.5: Post-Execution Verify Gate
|
||||
|
||||
After all batches complete, before doc sync (unless `--skip-verify`):
|
||||
|
||||
### 4.5.1 Convergence Verification
|
||||
|
||||
For each completed task with `convergence.verification`:
|
||||
```
|
||||
Execute: {task.convergence.verification}
|
||||
→ e.g., "npm test -- --grep rate-limit"
|
||||
Record: pass/fail → update TASK-{id}.result.json.convergence_result
|
||||
```
|
||||
|
||||
### 4.5.2 Build & Lint Check
|
||||
|
||||
```
|
||||
Run project build command (if configured):
|
||||
→ npm run build / tsc --noEmit / etc.
|
||||
Run project lint command (if configured):
|
||||
→ npm run lint / eslint src/ / etc.
|
||||
```
|
||||
|
||||
If build or lint fails:
|
||||
- **No -y**: Display errors, ask user: fix now / continue anyway / abort
|
||||
- **With -y**: Lint failures are logged as warnings and execution continues (non-blocking); build failures are logged and surface as FAIL at the verify gate summary (see 4.5.4)
|
||||
|
||||
### 4.5.3 Regression Test
|
||||
|
||||
```
|
||||
Run project test suite:
|
||||
→ npm test / pytest / etc.
|
||||
Compare: test results before execution (baseline) vs after
|
||||
```
|
||||
|
||||
If tests fail:
|
||||
- **No -y**: Display failures, ask user: fix now / skip sync / abort
|
||||
- **With -y**: Log failures as warning in execution results, continue
|
||||
|
||||
### 4.5.4 Verify Summary
|
||||
|
||||
```
|
||||
Verify Gate Results:
|
||||
Convergence: {passed}/{total} tasks verified
|
||||
Build: pass|fail|skipped
|
||||
Lint: pass|fail|skipped
|
||||
Tests: {passed}/{total} ({new_failures} regressions)
|
||||
|
||||
Gate: PASS / WARN (continue with warnings) / FAIL (blocked)
|
||||
```
|
||||
|
||||
### 4.5.5 Persist Verify Manifest
|
||||
|
||||
Write `execution-manifest.json` to session folder:
|
||||
|
||||
```json
|
||||
{
|
||||
"session_id": "{session-id}",
|
||||
"plan_path": "planning/{slug}/plan.json",
|
||||
"completed_at": "ISO8601",
|
||||
"tasks": [
|
||||
{
|
||||
"task_id": "TASK-001",
|
||||
"status": "completed",
|
||||
"result_file": ".task/TASK-001.result.json"
|
||||
}
|
||||
],
|
||||
"files_modified": [
|
||||
{ "path": "src/services/auth.ts", "action": "modified", "task_id": "TASK-001" },
|
||||
{ "path": "src/routes/login.ts", "action": "created", "task_id": "TASK-001" }
|
||||
],
|
||||
"verify": {
|
||||
"convergence": { "passed": 2, "total": 2 },
|
||||
"build": "pass",
|
||||
"lint": "pass",
|
||||
"tests": { "passed": 42, "total": 42, "regressions": 0 },
|
||||
"gate": "PASS"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This manifest is the **single source of truth** consumed by `ddd:sync --from-manifest`.
|
||||
|
||||
---
|
||||
|
||||
## Step 5: Post-Completion Doc Sync
|
||||
|
||||
After all batches complete (unless `--skip-sync`):
|
||||
|
||||
### 5.1 Auto-Trigger ddd:sync
|
||||
|
||||
```
|
||||
Invoke /ddd:sync [-y] --task-id {session-id} --from-manifest {session}/execution-manifest.json "{plan.summary}"
|
||||
```
|
||||
|
||||
Note: `/ddd:sync` automatically creates a backup of `doc-index.json` before modifications.
|
||||
|
||||
When `--from-manifest` is provided, sync uses the **execution manifest** as its primary data source instead of git diff. This ensures:
|
||||
- Precise file-level and symbol-level change tracking (from TASK-*.result.json)
|
||||
- Task-to-file attribution (which task modified which file)
|
||||
- Convergence verification results carried forward
|
||||
- Survives process interruptions (manifest is persisted to disk)
|
||||
|
||||
Fallback: If manifest is unavailable (e.g., manual mode), sync falls back to git diff discovery.
|
||||
|
||||
### 5.2 Generate Action Log
|
||||
|
||||
Create action entry with:
|
||||
- All tasks executed and their results
|
||||
- Files modified across all batches
|
||||
- Features and requirements addressed
|
||||
|
||||
### 5.3 Update Feature Status
|
||||
|
||||
Based on execution results:
|
||||
- Requirements with verified convergence → update status
|
||||
- Features with all requirements met → `status: "implemented"`
|
||||
|
||||
---
|
||||
|
||||
## Step 6: Summary & Follow-up
|
||||
|
||||
### 6.1 Execution Results
|
||||
|
||||
```
|
||||
DDD Execute Complete
|
||||
|
||||
Tasks: {completed}/{total} ({failed} failed)
|
||||
Files modified: {count}
|
||||
Batches: {batch_count}
|
||||
|
||||
Doc-Index Changes:
|
||||
Features updated: {list}
|
||||
Components updated: {list}
|
||||
New components registered: {list}
|
||||
Requirements addressed: {list}
|
||||
|
||||
Convergence:
|
||||
{for each task: task.id — criteria met: X/Y}
|
||||
```
|
||||
|
||||
### 6.2 Follow-up Suggestions
|
||||
|
||||
Based on execution results, suggest:
|
||||
- **New issues**: If unexpected scope expansion was detected
|
||||
- **Additional tests**: If convergence criteria only partially met
|
||||
- **Documentation gaps**: If new components were created without docs
|
||||
- **Next tasks**: If plan had tasks marked as future/deferred
|
||||
|
||||
---
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Auto-confirm, auto-sync |
|
||||
| `--skip-sync` | Skip post-completion ddd:sync (Step 5) |
|
||||
| `--skip-verify` | Skip per-batch impact verification (Step 4) AND post-execution verify gate (Step 4.5) |
|
||||
| `--plan <path>` | Explicit plan.json path |
|
||||
| `--in-memory` | Accept executionContext from ddd:plan handoff |
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: `/ddd:plan` output (plan.json + TASK-*.json), `doc-index.json`
|
||||
- **Output to**: Updated `doc-index.json` (via ddd:sync), `TASK-*.result.json` (per-task), `execution-manifest.json` (session-level)
|
||||
- **Schemas**: `plan-overview-ddd-schema.json` (input), `task-schema.json` + `task-ddd-extension-schema.json` (input), `doc-index.json` (enrichment)
|
||||
- **Delegates to**: `/ddd:sync` for post-completion synchronization
|
||||
<!-- New file: .claude/commands/ddd/index-build.md (266 lines) -->
|
||||
---
|
||||
name: index-build
|
||||
description: Build document index from spec-generator outputs + codebase mapping. Requires existing spec session. For projects without specs, use /ddd:scan instead.
|
||||
argument-hint: "[-y|--yes] [-s|--spec <spec-session-id>] [--from-scratch]"
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-confirm all decisions, use inferred mappings, skip interactive review.
|
||||
|
||||
# DDD Index Build Command (/ddd:index-build)
|
||||
|
||||
## Purpose
|
||||
|
||||
From **spec-generator outputs** (requirements, architecture, epics), construct the central document index and map spec entities to actual code locations.
|
||||
|
||||
```
|
||||
Spec outputs (REQ, ADR, EPIC) + Codebase → doc-index.json
|
||||
```
|
||||
|
||||
> **No spec?** Use `/ddd:scan` instead — it reverse-engineers the index from code alone.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- At least one spec session in `.workflow/.doc-index/specs/` or `.workflow/.spec/`
|
||||
- If no spec found → error with suggestion: "No spec session found. Run /ddd:scan for code-first indexing, or /spec-generator to create specs."
|
||||
|
||||
## Storage Location
|
||||
|
||||
```
|
||||
.workflow/.doc-index/
|
||||
├── doc-index.json ← Central index (primary output)
|
||||
├── specs/ ← Spec-generator outputs
|
||||
│ └── SPEC-{slug}-{date}/
|
||||
├── feature-maps/ ← Feature documentation (from Epics)
|
||||
│ ├── _index.md
|
||||
│ └── {feature-slug}.md
|
||||
├── tech-registry/ ← Technical component docs (from code mapping)
|
||||
│ ├── _index.md
|
||||
│ └── {component-slug}.md
|
||||
└── action-logs/ ← Change history (initially empty)
|
||||
└── _index.md
|
||||
```
|
||||
|
||||
## Phase 1: Discover & Parse Spec Sources
|
||||
|
||||
### 1.1 Locate Spec Session
|
||||
|
||||
```
|
||||
IF --spec <id> provided:
|
||||
Load from .workflow/.doc-index/specs/<id>/ OR .workflow/.spec/<id>/
|
||||
ELSE:
|
||||
Scan for all SPEC-* directories
|
||||
IF multiple → present list, ask user to select (-y picks latest)
|
||||
IF none → ERROR: "No spec session found. Use /ddd:scan or /spec-generator."
|
||||
```
|
||||
|
||||
### 1.2 Migrate Specs (if needed)
|
||||
|
||||
If spec in `.workflow/.spec/` but not in `.workflow/.doc-index/specs/`:
|
||||
- Copy to `.workflow/.doc-index/specs/`
|
||||
- Preserve original (backward compatibility)
|
||||
|
||||
### 1.3 Extract Structured Entities
|
||||
|
||||
| Source File | Extract To |
|
||||
|------------|------------|
|
||||
| `spec-config.json` | project name, domain, spec_type |
|
||||
| `glossary.json` | → index glossary[] |
|
||||
| `product-brief.md` | vision, goals |
|
||||
| `requirements/REQ-*.md` | → index requirements[] (with MoSCoW priority) |
|
||||
| `requirements/NFR-*.md` | → index requirements[] (non-functional) |
|
||||
| `architecture/ADR-*.md` | → index architectureDecisions[] |
|
||||
| `epics/EPIC-*.md` | → feature grouping seeds |
|
||||
|
||||
## Phase 2: Codebase Mapping
|
||||
|
||||
Map spec entities to actual code locations using Gemini:
|
||||
|
||||
```bash
|
||||
ccw cli -p "PURPOSE: Map codebase to specification entities for documentation indexing.
|
||||
TASK:
|
||||
• Scan the codebase and identify all major modules/components
|
||||
• For each component: extract file paths, exported symbols (classes, functions, types)
|
||||
• Match components to these specification entities by name/domain similarity:
|
||||
Requirements: {REQ-001: desc, REQ-002: desc, ...extracted from Phase 1}
|
||||
Architecture decisions: {ADR-001: title, ...extracted from Phase 1}
|
||||
• Report unmatched components (exist in code but no spec counterpart)
|
||||
• Report unmatched requirements (in spec but no code found)
|
||||
MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: JSON: { components: [{ name, type, files, symbols, matched_req_ids, matched_adr_id, is_orphan }], unmatched_reqs: [REQ-NNN] }
|
||||
CONSTRAINTS: Focus on source directories | Ignore node_modules, dist, build" --tool gemini --mode analysis
|
||||
```
|
||||
|
||||
### 2.1 Generate Component IDs & Link
|
||||
|
||||
For each discovered component:
|
||||
- ID: `tech-{kebab-case-name}`
|
||||
- Link to matched `REQ-NNN` and `ADR-NNN`
|
||||
- Flag orphans for user review
|
||||
|
||||
## Phase 3: Build Feature Map (from Epics)
|
||||
|
||||
### 3.1 Epic → Feature Mapping
|
||||
|
||||
```
|
||||
Each EPIC-NNN → one feat-{slug}
|
||||
- id: feat-{slug} (from epic slug)
|
||||
- name: from Epic name
|
||||
- epicId: EPIC-NNN
|
||||
- status: inferred from code mapping
|
||||
- all requirements have matched components → "implemented"
|
||||
- some matched → "in-progress"
|
||||
- none matched → "planned"
|
||||
- requirementIds: from Epic's stories → requirement links
|
||||
- tags: from domain keywords
|
||||
```
|
||||
|
||||
### 3.2 Generate Feature Map Documents
|
||||
|
||||
For each feature → `feature-maps/{slug}.md`:
|
||||
|
||||
```markdown
|
||||
---
|
||||
id: feat-{slug}
|
||||
name: Feature Name
|
||||
epic_id: EPIC-NNN
|
||||
status: implemented
|
||||
requirements: [REQ-001, REQ-002]
|
||||
components: [tech-auth-service, tech-user-model]
|
||||
tags: [auth, security]
|
||||
last_updated: ISO8601
|
||||
---
|
||||
|
||||
# Feature Name
|
||||
|
||||
## Overview
|
||||
{Description from epic}
|
||||
|
||||
## Requirements
|
||||
- **REQ-001**: {title} (Must) — {component mapping status}
|
||||
- **REQ-002**: {title} (Should) — {component mapping status}
|
||||
|
||||
## Technical Components
|
||||
- **AuthService** (`src/services/auth.ts`): {role}
|
||||
|
||||
## Architecture Decisions
|
||||
- **ADR-001**: {title}
|
||||
|
||||
## Change History
|
||||
| Date | Task | Description |
|
||||
|------|------|-------------|
|
||||
| {date} | Initial | Indexed from spec SPEC-{session} |
|
||||
```
|
||||
|
||||
### 3.3 Generate Tech Registry Documents
|
||||
|
||||
For each component → `tech-registry/{slug}.md`:
|
||||
|
||||
```markdown
|
||||
---
|
||||
id: tech-{slug}
|
||||
name: ComponentName
|
||||
type: service
|
||||
features: [feat-auth]
|
||||
code_locations:
|
||||
- path: src/services/auth.ts
|
||||
symbols: [AuthService, AuthService.login]
|
||||
last_updated: ISO8601
|
||||
---
|
||||
|
||||
# ComponentName
|
||||
|
||||
## Responsibility
|
||||
{From Gemini analysis}
|
||||
|
||||
## Code Locations
|
||||
- `src/services/auth.ts`: Main implementation
|
||||
|
||||
## Related Requirements
|
||||
- **REQ-001**: {title}
|
||||
|
||||
## Architecture Decisions
|
||||
- **ADR-001**: {title}
|
||||
```
|
||||
|
||||
## Phase 4: Assemble doc-index.json
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"project": "{project-name}",
|
||||
"build_path": "spec-first",
|
||||
"spec_session": "SPEC-{slug}-{date}",
|
||||
"last_updated": "ISO8601",
|
||||
"glossary": [
|
||||
{ "id": "gloss-{slug}", "term": "Term", "definition": "...", "aliases": [], "category": "core|technical|business" }
|
||||
],
|
||||
"features": [
|
||||
{ "id": "feat-{slug}", "name": "...", "epicId": "EPIC-NNN", "status": "...", "docPath": "feature-maps/{slug}.md", "requirementIds": ["REQ-NNN"], "tags": [] }
|
||||
],
|
||||
"requirements": [
|
||||
{ "id": "REQ-NNN", "title": "...", "source": "spec", "priority": "Must|Should|Could|Won't", "sourcePath": "specs/SPEC-*/requirements/REQ-NNN-*.md", "techComponentIds": ["tech-{slug}"], "featureId": "feat-{slug}" }
|
||||
],
|
||||
"technicalComponents": [
|
||||
{ "id": "tech-{slug}", "name": "...", "type": "...", "responsibility": "...", "adrId": "ADR-NNN|null", "docPath": "tech-registry/{slug}.md", "codeLocations": [{ "path": "...", "symbols": [], "lineRange": [0,0] }], "dependsOn": [], "featureIds": ["feat-{slug}"], "actionIds": [] }
|
||||
],
|
||||
"architectureDecisions": [
|
||||
{ "id": "ADR-NNN", "title": "...", "source": "spec", "sourcePath": "specs/SPEC-*/architecture/ADR-NNN-*.md", "componentIds": ["tech-{slug}"] }
|
||||
],
|
||||
"actions": []
|
||||
}
|
||||
```
|
||||
|
||||
### Merge with Existing Code-First Index
|
||||
|
||||
If a code-first index exists (from prior `/ddd:scan`):
|
||||
- Replace `IREQ-NNN` with matching `REQ-NNN` where content overlaps
|
||||
- Keep `IREQ-NNN` entries that have no spec counterpart (mark them `source: "legacy-inferred"`)
|
||||
- Replace `IADR-NNN` with `ADR-NNN` where applicable
|
||||
- Update `build_path` to `"spec-first"`
|
||||
- Preserve existing `tech-*` components (update links only)
|
||||
|
||||
## Phase 5: Generate Index Documents & Validation
|
||||
|
||||
### 5.1 Index Documents
|
||||
- `feature-maps/_index.md` — feature overview table
|
||||
- `tech-registry/_index.md` — component registry table
|
||||
- `action-logs/_index.md` — empty, populated by `/ddd:sync`
|
||||
|
||||
### 5.2 Coverage Report
|
||||
|
||||
```
|
||||
Index Build Report (spec-first)
|
||||
|
||||
Spec: {session-id}
|
||||
Features: {N} (from {N} Epics)
|
||||
Requirements: {N} (REQ: {n}, NFR: {n})
|
||||
Components: {N} ({orphan} orphans without spec match)
|
||||
ADRs: {N}
|
||||
|
||||
Mapping Coverage:
|
||||
Requirements → Components: {%} ({unmapped} unmapped)
|
||||
Components → Features: {%}
|
||||
Epics → Features: 100%
|
||||
|
||||
Gaps:
|
||||
- {N} requirements have no matching code component
|
||||
- {N} code components are not linked to any requirement
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Skip all interactive prompts |
|
||||
| `-s, --spec <id>` | Use specific spec session |
|
||||
| `--from-scratch` | Delete existing index and rebuild |
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: `spec-generator` outputs, codebase, existing `/ddd:scan` index
|
||||
- **Output to**: `ddd:plan`, `ddd:sync`, `ddd:update`
|
||||
- **Upgrades**: Can merge with prior code-first (`/ddd:scan`) index
|
||||
<!-- New file: .claude/commands/ddd/plan.md (451 lines) -->
|
||||
---
|
||||
name: plan
|
||||
description: Document-driven planning pipeline — queries doc-index, explores codebase with doc-aware angles, clarifies ambiguities, and produces unified plan.json + TASK-*.json artifacts with doc_context traceability.
|
||||
argument-hint: "[-y|--yes] [--explore] [--skip-explore] [--skip-clarify] \"task description or feature keyword\""
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Skip clarification (Phase 3), auto-select ddd:execute (Phase 5), skip interactive refinement.
|
||||
|
||||
# DDD Plan Command (/ddd:plan)
|
||||
|
||||
## Purpose
|
||||
|
||||
Full planning pipeline for document-driven development. Unlike simple context lookup, this command:
|
||||
1. **Queries** the doc-index for instant context (features, requirements, components, ADRs)
|
||||
2. **Explores** the codebase with doc-index-informed angles (not generic presets)
|
||||
3. **Clarifies** ambiguities from exploration results and doc-index gaps
|
||||
4. **Plans** with unified schema output (plan.json + TASK-*.json with doc_context)
|
||||
5. **Hands off** to ddd:execute or other execution engines
|
||||
|
||||
### Key Differentiation from lite-plan
|
||||
- Phase 1 provides instant context from doc-index (no cold-start exploration)
|
||||
- Exploration angles are doc-index-informed (not generic preset selection)
|
||||
- Tasks carry doc_context for traceability (features → requirements → code)
|
||||
- Architecture decisions (ADRs) automatically surface as constraints
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- `doc-index.json` must exist at `.workflow/.doc-index/doc-index.json`
|
||||
- If not found → suggest running `/ddd:index-build` or `/ddd:scan` first
|
||||
|
||||
## Session Folder
|
||||
|
||||
```
|
||||
.workflow/.doc-index/planning/{task-slug}-{YYYY-MM-DD}/
|
||||
├── exploration-{angle}.json # Per-angle exploration (Phase 2)
|
||||
├── explorations-manifest.json # Exploration index
|
||||
├── plan.json # Plan overview (Phase 4)
|
||||
├── planning-context.md # Legacy context package (Phase 0+1 combined)
|
||||
└── .task/
|
||||
├── TASK-001.json
|
||||
└── TASK-002.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 0: Parse Task Intent (enhanced)
|
||||
|
||||
### 0.1 Extract Keywords
|
||||
|
||||
From the user's task description, extract:
|
||||
- **Domain keywords**: feature names, module names, business terms
|
||||
- **Technical keywords**: file paths, class names, function names
|
||||
- **Action type**: feature | bugfix | refactor | optimization | migration
|
||||
|
||||
### 0.2 Glossary Match
|
||||
|
||||
Cross-reference extracted keywords against `doc-index.json.glossary[]`:
|
||||
- Match terms and aliases
|
||||
- Expand user's vocabulary with canonical terms
|
||||
|
||||
### 0.3 Classify Complexity
|
||||
|
||||
Assess task complexity based on:
|
||||
- Number of features potentially affected (from keyword matching)
|
||||
- Whether new components are needed or existing ones modified
|
||||
- Cross-feature impact (single feature vs multiple)
|
||||
|
||||
| Signal | Complexity |
|
||||
|--------|-----------|
|
||||
| Single feature, existing components | Low |
|
||||
| 1-2 features, some new components | Medium |
|
||||
| 3+ features, new architecture needed | High |
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Doc-Index Query
|
||||
|
||||
### 1.1 Feature Search
|
||||
|
||||
```
|
||||
Search doc-index.json.features[] where:
|
||||
- name CONTAINS keyword (fuzzy)
|
||||
- tags INTERSECT keywords
|
||||
- requirementIds link to matching requirements
|
||||
→ Output: matched feature IDs + names
|
||||
```
|
||||
|
||||
### 1.2 Requirement Search
|
||||
|
||||
```
|
||||
Search doc-index.json.requirements[] where:
|
||||
- title CONTAINS keyword
|
||||
- id matches explicit REQ-NNN reference
|
||||
- featureId matches found features
|
||||
→ Output: matched requirement IDs + titles + priorities
|
||||
```
|
||||
|
||||
### 1.3 Component Search
|
||||
|
||||
```
|
||||
Search doc-index.json.technicalComponents[] where:
|
||||
- name CONTAINS keyword
|
||||
- codeLocations[].path CONTAINS file path keyword
|
||||
- codeLocations[].symbols CONTAINS symbol keyword
|
||||
- featureIds INTERSECT found features
|
||||
→ Output: matched component IDs + code locations
|
||||
```
|
||||
|
||||
### 1.4 ADR Search
|
||||
|
||||
```
|
||||
Search doc-index.json.architectureDecisions[] where:
|
||||
- componentIds INTERSECT found components
|
||||
→ Output: matched ADR IDs + titles
|
||||
```
|
||||
|
||||
### 1.5 Action History Search
|
||||
|
||||
```
|
||||
Search doc-index.json.actions[] where:
|
||||
- related to found features or components
|
||||
→ Output: recent actions with descriptions
|
||||
```
|
||||
|
||||
### 1.6 Build Impact Map
|
||||
|
||||
Assemble all found references into a structured impact map:
|
||||
|
||||
```json
|
||||
{
|
||||
"affected_features": ["feat-auth"],
|
||||
"affected_requirements": ["REQ-001", "REQ-002"],
|
||||
"affected_components": ["tech-auth-service", "tech-user-model"],
|
||||
"architecture_constraints": ["ADR-001"],
|
||||
"recent_actions": ["task-123"],
|
||||
"complexity": "Medium"
|
||||
}
|
||||
```
|
||||
|
||||
Save as `planning-context.md` (legacy format for backward compatibility).
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Doc-Index-Guided Exploration (NEW)
|
||||
|
||||
Use Phase 1 results to **SELECT exploration angles intelligently**:
|
||||
|
||||
### 2.1 Angle Selection Logic
|
||||
|
||||
| Phase 1 Signal | Add Exploration Angle |
|
||||
|----------------|----------------------|
|
||||
| feat-auth or security-related ADR affected | `security` |
|
||||
| Multiple features crossed (2+) | `integration-points` |
|
||||
| New component needed (no matching tech-*) | `architecture` |
|
||||
| Performance-related requirements | `performance` |
|
||||
| Default (always included) | `patterns` + `dependencies` |
|
||||
|
||||
Select 1-4 angles total. More angles for higher complexity.
|
||||
|
||||
### 2.2 Skip & Trigger Conditions
|
||||
|
||||
| Complexity | Default Behavior | Override |
|
||||
|-----------|-----------------|---------|
|
||||
| **Low** | Auto-skip Phase 2 | `--explore` forces exploration |
|
||||
| **Medium** | Ask user (unless `-y` → skip) | `--explore` forces, `--skip-explore` forces skip |
|
||||
| **High** | Always run | `--skip-explore` forces skip |
|
||||
|
||||
Skip Phase 2 entirely when:
|
||||
- Complexity is Low AND `--explore` not set
|
||||
- OR `--skip-explore` flag is set
|
||||
- OR `-y` flag AND complexity is Medium
|
||||
|
||||
### 2.3 Parallel Exploration
|
||||
|
||||
Launch 1-4 parallel `cli-explore-agent` runs:
|
||||
|
||||
```
|
||||
For each selected angle:
|
||||
Agent(subagent_type="cli-explore-agent", prompt="
|
||||
Explore codebase for: {user task description}
|
||||
Angle: {angle}
|
||||
|
||||
## Doc-Index Context (pre-loaded)
|
||||
Features affected: {feature names + IDs}
|
||||
Components: {component names + code locations}
|
||||
Requirements: {requirement titles}
|
||||
Architecture decisions: {ADR titles + decisions}
|
||||
|
||||
Focus exploration on {angle}-specific concerns.
|
||||
Output: explore-json-schema format.
|
||||
")
|
||||
```
|
||||
|
||||
Each agent receives doc-index context (feature-maps, tech-registry docs) to avoid cold-start.
|
||||
|
||||
### 2.4 Save Exploration Results
|
||||
|
||||
- Each exploration → `exploration-{angle}.json` (explore-json-schema)
|
||||
- Manifest → `explorations-manifest.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"explorations": [
|
||||
{ "angle": "patterns", "path": "exploration-patterns.json", "file_count": 12 },
|
||||
{ "angle": "security", "path": "exploration-security.json", "file_count": 8 }
|
||||
],
|
||||
"total_files_discovered": 18,
|
||||
"timestamp": "ISO8601"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Clarification (NEW)
|
||||
|
||||
### 3.1 Aggregate Clarification Needs
|
||||
|
||||
Collect from three sources:
|
||||
1. **Exploration results**: `clarification_needs[]` from each exploration JSON
|
||||
2. **Doc-index gaps**: unmapped requirements, orphan components, missing feature coverage
|
||||
3. **Conflicting constraints**: contradictory architecture decisions, requirement priority conflicts
|
||||
|
||||
### 3.2 Deduplicate & Batch
|
||||
|
||||
- Merge duplicate/similar questions across exploration angles
|
||||
- Group into rounds (max 4 questions per AskUserQuestion call)
|
||||
- Prioritize: blocking questions first, nice-to-have last
|
||||
|
||||
### 3.3 Skip Conditions
|
||||
|
||||
Skip Phase 3 when:
|
||||
- `-y` flag is set
|
||||
- `--skip-clarify` flag is set
|
||||
- No clarification needs collected from any source
|
||||
- Complexity is Low AND Phase 2 was skipped (no exploration results to aggregate)
|
||||
|
||||
### 3.4 Execute Clarification
|
||||
|
||||
```
|
||||
AskUserQuestion(questions=[
|
||||
{
|
||||
question: "Which authentication strategy should the new endpoint use?",
|
||||
header: "Auth strategy",
|
||||
options: [
|
||||
{ label: "JWT Bearer (Recommended)", description: "Consistent with ADR-001 and existing auth middleware" },
|
||||
{ label: "API Key", description: "Simpler but inconsistent with current architecture" },
|
||||
{ label: "OAuth2", description: "Most flexible but higher implementation cost" }
|
||||
],
|
||||
multiSelect: false
|
||||
}
|
||||
])
|
||||
```
|
||||
|
||||
Feed answers back into Phase 4 as constraints.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Task Planning (NEW — produces plan.json + TASK-*.json)
|
||||
|
||||
### 4.1 Planning Strategy Selection
|
||||
|
||||
| Complexity | Strategy |
|
||||
|-----------|---------|
|
||||
| Low | Direct Claude planning (inline) |
|
||||
| Medium | cli-lite-planning-agent with doc-index context |
|
||||
| High | cli-lite-planning-agent with full exploration + doc-index context |
|
||||
|
||||
### 4.2 Planning Input Assembly
|
||||
|
||||
Combine:
|
||||
- User's original task description
|
||||
- Phase 1 impact map (features, requirements, components, ADRs)
|
||||
- Phase 2 exploration results (if executed)
|
||||
- Phase 3 clarification answers (if collected)
|
||||
- Relevant feature-map and tech-registry doc excerpts
|
||||
|
||||
### 4.3 Execute Planning
|
||||
|
||||
For **Low complexity** (direct):
|
||||
```
|
||||
Generate plan.json + TASK-*.json directly based on assembled context.
|
||||
```
|
||||
|
||||
For **Medium/High complexity**:
|
||||
```
|
||||
Agent(subagent_type="cli-lite-planning-agent", prompt="
|
||||
Task: {user task description}
|
||||
|
||||
## Doc-Index Impact Map
|
||||
{Phase 1 results}
|
||||
|
||||
## Exploration Context
|
||||
{Phase 2 results summary}
|
||||
|
||||
## Clarification Answers
|
||||
{Phase 3 answers}
|
||||
|
||||
## Architecture Constraints
|
||||
{ADR excerpts}
|
||||
|
||||
Generate plan following plan-overview-base-schema.
|
||||
Generate tasks following task-schema.
|
||||
Include doc_context in both plan.json and each TASK-*.json.
|
||||
")
|
||||
```
|
||||
|
||||
### 4.4 Output Schema: plan.json
|
||||
|
||||
Follows `plan-overview-base-schema` with ddd-specific `doc_context` extension:
|
||||
|
||||
```json
|
||||
{
|
||||
"summary": "...",
|
||||
"approach": "...",
|
||||
"task_ids": ["TASK-001", "TASK-002"],
|
||||
"task_count": 2,
|
||||
"complexity": "Medium",
|
||||
"doc_context": {
|
||||
"affected_features": ["feat-auth"],
|
||||
"affected_requirements": ["REQ-001", "REQ-002"],
|
||||
"affected_components": ["tech-auth-service"],
|
||||
"architecture_constraints": ["ADR-001"],
|
||||
"index_path": ".workflow/.doc-index/doc-index.json"
|
||||
},
|
||||
"_metadata": {
|
||||
"timestamp": "ISO8601",
|
||||
"source": "cli-lite-planning-agent",
|
||||
"plan_type": "feature",
|
||||
"schema_version": "2.0",
|
||||
"exploration_angles": ["patterns", "security"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4.5 Output Schema: TASK-*.json
|
||||
|
||||
Follows `task-schema` with ddd-specific `doc_context` extension:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "TASK-001",
|
||||
"title": "Add rate limiting middleware",
|
||||
"description": "...",
|
||||
"depends_on": [],
|
||||
"convergence": {
|
||||
"criteria": ["Rate limiter middleware exists and is registered", "Tests pass"],
|
||||
"verification": "npm test -- --grep rate-limit",
|
||||
"definition_of_done": "API endpoints enforce rate limits per ADR-001 specifications"
|
||||
},
|
||||
"doc_context": {
|
||||
"feature_ids": ["feat-auth"],
|
||||
"requirement_ids": ["REQ-001"],
|
||||
"component_ids": ["tech-auth-service"],
|
||||
"adr_ids": ["ADR-001"],
|
||||
"feature_docs": ["feature-maps/auth.md"],
|
||||
"component_docs": ["tech-registry/auth-service.md"]
|
||||
},
|
||||
"files": [...],
|
||||
"implementation": [...]
|
||||
}
|
||||
```
|
||||
|
||||
### 4.6 Enrichment Rules
|
||||
|
||||
Each task is enriched with:
|
||||
- `feature_ids`, `requirement_ids`, `component_ids`, `adr_ids` — traced from Phase 1
|
||||
- Relevant feature-map and tech-registry doc paths
|
||||
- Requirement acceptance criteria as convergence criteria source
|
||||
- ADR decisions as implementation constraints
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Confirmation & Handoff Selection
|
||||
|
||||
### 5.1 Display Plan Summary
|
||||
|
||||
Show:
|
||||
- Plan overview (summary, approach, complexity)
|
||||
- Task list with dependencies
|
||||
- Doc-index impact: which features/requirements/components will be affected
|
||||
- Estimated scope
|
||||
|
||||
### 5.2 Handoff Options
|
||||
|
||||
| Option | Description | When |
|
||||
|--------|-------------|------|
|
||||
| **ddd:execute** | Document-aware execution (recommended) | Default for ddd workflow |
|
||||
| **lite-execute** | Standard execution (no doc awareness) | When doc traceability not needed |
|
||||
| **direct** | Output context, manual work | User prefers manual coding |
|
||||
| **stop** | Planning only, no execution | Research/analysis tasks |
|
||||
|
||||
### 5.3 Auto-Selection
|
||||
|
||||
With `-y`: auto-select `ddd:execute`.
|
||||
|
||||
Without `-y`: present options via AskUserQuestion.
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Handoff
|
||||
|
||||
### 6.1 Build Execution Context
|
||||
|
||||
Build `executionContext` compatible with lite-execute format:
|
||||
|
||||
```json
|
||||
{
|
||||
"plan_path": ".workflow/.doc-index/planning/{slug}/plan.json",
|
||||
"task_dir": ".workflow/.doc-index/planning/{slug}/.task/",
|
||||
"doc_index_path": ".workflow/.doc-index/doc-index.json",
|
||||
"exploration_manifest": ".workflow/.doc-index/planning/{slug}/explorations-manifest.json",
|
||||
"original_input": "user's task description"
|
||||
}
|
||||
```
|
||||
|
||||
### 6.2 Invoke Selected Engine
|
||||
|
||||
| Selection | Action |
|
||||
|-----------|--------|
|
||||
| `ddd:execute` | Invoke `/ddd:execute --in-memory` with executionContext |
|
||||
| `lite-execute` | Invoke `/workflow:lite-execute` with plan.json path |
|
||||
| `direct` | Display context package + file list for manual work |
|
||||
| `stop` | Output plan summary, end here |
|
||||
|
||||
---
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Skip clarification, auto-select ddd:execute |
|
||||
| `--explore` | Force Phase 2 exploration even for Low complexity |
|
||||
| `--skip-explore` | Skip Phase 2 (doc-index-guided exploration) |
|
||||
| `--skip-clarify` | Skip Phase 3 (clarification) only |
|
||||
|
||||
## Output
|
||||
|
||||
- **Primary**: plan.json + TASK-*.json in session folder
|
||||
- **Secondary**: planning-context.md (legacy format)
|
||||
- **Exploration**: exploration-{angle}.json files (if Phase 2 ran)
|
||||
- **Console**: Plan summary with doc-index impact
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: `doc-index.json` (built by `/ddd:index-build` or `/ddd:scan`)
|
||||
- **Output to**: `/ddd:execute`, `/workflow:lite-execute`, `/ddd:sync` post-task
|
||||
- **Schemas**: `plan-overview-ddd-schema.json` (plan output), `task-schema.json` + `task-ddd-extension-schema.json` (task output), `explore-json-schema.json`
|
||||
- **Triggers**: Before any development task in ddd workflow
|
||||
<!-- New file: .claude/commands/ddd/scan.md (340 lines) -->
|
||||
---
|
||||
name: scan
|
||||
description: Scan existing codebase to build document index without specs. Analyzes code structure, infers features, discovers components, and reverse-engineers project knowledge graph.
|
||||
argument-hint: "[-y|--yes] [--from-scratch] [--scope <dir>] \"optional project description\""
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-confirm feature groupings, component naming, skip interactive review.
|
||||
|
||||
# DDD Scan Command (/ddd:scan)
|
||||
|
||||
## Purpose
|
||||
|
||||
For **existing projects without specifications**: analyze codebase to construct the document index by reverse-engineering project structure. This is the code-first entry point — no spec-generator required.
|
||||
|
||||
```
|
||||
Codebase → Components → Features (inferred) → Requirements (inferred) → doc-index.json
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
- Existing project, no spec-generator outputs
|
||||
- Want to start using doc-driven workflow on a legacy codebase
|
||||
- Quick project mapping for onboarding or audit
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- A codebase must exist (src/, lib/, app/, or similar source directories)
|
||||
- Git repository recommended (for action history seeding)
|
||||
|
||||
## Storage Location
|
||||
|
||||
```
|
||||
.workflow/.doc-index/
|
||||
├── doc-index.json ← Central index (primary output)
|
||||
├── feature-maps/ ← Inferred feature documentation
|
||||
│ ├── _index.md
|
||||
│ └── {feature-slug}.md
|
||||
├── tech-registry/ ← Discovered component documentation
|
||||
│ ├── _index.md
|
||||
│ └── {component-slug}.md
|
||||
└── action-logs/ ← Git history seeds
|
||||
├── _index.md
|
||||
└── {act-hash}.md
|
||||
```
|
||||
|
||||
## Phase 1: Project Structure Analysis
|
||||
|
||||
### 1.1 Framework & Stack Detection
|
||||
|
||||
```bash
|
||||
ccw cli -p "PURPOSE: Analyze project structure, tech stack, and architecture for documentation indexing.
|
||||
TASK:
|
||||
• Detect language/framework from manifest files (package.json, go.mod, Cargo.toml, requirements.txt, etc.)
|
||||
• Map directory structure: source dirs, test dirs, config dirs, entry points
|
||||
• Identify architectural pattern: monolith, microservices, monorepo, library, CLI tool
|
||||
• Detect key dependencies and their roles (ORM, HTTP framework, auth library, etc.)
|
||||
• List all major source directories with brief purpose description
|
||||
MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: JSON with: {
|
||||
project_name, language, framework, architecture_pattern,
|
||||
source_dirs: [{ path, purpose, file_count }],
|
||||
dependencies: [{ name, role }],
|
||||
entry_points: [{ path, description }]
|
||||
}
|
||||
CONSTRAINTS: Prioritize source directories | Ignore node_modules, dist, build, vendor" --tool gemini --mode analysis
|
||||
```
|
||||
|
||||
### 1.2 Merge with project-tech.json
|
||||
|
||||
If `.workflow/project-tech.json` exists, merge to reduce redundant analysis.
|
||||
|
||||
## Phase 2: Component Discovery
|
||||
|
||||
### 2.1 Deep Module Scan
|
||||
|
||||
```bash
|
||||
ccw cli -p "PURPOSE: Discover all significant code components/modules for documentation indexing.
|
||||
TASK:
|
||||
• For each source directory, identify distinct modules/components
|
||||
• For each component extract:
|
||||
- Name (class name, module name, or logical group)
|
||||
- Type: service | controller | model | util | hook | route | config | middleware | component
|
||||
- File paths (primary file + related files)
|
||||
- Exported symbols (public API: classes, functions, types, constants)
|
||||
- Internal dependencies: what other modules it imports from within the project
|
||||
- Responsibility: one-line description of what it does
|
||||
• Group small utility files under parent module when they share domain
|
||||
MODE: analysis
|
||||
CONTEXT: @{source_dirs from Phase 1}
|
||||
EXPECTED: JSON array: [{ name, type, files, symbols, depends_on, responsibility }]
|
||||
CONSTRAINTS: Focus on business logic | Min threshold: components with 2+ exports or clear domain purpose | Group utilities under parent domain" --tool gemini --mode analysis
|
||||
```
|
||||
|
||||
### 2.2 Generate Component IDs
|
||||
|
||||
For each discovered component:
|
||||
- ID: `tech-{kebab-case-name}` (e.g., `tech-auth-service`, `tech-user-model`)
|
||||
- Validate uniqueness, append counter on collision
|
||||
|
||||
### 2.3 Build Dependency Graph
|
||||
|
||||
From `depends_on` fields, construct internal dependency edges:
|
||||
```
|
||||
tech-auth-service → tech-user-model
|
||||
tech-auth-service → tech-jwt-util
|
||||
tech-order-controller → tech-auth-service
|
||||
```
|
||||
|
||||
## Phase 3: Feature Inference
|
||||
|
||||
**Key step: group components into logical features without formal specs.**
|
||||
|
||||
### 3.1 Inference Strategy (priority order)
|
||||
|
||||
```
|
||||
Strategy 1 — Directory grouping:
|
||||
src/auth/** → feat-auth
|
||||
src/orders/** → feat-orders
|
||||
src/payments/** → feat-payments
|
||||
|
||||
Strategy 2 — Route/endpoint grouping (web apps):
|
||||
/api/users/* → feat-user-management
|
||||
/api/orders/* → feat-order-management
|
||||
|
||||
Strategy 3 — Dependency clustering:
|
||||
Components that heavily import each other → same feature
|
||||
|
||||
Strategy 4 — Domain keyword extraction:
|
||||
Class names + file names → domain terms → feature names
|
||||
```
|
||||
|
||||
### 3.2 Gemini Feature Synthesis
|
||||
|
||||
```bash
|
||||
ccw cli -p "PURPOSE: Infer high-level features from discovered code components. This project has no formal specification.
|
||||
TASK:
|
||||
Given these discovered components:
|
||||
{component list from Phase 2: names, types, files, responsibilities, dependencies}
|
||||
|
||||
• Group them into logical features (3-10 features for a typical project)
|
||||
• For each feature:
|
||||
- name: human-readable (Chinese OK)
|
||||
- component_ids: which components belong
|
||||
- description: what the feature does (inferred from code)
|
||||
- inferred_requirements: what this feature needs to accomplish (1-3 per feature)
|
||||
- status: 'implemented' (code complete) or 'partial' (incomplete patterns)
|
||||
- tags: search keywords
|
||||
• Identify cross-cutting concerns (logging, auth middleware, error handling) as separate features
|
||||
MODE: analysis
|
||||
CONTEXT: {component list JSON}
|
||||
EXPECTED: JSON: { features: [{ name, description, component_ids, inferred_requirements: [{ id, title }], status, tags }] }
|
||||
CONSTRAINTS: Every component must belong to at least 1 feature | Prefer fewer broad features over many narrow ones" --tool gemini --mode analysis
|
||||
```
|
||||
|
||||
### 3.3 Interactive Feature Review (unless -y)
|
||||
|
||||
Present inferred features to user:
|
||||
- Allow renaming, merging, splitting
|
||||
- Allow reassigning components between features
|
||||
- Confirm final feature list
|
||||
|
||||
## Phase 4: Implicit Requirement & Architecture Extraction
|
||||
|
||||
### 4.1 Inferred Requirements
|
||||
|
||||
For each feature, generate lightweight requirement entries from its components:
|
||||
|
||||
```
|
||||
Feature: feat-auth (User Authentication)
|
||||
→ IREQ-001: "Users can log in with email and password" (from LoginController)
|
||||
→ IREQ-002: "JWT tokens for session management" (from AuthMiddleware + jwt dep)
|
||||
→ IREQ-003: "Password reset via email" (from PasswordResetService)
|
||||
```
|
||||
|
||||
**ID Convention**: `IREQ-NNN` — distinguishes inferred from formal `REQ-NNN`.
|
||||
|
||||
### 4.2 Inferred Architecture Decisions
|
||||
|
||||
Detect patterns from code + dependencies:
|
||||
|
||||
```
|
||||
Express.js + JWT middleware → IADR-001: "REST API with JWT authentication"
|
||||
Prisma ORM + PostgreSQL → IADR-002: "PostgreSQL via Prisma ORM"
|
||||
React + Redux → IADR-003: "React frontend with Redux state"
|
||||
```
|
||||
|
||||
**ID Convention**: `IADR-NNN` — distinguishes inferred from formal `ADR-NNN`.
|
||||
|
||||
### 4.3 Glossary Generation
|
||||
|
||||
Extract domain terms from:
|
||||
- Class/function names (CamelCase → terms)
|
||||
- Key business terms in comments and strings
|
||||
- Framework-specific terminology
|
||||
|
||||
Write to `.workflow/.doc-index/glossary.json`.
|
||||
|
||||
## Phase 5: Git History Seeds
|
||||
|
||||
```bash
|
||||
git log --oneline --since="3 months ago" --no-merges --format="%H|%s|%ai" | head -30
|
||||
```
|
||||
|
||||
For each significant commit:
|
||||
- Match changed files to discovered components
|
||||
- Create action entry with `type: "historical"`
|
||||
|
||||
## Phase 6: Assemble doc-index.json
|
||||
|
||||
Write the index with code-first markers:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"project": "{project-name}",
|
||||
"build_path": "code-first",
|
||||
"spec_session": null,
|
||||
"last_updated": "ISO8601",
|
||||
"glossary": [...],
|
||||
"features": [{
|
||||
"id": "feat-{slug}",
|
||||
"name": "Feature Name",
|
||||
"epicId": null,
|
||||
"status": "implemented|partial",
|
||||
"docPath": "feature-maps/{slug}.md",
|
||||
"requirementIds": ["IREQ-NNN"],
|
||||
"tags": ["tag"]
|
||||
}],
|
||||
"requirements": [{
|
||||
"id": "IREQ-NNN",
|
||||
"title": "Inferred requirement",
|
||||
"source": "inferred",
|
||||
"priority": "inferred",
|
||||
"sourcePath": null,
|
||||
"techComponentIds": ["tech-{slug}"],
|
||||
"featureId": "feat-{slug}"
|
||||
}],
|
||||
"technicalComponents": [{
|
||||
"id": "tech-{slug}",
|
||||
"name": "ComponentName",
|
||||
"type": "service|controller|model|...",
|
||||
"responsibility": "One-line description",
|
||||
"adrId": "IADR-NNN|null",
|
||||
"docPath": "tech-registry/{slug}.md",
|
||||
"codeLocations": [{ "path": "src/...", "symbols": [...] }],
|
||||
"dependsOn": ["tech-{other}"],
|
||||
"featureIds": ["feat-{slug}"],
|
||||
"actionIds": []
|
||||
}],
|
||||
"architectureDecisions": [{
|
||||
"id": "IADR-NNN",
|
||||
"title": "Inferred decision",
|
||||
"source": "inferred",
|
||||
"sourcePath": null,
|
||||
"componentIds": ["tech-{slug}"]
|
||||
}],
|
||||
"actions": [{
|
||||
"id": "act-{short-hash}",
|
||||
"description": "Commit message",
|
||||
"type": "historical",
|
||||
"status": "historical",
|
||||
"affectedComponents": ["tech-{slug}"],
|
||||
"relatedCommit": "full-hash",
|
||||
"timestamp": "ISO8601"
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
## Phase 7: Generate Documents
|
||||
|
||||
### 7.1 Feature Maps
|
||||
|
||||
For each feature → `feature-maps/{slug}.md`:
|
||||
- Frontmatter with id, name, status, inferred requirements, components, tags
|
||||
- Sections: Overview, Inferred Requirements, Technical Components, Dependencies, Change History
|
||||
- Mark inferred content: `> Inferred from code analysis`
|
||||
|
||||
### 7.2 Tech Registry
|
||||
|
||||
For each component → `tech-registry/{slug}.md`:
|
||||
- Frontmatter with id, name, type, code_locations, depends_on
|
||||
- Sections: Responsibility, Code Locations, Related Features, Dependencies (in/out)
|
||||
|
||||
### 7.3 Index Documents
|
||||
|
||||
- `feature-maps/_index.md` — feature table
|
||||
- `tech-registry/_index.md` — component table
|
||||
- `action-logs/_index.md` — recent git history table
|
||||
|
||||
## Phase 8: Validation & Report
|
||||
|
||||
```
|
||||
Scan Report
|
||||
|
||||
Project: {name} ({language}/{framework})
|
||||
Architecture: {pattern}
|
||||
Source dirs: {N}
|
||||
|
||||
Discovered:
|
||||
Components: {N} ({by type breakdown})
|
||||
Features: {N} (inferred)
|
||||
Requirements: {N} (IREQ, inferred)
|
||||
Architecture Decisions: {N} (IADR, inferred)
|
||||
Historical Actions: {N} (from git)
|
||||
|
||||
Coverage:
|
||||
Components → Features: {%}
|
||||
Dependencies mapped: {%}
|
||||
|
||||
Recommendations:
|
||||
- Run /spec-generator to formalize {N} inferred requirements
|
||||
- {N} components have unclear responsibility — review tech-registry docs
|
||||
- Use /ddd:plan to start planning tasks with this index
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Auto-confirm all decisions |
|
||||
| `--from-scratch` | Delete existing index and rebuild |
|
||||
| `--scope <dir>` | Limit scan to specific directory (e.g., `--scope src/auth`) |
|
||||
|
||||
## Upgrade Path: scan → spec
|
||||
|
||||
When a scanned project later runs `spec-generator` + `/ddd:index-build`:
|
||||
- `/ddd:index-build` detects existing code-first index
|
||||
- Merges: `IREQ-NNN` → `REQ-NNN`, `IADR-NNN` → `ADR-NNN` where content overlaps
|
||||
- Updates `build_path` to `"spec-first"`
|
||||
- Preserves all `tech-*` and `feat-*` entries (updates links only)
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: Codebase, git history, `project-tech.json`
|
||||
- **Output to**: `ddd:plan`, `ddd:sync`, `ddd:update`, `ddd:index-build` (upgrade)
|
||||
- **Standalone**: Can be used independently on any project
|
||||
265
.claude/commands/ddd/sync.md
Normal file
265
.claude/commands/ddd/sync.md
Normal file
@@ -0,0 +1,265 @@
|
||||
---
|
||||
name: sync
|
||||
description: Post-task synchronization - update document index, generate action log, and refresh feature/component docs after completing a development task.
|
||||
argument-hint: "[-y|--yes] [--dry-run] [--from-manifest <path>] [--task-id <id>] [--commit <hash>] \"task summary\""
|
||||
allowed-tools: TodoWrite(*), Agent(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-detect changes, auto-update all docs, skip review prompts.
|
||||
|
||||
# DDD Sync Command (/ddd:sync)
|
||||
|
||||
## Purpose
|
||||
|
||||
After completing a development task, synchronize the document index with actual code changes:
|
||||
1. **Analyze** what changed (git diff)
|
||||
2. **Trace** which features/requirements/components are affected
|
||||
3. **Update** index entries (status, code locations, links)
|
||||
4. **Generate** action log entry
|
||||
5. **Refresh** feature-map and tech-registry documents
|
||||
|
||||
## When to Use: sync vs update
|
||||
|
||||
| Scenario | Use |
|
||||
|----------|-----|
|
||||
| Task completed, ready to commit | **ddd:sync** — full post-task reconciliation |
|
||||
| Mid-development, quick impact check | ddd:update |
|
||||
| Pre-commit validation | ddd:update --check-only |
|
||||
| Auto-triggered after ddd:execute | **ddd:sync** (automatic) |
|
||||
| Periodic index refresh during refactoring | ddd:update |
|
||||
|
||||
**Rule of thumb**: `sync` = task boundary (done something), `update` = development pulse (doing something).
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- `doc-index.json` must exist
|
||||
- Git repository with committed or staged changes
|
||||
|
||||
## Phase 1: Change Detection
|
||||
|
||||
### 1.0 Data Source Selection
|
||||
|
||||
```
|
||||
IF --from-manifest <path>:
|
||||
Load execution-manifest.json
|
||||
→ files_modified[] provides precise file list + action type + task attribution
|
||||
→ TASK-*.result.json provides symbol-level changes + convergence results
|
||||
→ Skip Phase 1.1/1.2 (already classified by execute)
|
||||
→ Proceed directly to Phase 2 with manifest data
|
||||
ELSE:
|
||||
→ Fall through to Phase 1.1 (git-based discovery)
|
||||
```
|
||||
|
||||
**`--from-manifest` advantages** (used automatically by ddd:execute):
|
||||
- Precise file → task attribution (which task modified which file)
|
||||
- Symbol-level change tracking (not just file-level)
|
||||
- Convergence verification results carried forward to action-log
|
||||
- Survives process interruptions (manifest is persisted to disk)
|
||||
|
||||
### 1.1 Identify Changes (git-based fallback)
|
||||
|
||||
```bash
|
||||
# If --commit provided:
|
||||
git diff --name-only {commit}^..{commit}
|
||||
git diff --stat {commit}^..{commit}
|
||||
|
||||
# If --task-id provided, find related commits:
|
||||
git log --oneline --grep="task-{id}" | head -10
|
||||
|
||||
# Otherwise (fallback): analyze the most recent commit only (HEAD~1..HEAD)
|
||||
git diff --name-only HEAD~1..HEAD
|
||||
```
|
||||
|
||||
### 1.2 Classify Changes (git-based fallback)
|
||||
|
||||
For each changed file, determine:
|
||||
- **Type**: added | modified | deleted | renamed
|
||||
- **Category**: source | test | config | docs | other
|
||||
- **Symbols affected**: parse diff for changed functions/classes (use Gemini if complex)
|
||||
|
||||
## Phase 2: Impact Tracing
|
||||
|
||||
### 2.1 Match to Index
|
||||
|
||||
For each changed file path:
|
||||
|
||||
```
|
||||
Search doc-index.json.technicalComponents[].codeLocations[].path
|
||||
→ Find matching component IDs
|
||||
→ From components, find linked featureIds
|
||||
→ From features, find linked requirementIds
|
||||
```
|
||||
|
||||
### 2.2 Discover New Components
|
||||
|
||||
If changed files don't match any existing component:
|
||||
- Flag as potential new component
|
||||
- Ask user if it should be registered (or auto-register with `-y`)
|
||||
|
||||
### 2.3 Build Impact Report
|
||||
|
||||
```markdown
|
||||
## Impact Summary
|
||||
|
||||
### Changed Files (5)
|
||||
- src/services/auth.ts (modified) → tech-auth-service → feat-auth
|
||||
- src/models/user.ts (modified) → tech-user-model → feat-auth
|
||||
- src/routes/login.ts (added) → NEW COMPONENT → feat-auth
|
||||
- src/tests/auth.test.ts (modified) → [test file, skip]
|
||||
- package.json (modified) → [config, skip]
|
||||
|
||||
### Affected Features
|
||||
- feat-auth: User Authentication (2 components modified, 1 new)
|
||||
|
||||
### Affected Requirements
|
||||
- REQ-001: Email login (implementation updated)
|
||||
- REQ-002: JWT token generation (implementation updated)
|
||||
```
|
||||
|
||||
## Phase 3: Update Index
|
||||
|
||||
### 3.0 Dry-Run Gate
|
||||
|
||||
If `--dry-run` is set:
|
||||
- Execute Phase 3 analysis (determine what would change)
|
||||
- Display planned modifications as a preview report
|
||||
- Skip all file writes (Phase 3.1-3.5 and Phase 4)
|
||||
- Output: "Dry-run complete. Run without --dry-run to apply changes."
|
||||
|
||||
### 3.0.1 Backup Index
|
||||
|
||||
Before any modifications, create backup:
|
||||
- Copy `doc-index.json` → `doc-index.json.bak`
|
||||
- On failure: restore from `.bak` and report error
|
||||
- On success: remove `.bak`
|
||||
|
||||
### 3.1 Update Technical Components
|
||||
|
||||
For each affected component in `doc-index.json`:
|
||||
- Update `codeLocations` if file paths or line ranges changed
|
||||
- Update `symbols` if new exports were added
|
||||
- Add new `actionIds` entry
|
||||
|
||||
### 3.2 Register New Components
|
||||
|
||||
For newly discovered components:
|
||||
- Generate `tech-{slug}` ID
|
||||
- Create entry in `technicalComponents[]`
|
||||
- Link to appropriate features
|
||||
- Generate new `tech-registry/{slug}.md` document
|
||||
|
||||
### 3.3 Update Feature Status
|
||||
|
||||
For each affected feature:
|
||||
- If all requirements now have mapped components → `status: "implemented"`
|
||||
- If some requirements still unmapped → `status: "in-progress"`
|
||||
|
||||
### 3.4 Add Action Entry
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "task-{id}",
|
||||
"description": "{task summary from user}",
|
||||
"type": "feature|bugfix|refactor",
|
||||
"status": "completed",
|
||||
"affectedFeatures": ["feat-auth"],
|
||||
"affectedComponents": ["tech-auth-service", "tech-user-model"],
|
||||
"changedFiles": [
|
||||
{ "path": "src/services/auth.ts", "action": "modified", "task_id": "TASK-001" },
|
||||
{ "path": "src/models/user.ts", "action": "modified", "task_id": "TASK-001" }
|
||||
],
|
||||
"symbolsChanged": ["AuthService.validate", "UserModel.toJSON"],
|
||||
"convergenceResults": {
|
||||
"passed": 2,
|
||||
"total": 2,
|
||||
"details": ["Rate limiter middleware exists", "Config accepts per-route limits"]
|
||||
},
|
||||
"verifyGate": "PASS|WARN|FAIL|skipped",
|
||||
"relatedCommit": "{commit hash}",
|
||||
"manifestPath": "{execution-manifest.json path | null}",
|
||||
"timestamp": "ISO8601"
|
||||
}
|
||||
```
|
||||
|
||||
### 3.5 Update Timestamp
|
||||
|
||||
Set `doc-index.json.last_updated` to current time.
|
||||
|
||||
## Phase 4: Refresh Documents
|
||||
|
||||
### 4.1 Update Feature Maps
|
||||
|
||||
For each affected feature's `feature-maps/{slug}.md`:
|
||||
- Update "Change History" table with new action entry
|
||||
- Update component list if new components were added
|
||||
- Update status if changed
|
||||
|
||||
### 4.2 Update Tech Registry
|
||||
|
||||
For each affected component's `tech-registry/{slug}.md`:
|
||||
- Update code locations
|
||||
- Update symbol list
|
||||
- Add action to change history
|
||||
|
||||
### 4.3 Update Action Log
|
||||
|
||||
Create `.workflow/.doc-index/action-logs/{task-id}.md`:
|
||||
|
||||
```markdown
|
||||
---
|
||||
id: task-{id}
|
||||
type: feature|bugfix|refactor
|
||||
status: completed
|
||||
features: [feat-auth]
|
||||
components: [tech-auth-service, tech-user-model]
|
||||
commit: {hash}
|
||||
timestamp: ISO8601
|
||||
---
|
||||
|
||||
# Task: {summary}
|
||||
|
||||
## Changes
|
||||
| File | Type | Component |
|
||||
|------|------|-----------|
|
||||
| src/services/auth.ts | modified | tech-auth-service |
|
||||
|
||||
## Impact
|
||||
- Features affected: feat-auth
|
||||
- Requirements addressed: REQ-001, REQ-002
|
||||
|
||||
## Notes
|
||||
{any user-provided notes}
|
||||
```
|
||||
|
||||
### 4.4 Update Index Documents
|
||||
|
||||
- Refresh `feature-maps/_index.md` table
|
||||
- Refresh `tech-registry/_index.md` table
|
||||
- Append to `action-logs/_index.md` table
|
||||
|
||||
## Phase 5: Confirmation (unless -y)
|
||||
|
||||
Present update summary to user:
|
||||
- Files updated in doc-index
|
||||
- New documents created
|
||||
- Status changes
|
||||
- Ask for confirmation before writing
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Auto-confirm all updates |
|
||||
| `--dry-run` | Preview all changes without modifying any files |
|
||||
| `--from-manifest <path>` | Use execution-manifest.json as data source (auto-set by ddd:execute) |
|
||||
| `--task-id <id>` | Associate with specific task ID |
|
||||
| `--commit <hash>` | Analyze specific commit |
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: `execution-manifest.json` (preferred, from ddd:execute) OR Git history (fallback), `doc-index.json`, `/ddd:plan` output
|
||||
- **Output to**: Updated `doc-index.json`, feature-maps/, tech-registry/, action-logs/
|
||||
- **Triggers**: After completing any development task
|
||||
- **Data source priority**: `--from-manifest` > `--commit` > `--task-id` > git diff HEAD~1
|
||||
154
.claude/commands/ddd/update.md
Normal file
154
.claude/commands/ddd/update.md
Normal file
@@ -0,0 +1,154 @@
|
||||
---
|
||||
name: update
|
||||
description: Incremental index update - detect code changes and trace impact to related features/requirements. Lightweight alternative to full sync.
|
||||
argument-hint: "[-y|--yes] [--files <file1,file2,...>] [--staged] [--check-only]"
|
||||
allowed-tools: TodoWrite(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*), mcp__ace-tool__search_context(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-update index without confirmation prompts.
|
||||
|
||||
# DDD Update Command (/ddd:update)
|
||||
|
||||
## Purpose
|
||||
|
||||
Lightweight incremental update: given a set of changed files, trace their impact through the document index and update affected entries. Unlike `/ddd:sync` (full post-task sync), this command focuses on keeping the index fresh during development.
|
||||
|
||||
## When to Use: update vs sync
|
||||
|
||||
| Scenario | Use |
|
||||
|----------|-----|
|
||||
| Quick impact check during development | **ddd:update** |
|
||||
| Preview what sync would change | **ddd:update --check-only** |
|
||||
| Task completed, full reconciliation | ddd:sync |
|
||||
| Register new components + update all docs | ddd:sync |
|
||||
|
||||
**Rule of thumb**: `update` = lightweight pulse (during work), `sync` = full checkpoint (after work).
|
||||
|
||||
## Use Cases
|
||||
|
||||
1. **During development**: Quickly check which docs are affected by current changes
|
||||
2. **Pre-commit check**: Ensure index is up-to-date before committing
|
||||
3. **Periodic refresh**: Update stale code locations after refactoring
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- `doc-index.json` must exist at `.workflow/.doc-index/doc-index.json`
|
||||
|
||||
## Phase 1: Identify Changed Files
|
||||
|
||||
### Source Priority
|
||||
|
||||
```
|
||||
1. --files <list> → Explicit file list
|
||||
2. --staged → git diff --cached --name-only
|
||||
3. (default) → git diff --name-only (unstaged changes)
|
||||
```
|
||||
|
||||
### Output
|
||||
|
||||
List of changed file paths with change type (added/modified/deleted/renamed).
|
||||
|
||||
## Phase 2: Trace Impact
|
||||
|
||||
### 2.1 Forward Lookup (Code → Components → Features)
|
||||
|
||||
For each changed file:
|
||||
|
||||
```
|
||||
doc-index.json.technicalComponents[]
|
||||
.codeLocations[].path MATCH changed_file
|
||||
→ component_ids[]
|
||||
|
||||
doc-index.json.technicalComponents[component_ids]
|
||||
.featureIds[]
|
||||
→ feature_ids[]
|
||||
|
||||
doc-index.json.features[feature_ids]
|
||||
.requirementIds[]
|
||||
→ requirement_ids[]
|
||||
```
|
||||
|
||||
### 2.2 Orphan Detection
|
||||
|
||||
Files not matching any component → flag as:
|
||||
- **Potential new component**: if in src/ directory
|
||||
- **Ignorable**: if in test/, docs/, config/ directories
|
||||
|
||||
### 2.3 Impact Report
|
||||
|
||||
```
|
||||
Impact Analysis for 3 changed files:
|
||||
|
||||
src/services/auth.ts (modified)
|
||||
→ Component: tech-auth-service (AuthService)
|
||||
→ Feature: feat-auth (User Authentication)
|
||||
→ Requirements: REQ-001, REQ-002
|
||||
|
||||
src/middleware/rate-limit.ts (added)
|
||||
→ No matching component (new file)
|
||||
→ Suggested: Register as new component
|
||||
|
||||
src/utils/hash.ts (modified)
|
||||
→ Component: tech-hash-util
|
||||
→ Features: feat-auth, feat-password-reset
|
||||
→ Requirements: REQ-001, REQ-005
|
||||
```
|
||||
|
||||
## Phase 3: Update Index (unless --check-only)
|
||||
|
||||
### 3.1 Update Code Locations
|
||||
|
||||
For matched components:
|
||||
- If file was renamed → update `codeLocations[].path`
|
||||
- If file was deleted → remove code location entry
|
||||
- If symbols changed → update `symbols` list (requires AST or Gemini analysis)
|
||||
|
||||
### 3.2 Register New Components (interactive unless -y)
|
||||
|
||||
For orphan files in src/:
|
||||
- Prompt user for component name and type
|
||||
- Or auto-generate with `-y`: derive name from file path
|
||||
- Create `technicalComponents[]` entry
|
||||
- Ask which feature it belongs to (or auto-link by directory structure)
|
||||
|
||||
### 3.3 Update Timestamps
|
||||
|
||||
- Update the `last_updated` field in each affected component's doc (the `.md` file referenced by `technicalComponents[].docPath`)
|
||||
- Update `doc-index.json.last_updated`
|
||||
|
||||
## Phase 4: Refresh Documents (if updates were made)
|
||||
|
||||
### 4.1 Minimal Doc Updates
|
||||
|
||||
Only update documents that need changes:
|
||||
- `tech-registry/{slug}.md` — if code locations changed
|
||||
- `feature-maps/{slug}.md` — only if component list changed
|
||||
- `_index.md` files — only if entries were added/removed
|
||||
|
||||
### 4.2 Skip If --check-only
|
||||
|
||||
With `--check-only`, only output the impact report without modifying any files.
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | Effect |
|
||||
|------|--------|
|
||||
| `-y, --yes` | Auto-confirm updates |
|
||||
| `--files <list>` | Explicit comma-separated file list |
|
||||
| `--staged` | Analyze staged (git cached) files |
|
||||
| `--check-only` | Report impact without modifying index |
|
||||
|
||||
## Output
|
||||
|
||||
- **Console**: Impact report showing affected features/requirements
|
||||
- **Updated**: `doc-index.json` (if not --check-only)
|
||||
- **Updated**: Affected tech-registry/ and feature-maps/ docs
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Input from**: Git working tree, `doc-index.json`
|
||||
- **Output to**: Updated `doc-index.json`, impact report
|
||||
- **Triggers**: During development, pre-commit, or periodic refresh
|
||||
- **Can chain to**: `/ddd:sync` for full post-task synchronization
|
||||
@@ -25,40 +25,56 @@ Create task chains based on execution mode with conditional routing, dynamic rol
|
||||
| 5 | DRAFT-004 | writer | DRAFT-003 | Generate Epics & Stories | self-validate | P0 |
|
||||
| 6 | QUALITY-001 | reviewer | DRAFT-004 | 5-dimension spec quality + sign-off | DISCUSS-003 | P0 |
|
||||
|
||||
### Impl Pipeline - Conditional Routing (v3 NEW)
|
||||
### Impl Pipeline - Dynamic Task Creation
|
||||
|
||||
#### Low Complexity (1-2 modules, shallow deps)
|
||||
Initial dispatch creates **only PLAN-001**. IMPL-*, TEST-001, REVIEW-001 are dynamically created after PLAN-001 completes (see monitor.md handleCallback → PLAN-001).
|
||||
|
||||
#### Initial Dispatch (all complexity levels)
|
||||
|
||||
| # | Subject | Owner | BlockedBy | Description | Priority |
|
||||
|---|---------|-------|-----------|-------------|----------|
|
||||
| 1 | PLAN-001 | planner | (none) | Planning with complexity assessment | P0 |
|
||||
| 2 | IMPL-001 | executor | PLAN-001 | Code implementation | P0 |
|
||||
| 3 | TEST-001 | tester | IMPL-001 | Test-fix cycles | P1 |
|
||||
| 4 | REVIEW-001 | reviewer | IMPL-001 | 4-dimension code review | P1 |
|
||||
| 1 | PLAN-001 | planner | (none) | Planning with complexity assessment + TASK-*.json generation | P0 |
|
||||
|
||||
#### Medium Complexity (3-4 modules, moderate deps)
|
||||
#### Dynamic Creation (after PLAN-001 completes)
|
||||
|
||||
| # | Subject | Owner | BlockedBy | Description | Priority |
|
||||
|---|---------|-------|-----------|-------------|----------|
|
||||
| 1 | PLAN-001 | planner | (none) | Planning with complexity assessment | P0 |
|
||||
| 2 | ORCH-001 | orchestrator | PLAN-001 | Task decomposition & coordination | P0 |
|
||||
| 3 | IMPL-001 | executor | ORCH-001 | Backend implementation | P0 |
|
||||
| 4 | IMPL-002 | executor | ORCH-001 | Module 2 implementation | P0 |
|
||||
| 5 | TEST-001 | tester | IMPL-001, IMPL-002 | Test-fix cycles | P1 |
|
||||
| 6 | REVIEW-001 | reviewer | IMPL-001, IMPL-002 | 4-dimension code review | P1 |
|
||||
Coordinator reads planner's output and creates tasks dynamically:
|
||||
|
||||
#### High Complexity (5+ modules, deep deps)
|
||||
1. Read `<session-folder>/plan/plan.json` → extract `complexity` field
|
||||
2. Read `<session-folder>/plan/.task/TASK-*.json` → enumerate sub-tasks
|
||||
3. Apply complexity routing:
|
||||
|
||||
| # | Subject | Owner | BlockedBy | Description | Priority |
|
||||
|---|---------|-------|-----------|-------------|----------|
|
||||
| 1 | PLAN-001 | planner | (none) | Planning with complexity assessment | P0 |
|
||||
| 2 | ARCH-001 | architect | PLAN-001 | Architecture design | P0 |
|
||||
| 3 | ORCH-001 | orchestrator | ARCH-001 | Task decomposition & coordination | P0 |
|
||||
| 4 | IMPL-001 | executor | ORCH-001 | Backend implementation | P0 |
|
||||
| 5 | IMPL-002 | executor | ORCH-001 | Module 2 implementation | P0 |
|
||||
| 6 | IMPL-003 | executor | ORCH-001 | Module 3 implementation | P0 |
|
||||
| 7 | TEST-001 | tester | IMPL-001, IMPL-002, IMPL-003 | Test-fix cycles | P1 |
|
||||
| 8 | REVIEW-001 | reviewer | IMPL-001, IMPL-002, IMPL-003 | 4-dimension code review | P1 |
|
||||
| Complexity | Pre-IMPL tasks | Then |
|
||||
|------------|---------------|------|
|
||||
| Low | (none) | Create IMPL-* directly, blockedBy PLAN-001 |
|
||||
| Medium | ORCH-001 (orchestrator, blockedBy PLAN-001) | Create IMPL-* blockedBy ORCH-001 |
|
||||
| High | ARCH-001 (architect, blockedBy PLAN-001) → ORCH-001 (blockedBy ARCH-001) | Create IMPL-* blockedBy ORCH-001 |
|
||||
|
||||
4. For each `TASK-N.json`, create corresponding IMPL task:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "IMPL-00N",
|
||||
description: "PURPOSE: <TASK-N.title> | Success: <TASK-N.convergence.criteria>
|
||||
TASK:
|
||||
- <steps from TASK-N.json>
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Task file: <session-folder>/plan/.task/TASK-N.json
|
||||
- Files: <TASK-N.files[]>
|
||||
- Priority: P0
|
||||
EXPECTED: Implementation matching task file specification
|
||||
CONSTRAINTS: Only modify files listed in task file
|
||||
---
|
||||
Validation: self-validate
|
||||
InnerLoop: false
|
||||
Priority: P0",
|
||||
addBlockedBy: [<PLAN-001 or ORCH-001>]
|
||||
})
|
||||
```
|
||||
|
||||
5. Create TEST-001 (tester, blockedBy all IMPL-*, P1)
|
||||
6. Create REVIEW-001 (reviewer, blockedBy all IMPL-*, P1)
|
||||
7. Apply dynamic role injection (see below)
|
||||
|
||||
### FE Pipeline (3 tasks)
|
||||
|
||||
@@ -81,21 +97,21 @@ Create task chains based on execution mode with conditional routing, dynamic rol
|
||||
|
||||
### Dynamic Role Injection (v3 NEW)
|
||||
|
||||
When specialist roles are injected, add corresponding tasks:
|
||||
When specialist roles are injected, add corresponding tasks (blockedBy references the **last IMPL-*** task dynamically):
|
||||
|
||||
| Injected Role | Task ID | Owner | BlockedBy | Description | Priority |
|
||||
|---------------|---------|-------|-----------|-------------|----------|
|
||||
| security-expert | SECURITY-001 | security-expert | IMPL-001 | Security audit (OWASP Top 10) | P0 |
|
||||
| performance-optimizer | PERF-001 | performance-optimizer | IMPL-001 | Performance profiling & optimization | P1 |
|
||||
| security-expert | SECURITY-001 | security-expert | all IMPL-* | Security audit (OWASP Top 10) | P0 |
|
||||
| performance-optimizer | PERF-001 | performance-optimizer | all IMPL-* | Performance profiling & optimization | P1 |
|
||||
| data-engineer | DATA-001 | data-engineer | PLAN-001 | Data pipeline implementation (parallel) | P0 |
|
||||
| devops-engineer | DEVOPS-001 | devops-engineer | IMPL-001 | CI/CD & infrastructure setup | P1 |
|
||||
| devops-engineer | DEVOPS-001 | devops-engineer | all IMPL-* | CI/CD & infrastructure setup | P1 |
|
||||
| ml-engineer | ML-001 | ml-engineer | PLAN-001 | ML pipeline implementation (parallel) | P0 |
|
||||
|
||||
**Injection Rules**:
|
||||
- Security tasks: P0 priority, block REVIEW-001
|
||||
- Performance tasks: P1 priority, parallel with TEST-001
|
||||
- Data/ML tasks: P0 priority, parallel with IMPL-001
|
||||
- DevOps tasks: P1 priority, after IMPL-001
|
||||
- Data/ML tasks: P0 priority, parallel with IMPL-*
|
||||
- DevOps tasks: P1 priority, after all IMPL-*
|
||||
|
||||
### Composite Modes
|
||||
|
||||
@@ -131,17 +147,18 @@ Priority: <P0|P1|P2>",
|
||||
|
||||
### Complexity Assessment Logic (v3 NEW)
|
||||
|
||||
PLAN-001 task description includes complexity assessment instructions:
|
||||
PLAN-001 task description includes complexity assessment and TASK-*.json generation instructions:
|
||||
|
||||
```
|
||||
PURPOSE: Create implementation plan with complexity assessment | Success: Actionable plan with module breakdown and complexity rating
|
||||
PURPOSE: Create implementation plan with complexity assessment and task decomposition | Success: plan.json + TASK-*.json files
|
||||
|
||||
TASK:
|
||||
- Analyze requirements and scope
|
||||
- Identify modules and dependencies
|
||||
- Assess complexity (Low/Medium/High)
|
||||
- Generate implementation plan
|
||||
- Write complexity assessment to plan.json
|
||||
- Generate plan.json with complexity field
|
||||
- Generate .task/TASK-*.json files (1 per implementation unit, 2-7 tasks)
|
||||
- Each TASK-*.json must include: id, title, files[].change, convergence.criteria, depends_on
|
||||
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
@@ -151,7 +168,9 @@ CONTEXT:
|
||||
* Medium: 3-4 modules, moderate deps, 2 tech stacks
|
||||
* High: 5+ modules, deep deps, multiple tech stacks
|
||||
|
||||
EXPECTED: plan.json with complexity field + implementation plan document
|
||||
EXPECTED:
|
||||
<session-folder>/plan/plan.json (with complexity field)
|
||||
<session-folder>/plan/.task/TASK-001.json ... TASK-00N.json
|
||||
|
||||
Priority: P0
|
||||
```
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
# Command: execute
|
||||
|
||||
Spawn executor team-workers for IMPL tasks. All execution routes through executor role (has team interaction protocol). Coordinator only handles spawning — executor internally selects agent or CLI mode.
|
||||
|
||||
## Trigger
|
||||
|
||||
Called by `monitor.md handleSpawnNext` when IMPL-* tasks become ready.
|
||||
|
||||
## Spawn Logic
|
||||
|
||||
Each IMPL task = 1 executor team-worker. Coordinator passes task file + executor assignment, executor handles the rest.
|
||||
|
||||
```
|
||||
Agent({
|
||||
subagent_type: "team-worker",
|
||||
run_in_background: true,
|
||||
description: "IMPL-00N: <task title>",
|
||||
prompt: "## Role Assignment
|
||||
role: executor
|
||||
skill: team-lifecycle-v3
|
||||
session: <session-folder>
|
||||
session_id: <session-id>
|
||||
team_name: <team-name>
|
||||
requirement: <task-description>
|
||||
inner_loop: false
|
||||
priority: P0
|
||||
|
||||
Task file: <task_file>
|
||||
Executor: <agent|gemini|codex|qwen>
|
||||
Session: <session-folder>"
|
||||
})
|
||||
```
|
||||
|
||||
## Parallel Spawn
|
||||
|
||||
When multiple IMPL tasks are ready simultaneously (same blockedBy set), spawn all in a **single message with multiple Agent() calls**:
|
||||
|
||||
```
|
||||
// IMPL-001 (agent), IMPL-002 (codex), IMPL-003 (gemini) all ready
|
||||
Agent({ subagent_type: "team-worker", description: "IMPL-001: ...", prompt: "...Executor: agent..." })
|
||||
Agent({ subagent_type: "team-worker", description: "IMPL-002: ...", prompt: "...Executor: codex..." })
|
||||
Agent({ subagent_type: "team-worker", description: "IMPL-003: ...", prompt: "...Executor: gemini..." })
|
||||
```
|
||||
|
||||
**Rules**:
|
||||
- Independent IMPL tasks (no mutual blockedBy) → parallel spawn
|
||||
- Dependent IMPL tasks (TASK-B depends_on TASK-A) → sequential, spawn B after A completes
|
||||
- Each worker is fully isolated — no shared context between IMPL agents
|
||||
|
||||
## Coordinator State Update
|
||||
|
||||
After spawning, log to message bus:
|
||||
|
||||
```
|
||||
team_msg({
|
||||
operation: "log",
|
||||
session_id: "<session-id>",
|
||||
from: "coordinator",
|
||||
type: "impl_dispatched",
|
||||
summary: "Spawned N executor workers",
|
||||
data: {
|
||||
tasks: [
|
||||
{ id: "IMPL-001", executor: "agent", task_file: "..." },
|
||||
{ id: "IMPL-002", executor: "codex", task_file: "..." }
|
||||
]
|
||||
}
|
||||
})
|
||||
```
|
||||
@@ -52,10 +52,24 @@ Monitor team progress, handle callbacks, manage artifact registry, spawn next wo
|
||||
- Pause for user command
|
||||
- STOP
|
||||
- If task_id == "PLAN-001" (v3 NEW):
|
||||
- Read plan.json for complexity assessment
|
||||
- Display routing decision
|
||||
- Apply conditional routing
|
||||
- Continue
|
||||
- **Dynamic IMPL Task Creation from Planner DAG**:
|
||||
1. Read `<session>/plan/plan.json` → extract `complexity` field
|
||||
2. Glob `<session>/plan/.task/TASK-*.json` → enumerate task files
|
||||
3. For each TASK file, read `depends_on` field to build DAG
|
||||
4. Determine complexity routing:
|
||||
- Low: no pre-IMPL tasks
|
||||
- Medium: create ORCH-001 (blockedBy PLAN-001)
|
||||
- High: create ARCH-001 (blockedBy PLAN-001) + ORCH-001 (blockedBy ARCH-001)
|
||||
5. For each TASK-N.json, create IMPL-00N:
|
||||
- `blockedBy`: map TASK-N `depends_on` → corresponding IMPL IDs + (PLAN-001 or ORCH-001)
|
||||
- `description`: include `Task file: <session>/plan/.task/TASK-N.json`
|
||||
- `Priority`: P0
|
||||
6. Collect all IMPL-* task IDs
|
||||
7. Create TEST-001 (tester, blockedBy all IMPL-*, P1)
|
||||
8. Create REVIEW-001 (reviewer, blockedBy all IMPL-*, P1)
|
||||
9. Apply dynamic role injection if specialist roles were identified
|
||||
10. Display routing decision + created task count
|
||||
- Continue to handleSpawnNext
|
||||
|
||||
5. **Spawn Next**
|
||||
- Call handleSpawnNext
|
||||
@@ -114,7 +128,7 @@ Agent({
|
||||
run_in_background: true,
|
||||
prompt: `## Role Assignment
|
||||
role: <role>
|
||||
role_spec: .claude/skills/team-lifecycle-v3/role-specs/<role>.md
|
||||
skill: team-lifecycle-v3
|
||||
session: <session-folder>
|
||||
session_id: <session-id>
|
||||
team_name: <team-name>
|
||||
@@ -123,9 +137,7 @@ inner_loop: <true|false>
|
||||
priority: <P0|P1|P2>
|
||||
context_artifacts: <session-folder>/context-artifacts.json
|
||||
|
||||
Read role_spec file to load Phase 2-4 domain instructions.
|
||||
Read context_artifacts for upstream artifact paths (automatic discovery).
|
||||
Execute built-in Phase 1 (task discovery) -> role-spec Phase 2-4 -> built-in Phase 5 (report).`
|
||||
Execute built-in Phase 1 (task discovery) -> skill routes to role Phase 2-4 -> built-in Phase 5 (report).`
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
---
|
||||
role: executor
|
||||
prefix: IMPL
|
||||
inner_loop: true
|
||||
inner_loop: false
|
||||
discuss_rounds: []
|
||||
input_artifact_types: []
|
||||
input_artifact_types: [plan, spec, architecture]
|
||||
message_types:
|
||||
success: impl_complete
|
||||
progress: impl_progress
|
||||
@@ -12,56 +12,160 @@ message_types:
|
||||
|
||||
# Executor — Phase 2-4
|
||||
|
||||
## Phase 2: Task & Plan Loading
|
||||
**Role**: Implementation worker with team interaction protocol. Supports two execution modes: direct agent implementation or CLI delegation. Coordinator assigns mode per task via `Executor:` field.
|
||||
|
||||
**Objective**: Load plan and determine execution strategy.
|
||||
## Phase 2: Parse Task & Resolve Execution Mode
|
||||
|
||||
1. Load plan.json and .task/TASK-*.json from `<session-folder>/plan/`
|
||||
**Objective**: Load task JSON, execute pre-analysis, resolve execution mode.
|
||||
|
||||
**Backend selection** (priority order):
|
||||
### 2.1 Extract from task description
|
||||
|
||||
| Priority | Source | Method |
|
||||
|----------|--------|--------|
|
||||
| 1 | Task metadata | task.metadata.executor field |
|
||||
| 2 | Plan default | "Execution Backend:" in plan |
|
||||
| 3 | Auto-select | Simple (< 200 chars, no refactor) → agent; Complex → codex |
|
||||
- `Task file:` → `task_file` path
|
||||
- `Session:` → `session` folder
|
||||
- `Executor:` → `mode` (`agent` | `gemini` | `codex` | `qwen`)
|
||||
|
||||
**Code review selection**:
|
||||
### 2.2 Load task JSON (read task_file)
|
||||
|
||||
| Priority | Source | Method |
|
||||
|----------|--------|--------|
|
||||
| 1 | Task metadata | task.metadata.code_review field |
|
||||
| 2 | Plan default | "Code Review:" in plan |
|
||||
| 3 | Auto-select | Critical keywords (auth, security, payment) → enabled |
|
||||
```
|
||||
Task JSON Fields:
|
||||
├── id, title, scope, action
|
||||
├── description → Implementation goal
|
||||
├── files[] → Target files (path, target, change)
|
||||
├── implementation[] → Step-by-step execution instructions
|
||||
├── convergence.criteria[] → Done-when checklist
|
||||
├── pre_analysis[] → Context gathering steps (optional)
|
||||
│ └── { step, action, commands[], output_to, on_error }
|
||||
├── reference → Pattern reference (pattern, files[], examples)
|
||||
├── risks[] → Risk mitigations (optional)
|
||||
├── rationale → Approach rationale (optional)
|
||||
└── depends_on[] → (handled by coordinator, not executor)
|
||||
```
|
||||
|
||||
## Phase 3: Code Implementation
|
||||
### 2.3 Resolve execution mode (priority order)
|
||||
|
||||
**Objective**: Execute implementation across batches.
|
||||
| Priority | Source | Resolution |
|
||||
|----------|--------|------------|
|
||||
| 1 | Task description `Executor:` | Coordinator assignment |
|
||||
| 2 | task.meta.execution_config.method | Per-task config from planner |
|
||||
| 3 | plan.json recommended_execution | Plan-level default |
|
||||
| 4 | Auto-select | Low complexity → agent; Medium/High → codex |
|
||||
|
||||
**Batching**: Topological sort by IMPL task dependencies → sequential batches.
|
||||
### 2.4 Execute pre_analysis (if exists, runs locally regardless of mode)
|
||||
|
||||
| Backend | Invocation | Use Case |
|
||||
|---------|-----------|----------|
|
||||
| gemini | `ccw cli --tool gemini --mode write` (foreground) | Simple, direct edits |
|
||||
| codex | `ccw cli --tool codex --mode write` (foreground) | Complex, architecture |
|
||||
| qwen | `ccw cli --tool qwen --mode write` (foreground) | Alternative backend |
|
||||
```
|
||||
For each step in task.pre_analysis[]:
|
||||
→ Parse step.commands[] using command-to-tool mapping:
|
||||
"Read(path)" → Read tool
|
||||
"bash(command)" → Bash tool
|
||||
"Search(pattern,path)" → Grep tool
|
||||
"Glob(pattern)" → Glob tool
|
||||
→ Store output in [step.output_to] variable
|
||||
→ Handle errors per step.on_error (fail | continue | skip)
|
||||
```
|
||||
|
||||
## Phase 3: Execute Implementation
|
||||
|
||||
Route by resolved execution mode:
|
||||
|
||||
### Mode: `agent` — Direct Implementation
|
||||
|
||||
Executor implements directly using Edit/Write/Bash tools. Follows code-developer patterns.
|
||||
|
||||
```
|
||||
1. Read task.files[] as target files
|
||||
2. Read task.implementation[] as step-by-step instructions
|
||||
3. For each implementation step:
|
||||
- Substitute [variable_name] placeholders with pre_analysis results
|
||||
- For each file in step:
|
||||
* New file → Write tool
|
||||
* Modify file → Edit tool
|
||||
- Follow task.reference (pattern, files) for consistency
|
||||
4. Apply task.rationale.chosen_approach
|
||||
5. Mitigate task.risks[] during implementation
|
||||
```
|
||||
|
||||
**Quality rules** (same as code-developer):
|
||||
- Verify module/package existence before referencing (use Grep/Glob)
|
||||
- Incremental progress — small working changes
|
||||
- Follow existing code patterns from task.reference
|
||||
- No premature abstractions
|
||||
- ASCII-only, GBK-compatible
|
||||
|
||||
### Mode: `gemini` / `codex` / `qwen` — CLI Delegation
|
||||
|
||||
Build structured prompt from task JSON, delegate to CLI tool.
|
||||
|
||||
**Build handoff prompt**:
|
||||
|
||||
```javascript
|
||||
function buildCliHandoffPrompt(task, preAnalysisResults) {
|
||||
const context = Object.entries(preAnalysisResults)
|
||||
.map(([key, value]) => `### ${key}\n${value}`)
|
||||
.join('\n\n')
|
||||
|
||||
return `
|
||||
PURPOSE: ${task.title}
|
||||
${task.description}
|
||||
|
||||
## TARGET FILES
|
||||
${task.files?.map(f => `- **${f.path}** → ${f.change}`).join('\n')}
|
||||
|
||||
## IMPLEMENTATION STEPS
|
||||
${task.implementation?.map((s, i) => `${i+1}. ${s}`).join('\n')}
|
||||
|
||||
${context ? `## PRE-ANALYSIS CONTEXT\n${context}` : ''}
|
||||
|
||||
${task.reference ? `## REFERENCE\n- Pattern: ${task.reference.pattern}\n- Files: ${task.reference.files?.join(', ')}` : ''}
|
||||
|
||||
${task.rationale ? `## APPROACH\n${task.rationale.chosen_approach}` : ''}
|
||||
|
||||
${task.risks?.length ? `## RISKS\n${task.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')}` : ''}
|
||||
|
||||
## DONE WHEN
|
||||
${task.convergence?.criteria?.map(c => `- [ ] ${c}`).join('\n')}
|
||||
|
||||
MODE: write
|
||||
CONSTRAINTS: Only modify files listed above | Follow existing patterns
|
||||
`.trim()
|
||||
}
|
||||
```
|
||||
|
||||
**CLI call**:
|
||||
|
||||
```
|
||||
Bash({
|
||||
command: `ccw cli -p "${buildCliHandoffPrompt(task, preAnalysisResults)}"
|
||||
--tool <cli_tool> --mode write --rule development-implement-feature`,
|
||||
run_in_background: false,
|
||||
timeout: 3600000
|
||||
})
|
||||
```
|
||||
|
||||
**Resume strategy** (if task.cli_execution exists):
|
||||
|
||||
| Strategy | Command |
|
||||
|----------|---------|
|
||||
| new | `--id <session>-<task_id>` |
|
||||
| resume | `--resume <parent_id>` |
|
||||
| fork | `--resume <parent_id> --id <new_id>` |
|
||||
| merge_fork | `--resume <id1>,<id2> --id <new_id>` |
|
||||
|
||||
## Phase 4: Self-Validation
|
||||
|
||||
| Step | Method | Pass Criteria |
|
||||
|------|--------|--------------|
|
||||
| Syntax check | `tsc --noEmit` (30s) | Exit code 0 |
|
||||
| Acceptance criteria | Match criteria keywords vs implementation | All addressed |
|
||||
| Test detection | Find .test.ts/.spec.ts for modified files | Tests identified |
|
||||
| Code review (optional) | gemini analysis or codex review | No blocking issues |
|
||||
| Convergence check | Match task.convergence.criteria vs output | All criteria addressed |
|
||||
| Syntax check | `tsc --noEmit` or language-appropriate (30s) | Exit code 0 |
|
||||
| Test detection | Find test files for modified files | Tests identified |
|
||||
|
||||
**Report**: task ID, status, files modified, validation results, backend used.
|
||||
**Report**: task ID, status, mode used, files modified, convergence results.
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Syntax errors | Retry with error context (max 3) |
|
||||
| Missing dependencies | Request from coordinator |
|
||||
| Backend unavailable | Fallback to alternative tool |
|
||||
| Circular dependencies | Abort, report graph |
|
||||
| Agent mode: syntax errors | Retry with error context (max 3) |
|
||||
| CLI mode: execution failure | Retry, or resume with --resume |
|
||||
| pre_analysis failure | Follow step.on_error (fail/continue/skip) |
|
||||
| CLI tool unavailable | Fallback: gemini → qwen → codex |
|
||||
| Max retries exceeded | Report failure to coordinator |
|
||||
|
||||
@@ -19,6 +19,7 @@ import { useNotificationStore, selectCurrentQuestion, selectCurrentPopupCard } f
|
||||
import { useWorkflowStore } from '@/stores/workflowStore';
|
||||
import { useAppStore, selectIsImmersiveMode } from '@/stores/appStore';
|
||||
import { useWebSocketNotifications, useWebSocket } from '@/hooks';
|
||||
import { useHasHydrated } from '@/hooks/useHasHydrated';
|
||||
|
||||
export interface AppShellProps {
|
||||
/** Callback for refresh action */
|
||||
@@ -40,9 +41,14 @@ export function AppShell({
|
||||
// Workspace initialization from URL query parameter
|
||||
const switchWorkspace = useWorkflowStore((state) => state.switchWorkspace);
|
||||
const projectPath = useWorkflowStore((state) => state.projectPath);
|
||||
const hasHydrated = useWorkflowStore((state) => state._hasHydrated);
|
||||
const hasHydrated = useHasHydrated();
|
||||
const location = useLocation();
|
||||
|
||||
// Manually trigger hydration on mount (needed because of skipHydration: true in store config)
|
||||
useEffect(() => {
|
||||
useWorkflowStore.persist.rehydrate();
|
||||
}, []);
|
||||
|
||||
// Immersive mode (fullscreen) - hide chrome
|
||||
const isImmersiveMode = useAppStore(selectIsImmersiveMode);
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import { ChevronDown, X, FolderOpen, Check, Loader2 } from 'lucide-react';
|
||||
import { useIntl } from 'react-intl';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { selectFolder } from '@/lib/nativeDialog';
|
||||
import { useNotifications } from '@/hooks/useNotifications';
|
||||
import { Button } from '@/components/ui/Button';
|
||||
import { Input } from '@/components/ui/Input';
|
||||
import {
|
||||
@@ -81,6 +82,7 @@ export function WorkspaceSelector({ className }: WorkspaceSelectorProps) {
|
||||
const recentPaths = useWorkflowStore((state) => state.recentPaths);
|
||||
const switchWorkspace = useWorkflowStore((state) => state.switchWorkspace);
|
||||
const removeRecentPath = useWorkflowStore((state) => state.removeRecentPath);
|
||||
const { error: showError } = useNotifications();
|
||||
|
||||
const [isDropdownOpen, setIsDropdownOpen] = useState(false);
|
||||
const [isManualOpen, setIsManualOpen] = useState(false);
|
||||
@@ -113,11 +115,27 @@ export function WorkspaceSelector({ className }: WorkspaceSelectorProps) {
|
||||
);
|
||||
|
||||
const handleBrowseFolder = useCallback(async () => {
|
||||
const selected = await selectFolder(projectPath || undefined);
|
||||
if (selected) {
|
||||
await handleSwitchWorkspace(selected);
|
||||
const result = await selectFolder(projectPath || undefined);
|
||||
|
||||
// User cancelled the dialog - no action needed
|
||||
if (result.cancelled) {
|
||||
return;
|
||||
}
|
||||
}, [projectPath, handleSwitchWorkspace]);
|
||||
|
||||
// Error occurred - show error notification
|
||||
if (result.error) {
|
||||
showError(
|
||||
formatMessage({ id: 'workspace.selector.browseError' }),
|
||||
result.error
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Successfully selected a folder
|
||||
if (result.path) {
|
||||
await handleSwitchWorkspace(result.path);
|
||||
}
|
||||
}, [projectPath, handleSwitchWorkspace, showError, formatMessage]);
|
||||
|
||||
const handleManualPathSubmit = useCallback(async () => {
|
||||
const trimmedPath = manualPath.trim();
|
||||
|
||||
55
ccw/frontend/src/hooks/useHasHydrated.ts
Normal file
55
ccw/frontend/src/hooks/useHasHydrated.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
// ========================================
|
||||
// useHasHydrated Hook
|
||||
// ========================================
|
||||
// Determines if the Zustand workflow store has been rehydrated from localStorage
|
||||
// Uses Zustand persist middleware's onFinishHydration callback for reliable detection
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { useWorkflowStore } from '@/stores/workflowStore';
|
||||
|
||||
/**
|
||||
* A hook to determine if the Zustand workflow store has been rehydrated.
|
||||
* Returns `true` once the persisted state has been loaded from localStorage.
|
||||
*
|
||||
* This hook uses the Zustand persist middleware's onFinishHydration callback
|
||||
* instead of relying on internal state management, which avoids circular
|
||||
* reference issues during store initialization.
|
||||
*
|
||||
* @example
|
||||
* ```tsx
|
||||
* function MyComponent() {
|
||||
* const hasHydrated = useHasHydrated();
|
||||
*
|
||||
* useEffect(() => {
|
||||
* if (!hasHydrated) return;
|
||||
* // Safe to access persisted store values here
|
||||
* }, [hasHydrated]);
|
||||
*
|
||||
* if (!hasHydrated) return <LoadingSpinner />;
|
||||
* return <Content />;
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
export function useHasHydrated(): boolean {
|
||||
const [hydrated, setHydrated] = useState(() => {
|
||||
// Check initial hydration status synchronously
|
||||
return useWorkflowStore.persist.hasHydrated();
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
// If already hydrated, no need to subscribe
|
||||
if (hydrated) return;
|
||||
|
||||
// Subscribe to hydration completion event
|
||||
// onFinishHydration returns an unsubscribe function
|
||||
const unsubscribe = useWorkflowStore.persist.onFinishHydration(() => {
|
||||
setHydrated(true);
|
||||
});
|
||||
|
||||
return unsubscribe;
|
||||
}, [hydrated]);
|
||||
|
||||
return hydrated;
|
||||
}
|
||||
|
||||
export default useHasHydrated;
|
||||
@@ -1,36 +1,109 @@
|
||||
/**
|
||||
* Native OS dialog helpers
|
||||
* Calls server-side endpoints that open system-native file/folder picker dialogs.
|
||||
*/
|
||||
// ========================================
|
||||
// Native OS Dialog Helpers
|
||||
// ========================================
|
||||
// Calls server-side endpoints that open system-native file/folder picker dialogs.
|
||||
// Returns structured DialogResult objects for clear success/cancel/error handling.
|
||||
|
||||
export async function selectFolder(initialDir?: string): Promise<string | null> {
|
||||
/**
|
||||
* Represents the result of a native dialog operation.
|
||||
*/
|
||||
export interface DialogResult {
|
||||
/** The selected path. Null if cancelled or an error occurred. */
|
||||
path: string | null;
|
||||
/** True if the user cancelled the dialog. */
|
||||
cancelled: boolean;
|
||||
/** An error message if the operation failed. */
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Opens a native OS folder selection dialog.
|
||||
*
|
||||
* @param initialDir - Optional directory to start the dialog in
|
||||
* @returns DialogResult with path, cancelled status, and optional error
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const result = await selectFolder('/home/user/projects');
|
||||
* if (result.path) {
|
||||
* console.log('Selected:', result.path);
|
||||
* } else if (result.cancelled) {
|
||||
* console.log('User cancelled');
|
||||
* } else if (result.error) {
|
||||
* console.error('Error:', result.error);
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
export async function selectFolder(initialDir?: string): Promise<DialogResult> {
|
||||
try {
|
||||
const res = await fetch('/api/dialog/select-folder', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ initialDir }),
|
||||
});
|
||||
if (!res.ok) return null;
|
||||
|
||||
if (!res.ok) {
|
||||
return {
|
||||
path: null,
|
||||
cancelled: false,
|
||||
error: `Server responded with status: ${res.status}`,
|
||||
};
|
||||
}
|
||||
|
||||
const data = await res.json();
|
||||
if (data.cancelled) return null;
|
||||
return data.path || null;
|
||||
} catch {
|
||||
return null;
|
||||
if (data.cancelled) {
|
||||
return { path: null, cancelled: true };
|
||||
}
|
||||
|
||||
return { path: data.path || null, cancelled: false };
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : 'An unknown error occurred';
|
||||
return { path: null, cancelled: false, error: message };
|
||||
}
|
||||
}
|
||||
|
||||
export async function selectFile(initialDir?: string): Promise<string | null> {
|
||||
/**
|
||||
* Opens a native OS file selection dialog.
|
||||
*
|
||||
* @param initialDir - Optional directory to start the dialog in
|
||||
* @returns DialogResult with path, cancelled status, and optional error
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const result = await selectFile('/home/user/documents');
|
||||
* if (result.path) {
|
||||
* console.log('Selected:', result.path);
|
||||
* } else if (result.cancelled) {
|
||||
* console.log('User cancelled');
|
||||
* } else if (result.error) {
|
||||
* console.error('Error:', result.error);
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
export async function selectFile(initialDir?: string): Promise<DialogResult> {
|
||||
try {
|
||||
const res = await fetch('/api/dialog/select-file', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ initialDir }),
|
||||
});
|
||||
if (!res.ok) return null;
|
||||
|
||||
if (!res.ok) {
|
||||
return {
|
||||
path: null,
|
||||
cancelled: false,
|
||||
error: `Server responded with status: ${res.status}`,
|
||||
};
|
||||
}
|
||||
|
||||
const data = await res.json();
|
||||
if (data.cancelled) return null;
|
||||
return data.path || null;
|
||||
} catch {
|
||||
return null;
|
||||
if (data.cancelled) {
|
||||
return { path: null, cancelled: true };
|
||||
}
|
||||
|
||||
return { path: data.path || null, cancelled: false };
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : 'An unknown error occurred';
|
||||
return { path: null, cancelled: false, error: message };
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,9 +113,9 @@ function FilePathInput({ value, onChange, placeholder }: FilePathInputProps) {
|
||||
const handleBrowse = async () => {
|
||||
const { selectFile } = await import('@/lib/nativeDialog');
|
||||
const initialDir = value ? value.replace(/[/\\][^/\\]*$/, '') : undefined;
|
||||
const selected = await selectFile(initialDir);
|
||||
if (selected) {
|
||||
onChange(selected);
|
||||
const result = await selectFile(initialDir);
|
||||
if (result.path && !result.cancelled && !result.error) {
|
||||
onChange(result.path);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
// Manages workflow sessions, tasks, and related data
|
||||
|
||||
import { create } from 'zustand';
|
||||
import { devtools, persist } from 'zustand/middleware';
|
||||
import type {
|
||||
WorkflowStore,
|
||||
WorkflowState,
|
||||
@@ -16,6 +15,35 @@ import type {
|
||||
} from '../types/store';
|
||||
import { switchWorkspace as apiSwitchWorkspace, fetchRecentPaths, removeRecentPath as apiRemoveRecentPath } from '../lib/api';
|
||||
|
||||
// LocalStorage key for persisting projectPath
|
||||
const STORAGE_KEY = 'ccw-workflow-store';
|
||||
|
||||
// Helper to load persisted projectPath from localStorage
|
||||
const loadPersistedPath = (): string => {
|
||||
try {
|
||||
const stored = localStorage.getItem(STORAGE_KEY);
|
||||
if (stored) {
|
||||
const data = JSON.parse(stored);
|
||||
return data?.state?.projectPath || '';
|
||||
}
|
||||
} catch {
|
||||
// Ignore parse errors
|
||||
}
|
||||
return '';
|
||||
};
|
||||
|
||||
// Helper to persist projectPath to localStorage
|
||||
const persistPath = (projectPath: string): void => {
|
||||
try {
|
||||
localStorage.setItem(STORAGE_KEY, JSON.stringify({
|
||||
state: { projectPath },
|
||||
version: 1,
|
||||
}));
|
||||
} catch {
|
||||
// Ignore storage errors
|
||||
}
|
||||
};
|
||||
|
||||
// Helper to generate session key from ID
|
||||
const sessionKey = (sessionId: string): string => {
|
||||
return `session-${sessionId}`.replace(/[^a-zA-Z0-9-]/g, '-');
|
||||
@@ -34,14 +62,14 @@ const defaultSorting: WorkflowSorting = {
|
||||
direction: 'desc',
|
||||
};
|
||||
|
||||
// Initial state
|
||||
// Initial state - load persisted projectPath from localStorage
|
||||
const initialState: WorkflowState = {
|
||||
// Core data
|
||||
workflowData: {
|
||||
activeSessions: [],
|
||||
archivedSessions: [],
|
||||
},
|
||||
projectPath: '',
|
||||
projectPath: loadPersistedPath(),
|
||||
recentPaths: [],
|
||||
serverPlatform: 'win32',
|
||||
|
||||
@@ -57,14 +85,11 @@ const initialState: WorkflowState = {
|
||||
filters: defaultFilters,
|
||||
sorting: defaultSorting,
|
||||
|
||||
// Hydration state (internal)
|
||||
_hasHydrated: false,
|
||||
};
|
||||
|
||||
export const useWorkflowStore = create<WorkflowStore>()(
|
||||
devtools(
|
||||
persist(
|
||||
(set, get) => ({
|
||||
persist(
|
||||
(set, get) => ({
|
||||
...initialState,
|
||||
|
||||
// ========== Session Actions ==========
|
||||
@@ -396,6 +421,9 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
'switchWorkspace'
|
||||
);
|
||||
|
||||
// Persist projectPath to localStorage manually
|
||||
persistPath(response.projectPath);
|
||||
|
||||
// Trigger query invalidation callback
|
||||
const callback = get()._invalidateQueriesCallback;
|
||||
if (callback) {
|
||||
@@ -417,10 +445,6 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
set({ _invalidateQueriesCallback: callback }, false, 'registerQueryInvalidator');
|
||||
},
|
||||
|
||||
setHasHydrated: (state: boolean) => {
|
||||
set({ _hasHydrated: state }, false, 'setHasHydrated');
|
||||
},
|
||||
|
||||
// ========== Filters and Sorting ==========
|
||||
|
||||
setFilters: (filters: Partial<WorkflowFilters>) => {
|
||||
@@ -521,46 +545,15 @@ export const useWorkflowStore = create<WorkflowStore>()(
|
||||
}),
|
||||
{
|
||||
name: 'ccw-workflow-store',
|
||||
version: 1, // State version for migration support
|
||||
// Only persist projectPath - minimal state for workspace switching
|
||||
partialize: (state) => ({
|
||||
projectPath: state.projectPath,
|
||||
}),
|
||||
migrate: (persistedState, version) => {
|
||||
// Migration logic for future state shape changes
|
||||
if (version < 1) {
|
||||
// No migrations needed for initial version
|
||||
// Example: if (version === 0) { persistedState.newField = defaultValue; }
|
||||
}
|
||||
return persistedState as typeof persistedState;
|
||||
},
|
||||
onRehydrateStorage: () => {
|
||||
// Only log in development to avoid noise in production
|
||||
if (process.env.NODE_ENV === 'development') {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('[WorkflowStore] Hydrating from localStorage...');
|
||||
}
|
||||
return (state, error) => {
|
||||
if (error) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error('[WorkflowStore] Rehydration error:', error);
|
||||
return;
|
||||
}
|
||||
// Mark hydration as complete
|
||||
useWorkflowStore.getState().setHasHydrated(true);
|
||||
if (state?.projectPath) {
|
||||
if (process.env.NODE_ENV === 'development') {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('[WorkflowStore] Rehydrated with persisted projectPath:', state.projectPath);
|
||||
}
|
||||
// The initialization logic is now handled by AppShell.tsx
|
||||
// to correctly prioritize URL parameters over localStorage.
|
||||
}
|
||||
};
|
||||
},
|
||||
// Skip automatic hydration to avoid TDZ error during module initialization
|
||||
// Hydration will be triggered manually in AppShell after mount
|
||||
skipHydration: true,
|
||||
}
|
||||
),
|
||||
{ name: 'WorkflowStore' }
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
// Selectors for common access patterns
|
||||
|
||||
@@ -335,9 +335,6 @@ export interface WorkflowState {
|
||||
|
||||
// Query invalidation callback (internal)
|
||||
_invalidateQueriesCallback?: () => void;
|
||||
|
||||
// Hydration state (internal)
|
||||
_hasHydrated: boolean;
|
||||
}
|
||||
|
||||
export interface WorkflowActions {
|
||||
@@ -372,7 +369,6 @@ export interface WorkflowActions {
|
||||
removeRecentPath: (path: string) => Promise<void>;
|
||||
refreshRecentPaths: () => Promise<void>;
|
||||
registerQueryInvalidator: (callback: () => void) => void;
|
||||
setHasHydrated: (state: boolean) => void;
|
||||
|
||||
// Filters and sorting
|
||||
setFilters: (filters: Partial<WorkflowFilters>) => void;
|
||||
|
||||
Reference in New Issue
Block a user