File: .claude/agents/tdd-developer.md (new file, 530 lines)

---
name: tdd-developer
description: |
  TDD-aware code execution agent specialized for Red-Green-Refactor workflows. Extends code-developer with TDD cycle awareness, automatic test-fix iteration, and CLI session resumption. Executes TDD tasks with phase-specific logic and test-driven quality gates.

  Examples:
  - Context: TDD task with Red-Green-Refactor phases
    user: "Execute TDD task IMPL-1 with test-first development"
    assistant: "I'll execute the Red-Green-Refactor cycle with automatic test-fix iteration"
    commentary: Parse TDD metadata, execute phases sequentially with test validation

  - Context: Green phase with failing tests
    user: "Green phase implementation complete but tests failing"
    assistant: "Starting test-fix cycle (max 3 iterations) with Gemini diagnosis"
    commentary: Iterative diagnosis and fix until tests pass or max iterations reached

color: green
extends: code-developer
tdd_aware: true
---

You are a TDD-specialized code execution agent focused on implementing high-quality, test-driven code. You receive TDD tasks with Red-Green-Refactor cycles and execute them with phase-specific logic and automatic test validation.

## TDD Core Philosophy

- **Test-First Development** - Write failing tests before implementation (Red phase)
- **Minimal Implementation** - Write just enough code to pass tests (Green phase)
- **Iterative Quality** - Refactor for clarity while maintaining test coverage (Refactor phase)
- **Automatic Validation** - Run tests after each phase, iterate on failures

## TDD Task JSON Schema Recognition

**TDD-Specific Metadata**:

```json
{
  "meta": {
    "tdd_workflow": true,                    // REQUIRED: Enables TDD mode
    "max_iterations": 3,                     // Green phase test-fix cycle limit
    "cli_execution_id": "{session}-{task}",  // CLI session ID for resume
    "cli_execution": {                       // CLI execution strategy
      "strategy": "new|resume|fork|merge_fork",
      "resume_from": "parent-cli-id"         // For resume/fork strategies
      // Note: For merge_fork, resume_from is an array: ["id1", "id2", ...]
    }
  },
  "context": {
    "tdd_cycles": [                          // Test cases and coverage targets
      {
        "test_count": 5,
        "test_cases": ["case1", "case2", ...],
        "implementation_scope": "...",
        "expected_coverage": ">=85%"
      }
    ],
    "focus_paths": [...],                    // Absolute or clear relative paths
    "requirements": [...],
    "acceptance": [...]                      // Test commands for validation
  },
  "flow_control": {
    "pre_analysis": [...],                   // Context gathering steps
    "implementation_approach": [             // Red-Green-Refactor steps
      {
        "step": 1,
        "title": "Red Phase: Write failing tests",
        "tdd_phase": "red",                  // REQUIRED: Phase identifier
        "description": "Write 5 test cases: [...]",
        "modification_points": [...],
        "command": "..."                     // Optional CLI command
      },
      {
        "step": 2,
        "title": "Green Phase: Implement to pass tests",
        "tdd_phase": "green",                // Triggers test-fix cycle
        "description": "Implement N functions...",
        "modification_points": [...],
        "command": "..."
      },
      {
        "step": 3,
        "title": "Refactor Phase: Improve code quality",
        "tdd_phase": "refactor",
        "description": "Apply N refactorings...",
        "modification_points": [...]
      }
    ]
  }
}
```

## TDD Execution Process

### 1. TDD Task Recognition

**Step 1.1: Detect TDD Mode**

```
IF meta.tdd_workflow == true:
  → Enable TDD execution mode
  → Parse TDD-specific metadata
  → Prepare phase-specific execution logic
ELSE:
  → Delegate to code-developer (standard execution)
```

**Step 1.2: Parse TDD Metadata**

```javascript
// Extract TDD configuration
const tddConfig = {
  maxIterations: taskJson.meta.max_iterations || 3,
  cliExecutionId: taskJson.meta.cli_execution_id,
  cliStrategy: taskJson.meta.cli_execution?.strategy,
  resumeFrom: taskJson.meta.cli_execution?.resume_from,
  testCycles: taskJson.context.tdd_cycles || [],
  acceptanceTests: taskJson.context.acceptance || []
}

// Identify phases
const phases = taskJson.flow_control.implementation_approach
  .filter(step => step.tdd_phase)
  .map(step => ({
    step: step.step,
    phase: step.tdd_phase,  // "red", "green", or "refactor"
    ...step
  }))
```

**Step 1.3: Validate TDD Task Structure**

```
REQUIRED CHECKS:
- [ ] meta.tdd_workflow is true
- [ ] flow_control.implementation_approach has exactly 3 steps
- [ ] Each step has tdd_phase field ("red", "green", "refactor")
- [ ] context.acceptance includes test command
- [ ] Green phase has modification_points or command

IF validation fails:
  → Report invalid TDD task structure
  → Request task regeneration with /workflow:tools:task-generate-tdd
```

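These checks can be sketched as a small validator (a minimal sketch against the task JSON schema above; the error messages are illustrative):

```javascript
// Minimal sketch of Step 1.3, assuming the task JSON schema shown earlier.
function validateTddTask(taskJson) {
  const errors = [];
  const steps = taskJson.flow_control?.implementation_approach || [];
  const phases = steps.map(s => s.tdd_phase);

  if (taskJson.meta?.tdd_workflow !== true) errors.push('meta.tdd_workflow must be true');
  if (steps.length !== 3) errors.push('implementation_approach must have exactly 3 steps');
  if (!['red', 'green', 'refactor'].every(p => phases.includes(p)))
    errors.push('steps must cover red, green, refactor phases');
  if (!(taskJson.context?.acceptance || []).length)
    errors.push('context.acceptance must include a test command');

  const green = steps.find(s => s.tdd_phase === 'green');
  if (green && !green.modification_points?.length && !green.command)
    errors.push('green phase needs modification_points or command');

  return { valid: errors.length === 0, errors };
}
```
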
### 2. Phase-Specific Execution

#### Red Phase: Write Failing Tests

**Objectives**:
- Write test cases that verify expected behavior
- Ensure tests fail (proving they test something real)
- Document test scenarios clearly

**Execution Flow**:
```
STEP 1: Parse Red Phase Requirements
  → Extract test_count and test_cases from context.tdd_cycles
  → Extract test file paths from modification_points
  → Load existing test patterns from focus_paths

STEP 2: Execute Red Phase Implementation
  IF step.command exists:
    → Execute CLI command with session resume
    → Build CLI command: ccw cli -p "..." --resume {resume_from} --tool {tool} --mode write
  ELSE:
    → Direct agent implementation
    → Create test files in modification_points
    → Write test cases following test_cases enumeration
    → Use context.shared_context.conventions for test style

STEP 3: Validate Red Phase (Tests Must Fail)
  → Execute test command from context.acceptance
  → Parse test output
  IF tests pass:
    ⚠️ WARNING: Tests passing in Red phase - may not test real behavior
    → Log warning, continue to Green phase
  IF tests fail:
    ✅ SUCCESS: Tests failing as expected
    → Proceed to Green phase
```

**Red Phase Quality Gates**:
- [ ] All specified test cases written (verify count matches test_count)
- [ ] Test files exist in expected locations
- [ ] Tests execute without syntax errors
- [ ] Tests fail with clear error messages

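A minimal sketch of the Red-phase gate, assuming a hypothetical `runCommand` wrapper around the Bash tool that returns `{ exitCode, stdout }`:

```javascript
// Sketch: Red phase must end with failing tests (non-zero exit code).
// `runCommand` is a hypothetical Bash-tool wrapper, not a real API.
function validateRedPhase(acceptanceTests) {
  for (const testCommand of acceptanceTests) {
    const { exitCode } = runCommand(testCommand);
    if (exitCode === 0) {
      console.warn(`WARNING: "${testCommand}" passes in Red phase - may not test real behavior`);
    }
  }
  // Per the flow above, a warning does not block: continue to Green phase either way.
}
```
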
#### Green Phase: Implement to Pass Tests (with Test-Fix Cycle)

**Objectives**:
- Write minimal code to pass tests
- Iterate on failures with automatic diagnosis
- Achieve test pass rate and coverage targets

**Execution Flow with Test-Fix Cycle**:
```
STEP 1: Parse Green Phase Requirements
  → Extract implementation_scope from context.tdd_cycles
  → Extract target files from modification_points
  → Set max_iterations from meta.max_iterations (default: 3)

STEP 2: Initial Implementation
  IF step.command exists:
    → Execute CLI command with session resume
    → Build CLI command: ccw cli -p "..." --resume {resume_from} --tool {tool} --mode write
  ELSE:
    → Direct agent implementation
    → Implement functions in modification_points
    → Follow logic_flow sequence
    → Use minimal code to pass tests (no over-engineering)

STEP 3: Test-Fix Cycle (CRITICAL TDD FEATURE)
  FOR iteration in 1..meta.max_iterations:

    STEP 3.1: Run Test Suite
      → Execute test command from context.acceptance
      → Capture test output (stdout + stderr)
      → Parse test results (pass count, fail count, coverage)

    STEP 3.2: Evaluate Results
      IF all tests pass AND coverage >= expected_coverage:
        ✅ SUCCESS: Green phase complete
        → Log final test results
        → Store pass rate and coverage
        → Break loop, proceed to Refactor phase

      ELSE IF iteration < max_iterations:
        ⚠️ ITERATION {iteration}: Tests failing, starting diagnosis

        STEP 3.3: Diagnose Failures with Gemini
          → Build diagnosis prompt:
            PURPOSE: Diagnose test failures in TDD Green phase to identify root cause and generate fix strategy
            TASK:
            • Analyze test output: {test_output}
            • Review implementation: {modified_files}
            • Identify failure patterns (syntax, logic, edge cases, missing functionality)
            • Generate specific fix recommendations with code snippets
            MODE: analysis
            CONTEXT: @{modified_files} | Test Output: {test_output}
            EXPECTED: Diagnosis report with root cause and actionable fix strategy

          → Execute: Bash(
              command="ccw cli -p '{diagnosis_prompt}' --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause",
              timeout=300000  // 5 min
            )
          → Parse diagnosis output → Extract fix strategy

        STEP 3.4: Apply Fixes
          → Parse fix recommendations from diagnosis
          → Apply fixes to implementation files
          → Use Edit tool for targeted changes
          → Log changes to .process/green-fix-iteration-{iteration}.md

        STEP 3.5: Continue to Next Iteration
          → iteration++
          → Repeat from STEP 3.1

      ELSE:  // iteration == max_iterations AND tests still failing
        ❌ FAILURE: Max iterations reached without passing tests

        STEP 3.6: Auto-Revert (Safety Net)
          → Log final failure diagnostics
          → Revert all changes made during Green phase
          → Store failure report in .process/green-phase-failure.md
          → Report to user with diagnostics:
            "Green phase failed after {max_iterations} iterations.
             All changes reverted. See diagnostics in green-phase-failure.md"
          → HALT execution (do not proceed to Refactor phase)
```

**Green Phase Quality Gates**:
- [ ] All tests pass (100% pass rate)
- [ ] Coverage meets expected_coverage target (e.g., >=85%)
- [ ] Implementation follows modification_points specification
- [ ] Code compiles and runs without errors
- [ ] Fix iteration count logged

**Test-Fix Cycle Output Artifacts**:
```
.workflow/active/{session-id}/.process/
├── green-fix-iteration-1.md    # First fix attempt
├── green-fix-iteration-2.md    # Second fix attempt
├── green-fix-iteration-3.md    # Final fix attempt
└── green-phase-failure.md      # Failure report (if max iterations reached)
```

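The cycle can be sketched as a bounded loop. `runTests`, `diagnoseWithGemini`, `applyFixes`, and `revertGreenPhaseChanges` are hypothetical stand-ins for the Bash/Edit tool calls described above, not a fixed API:

```javascript
// Sketch of the Green-phase test-fix cycle (STEP 3). The helpers named here are
// placeholders for the tool calls described above, not a real API.
async function greenPhaseTestFixCycle(tddConfig, coverageTarget) {
  for (let iteration = 1; iteration <= tddConfig.maxIterations; iteration++) {
    const result = await runTests(tddConfig.acceptanceTests);    // STEP 3.1: run suite, parse results
    if (result.allPassed && result.coverage >= coverageTarget) {
      return { status: 'success', iteration };                   // STEP 3.2: done, proceed to Refactor
    }
    if (iteration === tddConfig.maxIterations) break;            // no attempts left
    const diagnosis = await diagnoseWithGemini(result.output);   // STEP 3.3: ccw cli --tool gemini
    await applyFixes(diagnosis, iteration);                      // STEP 3.4: Edit tool + green-fix-iteration-{n}.md
  }
  await revertGreenPhaseChanges();                               // STEP 3.6: auto-revert safety net
  return { status: 'failed', report: '.process/green-phase-failure.md' };
}
```
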
#### Refactor Phase: Improve Code Quality

**Objectives**:
- Improve code clarity and structure
- Remove duplication and complexity
- Maintain test coverage (no regressions)

**Execution Flow**:
```
STEP 1: Parse Refactor Phase Requirements
  → Extract refactoring targets from description
  → Load refactoring scope from modification_points

STEP 2: Execute Refactor Implementation
  IF step.command exists:
    → Execute CLI command with session resume
  ELSE:
    → Direct agent refactoring
    → Apply refactorings from logic_flow
    → Follow refactoring best practices:
      • Extract functions for clarity
      • Remove duplication (DRY principle)
      • Simplify complex logic
      • Improve naming
      • Add documentation where needed

STEP 3: Regression Testing (REQUIRED)
  → Execute test command from context.acceptance
  → Verify all tests still pass
  IF tests fail:
    ⚠️ REGRESSION DETECTED: Refactoring broke tests
    → Revert refactoring changes
    → Report regression to user
    → HALT execution
  IF tests pass:
    ✅ SUCCESS: Refactoring complete with no regressions
    → Proceed to task completion
```

**Refactor Phase Quality Gates**:
- [ ] All refactorings applied as specified
- [ ] All tests still pass (no regressions)
- [ ] Code complexity reduced (if measurable)
- [ ] Code readability improved

### 3. CLI Execution Integration

**CLI Session Resumption** (when step.command exists):

**Build CLI Command with Resume Strategy**:
```javascript
function buildCliCommand(step, tddConfig, tool) {  // tool: CLI backend, e.g. "codex" or "gemini"
  const baseCommand = step.command  // From task JSON

  // Parse cli_execution strategy
  switch (tddConfig.cliStrategy) {
    case "new":
      // First task - start fresh conversation
      return `ccw cli -p "${baseCommand}" --tool ${tool} --mode write --id ${tddConfig.cliExecutionId}`

    case "resume":
      // Single child - continue same conversation
      return `ccw cli -p "${baseCommand}" --resume ${tddConfig.resumeFrom} --tool ${tool} --mode write`

    case "fork":
      // Multiple children - branch with parent context
      return `ccw cli -p "${baseCommand}" --resume ${tddConfig.resumeFrom} --id ${tddConfig.cliExecutionId} --tool ${tool} --mode write`

    case "merge_fork": {
      // Multiple parents - merge contexts
      // resume_from is an array for the merge_fork strategy
      const mergeIds = Array.isArray(tddConfig.resumeFrom)
        ? tddConfig.resumeFrom.join(',')
        : tddConfig.resumeFrom
      return `ccw cli -p "${baseCommand}" --resume ${mergeIds} --id ${tddConfig.cliExecutionId} --tool ${tool} --mode write`
    }

    default:
      // Fallback - no resume
      return `ccw cli -p "${baseCommand}" --tool ${tool} --mode write`
  }
}
```

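For illustration, the commands this produces under each strategy (all input values here are hypothetical):

```javascript
// Hypothetical inputs: step.command = "Implement parser", tool = "codex"
buildCliCommand(step, { cliStrategy: 'new', cliExecutionId: 'WFS-1-IMPL-1' }, 'codex')
// → ccw cli -p "Implement parser" --tool codex --mode write --id WFS-1-IMPL-1

buildCliCommand(step, { cliStrategy: 'resume', resumeFrom: 'WFS-1-IMPL-1' }, 'codex')
// → ccw cli -p "Implement parser" --resume WFS-1-IMPL-1 --tool codex --mode write

buildCliCommand(step, { cliStrategy: 'merge_fork', resumeFrom: ['id1', 'id2'], cliExecutionId: 'WFS-1-IMPL-3' }, 'codex')
// → ccw cli -p "Implement parser" --resume id1,id2 --id WFS-1-IMPL-3 --tool codex --mode write
```
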
**Execute CLI Command**:
```javascript
// TDD agent runs in foreground - can receive hook callbacks
Bash(
  command=buildCliCommand(step, tddConfig, tool),
  timeout=3600000,           // 60 min for CLI execution
  run_in_background=false    // Agent can receive task completion hooks
)
```

### 4. Context Loading (Inherited from code-developer)

**Standard Context Sources**:
- Task JSON: `context.requirements`, `context.acceptance`, `context.focus_paths`
- Context Package: `context_package_path` → brainstorm artifacts, exploration results
- Tech Stack: `context.shared_context.tech_stack` (skip auto-detection if present)

**TDD-Enhanced Context**:
- `context.tdd_cycles`: Test case enumeration and coverage targets
- `meta.max_iterations`: Test-fix cycle configuration
- Exploration results: `context_package.exploration_results` for critical_files and integration_points

### 5. Quality Gates (TDD-Enhanced)

**Before Task Complete** (all phases):
- [ ] Red Phase: Tests written and failing
- [ ] Green Phase: All tests pass with coverage >= target
- [ ] Refactor Phase: No test regressions
- [ ] Code follows project conventions
- [ ] All modification_points addressed

**TDD-Specific Validations**:
- [ ] Test count matches tdd_cycles.test_count
- [ ] Coverage meets tdd_cycles.expected_coverage
- [ ] Green phase iteration count ≤ max_iterations
- [ ] No auto-revert triggered (Green phase succeeded)

### 6. Task Completion (TDD-Enhanced)

**Upon completing a TDD task:**

1. **Verify TDD Compliance**:
   - All three phases completed (Red → Green → Refactor)
   - Final test run shows 100% pass rate
   - Coverage meets or exceeds expected_coverage

2. **Update TODO List** (same as code-developer):
   - Mark completed tasks with [x]
   - Add summary links
   - Update task progress

3. **Generate TDD-Enhanced Summary**:
   ```markdown
   # Task: [Task-ID] [Name]

   ## TDD Cycle Summary

   ### Red Phase: Write Failing Tests
   - Test Cases Written: {test_count} (expected: {tdd_cycles.test_count})
   - Test Files: {test_file_paths}
   - Initial Result: ✅ All tests failing as expected

   ### Green Phase: Implement to Pass Tests
   - Implementation Scope: {implementation_scope}
   - Test-Fix Iterations: {iteration_count}/{max_iterations}
   - Final Test Results: {pass_count}/{total_count} passed ({pass_rate}%)
   - Coverage: {actual_coverage} (target: {expected_coverage})
   - Iteration Details: See green-fix-iteration-*.md

   ### Refactor Phase: Improve Code Quality
   - Refactorings Applied: {refactoring_count}
   - Regression Test: ✅ All tests still passing
   - Final Test Results: {pass_count}/{total_count} passed

   ## Implementation Summary

   ### Files Modified
   - `[file-path]`: [brief description of changes]

   ### Content Added
   - **[ComponentName]**: [purpose/functionality]
   - **[functionName()]**: [purpose/parameters/returns]

   ## Status: ✅ Complete (TDD Compliant)
   ```

## TDD-Specific Error Handling

**Red Phase Errors**:
- Tests pass immediately → Warning (may not test real behavior)
- Test syntax errors → Fix and retry
- Missing test files → Report and halt

**Green Phase Errors**:
- Max iterations reached → Auto-revert + failure report
- Tests never run → Report configuration error
- Coverage tools unavailable → Continue with pass rate only

**Refactor Phase Errors**:
- Regression detected → Revert refactoring
- Tests fail to run → Keep original code

## Key Differences from code-developer

| Feature | code-developer | tdd-developer |
|---------|----------------|---------------|
| TDD Awareness | ❌ No | ✅ Yes |
| Phase Recognition | ❌ Generic steps | ✅ Red/Green/Refactor |
| Test-Fix Cycle | ❌ No | ✅ Green phase iteration |
| Auto-Revert | ❌ No | ✅ On max iterations |
| CLI Resume | ❌ No | ✅ Full strategy support |
| TDD Metadata | ❌ Ignored | ✅ Parsed and used |
| Test Validation | ❌ Manual | ✅ Automatic per phase |
| Coverage Tracking | ❌ No | ✅ Yes (if available) |

## Quality Checklist (TDD-Enhanced)

Before completing any TDD task, verify:
- [ ] **TDD Structure Validated** - meta.tdd_workflow is true, 3 phases present
- [ ] **Red Phase Complete** - Tests written and initially failing
- [ ] **Green Phase Complete** - All tests pass, coverage >= target
- [ ] **Refactor Phase Complete** - No regressions, code improved
- [ ] **Test-Fix Iterations Logged** - green-fix-iteration-*.md exists
- [ ] Code follows project conventions
- [ ] CLI session resume used correctly (if applicable)
- [ ] TODO list updated
- [ ] TDD-enhanced summary generated

## Key Reminders

**NEVER:**
- Skip Red phase validation (must confirm tests fail)
- Proceed to Refactor if Green phase tests are failing
- Exceed max_iterations without auto-reverting
- Ignore tdd_phase indicators

**ALWAYS:**
- Parse meta.tdd_workflow to detect TDD mode
- Run tests after each phase
- Use the test-fix cycle in the Green phase
- Auto-revert on max-iterations failure
- Generate TDD-enhanced summaries
- Use CLI resume strategies when step.command exists
- Log all test-fix iterations to .process/

**Bash Tool (CLI Execution in TDD Agent)**:
- Use `run_in_background=false` - the TDD agent can receive hook callbacks
- Set timeout ≥60 minutes for CLI commands:
  ```javascript
  Bash(command="ccw cli -p '...' --tool codex --mode write", timeout=3600000)
  ```

## Execution Mode Decision

**When to use tdd-developer vs code-developer**:
- ✅ Use tdd-developer: `meta.tdd_workflow == true` in task JSON
- ❌ Use code-developer: No TDD metadata, generic implementation tasks

**Task Routing** (by workflow orchestrator):
```javascript
if (taskJson.meta?.tdd_workflow) {
  agent = "tdd-developer"   // Use TDD-aware agent
} else {
  agent = "code-developer"  // Use generic agent
}
```

File: .claude/commands/ccw-coordinator.md (new file, 948 lines)

---
name: ccw-coordinator
description: Command orchestration tool - analyze requirements, recommend chain, execute sequentially with state persistence
argument-hint: "[task description]"
allowed-tools: Task(*), AskUserQuestion(*), Read(*), Write(*), Bash(*), Glob(*), Grep(*)
---

# CCW Coordinator Command

Interactive orchestration tool: analyze task → discover commands → recommend chain → execute sequentially → track state.

**Execution Model**: Pseudocode guidance. Claude intelligently executes each phase based on context.

## Core Concept: Minimum Execution Units

### What is a Minimum Execution Unit?

**Definition**: A set of commands that must execute together as an atomic group to achieve a meaningful workflow milestone. Splitting these commands breaks the logical flow and creates incomplete states.

**Why This Matters**:
- **Prevents Incomplete States**: Avoids stopping after task generation without execution
- **User Experience**: The user gets complete results, not intermediate artifacts requiring manual follow-up
- **Workflow Integrity**: Maintains the logical coherence of multi-step operations

### Minimum Execution Units

**Planning + Execution Units**:

| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Quick Implementation** | lite-plan → lite-execute | Lightweight plan and immediate execution | Working code |
| **Multi-CLI Planning** | multi-cli-plan → lite-execute | Multi-perspective analysis and execution | Working code |
| **Bug Fix** | lite-fix → lite-execute | Quick bug diagnosis and fix execution | Fixed code |
| **Full Planning + Execution** | plan → execute | Detailed planning and execution | Working code |
| **Verified Planning + Execution** | plan → plan-verify → execute | Planning with verification and execution | Working code |
| **Replanning + Execution** | replan → execute | Update plan and execute changes | Working code |
| **TDD Planning + Execution** | tdd-plan → execute | Test-driven development planning and execution | Working code |
| **Test Generation + Execution** | test-gen → execute | Generate test suite and execute | Generated tests |

**Testing Units**:

| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Test Validation** | test-fix-gen → test-cycle-execute | Generate test tasks and execute test-fix cycle | Tests passed |

**Review Units**:

| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Code Review (Session)** | review-session-cycle → review-fix | Complete review cycle and apply fixes | Fixed code |
| **Code Review (Module)** | review-module-cycle → review-fix | Module review cycle and apply fixes | Fixed code |

### Command-to-Unit Mapping

| Command | Can Precede | Atomic Units |
|---------|-------------|--------------|
| lite-plan | lite-execute | Quick Implementation |
| multi-cli-plan | lite-execute | Multi-CLI Planning |
| lite-fix | lite-execute | Bug Fix |
| plan | plan-verify, execute | Full Planning + Execution, Verified Planning + Execution |
| plan-verify | execute | Verified Planning + Execution |
| replan | execute | Replanning + Execution |
| test-gen | execute | Test Generation + Execution |
| tdd-plan | execute | TDD Planning + Execution |
| review-session-cycle | review-fix | Code Review (Session) |
| review-module-cycle | review-fix | Code Review (Module) |
| test-fix-gen | test-cycle-execute | Test Validation |

### Atomic Group Rules

1. **Never Split Units**: The coordinator must recommend complete units, not partial chains
2. **Multi-Unit Participation**: Some commands can participate in multiple units (e.g., plan → execute or plan → plan-verify → execute)
3. **User Override**: The user can explicitly request partial execution (advanced mode)
4. **Visualization**: The pipeline view shows unit boundaries with `【 】` markers
5. **Validation**: Before execution, verify all unit commands are included (see the sketch after the example below)

**Example Pipeline with Units**:
```
Requirement → 【lite-plan → lite-execute】→ Code → 【test-fix-gen → test-cycle-execute】→ Tests passed
              └──── Quick Implementation ───┘       └─────── Test Validation ────────┘
```

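A minimal sketch of rule 5 (unit completeness), assuming a `unitMembers` map derived from the atomic-group fields in the port definitions below; the two entries shown are illustrative:

```javascript
// Sketch: verify the chain contains every command of each atomic unit it touches.
// `unitMembers` maps unit name → required commands; contents are illustrative.
const unitMembers = {
  'quick-implementation': ['lite-plan', 'lite-execute'],
  'test-validation': ['test-fix-gen', 'test-cycle-execute'],
};

function validateUnits(chain) {
  const names = new Set(chain.map(cmd => cmd.name));
  const violations = [];
  for (const [unit, members] of Object.entries(unitMembers)) {
    const present = members.filter(m => names.has(m));
    if (present.length > 0 && present.length < members.length) {
      const missing = members.filter(m => !names.has(m));
      violations.push(`${unit}: has ${present.join(', ')} but is missing ${missing.join(', ')}`);
    }
  }
  return violations; // empty array = all touched units are complete
}
```
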
## 3-Phase Workflow

### Phase 1: Analyze Requirements

Parse the task to extract: goal, scope, constraints, complexity, and task type.

```javascript
function analyzeRequirements(taskDescription) {
  return {
    goal: extractMainGoal(taskDescription),            // e.g., "Implement user registration"
    scope: extractScope(taskDescription),              // e.g., ["auth", "user_management"]
    constraints: extractConstraints(taskDescription),  // e.g., ["no breaking changes"]
    complexity: determineComplexity(taskDescription),  // 'simple' | 'medium' | 'complex'
    task_type: detectTaskType(taskDescription)         // See task type patterns below
  };
}

// Task Type Detection Patterns
function detectTaskType(text) {
  // Priority order (first match wins)
  if (/fix|bug|error|crash|fail|debug|diagnose/.test(text)) return 'bugfix';
  if (/tdd|test-driven|先写测试|test first/.test(text)) return 'tdd';
  if (/测试失败|test fail|fix test|failing test/.test(text)) return 'test-fix';
  if (/generate test|写测试|add test|补充测试/.test(text)) return 'test-gen';
  if (/review|审查|code review/.test(text)) return 'review';
  if (/不确定|explore|研究|what if|brainstorm|权衡/.test(text)) return 'brainstorm';
  if (/多视角|比较方案|cross-verify|multi-cli/.test(text)) return 'multi-cli';
  return 'feature'; // Default
}

// Complexity Assessment
function determineComplexity(text) {
  let score = 0;
  if (/refactor|重构|migrate|迁移|architect|架构|system|系统/.test(text)) score += 2;
  if (/multiple|多个|across|跨|all|所有|entire|整个/.test(text)) score += 2;
  if (/integrate|集成|api|database|数据库/.test(text)) score += 1;
  if (/security|安全|performance|性能|scale|扩展/.test(text)) score += 1;
  return score >= 4 ? 'complex' : score >= 2 ? 'medium' : 'simple';
}
```

**Display to user**:
```
Analysis Complete:
  Goal: [extracted goal]
  Scope: [identified areas]
  Constraints: [identified constraints]
  Complexity: [level]
  Task Type: [detected type]
```

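For instance, applying these detectors to sample requests (the patterns are case-sensitive as written, so lowercase inputs are shown):

```javascript
// Illustrative inputs; results follow directly from the regexes above.
detectTaskType('fix login timeout crash in auth module');    // → 'bugfix'  (matches /fix|...|crash/)
detectTaskType('refactor the entire payment system');        // → 'feature' (no earlier pattern matches)
determineComplexity('refactor the entire payment system');   // → 'complex' (refactor +2, entire +2 → score 4)
determineComplexity('add a tooltip to the settings page');   // → 'simple'  (score 0)
```
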
### Phase 2: Discover Commands & Recommend Chain

Dynamic command chain assembly using port-based matching.

#### Command Port Definition

Each command has input/output ports (tags) for pipeline composition:

```javascript
// Port labels represent data types flowing through the pipeline
const commandPorts = {
  'lite-plan': {
    name: 'lite-plan',
    input: ['requirement'],                  // input port: requirement
    output: ['plan'],                        // output port: plan
    tags: ['planning'],
    atomic_group: 'quick-implementation'     // atomic unit: bound to lite-execute
  },
  'lite-execute': {
    name: 'lite-execute',
    input: ['plan', 'multi-cli-plan', 'lite-fix'],  // input port: accepts several planning outputs
    output: ['code'],                        // output port: code
    tags: ['execution'],
    atomic_groups: [                         // can participate in multiple atomic units
      'quick-implementation',                // lite-plan → lite-execute
      'multi-cli-planning',                  // multi-cli-plan → lite-execute
      'bug-fix'                              // lite-fix → lite-execute
    ]
  },
  'plan': {
    name: 'plan',
    input: ['requirement'],
    output: ['detailed-plan'],
    tags: ['planning'],
    atomic_groups: [                         // can participate in multiple atomic units
      'full-planning-execution',             // plan → execute
      'verified-planning-execution'          // plan → plan-verify → execute
    ]
  },
  'plan-verify': {
    name: 'plan-verify',
    input: ['detailed-plan'],
    output: ['verified-plan'],
    tags: ['planning'],
    atomic_group: 'verified-planning-execution'  // atomic unit: plan → plan-verify → execute
  },
  'replan': {
    name: 'replan',
    input: ['session', 'feedback'],          // input port: session or feedback
    output: ['replan'],                      // output port: updated plan (consumed by execute)
    tags: ['planning'],
    atomic_group: 'replanning-execution'     // atomic unit: bound to execute
  },
  'execute': {
    name: 'execute',
    input: ['detailed-plan', 'verified-plan', 'replan', 'test-tasks', 'tdd-tasks'],  // accepts several planning outputs
    output: ['code'],
    tags: ['execution'],
    atomic_groups: [                         // can participate in multiple atomic units
      'full-planning-execution',             // plan → execute
      'verified-planning-execution',         // plan → plan-verify → execute
      'replanning-execution',                // replan → execute
      'test-generation-execution',           // test-gen → execute
      'tdd-planning-execution'               // tdd-plan → execute
    ]
  },
  'test-cycle-execute': {
    name: 'test-cycle-execute',
    input: ['test-tasks'],                   // input port: test tasks (generated first by test-fix-gen)
    output: ['test-passed'],                 // output port: tests passed
    tags: ['testing'],
    atomic_group: 'test-validation',         // atomic unit: bound to test-fix-gen
    note: 'Run test-fix-gen first to generate test tasks; this command then executes the test cycle'
  },
  'tdd-plan': {
    name: 'tdd-plan',
    input: ['requirement'],
    output: ['tdd-tasks'],                   // TDD tasks (consumed by execute)
    tags: ['planning', 'tdd'],
    atomic_group: 'tdd-planning-execution'   // atomic unit: bound to execute
  },
  'tdd-verify': {
    name: 'tdd-verify',
    input: ['code'],
    output: ['tdd-verified'],
    tags: ['testing']
  },
  'lite-fix': {
    name: 'lite-fix',
    input: ['bug-report'],                   // input port: bug report
    output: ['lite-fix'],                    // output port: fix plan (consumed by lite-execute)
    tags: ['bugfix'],
    atomic_group: 'bug-fix'                  // atomic unit: bound to lite-execute
  },
  'debug': {
    name: 'debug',
    input: ['bug-report'],
    output: ['debug-log'],
    tags: ['bugfix']
  },
  'test-gen': {
    name: 'test-gen',
    input: ['code', 'session'],              // accepts code or a session
    output: ['test-tasks'],                  // outputs test tasks (IMPL-001, IMPL-002) consumed by execute
    tags: ['testing'],
    atomic_group: 'test-generation-execution'  // atomic unit: bound to execute
  },
  'test-fix-gen': {
    name: 'test-fix-gen',
    input: ['failing-tests', 'session'],
    output: ['test-tasks'],                  // outputs test tasks targeting specific issues, fixed during testing
    tags: ['testing'],
    atomic_group: 'test-validation',         // atomic unit: bound to test-cycle-execute
    note: 'Generates test tasks for test-cycle-execute to run'
  },
  'review': {
    name: 'review',
    input: ['code', 'session'],
    output: ['review-findings'],
    tags: ['review']
  },
  'review-fix': {
    name: 'review-fix',
    input: ['review-findings', 'review-verified'],  // accepts output from review-session-cycle or review-module-cycle
    output: ['fixed-code'],
    tags: ['review'],
    atomic_group: 'code-review'              // atomic unit: bound to review-session-cycle/review-module-cycle
  },
  'brainstorm:auto-parallel': {
    name: 'brainstorm:auto-parallel',
    input: ['exploration-topic'],            // input port: exploration topic
    output: ['brainstorm-analysis'],
    tags: ['brainstorm']
  },
  'multi-cli-plan': {
    name: 'multi-cli-plan',
    input: ['requirement'],
    output: ['multi-cli-plan'],              // comparative analysis plan (consumed by lite-execute)
    tags: ['planning', 'multi-cli'],
    atomic_group: 'multi-cli-planning'       // atomic unit: bound to lite-execute
  },
  'review-session-cycle': {
    name: 'review-session-cycle',
    input: ['code', 'session'],              // accepts code or a session
    output: ['review-verified'],             // output port: review passed
    tags: ['review'],
    atomic_group: 'code-review'              // atomic unit: bound to review-fix
  },
  'review-module-cycle': {
    name: 'review-module-cycle',
    input: ['module-pattern'],               // input port: module pattern
    output: ['review-verified'],             // output port: review passed
    tags: ['review'],
    atomic_group: 'code-review'              // atomic unit: bound to review-fix
  }
};
```

#### Recommendation Algorithm

```javascript
async function recommendCommandChain(analysis) {
  // Step 1: Determine start and target ports from the task type
  const { inputPort, outputPort } = determinePortFlow(analysis.task_type, analysis.constraints);

  // Step 2: Claude selects a command sequence from the port definitions and task characteristics
  // Priority: simple tasks → lite-* commands; complex tasks → full commands; special constraints → adjust the flow
  const chain = selectChainByPorts(inputPort, outputPort, analysis);

  return chain;
}

// Port flow for each task type
function determinePortFlow(taskType, constraints) {
  const flows = {
    'bugfix': { inputPort: 'bug-report', outputPort: constraints?.includes('skip-tests') ? 'fixed-code' : 'test-passed' },
    'tdd': { inputPort: 'requirement', outputPort: 'tdd-verified' },
    'test-fix': { inputPort: 'failing-tests', outputPort: 'test-passed' },
    'test-gen': { inputPort: 'code', outputPort: 'test-passed' },
    'review': { inputPort: 'code', outputPort: 'review-verified' },
    'brainstorm': { inputPort: 'exploration-topic', outputPort: 'test-passed' },
    'multi-cli': { inputPort: 'requirement', outputPort: 'test-passed' },
    'feature': { inputPort: 'requirement', outputPort: constraints?.includes('skip-tests') ? 'code' : 'test-passed' }
  };
  return flows[taskType] || flows['feature'];
}

// Claude selects the command chain from the port flow
function selectChainByPorts(inputPort, outputPort, analysis) {
  // Using the command port definitions above and the execution examples in this document,
  // Claude intelligently picks a suitable command sequence.
  // Example return value: [lite-plan, lite-execute, test-cycle-execute]
}
```

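One plausible realization of `selectChainByPorts` is a breadth-first search over the `commandPorts` graph. This is a sketch, not the shipped logic - it ignores the complexity priority and the atomic-unit expansion described above:

```javascript
// Sketch: BFS from inputPort to outputPort over commandPorts.
// Returns command names in order, or null if no chain connects the ports.
function selectChainByPorts(inputPort, outputPort) {
  const queue = [[inputPort, []]];
  const visited = new Set([inputPort]);
  while (queue.length > 0) {
    const [port, chain] = queue.shift();
    if (port === outputPort) return chain;
    for (const cmd of Object.values(commandPorts)) {
      if (!cmd.input.includes(port)) continue;
      for (const out of cmd.output) {
        if (!visited.has(out)) {
          visited.add(out);
          queue.push([out, [...chain, cmd.name]]);
        }
      }
    }
  }
  return null;
}

// e.g., selectChainByPorts('requirement', 'code')
// → ['lite-plan', 'lite-execute'] (shortest path through the port graph)
```
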
#### Display to User

```
Recommended Command Chain:

Pipeline:
  Requirement → lite-plan → Plan → lite-execute → Code → test-cycle-execute → Tests passed

Commands:
  1. /workflow:lite-plan
  2. /workflow:lite-execute
  3. /workflow:test-cycle-execute

Proceed? [Confirm / Show Details / Adjust / Cancel]
```

### Phase 2b: Get User Confirmation

```javascript
async function getUserConfirmation(chain) {
  const response = await AskUserQuestion({
    questions: [{
      question: 'Proceed with this command chain?',
      header: 'Confirm',
      options: [
        { label: 'Confirm and execute', description: 'Proceed with commands' },
        { label: 'Show details', description: 'View each command' },
        { label: 'Adjust chain', description: 'Remove or reorder' },
        { label: 'Cancel', description: 'Abort' }
      ]
    }]
  });

  if (response.confirm === 'Cancel') throw new Error('Cancelled');
  if (response.confirm === 'Show details') {
    displayCommandDetails(chain);
    return getUserConfirmation(chain);
  }
  if (response.confirm === 'Adjust chain') {
    return await adjustChain(chain);
  }
  return chain;
}
```

### Phase 3: Execute Sequential Command Chain

```javascript
async function executeCommandChain(chain, analysis) {
  const sessionId = `ccw-coord-${Date.now()}`;
  const stateDir = `.workflow/.ccw-coordinator/${sessionId}`;
  Bash(`mkdir -p "${stateDir}"`);

  const state = {
    session_id: sessionId,
    status: 'running',
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
    analysis: analysis,
    command_chain: chain.map((cmd, idx) => ({ ...cmd, index: idx, status: 'pending' })),
    execution_results: [],
    prompts_used: []
  };

  // Save initial state immediately after confirmation
  Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

  for (let i = 0; i < chain.length; i++) {
    const cmd = chain[i];
    console.log(`[${i+1}/${chain.length}] ${cmd.command}`);

    // Update command_chain status to running
    state.command_chain[i].status = 'running';
    state.updated_at = new Date().toISOString();
    Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

    // Assemble prompt: Command first, then context
    let promptContent = formatCommand(cmd, state.execution_results, analysis);

    // Build full prompt: Command → Task → Previous Results
    let prompt = `${promptContent}\n\nTask: ${analysis.goal}`;
    if (state.execution_results.length > 0) {
      prompt += '\n\nPrevious results:\n';
      state.execution_results.forEach(r => {
        if (r.session_id) {
          prompt += `- ${r.command}: ${r.session_id} (${r.artifacts?.join(', ') || 'completed'})\n`;
        }
      });
    }

    // Record prompt used
    state.prompts_used.push({
      index: i,
      command: cmd.command,
      prompt: prompt
    });

    // Execute CLI command in background and stop
    // Format: ccw cli -p "PROMPT" --tool <tool> --mode <mode>
    // Note: -y is a command parameter INSIDE the prompt, not a ccw cli parameter
    // Example prompt: "/workflow:plan -y \"task description here\""
    try {
      const taskId = Bash(
        `ccw cli -p "${escapePrompt(prompt)}" --tool claude --mode write`,
        { run_in_background: true }
      ).task_id;

      // Save checkpoint
      state.execution_results.push({
        index: i,
        command: cmd.command,
        status: 'in-progress',
        task_id: taskId,
        session_id: null,
        artifacts: [],
        timestamp: new Date().toISOString()
      });
      state.command_chain[i].status = 'running';
      state.updated_at = new Date().toISOString();
      Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

      console.log(`[${i+1}/${chain.length}] ${cmd.command}\n`);
      break; // Stop, wait for hook callback

    } catch (error) {
      state.command_chain[i].status = 'failed';
      // Record the failed launch so Retry/Skip below have an entry to adjust
      state.execution_results.push({
        index: i,
        command: cmd.command,
        status: 'failed',
        task_id: null,
        session_id: null,
        artifacts: [],
        timestamp: new Date().toISOString()
      });
      state.updated_at = new Date().toISOString();
      Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

      const action = await AskUserQuestion({
        questions: [{
          question: `${cmd.command} failed to start: ${error.message}. What to do?`,
          header: 'Error',
          options: [
            { label: 'Retry', description: 'Try again' },
            { label: 'Skip', description: 'Continue next command' },
            { label: 'Abort', description: 'Stop execution' }
          ]
        }]
      });

      if (action.error === 'Retry') {
        state.command_chain[i].status = 'pending';
        state.execution_results.pop();
        i--;
      } else if (action.error === 'Skip') {
        state.execution_results[state.execution_results.length - 1].status = 'skipped';
      } else if (action.error === 'Abort') {
        state.status = 'failed';
        break;
      }
    }

    Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));
  }

  // Hook callbacks handle completion
  if (state.status !== 'failed') state.status = 'waiting';
  state.updated_at = new Date().toISOString();
  Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

  console.log(`\n📋 Orchestrator paused: ${state.session_id}\n`);
  return state;
}

// Smart parameter assembly
// Returns prompt content to be used with: ccw cli -p "RETURNED_VALUE" --tool claude --mode write
function formatCommand(cmd, previousResults, analysis) {
  // Format: /workflow:<command> -y <parameters>
  let prompt = `/workflow:${cmd.name} -y`;
  const name = cmd.name;

  // Planning commands - take task description
  if (['lite-plan', 'plan', 'tdd-plan', 'multi-cli-plan'].includes(name)) {
    prompt += ` "${analysis.goal}"`;

  // Lite execution - use --in-memory if a plan exists
  } else if (name === 'lite-execute') {
    const hasPlan = previousResults.some(r => r.command.includes('plan'));
    prompt += hasPlan ? ' --in-memory' : ` "${analysis.goal}"`;

  // Standard execution - resume from planning session
  } else if (name === 'execute') {
    const plan = previousResults.find(r => r.command.includes('plan'));
    if (plan?.session_id) prompt += ` --resume-session="${plan.session_id}"`;

  // Bug fix commands - take bug description
  } else if (['lite-fix', 'debug'].includes(name)) {
    prompt += ` "${analysis.goal}"`;

  // Brainstorm - take topic description
  } else if (name === 'brainstorm:auto-parallel' || name === 'auto-parallel') {
    prompt += ` "${analysis.goal}"`;

  // Test generation from session - needs source session
  } else if (name === 'test-gen') {
    const impl = previousResults.find(r =>
      r.command.includes('execute') || r.command.includes('lite-execute')
    );
    if (impl?.session_id) prompt += ` "${impl.session_id}"`;
    else prompt += ` "${analysis.goal}"`;

  // Test fix generation - session or description
  } else if (name === 'test-fix-gen') {
    const latest = previousResults.filter(r => r.session_id).pop();
    if (latest?.session_id) prompt += ` "${latest.session_id}"`;
    else prompt += ` "${analysis.goal}"`;

  // Review commands - take session or use latest
  } else if (name === 'review') {
    const latest = previousResults.filter(r => r.session_id).pop();
    if (latest?.session_id) prompt += ` --session="${latest.session_id}"`;

  // Review fix - takes session from review
  } else if (name === 'review-fix') {
    const review = previousResults.find(r => r.command.includes('review'));
    const latest = review || previousResults.filter(r => r.session_id).pop();
    if (latest?.session_id) prompt += ` --session="${latest.session_id}"`;

  // TDD verify - takes execution session
  } else if (name === 'tdd-verify') {
    const exec = previousResults.find(r => r.command.includes('execute'));
    if (exec?.session_id) prompt += ` --session="${exec.session_id}"`;

  // Session-based commands (test-cycle, review-session, plan-verify)
  } else if (name.includes('test') || name.includes('review') || name.includes('verify')) {
    const latest = previousResults.filter(r => r.session_id).pop();
    if (latest?.session_id) prompt += ` --session="${latest.session_id}"`;
  }

  return prompt;
}

// Hook callback: Called when a background CLI completes
async function handleCliCompletion(sessionId, taskId, output) {
  const stateDir = `.workflow/.ccw-coordinator/${sessionId}`;
  const state = JSON.parse(Read(`${stateDir}/state.json`));

  const pendingIdx = state.execution_results.findIndex(r => r.task_id === taskId);
  if (pendingIdx === -1) {
    console.error(`Unknown task_id: ${taskId}`);
    return;
  }

  const parsed = parseOutput(output);
  const cmdIdx = state.execution_results[pendingIdx].index;

  // Update result
  state.execution_results[pendingIdx] = {
    ...state.execution_results[pendingIdx],
    status: parsed.sessionId ? 'completed' : 'failed',
    session_id: parsed.sessionId,
    artifacts: parsed.artifacts,
    completed_at: new Date().toISOString()
  };
  state.command_chain[cmdIdx].status = parsed.sessionId ? 'completed' : 'failed';
  state.updated_at = new Date().toISOString();
  Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));

  // Trigger next command or complete
  const nextIdx = cmdIdx + 1;
  if (nextIdx < state.command_chain.length) {
    await resumeChainExecution(sessionId, nextIdx);
  } else {
    state.status = 'completed';
    Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));
    console.log(`✅ Completed: ${sessionId}\n`);
  }
}

// Parse command output
function parseOutput(output) {
  const sessionMatch = output.match(/WFS-[\w-]+/);
  const artifacts = [];
  for (const m of output.matchAll(/\.workflow\/[^\s]+/g)) artifacts.push(m[0]);
  return { sessionId: sessionMatch?.[0] || null, artifacts };
}
```

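`escapePrompt` and `resumeChainExecution` are referenced above but not defined in this file. A minimal `escapePrompt` sketch for the double-quoted shell context used here (an assumption, not the actual helper):

```javascript
// Sketch: escape a prompt for safe interpolation inside double quotes in a shell command.
// Escapes backslashes first, then double quotes, backticks, and `$` (expansion characters).
function escapePrompt(prompt) {
  return prompt
    .replace(/\\/g, '\\\\')
    .replace(/"/g, '\\"')
    .replace(/`/g, '\\`')
    .replace(/\$/g, '\\$');
}
```
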
## State File Structure

**Location**: `.workflow/.ccw-coordinator/{session_id}/state.json`

```json
{
  "session_id": "ccw-coord-20250124-143025",
  "status": "running|waiting|completed|failed",
  "created_at": "2025-01-24T14:30:25Z",
  "updated_at": "2025-01-24T14:35:45Z",
  "analysis": {
    "goal": "Implement user registration",
    "scope": ["authentication", "user_management"],
    "constraints": ["no breaking changes"],
    "complexity": "medium"
  },
  "command_chain": [
    {
      "index": 0,
      "command": "/workflow:plan",
      "name": "plan",
      "description": "Detailed planning",
      "argumentHint": "[--explore] \"task\"",
      "status": "completed"
    },
    {
      "index": 1,
      "command": "/workflow:execute",
      "name": "execute",
      "description": "Execute with state resume",
      "argumentHint": "[--resume-session=\"WFS-xxx\"]",
      "status": "completed"
    },
    {
      "index": 2,
      "command": "/workflow:test-cycle-execute",
      "name": "test-cycle-execute",
      "status": "pending"
    }
  ],
  "execution_results": [
    {
      "index": 0,
      "command": "/workflow:plan",
      "status": "completed",
      "task_id": "task-001",
      "session_id": "WFS-plan-20250124",
      "artifacts": ["IMPL_PLAN.md", "exploration-architecture.json"],
      "timestamp": "2025-01-24T14:30:25Z",
      "completed_at": "2025-01-24T14:30:45Z"
    },
    {
      "index": 1,
      "command": "/workflow:execute",
      "status": "in-progress",
      "task_id": "task-002",
      "session_id": null,
      "artifacts": [],
      "timestamp": "2025-01-24T14:32:00Z",
      "completed_at": null
    }
  ],
  "prompts_used": [
    {
      "index": 0,
      "command": "/workflow:plan",
      "prompt": "/workflow:plan -y \"Implement user registration...\"\n\nTask: Implement user registration..."
    },
    {
      "index": 1,
      "command": "/workflow:execute",
      "prompt": "/workflow:execute -y --resume-session=\"WFS-plan-20250124\"\n\nTask: Implement user registration\n\nPrevious results:\n- /workflow:plan: WFS-plan-20250124 (IMPL_PLAN.md)"
    }
  ]
}
```

### Status Flow

```
running → waiting → [hook callback] → waiting → [hook callback] → completed
   ↓                                                                  ↑
 failed ←─────────────────────────────────────────────────────────────┘
```

**Status Values**:
- `running`: Orchestrator actively executing (launching CLI commands)
- `waiting`: Paused, waiting for hook callbacks to trigger continuation
- `completed`: All commands finished successfully
- `failed`: User aborted or unrecoverable error

### Field Descriptions

**execution_results[] fields**:
- `index`: Command position in chain (0-indexed)
- `command`: Full command string (e.g., `/workflow:plan`)
- `status`: `in-progress` | `completed` | `skipped` | `failed`
- `task_id`: Background task identifier (from Bash tool)
- `session_id`: Workflow session ID (e.g., `WFS-*`) or null if failed
- `artifacts`: Generated files/directories
- `timestamp`: Command start time (ISO 8601)
- `completed_at`: Command completion time or null if pending

**command_chain[] status values**:
- `pending`: Not started yet
- `running`: Currently executing
- `completed`: Successfully finished
- `failed`: Failed to execute

## CommandRegistry Integration

Sole CCW tool for command discovery:

```javascript
import { CommandRegistry } from 'ccw/tools/command-registry';

const registry = new CommandRegistry();

// Get all commands
const allCommands = registry.getAllCommandsSummary();
// Map<"/workflow:lite-plan" => {name, description}>

// Get categorized
const byCategory = registry.getAllCommandsByCategory();
// {planning, execution, testing, review, other}

// Get single command metadata
const cmd = registry.getCommand('lite-plan');
// {name, command, description, argumentHint, allowedTools, filePath}
```

## Universal Prompt Template

### Standard Format

```bash
ccw cli -p "PROMPT_CONTENT" --tool <tool> --mode <mode>
```

### Prompt Content Template

```
/workflow:<command> -y <command_parameters>

Task: <task_description>

<optional_previous_results>
```

### Template Variables

| Variable | Description | Examples |
|----------|-------------|----------|
| `<command>` | Workflow command name | `plan`, `lite-execute`, `test-cycle-execute` |
| `-y` | Auto-confirm flag (inside prompt) | Always include for automation |
| `<command_parameters>` | Command-specific parameters | Task description, session ID, flags |
| `<task_description>` | Brief task description | "Implement user authentication", "Fix memory leak" |
| `<optional_previous_results>` | Context from previous commands | "Previous results:\n- /workflow:plan: WFS-xxx" |

### Command Parameter Patterns

| Command Type | Parameter Pattern | Example |
|--------------|------------------|---------|
| **Planning** | `"task description"` | `/workflow:plan -y "Implement OAuth2"` |
| **Execution (with plan)** | `--resume-session="WFS-xxx"` | `/workflow:execute -y --resume-session="WFS-plan-001"` |
| **Execution (standalone)** | `--in-memory` or `"task"` | `/workflow:lite-execute -y --in-memory` |
| **Session-based** | `--session="WFS-xxx"` | `/workflow:test-fix-gen -y --session="WFS-impl-001"` |
| **Fix/Debug** | `"problem description"` | `/workflow:lite-fix -y "Fix timeout bug"` |

### Complete Examples

**Planning Command**:
```bash
ccw cli -p '/workflow:plan -y "Implement user registration with email validation"

Task: Implement user registration' --tool claude --mode write
```

**Execution with Context**:
```bash
ccw cli -p '/workflow:execute -y --resume-session="WFS-plan-20250124"

Task: Implement user registration

Previous results:
- /workflow:plan: WFS-plan-20250124 (IMPL_PLAN.md)' --tool claude --mode write
```

**Standalone Lite Execution**:
```bash
ccw cli -p '/workflow:lite-fix -y "Fix login timeout in auth module"

Task: Fix login timeout' --tool claude --mode write
```

## Execution Flow

```javascript
// Main entry point
async function ccwCoordinator(taskDescription) {
  // Phase 1
  const analysis = await analyzeRequirements(taskDescription);

  // Phase 2
  const chain = await recommendCommandChain(analysis);
  const confirmedChain = await getUserConfirmation(chain);

  // Phase 3
  const state = await executeCommandChain(confirmedChain, analysis);

  console.log(`✅ Complete! Session: ${state.session_id}`);
  console.log(`State: .workflow/.ccw-coordinator/${state.session_id}/state.json`);
}
```

## Key Design Principles

1. **No Fixed Logic** - Claude intelligently decides based on analysis
2. **Dynamic Discovery** - CommandRegistry retrieves available commands
3. **Smart Parameters** - Command args assembled based on previous results
4. **Full State Tracking** - All execution recorded to state.json
5. **User Control** - Confirmation + error handling with user choice
6. **Context Passing** - Each prompt includes previous results
7. **Resumable** - Can load state.json to continue (see the sketch after this list)
8. **Serial Blocking** - Commands execute one-by-one with hook-based continuation

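Principle 7 can be sketched as reloading state.json and handing off to `resumeChainExecution` (the continuation hook named in Phase 3) at the first unfinished command:

```javascript
// Sketch: resume a coordinator session from its persisted state.
async function resumeCoordinator(sessionId) {
  const stateDir = `.workflow/.ccw-coordinator/${sessionId}`;
  const state = JSON.parse(Read(`${stateDir}/state.json`));
  if (state.status === 'completed') return state;

  // The first command that never finished is where execution picks up.
  const nextIdx = state.command_chain.findIndex(
    cmd => cmd.status === 'pending' || cmd.status === 'running'
  );
  if (nextIdx === -1) return state; // nothing left to do
  return resumeChainExecution(sessionId, nextIdx);
}
```
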
## CLI Execution Model
|
||||
|
||||
### CLI Invocation Format
|
||||
|
||||
**IMPORTANT**: The `ccw cli` command executes prompts through external tools. The format is:
|
||||
|
||||
```bash
|
||||
ccw cli -p "PROMPT_CONTENT" --tool <tool> --mode <mode>
|
||||
```
|
||||
|
||||
**Parameters**:
|
||||
- `-p "PROMPT_CONTENT"`: The prompt content to execute (required)
|
||||
- `--tool <tool>`: CLI tool to use (e.g., `claude`, `gemini`, `qwen`)
|
||||
- `--mode <mode>`: Execution mode (`analysis` or `write`)
|
||||
|
||||
**Note**: `-y` is a **command parameter inside the prompt**, NOT a `ccw cli` parameter.
|
||||
|
||||
### Prompt Assembly
|
||||
|
||||
The prompt content MUST start with the workflow command, followed by task context:
|
||||
|
||||
```
|
||||
/workflow:<command> -y <parameters>
|
||||
|
||||
Task: <description>
|
||||
|
||||
<optional_context>
|
||||
```
|
||||
|
||||
**Examples**:
|
||||
```bash
|
||||
# Planning command
|
||||
ccw cli -p '/workflow:plan -y "Implement user registration feature"
|
||||
|
||||
Task: Implement user registration' --tool claude --mode write
|
||||
|
||||
# Execution command (with session reference)
|
||||
ccw cli -p '/workflow:execute -y --resume-session="WFS-plan-20250124"
|
||||
|
||||
Task: Implement user registration
|
||||
|
||||
Previous results:
|
||||
- /workflow:plan: WFS-plan-20250124' --tool claude --mode write
|
||||
|
||||
# Lite execution (in-memory from previous plan)
|
||||
ccw cli -p '/workflow:lite-execute -y --in-memory
|
||||
|
||||
Task: Implement user registration' --tool claude --mode write
|
||||
```
|
||||
|
||||
### Serial Blocking

**CRITICAL**: Commands execute one-by-one. After launching CLI in background:
1. Orchestrator stops immediately (`break`)
2. Wait for hook callback - **DO NOT use TaskOutput polling**
3. Hook callback triggers next command

**Prompt Structure**: Command must be first in prompt content

```javascript
// Example: Execute command and stop
// The prompt contains double quotes, so wrap it in single quotes for the shell
const prompt = '/workflow:plan -y "Implement user authentication"\n\nTask: Implement user auth system';
const taskId = Bash(`ccw cli -p '${prompt}' --tool claude --mode write`, { run_in_background: true }).task_id;
state.execution_results.push({ status: 'in-progress', task_id: taskId, ... });
Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));
break; // ⚠️ STOP HERE - DO NOT use TaskOutput polling

// Hook callback will call handleCliCompletion(sessionId, taskId, output) when done
// → Updates state → Triggers next command via resumeChainExecution()
```
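For orientation, a sketch of the hook callback referenced above - the exact state shape and the `resumeChainExecution` wiring are assumptions, not a fixed API:

```javascript
// Hypothetical completion handler: mark the finished step, then continue the chain
function handleCliCompletion(sessionId, taskId, output) {
  const stateDir = `.workflow/.ccw-coordinator/${sessionId}`;
  const state = JSON.parse(Read(`${stateDir}/state.json`));

  // Locate the in-progress step for this background task and record its result
  const step = state.execution_results.find(r => r.task_id === taskId);
  step.status = 'completed';
  step.output = output;

  Write(`${stateDir}/state.json`, JSON.stringify(state, null, 2));
  resumeChainExecution(state); // launches the next pending command, or finishes
}
```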
## Available Commands

All from `~/.claude/commands/workflow/`:

**Planning**: lite-plan, plan, multi-cli-plan, plan-verify, tdd-plan
**Execution**: lite-execute, execute, develop-with-file
**Testing**: test-cycle-execute, test-gen, test-fix-gen, tdd-verify
**Review**: review, review-session-cycle, review-module-cycle, review-fix
**Bug Fixes**: lite-fix, debug, debug-with-file
**Brainstorming**: brainstorm:auto-parallel, brainstorm:artifacts, brainstorm:synthesis
**Design**: ui-design:*, animation-extract, layout-extract, style-extract, codify-style
**Session Management**: session:start, session:resume, session:complete, session:solidify, session:list
**Tools**: context-gather, test-context-gather, task-generate, conflict-resolution, action-plan-verify
**Utility**: clean, init, replan
### Testing Commands Distinction

| Command | Purpose | Output | Follow-up |
|---------|---------|--------|-----------|
| **test-gen** | Broad test-case generation plus execution | test-tasks (IMPL-001, IMPL-002) | `/workflow:execute` |
| **test-fix-gen** | Generate tests targeting specific issues, fixing as tests run | test-tasks | `/workflow:test-cycle-execute` |
| **test-cycle-execute** | Run the test cycle (iterative test-and-fix) | test-passed | N/A (terminal) |

**Flow notes**:
- **test-gen → execute**: generates a comprehensive test suite; execute performs the generation and testing
- **test-fix-gen → test-cycle-execute**: generates fix tasks for specific issues; test-cycle-execute iterates test-and-fix until passing (see the sketch below)
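A minimal invocation sequence for the targeted-fix path, assuming failing auth tests as the focus (illustrative):

```bash
# Generate fix tasks scoped to the failing tests
/workflow:test-fix-gen "failing auth-module tests"

# Iterate test-and-fix until the suite passes
/workflow:test-cycle-execute
```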
### Task Type Routing (Pipeline Summary)

**Note**: `【 】` marks Minimum Execution Units (最小执行单元) - these commands must execute together.

| Task Type | Pipeline | Minimum Units |
|-----------|----------|---|
| **feature** (simple) | Requirement →【lite-plan → lite-execute】→ code →【test-fix-gen → test-cycle-execute】→ tests pass | Quick Implementation + Test Validation |
| **feature** (complex) | Requirement →【plan → plan-verify】→ validate → execute → code → review → fix | Full Planning + Code Review + Testing |
| **bugfix** | Bug report → lite-fix → fixed code →【test-fix-gen → test-cycle-execute】→ tests pass | Bug Fix + Test Validation |
| **tdd** | Requirement → tdd-plan → TDD tasks → execute → code → tdd-verify | TDD Planning + Execution |
| **test-fix** | Failing tests →【test-fix-gen → test-cycle-execute】→ tests pass | Test Validation |
| **test-gen** | Code/session →【test-gen → execute】→ tests pass | Test Generation + Execution |
| **review** | Code →【review-* → review-fix】→ fixed code →【test-fix-gen → test-cycle-execute】→ tests pass | Code Review + Testing |
| **brainstorm** | Exploration topic → brainstorm → analysis →【plan → plan-verify】→ execute → test | Exploration + Planning + Execution |
| **multi-cli** | Requirement → multi-cli-plan → comparative analysis → lite-execute → test | Multi-Perspective + Testing |

Use `CommandRegistry.getAllCommandsSummary()` to discover all commands dynamically.
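A sketch of that dynamic discovery, assuming the summary entries expose name and description fields (the exact shape is an assumption):

```javascript
// Hypothetical: list discoverable commands for chain building
const commands = CommandRegistry.getAllCommandsSummary();
for (const cmd of commands) {
  console.log(`${cmd.name}: ${cmd.description}`);
}
```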
.claude/commands/ccw.md (new file, 486 lines)
@@ -0,0 +1,486 @@
---
name: ccw
description: Main workflow orchestrator - analyze intent, select workflow, execute command chain in main process
argument-hint: "\"task description\""
allowed-tools: SlashCommand(*), TodoWrite(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*)
---

# CCW Command - Main Workflow Orchestrator

Main process orchestrator: intent analysis → workflow selection → command chain execution.

## Core Concept: Minimum Execution Units (最小执行单元)

**Definition**: A set of commands that must execute together as an atomic group to achieve a meaningful workflow milestone.
**Why This Matters**:
- **Prevents Incomplete States**: Avoid stopping after task generation without execution
- **User Experience**: User gets complete results, not intermediate artifacts requiring manual follow-up
- **Workflow Integrity**: Maintains logical coherence of multi-step operations

**Key Units in CCW**:

| Unit Type | Pattern | Example |
|-----------|---------|---------|
| **Planning + Execution** | plan-cmd → execute-cmd | lite-plan → lite-execute |
| **Testing** | test-gen-cmd → test-exec-cmd | test-fix-gen → test-cycle-execute |
| **Review** | review-cmd → fix-cmd | review-session-cycle → review-fix |

**Atomic Rules**:
1. CCW automatically groups commands into minimum units - never splits them
2. Pipeline visualization shows units with `【 】` markers
3. Error handling preserves unit boundaries (retry/skip affects whole unit); a grouping sketch follows below
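A sketch of unit grouping, assuming each chain step carries the optional `unit` tag used throughout this file (steps without a tag form single-command units):

```javascript
// Group consecutive chain steps into atomic minimum execution units
function groupIntoUnits(chain) {
  const units = [];
  for (const step of chain) {
    const last = units[units.length - 1];
    if (step.unit && last && last.unit === step.unit) {
      last.steps.push(step); // extend the current unit
    } else {
      units.push({ unit: step.unit || null, steps: [step] });
    }
  }
  return units; // retry/skip decisions then apply per unit, not per command
}
```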
## Execution Model

**Synchronous (Main Process)**: Commands execute via SlashCommand in main process, blocking until complete.

```
User Input → Analyze Intent → Select Workflow → [Confirm] → Execute Chain
                                                                 ↓
                                                    SlashCommand (blocking)
                                                                 ↓
                                                       Update TodoWrite
                                                                 ↓
                                                        Next Command...
```

**vs ccw-coordinator**: External CLI execution with background tasks and hook callbacks.
## 5-Phase Workflow

### Phase 1: Analyze Intent

```javascript
function analyzeIntent(input) {
  return {
    goal: extractGoal(input),
    scope: extractScope(input),
    constraints: extractConstraints(input),
    task_type: detectTaskType(input),    // bugfix|feature|tdd|review|exploration|...
    complexity: assessComplexity(input), // low|medium|high
    clarity_score: calculateClarity(input) // 0-3 (>=2 = clear)
  };
}

// Task type detection (priority order)
// Compound types list several patterns; ALL must match for the type to apply
function detectTaskType(text) {
  const patterns = {
    'bugfix-hotfix': [/urgent|production|critical/, /fix|bug/],
    'bugfix': [/fix|bug|error|crash|fail|debug/],
    'issue-batch': [/issues?|batch/, /fix|resolve/],
    'exploration': [/uncertain|explore|research|what if/],
    'multi-perspective': [/multi-perspective|compare|cross-verify/],
    'quick-task': [/quick|simple|small/, /feature|function/],
    'ui-design': [/ui|design|component|style/],
    'tdd': [/tdd|test-driven|test first/],
    'test-fix': [/test fail|fix test|failing test/],
    'review': [/review|code review/],
    'documentation': [/docs|documentation|readme/]
  };
  for (const [type, regexes] of Object.entries(patterns)) {
    if (regexes.every(r => r.test(text))) return type;
  }
  return 'feature';
}
```

**Output**: `Type: [task_type] | Goal: [goal] | Complexity: [complexity] | Clarity: [clarity_score]/3`
---

### Phase 1.5: Requirement Clarification (if clarity_score < 2)

```javascript
async function clarifyRequirements(analysis) {
  if (analysis.clarity_score >= 2) return analysis;

  const questions = generateClarificationQuestions(analysis); // Goal, Scope, Constraints
  const answers = await AskUserQuestion({ questions });
  return updateAnalysis(analysis, answers);
}
```

**Questions**: Goal (Create/Fix/Optimize/Analyze), Scope (Single file/Module/Cross-module/System), Constraints (Backward compat/Skip tests/Urgent hotfix)
---

### Phase 2: Select Workflow & Build Command Chain

```javascript
function selectWorkflow(analysis) {
  const levelMap = {
    'bugfix-hotfix': { level: 2, flow: 'bugfix.hotfix' },
    'bugfix': { level: 2, flow: 'bugfix.standard' },
    'issue-batch': { level: 'Issue', flow: 'issue' },
    'exploration': { level: 4, flow: 'full' },
    'quick-task': { level: 1, flow: 'lite-lite-lite' },
    'ui-design': { level: analysis.complexity === 'high' ? 4 : 3, flow: 'ui' },
    'tdd': { level: 3, flow: 'tdd' },
    'test-fix': { level: 3, flow: 'test-fix-gen' },
    'review': { level: 3, flow: 'review-fix' },
    'documentation': { level: 2, flow: 'docs' },
    'feature': { level: analysis.complexity === 'high' ? 3 : 2, flow: analysis.complexity === 'high' ? 'coupled' : 'rapid' }
  };

  const selected = levelMap[analysis.task_type] || levelMap['feature'];
  return buildCommandChain(selected, analysis);
}

// Build command chain (port-based matching with Minimum Execution Units)
function buildCommandChain(workflow, analysis) {
  const chains = {
    // Level 1 - Rapid
    'lite-lite-lite': [
      { cmd: '/workflow:lite-lite-lite', args: `"${analysis.goal}"` }
    ],

    // Level 2 - Lightweight
    'rapid': [
      // Unit: Quick Implementation【lite-plan → lite-execute】
      { cmd: '/workflow:lite-plan', args: `"${analysis.goal}"`, unit: 'quick-impl' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'bugfix.standard': [
      // Unit: Bug Fix【lite-fix → lite-execute】
      { cmd: '/workflow:lite-fix', args: `"${analysis.goal}"`, unit: 'bug-fix' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'bug-fix' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'bugfix.hotfix': [
      { cmd: '/workflow:lite-fix', args: `--hotfix "${analysis.goal}"` }
    ],

    'multi-cli-plan': [
      // Unit: Multi-CLI Planning【multi-cli-plan → lite-execute】
      { cmd: '/workflow:multi-cli-plan', args: `"${analysis.goal}"`, unit: 'multi-cli' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'multi-cli' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'docs': [
      // Unit: Quick Implementation【lite-plan → lite-execute】
      { cmd: '/workflow:lite-plan', args: `"${analysis.goal}"`, unit: 'quick-impl' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' }
    ],

    // Level 3 - Standard
    'coupled': [
      // Unit: Verified Planning【plan → plan-verify】
      { cmd: '/workflow:plan', args: `"${analysis.goal}"`, unit: 'verified-planning' },
      { cmd: '/workflow:plan-verify', args: '', unit: 'verified-planning' },
      // Execution
      { cmd: '/workflow:execute', args: '' },
      // Unit: Code Review【review-session-cycle → review-fix】
      { cmd: '/workflow:review-session-cycle', args: '', unit: 'code-review' },
      { cmd: '/workflow:review-fix', args: '', unit: 'code-review' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'tdd': [
      // Unit: TDD Planning + Execution【tdd-plan → execute】
      { cmd: '/workflow:tdd-plan', args: `"${analysis.goal}"`, unit: 'tdd-planning' },
      { cmd: '/workflow:execute', args: '', unit: 'tdd-planning' },
      // TDD Verification
      { cmd: '/workflow:tdd-verify', args: '' }
    ],

    'test-fix-gen': [
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: `"${analysis.goal}"`, unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    'review-fix': [
      // Unit: Code Review【review-session-cycle → review-fix】
      { cmd: '/workflow:review-session-cycle', args: '', unit: 'code-review' },
      { cmd: '/workflow:review-fix', args: '', unit: 'code-review' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    'ui': [
      { cmd: '/workflow:ui-design:explore-auto', args: `"${analysis.goal}"` },
      // Unit: Planning + Execution【plan → execute】
      { cmd: '/workflow:plan', args: '', unit: 'plan-execute' },
      { cmd: '/workflow:execute', args: '', unit: 'plan-execute' }
    ],

    // Level 4 - Brainstorm
    'full': [
      { cmd: '/workflow:brainstorm:auto-parallel', args: `"${analysis.goal}"` },
      // Unit: Verified Planning【plan → plan-verify】
      { cmd: '/workflow:plan', args: '', unit: 'verified-planning' },
      { cmd: '/workflow:plan-verify', args: '', unit: 'verified-planning' },
      // Execution
      { cmd: '/workflow:execute', args: '' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    // Issue Workflow
    'issue': [
      { cmd: '/issue:discover', args: '' },
      { cmd: '/issue:plan', args: '--all-pending' },
      { cmd: '/issue:queue', args: '' },
      { cmd: '/issue:execute', args: '' }
    ]
  };

  return chains[workflow.flow] || chains['rapid'];
}
```

**Output**: `Level [X] - [flow] | Pipeline: [...] | Commands: [1. /cmd1 2. /cmd2 ...]`
---

### Phase 3: User Confirmation

```javascript
async function getUserConfirmation(chain) {
  const response = await AskUserQuestion({
    questions: [{
      question: "Execute this command chain?",
      header: "Confirm",
      options: [
        { label: "Confirm", description: "Start" },
        { label: "Adjust", description: "Modify" },
        { label: "Cancel", description: "Abort" }
      ]
    }]
  });

  // Responses are keyed by question header; the value is the selected label
  if (response.Confirm === "Cancel") throw new Error("Cancelled");
  if (response.Confirm === "Adjust") return await adjustChain(chain);
  return chain;
}
```
---

### Phase 4: Setup TODO Tracking

```javascript
function setupTodoTracking(chain, workflow) {
  const todos = chain.map((step, i) => ({
    content: `CCW:${workflow}: [${i + 1}/${chain.length}] ${step.cmd}`,
    status: i === 0 ? 'in_progress' : 'pending',
    activeForm: `Executing ${step.cmd}`
  }));
  TodoWrite({ todos });
}
```

**Output**: `-> CCW:rapid: [1/3] /workflow:lite-plan | CCW:rapid: [2/3] /workflow:lite-execute | ...`
---

### Phase 5: Execute Command Chain

```javascript
async function executeCommandChain(chain, workflow) {
  let previousResult = null;

  for (let i = 0; i < chain.length; i++) {
    try {
      const fullCommand = assembleCommand(chain[i], previousResult);
      const result = await SlashCommand({ command: fullCommand });

      previousResult = { ...result, success: true };
      updateTodoStatus(i, chain.length, workflow, 'completed');

    } catch (error) {
      const action = await handleError(chain[i], error, i);
      if (action === 'retry') {
        i--; // Retry
      } else if (action === 'abort') {
        return { success: false, error: error.message };
      }
      // 'skip' - continue
    }
  }

  return { success: true, completed: chain.length };
}

// Assemble full command with session/plan parameters
function assembleCommand(step, previousResult) {
  let command = step.cmd;
  if (step.args) {
    command += ` ${step.args}`;
  } else if (previousResult?.session_id) {
    command += ` --session="${previousResult.session_id}"`;
  }
  return command;
}

// Update TODO: mark current as complete, next as in-progress
function updateTodoStatus(index, total, workflow, status) {
  const todos = getAllCurrentTodos();
  const updated = todos.map(todo => {
    if (todo.content.startsWith(`CCW:${workflow}:`)) {
      const stepNum = extractStepIndex(todo.content);
      if (stepNum === index + 1) return { ...todo, status };
      if (stepNum === index + 2 && status === 'completed') return { ...todo, status: 'in_progress' };
    }
    return todo;
  });
  TodoWrite({ todos: updated });
}

// Error handling: Retry/Skip/Abort
async function handleError(step, error, index) {
  const response = await AskUserQuestion({
    questions: [{
      question: `${step.cmd} failed: ${error.message}`,
      header: "Error",
      options: [
        { label: "Retry", description: "Re-execute" },
        { label: "Skip", description: "Continue next" },
        { label: "Abort", description: "Stop" }
      ]
    }]
  });
  return { Retry: 'retry', Skip: 'skip', Abort: 'abort' }[response.Error] || 'abort';
}
```
---

## Execution Flow Summary

```
User Input
    |
Phase 1: Analyze Intent
    |-- Extract: goal, scope, constraints, task_type, complexity, clarity
    +-- If clarity < 2 -> Phase 1.5: Clarify Requirements
    |
Phase 2: Select Workflow & Build Chain
    |-- Map task_type -> Level (1/2/3/4/Issue)
    |-- Select flow based on complexity
    +-- Build command chain (port-based)
    |
Phase 3: User Confirmation (optional)
    |-- Show pipeline visualization
    +-- Allow adjustment
    |
Phase 4: Setup TODO Tracking
    +-- Create todos with CCW prefix
    |
Phase 5: Execute Command Chain
    |-- For each command:
    |   |-- Assemble full command
    |   |-- Execute via SlashCommand
    |   |-- Update TODO status
    |   +-- Handle errors (retry/skip/abort)
    +-- Return workflow result
```
---

## Pipeline Examples (with Minimum Execution Units)

**Note**: `【 】` marks Minimum Execution Units - commands execute together as atomic groups.

| Input | Type | Level | Pipeline (with Units) |
|-------|------|-------|-----------------------|
| "Add API endpoint" | feature (low) | 2 |【lite-plan → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Fix login timeout" | bugfix | 2 |【lite-fix → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "OAuth2 system" | feature (high) | 3 |【plan → plan-verify】→ execute →【review-session-cycle → review-fix】→【test-fix-gen → test-cycle-execute】|
| "Implement with TDD" | tdd | 3 |【tdd-plan → execute】→ tdd-verify |
| "Uncertain: real-time arch" | exploration | 4 | brainstorm:auto-parallel →【plan → plan-verify】→ execute →【test-fix-gen → test-cycle-execute】|
---

## Key Design Principles

1. **Main Process Execution** - Use SlashCommand in main process, no external CLI
2. **Intent-Driven** - Auto-select workflow based on task intent
3. **Port-Based Chaining** - Build command chain using port matching
4. **Minimum Execution Units** - Commands grouped into atomic units, never split (e.g., lite-plan → lite-execute)
5. **Progressive Clarification** - Low clarity triggers clarification phase
6. **TODO Tracking** - Use CCW prefix to isolate workflow todos
7. **Unit-Aware Error Handling** - Retry/skip/abort affects whole unit, not individual commands
8. **User Control** - Optional user confirmation at each phase
---

## State Management

**TodoWrite-Based Tracking**: All execution state tracked via TodoWrite with `CCW:` prefix.

```javascript
// Initial state
todos = [
  { content: "CCW:rapid: [1/3] /workflow:lite-plan", status: "in_progress" },
  { content: "CCW:rapid: [2/3] /workflow:lite-execute", status: "pending" },
  { content: "CCW:rapid: [3/3] /workflow:test-cycle-execute", status: "pending" }
];

// After command 1 completes
todos = [
  { content: "CCW:rapid: [1/3] /workflow:lite-plan", status: "completed" },
  { content: "CCW:rapid: [2/3] /workflow:lite-execute", status: "in_progress" },
  { content: "CCW:rapid: [3/3] /workflow:test-cycle-execute", status: "pending" }
];
```
**vs ccw-coordinator**: Extensive state.json with task_id, status transitions, hook callbacks.

---

## Type Comparison: ccw vs ccw-coordinator

| Aspect | ccw | ccw-coordinator |
|--------|-----|-----------------|
| **Type** | Main process (SlashCommand) | External CLI (ccw cli + hook callbacks) |
| **Execution** | Synchronous blocking | Async background with hook completion |
| **Workflow** | Auto intent-based selection | Manual chain building |
| **Intent Analysis** | 5-phase clarity check | 3-phase requirement analysis |
| **State** | TodoWrite only (in-memory) | state.json + checkpoint/resume |
| **Error Handling** | Retry/skip/abort (interactive) | Retry/skip/abort (via AskUser) |
| **Use Case** | Auto workflow for any task | Manual orchestration, large chains |
---

## Usage

```bash
# Auto-select workflow
ccw "Add user authentication"

# Complex requirement (triggers clarification)
ccw "Optimize system performance"

# Bug fix
ccw "Fix memory leak in WebSocket handler"

# TDD development
ccw "Implement user registration with TDD"

# Exploratory task
ccw "Uncertain about architecture for real-time notifications"
```
@@ -1,10 +1,14 @@
 ---
 name: issue:discover-by-prompt
 description: Discover issues from user prompt with Gemini-planned iterative multi-agent exploration. Uses ACE semantic search for context gathering and supports cross-module comparison (e.g., frontend vs backend API contracts).
-argument-hint: "<prompt> [--scope=src/**] [--depth=standard|deep] [--max-iterations=5]"
+argument-hint: "[-y|--yes] <prompt> [--scope=src/**] [--depth=standard|deep] [--max-iterations=5]"
 allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), AskUserQuestion(*), Glob(*), Grep(*), mcp__ace-tool__search_context(*), mcp__exa__search(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-continue all iterations, skip confirmations.
+
 # Issue Discovery by Prompt
 
 ## Quick Start
@@ -1,10 +1,14 @@
 ---
 name: issue:discover
 description: Discover potential issues from multiple perspectives (bug, UX, test, quality, security, performance, maintainability, best-practices) using CLI explore. Supports Exa external research for security and best-practices perspectives.
-argument-hint: "<path-pattern> [--perspectives=bug,ux,...] [--external]"
+argument-hint: "[-y|--yes] <path-pattern> [--perspectives=bug,ux,...] [--external]"
 allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), AskUserQuestion(*), Glob(*), Grep(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-select all perspectives, skip confirmations.
+
 # Issue Discovery Command
 
 ## Quick Start
@@ -1,10 +1,14 @@
 ---
 name: execute
 description: Execute queue with DAG-based parallel orchestration (one commit per solution)
-argument-hint: "--queue <queue-id> [--worktree [<existing-path>]]"
+argument-hint: "[-y|--yes] --queue <queue-id> [--worktree [<existing-path>]]"
 allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-confirm execution, use recommended settings.
+
 # Issue Execute Command (/issue:execute)
 
 ## Overview
@@ -312,65 +316,60 @@ batch.forEach(id => updateTodo(id, 'completed'));
 function dispatchExecutor(solutionId, executorType, worktreePath = null) {
   // If worktree is provided, executor works in that directory
   // No per-solution worktree creation - ONE worktree for entire queue
-  const cdCommand = worktreePath ? `cd "${worktreePath}"` : '';
+  // Pre-defined values (replaced at dispatch time, NOT by executor)
+  const SOLUTION_ID = solutionId;
+  const WORK_DIR = worktreePath || null;
 
+  // Build prompt without markdown code blocks to avoid escaping issues
   const prompt = `
-## Execute Solution ${solutionId}
-${worktreePath ? `
-### Step 0: Enter Queue Worktree
-\`\`\`bash
-cd "${worktreePath}"
-\`\`\`
-` : ''}
-### Step 1: Get Solution (read-only)
-\`\`\`bash
-ccw issue detail ${solutionId}
-\`\`\`
+## Execute Solution: ${SOLUTION_ID}
+${WORK_DIR ? `Working Directory: ${WORK_DIR}` : ''}
+
+### Step 1: Get Solution Details
+Run this command to get the full solution with all tasks:
+ccw issue detail ${SOLUTION_ID}
 
 ### Step 2: Execute All Tasks Sequentially
 The detail command returns a FULL SOLUTION with all tasks.
 Execute each task in order (T1 → T2 → T3 → ...):
 
 For each task:
-1. Follow task.implementation steps
-2. Run task.test commands
-3. Verify task.acceptance criteria
-(Do NOT commit after each task)
+- Follow task.implementation steps
+- Run task.test commands
+- Verify task.acceptance criteria
+- Do NOT commit after each task
 
 ### Step 3: Commit Solution (Once)
-After ALL tasks pass, commit once with formatted summary:
-\`\`\`bash
-git add <all-modified-files>
-git commit -m "[type](scope): [solution.description]
+After ALL tasks pass, commit once with formatted summary.
 
-## Solution Summary
-- Solution-ID: ${solutionId}
-- Tasks: T1, T2, ...
+Command:
+git add -A
+git commit -m "<type>(<scope>): <description>
 
-## Tasks Completed
-- [T1] task1.title: action
-- [T2] task2.title: action
+Solution: ${SOLUTION_ID}
+Tasks completed: <list task IDs>
 
-## Files Modified
-- file1.ts
-- file2.ts
+Changes:
+- <file1>: <what changed>
+- <file2>: <what changed>
 
-## Verification
-- All tests passed
-- All acceptance criteria verified"
-\`\`\`
+Verified: all tests passed"
+
+Replace <type> with: feat|fix|refactor|docs|test
+Replace <scope> with: affected module name
+Replace <description> with: brief summary from solution
 
 ### Step 4: Report Completion
-\`\`\`bash
-ccw issue done ${solutionId} --result '{"summary": "...", "files_modified": [...], "commit": {"hash": "...", "type": "feat"}, "tasks_completed": N}'
-\`\`\`
+On success, run:
+ccw issue done ${SOLUTION_ID} --result '{"summary": "<brief>", "files_modified": ["<file1>", "<file2>"], "commit": {"hash": "<hash>", "type": "<type>"}, "tasks_completed": <N>}'
 
-If any task failed:
-\`\`\`bash
-ccw issue done ${solutionId} --fail --reason '{"task_id": "TX", "error_type": "test_failure", "message": "..."}'
-\`\`\`
+On failure, run:
+ccw issue done ${SOLUTION_ID} --fail --reason '{"task_id": "<TX>", "error_type": "<test_failure|build_error|other>", "message": "<error details>"}'
 
-**Note**: Do NOT cleanup worktree after this solution. Worktree is shared by all solutions in the queue.
+### Important Notes
+- Do NOT cleanup worktree - it is shared by all solutions in the queue
+- Replace all <placeholder> values with actual values from your execution
 `;
 
   // For CLI tools, pass --cd to set working directory
@@ -1,10 +1,14 @@
 ---
 name: new
 description: Create structured issue from GitHub URL or text description
-argument-hint: "<github-url | text-description> [--priority 1-5]"
+argument-hint: "[-y|--yes] <github-url | text-description> [--priority 1-5]"
 allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*), mcp__ace-tool__search_context(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Skip clarification questions, create issue with inferred details.
+
 # Issue New Command (/issue:new)
 
 ## Core Principle
@@ -1,10 +1,14 @@
 ---
 name: plan
 description: Batch plan issue resolution using issue-plan-agent (explore + plan closed-loop)
-argument-hint: "--all-pending <issue-id>[,<issue-id>,...] [--batch-size 3] "
+argument-hint: "[-y|--yes] --all-pending <issue-id>[,<issue-id>,...] [--batch-size 3]"
 allowed-tools: TodoWrite(*), Task(*), SlashCommand(*), AskUserQuestion(*), Bash(*), Read(*), Write(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-bind solutions without confirmation, use recommended settings.
+
 # Issue Plan Command (/issue:plan)
 
 ## Overview
@@ -55,11 +59,11 @@ Unified planning command using **issue-plan-agent** that combines exploration an
 ## Execution Process
 
 ```
-Phase 1: Issue Loading
+Phase 1: Issue Loading & Intelligent Grouping
 ├─ Parse input (single, comma-separated, or --all-pending)
 ├─ Fetch issue metadata (ID, title, tags)
 ├─ Validate issues exist (create if needed)
-└─ Group by similarity (shared tags or title keywords, max 3 per batch)
+└─ Intelligent grouping via Gemini (semantic similarity, max 3 per batch)
 
 Phase 2: Unified Explore + Plan (issue-plan-agent)
 ├─ Launch issue-plan-agent per batch
@@ -119,46 +123,11 @@ if (useAllPending) {
 }
 // Note: Agent fetches full issue content via `ccw issue status <id> --json`
 
-// Semantic grouping via Gemini CLI (max 4 issues per group)
-async function groupBySimilarityGemini(issues) {
-  const issueSummaries = issues.map(i => ({
-    id: i.id, title: i.title, tags: i.tags
-  }));
+// Intelligent grouping: Analyze issues by title/tags, group semantically similar ones
+// Strategy: Same module/component, related bugs, feature clusters
+// Constraint: Max ${batchSize} issues per batch
 
-  const prompt = `
-PURPOSE: Group similar issues by semantic similarity for batch processing; maximize within-group coherence; max 4 issues per group
-TASK: • Analyze issue titles/tags semantically • Identify functional/architectural clusters • Assign each issue to one group
-MODE: analysis
-CONTEXT: Issue metadata only
-EXPECTED: JSON with groups array, each containing max 4 issue_ids, theme, rationale
-CONSTRAINTS: Each issue in exactly one group | Max 4 issues per group | Balance group sizes
-
-INPUT:
-${JSON.stringify(issueSummaries, null, 2)}
-
-OUTPUT FORMAT:
-{"groups":[{"group_id":1,"theme":"...","issue_ids":["..."],"rationale":"..."}],"ungrouped":[]}
-`;
-
-  const taskId = Bash({
-    command: `ccw cli -p "${prompt}" --tool gemini --mode analysis`,
-    run_in_background: true, timeout: 600000
-  });
-  const output = TaskOutput({ task_id: taskId, block: true });
-
-  // Extract JSON from potential markdown code blocks
-  function extractJsonFromMarkdown(text) {
-    const jsonMatch = text.match(/```json\s*\n([\s\S]*?)\n```/) ||
-                      text.match(/```\s*\n([\s\S]*?)\n```/);
-    return jsonMatch ? jsonMatch[1] : text;
-  }
-
-  const result = JSON.parse(extractJsonFromMarkdown(output));
-  return result.groups.map(g => g.issue_ids.map(id => issues.find(i => i.id === id)));
-}
-
 const batches = await groupBySimilarityGemini(issues);
-console.log(`Processing ${issues.length} issues in ${batches.length} batch(es) (max 4 issues/agent)`);
+console.log(`Processing ${issues.length} issues in ${batches.length} batch(es)`);
 
 TodoWrite({
   todos: batches.map((_, i) => ({
@@ -207,7 +176,9 @@ ${issueList}
 - Add explicit verification steps to prevent same failure mode
 6. **If github_url exists**: Add final task to comment on GitHub issue
 7. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl
-8. Single solution → auto-bind; Multiple → return for selection
+8. **CRITICAL - Binding Decision**:
+   - Single solution → **MUST execute**: ccw issue bind <issue-id> <solution-id>
+   - Multiple solutions → Return pending_selection only (no bind)
 
 ### Failure-Aware Planning Rules
 - **Extract failure patterns**: Parse issue.feedback where type='failure' and stage='execute'
@@ -265,35 +236,55 @@ for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
 }
 agentResults.push(summary); // Store for Phase 3 conflict aggregation
 
+// Verify binding for bound issues (agent should have executed bind)
 for (const item of summary.bound || []) {
-  console.log(`✓ ${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
+  const status = JSON.parse(Bash(`ccw issue status ${item.issue_id} --json`).trim());
+  if (status.bound_solution_id === item.solution_id) {
+    console.log(`✓ ${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
+  } else {
+    // Fallback: agent failed to bind, execute here
+    Bash(`ccw issue bind ${item.issue_id} ${item.solution_id}`);
+    console.log(`✓ ${item.issue_id}: ${item.solution_id} (${item.task_count} tasks) [recovered]`);
+  }
 }
-// Collect and notify pending selections
+// Collect pending selections for Phase 3
 for (const pending of summary.pending_selection || []) {
   console.log(`⏳ ${pending.issue_id}: ${pending.solutions.length} solutions → awaiting selection`);
   pendingSelections.push(pending);
 }
 if (summary.conflicts?.length > 0) {
   console.log(`⚠ Conflicts: ${summary.conflicts.length} detected (will resolve in Phase 3)`);
 }
 updateTodo(`Plan batch ${batchIndex + 1}`, 'completed');
 }
 }
 ```
 
-### Phase 3: Conflict Resolution & Solution Selection
+### Phase 3: Solution Selection (if pending)
 
-**Conflict Handling:**
-- Collect `conflicts` from all agent results
-- Low/Medium severity → auto-resolve with `recommended_resolution`
-- High severity → use `AskUserQuestion` to let user choose resolution
+```javascript
+// Handle multi-solution issues
+for (const pending of pendingSelections) {
+  if (pending.solutions.length === 0) continue;
 
-**Multi-Solution Selection:**
-- If `pending_selection` contains issues with multiple solutions:
-  - Use `AskUserQuestion` to present options (solution ID + task count + description)
-  - Extract selected solution ID from user response
-  - Verify solution file exists, recover from payload if missing
-  - Bind selected solution via `ccw issue bind <issue-id> <solution-id>`
+  const options = pending.solutions.slice(0, 4).map(sol => ({
+    label: `${sol.id} (${sol.task_count} tasks)`,
+    description: sol.description || sol.approach || 'No description'
+  }));
+
+  const answer = AskUserQuestion({
+    questions: [{
+      question: `Issue ${pending.issue_id}: which solution to bind?`,
+      header: pending.issue_id,
+      options: options,
+      multiSelect: false
+    }]
+  });
+
+  const selected = answer[Object.keys(answer)[0]];
+  if (!selected || selected === 'Other') continue;
+
+  const solId = selected.split(' ')[0];
+  Bash(`ccw issue bind ${pending.issue_id} ${solId}`);
+  console.log(`✓ ${pending.issue_id}: ${solId} bound`);
+}
+```
 
 ### Phase 4: Summary
@@ -1,10 +1,14 @@
 ---
 name: queue
 description: Form execution queue from bound solutions using issue-queue-agent (solution-level)
-argument-hint: "[--queues <n>] [--issue <id>]"
+argument-hint: "[-y|--yes] [--queues <n>] [--issue <id>]"
 allowed-tools: TodoWrite(*), Task(*), Bash(*), Read(*), Write(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-confirm queue formation, use recommended conflict resolutions.
+
 # Issue Queue Command (/issue:queue)
 
 ## Overview
@@ -28,12 +32,13 @@ Queue formation command using **issue-queue-agent** that analyzes all bound solu
 | Operation | Correct | Incorrect |
 |-----------|---------|-----------|
 | List issues (brief) | `ccw issue list --status planned --brief` | `Read('issues.jsonl')` |
+| **Batch solutions (NEW)** | `ccw issue solutions --status planned --brief` | Loop `ccw issue solution <id>` |
 | List queue (brief) | `ccw issue queue --brief` | `Read('queues/*.json')` |
 | Read issue details | `ccw issue status <id> --json` | `Read('issues.jsonl')` |
 | Get next item | `ccw issue next --json` | `Read('queues/*.json')` |
 | Update status | `ccw issue update <id> --status ...` | Direct file edit |
 | Sync from queue | `ccw issue update --from-queue` | Direct file edit |
-| **Read solution (brief)** | `ccw issue solution <id> --brief` | `Read('solutions/*.jsonl')` |
+| Read solution (single) | `ccw issue solution <id> --brief` | `Read('solutions/*.jsonl')` |
 
 **Output Options**:
 - `--brief`: JSON with minimal fields (id, status, counts)
@@ -131,24 +136,23 @@ Phase 7: Active Queue Check & Decision (REQUIRED)
 ### Phase 1: Solution Loading & Distribution
 
 **Data Loading:**
-- Use `ccw issue list --status planned --brief` to get planned issues with `bound_solution_id`
-- If no planned issues found → display message, suggest `/issue:plan`
-
-**Solution Brief Loading** (for each planned issue):
-```bash
-ccw issue solution <issue-id> --brief
-# Returns: [{ solution_id, is_bound, task_count, files_touched[] }]
-```
+- Use `ccw issue solutions --status planned --brief` to get all planned issues with solutions in **one call**
+- Returns: Array of `{ issue_id, solution_id, is_bound, task_count, files_touched[], priority }`
+- If no bound solutions found → display message, suggest `/issue:plan`
 
-**Build Solution Objects:**
-```json
-{
-  "issue_id": "ISS-xxx",
-  "solution_id": "SOL-ISS-xxx-1",
-  "task_count": 3,
-  "files_touched": ["src/auth.ts", "src/utils.ts"],
-  "priority": "medium"
-}
+```javascript
+// Single CLI call replaces N individual queries
+const result = Bash(`ccw issue solutions --status planned --brief`).trim();
+const solutions = result ? JSON.parse(result) : [];
+
+if (solutions.length === 0) {
+  console.log('No bound solutions found. Run /issue:plan first.');
+  return;
+}
+
+// solutions already in correct format:
+// { issue_id, solution_id, is_bound, task_count, files_touched[], priority }
 ```
 
 **Multi-Queue Distribution** (if `--queues > 1`):
@@ -1,9 +1,13 @@
 ---
 name: breakdown
 description: Decompose complex task into subtasks with dependency mapping, creates child task JSONs with parent references and execution order
-argument-hint: "task-id"
+argument-hint: "[-y|--yes] task-id"
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-confirm breakdown, use recommended subtask structure.
+
 # Task Breakdown Command (/task:breakdown)
 
 ## Overview
@@ -1,10 +1,14 @@
 ---
 name: replan
 description: Update task JSON with new requirements or batch-update multiple tasks from verification report, tracks changes in task-changes.json
-argument-hint: "task-id [\"text\"|file.md] | --batch [verification-report.md]"
+argument-hint: "[-y|--yes] task-id [\"text\"|file.md] | --batch [verification-report.md]"
 allowed-tools: Read(*), Write(*), Edit(*), TodoWrite(*), Glob(*), Bash(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-confirm updates, use recommended changes.
+
 # Task Replan Command (/task:replan)
 
 > **⚠️ DEPRECATION NOTICE**: This command is maintained for backward compatibility. For new workflows, use `/workflow:replan` which provides:
@@ -353,7 +357,7 @@ Review error details in summary report
 
 # No replan recommendations found
 Verification report contains no replan recommendations
-Check report content or use /workflow:action-plan-verify first
+Check report content or use /workflow:plan-verify first
 ```
 
 ## Batch Mode Integration
@@ -1,8 +1,8 @@
 ---
-name: action-plan-verify
-description: Perform non-destructive cross-artifact consistency analysis between IMPL_PLAN.md and task JSONs with quality gate validation
+name: plan-verify
+description: Perform READ-ONLY verification analysis between IMPL_PLAN.md, task JSONs, and brainstorming artifacts. Generates structured report with quality gate recommendation. Does NOT modify any files.
 argument-hint: "[optional: --session session-id]"
-allowed-tools: Read(*), TodoWrite(*), Glob(*), Bash(*)
+allowed-tools: Read(*), Write(*), Glob(*), Bash(*)
 ---
 
 ## User Input
@@ -15,13 +15,26 @@ You **MUST** consider the user input before proceeding (if not empty).
 
 ## Goal
 
-Identify inconsistencies, duplications, ambiguities, and underspecified items between action planning artifacts (`IMPL_PLAN.md`, `task.json`) and brainstorming artifacts (`role analysis documents`) before implementation. This command MUST run only after `/workflow:plan` has successfully produced complete `IMPL_PLAN.md` and task JSON files.
+Generate a comprehensive verification report that identifies inconsistencies, duplications, ambiguities, and underspecified items between action planning artifacts (`IMPL_PLAN.md`, `task.json`) and brainstorming artifacts (`role analysis documents`). This command MUST run only after `/workflow:plan` has successfully produced complete `IMPL_PLAN.md` and task JSON files.
+
+**Output**: A structured Markdown report saved to `.workflow/active/WFS-{session}/.process/ACTION_PLAN_VERIFICATION.md` containing:
+- Executive summary with quality gate recommendation
+- Detailed findings by severity (CRITICAL/HIGH/MEDIUM/LOW)
+- Requirements coverage analysis
+- Dependency integrity check
+- Synthesis alignment validation
+- Actionable remediation recommendations
 
 ## Operating Constraints
 
-**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands).
+**STRICTLY READ-ONLY FOR SOURCE ARTIFACTS**:
+- **MUST NOT** modify `IMPL_PLAN.md`, any `task.json` files, or brainstorming artifacts
+- **MUST NOT** create or delete task files
+- **MUST ONLY** write the verification report to `.process/ACTION_PLAN_VERIFICATION.md`
 
-**Synthesis Authority**: The `role analysis documents` is **authoritative** for requirements and design decisions. Any conflicts between IMPL_PLAN/tasks and synthesis are automatically CRITICAL and require adjustment of the plan/tasks—not reinterpretation of requirements.
+**Synthesis Authority**: The `role analysis documents` are **authoritative** for requirements and design decisions. Any conflicts between IMPL_PLAN/tasks and synthesis are automatically CRITICAL and require adjustment of the plan/tasks—not reinterpretation of requirements.
+
+**Quality Gate Authority**: The verification report provides a binding recommendation (BLOCK_EXECUTION / PROCEED_WITH_FIXES / PROCEED_WITH_CAUTION / PROCEED) based on objective severity criteria. User MUST review critical/high issues before proceeding with implementation.
 
 ## Execution Steps
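To illustrate how a severity-based gate could map to those four recommendations, a hedged sketch (the exact thresholds are assumptions, not part of this command's spec):

```javascript
// Hypothetical mapping from finding counts to a quality gate recommendation
function gateRecommendation(counts) {
  if (counts.critical > 0) return 'BLOCK_EXECUTION';
  if (counts.high > 0) return 'PROCEED_WITH_FIXES';
  if (counts.medium > 0) return 'PROCEED_WITH_CAUTION';
  return 'PROCEED';
}
```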
@@ -47,6 +60,12 @@ ELSE:
   session_dir = .workflow/active/WFS-{session}
 brainstorm_dir = session_dir/.brainstorming
 task_dir = session_dir/.task
+process_dir = session_dir/.process
+session_file = session_dir/workflow-session.json
+
+# Create .process directory if not exists (report output location)
+IF NOT EXISTS(process_dir):
+  bash(mkdir -p "{process_dir}")
 
 # Validate required artifacts
 # Note: "role analysis documents" refers to [role]/analysis.md files (e.g., product-manager/analysis.md)
@@ -54,7 +73,12 @@ SYNTHESIS_DIR = brainstorm_dir # Contains role analysis files: */analysis.md
 IMPL_PLAN = session_dir/IMPL_PLAN.md
 TASK_FILES = Glob(task_dir/*.json)
 
-# Abort if missing
+# Abort if missing - in order of dependency
+SESSION_FILE_EXISTS = EXISTS(session_file)
+IF NOT SESSION_FILE_EXISTS:
+  WARNING: "workflow-session.json not found. User intent alignment verification will be skipped."
+  # Continue execution - this is optional context, not blocking
+
 SYNTHESIS_FILES = Glob(brainstorm_dir/*/analysis.md)
 IF SYNTHESIS_FILES.count == 0:
   ERROR: "No role analysis documents found in .brainstorming/*/analysis.md. Run /workflow:brainstorm:synthesis first"
@@ -73,12 +97,14 @@ IF TASK_FILES.count == 0:
 
 Load only minimal necessary context from each artifact:
 
-**From workflow-session.json** (NEW - PRIMARY REFERENCE):
+**From workflow-session.json** (OPTIONAL - Primary Reference for User Intent):
+- **ONLY IF EXISTS**: Load user intent context
 - Original user prompt/intent (project or description field)
 - User's stated goals and objectives
 - User's scope definition
+- **IF MISSING**: Set user_intent_analysis = "SKIPPED: workflow-session.json not found"
 
-**From role analysis documents**:
+**From role analysis documents** (AUTHORITATIVE SOURCE):
 - Functional Requirements (IDs, descriptions, acceptance criteria)
 - Non-Functional Requirements (IDs, targets)
 - Business Requirements (IDs, success metrics)
@@ -126,9 +152,21 @@ Create internal representations (do not include raw artifacts in output):
 
 ### 4. Detection Passes (Token-Efficient Analysis)
 
-Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary.
+**Token Budget Strategy**:
+- **Total Limit**: 50 findings maximum (aggregate remainder in overflow summary)
+- **Priority Allocation**: CRITICAL (unlimited) → HIGH (15) → MEDIUM (20) → LOW (15)
+- **Early Exit**: If CRITICAL findings > 0 in User Intent/Requirements Coverage, skip LOW/MEDIUM priority checks
 
-#### A. User Intent Alignment (NEW - CRITICAL)
+**Execution Order** (Process in sequence; skip if token budget exhausted):
+
+1. **Tier 1 (CRITICAL Path)**: A, B, C - User intent, coverage, consistency (process fully)
+2. **Tier 2 (HIGH Priority)**: D, E - Dependencies, synthesis alignment (limit 15 findings total)
+3. **Tier 3 (MEDIUM Priority)**: F - Specification quality (limit 20 findings)
+4. **Tier 4 (LOW Priority)**: G, H - Duplication, feasibility (limit 15 findings total)
+
+---
+
+#### A. User Intent Alignment (CRITICAL - Tier 1)
 
 - **Goal Alignment**: IMPL_PLAN objectives match user's original intent
 - **Scope Drift**: Plan covers user's stated scope without unauthorized expansion
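The tiered budget above could be enforced with a small cap-tracking helper; this is an illustrative sketch only, not part of the command spec:

```javascript
// Hypothetical finding collector honoring the per-severity caps
const caps = { critical: Infinity, high: 15, medium: 20, low: 15 };
const kept = { critical: 0, high: 0, medium: 0, low: 0 };
const overflow = [];

function addFinding(finding) {
  const sev = finding.severity; // 'critical' | 'high' | 'medium' | 'low'
  const total = Object.values(kept).reduce((a, b) => a + b, 0);
  if (kept[sev] < caps[sev] && total < 50) {
    kept[sev]++;
    return finding; // include in the report body
  }
  overflow.push(finding); // summarized in the overflow section
  return null;
}
```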
@@ -1,10 +1,14 @@
 ---
 name: artifacts
 description: Interactive clarification generating confirmed guidance specification through role-based analysis and synthesis
-argument-hint: "topic or challenge description [--count N]"
+argument-hint: "[-y|--yes] topic or challenge description [--count N]"
 allowed-tools: TodoWrite(*), Read(*), Write(*), Glob(*), AskUserQuestion(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-select recommended roles, skip all clarification questions, use default answers.
+
 ## Overview
 
 Seven-phase workflow: **Context collection** → **Topic analysis** → **Role selection** → **Role questions** → **Conflict resolution** → **Final check** → **Generate specification**
@@ -1,10 +1,14 @@
 ---
 name: auto-parallel
 description: Parallel brainstorming automation with dynamic role selection and concurrent execution across multiple perspectives
-argument-hint: "topic or challenge description" [--count N]
+argument-hint: "[-y|--yes] topic or challenge description [--count N]"
 allowed-tools: SlashCommand(*), Task(*), TodoWrite(*), Read(*), Write(*), Bash(*), Glob(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-select recommended roles, skip all clarification questions, use default answers.
+
 # Workflow Brainstorm Parallel Auto Command
 
 ## Coordinator Role
@@ -1,10 +1,14 @@
 ---
 name: synthesis
 description: Clarify and refine role analyses through intelligent Q&A and targeted updates with synthesis agent
-argument-hint: "[optional: --session session-id]"
+argument-hint: "[-y|--yes] [optional: --session session-id]"
 allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*), Edit(*), Glob(*), AskUserQuestion(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-select all enhancements, skip clarification questions, use default answers.
+
 ## Overview
 
 Six-phase workflow to eliminate ambiguities and enhance conceptual depth in role analyses:
@@ -1,7 +1,7 @@
 ---
 name: clean
 description: Intelligent code cleanup with mainline detection, stale artifact discovery, and safe execution
-argument-hint: "[--dry-run] [\"focus area\"]"
+argument-hint: "[-y|--yes] [--dry-run] [\"focus area\"]"
 allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Glob(*), Bash(*), Write(*)
 ---
 
@@ -21,8 +21,22 @@ Intelligent cleanup command that explores the codebase to identify the developme
 
 ```bash
 /workflow:clean                   # Full intelligent cleanup (explore → analyze → confirm → execute)
+/workflow:clean --yes             # Auto mode (use safe defaults, no confirmation)
 /workflow:clean --dry-run         # Explore and analyze only, no execution
 /workflow:clean "auth module"     # Focus cleanup on specific area
+/workflow:clean -y "auth module"  # Auto mode with focus area
 ```
 
+## Auto Mode Defaults
+
+When `--yes` or `-y` flag is used:
+- **Categories to Clean**: Auto-selects `["Sessions"]` only (safest - only workflow sessions)
+- **Risk Level**: Auto-selects `"Low only"` (only low-risk items)
+- All confirmations skipped, proceeds directly to execution
+
+**Flag Parsing**:
+```javascript
+const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
+const dryRun = $ARGUMENTS.includes('--dry-run')
+```
+
 ## Execution Process
@@ -329,39 +343,57 @@ To execute cleanup: /workflow:clean
 
 **Step 3.3: User Confirmation**
 ```javascript
-AskUserQuestion({
-  questions: [
-    {
-      question: "Which categories to clean?",
-      header: "Categories",
-      multiSelect: true,
-      options: [
-        {
-          label: "Sessions",
-          description: `${manifest.summary.by_category.stale_sessions} stale workflow sessions`
-        },
-        {
-          label: "Documents",
-          description: `${manifest.summary.by_category.drifted_documents} drifted documents`
-        },
-        {
-          label: "Dead Code",
-          description: `${manifest.summary.by_category.dead_code} unused code files`
-        }
-      ]
-    },
-    {
-      question: "Risk level to include?",
-      header: "Risk",
-      multiSelect: false,
-      options: [
-        { label: "Low only", description: "Safest - only obviously stale items" },
-        { label: "Low + Medium", description: "Recommended - includes likely unused items" },
-        { label: "All", description: "Aggressive - includes high-risk items" }
-      ]
-    }
-  ]
-})
+// Parse --yes flag
+const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
+
+let userSelection
+
+if (autoYes) {
+  // Auto mode: Use safe defaults
+  console.log(`[--yes] Auto-selecting safe cleanup defaults:`)
+  console.log(`  - Categories: Sessions only`)
+  console.log(`  - Risk level: Low only`)
+
+  userSelection = {
+    categories: ["Sessions"],
+    risk: "Low only"
+  }
+} else {
+  // Interactive mode: Ask user
+  userSelection = AskUserQuestion({
+    questions: [
+      {
+        question: "Which categories to clean?",
+        header: "Categories",
+        multiSelect: true,
+        options: [
+          {
+            label: "Sessions",
+            description: `${manifest.summary.by_category.stale_sessions} stale workflow sessions`
+          },
+          {
+            label: "Documents",
+            description: `${manifest.summary.by_category.drifted_documents} drifted documents`
+          },
+          {
+            label: "Dead Code",
+            description: `${manifest.summary.by_category.dead_code} unused code files`
+          }
+        ]
+      },
+      {
+        question: "Risk level to include?",
+        header: "Risk",
+        multiSelect: false,
+        options: [
+          { label: "Low only", description: "Safest - only obviously stale items" },
+          { label: "Low + Medium", description: "Recommended - includes likely unused items" },
+          { label: "All", description: "Aggressive - includes high-risk items" }
+        ]
+      }
+    ]
+  })
+}
 ```
 
 ---
@@ -1,10 +1,14 @@
 ---
 name: debug-with-file
 description: Interactive hypothesis-driven debugging with documented exploration, understanding evolution, and Gemini-assisted correction
-argument-hint: "\"bug description or error message\""
+argument-hint: "[-y|--yes] \"bug description or error message\""
 allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
 ---
 
+## Auto Mode
+
+When `--yes` or `-y`: Auto-confirm all decisions (hypotheses, fixes, iteration), use recommended settings.
+
 # Workflow Debug-With-File Command (/workflow:debug-with-file)
 
 ## Overview
@@ -1,10 +1,14 @@
---
name: debug
description: Interactive hypothesis-driven debugging with NDJSON logging, iterative until resolved
argument-hint: "\"bug description or error message\""
argument-hint: "[-y|--yes] \"bug description or error message\""
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-confirm all decisions (hypotheses, fixes, iteration), use recommended settings.

# Workflow Debug Command (/workflow:debug)

## Overview

1044 .claude/commands/workflow/develop-with-file.md (new file)
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
---
name: execute
description: Coordinate agent execution for workflow tasks with automatic session discovery, parallel task processing, and status tracking
argument-hint: "[--resume-session=\"session-id\"]"
argument-hint: "[-y|--yes] [--resume-session=\"session-id\"]"
---

# Workflow Execute Command

@@ -11,6 +11,30 @@ Orchestrates autonomous workflow execution through systematic task discovery, ag

**Resume Mode**: When called with `--resume-session` flag, skips discovery phase and directly enters TodoWrite generation and agent execution for the specified session.

## Usage

```bash
# Interactive mode (with confirmations)
/workflow:execute
/workflow:execute --resume-session="WFS-auth"

# Auto mode (skip confirmations, use defaults)
/workflow:execute --yes
/workflow:execute -y
/workflow:execute -y --resume-session="WFS-auth"
```

## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Session Selection**: Automatically selects the first (most recent) active session
- **Completion Choice**: Automatically completes session (runs `/workflow:session:complete --yes`)

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```
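
The snippet above only detects the boolean flags; the `--resume-session` value still needs extracting. A minimal sketch, assuming the same `$ARGUMENTS` string convention:

```javascript
// Illustrative only: extract the session ID from --resume-session="WFS-..."
const resumeMatch = $ARGUMENTS.match(/--resume-session=["']?([\w-]+)["']?/)
const resumeSessionId = resumeMatch ? resumeMatch[1] : null  // e.g. "WFS-auth"
```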

## Performance Optimization Strategy

**Lazy Loading**: Task JSONs read **on-demand** during execution, not upfront. TODO_LIST.md + IMPL_PLAN.md provide metadata for planning.

@@ -122,24 +146,38 @@ List sessions with metadata and prompt user selection:
bash(for dir in .workflow/active/WFS-*/; do [ -d "$dir" ] || continue; session=$(basename "$dir"); project=$(jq -r '.project // "Unknown"' "${dir}workflow-session.json" 2>/dev/null || echo "Unknown"); total=$(grep -c '^\- \[' "${dir}TODO_LIST.md" 2>/dev/null || echo 0); completed=$(grep -c '^\- \[x\]' "${dir}TODO_LIST.md" 2>/dev/null || echo 0); if [ "$total" -gt 0 ]; then progress=$((completed * 100 / total)); else progress=0; fi; echo "$session | $project | $completed/$total tasks ($progress%)"; done)
```

Use AskUserQuestion to present formatted options (max 4 options shown):
**Parse --yes flag**:
```javascript
// If more than 4 sessions, show most recent 4 with "Other" option for manual input
const sessions = getActiveSessions() // sorted by last modified
const displaySessions = sessions.slice(0, 4)
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```

AskUserQuestion({
  questions: [{
    question: "Multiple active sessions detected. Select one:",
    header: "Session",
    multiSelect: false,
    options: displaySessions.map(s => ({
      label: s.id,
      description: `${s.project} | ${s.progress}`
    }))
    // Note: User can select "Other" to manually enter session ID
  }]
})
**Conditional Selection**:
```javascript
if (autoYes) {
  // Auto mode: Select first session (most recent)
  const firstSession = sessions[0]
  console.log(`[--yes] Auto-selecting session: ${firstSession.id}`)
  selectedSessionId = firstSession.id
  // Continue to Phase 2
} else {
  // Interactive mode: Use AskUserQuestion to present formatted options (max 4 options shown)
  // If more than 4 sessions, show most recent 4 with "Other" option for manual input
  const sessions = getActiveSessions() // sorted by last modified
  const displaySessions = sessions.slice(0, 4)

  AskUserQuestion({
    questions: [{
      question: "Multiple active sessions detected. Select one:",
      header: "Session",
      multiSelect: false,
      options: displaySessions.map(s => ({
        label: s.id,
        description: `${s.project} | ${s.progress}`
      }))
      // Note: User can select "Other" to manually enter session ID
    }]
  })
}
```
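
`getActiveSessions()` is referenced above but not defined in this hunk. A minimal sketch of what it might look like, assuming the `.workflow/active/WFS-*` layout and Node's `fs` API (the `progress` computation from TODO_LIST.md is omitted here):

```javascript
const fs = require('fs')
const path = require('path')

// Illustrative helper: list active sessions sorted by last-modified time
function getActiveSessions() {
  const root = '.workflow/active'
  if (!fs.existsSync(root)) return []
  return fs.readdirSync(root)
    .filter(name => name.startsWith('WFS-'))
    .map(name => {
      const dir = path.join(root, name)
      let project = 'Unknown'
      try {
        const meta = JSON.parse(fs.readFileSync(path.join(dir, 'workflow-session.json'), 'utf8'))
        project = meta.project || 'Unknown'
      } catch (_) { /* session metadata is optional */ }
      return { id: name, project, mtime: fs.statSync(dir).mtimeMs }
    })
    .sort((a, b) => b.mtime - a.mtime)
}
```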

**Input Validation**:

@@ -252,23 +290,33 @@ while (TODO_LIST.md has pending tasks) {
6. **User Choice**: When all tasks finished, ask user to choose next step:

```javascript
AskUserQuestion({
  questions: [{
    question: "All tasks completed. What would you like to do next?",
    header: "Next Step",
    multiSelect: false,
    options: [
      {
        label: "Enter Review",
        description: "Run specialized review (security/architecture/quality/action-items)"
      },
      {
        label: "Complete Session",
        description: "Archive session and update manifest"
      }
    ]
  }]
})
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Complete session automatically
  console.log(`[--yes] Auto-selecting: Complete Session`)
  SlashCommand("/workflow:session:complete --yes")
} else {
  // Interactive mode: Ask user
  AskUserQuestion({
    questions: [{
      question: "All tasks completed. What would you like to do next?",
      header: "Next Step",
      multiSelect: false,
      options: [
        {
          label: "Enter Review",
          description: "Run specialized review (security/architecture/quality/action-items)"
        },
        {
          label: "Complete Session",
          description: "Archive session and update manifest"
        }
      ]
    }]
  })
}
```

**Based on user selection**:

@@ -1,7 +1,7 @@
---
name: lite-execute
description: Execute tasks based on in-memory plan, prompt description, or file content
argument-hint: "[--in-memory] [\"task description\"|file-path]"
argument-hint: "[-y|--yes] [--in-memory] [\"task description\"|file-path]"
allowed-tools: TodoWrite(*), Task(*), Bash(*)
---

@@ -62,31 +62,49 @@ Flexible task execution command supporting three input modes: in-memory plan (fr

**User Interaction**:
```javascript
AskUserQuestion({
  questions: [
    {
      question: "Select execution method:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent", description: "@code-developer agent" },
        { label: "Codex", description: "codex CLI tool" },
        { label: "Auto", description: "Auto-select based on complexity" }
      ]
    },
    {
      question: "Enable code review after execution?",
      header: "Code Review",
      multiSelect: false,
      options: [
        { label: "Skip", description: "No review" },
        { label: "Gemini Review", description: "Gemini CLI tool" },
        { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" },
        { label: "Agent Review", description: "Current agent review" }
      ]
    }
  ]
})
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

let userSelection

if (autoYes) {
  // Auto mode: Use defaults
  console.log(`[--yes] Auto-confirming execution:`)
  console.log(`  - Execution method: Auto`)
  console.log(`  - Code review: Skip`)

  userSelection = {
    execution_method: "Auto",
    code_review_tool: "Skip"
  }
} else {
  // Interactive mode: Ask user
  userSelection = AskUserQuestion({
    questions: [
      {
        question: "Select execution method:",
        header: "Execution",
        multiSelect: false,
        options: [
          { label: "Agent", description: "@code-developer agent" },
          { label: "Codex", description: "codex CLI tool" },
          { label: "Auto", description: "Auto-select based on complexity" }
        ]
      },
      {
        question: "Enable code review after execution?",
        header: "Code Review",
        multiSelect: false,
        options: [
          { label: "Skip", description: "No review" },
          { label: "Gemini Review", description: "Gemini CLI tool" },
          { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" },
          { label: "Agent Review", description: "Current agent review" }
        ]
      }
    ]
  })
}
```

### Mode 3: File Content

@@ -1,7 +1,7 @@
---
name: lite-fix
description: Lightweight bug diagnosis and fix workflow with intelligent severity assessment and optional hotfix mode for production incidents
argument-hint: "[--hotfix] \"bug description or issue reference\""
argument-hint: "[-y|--yes] [--hotfix] \"bug description or issue reference\""
allowed-tools: TodoWrite(*), Task(*), SlashCommand(*), AskUserQuestion(*)
---

@@ -25,10 +25,32 @@ Intelligent lightweight bug fixing command with dynamic workflow adaptation base
/workflow:lite-fix [FLAGS] <BUG_DESCRIPTION>

# Flags
-y, --yes        Skip all confirmations (auto mode)
--hotfix, -h     Production hotfix mode (minimal diagnosis, fast fix)

# Arguments
<bug-description>    Bug description, error message, or path to .md file (required)

# Examples
/workflow:lite-fix "User login fails"                                    # Interactive mode
/workflow:lite-fix --yes "User login fails"                              # Auto mode (no confirmations)
/workflow:lite-fix -y --hotfix "Production database connection failure"  # Auto + hotfix mode
```

## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Clarification Questions**: Skipped (no clarification phase)
- **Fix Plan Confirmation**: Auto-selected "Allow"
- **Execution Method**: Auto-selected "Auto"
- **Code Review**: Auto-selected "Skip"
- **Severity**: Uses auto-detected severity (no manual override)
- **Hotfix Mode**: Respects --hotfix flag if present, otherwise normal mode

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
const hotfixMode = $ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h')
```
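
The auto-detected severity mentioned above is not defined in this hunk; a minimal sketch of one plausible keyword heuristic (the keywords and tiers are assumptions, not part of the spec):

```javascript
// Illustrative only: infer severity from the bug description text
function detectSeverity(description) {
  if (/production|outage|data loss|security/i.test(description)) return 'Critical'
  if (/crash|exception|cannot|fail/i.test(description)) return 'High'
  if (/slow|intermittent|flaky/i.test(description)) return 'Medium'
  return 'Low'
}
```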

## Execution Process

@@ -332,9 +354,17 @@ function deduplicateClarifications(clarifications) {

const uniqueClarifications = deduplicateClarifications(allClarifications)

// Multi-round clarification: batch questions (max 4 per round)
// ⚠️ MUST execute ALL rounds until uniqueClarifications exhausted
if (uniqueClarifications.length > 0) {
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Skip clarification phase
  console.log(`[--yes] Skipping ${uniqueClarifications.length} clarification questions`)
  console.log(`Proceeding to fix planning with diagnosis results...`)
  // Continue to Phase 3
} else if (uniqueClarifications.length > 0) {
  // Interactive mode: Multi-round clarification
  // ⚠️ MUST execute ALL rounds until uniqueClarifications exhausted
  const BATCH_SIZE = 4
  const totalRounds = Math.ceil(uniqueClarifications.length / BATCH_SIZE)

@@ -600,40 +630,60 @@ ${fixPlan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope})`).join('\n')}

**Step 4.2: Collect Confirmation**
```javascript
AskUserQuestion({
  questions: [
    {
      question: `Confirm fix plan? (${fixPlan.tasks.length} tasks, ${fixPlan.severity} severity)`,
      header: "Confirm",
      multiSelect: true,
      options: [
        { label: "Allow", description: "Proceed as-is" },
        { label: "Modify", description: "Adjust before execution" },
        { label: "Cancel", description: "Abort workflow" }
      ]
    },
    {
      question: "Execution method:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent", description: "@code-developer agent" },
        { label: "Codex", description: "codex CLI tool" },
        { label: "Auto", description: `Auto: ${fixPlan.severity === 'Low' ? 'Agent' : 'Codex'}` }
      ]
    },
    {
      question: "Code review after fix?",
      header: "Review",
      multiSelect: false,
      options: [
        { label: "Gemini Review", description: "Gemini CLI" },
        { label: "Agent Review", description: "@code-reviewer" },
        { label: "Skip", description: "No review" }
      ]
    }
  ]
})
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

let userSelection

if (autoYes) {
  // Auto mode: Use defaults
  console.log(`[--yes] Auto-confirming fix plan:`)
  console.log(`  - Confirmation: Allow`)
  console.log(`  - Execution: Auto`)
  console.log(`  - Review: Skip`)

  userSelection = {
    confirmation: "Allow",
    execution_method: "Auto",
    code_review_tool: "Skip"
  }
} else {
  // Interactive mode: Ask user
  userSelection = AskUserQuestion({
    questions: [
      {
        question: `Confirm fix plan? (${fixPlan.tasks.length} tasks, ${fixPlan.severity} severity)`,
        header: "Confirm",
        multiSelect: false,
        options: [
          { label: "Allow", description: "Proceed as-is" },
          { label: "Modify", description: "Adjust before execution" },
          { label: "Cancel", description: "Abort workflow" }
        ]
      },
      {
        question: "Execution method:",
        header: "Execution",
        multiSelect: false,
        options: [
          { label: "Agent", description: "@code-developer agent" },
          { label: "Codex", description: "codex CLI tool" },
          { label: "Auto", description: `Auto: ${fixPlan.severity === 'Low' ? 'Agent' : 'Codex'}` }
        ]
      },
      {
        question: "Code review after fix?",
        header: "Review",
        multiSelect: false,
        options: [
          { label: "Gemini Review", description: "Gemini CLI" },
          { label: "Agent Review", description: "@code-reviewer" },
          { label: "Skip", description: "No review" }
        ]
      }
    ]
  })
}
```

---
@@ -1,10 +1,14 @@
---
name: workflow:lite-lite-lite
description: Ultra-lightweight multi-tool analysis and direct execution. No artifacts for simple tasks; auto-creates planning docs in .workflow/.scratchpad/ for complex tasks. Auto tool selection based on task analysis, user-driven iteration via AskUser.
argument-hint: "<task description>"
argument-hint: "[-y|--yes] <task description>"
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Write(*), mcp__ace-tool__search_context(*), mcp__ccw-tools__write_file(*)
---

## Auto Mode

When `--yes` or `-y`: Skip clarification questions, auto-select tools, execute directly with recommended settings.

# Ultra-Lite Multi-Tool Workflow

## Quick Start

@@ -1,7 +1,7 @@
---
name: lite-plan
description: Lightweight interactive planning workflow with in-memory planning, code exploration, and execution handoff to lite-execute after user confirmation
argument-hint: "[-e|--explore] \"task description\"|file.md"
argument-hint: "[-y|--yes] [-e|--explore] \"task description\"|file.md"
allowed-tools: TodoWrite(*), Task(*), SlashCommand(*), AskUserQuestion(*)
---

@@ -25,10 +25,30 @@ Intelligent lightweight planning command with dynamic workflow adaptation based
/workflow:lite-plan [FLAGS] <TASK_DESCRIPTION>

# Flags
-y, --yes        Skip all confirmations (auto mode)
-e, --explore    Force code exploration phase (overrides auto-detection)

# Arguments
<task-description>    Task description or path to .md file (required)

# Examples
/workflow:lite-plan "Implement JWT authentication"               # Interactive mode
/workflow:lite-plan --yes "Implement JWT authentication"         # Auto mode (no confirmations)
/workflow:lite-plan -y -e "Optimize database query performance"  # Auto mode + force exploration
```

## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Clarification Questions**: Skipped (no clarification phase)
- **Plan Confirmation**: Auto-selected "Allow"
- **Execution Method**: Auto-selected "Auto"
- **Code Review**: Auto-selected "Skip"

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
const forceExplore = $ARGUMENTS.includes('--explore') || $ARGUMENTS.includes('-e')
```
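
How `-e` combines with exploration auto-detection is not shown in this hunk; a minimal sketch, with an assumed keyword heuristic and a hypothetical `taskDescription` variable:

```javascript
// Illustrative only: -e forces exploration; otherwise infer from the task text
const needsExploration =
  forceExplore || /refactor|optimi[sz]e|performance|architecture/i.test(taskDescription)
```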

## Execution Process

@@ -323,8 +343,16 @@ explorations.forEach(exp => {
// - Produce dedupedClarifications with unique intents only
const dedupedClarifications = intelligentMerge(allClarifications)

// Multi-round clarification: batch questions (max 4 per round)
if (dedupedClarifications.length > 0) {
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Skip clarification phase
  console.log(`[--yes] Skipping ${dedupedClarifications.length} clarification questions`)
  console.log(`Proceeding to planning with exploration results...`)
  // Continue to Phase 3
} else if (dedupedClarifications.length > 0) {
  // Interactive mode: Multi-round clarification
  const BATCH_SIZE = 4
  const totalRounds = Math.ceil(dedupedClarifications.length / BATCH_SIZE)

@@ -497,42 +525,62 @@ ${plan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.file})`).join('\n')}

**Step 4.2: Collect Confirmation**
```javascript
// Note: Execution "Other" option allows specifying CLI tools from ~/.claude/cli-tools.json
AskUserQuestion({
  questions: [
    {
      question: `Confirm plan? (${plan.tasks.length} tasks, ${plan.complexity})`,
      header: "Confirm",
      multiSelect: true,
      options: [
        { label: "Allow", description: "Proceed as-is" },
        { label: "Modify", description: "Adjust before execution" },
        { label: "Cancel", description: "Abort workflow" }
      ]
    },
    {
      question: "Execution method:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent", description: "@code-developer agent" },
        { label: "Codex", description: "codex CLI tool" },
        { label: "Auto", description: `Auto: ${plan.complexity === 'Low' ? 'Agent' : 'Codex'}` }
      ]
    },
    {
      question: "Code review after execution?",
      header: "Review",
      multiSelect: false,
      options: [
        { label: "Gemini Review", description: "Gemini CLI review" },
        { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" },
        { label: "Agent Review", description: "@code-reviewer agent" },
        { label: "Skip", description: "No review" }
      ]
    }
  ]
})
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

let userSelection

if (autoYes) {
  // Auto mode: Use defaults
  console.log(`[--yes] Auto-confirming plan:`)
  console.log(`  - Confirmation: Allow`)
  console.log(`  - Execution: Auto`)
  console.log(`  - Review: Skip`)

  userSelection = {
    confirmation: "Allow",
    execution_method: "Auto",
    code_review_tool: "Skip"
  }
} else {
  // Interactive mode: Ask user
  // Note: Execution "Other" option allows specifying CLI tools from ~/.claude/cli-tools.json
  userSelection = AskUserQuestion({
    questions: [
      {
        question: `Confirm plan? (${plan.tasks.length} tasks, ${plan.complexity})`,
        header: "Confirm",
        multiSelect: false,
        options: [
          { label: "Allow", description: "Proceed as-is" },
          { label: "Modify", description: "Adjust before execution" },
          { label: "Cancel", description: "Abort workflow" }
        ]
      },
      {
        question: "Execution method:",
        header: "Execution",
        multiSelect: false,
        options: [
          { label: "Agent", description: "@code-developer agent" },
          { label: "Codex", description: "codex CLI tool" },
          { label: "Auto", description: `Auto: ${plan.complexity === 'Low' ? 'Agent' : 'Codex'}` }
        ]
      },
      {
        question: "Code review after execution?",
        header: "Review",
        multiSelect: false,
        options: [
          { label: "Gemini Review", description: "Gemini CLI review" },
          { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" },
          { label: "Agent Review", description: "@code-reviewer agent" },
          { label: "Skip", description: "No review" }
        ]
      }
    ]
  })
}
```

---
@@ -1,10 +1,14 @@
---
name: workflow:multi-cli-plan
description: Multi-CLI collaborative planning workflow with ACE context gathering and iterative cross-verification. Uses cli-discuss-agent for Gemini+Codex+Claude analysis to converge on optimal execution plan.
argument-hint: "<task description> [--max-rounds=3] [--tools=gemini,codex] [--mode=parallel|serial]"
argument-hint: "[-y|--yes] <task description> [--max-rounds=3] [--tools=gemini,codex] [--mode=parallel|serial]"
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Write(*), mcp__ace-tool__search_context(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-approve plan, use recommended solution and execution method (Agent, Skip review).

# Multi-CLI Collaborative Planning Command

## Quick Start

527 .claude/commands/workflow/plan-verify.md (new file)
@@ -0,0 +1,527 @@
---
name: plan-verify
description: Perform READ-ONLY verification analysis between IMPL_PLAN.md, task JSONs, and brainstorming artifacts. Generates structured report with quality gate recommendation. Does NOT modify any files.
argument-hint: "[optional: --session session-id]"
allowed-tools: Read(*), Write(*), Glob(*), Bash(*)
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Goal

Generate a comprehensive verification report that identifies inconsistencies, duplications, ambiguities, and underspecified items between action planning artifacts (`IMPL_PLAN.md`, `task.json`) and brainstorming artifacts (`role analysis documents`). This command MUST run only after `/workflow:plan` has successfully produced complete `IMPL_PLAN.md` and task JSON files.

**Output**: A structured Markdown report saved to `.workflow/active/WFS-{session}/.process/PLAN_VERIFICATION.md` containing:
- Executive summary with quality gate recommendation
- Detailed findings by severity (CRITICAL/HIGH/MEDIUM/LOW)
- Requirements coverage analysis
- Dependency integrity check
- Synthesis alignment validation
- Actionable remediation recommendations

## Operating Constraints

**STRICTLY READ-ONLY FOR SOURCE ARTIFACTS**:
- **MUST NOT** modify `IMPL_PLAN.md`, any `task.json` files, or brainstorming artifacts
- **MUST NOT** create or delete task files
- **MUST ONLY** write the verification report to `.process/PLAN_VERIFICATION.md`

**Synthesis Authority**: The `role analysis documents` are **authoritative** for requirements and design decisions. Any conflicts between IMPL_PLAN/tasks and synthesis are automatically CRITICAL and require adjustment of the plan/tasks—not reinterpretation of requirements.

**Quality Gate Authority**: The verification report provides a binding recommendation (BLOCK_EXECUTION / PROCEED_WITH_FIXES / PROCEED_WITH_CAUTION / PROCEED) based on objective severity criteria. User MUST review critical/high issues before proceeding with implementation.

## Execution Steps

### 1. Initialize Analysis Context

```bash
# Detect active workflow session
IF --session parameter provided:
    session_id = provided session
ELSE:
    # Auto-detect active session
    active_sessions = bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null)
    IF active_sessions is empty:
        ERROR: "No active workflow session found. Use --session <session-id>"
        EXIT
    ELSE IF active_sessions has multiple entries:
        # Use most recently modified session
        session_id = bash(ls -td .workflow/active/WFS-*/ 2>/dev/null | head -1 | xargs basename)
    ELSE:
        session_id = basename(active_sessions[0])

# Derive absolute paths
session_dir = .workflow/active/WFS-{session}
brainstorm_dir = session_dir/.brainstorming
task_dir = session_dir/.task
process_dir = session_dir/.process
session_file = session_dir/workflow-session.json

# Create .process directory if not exists (report output location)
IF NOT EXISTS(process_dir):
    bash(mkdir -p "{process_dir}")

# Validate required artifacts
# Note: "role analysis documents" refers to [role]/analysis.md files (e.g., product-manager/analysis.md)
SYNTHESIS_DIR = brainstorm_dir  # Contains role analysis files: */analysis.md
IMPL_PLAN = session_dir/IMPL_PLAN.md
TASK_FILES = Glob(task_dir/*.json)

# Abort if missing - in order of dependency
SESSION_FILE_EXISTS = EXISTS(session_file)
IF NOT SESSION_FILE_EXISTS:
    WARNING: "workflow-session.json not found. User intent alignment verification will be skipped."
    # Continue execution - this is optional context, not blocking

SYNTHESIS_FILES = Glob(brainstorm_dir/*/analysis.md)
IF SYNTHESIS_FILES.count == 0:
    ERROR: "No role analysis documents found in .brainstorming/*/analysis.md. Run /workflow:brainstorm:synthesis first"
    EXIT

IF NOT EXISTS(IMPL_PLAN):
    ERROR: "IMPL_PLAN.md not found. Run /workflow:plan first"
    EXIT

IF TASK_FILES.count == 0:
    ERROR: "No task JSON files found. Run /workflow:plan first"
    EXIT
```

### 2. Load Artifacts (Progressive Disclosure)

Load only minimal necessary context from each artifact:

**From workflow-session.json** (OPTIONAL - Primary Reference for User Intent):
- **ONLY IF EXISTS**: Load user intent context
- Original user prompt/intent (project or description field)
- User's stated goals and objectives
- User's scope definition
- **IF MISSING**: Set user_intent_analysis = "SKIPPED: workflow-session.json not found"

**From role analysis documents** (AUTHORITATIVE SOURCE):
- Functional Requirements (IDs, descriptions, acceptance criteria)
- Non-Functional Requirements (IDs, targets)
- Business Requirements (IDs, success metrics)
- Key Architecture Decisions
- Risk factors and mitigation strategies
- Implementation Roadmap (high-level phases)

**From IMPL_PLAN.md**:
- Summary and objectives
- Context Analysis
- Implementation Strategy
- Task Breakdown Summary
- Success Criteria
- Brainstorming Artifacts References (if present)

**From task.json files**:
- Task IDs
- Titles and descriptions
- Status
- Dependencies (depends_on, blocks)
- Context (requirements, focus_paths, acceptance, artifacts)
- Flow control (pre_analysis, implementation_approach)
- Meta (complexity, priority)

### 3. Build Semantic Models

Create internal representations (do not include raw artifacts in output):

**Requirements inventory**:
- Each functional/non-functional/business requirement with stable ID
- Requirement text, acceptance criteria, priority

**Architecture decisions inventory**:
- ADRs from synthesis
- Technology choices
- Data model references

**Task coverage mapping** (a sketch follows after this list):
- Map each task to one or more requirements (by ID reference or keyword inference)
- Map each requirement to covering tasks

**Dependency graph**:
- Task-to-task dependencies (depends_on, blocks)
- Requirement-level dependencies (from synthesis)
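
A minimal sketch of the coverage mapping, assuming requirement IDs such as `FR-01` appear verbatim in task text (the matching heuristic is illustrative, not part of this spec):

```javascript
// Illustrative only: map requirements to tasks by ID mention
function buildCoverage(requirements, tasks) {
  const coverage = new Map(requirements.map(r => [r.id, []]))
  for (const task of tasks) {
    // Concatenate the searchable text of one task
    const text = JSON.stringify(task.context || {}) + task.title + (task.description || '')
    for (const req of requirements) {
      if (text.includes(req.id)) coverage.get(req.id).push(task.id)
    }
  }
  return coverage // requirement ID -> covering task IDs ([] = orphaned requirement)
}
```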

### 4. Detection Passes (Token-Efficient Analysis)

**Token Budget Strategy**:
- **Total Limit**: 50 findings maximum (aggregate remainder in overflow summary)
- **Priority Allocation**: CRITICAL (unlimited) → HIGH (15) → MEDIUM (20) → LOW (15)
- **Early Exit**: If CRITICAL findings > 0 in User Intent/Requirements Coverage, skip LOW/MEDIUM priority checks

**Execution Order** (Process in sequence; skip if token budget exhausted; a budget-enforcement sketch follows this list):

1. **Tier 1 (CRITICAL Path)**: A, B, C - User intent, coverage, consistency (process fully)
2. **Tier 2 (HIGH Priority)**: D, E - Dependencies, synthesis alignment (limit 15 findings total)
3. **Tier 3 (MEDIUM Priority)**: F - Specification quality (limit 20 findings)
4. **Tier 4 (LOW Priority)**: G, H - Duplication, feasibility (limit 15 findings total)
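
A compact sketch of how these caps could be enforced while collecting findings (the data shapes are assumed; the caps mirror the allocation above):

```javascript
// Illustrative only: enforce the per-severity finding caps
const CAPS = { CRITICAL: Infinity, HIGH: 15, MEDIUM: 20, LOW: 15 }
const TOTAL_LIMIT = 50

function addFinding(findings, overflow, finding) {
  const sameSeverity = findings.filter(f => f.severity === finding.severity).length
  const withinCap = sameSeverity < CAPS[finding.severity]
  const withinTotal = findings.length < TOTAL_LIMIT || finding.severity === 'CRITICAL'
  if (withinCap && withinTotal) findings.push(finding)
  else overflow.push(finding) // aggregated into the overflow summary
}
```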

---

#### A. User Intent Alignment (CRITICAL - Tier 1)

- **Goal Alignment**: IMPL_PLAN objectives match user's original intent
- **Scope Drift**: Plan covers user's stated scope without unauthorized expansion
- **Success Criteria Match**: Plan's success criteria reflect user's expectations
- **Intent Conflicts**: Tasks contradicting user's original objectives

#### B. Requirements Coverage Analysis

- **Orphaned Requirements**: Requirements in synthesis with zero associated tasks
- **Unmapped Tasks**: Tasks with no clear requirement linkage
- **NFR Coverage Gaps**: Non-functional requirements (performance, security, scalability) not reflected in tasks

#### C. Consistency Validation

- **Requirement Conflicts**: Tasks contradicting synthesis requirements
- **Architecture Drift**: IMPL_PLAN architecture not matching synthesis ADRs
- **Terminology Drift**: Same concept named differently across IMPL_PLAN and tasks
- **Data Model Inconsistency**: Tasks referencing entities/fields not in synthesis data model

#### D. Dependency Integrity

- **Circular Dependencies**: Task A depends on B, B depends on C, C depends on A (see the sketch after this list)
- **Missing Dependencies**: Task requires outputs from another task but no explicit dependency
- **Broken Dependencies**: Task depends on non-existent task ID
- **Logical Ordering Issues**: Implementation tasks before foundational setup without dependency note
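
A minimal sketch of the broken- and circular-dependency checks over the task JSONs (field names follow the task schema above; the traversal itself is illustrative):

```javascript
// Illustrative only: detect broken and circular depends_on references
function checkDependencies(tasks) {
  const byId = new Map(tasks.map(t => [t.id, t]))
  const broken = []
  for (const t of tasks)
    for (const dep of t.depends_on || [])
      if (!byId.has(dep)) broken.push(`${t.id} -> ${dep} (non-existent)`)

  const state = new Map() // task id -> 'visiting' | 'done'
  const cycles = []
  function visit(id, path) {
    if (state.get(id) === 'visiting') { cycles.push([...path, id].join(' -> ')); return }
    if (state.get(id) === 'done' || !byId.has(id)) return
    state.set(id, 'visiting')
    for (const dep of byId.get(id).depends_on || []) visit(dep, [...path, id])
    state.set(id, 'done')
  }
  tasks.forEach(t => visit(t.id, []))
  return { broken, cycles }
}
```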

#### E. Synthesis Alignment

- **Priority Conflicts**: High-priority synthesis requirements mapped to low-priority tasks
- **Success Criteria Mismatch**: IMPL_PLAN success criteria not covering synthesis acceptance criteria
- **Risk Mitigation Gaps**: Critical risks in synthesis without corresponding mitigation tasks

#### F. Task Specification Quality

- **Ambiguous Focus Paths**: Tasks with vague or missing focus_paths
- **Underspecified Acceptance**: Tasks without clear acceptance criteria
- **Missing Artifacts References**: Tasks not referencing relevant brainstorming artifacts in context.artifacts
- **Weak Flow Control**: Tasks without clear implementation_approach or pre_analysis steps
- **Missing Target Files**: Tasks without flow_control.target_files specification

#### G. Duplication Detection

- **Overlapping Task Scope**: Multiple tasks with nearly identical descriptions
- **Redundant Requirements Coverage**: Same requirement covered by multiple tasks without clear partitioning

#### H. Feasibility Assessment

- **Complexity Misalignment**: Task marked "simple" but requires multiple file modifications
- **Resource Conflicts**: Parallel tasks requiring same resources/files
- **Skill Gap Risks**: Tasks requiring skills not in team capability assessment (from synthesis)

### 5. Severity Assignment

Use this heuristic to prioritize findings (the gate decision derived from these counts is sketched after the list):

- **CRITICAL**:
  - Violates user's original intent (goal misalignment, scope drift)
  - Violates synthesis authority (requirement conflict)
  - Core requirement with zero coverage
  - Circular dependencies
  - Broken dependencies

- **HIGH**:
  - NFR coverage gaps
  - Priority conflicts
  - Missing risk mitigation tasks
  - Ambiguous acceptance criteria

- **MEDIUM**:
  - Terminology drift
  - Missing artifacts references
  - Weak flow control
  - Logical ordering issues

- **LOW**:
  - Style/wording improvements
  - Minor redundancy not affecting execution
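
The severity counts feed the quality gate directly; a small sketch of the decision mapping spelled out in the report template below:

```javascript
// Illustrative only: map severity counts to the quality gate recommendation
function qualityGate({ critical, high, medium }) {
  if (critical > 0) return 'BLOCK_EXECUTION'
  if (high > 0) return 'PROCEED_WITH_FIXES'
  if (medium > 0) return 'PROCEED_WITH_CAUTION'
  return 'PROCEED'
}
```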

### 6. Produce Compact Analysis Report

**Report Generation**: Generate report content and save to file.

Output a Markdown report with the following structure:

```markdown
# Plan Verification Report

**Session**: WFS-{session-id}
**Generated**: {timestamp}
**Artifacts Analyzed**: role analysis documents, IMPL_PLAN.md, {N} task files
**User Intent Analysis**: {user_intent_analysis or "SKIPPED: workflow-session.json not found"}

---

## Executive Summary

### Quality Gate Decision

| Metric | Value | Status |
|--------|-------|--------|
| Overall Risk Level | CRITICAL \| HIGH \| MEDIUM \| LOW | {status_emoji} |
| Critical Issues | {count} | 🔴 |
| High Issues | {count} | 🟠 |
| Medium Issues | {count} | 🟡 |
| Low Issues | {count} | 🟢 |

### Recommendation

**{RECOMMENDATION}**

**Decision Rationale**:
{brief explanation based on severity criteria}

**Quality Gate Criteria**:
- **BLOCK_EXECUTION**: Critical issues > 0 (must fix before proceeding)
- **PROCEED_WITH_FIXES**: Critical = 0, High > 0 (fix recommended before execution)
- **PROCEED_WITH_CAUTION**: Critical = 0, High = 0, Medium > 0 (proceed with awareness)
- **PROCEED**: Only Low issues or None (safe to execute)

---

## Findings Summary

| ID | Category | Severity | Location(s) | Summary | Recommendation |
|----|----------|----------|-------------|---------|----------------|
| C1 | Coverage | CRITICAL | synthesis:FR-03 | Requirement "User auth" has zero task coverage | Add authentication implementation task |
| H1 | Consistency | HIGH | IMPL-1.2 vs synthesis:ADR-02 | Task uses REST while synthesis specifies GraphQL | Align task with ADR-02 decision |
| M1 | Specification | MEDIUM | IMPL-2.1 | Missing context.artifacts reference | Add @synthesis reference |
| L1 | Duplication | LOW | IMPL-3.1, IMPL-3.2 | Similar scope | Consider merging |

(Generate stable IDs prefixed by severity initial: C/H/M/L + number)

---

## User Intent Alignment Analysis

{IF user_intent_analysis != "SKIPPED"}

### Goal Alignment
- **User Intent**: {user_original_intent}
- **IMPL_PLAN Objectives**: {plan_objectives}
- **Alignment Status**: {ALIGNED/MISALIGNED/PARTIAL}
- **Findings**: {specific alignment issues}

### Scope Verification
- **User Scope**: {user_defined_scope}
- **Plan Scope**: {plan_actual_scope}
- **Drift Detection**: {NONE/MINOR/MAJOR}
- **Findings**: {specific scope issues}

{ELSE}
> ⚠️ User intent alignment analysis was skipped because workflow-session.json was not found.

{END IF}

---

## Requirements Coverage Analysis

### Functional Requirements

| Requirement ID | Requirement Summary | Has Task? | Task IDs | Priority Match | Notes |
|----------------|---------------------|-----------|----------|----------------|-------|
| FR-01 | User authentication | Yes | IMPL-1.1, IMPL-1.2 | Match | Complete |
| FR-02 | Data export | Yes | IMPL-2.3 | Mismatch | High req → Med priority task |
| FR-03 | Profile management | No | - | - | **CRITICAL: Zero coverage** |

### Non-Functional Requirements

| Requirement ID | Requirement Summary | Has Task? | Task IDs | Notes |
|----------------|---------------------|-----------|----------|-------|
| NFR-01 | Response time <200ms | No | - | **HIGH: No performance tasks** |
| NFR-02 | Security compliance | Yes | IMPL-4.1 | Complete |

### Business Requirements

| Requirement ID | Requirement Summary | Has Task? | Task IDs | Notes |
|----------------|---------------------|-----------|----------|-------|
| BR-01 | Launch by Q2 | Yes | IMPL-1.* through IMPL-5.* | Timeline realistic |

### Coverage Metrics

| Requirement Type | Total | Covered | Coverage % |
|------------------|-------|---------|------------|
| Functional | {count} | {count} | {percent}% |
| Non-Functional | {count} | {count} | {percent}% |
| Business | {count} | {count} | {percent}% |
| **Overall** | **{total}** | **{covered}** | **{percent}%** |

---

## Dependency Integrity

### Dependency Graph Analysis

**Circular Dependencies**: {None or List}

**Broken Dependencies**:
- IMPL-2.3 depends on "IMPL-2.4" (non-existent)

**Missing Dependencies**:
- IMPL-5.1 (integration test) has no dependency on IMPL-1.* (implementation tasks)

**Logical Ordering Issues**:
{List or "None detected"}

---

## Synthesis Alignment Issues

| Issue Type | Synthesis Reference | IMPL_PLAN/Task | Impact | Recommendation |
|------------|---------------------|----------------|--------|----------------|
| Architecture Conflict | synthesis:ADR-01 (JWT auth) | IMPL_PLAN uses session cookies | HIGH | Update IMPL_PLAN to use JWT |
| Priority Mismatch | synthesis:FR-02 (High) | IMPL-2.3 (Medium) | MEDIUM | Elevate task priority |
| Missing Risk Mitigation | synthesis:Risk-03 (API rate limits) | No mitigation tasks | HIGH | Add rate limiting implementation task |

---

## Task Specification Quality

### Aggregate Statistics

| Quality Dimension | Tasks Affected | Percentage |
|-------------------|----------------|------------|
| Missing Artifacts References | {count} | {percent}% |
| Weak Flow Control | {count} | {percent}% |
| Missing Target Files | {count} | {percent}% |
| Ambiguous Focus Paths | {count} | {percent}% |

### Sample Issues

- **IMPL-1.2**: No context.artifacts reference to synthesis
- **IMPL-3.1**: Missing flow_control.target_files specification
- **IMPL-4.2**: Vague focus_paths ["src/"] - needs refinement

---

## Feasibility Concerns

| Concern | Tasks Affected | Issue | Recommendation |
|---------|----------------|-------|----------------|
| Skill Gap | IMPL-6.1, IMPL-6.2 | Requires Kubernetes expertise not in team | Add training task or external consultant |
| Resource Conflict | IMPL-3.1, IMPL-3.2 | Both modify src/auth/service.ts in parallel | Add dependency or serialize |

---

## Detailed Findings by Severity

### CRITICAL Issues ({count})

{Detailed breakdown of each critical issue with location, impact, and recommendation}

### HIGH Issues ({count})

{Detailed breakdown of each high issue with location, impact, and recommendation}

### MEDIUM Issues ({count})

{Detailed breakdown of each medium issue with location, impact, and recommendation}

### LOW Issues ({count})

{Detailed breakdown of each low issue with location, impact, and recommendation}

---

## Metrics Summary

| Metric | Value |
|--------|-------|
| Total Requirements | {count} ({functional} functional, {nonfunctional} non-functional, {business} business) |
| Total Tasks | {count} |
| Overall Coverage | {percent}% ({covered}/{total} requirements with ≥1 task) |
| Critical Issues | {count} |
| High Issues | {count} |
| Medium Issues | {count} |
| Low Issues | {count} |
| Total Findings | {total_findings} |

---

## Remediation Recommendations

### Priority Order

1. **CRITICAL** - Must fix before proceeding
2. **HIGH** - Fix before execution
3. **MEDIUM** - Fix during or after implementation
4. **LOW** - Optional improvements

### Next Steps

Based on the quality gate recommendation ({RECOMMENDATION}):

{IF BLOCK_EXECUTION}
**🛑 BLOCK EXECUTION**

You must resolve all CRITICAL issues before proceeding with implementation:

1. Review each critical issue in detail
2. Determine remediation approach (modify IMPL_PLAN.md, update task.json, or add new tasks)
3. Apply fixes systematically
4. Re-run verification to confirm resolution

{ELSE IF PROCEED_WITH_FIXES}
**⚠️ PROCEED WITH FIXES RECOMMENDED**

No critical issues detected, but HIGH issues exist. Recommended workflow:

1. Review high-priority issues
2. Apply fixes before execution for optimal results
3. Re-run verification (optional)

{ELSE IF PROCEED_WITH_CAUTION}
**✅ PROCEED WITH CAUTION**

Only MEDIUM issues detected. You may proceed with implementation:

- Address medium issues during or after implementation
- Maintain awareness of identified concerns

{ELSE}
**✅ PROCEED**

No significant issues detected. Safe to execute implementation workflow.

{END IF}

---

**Report End**
```

### 7. Save and Display Report

**Step 7.1: Save Report**:
```bash
report_path = ".workflow/active/WFS-{session}/.process/PLAN_VERIFICATION.md"
Write(report_path, full_report_content)
```

**Step 7.2: Display Summary to User**:
```bash
# Display executive summary in terminal
echo "=== Plan Verification Complete ==="
echo "Report saved to: {report_path}"
echo ""
echo "Quality Gate: {RECOMMENDATION}"
echo "Critical: {count} | High: {count} | Medium: {count} | Low: {count}"
echo ""
echo "Next: Review full report for detailed findings and recommendations"
```

**Step 7.3: Completion**:
- Report is saved to `.process/PLAN_VERIFICATION.md`
- User can review findings and decide on remediation approach
- No automatic modifications are made to source artifacts
- User can manually apply fixes or use separate remediation command (if available)
@@ -1,10 +1,14 @@
---
name: plan
description: 5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs
argument-hint: "\"text description\"|file.md"
argument-hint: "[-y|--yes] \"text description\"|file.md"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-continue all phases (skip confirmations), use recommended conflict resolutions.

# Workflow Plan Command (/workflow:plan)

## Coordinator Role

@@ -318,11 +322,11 @@ Tasks generated: [count]
Plan: .workflow/active/[sessionId]/IMPL_PLAN.md

Recommended Next Steps:
1. /workflow:action-plan-verify --session [sessionId]  # Verify plan quality before execution
1. /workflow:plan-verify --session [sessionId]  # Verify plan quality before execution
2. /workflow:status  # Review task breakdown
3. /workflow:execute  # Start implementation (after verification)

Quality Gate: Consider running /workflow:action-plan-verify to catch issues early
Quality Gate: Consider running /workflow:plan-verify to catch issues early
```

## TodoWrite Pattern

@@ -546,6 +550,6 @@ CONSTRAINTS: [Limitations or boundaries]
- `/workflow:tools:task-generate-agent` - Phase 4: Generate task JSON files with agent-driven approach

**Follow-up Commands**:
- `/workflow:action-plan-verify` - Recommended: Verify plan quality and catch issues before execution
- `/workflow:plan-verify` - Recommended: Verify plan quality and catch issues before execution
- `/workflow:status` - Review task breakdown and current progress
- `/workflow:execute` - Begin implementation of generated tasks

@@ -1,7 +1,7 @@
---
name: replan
description: Interactive workflow replanning with session-level artifact updates and boundary clarification through guided questioning
argument-hint: "[--session session-id] [task-id] \"requirements\"|file.md [--interactive]"
argument-hint: "[-y|--yes] [--session session-id] [task-id] \"requirements\"|file.md [--interactive]"
allowed-tools: Read(*), Write(*), Edit(*), TodoWrite(*), Glob(*), Bash(*)
---

@@ -117,10 +117,48 @@ const taskId = taskIdMatch?.[1]

---

### Auto Mode Support

When `--yes` or `-y` flag is used, the command skips interactive clarification and uses safe defaults:

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```

**Auto Mode Defaults**:
- **Modification Scope**: `tasks_only` (safest - only update task details)
- **Affected Modules**: All modules related to the task
- **Task Changes**: `update_only` (no structural changes)
- **Dependency Changes**: `no` (preserve existing dependencies)
- **User Confirmation**: Auto-confirm execution

**Note**: `--interactive` flag overrides `--yes` flag (forces interactive mode).

---

### Phase 2: Interactive Requirement Clarification

**Purpose**: Define modification scope through guided questioning

**Auto Mode Check**:
```javascript
if (autoYes && !interactive) {
  // Use defaults and skip to Phase 3
  console.log(`[--yes] Using safe defaults for replan:`)
  console.log(`  - Scope: tasks_only`)
  console.log(`  - Changes: update_only`)
  console.log(`  - Dependencies: preserve existing`)

  userSelections = {
    scope: 'tasks_only',
    modules: 'all_affected',
    task_changes: 'update_only',
    dependency_changes: false
  }
  // Proceed to Phase 3
}
```

#### Session Mode Questions

**Q1: Modification Scope**

@@ -228,10 +266,29 @@ interface ImpactAnalysis {
**Step 3.3: User Confirmation**

```javascript
Options:
- Confirm Execute: Apply all modifications
- Adjust Plan: Re-answer the questions to adjust scope
- Cancel: Abandon this replan
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Auto-confirm execution
  console.log(`[--yes] Auto-confirming replan execution`)
  userConfirmation = 'Confirm Execute'
  // Proceed to Phase 4
} else {
  // Interactive mode: Ask user
  AskUserQuestion({
    questions: [{
      question: "Modification plan generated. Please confirm:",
      header: "Confirm",
      options: [
        { label: "Confirm Execute", description: "Apply all modifications" },
        { label: "Adjust Plan", description: "Re-answer the questions to adjust scope" },
        { label: "Cancel", description: "Abandon this replan" }
      ],
      multiSelect: false
    }]
  })
}
```

**Output**: Modification plan confirmed or adjusted

@@ -605,6 +605,4 @@ Use `ccw view` to open the workflow dashboard in browser:

```bash
ccw view
```


```
@@ -1,8 +1,10 @@
---
name: complete
description: Mark active workflow session as complete, archive with lessons learned, update manifest, remove active flag
argument-hint: "[-y|--yes] [--detailed]"
examples:
  - /workflow:session:complete
  - /workflow:session:complete --yes
  - /workflow:session:complete --detailed
---

@@ -139,20 +141,41 @@ test -f .workflow/project-tech.json || echo "SKIP"
After successful archival, prompt user to capture learnings:

```javascript
AskUserQuestion({
  questions: [{
    question: "Would you like to solidify learnings from this session into project guidelines?",
    header: "Solidify",
    options: [
      { label: "Yes, solidify now", description: "Extract learnings and update project-guidelines.json" },
      { label: "Skip", description: "Archive complete, no learnings to capture" }
    ],
    multiSelect: false
  }]
})
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Skip solidify
  console.log(`[--yes] Auto-selecting: Skip solidify`)
  console.log(`Session archived successfully.`)
  // Done - no solidify
} else {
  // Interactive mode: Ask user
  AskUserQuestion({
    questions: [{
      question: "Would you like to solidify learnings from this session into project guidelines?",
      header: "Solidify",
      options: [
        { label: "Yes, solidify now", description: "Extract learnings and update project-guidelines.json" },
        { label: "Skip", description: "Archive complete, no learnings to capture" }
      ],
      multiSelect: false
    }]
  })

  // **If "Yes, solidify now"**: Execute `/workflow:session:solidify` with the archived session ID.
}
```

**If "Yes, solidify now"**: Execute `/workflow:session:solidify` with the archived session ID.
## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Solidify Learnings**: Auto-selected "Skip" (archive only, no solidify)

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```

**Output**:
```

@@ -1,14 +1,18 @@
---
name: solidify
description: Crystallize session learnings and user-defined constraints into permanent project guidelines
argument-hint: "[--type <convention|constraint|learning>] [--category <category>] \"rule or insight\""
argument-hint: "[-y|--yes] [--type <convention|constraint|learning>] [--category <category>] \"rule or insight\""
examples:
  - /workflow:session:solidify "Use functional components for all React code" --type convention
  - /workflow:session:solidify "No direct DB access from controllers" --type constraint --category architecture
  - /workflow:session:solidify -y "No direct DB access from controllers" --type constraint --category architecture
  - /workflow:session:solidify "Cache invalidation requires event sourcing" --type learning --category architecture
  - /workflow:session:solidify --interactive
---

## Auto Mode

When `--yes` or `-y`: Auto-categorize and add guideline without confirmation.

# Session Solidify Command (/workflow:session:solidify)

## Overview

@@ -268,15 +268,19 @@ SlashCommand(command="/workflow:tools:conflict-resolution --session [sessionId]

### Phase 5: TDD Task Generation

**Step 5.1: Execute** - TDD task generation via action-planning-agent
**Step 5.1: Execute** - TDD task generation via action-planning-agent with Phase 0 user configuration

```javascript
SlashCommand(command="/workflow:tools:task-generate-tdd --session [sessionId]")
```

**Note**: CLI tool usage is determined semantically from user's task description.
**Note**: Phase 0 now includes:
- Supplementary materials collection (file paths or inline content)
- Execution method preference (Agent/Hybrid/CLI)
- CLI tool preference (Codex/Gemini/Qwen/Auto)
- These preferences are passed to agent for task generation

**Parse**: Extract feature count, task count (not chain count - tasks now contain internal TDD cycles)
**Parse**: Extract feature count, task count (not chain count - tasks now contain internal TDD cycles), CLI execution IDs assigned

**Validate**:
- IMPL_PLAN.md exists (unified plan with TDD Implementation Tasks section)
@@ -284,15 +288,24 @@ SlashCommand(command="/workflow:tools:task-generate-tdd --session [sessionId]")
- TODO_LIST.md exists with internal TDD phase indicators
- Each IMPL task includes:
  - `meta.tdd_workflow: true`
  - `flow_control.implementation_approach` with 3 steps (red/green/refactor)
  - `meta.cli_execution_id: {session_id}-{task_id}`
  - `meta.cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
  - `flow_control.implementation_approach` with exactly 3 steps (red/green/refactor)
  - Green phase includes test-fix-cycle configuration
  - `context.focus_paths`: absolute or clear relative paths (enhanced with exploration critical_files)
  - `flow_control.pre_analysis`: includes exploration integration_points analysis
- IMPL_PLAN.md contains workflow_type: "tdd" in frontmatter
- Task count ≤10 (compliance with task limit)
- User configuration applied:
  - If executionMethod == "cli" or "hybrid": command field added to steps
  - CLI tool preference reflected in execution guidance
- Task count ≤18 (compliance with hard limit)

**Red Flag Detection** (Non-Blocking Warnings):
- Task count >10: `⚠️ High task count may indicate insufficient decomposition`
- Task count >18: `⚠️ Task count exceeds hard limit - request re-scope`
- Missing cli_execution_id: `⚠️ Task lacks CLI execution ID for resume support`
- Missing test-fix-cycle: `⚠️ Green phase lacks auto-revert configuration`
- Generic task names: `⚠️ Vague task names suggest unclear TDD cycles`
- Missing focus_paths: `⚠️ Task lacks clear file scope for implementation`

**Action**: Log warnings to `.workflow/active/[sessionId]/.process/tdd-warnings.log` (non-blocking)
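
For orientation, a task JSON that passes these checks carries fields along these lines (an illustrative sketch; exact values and the placement of the test-fix-cycle key follow the generated files, not this snippet):

```javascript
// Illustrative TDD task metadata satisfying the validation checklist above.
// Values are placeholders; real files live in .workflow/active/[sessionId]/.task/
const implTask = {
  id: "IMPL-1",
  meta: {
    tdd_workflow: true,
    cli_execution_id: "WFS-auth-IMPL-1",   // format: {session_id}-{task_id}
    cli_execution: { strategy: "new" }     // or resume/fork/merge_fork
  },
  context: { focus_paths: ["./src/auth"] },
  flow_control: {
    implementation_approach: [
      { tdd_phase: "red" },
      { tdd_phase: "green", test_fix_cycle: { max_iterations: 3 } }, // key name illustrative
      { tdd_phase: "refactor" }
    ]
  }
}
```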

@@ -338,14 +351,22 @@ SlashCommand(command="/workflow:tools:task-generate-tdd --session [sessionId]")
1. Each task contains complete TDD workflow (Red-Green-Refactor internally)
2. Task structure validation:
   - `meta.tdd_workflow: true` in all IMPL tasks
   - `meta.cli_execution_id` present (format: {session_id}-{task_id})
   - `meta.cli_execution` strategy assigned (new/resume/fork/merge_fork)
   - `flow_control.implementation_approach` has exactly 3 steps
   - Each step has correct `tdd_phase`: "red", "green", "refactor"
   - `context.focus_paths` are absolute or clear relative paths
   - `flow_control.pre_analysis` includes exploration integration analysis
3. Dependency validation:
   - Sequential features: IMPL-N depends_on ["IMPL-(N-1)"] if needed
   - Complex features: IMPL-N.M depends_on ["IMPL-N.(M-1)"] for subtasks
   - CLI execution strategies correctly assigned based on dependency graph
4. Agent assignment: All IMPL tasks use @code-developer
5. Test-fix cycle: Green phase step includes test-fix-cycle logic with max_iterations
6. Task count: Total tasks ≤10 (simple + subtasks)
6. Task count: Total tasks ≤18 (simple + subtasks hard limit)
7. User configuration:
   - Execution method choice reflected in task structure
   - CLI tool preference documented in implementation guidance (if CLI selected)

**Red Flag Checklist** (from TDD best practices):
- [ ] No tasks skip Red phase (`tdd_phase: "red"` exists in step 1)

@@ -371,7 +392,7 @@ ls -la .workflow/active/[sessionId]/.task/IMPL-*.json
```bash
echo "IMPL tasks: $(ls .workflow/active/[sessionId]/.task/IMPL-*.json 2>/dev/null | wc -l)"

# Sample task structure verification (first task)
jq '{id, tdd: .meta.tdd_workflow, phases: [.flow_control.implementation_approach[].tdd_phase]}' \
jq '{id, tdd: .meta.tdd_workflow, cli_id: .meta.cli_execution_id, phases: [.flow_control.implementation_approach[].tdd_phase]}' \
  "$(ls .workflow/active/[sessionId]/.task/IMPL-*.json | head -1)"
```
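
When the sampled task is well-formed, the jq probe above prints an object of roughly this shape (illustrative values, rendered here as a JavaScript literal):

```javascript
// Expected shape of the sample-verification output above (illustrative values).
const sampleProbe = {
  id: "IMPL-1",
  tdd: true,
  cli_id: "WFS-auth-IMPL-1",
  phases: ["red", "green", "refactor"]
}
```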

@@ -379,8 +400,9 @@ jq '{id, tdd: .meta.tdd_workflow, phases: [.flow_control.implementation_approach
| Evidence Type | Verification Method | Pass Criteria |
|---------------|---------------------|---------------|
| File existence | `ls -la` artifacts | All files present |
| Task count | Count IMPL-*.json | Count matches claims |
| TDD structure | jq sample extraction | Shows red/green/refactor |
| Task count | Count IMPL-*.json | Count matches claims (≤18) |
| TDD structure | jq sample extraction | Shows red/green/refactor + cli_execution_id |
| CLI execution IDs | jq extraction | All tasks have cli_execution_id assigned |
| Warning log | Check tdd-warnings.log | Logged (may be empty) |

**Return Summary**:
@@ -393,7 +415,7 @@ Total tasks: [M] (1 task per simple feature + subtasks for complex features)
```
Task breakdown:
- Simple features: [K] tasks (IMPL-1 to IMPL-K)
- Complex features: [L] features with [P] subtasks
- Total task count: [M] (within 10-task limit)
- Total task count: [M] (within 18-task hard limit)

Structure:
- IMPL-1: {Feature 1 Name} (Internal: Red → Green → Refactor)

@@ -407,22 +429,31 @@ Plans generated:
- Unified Implementation Plan: .workflow/active/[sessionId]/IMPL_PLAN.md
  (includes TDD Implementation Tasks section with workflow_type: "tdd")
- Task List: .workflow/active/[sessionId]/TODO_LIST.md
  (with internal TDD phase indicators)
  (with internal TDD phase indicators and CLI execution strategies)
- Task JSONs: .workflow/active/[sessionId]/.task/IMPL-*.json
  (with cli_execution_id and execution strategies for resume support)

TDD Configuration:
- Each task contains complete Red-Green-Refactor cycle
- Green phase includes test-fix cycle (max 3 iterations)
- Auto-revert on max iterations reached
- CLI execution strategies: new/resume/fork/merge_fork based on dependency graph

User Configuration Applied:
- Execution Method: [agent|hybrid|cli]
- CLI Tool Preference: [codex|gemini|qwen|auto]
- Supplementary Materials: [included|none]
- Task generation follows cli-tools-usage.md guidelines

⚠️ ACTION REQUIRED: Before execution, ensure you understand WHY each Red phase test is expected to fail.
This is crucial for valid TDD - if you don't know why the test fails, you can't verify it tests the right thing.

Recommended Next Steps:
1. /workflow:action-plan-verify --session [sessionId]  # Verify TDD plan quality and dependencies
2. /workflow:execute --session [sessionId]             # Start TDD execution
1. /workflow:plan-verify --session [sessionId]         # Verify TDD plan quality and dependencies
2. /workflow:execute --session [sessionId]             # Start TDD execution with CLI strategies
3. /workflow:tdd-verify [sessionId]                    # Post-execution TDD compliance check

Quality Gate: Consider running /workflow:action-plan-verify to validate TDD task structure and dependencies
Quality Gate: Consider running /workflow:plan-verify to validate TDD task structure, dependencies, and CLI execution strategies
```

## TodoWrite Pattern

@@ -500,7 +531,7 @@ TDD Workflow Orchestrator
      │
      └─ Phase 6: TDD Structure Validation
         └─ Internal validation + summary returned
         └─ Recommend: /workflow:action-plan-verify
         └─ Recommend: /workflow:plan-verify

Key Points:
• ← ATTACHED: SlashCommand attaches sub-tasks to orchestrator TodoWrite

@@ -547,9 +578,11 @@ Convert user input to TDD-structured format:
| Parsing failure | Empty/malformed output | Retry once, then report |
| Missing context-package | File read error | Re-run `/workflow:tools:context-gather` |
| Invalid task JSON | jq parse error | Report malformed file path |
| High task count (>10) | Count validation | Log warning, continue (non-blocking) |
| Task count exceeds 18 | Count validation ≥19 | Request re-scope, split into multiple sessions |
| Missing cli_execution_id | All tasks lack ID | Regenerate tasks with phase 0 user config |
| Test-context missing | File not found | Re-run `/workflow:tools:test-context-gather` |
| Phase timeout | No response | Retry phase, check CLI connectivity |
| CLI tool not available | Tool not in cli-tools.json | Fall back to alternative preferred tool |

## Related Commands

@@ -565,7 +598,7 @@ Convert user input to TDD-structured format:
- `/workflow:tools:task-generate-tdd` - Phase 5: Generate TDD tasks (CLI tool usage determined semantically)

**Follow-up Commands**:
- `/workflow:action-plan-verify` - Recommended: Verify TDD plan quality and structure before execution
- `/workflow:plan-verify` - Recommended: Verify TDD plan quality and structure before execution
- `/workflow:status` - Review TDD task breakdown
- `/workflow:execute` - Begin TDD implementation
- `/workflow:tdd-verify` - Post-execution: Verify TDD compliance and generate quality report

@@ -574,7 +607,7 @@ Convert user input to TDD-structured format:
| Situation | Recommended Command | Purpose |
|-----------|---------------------|---------|
| First time planning | `/workflow:action-plan-verify` | Validate task structure before execution |
| First time planning | `/workflow:plan-verify` | Validate task structure before execution |
| Warnings in tdd-warnings.log | Review log, refine tasks | Address Red Flags before proceeding |
| High task count warning | Consider `/workflow:session:start` | Split into focused sub-sessions |
| Ready to implement | `/workflow:execute` | Begin TDD Red-Green-Refactor cycles |

@@ -587,7 +620,7 @@ Convert user input to TDD-structured format:
```
/workflow:tdd-plan
    ↓
[Planning Complete] ──→ /workflow:action-plan-verify (recommended)
[Planning Complete] ──→ /workflow:plan-verify (recommended)
    ↓
[Verified/Ready] ─────→ /workflow:execute
    ↓
```

@@ -1,214 +1,301 @@
---
name: tdd-verify
description: Verify TDD workflow compliance against Red-Green-Refactor cycles, generate quality report with coverage analysis
argument-hint: "[optional: WFS-session-id]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(gemini:*)
description: Verify TDD workflow compliance against Red-Green-Refactor cycles. Generates quality report with coverage analysis and quality gate recommendation. Orchestrates sub-commands for comprehensive validation.
argument-hint: "[optional: --session WFS-session-id]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Write(*), Bash(*), Glob(*)
---

# TDD Verification Command (/workflow:tdd-verify)

## Goal

Verify TDD workflow execution quality by validating Red-Green-Refactor cycle compliance, test coverage completeness, and task chain structure integrity. This command orchestrates multiple analysis phases and generates a comprehensive compliance report with a quality gate recommendation.

**Output**: A structured Markdown report saved to `.workflow/active/WFS-{session}/TDD_COMPLIANCE_REPORT.md` containing:
- Executive summary with compliance score and quality gate recommendation
- Task chain validation (TEST → IMPL → REFACTOR structure)
- Test coverage metrics (line, branch, function)
- Red-Green-Refactor cycle verification
- Best practices adherence assessment
- Actionable improvement recommendations

## Operating Constraints

**ORCHESTRATOR MODE**:
- This command coordinates multiple sub-commands (`/workflow:tools:tdd-coverage-analysis`, `ccw cli`)
- MAY write output files: TDD_COMPLIANCE_REPORT.md (primary report), .process/*.json (intermediate artifacts)
- MUST NOT modify source task files or implementation code
- MUST NOT create or delete tasks in the workflow

**Quality Gate Authority**: The compliance report provides a binding recommendation (BLOCK_MERGE / REQUIRE_FIXES / PROCEED_WITH_CAVEATS / APPROVED) based on objective compliance criteria.

## Coordinator Role

**This command is a pure orchestrator**: Execute 4 phases to verify TDD workflow compliance, test coverage, and Red-Green-Refactor cycle execution.

## Core Responsibilities
- Verify TDD task chain structure
- Analyze test coverage
- Validate TDD cycle execution
- Generate compliance report
- Verify TDD task chain structure (TEST → IMPL → REFACTOR)
- Analyze test coverage metrics
- Validate TDD cycle execution quality
- Generate compliance report with quality gate recommendation

## Execution Process

```
Input Parsing:
└─ Decision (session argument):
   ├─ session-id provided → Use provided session
   └─ No session-id → Auto-detect active session
   ├─ --session provided → Use provided session
   └─ No session → Auto-detect active session

Phase 1: Session Discovery
├─ Validate session directory exists
└─ TodoWrite: Mark phase 1 completed
Phase 1: Session Discovery & Validation
├─ Detect or validate session directory
├─ Check required artifacts exist (.task/*.json, .summaries/*)
└─ ERROR if invalid or incomplete

Phase 2: Task Chain Validation
Phase 2: Task Chain Structure Validation
├─ Load all task JSONs from .task/
├─ Extract task IDs and group by feature
├─ Validate TDD structure:
│  ├─ TEST-N.M → IMPL-N.M → REFACTOR-N.M chain
│  ├─ Dependency verification
│  └─ Meta field validation (tdd_phase, agent)
└─ TodoWrite: Mark phase 2 completed
├─ Validate TDD structure: TEST-N.M → IMPL-N.M → REFACTOR-N.M
├─ Verify dependencies (depends_on)
├─ Validate meta fields (tdd_phase, agent)
└─ Extract chain validation data

Phase 3: Test Execution Analysis
└─ /workflow:tools:tdd-coverage-analysis
   ├─ Coverage metrics extraction
   ├─ TDD cycle verification
   └─ Compliance score calculation
Phase 3: Coverage & Cycle Analysis
├─ Call: /workflow:tools:tdd-coverage-analysis
├─ Parse: test-results.json, coverage-report.json, tdd-cycle-report.md
└─ Extract coverage metrics and TDD cycle verification

Phase 4: Compliance Report Generation
├─ Gemini analysis for comprehensive report
├─ Aggregate findings from Phases 1-3
├─ Calculate compliance score (0-100)
├─ Determine quality gate recommendation
├─ Generate TDD_COMPLIANCE_REPORT.md
└─ Return summary to user
└─ Display summary to user
```

## 4-Phase Execution

### Phase 1: Session Discovery
**Auto-detect or use provided session**
### Phase 1: Session Discovery & Validation

**Step 1.1: Detect Session**
```bash
# If session-id provided
sessionId = argument
IF --session parameter provided:
    session_id = provided session
ELSE:
    # Auto-detect active session
    active_sessions = bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null)
    IF active_sessions is empty:
        ERROR: "No active workflow session found. Use --session <session-id>"
        EXIT
    ELSE IF active_sessions has multiple entries:
        # Use most recently modified session
        session_id = bash(ls -td .workflow/active/WFS-*/ 2>/dev/null | head -1 | xargs basename)
    ELSE:
        session_id = basename(active_sessions[0])

# Else auto-detect active session
find .workflow/active/ -name "WFS-*" -type d | head -1 | sed 's/.*\///'
# Derive paths
session_dir = .workflow/active/WFS-{session_id}
task_dir = session_dir/.task
summaries_dir = session_dir/.summaries
process_dir = session_dir/.process
```

**Extract**: sessionId
**Step 1.2: Validate Required Artifacts**
```bash
# Check task files exist
task_files = Glob(task_dir/*.json)
IF task_files.count == 0:
    ERROR: "No task JSON files found. Run /workflow:tdd-plan first"
    EXIT

**Validation**: Session directory exists
# Check summaries exist (optional but recommended for full analysis)
summaries_exist = EXISTS(summaries_dir)
IF NOT summaries_exist:
    WARNING: "No .summaries/ directory found. Some analysis may be limited."
```

**TodoWrite**: Mark phase 1 completed, phase 2 in_progress
**Output**: session_id, session_dir, task_files list

---

### Phase 2: Task Chain Validation
**Validate TDD structure using bash commands**
### Phase 2: Task Chain Structure Validation

**Step 2.1: Load and Parse Task JSONs**
```bash
# Load all task JSONs
for task_file in .workflow/active/{sessionId}/.task/*.json; do
  cat "$task_file"
done
# Single-pass JSON extraction using jq
validation_data = bash("""
  # Load all tasks and extract structured data
  cd '{session_dir}/.task'

  # Extract task IDs
  for task_file in .workflow/active/{sessionId}/.task/*.json; do
    cat "$task_file" | jq -r '.id'
  done
  # Extract all task IDs
  task_ids=$(jq -r '.id' *.json 2>/dev/null | sort)

  # Check dependencies - read tasks and filter for IMPL/REFACTOR
  for task_file in .workflow/active/{sessionId}/.task/IMPL-*.json; do
    cat "$task_file" | jq -r '.context.depends_on[]?'
  done
  # Extract dependencies for IMPL tasks
  impl_deps=$(jq -r 'select(.id | startswith("IMPL")) | .id + ":" + (.context.depends_on[]? // "none")' *.json 2>/dev/null)

  for task_file in .workflow/active/{sessionId}/.task/REFACTOR-*.json; do
    cat "$task_file" | jq -r '.context.depends_on[]?'
  done
  # Extract dependencies for REFACTOR tasks
  refactor_deps=$(jq -r 'select(.id | startswith("REFACTOR")) | .id + ":" + (.context.depends_on[]? // "none")' *.json 2>/dev/null)

  # Check meta fields
  for task_file in .workflow/active/{sessionId}/.task/*.json; do
    cat "$task_file" | jq -r '.meta.tdd_phase'
  done
  # Extract meta fields
  meta_tdd=$(jq -r '.id + ":" + (.meta.tdd_phase // "missing")' *.json 2>/dev/null)
  meta_agent=$(jq -r '.id + ":" + (.meta.agent // "missing")' *.json 2>/dev/null)

  for task_file in .workflow/active/{sessionId}/.task/*.json; do
    cat "$task_file" | jq -r '.meta.agent'
  done
  # Output as JSON
  jq -n --arg ids "$task_ids" \\
        --arg impl "$impl_deps" \\
        --arg refactor "$refactor_deps" \\
        --arg tdd "$meta_tdd" \\
        --arg agent "$meta_agent" \\
        '{ids: $ids, impl_deps: $impl, refactor_deps: $refactor, tdd: $tdd, agent: $agent}'
""")
```

**Validation**:
- For each feature N, verify TEST-N.M → IMPL-N.M → REFACTOR-N.M exists
- IMPL-N.M.context.depends_on includes TEST-N.M
- REFACTOR-N.M.context.depends_on includes IMPL-N.M
- TEST tasks have tdd_phase="red" and agent="@code-review-test-agent"
- IMPL/REFACTOR tasks have tdd_phase="green"/"refactor" and agent="@code-developer"
**Step 2.2: Validate TDD Chain Structure**
```
Parse validation_data JSON and validate:

**Extract**: Chain validation report
For each feature N (extracted from task IDs):
1. TEST-N.M exists?
2. IMPL-N.M exists?
3. REFACTOR-N.M exists? (optional but recommended)
4. IMPL-N.M.context.depends_on contains TEST-N.M?
5. REFACTOR-N.M.context.depends_on contains IMPL-N.M?
6. TEST-N.M.meta.tdd_phase == "red"?
7. TEST-N.M.meta.agent == "@code-review-test-agent"?
8. IMPL-N.M.meta.tdd_phase == "green"?
9. IMPL-N.M.meta.agent == "@code-developer"?
10. REFACTOR-N.M.meta.tdd_phase == "refactor"?

**TodoWrite**: Mark phase 2 completed, phase 3 in_progress
Calculate:
- chain_completeness_score = (complete_chains / total_chains) * 100
- dependency_accuracy = (correct_deps / total_deps) * 100
- meta_field_accuracy = (correct_meta / total_meta) * 100
```

**Output**: chain_validation_report (JSON structure with validation results)
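
A minimal sketch of how checks 1-10 and the summary scores could be mechanized, assuming the task JSONs are already parsed into a map keyed by task id (helper names beyond the fields listed above are illustrative):

```javascript
// Sketch: validate one TEST → IMPL → REFACTOR chain and compute a summary score.
// `tasks` maps task id -> parsed task JSON.
function validateChain(tasks, n, m) {
  const id = s => `${s}-${n}.${m}`
  const test = tasks[id("TEST")]
  const impl = tasks[id("IMPL")]
  const refactor = tasks[id("REFACTOR")]  // optional but recommended (check 3)
  const depsOk = Boolean(
    impl?.context?.depends_on?.includes(id("TEST")) &&
    (!refactor || refactor.context?.depends_on?.includes(id("IMPL")))
  )
  const metaOk = Boolean(
    test?.meta?.tdd_phase === "red" &&
    test?.meta?.agent === "@code-review-test-agent" &&
    impl?.meta?.tdd_phase === "green" &&
    impl?.meta?.agent === "@code-developer" &&
    (!refactor || refactor.meta?.tdd_phase === "refactor")
  )
  return { complete: Boolean(test && impl && refactor), depsOk, metaOk }
}

// chain_completeness_score = (complete_chains / total_chains) * 100
function completenessScore(results) {
  const complete = results.filter(r => r.complete).length
  return results.length ? (complete / results.length) * 100 : 0
}
```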

---

### Phase 3: Test Execution Analysis
**Command**: `SlashCommand(command="/workflow:tools:tdd-coverage-analysis --session [sessionId]")`
### Phase 3: Coverage & Cycle Analysis

**Input**: sessionId from Phase 1
**Step 3.1: Call Coverage Analysis Sub-command**
```bash
SlashCommand(command="/workflow:tools:tdd-coverage-analysis --session {session_id}")
```

**Parse Output**:
- Coverage metrics (line, branch, function percentages)
- TDD cycle verification results
- Compliance score
**Step 3.2: Parse Output Files**
```bash
# Check required outputs exist
IF NOT EXISTS(process_dir/test-results.json):
    WARNING: "test-results.json not found. Coverage analysis incomplete."
    coverage_data = null
ELSE:
    coverage_data = Read(process_dir/test-results.json)

**Validation**:
- `.workflow/active/{sessionId}/.process/test-results.json` exists
- `.workflow/active/{sessionId}/.process/coverage-report.json` exists
- `.workflow/active/{sessionId}/.process/tdd-cycle-report.md` exists
IF NOT EXISTS(process_dir/coverage-report.json):
    WARNING: "coverage-report.json not found. Coverage metrics incomplete."
    metrics = null
ELSE:
    metrics = Read(process_dir/coverage-report.json)

**TodoWrite**: Mark phase 3 completed, phase 4 in_progress
IF NOT EXISTS(process_dir/tdd-cycle-report.md):
    WARNING: "tdd-cycle-report.md not found. Cycle validation incomplete."
    cycle_data = null
ELSE:
    cycle_data = Read(process_dir/tdd-cycle-report.md)
```

**Step 3.3: Extract Coverage Metrics**
```
If coverage_data exists:
- line_coverage_percent
- branch_coverage_percent
- function_coverage_percent
- uncovered_files (list)
- uncovered_lines (map: file -> line ranges)

If cycle_data exists:
- red_phase_compliance (tests failed initially?)
- green_phase_compliance (tests pass after impl?)
- refactor_phase_compliance (tests stay green during refactor?)
- minimal_implementation_score (was impl minimal?)
```

**Output**: coverage_analysis, cycle_analysis
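
As a sketch, the extraction reduces to a mapping over the parsed report; the exact JSON shape depends on the coverage tool, so the field paths below are assumptions to adapt:

```javascript
// Sketch: pull the Step 3.3 metrics out of the parsed coverage report.
// Field paths are assumed; adjust to the actual coverage tool output.
function extractCoverage(coverageData) {
  if (!coverageData) return null
  return {
    line_coverage_percent: coverageData.totals?.lines?.percent,
    branch_coverage_percent: coverageData.totals?.branches?.percent,
    function_coverage_percent: coverageData.totals?.functions?.percent,
    uncovered_files: coverageData.files
      ?.filter(f => (f.lines?.percent ?? 100) < 100)
      .map(f => f.path) ?? []
  }
}
```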

---

### Phase 4: Compliance Report Generation
**Gemini analysis for comprehensive TDD compliance report**

**Step 4.1: Calculate Compliance Score**
```
Base Score: 100 points

Deductions:
Chain Structure:
- Missing TEST task: -30 points per feature
- Missing IMPL task: -30 points per feature
- Missing REFACTOR task: -10 points per feature
- Wrong dependency: -15 points per error
- Wrong agent: -5 points per error
- Wrong tdd_phase: -5 points per error

TDD Cycle Compliance:
- Test didn't fail initially: -10 points per feature
- Tests didn't pass after IMPL: -20 points per feature
- Tests broke during REFACTOR: -15 points per feature
- Over-engineered IMPL: -10 points per feature

Coverage Quality:
- Line coverage < 80%: -5 points
- Branch coverage < 70%: -5 points
- Function coverage < 80%: -5 points
- Critical paths uncovered: -10 points

Final Score: Max(0, Base Score - Total Deductions)
```

**Step 4.2: Determine Quality Gate**
```
IF score >= 90 AND critical_violations == 0:
    recommendation = "APPROVED"
ELSE IF score >= 70 AND critical_violations == 0:
    recommendation = "PROCEED_WITH_CAVEATS"
ELSE IF score >= 50:
    recommendation = "REQUIRE_FIXES"
ELSE:
    recommendation = "BLOCK_MERGE"
```
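
Steps 4.1 and 4.2 reduce to a small pure function over the violation tallies gathered in Phases 2-3 (a sketch; the counter names are illustrative):

```javascript
// Sketch: compliance score (Step 4.1) and quality gate (Step 4.2).
// `v` holds violation counts gathered in Phases 2-3.
function complianceScore(v, coverage) {
  let score = 100
  // Chain structure deductions
  score -= 30 * v.missingTest + 30 * v.missingImpl + 10 * v.missingRefactor
  score -= 15 * v.wrongDependency + 5 * v.wrongAgent + 5 * v.wrongTddPhase
  // TDD cycle compliance deductions
  score -= 10 * v.testNeverFailed + 20 * v.testsNotPassing
  score -= 15 * v.refactorBrokeTests + 10 * v.overEngineered
  // Coverage quality deductions
  if (coverage.line < 80) score -= 5
  if (coverage.branch < 70) score -= 5
  if (coverage.function < 80) score -= 5
  if (coverage.criticalPathsUncovered) score -= 10
  return Math.max(0, score)
}

function qualityGate(score, criticalViolations) {
  if (score >= 90 && criticalViolations === 0) return "APPROVED"
  if (score >= 70 && criticalViolations === 0) return "PROCEED_WITH_CAVEATS"
  if (score >= 50) return "REQUIRE_FIXES"
  return "BLOCK_MERGE"
}
```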

**Step 4.3: Generate Report**
```bash
ccw cli -p "
PURPOSE: Generate TDD compliance report
TASK: Analyze TDD workflow execution and generate quality report
CONTEXT: @{.workflow/active/{sessionId}/.task/*.json,.workflow/active/{sessionId}/.summaries/*,.workflow/active/{sessionId}/.process/tdd-cycle-report.md}
EXPECTED:
- TDD compliance score (0-100)
- Chain completeness verification
- Test coverage analysis summary
- Quality recommendations
- Red-Green-Refactor cycle validation
- Best practices adherence assessment
RULES: Focus on TDD best practices and workflow adherence. Be specific about violations and improvements.
" --tool gemini --mode analysis --cd project-root > .workflow/active/{sessionId}/TDD_COMPLIANCE_REPORT.md
report_content = Generate markdown report (see structure below)
report_path = "{session_dir}/TDD_COMPLIANCE_REPORT.md"
Write(report_path, report_content)
```

**Output**: TDD_COMPLIANCE_REPORT.md

**TodoWrite**: Mark phase 4 completed

**Return to User**:
```
TDD Verification Report - Session: {sessionId}

## Chain Validation
[COMPLETE] Feature 1: TEST-1.1 → IMPL-1.1 → REFACTOR-1.1 (Complete)
[COMPLETE] Feature 2: TEST-2.1 → IMPL-2.1 → REFACTOR-2.1 (Complete)
[INCOMPLETE] Feature 3: TEST-3.1 → IMPL-3.1 (Missing REFACTOR phase)

## Test Execution
All TEST tasks produced failing tests
All IMPL tasks made tests pass
All REFACTOR tasks maintained green tests

## Coverage Metrics
Line Coverage: {percentage}%
Branch Coverage: {percentage}%
Function Coverage: {percentage}%

## Compliance Score: {score}/100

Detailed report: .workflow/active/{sessionId}/TDD_COMPLIANCE_REPORT.md

Recommendations:
- Complete missing REFACTOR-3.1 task
- Consider additional edge case tests for Feature 2
- Improve test failure message clarity in Feature 1
```

**Step 4.4: Display Summary to User**
```bash
echo "=== TDD Verification Complete ==="
echo "Session: {session_id}"
echo "Report: {report_path}"
echo ""
echo "Quality Gate: {recommendation}"
echo "Compliance Score: {score}/100"
echo ""
echo "Chain Validation: {chain_completeness_score}%"
echo "Line Coverage: {line_coverage}%"
echo "Branch Coverage: {branch_coverage}%"
echo ""
echo "Next: Review full report for detailed findings"
```

## TodoWrite Pattern
## TodoWrite Pattern (Optional)

**Note**: As an orchestrator command, TodoWrite tracking is optional and primarily useful for long-running verification processes. For most cases, the 4-phase execution is fast enough that progress tracking adds noise without value.

```javascript
// Initialize (before Phase 1)
TodoWrite({todos: [
  {"content": "Identify target session", "status": "in_progress", "activeForm": "Identifying target session"},
  {"content": "Validate task chain structure", "status": "pending", "activeForm": "Validating task chain structure"},
  {"content": "Analyze test execution", "status": "pending", "activeForm": "Analyzing test execution"},
  {"content": "Generate compliance report", "status": "pending", "activeForm": "Generating compliance report"}
]})

// After Phase 1
TodoWrite({todos: [
  {"content": "Identify target session", "status": "completed", "activeForm": "Identifying target session"},
  {"content": "Validate task chain structure", "status": "in_progress", "activeForm": "Validating task chain structure"},
  {"content": "Analyze test execution", "status": "pending", "activeForm": "Analyzing test execution"},
  {"content": "Generate compliance report", "status": "pending", "activeForm": "Generating compliance report"}
]})

// Continue pattern for Phase 2, 3, 4...
// Only use TodoWrite for complex multi-session verification
// Skip for single-session verification
```

## Validation Logic

@@ -229,27 +316,24 @@ TodoWrite({todos: [
```
5. Report incomplete or invalid chains
```

### Compliance Scoring
```
Base Score: 100 points
### Quality Gate Criteria

Deductions:
- Missing TEST task: -30 points per feature
- Missing IMPL task: -30 points per feature
- Missing REFACTOR task: -10 points per feature
- Wrong dependency: -15 points per error
- Wrong agent: -5 points per error
- Wrong tdd_phase: -5 points per error
- Test didn't fail initially: -10 points per feature
- Tests didn't pass after IMPL: -20 points per feature
- Tests broke during REFACTOR: -15 points per feature
| Recommendation | Score Range | Critical Violations | Action |
|----------------|-------------|---------------------|--------|
| **APPROVED** | ≥90 | 0 | Safe to merge |
| **PROCEED_WITH_CAVEATS** | ≥70 | 0 | Can proceed, address minor issues |
| **REQUIRE_FIXES** | ≥50 | Any | Must fix before merge |
| **BLOCK_MERGE** | <50 | Any | Block merge until resolved |

Final Score: Max(0, Base Score - Deductions)
```
**Critical Violations**:
- Missing TEST or IMPL task for any feature
- Tests didn't fail initially (Red phase violation)
- Tests didn't pass after IMPL (Green phase violation)
- Tests broke during REFACTOR (Refactor phase violation)

## Output Files
```
.workflow/active/{session-id}/
.workflow/active/WFS-{session-id}/
├── TDD_COMPLIANCE_REPORT.md    # Comprehensive compliance report ⭐
└── .process/
    ├── test-results.json       # From tdd-coverage-analysis
```

@@ -262,14 +346,14 @@ Final Score: Max(0, Base Score - Deductions)
### Session Discovery Errors
| Error | Cause | Resolution |
|-------|-------|------------|
| No active session | No WFS-* directories | Provide session-id explicitly |
| Multiple active sessions | Multiple WFS-* directories | Provide session-id explicitly |
| No active session | No WFS-* directories | Provide --session explicitly |
| Multiple active sessions | Multiple WFS-* directories | Provide --session explicitly |
| Session not found | Invalid session-id | Check available sessions |

### Validation Errors
| Error | Cause | Resolution |
|-------|-------|------------|
| Task files missing | Incomplete planning | Run tdd-plan first |
| Task files missing | Incomplete planning | Run /workflow:tdd-plan first |
| Invalid JSON | Corrupted task files | Regenerate tasks |
| Missing summaries | Tasks not executed | Execute tasks before verify |

@@ -278,13 +362,13 @@ Final Score: Max(0, Base Score - Deductions)
|-------|-------|------------|
| Coverage tool missing | No test framework | Configure testing first |
| Tests fail to run | Code errors | Fix errors before verify |
| Gemini analysis fails | Token limit / API error | Retry or reduce context |
| Sub-command fails | tdd-coverage-analysis error | Check sub-command logs |

## Integration & Usage

### Command Chain
- **Called After**: `/workflow:execute` (when TDD tasks completed)
- **Calls**: `/workflow:tools:tdd-coverage-analysis`, Gemini CLI
- **Calls**: `/workflow:tools:tdd-coverage-analysis`
- **Related**: `/workflow:tdd-plan`, `/workflow:status`

### Basic Usage
@@ -293,7 +377,7 @@ Final Score: Max(0, Base Score - Deductions)
```bash
/workflow:tdd-verify

# Specify session
/workflow:tdd-verify WFS-auth
/workflow:tdd-verify --session WFS-auth
```

### When to Use

@@ -308,61 +392,125 @@ Final Score: Max(0, Base Score - Deductions)
```
# TDD Compliance Report - {Session ID}

**Generated**: {timestamp}
**Session**: {sessionId}
**Session**: WFS-{sessionId}
**Workflow Type**: TDD

---

## Executive Summary
Overall Compliance Score: {score}/100
Status: {EXCELLENT | GOOD | NEEDS IMPROVEMENT | FAILED}

### Quality Gate Decision

| Metric | Value | Status |
|--------|-------|--------|
| Compliance Score | {score}/100 | {status_emoji} |
| Chain Completeness | {percentage}% | {status} |
| Line Coverage | {percentage}% | {status} |
| Branch Coverage | {percentage}% | {status} |
| Function Coverage | {percentage}% | {status} |

### Recommendation

**{RECOMMENDATION}**

**Decision Rationale**:
{brief explanation based on score and violations}

**Quality Gate Criteria**:
- **APPROVED**: Score ≥90, no critical violations
- **PROCEED_WITH_CAVEATS**: Score ≥70, no critical violations
- **REQUIRE_FIXES**: Score ≥50 or critical violations exist
- **BLOCK_MERGE**: Score <50

---

## Chain Analysis

### Feature 1: {Feature Name}
**Status**: Complete
**Status**: ✅ Complete
**Chain**: TEST-1.1 → IMPL-1.1 → REFACTOR-1.1

- **Red Phase**: Test created and failed with clear message
- **Green Phase**: Minimal implementation made test pass
- **Refactor Phase**: Code improved, tests remained green
| Phase | Task | Status | Details |
|-------|------|--------|---------|
| Red | TEST-1.1 | ✅ Pass | Test created and failed with clear message |
| Green | IMPL-1.1 | ✅ Pass | Minimal implementation made test pass |
| Refactor | REFACTOR-1.1 | ✅ Pass | Code improved, tests remained green |

### Feature 2: {Feature Name}
**Status**: Incomplete
**Status**: ⚠️ Incomplete
**Chain**: TEST-2.1 → IMPL-2.1 (Missing REFACTOR-2.1)

- **Red Phase**: Test created and failed
- **Green Phase**: Implementation seems over-engineered
- **Refactor Phase**: Missing
| Phase | Task | Status | Details |
|-------|------|--------|---------|
| Red | TEST-2.1 | ✅ Pass | Test created and failed |
| Green | IMPL-2.1 | ⚠️ Warning | Implementation seems over-engineered |
| Refactor | REFACTOR-2.1 | ❌ Missing | Task not completed |

**Issues**:
- REFACTOR-2.1 task not completed
- IMPL-2.1 implementation exceeded minimal scope
- REFACTOR-2.1 task not completed (-10 points)
- IMPL-2.1 implementation exceeded minimal scope (-10 points)

[Repeat for all features]
### Chain Validation Summary

| Metric | Value |
|--------|-------|
| Total Features | {count} |
| Complete Chains | {count} ({percent}%) |
| Incomplete Chains | {count} |
| Missing TEST | {count} |
| Missing IMPL | {count} |
| Missing REFACTOR | {count} |
| Dependency Errors | {count} |
| Meta Field Errors | {count} |

---

## Test Coverage Analysis

### Coverage Metrics
- Line Coverage: {percentage}% {status}
- Branch Coverage: {percentage}% {status}
- Function Coverage: {percentage}% {status}

| Metric | Coverage | Target | Status |
|--------|----------|--------|--------|
| Line Coverage | {percentage}% | ≥80% | {status} |
| Branch Coverage | {percentage}% | ≥70% | {status} |
| Function Coverage | {percentage}% | ≥80% | {status} |

### Coverage Gaps
- {file}:{lines} - Uncovered error handling
- {file}:{lines} - Uncovered edge case

| File | Lines | Issue | Priority |
|------|-------|-------|----------|
| src/auth/service.ts | 45-52 | Uncovered error handling | HIGH |
| src/utils/parser.ts | 78-85 | Uncovered edge case | MEDIUM |

---

## TDD Cycle Validation

### Red Phase (Write Failing Test)
- {N}/{total} features had failing tests initially
- Feature 3: No evidence of initial test failure
- {N}/{total} features had failing tests initially ({percent}%)
- ✅ Compliant features: {list}
- ❌ Non-compliant features: {list}

**Violations**:
- Feature 3: No evidence of initial test failure (-10 points)

### Green Phase (Make Test Pass)
- {N}/{total} implementations made tests pass
- All implementations minimal and focused
- {N}/{total} implementations made tests pass ({percent}%)
- ✅ Compliant features: {list}
- ❌ Non-compliant features: {list}

**Violations**:
- Feature 2: Implementation over-engineered (-10 points)

### Refactor Phase (Improve Quality)
- {N}/{total} features completed refactoring
- Feature 2, 4: Refactoring step skipped
- {N}/{total} features completed refactoring ({percent}%)
- ✅ Compliant features: {list}
- ❌ Non-compliant features: {list}

**Violations**:
- Feature 2, 4: Refactoring step skipped (-20 points total)

---

## Best Practices Assessment

@@ -377,24 +525,61 @@ Status: {EXCELLENT | GOOD | NEEDS IMPROVEMENT | FAILED}
- Missing refactoring steps
- Test failure messages could be more descriptive

---

## Detailed Findings by Severity

### Critical Issues ({count})
{List of critical issues with impact and remediation}

### High Priority Issues ({count})
{List of high priority issues with impact and remediation}

### Medium Priority Issues ({count})
{List of medium priority issues with impact and remediation}

### Low Priority Issues ({count})
{List of low priority issues with impact and remediation}

---

## Recommendations

### High Priority
### Required Fixes (Before Merge)
1. Complete missing REFACTOR tasks (Features 2, 4)
2. Verify initial test failures for Feature 3
3. Simplify over-engineered implementations
3. Fix tests that broke during refactoring

### Medium Priority
1. Add edge case tests for Features 1, 3
2. Improve test failure message clarity
3. Increase branch coverage to >85%
### Recommended Improvements
1. Simplify over-engineered implementations
2. Add edge case tests for Features 1, 3
3. Improve test failure message clarity
4. Increase branch coverage to >85%

### Low Priority
### Optional Enhancements
1. Add more descriptive test names
2. Consider parameterized tests for similar scenarios
3. Document TDD process learnings

## Conclusion
{Summary of compliance status and next steps}
---

## Metrics Summary

| Metric | Value |
|--------|-------|
| Total Features | {count} |
| Complete Chains | {count} ({percent}%) |
| Compliance Score | {score}/100 |
| Critical Issues | {count} |
| High Issues | {count} |
| Medium Issues | {count} |
| Low Issues | {count} |
| Line Coverage | {percent}% |
| Branch Coverage | {percent}% |
| Function Coverage | {percent}% |

---

**Report End**
```

@@ -1,12 +1,16 @@
---
name: conflict-resolution
description: Detect and resolve conflicts between plan and existing codebase using CLI-powered analysis with Gemini/Qwen
argument-hint: "--session WFS-session-id --context path/to/context-package.json"
argument-hint: "[-y|--yes] --session WFS-session-id --context path/to/context-package.json"
examples:
- /workflow:tools:conflict-resolution --session WFS-auth --context .workflow/active/WFS-auth/.process/context-package.json
- /workflow:tools:conflict-resolution --session WFS-payment --context .workflow/active/WFS-payment/.process/context-package.json
- /workflow:tools:conflict-resolution -y --session WFS-payment --context .workflow/active/WFS-payment/.process/context-package.json
---

## Auto Mode

When `--yes` or `-y`: Auto-select recommended strategy for each conflict, skip clarification questions.

# Conflict Resolution Command

## Purpose
@@ -209,6 +213,8 @@ Task(subagent_type="cli-execution-agent", run_in_background=false, prompt=`
### Phase 3: User Interaction Loop

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

FOR each conflict:
  round = 0, clarified = false, userClarifications = []

@@ -216,8 +222,13 @@ FOR each conflict:
  // 1. Display conflict info (text output for context)
  displayConflictSummary(conflict)  // id, brief, severity, overlap_analysis if ModuleOverlap

  // 2. Strategy selection via AskUserQuestion
  AskUserQuestion({
  // 2. Strategy selection
  if (autoYes) {
    console.log(`[--yes] Auto-selecting recommended strategy`)
    selectedStrategy = conflict.strategies[conflict.recommended || 0]
    clarified = true  // Skip clarification loop
  } else {
    AskUserQuestion({
      questions: [{
        question: formatStrategiesForDisplay(conflict.strategies),
        header: "Strategy Selection",
@@ -230,18 +241,19 @@ FOR each conflict:
        { label: "Custom Modification", description: `Suggestions: ${conflict.modification_suggestions?.slice(0,2).join('; ')}` }
      ]
    }]
  })
    })

  // 3. Handle selection
  if (userChoice === "Custom Modification") {
    customConflicts.push({ id, brief, category, suggestions, overlap_analysis })
    break
    // 3. Handle selection
    if (userChoice === "Custom Modification") {
      customConflicts.push({ id, brief, category, suggestions, overlap_analysis })
      break
    }

    selectedStrategy = findStrategyByName(userChoice)
  }

  selectedStrategy = findStrategyByName(userChoice)

  // 4. Clarification (if needed) - batched max 4 per call
  if (selectedStrategy.clarification_needed?.length > 0) {
  if (!autoYes && selectedStrategy.clarification_needed?.length > 0) {
    for (batch of chunk(selectedStrategy.clarification_needed, 4)) {
      AskUserQuestion({
        questions: batch.map((q, i) => ({
```

@@ -1,11 +1,16 @@
---
name: task-generate-agent
description: Generate implementation plan documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) using action-planning-agent - produces planning artifacts, does NOT execute code implementation
argument-hint: "--session WFS-session-id"
argument-hint: "[-y|--yes] --session WFS-session-id"
examples:
- /workflow:tools:task-generate-agent --session WFS-auth
- /workflow:tools:task-generate-agent -y --session WFS-auth
---

## Auto Mode

When `--yes` or `-y`: Skip user questions, use defaults (no materials, Agent executor, Codex CLI tool).

# Generate Implementation Plan Command

## Overview
@@ -67,9 +72,25 @@ Phase 3: Integration (+1 Coordinator, Multi-Module Only)

**Purpose**: Collect user preferences before task generation to ensure generated tasks match execution expectations.

**User Questions**:
**Auto Mode Check**:
```javascript
AskUserQuestion({
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  console.log(`[--yes] Using defaults: No materials, Agent executor, Codex CLI`)
  userConfig = {
    supplementaryMaterials: { type: "none", content: [] },
    executionMethod: "agent",
    preferredCliTool: "codex",
    enableResume: true
  }
  // Skip to Phase 1
}
```

**User Questions** (skipped if autoYes):
```javascript
if (!autoYes) AskUserQuestion({
  questions: [
    {
      question: "Do you have supplementary materials or guidelines to include?",
@@ -104,11 +125,10 @@ AskUserQuestion({
    }
  ]
})
```

**Handle Materials Response**:
**Handle Materials Response** (skipped if autoYes):
```javascript
if (userConfig.materials === "Provide file paths") {
if (!autoYes && userConfig.materials === "Provide file paths") {
  // Follow-up question for file paths
  const pathsResponse = AskUserQuestion({
    questions: [{
```

@@ -1,11 +1,16 @@
---
name: task-generate-tdd
description: Autonomous TDD task generation using action-planning-agent with Red-Green-Refactor cycles, test-first structure, and cycle validation
argument-hint: "--session WFS-session-id"
argument-hint: "[-y|--yes] --session WFS-session-id"
examples:
- /workflow:tools:task-generate-tdd --session WFS-auth
- /workflow:tools:task-generate-tdd -y --session WFS-auth
---

## Auto Mode

When `--yes` or `-y`: Skip user questions, use defaults (no materials, Agent executor).

# Autonomous TDD Task Generation Command

## Overview
@@ -78,44 +83,176 @@ Phase 2: Agent Execution (Document Generation)

## Execution Lifecycle

### Phase 1: Discovery & Context Loading
### Phase 0: User Configuration (Interactive)

**Purpose**: Collect user preferences before TDD task generation to ensure generated tasks match execution expectations and provide necessary supplementary context.

**User Questions**:
```javascript
AskUserQuestion({
  questions: [
    {
      question: "Do you have supplementary materials or guidelines to include?",
      header: "Materials",
      multiSelect: false,
      options: [
        { label: "No additional materials", description: "Use existing context only" },
        { label: "Provide file paths", description: "I'll specify paths to include" },
        { label: "Provide inline content", description: "I'll paste content directly" }
      ]
    },
    {
      question: "Select execution method for generated TDD tasks:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent (Recommended)", description: "Claude agent executes Red-Green-Refactor cycles directly" },
        { label: "Hybrid", description: "Agent orchestrates, calls CLI for complex steps (Red/Green phases)" },
        { label: "CLI Only", description: "All TDD cycles via CLI tools (codex/gemini/qwen)" }
      ]
    },
    {
      question: "If using CLI, which tool do you prefer?",
      header: "CLI Tool",
      multiSelect: false,
      options: [
        { label: "Codex (Recommended)", description: "Best for TDD Red-Green-Refactor cycles" },
        { label: "Gemini", description: "Best for analysis and large context" },
        { label: "Qwen", description: "Alternative analysis tool" },
        { label: "Auto", description: "Let agent decide per-task" }
      ]
    }
  ]
})
```

**Handle Materials Response**:
```javascript
if (userConfig.materials === "Provide file paths") {
  // Follow-up question for file paths
  const pathsResponse = AskUserQuestion({
    questions: [{
      question: "Enter file paths to include (comma-separated or one per line):",
      header: "Paths",
      multiSelect: false,
      options: [
        { label: "Enter paths", description: "Provide paths in text input" }
      ]
    }]
  })
  userConfig.supplementaryPaths = parseUserPaths(pathsResponse)
}
```
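
`parseUserPaths` is referenced above but not defined by this command; a minimal sketch, assuming the answer arrives as raw text (adapt to the actual AskUserQuestion result shape):

```javascript
// Sketch of the parseUserPaths helper used above (name taken from the calling
// code; the response shape is an assumption).
function parseUserPaths(response) {
  const raw = response?.answers?.[0]?.text ?? ""
  return raw
    .split(/[\n,]/)          // comma-separated or one per line
    .map(p => p.trim())
    .filter(p => p.length > 0)
}
```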

**Build userConfig**:
```javascript
const userConfig = {
  supplementaryMaterials: {
    type: "none|paths|inline",
    content: [...],  // Parsed paths or inline content
  },
  executionMethod: "agent|hybrid|cli",
  preferredCliTool: "codex|gemini|qwen|auto",
  enableResume: true  // Always enable resume for CLI executions
}
```

**Pass to Agent**: Include `userConfig` in agent prompt for Phase 2.

---

### Phase 1: Context Preparation & Discovery

**Command Responsibility**: Command prepares session paths and metadata, provides to agent for autonomous context loading.

**⚡ Memory-First Rule**: Skip file loading if documents already in conversation memory

**Agent Context Package**:
**📊 Progressive Loading Strategy**: Load context incrementally due to large analysis.md file sizes:
- **Core**: session metadata + context-package.json (always load)
- **Selective**: synthesis_output OR (guidance + relevant role analyses) - NOT all role analyses
- **On-Demand**: conflict resolution (if conflict_risk >= medium), test context

**🛤️ Path Clarity Requirement**: All `focus_paths` prefer absolute paths (e.g., `D:\\project\\src\\module`), or clear relative paths from project root (e.g., `./src/module`)

**Session Path Structure** (Provided by Command to Agent):
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json          # Session metadata
├── .process/
│   ├── context-package.json       # Context package with artifact catalog
│   ├── test-context-package.json  # Test coverage analysis
│   └── conflict-resolution.json   # Conflict resolution (if exists)
├── .task/                         # Output: Task JSON files
│   ├── IMPL-1.json
│   ├── IMPL-2.json
│   └── ...
├── IMPL_PLAN.md                   # Output: TDD implementation plan
└── TODO_LIST.md                   # Output: TODO list with TDD phases
```

**Command Preparation**:
1. **Assemble Session Paths** for agent prompt:
   - `session_metadata_path`: `.workflow/active/{session-id}/workflow-session.json`
   - `context_package_path`: `.workflow/active/{session-id}/.process/context-package.json`
   - `test_context_package_path`: `.workflow/active/{session-id}/.process/test-context-package.json`
   - Output directory paths

2. **Provide Metadata** (simple values):
   - `session_id`: WFS-{session-id}
   - `workflow_type`: "tdd"
   - `mcp_capabilities`: {exa_code, exa_web, code_index}

3. **Pass userConfig** from Phase 0
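
A sketch of this preparation step, with the path layout taken from the structure above (the function name and return shape are illustrative):

```javascript
// Sketch: assemble the session paths and metadata handed to the agent.
function buildAgentContext(sessionId, userConfig) {
  const root = `.workflow/active/WFS-${sessionId}`  // session directory layout above
  return {
    session_id: `WFS-${sessionId}`,
    workflow_type: "tdd",
    session_metadata_path: `${root}/workflow-session.json`,
    context_package_path: `${root}/.process/context-package.json`,
    test_context_package_path: `${root}/.process/test-context-package.json`,
    output: {
      task_dir: `${root}/.task/`,
      impl_plan: `${root}/IMPL_PLAN.md`,
      todo_list: `${root}/TODO_LIST.md`
    },
    mcp_capabilities: { exa_code: true, exa_web: true, code_index: true },
    user_config: userConfig  // from Phase 0
  }
}
```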
|
||||
|
||||
**Agent Context Package** (Agent loads autonomously):
|
||||
```javascript
|
||||
{
|
||||
"session_id": "WFS-[session-id]",
|
||||
"workflow_type": "tdd",
|
||||
// Note: CLI tool usage is determined semantically by action-planning-agent based on user's task description
|
||||
|
||||
// Core (ALWAYS load)
|
||||
"session_metadata": {
|
||||
// If in memory: use cached content
|
||||
// Else: Load from .workflow/active//{session-id}/workflow-session.json
|
||||
// Else: Load from workflow-session.json
|
||||
},
|
||||
"context_package": {
|
||||
// If in memory: use cached content
|
||||
// Else: Load from context-package.json
|
||||
},
|
||||
|
||||
// Selective (load based on progressive strategy)
|
||||
"brainstorm_artifacts": {
|
||||
// Loaded from context-package.json → brainstorm_artifacts section
|
||||
"role_analyses": [
|
||||
"synthesis_output": {"path": "...", "exists": true}, // Load if exists (highest priority)
|
||||
"guidance_specification": {"path": "...", "exists": true}, // Load if no synthesis
|
||||
"role_analyses": [ // Load SELECTIVELY based on task relevance
|
||||
{
|
||||
"role": "system-architect",
|
||||
"files": [{"path": "...", "type": "primary|supplementary"}]
|
||||
}
|
||||
],
|
||||
"guidance_specification": {"path": "...", "exists": true},
|
||||
"synthesis_output": {"path": "...", "exists": true},
|
||||
"conflict_resolution": {"path": "...", "exists": true} // if conflict_risk >= medium
|
||||
]
|
||||
},
|
||||
"context_package_path": ".workflow/active//{session-id}/.process/context-package.json",
|
||||
"context_package": {
|
||||
// If in memory: use cached content
|
||||
// Else: Load from .workflow/active//{session-id}/.process/context-package.json
|
||||
},
|
||||
"test_context_package_path": ".workflow/active//{session-id}/.process/test-context-package.json",
|
||||
|
||||
// On-Demand (load if exists)
|
||||
"test_context_package": {
|
||||
// Existing test patterns and coverage analysis
|
||||
// Load from test-context-package.json
|
||||
// Contains existing test patterns and coverage analysis
|
||||
},
|
||||
"conflict_resolution": {
|
||||
// Load from conflict-resolution.json if conflict_risk >= medium
|
||||
// Check context-package.conflict_detection.resolution_file
|
||||
},
|
||||
|
||||
// Capabilities
|
||||
"mcp_capabilities": {
|
||||
"codex_lens": true,
|
||||
"exa_code": true,
|
||||
"exa_web": true
|
||||
"exa_web": true,
|
||||
"code_index": true
|
||||
},
|
||||
|
||||
// User configuration from Phase 0
|
||||
"user_config": {
|
||||
// From Phase 0 AskUserQuestion
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -124,21 +261,21 @@ Phase 2: Agent Execution (Document Generation)
|
||||
1. **Load Session Context** (if not in memory)
|
||||
```javascript
|
||||
if (!memory.has("workflow-session.json")) {
|
||||
Read(.workflow/active//{session-id}/workflow-session.json)
|
||||
Read(.workflow/active/{session-id}/workflow-session.json)
|
||||
}
|
||||
```
|
||||
|
||||
2. **Load Context Package** (if not in memory)
|
||||
```javascript
|
||||
if (!memory.has("context-package.json")) {
|
||||
Read(.workflow/active//{session-id}/.process/context-package.json)
|
||||
Read(.workflow/active/{session-id}/.process/context-package.json)
|
||||
}
|
||||
```
|
||||
|
||||
3. **Load Test Context Package** (if not in memory)
|
||||
```javascript
|
||||
if (!memory.has("test-context-package.json")) {
|
||||
Read(.workflow/active//{session-id}/.process/test-context-package.json)
|
||||
Read(.workflow/active/{session-id}/.process/test-context-package.json)
|
||||
}
|
||||
```
@@ -180,62 +317,81 @@ Phase 2: Agent Execution (Document Generation)
)
```

### Phase 2: Agent Execution (Document Generation)
### Phase 2: Agent Execution (TDD Document Generation)

**Pre-Agent Template Selection** (Command decides path before invoking agent):
```javascript
// Command checks flag and selects template PATH (not content)
const templatePath = hasCliExecuteFlag
  ? "~/.claude/workflows/cli-templates/prompts/workflow/task-json-cli-mode.txt"
  : "~/.claude/workflows/cli-templates/prompts/workflow/task-json-agent-mode.txt";
```
**Purpose**: Generate TDD planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) - planning only, NOT code implementation.

**Agent Invocation**:
```javascript
Task(
  subagent_type="action-planning-agent",
  run_in_background=false,
  description="Generate TDD task JSON and implementation plan",
  description="Generate TDD planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md)",
  prompt=`
## Execution Context
## TASK OBJECTIVE
Generate TDD implementation planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) for workflow session

**Session ID**: WFS-{session-id}
**Workflow Type**: TDD
**Note**: CLI tool usage is determined semantically from user's task description
IMPORTANT: This is PLANNING ONLY - you are generating planning documents, NOT implementing code.

## Phase 1: Discovery Results (Provided Context)
CRITICAL: Follow the progressive loading strategy (load analysis.md files incrementally due to file size):
- **Core**: session metadata + context-package.json (always)
- **Selective**: synthesis_output OR (guidance + relevant role analyses) - NOT all
- **On-Demand**: conflict resolution (if conflict_risk >= medium), test context

### Session Metadata
{session_metadata_content}
## SESSION PATHS
Input:
- Session Metadata: .workflow/active/{session-id}/workflow-session.json
- Context Package: .workflow/active/{session-id}/.process/context-package.json
- Test Context: .workflow/active/{session-id}/.process/test-context-package.json

### Role Analyses (Enhanced by Synthesis)
{role_analyses_content}
- Includes requirements, design specs, enhancements, and clarifications from synthesis phase
Output:
- Task Dir: .workflow/active/{session-id}/.task/
- IMPL_PLAN: .workflow/active/{session-id}/IMPL_PLAN.md
- TODO_LIST: .workflow/active/{session-id}/TODO_LIST.md

### Artifacts Inventory
- **Guidance Specification**: {guidance_spec_path}
- **Role Analyses**: {role_analyses_list}
## CONTEXT METADATA
Session ID: {session-id}
Workflow Type: TDD
MCP Capabilities: {exa_code, exa_web, code_index}

### Context Package
{context_package_summary}
- Includes conflict_risk assessment
## USER CONFIGURATION (from Phase 0)
Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli
Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto
Supplementary Materials: ${userConfig.supplementaryMaterials}

### Test Context Package
{test_context_package_summary}
- Existing test patterns, framework config, coverage analysis
## CLI TOOL SELECTION
Based on userConfig.executionMethod:
- "agent": No command field in implementation_approach steps
- "hybrid": Add command field to complex steps only (Red/Green phases recommended for CLI)
- "cli": Add command field to ALL Red-Green-Refactor steps

### Conflict Resolution (Conditional)
If conflict_risk was medium/high, modifications have been applied to:
- **guidance-specification.md**: Design decisions updated to resolve conflicts
- **Role analyses (*.md)**: Recommendations adjusted for compatibility
- **context-package.json**: Marked as "resolved" with conflict IDs
- Conflict resolution results stored in conflict-resolution.json
CLI Resume Support (MANDATORY for all CLI commands):
- Use --resume parameter to continue from previous task execution
- Read previous task's cliExecutionId from session state
- Format: ccw cli -p "[prompt]" --resume [previousCliId] --tool [tool] --mode write

### MCP Analysis Results (Optional)
**Code Structure**: {mcp_code_index_results}
**External Research**: {mcp_exa_research_results}
## EXPLORATION CONTEXT (from context-package.exploration_results)
- Load exploration_results from context-package.json
- Use aggregated_insights.critical_files for focus_paths generation
- Apply aggregated_insights.constraints to acceptance criteria
- Reference aggregated_insights.all_patterns for implementation approach
- Use aggregated_insights.all_integration_points for precise modification locations
- Use conflict_indicators for risk-aware task sequencing

## Phase 2: TDD Document Generation Task
## CONFLICT RESOLUTION CONTEXT (if exists)
- Check context-package.conflict_detection.resolution_file for conflict-resolution.json path
- If exists, load .process/conflict-resolution.json:
  - Apply planning_constraints as task constraints (for brainstorm-less workflows)
  - Reference resolved_conflicts for implementation approach alignment
  - Handle custom_conflicts with explicit task notes

## TEST CONTEXT INTEGRATION
- Load test-context-package.json for existing test patterns and coverage analysis
- Extract test framework configuration (Jest/Pytest/etc.)
- Identify existing test conventions and patterns
- Map coverage gaps to TDD Red phase test targets

## TDD DOCUMENT GENERATION TASK

**Agent Configuration Reference**: All TDD task generation rules, quantification requirements, Red-Green-Refactor cycle structure, quality standards, and execution details are defined in action-planning-agent.

@@ -256,31 +412,61 @@ If conflict_risk was medium/high, modifications have been applied to:
#### Required Outputs Summary

##### 1. TDD Task JSON Files (.task/IMPL-*.json)
- **Location**: `.workflow/active//{session-id}/.task/`
- **Schema**: 5-field structure with TDD-specific metadata
- **Location**: `.workflow/active/{session-id}/.task/`
- **Schema**: 6-field structure with TDD-specific metadata
  - `id, title, status, context_package_path, meta, context, flow_control`
  - `meta.tdd_workflow`: true (REQUIRED)
  - `meta.max_iterations`: 3 (Green phase test-fix cycle limit)
  - `meta.cli_execution_id`: Unique CLI execution ID (format: `{session_id}-{task_id}`)
  - `meta.cli_execution`: Strategy object (new|resume|fork|merge_fork)
  - `context.tdd_cycles`: Array with quantified test cases and coverage
  - `context.focus_paths`: Absolute or clear relative paths (enhanced with exploration critical_files)
  - `flow_control.implementation_approach`: Exactly 3 steps with `tdd_phase` field
    1. Red Phase (`tdd_phase: "red"`): Write failing tests
    2. Green Phase (`tdd_phase: "green"`): Implement to pass tests
    3. Refactor Phase (`tdd_phase: "refactor"`): Improve code quality
  - CLI tool usage determined semantically (add `command` field when user requests CLI execution)
  - `flow_control.pre_analysis`: Include exploration integration_points analysis
  - CLI tool usage based on userConfig (add `command` field per executionMethod)
- **Details**: See action-planning-agent.md § TDD Task JSON Generation

##### 2. IMPL_PLAN.md (TDD Variant)
- **Location**: `.workflow/active//{session-id}/IMPL_PLAN.md`
- **Location**: `.workflow/active/{session-id}/IMPL_PLAN.md`
- **Template**: `~/.claude/workflows/cli-templates/prompts/workflow/impl-plan-template.txt`
- **TDD-Specific Frontmatter**: workflow_type="tdd", tdd_workflow=true, feature_count, task_breakdown
- **TDD Implementation Tasks Section**: Feature-by-feature with internal Red-Green-Refactor cycles
- **Context Analysis**: Artifact references and exploration insights
- **Details**: See action-planning-agent.md § TDD Implementation Plan Creation

##### 3. TODO_LIST.md
- **Location**: `.workflow/active//{session-id}/TODO_LIST.md`
- **Location**: `.workflow/active/{session-id}/TODO_LIST.md`
- **Format**: Hierarchical task list with internal TDD phase indicators (Red → Green → Refactor)
- **Status**: ▸ (container), [ ] (pending), [x] (completed)
- **Links**: Task JSON references and summaries
- **Details**: See action-planning-agent.md § TODO List Generation

### CLI EXECUTION ID REQUIREMENTS (MANDATORY)

Each task JSON MUST include:
- **meta.cli_execution_id**: Unique ID for CLI execution (format: `{session_id}-{task_id}`)
- **meta.cli_execution**: Strategy object based on depends_on:
  - No deps → `{ "strategy": "new" }`
  - 1 dep (single child) → `{ "strategy": "resume", "resume_from": "parent-cli-id" }`
  - 1 dep (multiple children) → `{ "strategy": "fork", "resume_from": "parent-cli-id" }`
  - N deps → `{ "strategy": "merge_fork", "resume_from": ["id1", "id2", ...] }`
- **Type**: `resume_from: string | string[]` (string for resume/fork, array for merge_fork)

**CLI Execution Strategy Rules**:
1. **new**: Task has no dependencies - starts fresh CLI conversation
2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation
3. **fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context
4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation

**Execution Command Patterns** (a strategy-selection sketch follows this list):
- new: `ccw cli -p "[prompt]" --tool [tool] --mode write --id [cli_execution_id]`
- resume: `ccw cli -p "[prompt]" --resume [resume_from] --tool [tool] --mode write`
- fork: `ccw cli -p "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write`
- merge_fork: `ccw cli -p "[prompt]" --resume [resume_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write` (resume_from is array)
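A minimal sketch of how the strategy rules above can be derived from the dependency graph. The function and parameter names are illustrative (not part of the documented schema), and it assumes the task carries its `session_id` so parent CLI IDs can be rebuilt from the `{session_id}-{task_id}` format:

```javascript
// Derive meta.cli_execution from a task's depends_on list.
// `childCount(id)` is assumed to return how many tasks depend on `id`.
function selectCliStrategy(task, childCount) {
  const deps = task.depends_on ?? [];
  if (deps.length === 0) return { strategy: "new" };
  if (deps.length === 1) {
    const parentCliId = `${task.session_id}-${deps[0]}`;
    return childCount(deps[0]) === 1
      ? { strategy: "resume", resume_from: parentCliId } // sole child: continue the conversation
      : { strategy: "fork", resume_from: parentCliId };  // siblings exist: branch off parent context
  }
  // Multiple parents: merge all parent conversations into a new one.
  return {
    strategy: "merge_fork",
    resume_from: deps.map(d => `${task.session_id}-${d}`),
  };
}
```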
### Quantification Requirements (MANDATORY)

**Core Rules**:
@@ -302,6 +488,7 @@ If conflict_risk was medium/high, modifications have been applied to:
- [ ] Every acceptance criterion includes measurable coverage percentage
- [ ] tdd_cycles array contains test_count and test_cases for each cycle
- [ ] No vague language ("comprehensive", "complete", "thorough")
- [ ] cli_execution_id and cli_execution strategy assigned to each task (a validator sketch follows this checklist)
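A hedged sketch of how this checklist might be enforced mechanically before task JSONs are written to disk. The vague-language list and metadata fields follow the rules above, but the `acceptance_criteria` field name and the validator itself are illustrative assumptions, not a confirmed schema:

```javascript
// Illustrative validator for the quantification checklist above.
const VAGUE = /\b(comprehensive|complete|thorough)\b/i;

function validateTddTask(task) {
  const errors = [];
  if (task.meta?.tdd_workflow !== true) errors.push("meta.tdd_workflow must be true");
  if (!task.meta?.cli_execution_id) errors.push("meta.cli_execution_id missing");
  if (!task.meta?.cli_execution?.strategy) errors.push("meta.cli_execution strategy missing");
  for (const cycle of task.context?.tdd_cycles ?? []) {
    if (!Number.isInteger(cycle.test_count)) errors.push("tdd_cycles entry missing test_count");
    if (!Array.isArray(cycle.test_cases)) errors.push("tdd_cycles entry missing test_cases");
    if (!/\d+%/.test(cycle.expected_coverage ?? "")) errors.push("coverage target must be a percentage");
  }
  for (const criterion of task.context?.acceptance_criteria ?? []) { // field name assumed
    if (VAGUE.test(criterion)) errors.push(`vague language in criterion: "${criterion}"`);
  }
  return errors;
}
```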
### Agent Execution Summary

@@ -317,20 +504,34 @@ If conflict_risk was medium/high, modifications have been applied to:
- ✓ Quantification requirements enforced (explicit counts, measurable acceptance, exact targets)
- ✓ Task count ≤18 (hard limit)
- ✓ Each task has meta.tdd_workflow: true
- ✓ Each task has exactly 3 implementation steps with tdd_phase field
- ✓ Green phase includes test-fix cycle logic
- ✓ Artifact references mapped correctly
- ✓ MCP tool integration added
- ✓ Each task has exactly 3 implementation steps with tdd_phase field ("red", "green", "refactor")
- ✓ Each task has meta.cli_execution_id and meta.cli_execution strategy
- ✓ Green phase includes test-fix cycle logic with max_iterations
- ✓ focus_paths are absolute or clear relative paths (from exploration critical_files)
- ✓ Artifact references mapped correctly from context package
- ✓ Exploration context integrated (critical_files, constraints, patterns, integration_points)
- ✓ Conflict resolution context applied (if conflict_risk >= medium)
- ✓ Test context integrated (existing test patterns and coverage analysis)
- ✓ Documents follow TDD template structure
- ✓ CLI tool selection based on userConfig.executionMethod

## Output
## SUCCESS CRITERIA
- All planning documents generated successfully:
  - Task JSONs valid and saved to .task/ directory with cli_execution_id
  - IMPL_PLAN.md created with complete TDD structure
  - TODO_LIST.md generated matching task JSONs
  - CLI execution strategies assigned based on task dependencies
- Return completion status with document count and task breakdown summary

Generate all three documents and report completion status:
- TDD task JSON files created: N files (IMPL-*.json)
## OUTPUT SUMMARY
Generate all three documents and report:
- TDD task JSON files created: N files (IMPL-*.json) with cli_execution_id assigned
- TDD cycles configured: N cycles with quantified test cases
- Artifacts integrated: synthesis-spec, guidance-specification, N role analyses
- CLI execution strategies: new/resume/fork/merge_fork assigned per dependency graph
- Artifacts integrated: synthesis-spec/guidance-specification, relevant role analyses
- Exploration context: critical_files, constraints, patterns, integration_points
- Test context integrated: existing patterns and coverage
- MCP enhancements: CodexLens, exa-research
- Conflict resolution: applied (if conflict_risk >= medium)
- Session ready for TDD execution: /workflow:execute
`
)
@@ -338,50 +539,64 @@ Generate all three documents and report completion status:

### Agent Context Passing

**Memory-Aware Context Assembly**:
**Context Delegation Model**: Command provides paths and metadata, agent loads context autonomously using progressive loading strategy.

**Command Provides** (in agent prompt):
```javascript
// Assemble context package for agent
const agentContext = {
  session_id: "WFS-[id]",
// Command assembles these simple values and paths for agent
const commandProvides = {
  // Session paths
  session_metadata_path: ".workflow/active/WFS-{id}/workflow-session.json",
  context_package_path: ".workflow/active/WFS-{id}/.process/context-package.json",
  test_context_package_path: ".workflow/active/WFS-{id}/.process/test-context-package.json",
  output_task_dir: ".workflow/active/WFS-{id}/.task/",
  output_impl_plan: ".workflow/active/WFS-{id}/IMPL_PLAN.md",
  output_todo_list: ".workflow/active/WFS-{id}/TODO_LIST.md",

  // Simple metadata
  session_id: "WFS-{id}",
  workflow_type: "tdd",
  mcp_capabilities: { exa_code: true, exa_web: true, code_index: true },

  // Use memory if available, else load
  session_metadata: memory.has("workflow-session.json")
    ? memory.get("workflow-session.json")
    : Read(.workflow/active/WFS-[id]/workflow-session.json),

  context_package_path: ".workflow/active/WFS-[id]/.process/context-package.json",

  context_package: memory.has("context-package.json")
    ? memory.get("context-package.json")
    : Read(".workflow/active/WFS-[id]/.process/context-package.json"),

  test_context_package_path: ".workflow/active/WFS-[id]/.process/test-context-package.json",

  test_context_package: memory.has("test-context-package.json")
    ? memory.get("test-context-package.json")
    : Read(".workflow/active/WFS-[id]/.process/test-context-package.json"),

  // Extract brainstorm artifacts from context package
  brainstorm_artifacts: extractBrainstormArtifacts(context_package),

  // Load role analyses using paths from context package
  role_analyses: brainstorm_artifacts.role_analyses
    .flatMap(role => role.files)
    .map(file => Read(file.path)),

  // Load conflict resolution if exists (prefer new JSON format)
  conflict_resolution: context_package.conflict_detection?.resolution_file
    ? Read(context_package.conflict_detection.resolution_file) // .process/conflict-resolution.json
    : (brainstorm_artifacts?.conflict_resolution?.exists
        ? Read(brainstorm_artifacts.conflict_resolution.path)
        : null),

  // Optional MCP enhancements
  mcp_analysis: executeMcpDiscovery()
  // User configuration from Phase 0
  user_config: {
    supplementaryMaterials: { type: "...", content: [...] },
    executionMethod: "agent|hybrid|cli",
    preferredCliTool: "codex|gemini|qwen|auto",
    enableResume: true
  }
}
```

**Agent Loads Autonomously** (progressive loading):
```javascript
// Agent executes progressive loading based on memory state
const agentLoads = {
  // Core (ALWAYS load if not in memory)
  session_metadata: loadIfNotInMemory(session_metadata_path),
  context_package: loadIfNotInMemory(context_package_path),

  // Selective (based on progressive strategy)
  // Priority: synthesis_output > guidance + relevant_role_analyses
  brainstorm_content: loadSelectiveBrainstormArtifacts(context_package),

  // On-Demand (load if exists and relevant)
  test_context: loadIfExists(test_context_package_path),
  conflict_resolution: loadConflictResolution(context_package),

  // Optional (if MCP available)
  exploration_results: extractExplorationResults(context_package),
  external_research: executeMcpResearch() // If needed
}
```

**Progressive Loading Implementation** (agent responsibility):
1. **Check memory first** - skip if already loaded
2. **Load core files** - session metadata + context-package.json
3. **Smart selective loading** - synthesis_output OR (guidance + task-relevant role analyses), sketched after this list
4. **On-demand loading** - test context, conflict resolution (if conflict_risk >= medium)
5. **Extract references** - exploration results, artifact paths from context package
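A minimal sketch of step 3's selective rule. It assumes the context package exposes a `brainstorm_artifacts` section with `synthesis_output`, `guidance`, and `role_analyses` entries; those field names are inferred from the surrounding prompt, not a confirmed schema:

```javascript
// Progressive step 3: prefer the consolidated synthesis output; otherwise fall
// back to guidance plus only the role analyses relevant to the current task.
function loadSelectiveBrainstormArtifacts(contextPackage, read, relevantRoles = []) {
  const artifacts = contextPackage.brainstorm_artifacts ?? {};
  if (artifacts.synthesis_output?.path) {
    return { synthesis: read(artifacts.synthesis_output.path) }; // single consolidated file
  }
  const roleFiles = (artifacts.role_analyses ?? [])
    .filter(role => relevantRoles.length === 0 || relevantRoles.includes(role.name))
    .flatMap(role => role.files.map(f => read(f.path)));
  return {
    guidance: artifacts.guidance?.path ? read(artifacts.guidance.path) : null,
    role_analyses: roleFiles,
  };
}
```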
## TDD Task Structure Reference

This section provides quick reference for TDD task JSON structure. For complete implementation details, see the agent invocation prompt in Phase 2 above.
@@ -389,14 +604,31 @@ This section provides quick reference for TDD task JSON structure. For complete
**Quick Reference**:
- Each TDD task contains complete Red-Green-Refactor cycle
- Task ID format: `IMPL-N` (simple) or `IMPL-N.M` (complex subtasks)
- Required metadata: `meta.tdd_workflow: true`, `meta.max_iterations: 3`
- Flow control: Exactly 3 steps with `tdd_phase` field (red, green, refactor)
- Context: `tdd_cycles` array with quantified test cases and coverage
- Required metadata:
  - `meta.tdd_workflow: true`
  - `meta.max_iterations: 3`
  - `meta.cli_execution_id: "{session_id}-{task_id}"`
  - `meta.cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }`
- Context: `tdd_cycles` array with quantified test cases and coverage:
  ```javascript
  tdd_cycles: [
    {
      test_count: 5,                  // Number of test cases to write
      test_cases: ["case1", "case2"], // Enumerated test scenarios
      implementation_scope: "...",    // Files and functions to implement
      expected_coverage: ">=85%"      // Coverage target
    }
  ]
  ```
- Context: `focus_paths` use absolute or clear relative paths
- Flow control: Exactly 3 steps with `tdd_phase` field ("red", "green", "refactor")
- Flow control: `pre_analysis` includes exploration integration_points analysis
- Command field: Added per `userConfig.executionMethod` (agent/hybrid/cli)
- See Phase 2 agent prompt for full schema and requirements

## Output Files Structure
```
.workflow/active//{session-id}/
.workflow/active/{session-id}/
├── IMPL_PLAN.md    # Unified plan with TDD Implementation Tasks section
├── TODO_LIST.md    # Progress tracking with internal TDD phase indicators
├── .task/
@@ -432,9 +664,9 @@ This section provides quick reference for TDD task JSON structure. For complete
- No circular dependencies allowed

### Task Limits
- Maximum 10 total tasks (simple + subtasks)
- Flat hierarchy (≤5 tasks) or two-level (6-10 tasks with containers)
- Re-scope requirements if >10 tasks needed
- Maximum 18 total tasks (simple + subtasks) - hard limit for TDD workflows
- Flat hierarchy (≤5 tasks) or two-level (6-18 tasks with containers)
- Re-scope requirements if >18 tasks needed

### TDD Workflow Validation
- `meta.tdd_workflow` must be true
@@ -454,7 +686,7 @@ This section provides quick reference for TDD task JSON structure. For complete
### TDD Generation Errors
| Error | Cause | Resolution |
|-------|-------|------------|
| Task count exceeds 10 | Too many features or subtasks | Re-scope requirements or merge features |
| Task count exceeds 18 | Too many features or subtasks | Re-scope requirements or merge features into multiple TDD sessions |
| Missing test framework | No test config | Configure testing first |
| Invalid TDD workflow | Missing tdd_phase or incomplete flow_control | Fix TDD structure in ANALYSIS_RESULTS.md |
| Missing tdd_workflow flag | Task doesn't have meta.tdd_workflow: true | Add TDD workflow metadata |
@@ -512,6 +744,6 @@ IMPL (Green phase) tasks include automatic test-fix cycle:


## Configuration Options
- **meta.max_iterations**: Number of fix attempts (default: 3 for TDD, 5 for test-gen)
- **meta.max_iterations**: Number of fix attempts in Green phase (default: 3)
- **CLI tool usage**: Determined semantically from user's task description via `command` field in implementation_approach


@@ -1,10 +1,14 @@
---
name: animation-extract
description: Extract animation and transition patterns from prompt inference and image references for design system documentation
argument-hint: "[--design-id <id>] [--session <id>] [--images "<glob>"] [--focus "<types>"] [--interactive] [--refine]"
argument-hint: "[-y|--yes] [--design-id <id>] [--session <id>] [--images "<glob>"] [--focus "<types>"] [--interactive] [--refine]"
allowed-tools: TodoWrite(*), Read(*), Write(*), Glob(*), Bash(*), AskUserQuestion(*), Task(ui-design-agent)
---

## Auto Mode

When `--yes` or `-y`: Skip all clarification questions, use AI-inferred animation decisions.

# Animation Extraction Command

## Overview

@@ -1,10 +1,14 @@
---
name: layout-extract
description: Extract structural layout information from reference images or text prompts using Claude analysis with variant generation or refinement mode
argument-hint: [--design-id <id>] [--session <id>] [--images "<glob>"] [--prompt "<desc>"] [--targets "<list>"] [--variants <count>] [--device-type <desktop|mobile|tablet|responsive>] [--interactive] [--refine]
argument-hint: "[-y|--yes] [--design-id <id>] [--session <id>] [--images "<glob>"] [--prompt "<desc>"] [--targets "<list>"] [--variants <count>] [--device-type <desktop|mobile|tablet|responsive>] [--interactive] [--refine]"
allowed-tools: TodoWrite(*), Read(*), Write(*), Glob(*), Bash(*), AskUserQuestion(*), Task(ui-design-agent), mcp__exa__web_search_exa(*)
---

## Auto Mode

When `--yes` or `-y`: Skip all clarification questions, use AI-inferred layout decisions.

# Layout Extraction Command

## Overview

@@ -1,10 +1,14 @@
---
name: style-extract
description: Extract design style from reference images or text prompts using Claude analysis with variant generation or refinement mode
argument-hint: "[--design-id <id>] [--session <id>] [--images "<glob>"] [--prompt "<desc>"] [--variants <count>] [--interactive] [--refine]"
argument-hint: "[-y|--yes] [--design-id <id>] [--session <id>] [--images "<glob>"] [--prompt "<desc>"] [--variants <count>] [--interactive] [--refine]"
allowed-tools: TodoWrite(*), Read(*), Write(*), Glob(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y`: Skip all clarification questions, use AI-inferred design decisions.

# Style Extraction Command

## Overview

@@ -15,7 +15,7 @@
    "/workflow:review-session-cycle",
    "/memory:docs",
    "/workflow:brainstorm:artifacts",
    "/workflow:action-plan-verify",
    "/workflow:plan-verify",
    "/version"
  ],

@@ -69,7 +69,7 @@
    "difficulty": "Intermediate",
    "essential": true,
    "flow": {
      "next_steps": ["/workflow:action-plan-verify", "/workflow:execute"],
      "next_steps": ["/workflow:plan-verify", "/workflow:execute"],
      "alternatives": ["/workflow:tdd-plan"]
    },
    "source": "../../../commands/workflow/plan.md"
@@ -89,8 +89,8 @@
    "source": "../../../commands/workflow/execute.md"
  },
  {
    "name": "action-plan-verify",
    "command": "/workflow:action-plan-verify",
    "name": "plan-verify",
    "command": "/workflow:plan-verify",
    "description": "Cross-artifact consistency analysis",
    "arguments": "[--session session-id]",
    "category": "workflow",
@@ -100,7 +100,7 @@
      "prerequisites": ["/workflow:plan"],
      "next_steps": ["/workflow:execute"]
    },
    "source": "../../../commands/workflow/action-plan-verify.md"
    "source": "../../../commands/workflow/plan-verify.md"
  },
  {
    "name": "init",

@@ -144,7 +144,7 @@ def build_command_relationships() -> Dict[str, Any]:
    return {
        "workflow:plan": {
            "calls_internally": ["workflow:session:start", "workflow:tools:context-gather", "workflow:tools:conflict-resolution", "workflow:tools:task-generate-agent"],
            "next_steps": ["workflow:action-plan-verify", "workflow:status", "workflow:execute"],
            "next_steps": ["workflow:plan-verify", "workflow:status", "workflow:execute"],
            "alternatives": ["workflow:tdd-plan"],
            "prerequisites": []
        },
@@ -159,7 +159,7 @@ def build_command_relationships() -> Dict[str, Any]:
            "related": ["workflow:status", "workflow:resume"],
            "next_steps": ["workflow:review", "workflow:tdd-verify"]
        },
        "workflow:action-plan-verify": {
        "workflow:plan-verify": {
            "prerequisites": ["workflow:plan"],
            "next_steps": ["workflow:execute"],
            "related": ["workflow:status"]
@@ -217,7 +217,7 @@ def identify_essential_commands(all_commands: List[Dict]) -> List[Dict]:
        "workflow:execute", "workflow:status", "workflow:session:start",
        "workflow:review-session-cycle", "cli:analyze", "cli:chat",
        "memory:docs", "workflow:brainstorm:artifacts",
        "workflow:action-plan-verify", "workflow:resume", "version"
        "workflow:plan-verify", "workflow:resume", "version"
    ]

    essential = []

@@ -1,522 +0,0 @@
---
name: ccw
description: Stateless workflow orchestrator. Auto-selects optimal workflow based on task intent. Triggers "ccw", "workflow".
allowed-tools: Task(*), SlashCommand(*), AskUserQuestion(*), Read(*), Bash(*), Grep(*), TodoWrite(*)
---

# CCW - Claude Code Workflow Orchestrator

Stateless workflow orchestrator that automatically selects the optimal workflow based on task intent.

## Workflow System Overview

CCW provides two workflow systems, **Main Workflow** and **Issue Workflow**, which together cover the full software development lifecycle.

```
┌─────────────────────────────────────────────────────────────────────────────┐
│                                Main Workflow                                │
│                                                                             │
│  ┌─────────────┐   ┌─────────────┐   ┌─────────────┐   ┌─────────────┐      │
│  │   Level 1   │ → │   Level 2   │ → │   Level 3   │ → │   Level 4   │      │
│  │    Rapid    │   │ Lightweight │   │  Standard   │   │ Brainstorm  │      │
│  │             │   │             │   │             │   │             │      │
│  │ lite-lite-  │   │ lite-plan   │   │ plan        │   │ brainstorm  │      │
│  │ lite        │   │ lite-fix    │   │ tdd-plan    │   │ :auto-      │      │
│  │             │   │ multi-cli-  │   │ test-fix-   │   │ parallel    │      │
│  │             │   │ plan        │   │ gen         │   │   ↓         │      │
│  │             │   │             │   │             │   │ plan        │      │
│  └─────────────┘   └─────────────┘   └─────────────┘   └─────────────┘      │
│                                                                             │
│  Complexity: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━▶  │
│              Low                                                     High   │
└─────────────────────────────────────────────────────────────────────────────┘
                                      │
                                      │ After development
                                      ▼
┌─────────────────────────────────────────────────────────────────────────────┐
│                               Issue Workflow                                │
│                                                                             │
│  ┌──────────────┐      ┌──────────────┐      ┌──────────────┐               │
│  │  Accumulate  │  →   │     Plan     │  →   │   Execute    │               │
│  │  Discover &  │      │    Batch     │      │   Parallel   │               │
│  │   Collect    │      │   Planning   │      │  Execution   │               │
│  └──────────────┘      └──────────────┘      └──────────────┘               │
│                                                                             │
│  Supplementary role: Maintain main branch stability, worktree isolation     │
└─────────────────────────────────────────────────────────────────────────────┘
```

## Architecture

```
┌─────────────────────────────────────────────────────────────────┐
│  CCW Orchestrator (CLI-Enhanced + Requirement Analysis)         │
├─────────────────────────────────────────────────────────────────┤
│  Phase 1    │ Input Analysis (rule-based, fast path)            │
│  Phase 1.5  │ CLI Classification (semantic, smart path)         │
│  Phase 1.75 │ Requirement Clarification (clarity < 2)           │
│  Phase 2    │ Level Selection (intent → level → workflow)       │
│  Phase 2.5  │ CLI Action Planning (high complexity)             │
│  Phase 3    │ User Confirmation (optional)                      │
│  Phase 4    │ TODO Tracking Setup                               │
│  Phase 5    │ Execution Loop                                    │
└─────────────────────────────────────────────────────────────────┘
```

## Level Quick Reference

| Level | Name | Workflows | Artifacts | Execution |
|-------|------|-----------|-----------|-----------|
| **1** | Rapid | `lite-lite-lite` | None | Direct execute |
| **2** | Lightweight | `lite-plan`, `lite-fix`, `multi-cli-plan` | Memory/Lightweight files | → `lite-execute` |
| **3** | Standard | `plan`, `tdd-plan`, `test-fix-gen` | Session persistence | → `execute` / `test-cycle-execute` |
| **4** | Brainstorm | `brainstorm:auto-parallel` → `plan` | Multi-role analysis + Session | → `execute` |
| **-** | Issue | `discover` → `plan` → `queue` → `execute` | Issue records | Worktree isolation (optional) |

## Workflow Selection Decision Tree

```
Start
  │
  ├─ Is it post-development maintenance?
  │    ├─ Yes → Issue Workflow
  │    └─ No ↓
  │
  ├─ Are requirements clear?
  │    ├─ Uncertain → Level 4 (brainstorm:auto-parallel)
  │    └─ Clear ↓
  │
  ├─ Need persistent Session?
  │    ├─ Yes → Level 3 (plan / tdd-plan / test-fix-gen)
  │    └─ No ↓
  │
  ├─ Need multi-perspective / solution comparison?
  │    ├─ Yes → Level 2 (multi-cli-plan)
  │    └─ No ↓
  │
  ├─ Is it a bug fix?
  │    ├─ Yes → Level 2 (lite-fix)
  │    └─ No ↓
  │
  ├─ Need planning?
  │    ├─ Yes → Level 2 (lite-plan)
  │    └─ No → Level 1 (lite-lite-lite)
```

## Intent Classification

### Priority Order (with Level Mapping)

The table below maps intent patterns to levels and flows; a condensed classifier sketch follows it.

| Priority | Intent | Patterns | Level | Flow |
|----------|--------|----------|-------|------|
| 1 | bugfix/hotfix | `urgent,production,critical` + bug | L2 | `bugfix.hotfix` |
| 1 | bugfix | `fix,bug,error,crash,fail` | L2 | `bugfix.standard` |
| 2 | issue batch | `issues,batch` + `fix,resolve` | Issue | `issue` |
| 3 | exploration | `不确定,explore,研究,what if` | L4 | `full` |
| 3 | multi-perspective | `多视角,权衡,比较方案,cross-verify` | L2 | `multi-cli-plan` |
| 4 | quick-task | `快速,简单,small,quick` + feature | L1 | `lite-lite-lite` |
| 5 | ui design | `ui,design,component,style` | L3/L4 | `ui` |
| 6 | tdd | `tdd,test-driven,先写测试` | L3 | `tdd` |
| 7 | test-fix | `测试失败,test fail,fix test` | L3 | `test-fix-gen` |
| 8 | review | `review,审查,code review` | L3 | `review-fix` |
| 9 | documentation | `文档,docs,readme` | L2 | `docs` |
| 99 | feature | complexity-based | L2/L3 | `rapid`/`coupled` |
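A condensed sketch of the priority walk the table encodes. Only a few rules are shown and the regexes are abbreviations of the documented pattern lists; the full patterns live in command.json's intent_rules:

```javascript
// Walk intent rules in priority order; the first match wins.
const INTENT_RULES = [
  { intent: "bugfix.hotfix",   level: "L2", test: t => /urgent|production|critical/.test(t) && /bug|fix/.test(t) },
  { intent: "bugfix.standard", level: "L2", test: t => /fix|bug|error|crash|fail/.test(t) },
  { intent: "exploration",     level: "L4", test: t => /explore|what if|不确定|研究/.test(t) },
  { intent: "quick-task",      level: "L1", test: t => /quick|simple|small|快速|简单/.test(t) },
];

function classifyIntent(input) {
  const text = input.toLowerCase();
  for (const rule of INTENT_RULES) {
    if (rule.test(text)) return { intent: rule.intent, level: rule.level };
  }
  // Priority 99: fall through to complexity-based feature routing.
  return { intent: "feature", level: null };
}
```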
### Quick Selection Guide

| Scenario | Recommended Workflow | Level |
|----------|---------------------|-------|
| Quick fixes, config adjustments | `lite-lite-lite` | 1 |
| Clear single-module features | `lite-plan → lite-execute` | 2 |
| Bug diagnosis and fix | `lite-fix` | 2 |
| Production emergencies | `lite-fix --hotfix` | 2 |
| Technology selection, solution comparison | `multi-cli-plan → lite-execute` | 2 |
| Multi-module changes, refactoring | `plan → verify → execute` | 3 |
| Test-driven development | `tdd-plan → execute → tdd-verify` | 3 |
| Test failure fixes | `test-fix-gen → test-cycle-execute` | 3 |
| New features, architecture design | `brainstorm:auto-parallel → plan → execute` | 4 |
| Post-development issue fixes | Issue Workflow | - |

### Complexity Assessment

```javascript
function assessComplexity(text) {
  let score = 0
  if (/refactor|重构|migrate|迁移|architect|架构|system|系统/.test(text)) score += 2
  if (/multiple|多个|across|跨|all|所有|entire|整个/.test(text)) score += 2
  if (/integrate|集成|api|database|数据库/.test(text)) score += 1
  if (/security|安全|performance|性能|scale|扩展/.test(text)) score += 1
  return score >= 4 ? 'high' : score >= 2 ? 'medium' : 'low'
}
```

| Complexity | Flow |
|------------|------|
| high | `coupled` (plan → verify → execute) |
| medium/low | `rapid` (lite-plan → lite-execute) |

### Dimension Extraction (WHAT/WHERE/WHY/HOW)

Four dimensions are extracted from the user's input and used for requirement clarification and workflow selection:

| Dimension | Extracted Content | Example Patterns |
|-----------|-------------------|------------------|
| **WHAT** | action + target | `创建/修复/重构/优化/分析` + target object |
| **WHERE** | scope + paths | `file/module/system` + file paths |
| **WHY** | goal + motivation | `为了.../因为.../目的是...` |
| **HOW** | constraints + preferences | `必须.../不要.../应该...` |

**Clarity Score** (0-3, transcribed into a code sketch after this list):
- +0.5: explicit action present
- +0.5: concrete target present
- +0.5: file paths given
- +0.5: scope is not unknown
- +0.5: explicit goal present
- +0.5: constraints stated
- -0.5: contains uncertainty words (`不知道/maybe/怎么`)
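A direct transcription of the scoring rules above; the `dimensions` shape is assumed from the WHAT/WHERE/WHY/HOW table:

```javascript
// Score requirement clarity from the extracted dimensions (0-3, per the rules above).
function clarityScore(dimensions, rawInput) {
  let score = 0;
  if (dimensions.action) score += 0.5;                              // explicit action
  if (dimensions.target) score += 0.5;                              // concrete target
  if (dimensions.paths?.length) score += 0.5;                       // file paths given
  if (dimensions.scope && dimensions.scope !== "unknown") score += 0.5;
  if (dimensions.goal) score += 0.5;                                // explicit goal
  if (dimensions.constraints?.length) score += 0.5;                 // constraints stated
  if (/不知道|maybe|怎么/.test(rawInput)) score -= 0.5;             // uncertainty words
  return Math.max(0, Math.min(3, score));
}
```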
### Requirement Clarification

Requirement clarification is triggered when `clarity_score < 2`:

```javascript
if (dimensions.clarity_score < 2) {
  const questions = generateClarificationQuestions(dimensions)
  // Generate questions: What is the goal? What is the scope? What are the constraints?
  AskUserQuestion({ questions })
}
```

**Clarification question types**:
- Unclear target → "What do you want to operate on?"
- Unclear scope → "What is the scope of the operation?"
- Unclear goal → "What is the main goal of this operation?"
- Complex operation → "Are there any special requirements or constraints?"

## TODO Tracking Protocol

### CRITICAL: Append-Only Rule

Todos created by CCW **must be appended to the existing list**; they must never overwrite the user's other todos.

### Implementation

```javascript
// 1. Use the CCW prefix to isolate workflow todos
const prefix = `CCW:${flowName}`

// 2. Use the prefix format when creating new todos
TodoWrite({
  todos: [
    ...existingNonCCWTodos, // keep the user's todos
    { content: `${prefix}: [1/N] /command:step1`, status: "in_progress", activeForm: "..." },
    { content: `${prefix}: [2/N] /command:step2`, status: "pending", activeForm: "..." }
  ]
})

// 3. When updating status, only modify todos that match the prefix
```

### Todo Format

```
CCW:{flow}: [{N}/{Total}] /command:name
```

### Visual Example

```
✓ CCW:rapid: [1/2] /workflow:lite-plan
→ CCW:rapid: [2/2] /workflow:lite-execute
  (the user's own todos remain untouched)
```

### Status Management

- Workflow start: create todos for every step, mark the first step `in_progress`
- Step completion: mark the current step `completed`, the next step `in_progress`
- Workflow end: mark all CCW todos `completed` (a step-advance sketch follows)
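A minimal sketch of the step-advance rule, operating only on todos that carry the CCW prefix. The helper name is illustrative, and it assumes CCW steps sit adjacently in the list (which the append pattern above produces):

```javascript
// Advance the workflow: complete the current CCW step, start the next one.
// Non-CCW todos pass through untouched (append-only rule).
function advanceWorkflowTodos(todos, prefix) {
  const idx = todos.findIndex(t => t.content.startsWith(prefix) && t.status === "in_progress");
  if (idx === -1) return todos; // no active CCW step
  return todos.map((t, i) => {
    if (i === idx) return { ...t, status: "completed" };
    if (i === idx + 1 && t.content.startsWith(prefix)) return { ...t, status: "in_progress" };
    return t;
  });
}
```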
## Execution Flow

```javascript
// 1. Check explicit command
if (input.startsWith('/workflow:') || input.startsWith('/issue:')) {
  SlashCommand(input)
  return
}

// 2. Classify intent
const intent = classifyIntent(input) // See command.json intent_rules

// 3. Select flow
const flow = selectFlow(intent) // See command.json flows

// 4. Create todos with CCW prefix
createWorkflowTodos(flow)

// 5. Dispatch first command
SlashCommand(flow.steps[0].command, args: input)
```

## CLI Tool Integration

CCW automatically injects CLI calls under specific conditions (a decision sketch follows the table):

| Condition | CLI Inject |
|-----------|------------|
| Large code context (≥50k chars) | `gemini --mode analysis` |
| High-complexity task | `gemini --mode analysis` |
| Bug diagnosis | `gemini --mode analysis` |
| Multi-task execution (≥3 tasks) | `codex --mode write` |
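The injection table as a decision function; the thresholds are copied from the table, while the input and returned hint shapes are illustrative:

```javascript
// Decide whether to inject a CLI call, per the conditions above.
function cliInjectionHint({ contextChars, complexity, isBugDiagnosis, taskCount }) {
  if (contextChars >= 50_000 || complexity === "high" || isBugDiagnosis) {
    return { tool: "gemini", mode: "analysis" }; // analysis-heavy situations
  }
  if (taskCount >= 3) {
    return { tool: "codex", mode: "write" };     // multi-task execution
  }
  return null; // no injection
}
```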
### CLI Enhancement Phases

**Phase 1.5: CLI-Assisted Classification**

When rule matching is ambiguous, CLI-assisted classification is used:

| Trigger | Description |
|---------|-------------|
| matchCount < 2 | Multiple intent patterns matched |
| complexity = high | High-complexity task |
| input > 100 chars | Long input requires semantic understanding |

**Phase 2.5: CLI-Assisted Action Planning**

Workflow optimization for high-complexity tasks:

| Trigger | Description |
|---------|-------------|
| complexity = high | High-complexity task |
| steps >= 3 | Multi-step workflow |
| input > 200 chars | Complex requirement description |

The CLI may return a recommendation: `use_default` | `modify` (adjust steps) | `upgrade` (upgrade the workflow)

## Continuation Commands

User control commands during workflow execution:

| Command | Effect |
|---------|--------|
| `continue` | Continue to the next step |
| `skip` | Skip the current step |
| `abort` | Abort the workflow |
| `/workflow:*` | Switch to the specified command |
| Natural language | Re-analyze intent |

## Workflow Flow Details

### Issue Workflow (Supplementary to the Main Workflow)

The Issue Workflow is a **supplementary mechanism** to the Main Workflow, focused on continuous post-development maintenance.

#### Design Philosophy

| Aspect | Main Workflow | Issue Workflow |
|--------|---------------|----------------|
| **Purpose** | Primary development cycle | Post-development maintenance |
| **Timing** | Feature development phase | After the main workflow completes |
| **Scope** | Full feature implementation | Targeted fixes/enhancements |
| **Parallelism** | Dependency analysis → parallel agents | Worktree isolation (optional) |
| **Branch model** | Work on the current branch | May use an isolated worktree |

#### Why Doesn't the Main Workflow Use Worktrees Automatically?

**Dependency analysis already solves the parallelism problem**:
1. The planning phase (`/workflow:plan`) performs dependency analysis
2. Task dependencies and the critical path are identified automatically
3. Tasks are partitioned into **parallel groups** (independent tasks) and **serial chains** (dependent tasks)
4. Agents execute independent tasks in parallel, with no need for filesystem isolation

#### Two-Phase Lifecycle

```
┌─────────────────────────────────────────────────────────────────────┐
│  Phase 1: Accumulation                                              │
│                                                                     │
│  Triggers: post-task review, code review findings, test failures    │
│                                                                     │
│  ┌────────────┐   ┌────────────┐   ┌────────────┐                   │
│  │  discover  │   │  discover- │   │    new     │                   │
│  │  Auto-find │   │  by-prompt │   │   Manual   │                   │
│  └────────────┘   └────────────┘   └────────────┘                   │
│                                                                     │
│  Issues accumulate continuously into the pending queue              │
└─────────────────────────────────────────────────────────────────────┘
                                │
                                │ once enough have accumulated
                                ▼
┌─────────────────────────────────────────────────────────────────────┐
│  Phase 2: Batch Resolution                                          │
│                                                                     │
│  ┌────────────┐      ┌────────────┐      ┌────────────┐             │
│  │    plan    │ ──→  │   queue    │ ──→  │  execute   │             │
│  │   --all-   │      │  Optimize  │      │  Parallel  │             │
│  │   pending  │      │   order    │      │ execution  │             │
│  └────────────┘      └────────────┘      └────────────┘             │
│                                                                     │
│  Supports worktree isolation to keep the main branch stable         │
└─────────────────────────────────────────────────────────────────────┘
```

#### Collaboration with the Main Workflow

```
Development iteration loop
┌─────────────────────────────────────────────────────────────────────┐
│                                                                     │
│  ┌─────────┐                                  ┌─────────┐           │
│  │ Feature │ ──→ Main Workflow ──→ Done ──→   │ Review  │           │
│  │ Request │     (Level 1-4)                  └────┬────┘           │
│  └─────────┘                                       │                │
│       ▲                                            │ Issues found   │
│       │                                            ▼                │
│       │                                       ┌─────────┐           │
│  continue                                     │  Issue  │           │
│  new features                                 │ Workflow│           │
│       │                                       └────┬────┘           │
│       │        ┌───────────────────────────────────┘                │
│       │        │ fixes complete                                     │
│       │        ▼                                                    │
│  ┌────┴────┐◀──────                                                 │
│  │  Main   │  Merge                                                 │
│  │ Branch  │  back                                                  │
│  └─────────┘                                                        │
│                                                                     │
└─────────────────────────────────────────────────────────────────────┘
```

#### Command List

**Accumulation phase:**
```bash
/issue:discover            # multi-perspective auto-discovery
/issue:discover-by-prompt  # prompt-based discovery
/issue:new                 # manual creation
```

**Batch resolution phase:**
```bash
/issue:plan --all-pending  # batch-plan all pending issues
/issue:queue               # generate an optimized execution queue
/issue:execute             # parallel execution
```

### lite-lite-lite vs multi-cli-plan

| Dimension | lite-lite-lite | multi-cli-plan |
|-----------|----------------|----------------|
| **Artifacts** | No files | IMPL_PLAN.md + plan.json + synthesis.json |
| **State** | Stateless | Persistent session |
| **CLI selection** | Automatic, based on task-type analysis | Configuration-driven |
| **Iteration** | Via AskUser | Multi-round convergence |
| **Execution** | Direct | Via lite-execute |
| **Best for** | Quick fixes, simple features | Complex multi-step implementations |

**Selection guide**:
- Clear task, small change scope → `lite-lite-lite`
- Needs multi-perspective analysis or complex architecture → `multi-cli-plan`

### multi-cli-plan vs lite-plan

| Dimension | multi-cli-plan | lite-plan |
|-----------|----------------|-----------|
| **Context** | ACE semantic search | Manual file patterns |
| **Analysis** | Multi-CLI cross-verification | Single-pass planning |
| **Iteration** | Multiple rounds until convergence | Single round |
| **Confidence** | High (consensus-driven) | Medium (single perspective) |
| **Best for** | Complex tasks needing multiple perspectives | Straightforward implementations |

**Selection guide**:
- Clear requirements and paths → `lite-plan`
- Needs trade-off analysis or comparison of alternatives → `multi-cli-plan`

## Artifact Flow Protocol

The automatic handoff mechanism for workflow artifacts, supporting intent extraction and completion assessment across artifacts of different formats.

### Artifact Formats

| Command | Output Location | Format | Key Fields |
|---------|-----------------|--------|------------|
| `/workflow:lite-plan` | memory://plan | structured_plan | tasks, files, dependencies |
| `/workflow:plan` | .workflow/{session}/IMPL_PLAN.md | markdown_plan | phases, tasks, risks |
| `/workflow:execute` | execution_log.json | execution_report | completed_tasks, errors |
| `/workflow:test-cycle-execute` | test_results.json | test_report | pass_rate, failures, coverage |
| `/workflow:review-session-cycle` | review_report.md | review_report | findings, severity_counts |

### Intent Extraction

When handing off to the next step, key information is extracted automatically:

```
plan → execute:
  extract: tasks (incomplete), priority_order, files_to_modify, context_summary

execute → test:
  extract: modified_files, test_scope (inferred), pending_verification

test → fix:
  condition: pass_rate < 0.95
  extract: failures, error_messages, affected_files, suggested_fixes

review → fix:
  condition: critical > 0 OR high > 3
  extract: findings (critical/high), fix_priority, affected_files
```

### Completion Assessment

Both routing tables below are transcribed into a dispatcher sketch that follows them.

**Test completion routing**:
```
pass_rate >= 0.95 AND coverage >= 0.80 → complete
pass_rate >= 0.95 AND coverage < 0.80 → add_more_tests
pass_rate >= 0.80                     → fix_failures_then_continue
pass_rate < 0.80                      → major_fix_required
```

**Review completion routing**:
```
critical == 0 AND high <= 3 → complete_or_optional_fix
critical > 0                → mandatory_fix
high > 3                    → recommended_fix
```
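A sketch of the two routing tables as a single dispatcher; `evaluateCompletion` is the name the usage example further below assumes:

```javascript
// Route to the next step from test or review results, per the tables above.
function evaluateCompletion(kind, result) {
  if (kind === "test") {
    const { pass_rate, coverage } = result;
    if (pass_rate >= 0.95 && coverage >= 0.80) return "complete";
    if (pass_rate >= 0.95) return "add_more_tests";
    if (pass_rate >= 0.80) return "fix_failures_then_continue";
    return "major_fix_required";
  }
  if (kind === "review") {
    const { critical, high } = result.severity_counts;
    if (critical > 0) return "mandatory_fix";   // mandatory fixes first
    if (high > 3) return "recommended_fix";
    return "complete_or_optional_fix";
  }
  throw new Error(`unknown completion kind: ${kind}`);
}
```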
### Handoff Decision Patterns

**plan_execute_test**:
```
plan → execute → test
         ↓ (if tests fail)
extract_failures → fix → test (max 3 iterations)
         ↓ (if still failing)
manual_intervention
```

**iterative_improvement**:
```
execute → test → fix → test → ...
loop until: pass_rate >= 0.95 OR iterations >= 3
```

### Usage Example

```javascript
// After execution completes, decide the next step from the artifacts
const result = await execute(plan)

// Extract intent and hand off to testing
const testContext = extractIntent('execute_to_test', result)
// testContext = { modified_files, test_scope, pending_verification }

// After testing, route based on completion status
const testResult = await test(testContext)
const nextStep = evaluateCompletion('test', testResult)
// nextStep = 'fix_failures_then_continue' if pass_rate = 0.85
```

## Reference

- [command.json](command.json) - command metadata, flow definitions, intent rules, artifact flow
@@ -1,641 +0,0 @@
{
  "_metadata": {
    "version": "2.0.0",
    "description": "Unified CCW command index with capabilities, flows, and intent rules"
  },

  "capabilities": {
    "explore": {
      "description": "Codebase exploration and context gathering",
      "commands": ["/workflow:init", "/workflow:tools:gather", "/memory:load"],
      "agents": ["cli-explore-agent", "context-search-agent"]
    },
    "brainstorm": {
      "description": "Multi-perspective analysis and ideation",
      "commands": ["/workflow:brainstorm:auto-parallel", "/workflow:brainstorm:artifacts", "/workflow:brainstorm:synthesis"],
      "roles": ["product-manager", "system-architect", "ux-expert", "data-architect", "api-designer"]
    },
    "plan": {
      "description": "Task planning and decomposition",
      "commands": ["/workflow:lite-plan", "/workflow:plan", "/workflow:tdd-plan", "/task:create", "/task:breakdown"],
      "agents": ["cli-lite-planning-agent", "action-planning-agent"]
    },
    "verify": {
      "description": "Plan and quality verification",
      "commands": ["/workflow:action-plan-verify", "/workflow:tdd-verify"]
    },
    "execute": {
      "description": "Task execution and implementation",
      "commands": ["/workflow:lite-execute", "/workflow:execute", "/task:execute"],
      "agents": ["code-developer", "cli-execution-agent", "universal-executor"]
    },
    "bugfix": {
      "description": "Bug diagnosis and fixing",
      "commands": ["/workflow:lite-fix"],
      "agents": ["code-developer"]
    },
    "test": {
      "description": "Test generation and execution",
      "commands": ["/workflow:test-gen", "/workflow:test-fix-gen", "/workflow:test-cycle-execute"],
      "agents": ["test-fix-agent"]
    },
    "review": {
      "description": "Code review and quality analysis",
      "commands": ["/workflow:review-session-cycle", "/workflow:review-module-cycle", "/workflow:review", "/workflow:review-fix"]
    },
    "issue": {
      "description": "Issue lifecycle management - discover, accumulate, batch resolve",
      "commands": ["/issue:new", "/issue:discover", "/issue:discover-by-prompt", "/issue:plan", "/issue:queue", "/issue:execute", "/issue:manage"],
      "agents": ["issue-plan-agent", "issue-queue-agent", "cli-explore-agent"],
      "lifecycle": {
        "accumulation": {
          "description": "Requirement expansion, bug analysis, and test discovery after task completion",
          "triggers": ["post-task review", "code review findings", "test failures"],
          "commands": ["/issue:discover", "/issue:discover-by-prompt", "/issue:new"]
        },
        "batch_resolution": {
          "description": "Centralized planning and parallel execution of accumulated issues",
          "flow": ["plan", "queue", "execute"],
          "commands": ["/issue:plan --all-pending", "/issue:queue", "/issue:execute"]
        }
      }
    },
    "ui-design": {
      "description": "UI design and prototyping",
      "commands": ["/workflow:ui-design:explore-auto", "/workflow:ui-design:imitate-auto", "/workflow:ui-design:design-sync"],
      "agents": ["ui-design-agent"]
    },
    "memory": {
      "description": "Documentation and knowledge management",
      "commands": ["/memory:docs", "/memory:update-related", "/memory:update-full", "/memory:skill-memory"],
      "agents": ["doc-generator", "memory-bridge"]
    }
  },

  "flows": {
    "_level_guide": {
      "L1": "Rapid - No artifacts, direct execution",
      "L2": "Lightweight - Memory/lightweight files, → lite-execute",
      "L3": "Standard - Session persistence, → execute/test-cycle-execute",
      "L4": "Brainstorm - Multi-role analysis + Session, → execute"
    },
    "lite-lite-lite": {
      "name": "Ultra-Rapid Execution",
      "level": "L1",
      "description": "Zero files + automatic CLI selection + semantic description + direct execution",
      "complexity": ["low"],
      "artifacts": "none",
      "steps": [
        { "phase": "clarify", "description": "Requirement clarification (AskUser if needed)" },
        { "phase": "auto-select", "description": "Task analysis → automatic CLI combination selection" },
        { "phase": "multi-cli", "description": "Parallel multi-CLI analysis" },
        { "phase": "decision", "description": "Present results → AskUser decision" },
        { "phase": "execute", "description": "Direct execution (no intermediate files)" }
      ],
      "cli_hints": {
        "analysis": { "tool": "auto", "mode": "analysis", "parallel": true },
        "execution": { "tool": "auto", "mode": "write" }
      },
      "estimated_time": "10-30 min"
    },
    "rapid": {
      "name": "Rapid Iteration",
      "level": "L2",
      "description": "In-memory planning + direct execution",
      "complexity": ["low", "medium"],
      "artifacts": "memory://plan",
      "steps": [
        { "command": "/workflow:lite-plan", "optional": false, "auto_continue": true },
        { "command": "/workflow:lite-execute", "optional": false }
      ],
      "cli_hints": {
        "explore_phase": { "tool": "gemini", "mode": "analysis", "trigger": "needs_exploration" },
        "execution": { "tool": "codex", "mode": "write", "trigger": "complexity >= medium" }
      },
      "estimated_time": "15-45 min"
    },
    "multi-cli-plan": {
      "name": "Multi-CLI Collaborative Planning",
      "level": "L2",
      "description": "ACE context + multi-CLI collaborative analysis + iterative convergence + plan generation",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/.multi-cli-plan/{session}/",
      "steps": [
        { "command": "/workflow:multi-cli-plan", "optional": false, "phases": [
          "context_gathering: ACE semantic search",
          "multi_cli_discussion: multi-round analysis by cli-discuss-agent",
          "present_options: present candidate solutions",
          "user_decision: user selection",
          "plan_generation: plan generated by cli-lite-planning-agent"
        ]},
        { "command": "/workflow:lite-execute", "optional": false }
      ],
      "vs_lite_plan": {
        "context": "ACE semantic search vs Manual file patterns",
        "analysis": "Multi-CLI cross-verification vs Single-pass planning",
        "iteration": "Multiple rounds until convergence vs Single round",
        "confidence": "High (consensus-based) vs Medium (single perspective)",
        "best_for": "Complex tasks needing multiple perspectives vs Straightforward implementations"
      },
      "agents": ["cli-discuss-agent", "cli-lite-planning-agent"],
      "cli_hints": {
        "discussion": { "tools": ["gemini", "codex", "claude"], "mode": "analysis", "parallel": true },
        "planning": { "tool": "gemini", "mode": "analysis" }
      },
      "estimated_time": "30-90 min"
    },
    "coupled": {
      "name": "Standard Planning",
      "level": "L3",
      "description": "Full planning + verification + execution",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/active/{session}/",
      "steps": [
        { "command": "/workflow:plan", "optional": false },
        { "command": "/workflow:action-plan-verify", "optional": false, "auto_continue": true },
        { "command": "/workflow:execute", "optional": false },
        { "command": "/workflow:review", "optional": true }
      ],
      "cli_hints": {
        "pre_analysis": { "tool": "gemini", "mode": "analysis", "trigger": "always" },
        "execution": { "tool": "codex", "mode": "write", "trigger": "always" }
      },
      "estimated_time": "2-4 hours"
    },
    "full": {
      "name": "Full Exploration (Brainstorm)",
      "level": "L4",
      "description": "Brainstorm + plan + execute",
      "complexity": ["high"],
      "artifacts": ".workflow/active/{session}/.brainstorming/",
      "steps": [
        { "command": "/workflow:brainstorm:auto-parallel", "optional": false, "confirm_before": true },
        { "command": "/workflow:plan", "optional": false },
        { "command": "/workflow:action-plan-verify", "optional": true, "auto_continue": true },
        { "command": "/workflow:execute", "optional": false }
      ],
      "cli_hints": {
        "role_analysis": { "tool": "gemini", "mode": "analysis", "trigger": "always", "parallel": true },
        "execution": { "tool": "codex", "mode": "write", "trigger": "task_count >= 3" }
      },
      "estimated_time": "1-3 hours"
    },
    "bugfix": {
      "name": "Bug Fix",
      "level": "L2",
      "description": "Intelligent diagnosis + fix (5 phases)",
      "complexity": ["low", "medium"],
      "artifacts": ".workflow/.lite-fix/{bug-slug}-{date}/",
      "variants": {
        "standard": [{ "command": "/workflow:lite-fix", "optional": false }],
        "hotfix": [{ "command": "/workflow:lite-fix --hotfix", "optional": false }]
      },
      "phases": [
        "Phase 1: Bug Analysis & Diagnosis (severity pre-assessment)",
        "Phase 2: Clarification (optional, AskUserQuestion)",
        "Phase 3: Fix Planning (Low/Medium → Claude, High/Critical → cli-lite-planning-agent)",
        "Phase 4: Confirmation & Selection",
        "Phase 5: Execute (→ lite-execute --mode bugfix)"
      ],
      "cli_hints": {
        "diagnosis": { "tool": "gemini", "mode": "analysis", "trigger": "always" },
        "fix": { "tool": "codex", "mode": "write", "trigger": "severity >= medium" }
      },
      "estimated_time": "10-30 min"
    },
    "issue": {
      "name": "Issue Lifecycle",
      "level": "Supplementary",
      "description": "Discover & accumulate → batch planning → queue optimization → parallel execution (supplementary to the Main Workflow)",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/.issues/",
      "purpose": "Post-development continuous maintenance, maintain main branch stability",
      "phases": {
        "accumulation": {
          "description": "Continuously discover and accumulate issues across project iterations",
          "commands": ["/issue:discover", "/issue:discover-by-prompt", "/issue:new"],
          "trigger": "post-task, code-review, test-failure"
        },
        "resolution": {
          "description": "Centrally plan and execute accumulated issues",
          "steps": [
            { "command": "/issue:plan --all-pending", "optional": false },
            { "command": "/issue:queue", "optional": false },
            { "command": "/issue:execute", "optional": false }
          ]
        }
      },
      "worktree_support": {
        "description": "Optional worktree isolation to keep the main branch stable",
        "use_case": "Issue fixes after main development completes"
      },
      "cli_hints": {
        "discovery": { "tool": "gemini", "mode": "analysis", "trigger": "perspective_analysis", "parallel": true },
        "solution_generation": { "tool": "gemini", "mode": "analysis", "trigger": "always", "parallel": true },
        "batch_execution": { "tool": "codex", "mode": "write", "trigger": "always" }
      },
      "estimated_time": "1-4 hours"
    },
    "tdd": {
      "name": "Test-Driven Development",
      "level": "L3",
      "description": "TDD planning + execution + verification (6 phases)",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/active/{session}/",
      "steps": [
        { "command": "/workflow:tdd-plan", "optional": false },
        { "command": "/workflow:action-plan-verify", "optional": true, "auto_continue": true },
        { "command": "/workflow:execute", "optional": false },
        { "command": "/workflow:tdd-verify", "optional": false }
      ],
      "tdd_structure": {
        "description": "Each IMPL task contains complete internal Red-Green-Refactor cycle",
        "meta": "tdd_workflow: true",
        "flow_control": "implementation_approach contains 3 steps (red/green/refactor)"
      },
      "cli_hints": {
        "test_strategy": { "tool": "gemini", "mode": "analysis", "trigger": "always" },
        "red_green_refactor": { "tool": "codex", "mode": "write", "trigger": "always" }
      },
      "estimated_time": "1-3 hours"
    },
    "test-fix": {
      "name": "Test Fix Generation",
      "level": "L3",
      "description": "Test-fix generation + execution loop (5 phases)",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/active/WFS-test-{session}/",
      "dual_mode": {
        "session_mode": { "input": "WFS-xxx", "context_source": "Source session summaries" },
        "prompt_mode": { "input": "Text/file path", "context_source": "Direct codebase analysis" }
      },
      "steps": [
        { "command": "/workflow:test-fix-gen", "optional": false },
        { "command": "/workflow:test-cycle-execute", "optional": false }
      ],
      "task_structure": [
        "IMPL-001.json (test understanding & generation)",
        "IMPL-001.5-review.json (quality gate)",
        "IMPL-002.json (test execution & fix cycle)"
      ],
      "cli_hints": {
        "analysis": { "tool": "gemini", "mode": "analysis", "trigger": "always" },
        "fix_cycle": { "tool": "codex", "mode": "write", "trigger": "pass_rate < 0.95" }
      },
      "estimated_time": "1-2 hours"
    },
    "ui": {
      "name": "UI-First Development",
      "level": "L3/L4",
      "description": "UI design + planning + execution",
      "complexity": ["medium", "high"],
      "artifacts": ".workflow/active/{session}/",
      "variants": {
        "explore": [
          { "command": "/workflow:ui-design:explore-auto", "optional": false },
          { "command": "/workflow:ui-design:design-sync", "optional": false, "auto_continue": true },
          { "command": "/workflow:plan", "optional": false },
          { "command": "/workflow:execute", "optional": false }
        ],
        "imitate": [
          { "command": "/workflow:ui-design:imitate-auto", "optional": false },
          { "command": "/workflow:ui-design:design-sync", "optional": false, "auto_continue": true },
          { "command": "/workflow:plan", "optional": false },
          { "command": "/workflow:execute", "optional": false }
        ]
      },
      "estimated_time": "2-4 hours"
    },
    "review-fix": {
      "name": "Review and Fix",
      "level": "L3",
      "description": "Multi-dimensional review + automatic fix",
      "complexity": ["medium"],
      "artifacts": ".workflow/active/{session}/review_report.md",
      "steps": [
        { "command": "/workflow:review-session-cycle", "optional": false },
        { "command": "/workflow:review-fix", "optional": true }
      ],
      "cli_hints": {
        "multi_dimension_review": { "tool": "gemini", "mode": "analysis", "trigger": "always", "parallel": true },
        "auto_fix": { "tool": "codex", "mode": "write", "trigger": "findings_count >= 3" }
      },
      "estimated_time": "30-90 min"
    },
    "docs": {
      "name": "Documentation",
      "level": "L2",
      "description": "Batch documentation generation",
      "complexity": ["low", "medium"],
      "variants": {
        "incremental": [{ "command": "/memory:update-related", "optional": false }],
        "full": [
          { "command": "/memory:docs", "optional": false },
          { "command": "/workflow:execute", "optional": false }
        ]
      },
      "estimated_time": "15-60 min"
    }
  },

  "intent_rules": {
    "_level_mapping": {
      "description": "Intent → Level → Flow mapping guide",
      "L1": ["lite-lite-lite"],
      "L2": ["rapid", "bugfix", "multi-cli-plan", "docs"],
      "L3": ["coupled", "tdd", "test-fix", "review-fix", "ui"],
      "L4": ["full"],
      "Supplementary": ["issue"]
    },
    "bugfix": {
      "priority": 1,
      "level": "L2",
      "variants": {
        "hotfix": {
          "patterns": ["hotfix", "urgent", "production", "critical", "emergency", "紧急", "生产环境", "线上"],
          "flow": "bugfix.hotfix"
        },
        "standard": {
          "patterns": ["fix", "bug", "error", "issue", "crash", "broken", "fail", "wrong", "修复", "错误", "崩溃"],
          "flow": "bugfix.standard"
        }
      }
    },
    "issue_batch": {
      "priority": 2,
      "level": "Supplementary",
      "patterns": {
        "batch": ["issues", "batch", "queue", "多个", "批量"],
        "action": ["fix", "resolve", "处理", "解决"]
      },
      "require_both": true,
      "flow": "issue"
    },
    "exploration": {
      "priority": 3,
      "level": "L4",
      "patterns": ["不确定", "不知道", "explore", "研究", "分析一下", "怎么做", "what if", "探索"],
      "flow": "full"
    },
    "multi_perspective": {
      "priority": 3,
      "level": "L2",
      "patterns": ["多视角", "权衡", "比较方案", "cross-verify", "多CLI", "协作分析"],
      "flow": "multi-cli-plan"
    },
    "quick_task": {
      "priority": 4,
      "level": "L1",
      "patterns": ["快速", "简单", "small", "quick", "simple", "trivial", "小改动"],
      "flow": "lite-lite-lite"
    },
    "ui_design": {
      "priority": 5,
      "level": "L3/L4",
      "patterns": ["ui", "界面", "design", "设计", "component", "组件", "style", "样式", "layout", "布局"],
      "variants": {
        "imitate": { "triggers": ["参考", "模仿", "像", "类似"], "flow": "ui.imitate" },
        "explore": { "triggers": [], "flow": "ui.explore" }
      }
    },
    "tdd": {
      "priority": 6,
      "level": "L3",
      "patterns": ["tdd", "test-driven", "测试驱动", "先写测试", "test first"],
      "flow": "tdd"
    },
    "test_fix": {
      "priority": 7,
      "level": "L3",
      "patterns": ["测试失败", "test fail", "fix test", "test error", "pass rate", "coverage gap"],
      "flow": "test-fix"
    },
    "review": {
      "priority": 8,
      "level": "L3",
      "patterns": ["review", "审查", "检查代码", "code review", "质量检查"],
      "flow": "review-fix"
    },
    "documentation": {
      "priority": 9,
      "level": "L2",
      "patterns": ["文档", "documentation", "docs", "readme"],
      "variants": {
|
||||
"incremental": { "triggers": ["更新", "增量"], "flow": "docs.incremental" },
|
||||
"full": { "triggers": ["全部", "完整"], "flow": "docs.full" }
|
||||
}
|
||||
},
|
||||
"feature": {
|
||||
"priority": 99,
|
||||
"complexity_map": {
|
||||
"high": { "level": "L3", "flow": "coupled" },
|
||||
"medium": { "level": "L2", "flow": "rapid" },
|
||||
"low": { "level": "L1", "flow": "lite-lite-lite" }
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"complexity_indicators": {
|
||||
"high": {
|
||||
"threshold": 4,
|
||||
"patterns": {
|
||||
"architecture": { "keywords": ["refactor", "重构", "migrate", "迁移", "architect", "架构", "system", "系统"], "weight": 2 },
|
||||
"multi_module": { "keywords": ["multiple", "多个", "across", "跨", "all", "所有", "entire", "整个"], "weight": 2 },
|
||||
"integration": { "keywords": ["integrate", "集成", "api", "database", "数据库"], "weight": 1 },
|
||||
"quality": { "keywords": ["security", "安全", "performance", "性能", "scale", "扩展"], "weight": 1 }
|
||||
}
|
||||
},
|
||||
"medium": { "threshold": 2 },
|
||||
"low": { "threshold": 0 }
|
||||
},
|
||||
|
||||
"cli_tools": {
|
||||
"gemini": {
|
||||
"strengths": ["超长上下文", "深度分析", "架构理解", "执行流追踪"],
|
||||
"triggers": ["分析", "理解", "设计", "架构", "诊断"],
|
||||
"mode": "analysis"
|
||||
},
|
||||
"qwen": {
|
||||
"strengths": ["代码模式识别", "多维度分析"],
|
||||
"triggers": ["评估", "对比", "验证"],
|
||||
"mode": "analysis"
|
||||
},
|
||||
"codex": {
|
||||
"strengths": ["精确代码生成", "自主执行"],
|
||||
"triggers": ["实现", "重构", "修复", "生成"],
|
||||
"mode": "write"
|
||||
}
|
||||
},
|
||||
|
||||
"cli_injection_rules": {
|
||||
"context_gathering": { "trigger": "file_read >= 50k OR module_count >= 5", "inject": "gemini --mode analysis" },
|
||||
"pre_planning_analysis": { "trigger": "complexity === high", "inject": "gemini --mode analysis" },
|
||||
"debug_diagnosis": { "trigger": "intent === bugfix AND root_cause_unclear", "inject": "gemini --mode analysis" },
|
||||
"code_review": { "trigger": "step === review", "inject": "gemini --mode analysis" },
|
||||
"implementation": { "trigger": "step === execute AND task_count >= 3", "inject": "codex --mode write" }
|
||||
},
|
||||
|
||||
"artifact_flow": {
|
||||
"_description": "定义工作流产出的格式、意图提取和流转规则",
|
||||
|
||||
"outputs": {
|
||||
"/workflow:lite-plan": {
|
||||
"artifact": "memory://plan",
|
||||
"format": "structured_plan",
|
||||
"fields": ["tasks", "files", "dependencies", "approach"]
|
||||
},
|
||||
"/workflow:plan": {
|
||||
"artifact": ".workflow/{session}/IMPL_PLAN.md",
|
||||
"format": "markdown_plan",
|
||||
"fields": ["phases", "tasks", "dependencies", "risks", "test_strategy"]
|
||||
},
|
||||
"/workflow:multi-cli-plan": {
|
||||
"artifact": ".workflow/.multi-cli-plan/{session}/",
|
||||
"format": "multi_file",
|
||||
"files": ["IMPL_PLAN.md", "plan.json", "synthesis.json"],
|
||||
"fields": ["consensus", "divergences", "recommended_approach", "tasks"]
|
||||
},
|
||||
"/workflow:lite-execute": {
|
||||
"artifact": "git_changes",
|
||||
"format": "code_diff",
|
||||
"fields": ["modified_files", "added_files", "deleted_files", "build_status"]
|
||||
},
|
||||
"/workflow:execute": {
|
||||
"artifact": ".workflow/{session}/execution_log.json",
|
||||
"format": "execution_report",
|
||||
"fields": ["completed_tasks", "pending_tasks", "errors", "warnings"]
|
||||
},
|
||||
"/workflow:test-cycle-execute": {
|
||||
"artifact": ".workflow/{session}/test_results.json",
|
||||
"format": "test_report",
|
||||
"fields": ["pass_rate", "failures", "coverage", "duration"]
|
||||
},
|
||||
"/workflow:review-session-cycle": {
|
||||
"artifact": ".workflow/{session}/review_report.md",
|
||||
"format": "review_report",
|
||||
"fields": ["findings", "severity_counts", "recommendations"]
|
||||
},
|
||||
"/workflow:lite-fix": {
|
||||
"artifact": "git_changes",
|
||||
"format": "fix_report",
|
||||
"fields": ["root_cause", "fix_applied", "files_modified", "verification_status"]
|
||||
}
|
||||
},
|
||||
|
||||
"intent_extraction": {
|
||||
"plan_to_execute": {
|
||||
"from": ["lite-plan", "plan", "multi-cli-plan"],
|
||||
"to": ["lite-execute", "execute"],
|
||||
"extract": {
|
||||
"tasks": "$.tasks[] | filter(status != 'completed')",
|
||||
"priority_order": "$.tasks | sort_by(priority)",
|
||||
"files_to_modify": "$.tasks[].files | flatten | unique",
|
||||
"dependencies": "$.dependencies",
|
||||
"context_summary": "$.approach OR $.recommended_approach"
|
||||
}
|
||||
},
|
||||
"execute_to_test": {
|
||||
"from": ["lite-execute", "execute"],
|
||||
"to": ["test-cycle-execute", "test-fix-gen"],
|
||||
"extract": {
|
||||
"modified_files": "$.modified_files",
|
||||
"test_scope": "infer_from($.modified_files)",
|
||||
"build_status": "$.build_status",
|
||||
"pending_verification": "$.completed_tasks | needs_test"
|
||||
}
|
||||
},
|
||||
"test_to_fix": {
|
||||
"from": ["test-cycle-execute"],
|
||||
"to": ["lite-fix", "review-fix"],
|
||||
"condition": "$.pass_rate < 0.95",
|
||||
"extract": {
|
||||
"failures": "$.failures",
|
||||
"error_messages": "$.failures[].message",
|
||||
"affected_files": "$.failures[].file",
|
||||
"suggested_fixes": "$.failures[].suggested_fix"
|
||||
}
|
||||
},
|
||||
"review_to_fix": {
|
||||
"from": ["review-session-cycle", "review-module-cycle"],
|
||||
"to": ["review-fix"],
|
||||
"condition": "$.severity_counts.critical > 0 OR $.severity_counts.high > 3",
|
||||
"extract": {
|
||||
"findings": "$.findings | filter(severity in ['critical', 'high'])",
|
||||
"fix_priority": "$.findings | group_by(category) | sort_by(severity)",
|
||||
"affected_files": "$.findings[].file | unique"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"completion_criteria": {
|
||||
"plan": {
|
||||
"required": ["has_tasks", "has_files"],
|
||||
"optional": ["has_tests", "no_blocking_risks"],
|
||||
"threshold": 0.8,
|
||||
"routing": {
|
||||
"complete": "proceed_to_execute",
|
||||
"incomplete": "clarify_requirements"
|
||||
}
|
||||
},
|
||||
"execute": {
|
||||
"required": ["all_tasks_attempted", "no_critical_errors"],
|
||||
"optional": ["build_passes", "lint_passes"],
|
||||
"threshold": 1.0,
|
||||
"routing": {
|
||||
"complete": "proceed_to_test_or_review",
|
||||
"partial": "continue_execution",
|
||||
"failed": "diagnose_and_retry"
|
||||
}
|
||||
},
|
||||
"test": {
|
||||
"metrics": {
|
||||
"pass_rate": { "target": 0.95, "minimum": 0.80 },
|
||||
"coverage": { "target": 0.80, "minimum": 0.60 }
|
||||
},
|
||||
"routing": {
|
||||
"pass_rate >= 0.95 AND coverage >= 0.80": "complete",
|
||||
"pass_rate >= 0.95 AND coverage < 0.80": "add_more_tests",
|
||||
"pass_rate >= 0.80": "fix_failures_then_continue",
|
||||
"pass_rate < 0.80": "major_fix_required"
|
||||
}
|
||||
},
|
||||
"review": {
|
||||
"metrics": {
|
||||
"critical_findings": { "target": 0, "maximum": 0 },
|
||||
"high_findings": { "target": 0, "maximum": 3 }
|
||||
},
|
||||
"routing": {
|
||||
"critical == 0 AND high <= 3": "complete_or_optional_fix",
|
||||
"critical > 0": "mandatory_fix",
|
||||
"high > 3": "recommended_fix"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"flow_decisions": {
|
||||
"_description": "根据产出完成度决定下一步",
|
||||
"patterns": {
|
||||
"plan_execute_test": {
|
||||
"sequence": ["plan", "execute", "test"],
|
||||
"on_test_fail": {
|
||||
"action": "extract_failures_and_fix",
|
||||
"max_iterations": 3,
|
||||
"fallback": "manual_intervention"
|
||||
}
|
||||
},
|
||||
"plan_execute_review": {
|
||||
"sequence": ["plan", "execute", "review"],
|
||||
"on_review_issues": {
|
||||
"action": "prioritize_and_fix",
|
||||
"auto_fix_threshold": "severity < high"
|
||||
}
|
||||
},
|
||||
"iterative_improvement": {
|
||||
"sequence": ["execute", "test", "fix"],
|
||||
"loop_until": "pass_rate >= 0.95 OR iterations >= 3",
|
||||
"on_loop_exit": "report_status"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -43,7 +43,7 @@ Core requirements, objectives, technical approach summary (2-3 paragraphs max).

**Quality Gates**:
- concept-verify: ✅ Passed (0 ambiguities remaining) | ⏭️ Skipped (user decision) | ⏳ Pending
- action-plan-verify: ⏳ Pending (recommended before /workflow:execute)
- plan-verify: ⏳ Pending (recommended before /workflow:execute)

**Context Package Summary**:
- **Focus Paths**: {list key directories from context-package.json}

@@ -1,4 +1,5 @@
---
name: CCW Loop-B
description: Hybrid orchestrator pattern for iterative development. Coordinator + specialized workers with batch wait support. Triggers on "ccw-loop-b".
argument-hint: TASK="<task description>" [--loop-id=<id>] [--mode=<interactive|auto|parallel>]
---

@@ -1,5 +1,6 @@
---
description: Stateless iterative development loop workflow with documented progress. Supports develop, debug, and validate phases with file-based state tracking. Triggers on "ccw-loop", "dev loop", "development loop".
name: CCW Loop
description: Stateless iterative development loop workflow with documented progress. Supports develop, debug, and validate phases with file-based state tracking. Triggers on "ccw-loop", "dev loop", "development loop", "开发循环", "迭代开发".
argument-hint: TASK="<task description>" [--loop-id=<id>] [--auto]
---

385 .codex/skills/parallel-dev-cycle/README.md Normal file
@@ -0,0 +1,385 @@
# Parallel Dev Cycle Skill

Multi-agent parallel development cycle using Codex subagent pattern with continuous iteration support.

## Overview

This skill implements a **single-file-per-agent** development workflow:

- **RA**: `requirements.md` (all requirements + edge cases + history)
- **EP**: `exploration.md`, `architecture.md`, `plan.json` (codebase exploration + architecture + structured tasks)
- **CD**: `implementation.md` (progress + files + decisions + testing)
- **VAS**: `summary.md` (validation + test results + recommendations)

Each file is **completely rewritten** on each iteration, with old versions auto-archived to `history/`.

## Installation

Files are in `.codex/skills/parallel-dev-cycle/`:

```
.codex/skills/parallel-dev-cycle/
├── SKILL.md                        # Main skill definition
├── README.md                       # This file
├── phases/
│   ├── orchestrator.md             # Multi-agent coordination
│   ├── state-schema.md             # Unified state structure
│   └── agents/
│       ├── requirements-analyst.md # RA role
│       ├── exploration-planner.md  # EP role
│       ├── code-developer.md       # CD role
│       └── validation-archivist.md # VAS role
└── specs/
    ├── coordination-protocol.md    # Agent communication
    └── versioning-strategy.md      # Version management
```

## Quick Start

### Launch New Cycle

```bash
/parallel-dev-cycle TASK="Implement OAuth authentication"
```

Creates:
```
.workflow/.cycle/cycle-v1-20260122-abc123.progress/
├── ra/
│   ├── requirements.md (v1.0.0)
│   └── changes.log (NDJSON)
├── ep/
│   ├── exploration.md (v1.0.0)
│   ├── architecture.md (v1.0.0)
│   ├── plan.json (v1.0.0)
│   └── changes.log (NDJSON)
├── cd/
│   ├── implementation.md (v1.0.0)
│   └── changes.log (NDJSON)
└── vas/
    ├── summary.md (v1.0.0)
    └── changes.log (NDJSON)
```

### Continue With Extension (XX-1 Pattern)

User adds requirement: "Also support Google OAuth"

```bash
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123 --extend="Add Google OAuth"
```

Automatically:
1. Archives old `requirements.md (v1.0.0)` → `history/requirements-v1.0.0.md`
2. Rewrites `requirements.md (v1.1.0)` - complete file replacement
3. Appends change to `changes.log` (NDJSON audit trail)

### Next Iteration (XX-2)

```bash
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123 --extend="Add GitHub provider"
```

All files update to v1.2.0, previous versions archived.

## Execution Flow

### Phase 1: Parallel Agent Execution

```
Time   RA              EP              CD                  VAS
────   ──              ──              ──                  ──
0ms    [spawned]       [spawned]       [spawned]           [spawned]
         ↓               ↓               ↓                   ↓
       Analyzing       Exploring       Reading plan        Waiting
       task            codebase        from EP...
                                         ↓
5min   Outputs req.    Outputs plan    Requirements
       v1.0.0 ✓        v1.0.0 ✓        unclear - BLOCKED

10min  Clarifies req   Updates plan    ✓ Ready
       v1.0.1 ✓        v1.0.1 ✓        Implementing...
                                         ↓
15min  ✓ Complete      ✓ Complete      ✓ Code done         [waiting for CD]

20min                                                      [starts tests]
                                                             ↓
25min                                                      Outputs summary
                                                           v1.0.0 ✓
```

### Phase 2: Version Transition

When an iteration completes, the next one extends to v1.1.0:

```
Current State (v1.0.0)
├── requirements.md (v1.0.0)
├── plan.json (v1.0.0)
├── implementation.md (v1.0.0)
└── summary.md (v1.0.0)

User: "Add GitHub provider"
        ↓
Archive Old                          Write New
├── history/requirements-v1.0.0.md → requirements.md (v1.1.0) - REWRITTEN
├── history/plan-v1.0.0.json       → plan.json (v1.1.0) - REWRITTEN
├── history/impl-v1.0.0.md         → implementation.md (v1.1.0) - REWRITTEN
└── history/summary-v1.0.0.md      → summary.md (v1.1.0) - REWRITTEN
        ↓
Append to changes.log (NDJSON)
```

## Session Files

```
.workflow/.cycle/{cycleId}.progress/

ra/ - Requirements Analyst
├── requirements.md        # v1.2.0 (current, complete rewrite)
├── changes.log            # NDJSON audit trail
└── history/
    ├── requirements-v1.0.0.md
    └── requirements-v1.1.0.md

ep/ - Exploration & Planning
├── exploration.md         # v1.2.0 (codebase exploration)
├── architecture.md        # v1.2.0 (architecture design)
├── plan.json              # v1.2.0 (structured task list, current)
├── changes.log            # NDJSON audit trail
└── history/
    ├── plan-v1.0.0.json
    └── plan-v1.1.0.json

cd/ - Code Developer
├── implementation.md      # v1.2.0 (current)
├── changes.log            # NDJSON audit trail
└── history/
    ├── implementation-v1.0.0.md
    └── implementation-v1.1.0.md

vas/ - Validation & Archival
├── summary.md             # v1.2.0 (current)
├── changes.log            # NDJSON audit trail
└── history/
    ├── summary-v1.0.0.md
    └── summary-v1.1.0.md
```

## Versioning Strategy

### Semantic Versioning

- **1.0.0**: Initial cycle
- **1.1.0**: User extends with new requirement
- **1.2.0**: Another iteration with more requirements

### What Gets Versioned

✅ **Main Document File**
- Completely rewritten each iteration
- Auto-archived to `history/`
- No inline version history (stays clean)

✅ **Changes.log (NDJSON)**
- Append-only (never deleted)
- Complete audit trail of all changes
- Used to trace requirement origins

✅ **Historical Snapshots**
- Auto-created in `history/` directory
- Keep last N versions (default: 5; see the pruning sketch below)
- For reference when needed
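
Snapshot retention is simple enough to sketch. A minimal Node.js helper, assuming the `history/` naming shown above (`{name}-v{semver}.{ext}`); the skill does not ship this function, it only illustrates the policy:

```javascript
// Hypothetical helper: prune a history/ directory down to the N most
// recent snapshots, based on the -vX.Y.Z suffix in each file name.
const fs = require('fs');
const path = require('path');

function pruneHistory(historyDir, keep = 5) {
  const files = fs.readdirSync(historyDir)
    // Extract the semver suffix, e.g. "requirements-v1.2.0.md" -> [1, 2, 0]
    .map(name => {
      const m = name.match(/-v(\d+)\.(\d+)\.(\d+)\./);
      return m && { name, ver: [Number(m[1]), Number(m[2]), Number(m[3])] };
    })
    .filter(Boolean)
    // Newest version first
    .sort((a, b) => b.ver[0] - a.ver[0] || b.ver[1] - a.ver[1] || b.ver[2] - a.ver[2]);

  // Delete everything past the newest `keep` snapshots
  for (const f of files.slice(keep)) {
    fs.unlinkSync(path.join(historyDir, f.name));
  }
}

pruneHistory('.workflow/.cycle/cycle-xxx.progress/ra/history', 5);
```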

### Key Principle

> **Main document stays clean and concise** ← agents only read the current version
>
> **Complete history preserved** ← changes.log records every change
>
> **Version snapshots archived** ← history/ backs up old versions

## File Maintenance

### Each Agent

| Agent | File | Contains | Size |
|-------|------|----------|------|
| **RA** | requirements.md | All FR, NFR, edge cases, history summary | ~2-5KB |
| **EP** | exploration.md + architecture.md + plan.json | Codebase exploration, architecture design, structured task list | ~5-10KB total |
| **CD** | implementation.md | Completed tasks, files changed, decisions, tests | ~4-10KB |
| **VAS** | summary.md | Test results, coverage, issues, recommendations | ~5-12KB |

### Changes.log (Shared)

NDJSON format - one line per change:

```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","version":"1.0.0","agent":"ra","action":"create","change":"Initial requirements","iteration":1}
{"timestamp":"2026-01-22T11:00:00+08:00","version":"1.1.0","agent":"ra","action":"update","change":"Added Google OAuth","iteration":2}
{"timestamp":"2026-01-22T12:00:00+08:00","version":"1.2.0","agent":"ra","action":"update","change":"Added GitHub, MFA","iteration":3}
```

## Accessing History

### Current Version

```bash
# View latest requirements
cat .workflow/.cycle/cycle-xxx.progress/ra/requirements.md

# Quick check - version is in header
head -5 requirements.md   # "# Requirements Specification - v1.2.0"
```

### Version History

```bash
# View previous version
cat .workflow/.cycle/cycle-xxx.progress/ra/history/requirements-v1.1.0.md

# Audit trail - all changes
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq .

# Changes in specific iteration
cat changes.log | jq 'select(.iteration==2)'

# Trace requirement history
cat changes.log | jq 'select(.change | contains("OAuth"))'
```

## Codex Pattern Implementation

### Multi-Agent Parallel

```javascript
// Create 4 agents in parallel
const agents = {
  ra: spawn_agent({ message: raRoleAndTask }),
  ep: spawn_agent({ message: epRoleAndTask }),
  cd: spawn_agent({ message: cdRoleAndTask }),
  vas: spawn_agent({ message: vasRoleAndTask })
}

// Wait for all 4 in parallel
const results = wait({ ids: [agents.ra, agents.ep, agents.cd, agents.vas] })
```

### Role Path Passing

Each agent reads its own role definition:

```javascript
spawn_agent({
  message: `
## MANDATORY FIRST STEPS
1. Read role: ~/.codex/agents/requirements-analyst.md
2. Read: .workflow/project-tech.json
3. Read: .workflow/project-guidelines.json

## TASK
${taskDescription}
`
})
```

### Deep Interaction

Use `send_input` for iteration refinement:

```javascript
// First output
const initial = wait({ ids: [agent] })

// User feedback
send_input({
  id: agent,
  message: `
## Feedback

${feedback}

## Next Steps
Update ${filename} based on feedback. Increment version.
Output PHASE_RESULT when complete.
`
})

// Updated output
const revised = wait({ ids: [agent] })

// Only close when done
close_agent({ id: agent })
```

## Error Handling

| Situation | Recovery |
|-----------|----------|
| Agent timeout | send_input requesting convergence or retry |
| State corrupted | Rebuild from changes.log NDJSON (see the sketch below) |
| Version mismatch | Agent checks version in state before reading |
| Blocked dependency | Orchestrator sends updated file path |
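
The state-corruption recovery above relies on changes.log being append-only and chronological. A minimal Node.js sketch of that rebuild, assuming the NDJSON fields shown in the Changes.log section (illustrative, not part of the skill):

```javascript
// Hypothetical recovery sketch: replay changes.log to reconstruct the
// latest known version and iteration for each agent.
const fs = require('fs');

function rebuildState(changesLogPath) {
  const lines = fs.readFileSync(changesLogPath, 'utf8')
    .split('\n')
    .filter(line => line.trim().length > 0);

  const state = {};
  for (const line of lines) {
    // {timestamp, version, agent, action, change, iteration}
    const entry = JSON.parse(line);
    // Last write wins: the log is chronological by construction
    state[entry.agent] = {
      version: entry.version,
      iteration: entry.iteration,
      lastChange: entry.change,
      lastUpdated: entry.timestamp,
    };
  }
  return state;
}

console.log(rebuildState('.workflow/.cycle/cycle-xxx.progress/ra/changes.log'));
```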

## Best Practices

1. **Let agents rewrite** - Don't maintain incremental history in main doc
2. **Trust changes.log** - NDJSON is the source of truth for history
3. **Archive on version bump** - Automatic, no manual versioning needed
4. **Keep files focused** - Each file should be readable in 5 minutes
5. **Version header always present** - Makes version obvious at a glance

## Integration

This skill works standalone or integrated with:
- Dashboard Loop Monitor (API triggers)
- CCW workflow system
- Custom orchestration

### API Trigger

```bash
POST /api/cycles/start
{
  "task": "Implement OAuth",
  "mode": "auto"
}
→ Returns cycle_id

GET /api/cycles/{cycle_id}/status
→ Returns agents status and progress
```

## Architecture Diagram

```
User Task
   ↓
Orchestrator (main coordinator)
   ├─→ spawn_agent(RA)
   ├─→ spawn_agent(EP)
   ├─→ spawn_agent(CD)
   └─→ spawn_agent(VAS)
   ↓
wait({ ids: [all 4] })
   ↓
All write to:
  - requirements.md (v1.x.0)
  - exploration.md, architecture.md, plan.json (v1.x.0)
  - implementation.md (v1.x.0)
  - summary.md (v1.x.0)
  - changes.log (NDJSON append)
   ↓
[Automatic archival]
  - history/requirements-v1.{x-1}.0.md
  - history/plan-v1.{x-1}.0.json
  - etc...
   ↓
Orchestrator: Next iteration?
  - Yes: send_input with feedback
  - No: close_agent, report summary
```

## License

MIT

194 .codex/skills/parallel-dev-cycle/SKILL.md Normal file
@@ -0,0 +1,194 @@
---
name: Parallel Dev Cycle
description: Multi-agent parallel development cycle with requirement analysis, exploration planning, code development, and validation. Supports continuous iteration with markdown progress documentation.
argument-hint: TASK="<task description>" | --cycle-id=<id> [--extend="<extension>"] [--auto] [--parallel=<count>]
---

# Parallel Dev Cycle - Multi-Agent Development Workflow

Multi-agent parallel development cycle using Codex subagent pattern with four specialized workers:
1. **Requirements Analysis & Extension** (RA) - analyzes requirements and proactively extends them
2. **Exploration & Planning** (EP) - explores the codebase and produces the implementation plan
3. **Code Development** (CD) - implements features, with debug strategy support
4. **Validation & Archival Summary** (VAS) - validates results and archives the summary

Each agent **maintains one main document** (e.g., requirements.md, plan.json, implementation.md) that is completely rewritten per iteration, plus auxiliary logs (changes.log, debug-log.ndjson) that are append-only. Supports versioning, automatic archival, and complete history tracking.

## Arguments

| Arg | Required | Description |
|-----|----------|-------------|
| TASK | One of TASK or --cycle-id | Task description (for new cycle, mutually exclusive with --cycle-id) |
| --cycle-id | One of TASK or --cycle-id | Existing cycle ID to continue (from API or previous session) |
| --extend | No | Extension description (only valid with --cycle-id) |
| --auto | No | Auto-cycle mode (run all phases sequentially) |
| --parallel | No | Number of parallel agents (default: 4, max: 4) |

## Architecture Overview

```
┌─────────────────────────────────────────────────────────────┐
│                      User Input (Task)                       │
└────────────────────────────┬────────────────────────────────┘
                             │
                             v
                  ┌──────────────────────┐
                  │  Orchestrator Agent  │  (Coordinator)
                  │    (spawned once)    │
                  └──────────────────────┘
                             │
        ┌────────────────────┼────────────────────┐
        │                    │                    │
        v                    v                    v
   ┌────────┐           ┌────────┐           ┌────────┐
   │   RA   │           │   EP   │           │   CD   │
   │ Agent  │           │ Agent  │           │ Agent  │
   └────────┘           └────────┘           └────────┘
        │                    │                    │
        └────────────────────┼────────────────────┘
                             │
                             v
                        ┌────────┐
                        │  VAS   │
                        │ Agent  │
                        └────────┘
                             │
                             v
                  ┌──────────────────────┐
                  │    Summary Report    │
                  │   & Markdown Docs    │
                  └──────────────────────┘
```

## Key Design Principles

1. **Main Document + Auxiliary Logs**: Each agent maintains one main document (rewritten per iteration) and auxiliary logs (append-only)
2. **Version-Based Overwrite**: Main documents completely rewritten per version; logs append-only
3. **Automatic Archival**: Old main document versions automatically archived to `history/` directory
4. **Complete Audit Trail**: Changes.log (NDJSON) preserves all change history
5. **Parallel Coordination**: Four agents launched simultaneously; coordination via shared state and orchestrator
6. **File References**: Use short file paths instead of content passing
7. **Self-Enhancement**: RA agent proactively extends requirements based on context

## Session Structure

```
.workflow/.cycle/
+-- {cycleId}.json                      # Master state file
+-- {cycleId}.progress/
    +-- ra/
    |   +-- requirements.md             # Current version (complete rewrite)
    |   +-- changes.log                 # NDJSON complete history (append-only)
    |   +-- history/
    |       +-- requirements-v1.0.0.md  # Archived snapshot
    |       +-- requirements-v1.1.0.md  # Archived snapshot
    +-- ep/
    |   +-- exploration.md              # Codebase exploration report
    |   +-- architecture.md             # Architecture design
    |   +-- plan.json                   # Structured task list (current version)
    |   +-- changes.log                 # NDJSON complete history
    |   +-- history/
    |       +-- plan-v1.0.0.json
    |       +-- plan-v1.1.0.json
    +-- cd/
    |   +-- implementation.md           # Current version
    |   +-- debug-log.ndjson            # Debug hypothesis tracking
    |   +-- changes.log                 # NDJSON complete history
    |   +-- history/
    |       +-- implementation-v1.0.0.md
    |       +-- implementation-v1.1.0.md
    +-- vas/
    |   +-- summary.md                  # Current version
    |   +-- changes.log                 # NDJSON complete history
    |   +-- history/
    |       +-- summary-v1.0.0.md
    |       +-- summary-v1.1.0.md
    +-- coordination/
        +-- timeline.md                 # Execution timeline
        +-- decisions.log               # Decision log
```

## State Management

State schema is defined in [phases/state-schema.md](phases/state-schema.md). The master state file (`{cycleId}.json`) tracks the following (a sketch follows the list):

- Cycle metadata (id, title, status, iterations)
- Agent states (status, output files, version)
- Shared context (requirements, plan, changes, test results)
- Coordination data (feedback log, decisions, blockers)
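
For orientation, a plausible shape of `{cycleId}.json` covering those four groups; the field names below are illustrative, and the authoritative schema is [phases/state-schema.md](phases/state-schema.md):

```json
{
  "cycle_id": "cycle-v1-20260122-abc123",
  "title": "Implement OAuth authentication",
  "status": "running",
  "iteration": 2,
  "version": "1.1.0",
  "agents": {
    "ra":  { "status": "complete", "output": "ra/requirements.md",   "version": "1.1.0" },
    "ep":  { "status": "complete", "output": "ep/plan.json",         "version": "1.1.0" },
    "cd":  { "status": "running",  "output": "cd/implementation.md", "version": "1.0.0" },
    "vas": { "status": "waiting",  "output": "vas/summary.md",       "version": "1.0.0" }
  },
  "shared_context": {
    "requirements": "ra/requirements.md",
    "plan": "ep/plan.json",
    "test_results": null
  },
  "coordination": {
    "feedback_log": [],
    "decisions": [],
    "blockers": []
  }
}
```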

## Versioning Workflow

### Initial Version (v1.0.0)

```bash
/parallel-dev-cycle TASK="Implement OAuth login"
```

Generates:
```
requirements.md (v1.0.0)
exploration.md (v1.0.0)
architecture.md (v1.0.0)
plan.json (v1.0.0)
implementation.md (v1.0.0) - if applicable
summary.md (v1.0.0) - if applicable
```

### Iteration Versions (v1.1.0, v1.2.0)

```bash
/parallel-dev-cycle --cycle-id=cycle-v1-xxx --extend="Add GitHub support"
```

**Automatic handling** (sketched below):
1. Read current `requirements.md (v1.0.0)`
2. Auto-archive to `history/requirements-v1.0.0.md`
3. Recreate `requirements.md (v1.1.0)` - complete overwrite
4. Append changes to `changes.log` (NDJSON)
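
The four steps above are mechanical. A minimal Node.js sketch of the archive-then-rewrite transition, assuming the directory layout from Session Structure; the skill performs this automatically, so this is illustration only:

```javascript
// Hypothetical sketch: archive the current main document, overwrite it
// with the new version, and append an audit record to changes.log.
const fs = require('fs');
const path = require('path');

function archiveAndRewrite(agentDir, fileName, newVersion, newContent) {
  const current = path.join(agentDir, fileName);              // e.g. ra/requirements.md
  const old = fs.readFileSync(current, 'utf8');
  // Version is read from the document header, e.g. "... - v1.0.0"
  const oldVersion = (old.match(/v(\d+\.\d+\.\d+)/) || [])[1] || '0.0.0';

  // 1-2. Archive the previous version to history/
  const historyDir = path.join(agentDir, 'history');
  fs.mkdirSync(historyDir, { recursive: true });
  const archived = fileName.replace(/(\.\w+)$/, `-v${oldVersion}$1`); // requirements-v1.0.0.md
  fs.writeFileSync(path.join(historyDir, archived), old);

  // 3. Complete overwrite with the new version
  fs.writeFileSync(current, newContent);

  // 4. Append the change to changes.log (NDJSON, append-only)
  const entry = {
    timestamp: new Date().toISOString(),
    version: newVersion,
    agent: path.basename(agentDir),
    action: 'update',
    change: `Rewrote ${fileName} at v${newVersion}`,
  };
  fs.appendFileSync(path.join(agentDir, 'changes.log'), JSON.stringify(entry) + '\n');
}
```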

## Changes.log Format (NDJSON)

Permanent audit log (append-only, never deleted):

```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","version":"1.0.0","agent":"ra","action":"create","change":"Initial requirements","iteration":1}
{"timestamp":"2026-01-22T11:00:00+08:00","version":"1.1.0","agent":"ra","action":"update","change":"Added Google OAuth requirement","iteration":2}
{"timestamp":"2026-01-22T11:30:00+08:00","version":"1.0.0","agent":"ep","action":"create","change":"Initial implementation plan","iteration":1}
```

## Usage

```bash
# Start new cycle
/parallel-dev-cycle TASK="Implement real-time notifications"

# Continue cycle
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123

# Iteration with extension
/parallel-dev-cycle --cycle-id=cycle-v1-20260122-abc123 --extend="Also add email notifications"

# Auto mode
/parallel-dev-cycle --auto TASK="Add OAuth authentication"
```

## Key Benefits

- **Simple**: Each agent maintains only 1 file + changes.log
- **Efficient**: Version rewrite without complex version marking
- **Traceable**: Complete history in `history/` and `changes.log`
- **Fast**: Agent reads current version quickly (no history parsing needed)
- **Auditable**: NDJSON changes.log fully traces every change
- **Self-Enhancing**: RA agent proactively extends requirements
- **Debug-Ready**: CD agent supports hypothesis-driven debugging

## Reference Documents

| Document | Purpose |
|----------|---------|
| [phases/orchestrator.md](phases/orchestrator.md) | Orchestrator logic |
| [phases/state-schema.md](phases/state-schema.md) | State structure definition |
| [phases/agents/](phases/agents/) | Four agent role definitions |
| [specs/coordination-protocol.md](specs/coordination-protocol.md) | Communication protocol |
| [specs/versioning-strategy.md](specs/versioning-strategy.md) | Version management |
327 .codex/skills/parallel-dev-cycle/phases/agents/code-developer.md Normal file
@@ -0,0 +1,327 @@
---
name: Code Developer Agent
description: Implement features based on plan and requirements
color: cyan
---

# Code Developer Agent (CD)

## Role Definition

The Code Developer is responsible for implementing features according to the plan and requirements. This agent handles all code changes, tracks modifications, and reports issues.

## Core Responsibilities

1. **Implement Features**
   - Write code following project conventions
   - Follow the implementation plan
   - Ensure code quality
   - Track progress

2. **Handle Integration**
   - Integrate with existing systems
   - Maintain compatibility
   - Update related components
   - Handle data migrations

3. **Track Changes**
   - Document all file modifications
   - Log changes in NDJSON format
   - Track which iteration introduced which changes
   - Update changes.log

4. **Report Issues**
   - Document development blockers
   - Identify missing requirements
   - Flag integration conflicts
   - Report unforeseen challenges

## Key Reminders

**ALWAYS**:
- Follow existing code style and patterns
- Test code before submitting
- Document code changes clearly
- Track blockers and issues
- Append to changes.log, never overwrite
- Reference requirements in code comments
- Use meaningful commit messages in implementation notes

**NEVER**:
- Ignore linting or code quality warnings
- Make assumptions about unclear requirements
- Skip testing critical functionality
- Modify unrelated code
- Leave TODO comments without context
- Implement features not in the plan

## Execution Process

### Phase 1: Planning & Setup

1. **Read Context**
   - Plan from exploration-planner.md
   - Requirements from requirements-analyst.md
   - Project tech stack and guidelines

2. **Understand Project Structure**
   - Review similar existing implementations
   - Understand coding conventions
   - Check for relevant utilities/libraries

3. **Prepare Environment**
   - Create feature branch (if using git)
   - Set up development environment
   - Prepare test environment

### Phase 2: Implementation

For each task in the plan:

1. **Read Task Details**
   - Task description and success criteria
   - Dependencies (ensure they're completed)
   - Integration points

2. **Implement Feature**
   - Write code in target files
   - Follow project conventions
   - Add code comments
   - Reference requirements

3. **Track Changes**
   - Log each file modification to changes.log
   - Format: `{timestamp, iteration, file, action, description}`
   - Include reason for change

4. **Test Implementation**
   - Run unit tests
   - Verify integration
   - Test error cases
   - Check performance
   - **If tests fail**: Initiate Debug Workflow (see Debug Workflow section)

5. **Report Progress**
   - Update implementation.md
   - Log any issues or blockers
   - Note decisions made

## Debug Workflow

When tests fail during implementation, the CD agent MUST initiate the hypothesis-driven debug workflow. This workflow systematically identifies and resolves bugs through structured hypothesis testing.

### Debug Triggers

| Trigger | Condition | Action |
|---------|-----------|--------|
| **Test Failure** | Automated tests fail during implementation | Start debug workflow |
| **Integration Conflict** | Blockers logged in `issues.md` | Start debug workflow |
| **VAS Feedback** | Orchestrator provides validation failure feedback | Start debug workflow |

### Debug Workflow Phases

1. **Isolate Failure**
   - Pinpoint the specific test or condition that is failing
   - Extract exact error message and stack trace
   - Identify the failing component/function

2. **Formulate Hypothesis**
   - Generate a specific, testable hypothesis about the root cause
   - Example: "Error is caused by null value passed from function X"
   - Log hypothesis in `debug-log.ndjson`
   - Prioritize hypotheses based on: error messages > recent changes > dependency relationships > edge cases

3. **Design Experiment**
   - Determine minimal change to test hypothesis
   - Options: add logging, create minimal unit test, inspect variable, add breakpoint
   - Document experiment design

4. **Execute & Observe**
   - Apply the change and run the test
   - Capture inputs, actions taken, and observed outcomes
   - Log structured results in `debug-log.ndjson`

5. **Analyze & Conclude**
   - Compare outcome to hypothesis
   - If **confirmed**: Proceed to implement fix (Phase 6)
   - If **refuted**: Log finding and formulate new hypothesis (return to Phase 2)
   - If **inconclusive**: Refine experiment and repeat

6. **Implement Fix**
   - Once root cause confirmed, implement necessary code changes
   - Document fix rationale in implementation.md
   - Log fix in changes.log

7. **Verify Fix**
   - Run all relevant tests to ensure fix is effective
   - Verify no regressions introduced
   - Mark issue as resolved in issues.md

### Debug Log Format (NDJSON)

File: `.workflow/.cycle/{cycleId}.progress/cd/debug-log.ndjson`

Schema:
```json
{
  "timestamp": "2026-01-23T10:00:00+08:00",
  "iteration": 1,
  "issue_id": "BUG-001",
  "file": "src/auth/oauth.ts",
  "hypothesis": "OAuth token refresh fails due to expired refresh_token not handled",
  "action": "Added logging to capture refresh_token expiry",
  "observation": "Refresh token is expired but code doesn't check expiry before use",
  "outcome": "confirmed"
}
```

Outcome values: `confirmed | refuted | inconclusive`
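
For illustration, appending one hypothesis-test entry is a single NDJSON write. A minimal Node.js sketch, assuming the schema above (`logDebugEntry` is a hypothetical helper, not part of the skill):

```javascript
// Hypothetical helper: append one hypothesis-test record to debug-log.ndjson.
const fs = require('fs');

function logDebugEntry(cycleDir, entry) {
  const record = {
    timestamp: new Date().toISOString(),
    // { iteration, issue_id, file, hypothesis, action, observation, outcome }
    ...entry,
  };
  fs.appendFileSync(`${cycleDir}/cd/debug-log.ndjson`, JSON.stringify(record) + '\n');
}

logDebugEntry('.workflow/.cycle/cycle-v1-20260122-abc123.progress', {
  iteration: 1,
  issue_id: 'BUG-001',
  file: 'src/auth/oauth.ts',
  hypothesis: 'Refresh token expiry not checked before use',
  action: 'Added logging around token refresh',
  observation: 'Expired refresh_token used without expiry check',
  outcome: 'confirmed',
});
```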

### Hypothesis Priority Order

1. **Direct Error Messages/Stack Traces**: Most reliable starting point
2. **Recent Changes**: Check `changes.log` for recent modifications
3. **Dependency Relationships**: Analyze relationships between failing component and its dependencies
4. **Edge Cases**: Review `edge-cases.md` for documented edge cases

### Output

Debug workflow generates an additional file:
- **debug-log.ndjson**: NDJSON log of all hypothesis-test cycles

### Phase 3: Output

Generate files in `.workflow/.cycle/{cycleId}.progress/cd/`:

**implementation.md**:
```markdown
# Implementation Progress - Version X.Y.Z

## Summary
Overview of what was implemented in this iteration.

## Completed Tasks
- ✓ TASK-001: Setup OAuth configuration
- ✓ TASK-002: Update User model
- ✓ TASK-003: Implement OAuth strategy
- ⏳ TASK-004: Create authentication endpoints (in progress)

## Key Implementation Decisions
1. Used passport-oauth2 for OAuth handling
   - Rationale: Mature, well-maintained library
   - Alternative considered: Manual OAuth implementation
   - Chosen: passport-oauth2 (community support)

2. Stored OAuth tokens in database
   - Rationale: Needed for refresh tokens
   - Alternative: Client-side storage
   - Chosen: Database (security)

## Code Structure
- src/config/oauth.ts - OAuth configuration
- src/strategies/oauth-google.ts - Google strategy implementation
- src/routes/auth.ts - Authentication endpoints
- src/models/User.ts - Updated User model

## Testing Status
- Unit tests: 15/15 passing
- Integration tests: 8/10 passing
- Failing: OAuth refresh token edge cases

## Next Steps
- Fix OAuth refresh token handling
- Complete integration tests
- Code review and merge
```

**changes.log** (NDJSON):
```
{"timestamp":"2026-01-22T10:30:00+08:00","iteration":1,"file":"src/config/oauth.ts","action":"create","task":"TASK-001","description":"Created OAuth configuration","lines_added":45,"lines_removed":0}
{"timestamp":"2026-01-22T10:45:00+08:00","iteration":1,"file":"src/models/User.ts","action":"modify","task":"TASK-002","description":"Added oauth_id and oauth_provider fields","lines_added":8,"lines_removed":0}
{"timestamp":"2026-01-22T11:15:00+08:00","iteration":1,"file":"src/strategies/oauth-google.ts","action":"create","task":"TASK-003","description":"Implemented Google OAuth strategy","lines_added":120,"lines_removed":0}
```

**issues.md**:
```markdown
# Development Issues - Version X.Y.Z

## Open Issues
### Issue 1: OAuth Token Refresh
- Severity: High
- Description: Refresh token logic doesn't handle expired refresh tokens
- Blocker: No, can implement fallback
- Suggested Solution: Redirect to re-authentication

### Issue 2: Database Migration
- Severity: Medium
- Description: Migration doesn't handle existing users
- Blocker: No, can use default values
- Suggested Solution: Set oauth_id = null for existing users

## Resolved Issues
- ✓ OAuth callback URL validation (fixed in commit abc123)
- ✓ CORS issues with OAuth provider (updated headers)

## Questions for RA
- Q1: Should OAuth be optional or required for login?
  - Current: Optional (can still use password)
  - Impact: Affects user flow design
```

## Output Format

```
PHASE_RESULT:
- phase: cd
- status: success | failed | partial
- files_written: [implementation.md, changes.log, debug-log.ndjson (if debug executed), issues.md]
- summary: N tasks completed, M files modified, X blockers identified
- tasks_completed: N
- files_modified: M
- tests_passing: X/Y
- debug_cycles: Z (if debug executed)
- blockers: []
- issues: [list of open issues]
```

## Interaction with Other Agents

### Receives From:
- **EP (Exploration Planner)**: "Here's the implementation plan"
  - Used to guide development
- **RA (Requirements Analyst)**: "Requirement FR-X means..."
  - Used for clarification
- **Orchestrator**: "Fix these issues in next iteration"
  - Used for priority setting

### Sends To:
- **VAS (Validator)**: "Here are code changes, ready for testing"
  - Used for test generation
- **RA (Requirements Analyst)**: "FR-X is unclear, need clarification"
  - Used for requirement updates
- **Orchestrator**: "Found blocker X, need help"
  - Used for decision making

## Code Quality Standards

**Minimum Standards**:
- Follow project linting rules
- Include error handling for all external calls
- Add comments for non-obvious code
- Reference requirements in code
- Test all happy and unhappy paths

**Expected Commits Include**:
- Why: Reason for change
- What: What was changed
- Testing: How was it tested
- Related: Link to requirement/task

## Best Practices

1. **Incremental Implementation**: Complete one task fully before starting next
2. **Early Testing**: Test as you implement, not after
3. **Clear Documentation**: Document implementation decisions
4. **Communication**: Report blockers immediately
5. **Code Review Readiness**: Keep commits atomic and well-described
6. **Track Progress**: Update implementation.md regularly
285 .codex/skills/parallel-dev-cycle/phases/agents/exploration-planner.md Normal file
@@ -0,0 +1,285 @@
---
name: Exploration & Planning Agent
description: Explore architecture and generate implementation plan
color: green
---

# Exploration & Planning Agent (EP)

## Role Definition

The Exploration & Planning Agent is responsible for understanding the codebase architecture, identifying integration points, and generating detailed implementation plans. This agent bridges requirements and development.

## Core Responsibilities

1. **Explore Codebase**
   - Map existing architecture
   - Identify relevant modules
   - Find similar implementations
   - Locate integration points

2. **Analyze Dependencies**
   - Track external dependencies
   - Identify internal dependencies
   - Map data flow
   - Document integration interfaces

3. **Design Implementation Plan**
   - Break down into actionable tasks
   - Estimate effort levels
   - Identify critical paths
   - Plan task dependencies

4. **Generate Architecture Design**
   - Component diagrams
   - Integration points
   - Data model considerations
   - Potential risks and mitigations

## Key Reminders

**ALWAYS**:
- Generate plan.json with structured format
- Version both exploration.md and plan.json
- Include effort estimates for each task
- Document identified risks
- Map task dependencies accurately
- Provide clear integration guidelines

**NEVER**:
- Plan implementation details (leave for CD agent)
- Create tasks that are too large (break into subtasks)
- Ignore existing code patterns
- Skip dependency analysis
- Forget to document risks

## Execution Process

### Phase 1: Codebase Exploration

1. **Read Context**
   - Cycle state
   - Requirements from RA
   - Project tech stack and guidelines

2. **Explore Architecture**
   - Identify existing patterns and conventions
   - Find similar feature implementations
   - Map module boundaries
   - Document current architecture

3. **Analyze Integration Points**
   - Where will new code integrate?
   - What interfaces need to match?
   - What data models exist?
   - What dependencies exist?

4. **Generate Exploration Report**
   - Write `exploration.md` documenting findings
   - Include architecture overview
   - Document identified patterns
   - List integration points and risks

### Phase 2: Planning

1. **Decompose Requirements**
   - Convert each requirement to one or more tasks
   - Identify logical grouping
   - Determine task sequencing

2. **Estimate Effort**
   - Small (< 1 hour)
   - Medium (1-4 hours)
   - Large (> 4 hours)

3. **Map Dependencies**
   - Task A depends on Task B
   - Identify critical path
   - Plan parallel opportunities

4. **Generate Plan.json**
   - Structured task list
   - Dependencies between tasks
   - Effort estimates
   - Integration guidelines

### Phase 3: Output

Generate files in `.workflow/.cycle/{cycleId}.progress/ep/`:

**exploration.md**:
```markdown
# Codebase Exploration - Version X.Y.Z

## Architecture Overview
Current system architecture and how new code fits in.

## Existing Patterns
- Authentication: Uses JWT with middleware
- Database: PostgreSQL with TypeORM
- API: Express.js with REST conventions
- ...

## Integration Points for [Feature]
- File: src/middleware/auth.ts
  - Add new OAuth strategies here
  - Extend AuthProvider interface
  - Update token generation logic
- File: src/models/User.ts
  - Add oauth_id field
  - Migrate existing users
  - Update constraints

## Identified Risks
- Risk 1: OAuth token refresh complexity
  - Mitigation: Use library like passport-oauth2
- Risk 2: Database migration impact
  - Mitigation: Rolling deployment strategy
```

**architecture.md**:
```markdown
# Architecture Design - Version X.Y.Z

## Component Diagram
[Describe relationships between components]

## Data Model Changes
- User table: Add oauth_id, oauth_provider fields
- Sessions table: Update token structure
- ...

## API Endpoints
- POST /auth/oauth/google - Initiate OAuth
- GET /auth/oauth/callback - Handle callback
- ...

## Integration Flow
1. User clicks "Login with Google"
2. Client redirects to /auth/oauth/google
3. Server initiates Google OAuth flow
4. ... (complete flow)
```

**plan.json**:
```json
{
  "version": "1.0.0",
  "total_tasks": 7,
  "estimated_duration": "Medium",
  "tasks": [
    {
      "id": "TASK-001",
      "title": "Setup OAuth configuration",
      "description": "Create OAuth app credentials and config",
      "effort": "small",
      "estimated_hours": 1,
      "depends_on": [],
      "files": ["src/config/oauth.ts"],
      "success_criteria": "Config loads without errors"
    },
    {
      "id": "TASK-002",
      "title": "Update User model",
      "description": "Add oauth_id and oauth_provider fields",
      "effort": "medium",
      "estimated_hours": 2,
      "depends_on": ["TASK-001"],
      "files": ["src/models/User.ts", "migrations/*"],
      "success_criteria": "Migration runs successfully"
    },
    {
      "id": "TASK-003",
      "title": "Implement OAuth strategy",
      "description": "Add Google OAuth strategy",
      "effort": "large",
      "estimated_hours": 4,
      "depends_on": ["TASK-001"],
      "files": ["src/strategies/oauth-google.ts"],
      "success_criteria": "OAuth flow works end-to-end"
    },
    {
      "id": "TASK-004",
      "title": "Create authentication endpoints",
      "description": "POST /auth/oauth/google, GET /auth/oauth/callback",
      "effort": "medium",
      "estimated_hours": 3,
      "depends_on": ["TASK-003"],
      "files": ["src/routes/auth.ts"],
      "success_criteria": "Endpoints respond correctly"
    },
    {
      "id": "TASK-005",
      "title": "Add tests for OAuth flow",
      "description": "Unit and integration tests",
      "effort": "large",
      "estimated_hours": 4,
      "depends_on": ["TASK-004"],
      "files": ["tests/auth-oauth.test.ts"],
      "success_criteria": "All tests passing"
    },
    {
      "id": "TASK-006",
      "title": "Update frontend login",
      "description": "Add OAuth button to login page",
      "effort": "small",
      "estimated_hours": 1,
      "depends_on": [],
      "files": ["frontend/components/Login.tsx"],
      "success_criteria": "Button appears and works"
    },
    {
      "id": "TASK-007",
      "title": "Documentation",
      "description": "Update API docs and setup guide",
      "effort": "medium",
      "estimated_hours": 2,
      "depends_on": ["TASK-005"],
      "files": ["docs/auth.md", "docs/setup.md"],
      "success_criteria": "Docs are complete and clear"
    }
  ],
  "critical_path": ["TASK-001", "TASK-003", "TASK-004", "TASK-005"],
  "parallel_opportunities": [
    ["TASK-002", "TASK-003"],
    ["TASK-005", "TASK-006"]
  ]
}
```
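
The `critical_path` field can be derived from the task graph itself. A minimal JavaScript sketch that recovers it from plan.json as the longest chain of estimated hours through `depends_on` (assumes an acyclic graph; the EP agent may compute it differently):

```javascript
// Hypothetical sketch: find the dependency chain with the largest total
// estimated_hours. With the plan.json above it returns
// { hours: 12, path: ['TASK-001', 'TASK-003', 'TASK-004', 'TASK-005'] }.
function criticalPath(plan) {
  const byId = Object.fromEntries(plan.tasks.map(t => [t.id, t]));
  const memo = {};

  // Longest-duration chain ending at task `id`
  function chain(id) {
    if (memo[id]) return memo[id];
    const task = byId[id];
    let best = { hours: 0, path: [] };
    for (const dep of task.depends_on) {
      const c = chain(dep);
      if (c.hours > best.hours) best = c;
    }
    return (memo[id] = {
      hours: best.hours + task.estimated_hours,
      path: [...best.path, id],
    });
  }

  return plan.tasks
    .map(t => chain(t.id))
    .reduce((a, b) => (b.hours > a.hours ? b : a));
}
```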

## Output Format

```
PHASE_RESULT:
- phase: ep
- status: success | failed | partial
- files_written: [exploration.md, architecture.md, plan.json]
- summary: Architecture explored, X tasks planned, version X.Y.Z
- plan_version: X.Y.Z
- task_count: N
- critical_path_length: N
- issues: []
```

## Interaction with Other Agents

### Receives From:
- **RA (Requirements Analyst)**: "Definitive requirements, version X.Y.Z"
  - Used to structure plan
- **Orchestrator**: "Continue planning with iteration X"
  - Used to update plan for extensions

### Sends To:
- **CD (Developer)**: "Here's the implementation plan"
  - Used for feature implementation
- **VAS (Validator)**: "Here's what will be implemented"
  - Used for test strategy generation

## Best Practices

1. **Understand Existing Patterns**: Follow codebase conventions
2. **Realistic Estimates**: Include buffer for unknowns
3. **Clear Dependencies**: Document why tasks depend on each other
4. **Risk Identification**: Don't ignore potential issues
5. **Integration Guidelines**: Make integration obvious for CD
6. **Versioning**: Update version when requirements change
@@ -0,0 +1,370 @@

---
name: Requirements Analyst
description: Analyze, refine, and maintain requirements in single file with version control
color: blue
---

# Requirements Analyst Agent (RA)

## Role Definition

The Requirements Analyst maintains **a single file** (`requirements.md`) containing all requirements, edge cases, and constraints. Each iteration **completely rewrites** the file with a new version.

## Core Responsibilities

1. **Analyze Task Description**
   - Parse initial task or extension
   - Decompose into functional requirements
   - Identify implicit requirements
   - Clarify ambiguous statements

2. **Identify Edge Cases**
   - Scenario planning
   - Boundary condition analysis
   - Error handling requirements
   - Performance constraints

3. **Maintain Single Document**
   - Write complete `requirements.md` each iteration
   - Include version header with previous summary
   - Document all FR, NFR, edge cases in one file
   - Auto-archive old version to `history/`

4. **Track All Changes**
   - Append to `changes.log` (NDJSON) for audit trail
   - Never delete historical data
   - Version-based change tracking

## Key Reminders

**ALWAYS**:
- **Complete rewrite** of `requirements.md` each iteration
- Archive previous version to `history/requirements-v{version}.md`
- Include version header (current + previous summary)
- Append all changes to `changes.log` (NDJSON)
- Timestamp all actions with ISO8601 format

**NEVER**:
- Maintain incremental history in main document
- Delete previous versions manually (auto-archived)
- Forget to increment version number
- Skip documenting edge cases

## Execution Process

### Phase 1: Initial Analysis (v1.0.0)

1. **Read Context**
   - Cycle state from `.workflow/.cycle/{cycleId}.json`
   - Task description from state
   - Project tech stack and guidelines

2. **Analyze Explicit Requirements**
   - Functional requirements from user task
   - Non-functional requirements (explicit)
   - Constraints and assumptions
   - Edge cases

3. **Proactive Enhancement** (NEW - Self-Enhancement Phase)
   - Execute enhancement strategies based on triggers
   - Scan codebase for implied requirements
   - Analyze peer agent outputs (EP, CD, VAS from previous iteration)
   - Suggest associated features and NFR scaffolding

4. **Consolidate & Finalize**
   - Merge explicit requirements with proactively generated ones
   - Mark enhanced items with "(ENHANCED v1.0.0 by RA)"
   - Add optional "## Proactive Enhancements" section with justification

5. **Generate Single File**
   - Write `requirements.md` v1.0.0
   - Include all sections in one document
   - Add version header
   - Create initial `changes.log` entry

### Phase 2: Iteration (v1.1.0, v1.2.0, ...)

1. **Archive Old Version**
   - Read current `requirements.md` (v1.0.0)
   - Copy to `history/requirements-v1.0.0.md`
   - Extract version and summary

2. **Analyze Extension**
   - Read user feedback/extension
   - Identify new requirements
   - Update edge cases
   - Maintain constraints

3. **Rewrite Complete File**
   - **Completely overwrite** `requirements.md`
   - New version: v1.1.0
   - Include "Previous Version" summary in header
   - Mark new items with "(NEW v1.1.0)"
   - Update history summary table

4. **Append to `changes.log`**
   ```json
   {"timestamp":"2026-01-23T10:00:00+08:00","version":"1.1.0","agent":"ra","action":"update","change":"Added MFA requirement","iteration":2}
   ```

### Phase 3: Output

Generate/update two files in `.workflow/.cycle/{cycleId}.progress/ra/`:

**requirements.md** (COMPLETE REWRITE):
```markdown
# Requirements Specification - v1.1.0

## Document Status
| Field | Value |
|-------|-------|
| **Version** | 1.1.0 |
| **Previous Version** | 1.0.0 (Initial OAuth requirements) |
| **This Version** | Added Google OAuth support |
| **Iteration** | 2 |
| **Updated** | 2026-01-23T10:00:00+08:00 |

---

## Functional Requirements

### FR-001: OAuth Authentication
User can authenticate via OAuth providers.

**Status**: Implemented (v1.0.0), Enhanced (v1.1.0)

**Providers**: Google (NEW v1.1.0)

**Priority**: High

---

### FR-002: User Profile Creation
System creates user profile on first login.

**Status**: Defined (v1.0.0)

**Priority**: Medium

---

## Non-Functional Requirements

### NFR-001: Performance
Response time < 500ms for all OAuth flows.

**Status**: Not tested

---

### NFR-002: Scalability
Support 1000 concurrent users.

**Status**: Not tested

---

## Edge Cases

### EC-001: OAuth Timeout
**Scenario**: Provider doesn't respond in 5 seconds

**Expected**: Display error, offer retry

**Test Strategy**: Mock provider timeout

**Status**: Defined (v1.0.0)

---

### EC-002: Invalid OAuth Credentials (NEW v1.1.0)
**Scenario**: User provides invalid credentials

**Expected**: Clear error message, redirect to login

**Test Strategy**: Mock invalid credentials

**Status**: New in v1.1.0

---

## Constraints
- Must use existing JWT session management
- No new database servers
- Compatible with existing User table

---

## Assumptions
- OAuth providers are available 99.9% of the time
- Users have modern browsers supporting redirects

---

## Success Criteria
- [ ] All functional requirements implemented
- [ ] All NFRs validated
- [ ] Test coverage > 80%
- [ ] Production deployment successful

---

## History Summary
| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Initial OAuth requirements |
| 1.1.0 | 2026-01-23 | + Google OAuth support (current) |

**Detailed History**: See `history/` directory and `changes.log`
```

**changes.log** (APPEND ONLY):
```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","version":"1.0.0","agent":"ra","action":"create","change":"Initial requirements","iteration":1}
{"timestamp":"2026-01-23T10:00:00+08:00","version":"1.1.0","agent":"ra","action":"update","change":"Added Google OAuth support","iteration":2}
```
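
The `appendNDJSON` helper used later in this document is never defined; a minimal sketch, assuming the same `Read`/`Write` pseudo-API the agents already use, could look like this:

```javascript
// Sketch: append one JSON object as a single NDJSON line.
// fs.existsSync follows the convention used elsewhere in this document.
function appendNDJSON(path, entry) {
  const existing = fs.existsSync(path) ? Read(path) : ''
  Write(path, existing + JSON.stringify(entry) + '\n')
}
```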

## Output Format

```
PHASE_RESULT:
- phase: ra
- status: success | failed
- version: 1.1.0
- files_written: [requirements.md, changes.log]
- archived: [history/requirements-v1.0.0.md]
- summary: Requirements updated to v1.1.0, added Google OAuth support
- requirements_count: 2
- edge_cases_count: 2
- new_items: ["FR-001 enhancement", "EC-002"]
```

## Version Management

### Version Numbering
- **1.0.0**: Initial cycle
- **1.x.0**: Each new iteration (minor bump; see the sketch below)
- **2.0.0**: Complete rewrite (rare, major changes)
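
A minimal bump helper following this scheme (a sketch, not part of the agent spec):

```javascript
// Sketch: minor bump for a new iteration, e.g. "1.1.0" -> "1.2.0"
function bumpMinorVersion(version) {
  const [major, minor] = version.split('.').map(Number)
  return `${major}.${minor + 1}.0`
}
```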

### Archival Process
```javascript
// Before writing new version
if (previousVersionExists) {
  const oldFile = 'requirements.md'
  const archiveFile = `history/requirements-v${previousVersion}.md`

  Copy(oldFile, archiveFile) // Auto-archive
  console.log(`Archived v${previousVersion}`)
}

// Write complete new version
Write('requirements.md', newContent) // COMPLETE OVERWRITE

// Append to audit log
appendNDJSON('changes.log', {
  timestamp: now,
  version: newVersion,
  agent: 'ra',
  action: 'update',
  change: changeSummary,
  iteration: currentIteration
})
```

## Interaction with Other Agents

### Sends To
- **EP (Explorer)**: "Requirements ready, see requirements.md v1.1.0"
  - File reference, not full content
- **CD (Developer)**: "Requirement FR-X clarified in v1.1.1"
  - Version-specific reference

### Receives From
- **CD (Developer)**: "FR-002 is unclear, need clarification"
  - Response: Update requirements.md, bump version
- **User**: "Add new requirement FR-003"
  - Response: Rewrite requirements.md with FR-003

## Best Practices

1. **Single Source of Truth**: One file contains everything
2. **Complete Rewrites**: Don't maintain incremental diffs
3. **Clear Versioning**: Header always shows version
4. **Automatic Archival**: Old versions safely stored
5. **Audit Trail**: changes.log tracks every modification
6. **Readability First**: File should be clear and concise
7. **Version Markers**: Mark new items with "(NEW v1.x.0)"
8. **Proactive Enhancement**: Always apply self-enhancement phase

## Self-Enhancement Mechanism

The RA agent proactively extends requirements based on context analysis.

### Enhancement Triggers

| Trigger | Condition | Action |
|---------|-----------|--------|
| **Initial Analysis** | First iteration (v1.0.0) | Expand vague or high-level requests |
| **Implicit Context** | Key config files detected (package.json, Dockerfile, CI config) | Infer NFRs and constraints |
| **Cross-Agent Feedback** | Previous iteration has `exploration.identified_risks`, `cd.blockers`, or `vas.test_results.failed_tests` | Cover uncovered requirements |

### Enhancement Strategies

1. **Codebase Analysis**
   - Scan key project files (package.json, Dockerfile, CI/CD configs)
   - Infer technological constraints and dependencies
   - Identify operational requirements
   - Example: Detecting `storybook` dependency → suggest component-driven UI process

2. **Peer Output Mining**
   - Analyze EP agent's `exploration.architecture_summary`
   - Review CD agent's blockers and issues
   - Examine VAS agent's `test_results.failed_tests`
   - Formalize insights as new requirements

3. **Common Feature Association**
   - Based on functional requirements, suggest associated features
   - Example: "build user login" → suggest "password reset", "MFA"
   - Mark as enhancement candidates for user confirmation

4. **NFR Scaffolding**
   - For each major functional requirement, add standard NFRs
   - Categories: Performance, Security, Scalability, Accessibility
   - Set initial values as "TBD" to ensure consideration (see the sketch after this list)

### Output Format for Enhanced Requirements

Enhanced requirements are integrated directly into `requirements.md`:

```markdown
## Functional Requirements

### FR-001: OAuth Authentication
User can authenticate via OAuth providers.
**Status**: Defined (v1.0.0)
**Priority**: High

### FR-002: Password Reset (ENHANCED v1.0.0 by RA)
Users can reset their password via email link.
**Status**: Enhanced (auto-suggested)
**Priority**: Medium
**Trigger**: Common Feature Association (FR-001 → password reset)

---

## Proactive Enhancements

This section documents requirements auto-generated by the RA agent.

| ID | Trigger | Strategy | Justification |
|----|---------|----------|---------------|
| FR-002 | FR-001 requires login | Common Feature Association | Standard auth feature set |
| NFR-003 | package.json has `jest` | Codebase Analysis | Test framework implies testability NFR |
```

### Integration Notes

- Self-enhancement is **internal to the RA agent** - no orchestrator changes needed
- Read-only access to codebase and cycle state required
- Enhanced requirements are **transparently marked** for user review
- User can accept, modify, or reject enhanced requirements in the next iteration

@@ -0,0 +1,381 @@

---
name: Validation & Archival Agent
description: Run tests, validate quality, and create final documentation
color: yellow
---

# Validation & Archival Agent (VAS)

## Role Definition

The Validation & Archival Agent is responsible for verifying implementation quality, running tests, generating coverage reports, and creating comprehensive archival documentation for the entire cycle.

## Core Responsibilities

1. **Test Execution**
   - Run unit tests
   - Run integration tests
   - Generate coverage reports
   - Track test results

2. **Quality Validation**
   - Verify against requirements
   - Check for edge case handling
   - Validate performance
   - Assess security posture

3. **Documentation Generation**
   - Create comprehensive summary
   - Document test results
   - Generate coverage reports
   - Create archival records

4. **Iteration Feedback**
   - Identify failing tests
   - Report coverage gaps
   - Suggest fixes for failures
   - Flag regression risks

## Key Reminders

**ALWAYS**:
- Run complete test suite before validating
- Generate coverage reports with breakdowns
- Document all test results in JSON format
- Version all documents and reports
- Track which tests failed and why
- Generate actionable recommendations
- Maintain comprehensive archival records

**NEVER**:
- Skip tests to meet deadlines
- Ignore coverage gaps
- Delete test results or logs
- Mark tests as passing without verification
- Forget to document breaking changes
- Skip regression testing

## Execution Process

### Phase 1: Test Execution

1. **Read Context**
   - Code changes from CD agent
   - Requirements from RA agent
   - Project tech stack and guidelines

2. **Prepare Test Environment**
   - Set up test databases (clean state)
   - Configure test fixtures
   - Initialize test data

3. **Run Test Suites**
   - Execute unit tests
   - Execute integration tests
   - Execute end-to-end tests
   - Run security tests if applicable

4. **Collect Results**
   - Test pass/fail status
   - Execution time
   - Error messages and stack traces
   - Coverage metrics

### Phase 2: Analysis & Validation

1. **Analyze Test Results**
   - Calculate pass rate
   - Identify failing tests
   - Categorize failures (bug vs flaky)
   - Track coverage

2. **Verify Against Requirements**
   - Check FR coverage (all implemented?)
   - Check NFR validation (performance OK?)
   - Check edge case handling

3. **Generate Reports**
   - Coverage analysis by module
   - Test result summary
   - Recommendations for fixes
   - Risk assessment

### Phase 3: Archival Documentation

1. **Create Summary**
   - What was implemented
   - Quality metrics
   - Known issues
   - Recommendations

2. **Archive Results**
   - Store test results
   - Store coverage data
   - Store execution logs
   - Store decision records

### Phase 4: Output

Generate files in `.workflow/.cycle/{cycleId}.progress/vas/`:

**validation.md**:
```markdown
# Validation Report - Version X.Y.Z

## Executive Summary
- Iteration: 1 of 1
- Status: PASSED with warnings
- Pass Rate: 92% (46/50 tests)
- Coverage: 87% (target: 80%)
- Issues: 1 critical, 2 medium

## Test Execution Summary
- Total Tests: 50
- Passed: 46
- Failed: 3
- Skipped: 1
- Duration: 2m 34s

### By Category
- Unit Tests: 25/25 passed
- Integration Tests: 18/20 passed (2 flaky)
- End-to-End: 3/5 passed (2 timeout issues)

## Coverage Report
- Overall: 87%
- src/strategies/oauth-google.ts: 95%
- src/routes/auth.ts: 82%
- src/config/oauth.ts: 100%

## Test Failures
### FAILED: OAuth token refresh with expired refresh token
- File: tests/oauth-refresh.test.ts
- Error: "Refresh token invalid"
- Root Cause: Edge case not handled in strategy
- Fix Required: Update strategy to handle invalid tokens
- Severity: Medium

### FAILED: Concurrent login attempts
- File: tests/concurrent-login.test.ts
- Error: "Race condition in session creation"
- Root Cause: Concurrent writes to user session
- Fix Required: Add mutex/lock for session writes
- Severity: Critical

## Requirements Coverage
- ✓ FR-001: User OAuth login (PASSED)
- ✓ FR-002: Multiple providers (PASSED - only Google tested)
- ⚠ FR-003: Token refresh (PARTIAL - edge cases failing)
- ✓ NFR-001: Response time < 500ms (PASSED)
- ✓ NFR-002: Handle 100 concurrent users (PASSED)

## Recommendations
1. Fix critical race condition before production
2. Improve OAuth refresh token handling
3. Add tests for multi-provider scenarios
4. Performance test with higher concurrency levels

## Issues Requiring Attention
- [ ] Fix race condition (CRITICAL)
- [ ] Handle expired refresh tokens (MEDIUM)
- [ ] Test with GitHub provider (MEDIUM)
```

**test-results.json**:
```json
{
  "version": "1.0.0",
  "timestamp": "2026-01-22T12:00:00+08:00",
  "iteration": 1,
  "summary": {
    "total": 50,
    "passed": 46,
    "failed": 3,
    "skipped": 1,
    "duration_ms": 154000
  },
  "by_suite": [
    {
      "suite": "OAuth Strategy",
      "total": 15,
      "passed": 14,
      "failed": 1,
      "tests": [
        {
          "name": "Google OAuth - successful login",
          "status": "passed",
          "duration_ms": 245
        },
        {
          "name": "Google OAuth - invalid credentials",
          "status": "passed",
          "duration_ms": 198
        },
        {
          "name": "Google OAuth - token refresh with expired token",
          "status": "failed",
          "duration_ms": 523,
          "error": "Refresh token invalid",
          "stack": "at Strategy.refresh (src/strategies/oauth-google.ts:45)"
        }
      ]
    }
  ],
  "coverage": {
    "lines": 87,
    "statements": 89,
    "functions": 85,
    "branches": 78,
    "by_file": [
      {
        "file": "src/strategies/oauth-google.ts",
        "coverage": 95
      },
      {
        "file": "src/routes/auth.ts",
        "coverage": 82
      }
    ]
  }
}
```

**coverage.md**:
```markdown
# Coverage Report - Version X.Y.Z

## Overall Coverage: 87%
**Target: 80% ✓ PASSED**

## Breakdown by Module

| Module | Lines | Functions | Branches | Status |
|--------|-------|-----------|----------|--------|
| OAuth Strategy | 95% | 93% | 88% | ✓ Excellent |
| Auth Routes | 82% | 85% | 75% | ⚠ Acceptable |
| OAuth Config | 100% | 100% | 100% | ✓ Perfect |
| User Model | 78% | 80% | 70% | ⚠ Needs work |

## Uncovered Scenarios
- Error recovery in edge cases
- Multi-provider error handling
- Token revocation flow
- Concurrent request handling

## Recommendations for Improvement
1. Add tests for provider errors
2. Test token revocation edge cases
3. Add concurrency tests
4. Improve error path coverage
```

**summary.md**:
```markdown
# Cycle Completion Summary - Version X.Y.Z

## Cycle Overview
- Cycle ID: cycle-v1-20260122-abc123
- Task: Implement OAuth authentication
- Duration: 2 hours 30 minutes
- Iterations: 1

## Deliverables
- ✓ Requirements specification (3 pages)
- ✓ Implementation plan (8 tasks)
- ✓ Code implementation (1,200 lines)
- ✓ Test suite (50 tests, 92% passing)
- ✓ Documentation (complete)

## Quality Metrics
| Metric | Value | Target | Status |
|--------|-------|--------|--------|
| Test Pass Rate | 92% | 90% | ✓ |
| Code Coverage | 87% | 80% | ✓ |
| Performance | 245ms avg | 500ms | ✓ |
| Requirements Met | 3/3 | 100% | ✓ |

## Known Issues
1. **CRITICAL**: Race condition in session writes
   - Impact: Potential data loss under load
   - Status: Requires fix before production

2. **MEDIUM**: Refresh token edge case
   - Impact: Users may need to re-authenticate
   - Status: Can be fixed in next iteration

## Recommended Next Steps
1. Fix critical race condition
2. Add GitHub provider support
3. Performance testing under high load
4. Security audit of OAuth flow

## Files Modified
- src/config/oauth.ts (new)
- src/strategies/oauth-google.ts (new)
- src/routes/auth.ts (modified: +50 lines)
- src/models/User.ts (modified: +8 lines)
- migrations/* (new: user schema update)
- tests/* (new: 50 test cases)

## Approval Status
- Code Review: Pending
- Requirements Met: YES
- Tests Passing: 46/50 (92%)
- **READY FOR**: Code review and fixes

## Sign-Off
- Validation Agent: VAS-001
- Timestamp: 2026-01-22T12:00:00+08:00
```

## Output Format

```
PHASE_RESULT:
- phase: vas
- status: success | failed | partial
- files_written: [validation.md, test-results.json, coverage.md, summary.md]
- summary: Tests executed, X% pass rate, Y% coverage, Z issues found
- test_pass_rate: X%
- coverage: Y%
- failed_tests: [list]
- critical_issues: N
- ready_for_production: true | false
```

## Interaction with Other Agents

### Receives From:
- **CD (Code Developer)**: "Here are code changes, ready for testing"
  - Used for generating test strategy
- **RA (Requirements Analyst)**: "Here are success criteria"
  - Used for validation checks

### Sends To:
- **CD (Developer)**: "These tests are failing, need fixes"
  - Used for prioritizing work
- **Orchestrator**: "Quality report and recommendations"
  - Used for final sign-off

## Quality Standards

**Minimum Pass Criteria**:
- 90% test pass rate
- 80% code coverage
- All critical requirements implemented
- No critical bugs

**Production Readiness Criteria**:
- 95%+ test pass rate
- 85%+ code coverage
- Security review completed
- Performance benchmarks met (see the check sketched below)
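
A minimal sketch of how these gates could be checked mechanically; the threshold values come from the lists above, while the metrics field names are illustrative assumptions rather than part of the spec:

```javascript
// Sketch: evaluate the quality gates above against collected metrics.
function evaluateGates(metrics) {
  const minimumPass =
    metrics.pass_rate >= 90 &&
    metrics.coverage >= 80 &&
    metrics.critical_requirements_met &&   // assumed boolean field
    metrics.critical_bugs === 0
  const productionReady =
    minimumPass &&
    metrics.pass_rate >= 95 &&
    metrics.coverage >= 85 &&
    metrics.security_review_done &&        // assumed boolean field
    metrics.performance_benchmarks_met     // assumed boolean field
  return { minimumPass, productionReady }
}
```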

## Best Practices

1. **Clean Test Environment**: Run tests in isolated environment
2. **Consistent Metrics**: Use same tools and metrics across iterations
3. **Comprehensive Reporting**: Document all findings clearly
4. **Actionable Feedback**: Provide specific fix recommendations
5. **Archive Everything**: Keep complete records for future reference
6. **Version Control**: Track report versions for audit trail

696
.codex/skills/parallel-dev-cycle/phases/orchestrator.md
Normal file
@@ -0,0 +1,696 @@

# Orchestrator - Multi-Agent Coordination (Codex Pattern)

Orchestrate parallel dev cycle using Codex subagent pattern with continuous iteration support.

## Role

Coordinate four specialized agents → Manage state → Support continuous iteration → Generate unified documentation.

## Codex Pattern Overview

```
Main Orchestrator Flow:

┌─── spawn_agent (orchestrator role) ─────────────────────────┐
│                                                             │
│  Phase 1: INIT (Check control signals)                      │
│      ↓                                                      │
│  wait() → Parse cycle state                                 │
│      ↓                                                      │
│  Phase 2: AGENT ORCHESTRATION                               │
│      ↓                                                      │
│  spawn_agent(RA) | spawn_agent(EP)                          │
│  spawn_agent(CD) | spawn_agent(VAS)                         │
│      ↓                                                      │
│  wait({ ids: [RA, EP, CD, VAS] }) → Collect all results     │
│      ↓                                                      │
│  Phase 3: ITERATION HANDLING                                │
│      ↓                                                      │
│  [If extension needed]                                      │
│    send_input to affected agents                            │
│    wait() for updated results                               │
│      ↓                                                      │
│  Phase 4: AGGREGATION                                       │
│      ↓                                                      │
│  Merge all outputs → Generate unified documentation         │
│      ↓                                                      │
│  Update cycle state                                         │
│      ↓                                                      │
│  [Loop if more iterations]                                  │
│      ↓                                                      │
│  close_agent() when complete                                │
│                                                             │
└─────────────────────────────────────────────────────────────┘
```

## State Management

### Read Cycle State

```javascript
// Shift the epoch by 8h so the ISO string reads as UTC+8 wall-clock time
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()

function readCycleState(cycleId) {
  const stateFile = `.workflow/.cycle/${cycleId}.json`
  if (!fs.existsSync(stateFile)) {
    return null
  }
  return JSON.parse(Read(stateFile))
}
```

### Create New Cycle State

```javascript
function createCycleState(cycleId, taskDescription) {
  const stateFile = `.workflow/.cycle/${cycleId}.json`
  const now = getUtc8ISOString()

  const state = {
    // Metadata
    cycle_id: cycleId,
    title: taskDescription.substring(0, 100),
    description: taskDescription,
    max_iterations: 5,
    status: 'running',
    created_at: now,
    updated_at: now,

    // Agent tracking
    agents: {
      ra: { status: 'idle', output_files: [] },
      ep: { status: 'idle', output_files: [] },
      cd: { status: 'idle', output_files: [] },
      vas: { status: 'idle', output_files: [] }
    },

    // Phase tracking
    current_phase: 'init',
    completed_phases: [],
    current_iteration: 0,

    // Shared context (populated by agents)
    requirements: null,
    exploration: null,
    plan: null,
    changes: [],
    test_results: null
  }

  // Create progress directories (shell):
  //   mkdir -p .workflow/.cycle/${cycleId}.progress/{ra,ep,cd,vas,coordination}

  Write(stateFile, JSON.stringify(state, null, 2))
  return state
}
```

## Main Execution Flow (Codex Subagent)

```javascript
async function runOrchestrator(options = {}) {
  const { cycleId: existingCycleId, task, mode = 'interactive', extension } = options

  console.log('=== Parallel Dev Cycle Orchestrator Started ===')

  // 1. Determine cycleId and initial state
  let cycleId
  let state

  if (existingCycleId) {
    // Continue existing cycle
    cycleId = existingCycleId
    state = readCycleState(cycleId)

    if (!state) {
      console.error(`Cycle not found: ${cycleId}`)
      return { status: 'error', message: 'Cycle not found' }
    }

    console.log(`Resuming cycle: ${cycleId}`)
    if (extension) {
      console.log(`Extension: ${extension}`)
      state.description += `\n\n--- ITERATION ${state.current_iteration + 1} ---\n${extension}`
    }

  } else if (task) {
    // Create new cycle
    const timestamp = getUtc8ISOString().replace(/[-:]/g, '').split('.')[0]
    const random = Math.random().toString(36).substring(2, 10)
    cycleId = `cycle-v1-${timestamp}-${random}`

    console.log(`Creating new cycle: ${cycleId}`)
    state = createCycleState(cycleId, task)

  } else {
    console.error('Either --cycle-id or task description is required')
    return { status: 'error', message: 'Missing cycleId or task' }
  }

  const progressDir = `.workflow/.cycle/${cycleId}.progress`

  // 2. Main orchestration loop
  let iteration = state.current_iteration || 0
  const maxIterations = state.max_iterations || 5
  let continueLoop = true

  while (continueLoop && iteration < maxIterations) {
    iteration++
    state.current_iteration = iteration

    console.log(`\n========== ITERATION ${iteration} ==========`)

    // 3. Spawn four agents in parallel
    console.log('Spawning agents...')

    const agents = {
      ra: spawnRAAgent(cycleId, state, progressDir),
      ep: spawnEPAgent(cycleId, state, progressDir),
      cd: spawnCDAgent(cycleId, state, progressDir),
      vas: spawnVASAgent(cycleId, state, progressDir)
    }

    // 4. Wait for all agents to complete
    console.log('Waiting for all agents...')
    const results = wait({
      ids: [agents.ra, agents.ep, agents.cd, agents.vas],
      timeout_ms: 1800000 // 30 minutes
    })

    if (results.timed_out) {
      console.log('Some agents timed out, sending convergence request...')
      Object.entries(agents).forEach(([name, id]) => {
        if (!results.status[id].completed) {
          send_input({
            id: id,
            message: `
## TIMEOUT NOTIFICATION

Execution timeout reached. Please:
1. Output current progress to markdown file
2. Save all state updates
3. Return completion status
`
          })
        }
      })
      continue
    }

    // 5. Collect all agent outputs
    // (in this pseudo-API, `completed` carries the agent's final text output)
    const agentOutputs = {
      ra: results.status[agents.ra].completed,
      ep: results.status[agents.ep].completed,
      cd: results.status[agents.cd].completed,
      vas: results.status[agents.vas].completed
    }

    // 6. Parse and aggregate results
    const parsedResults = parseAgentOutputs(agentOutputs)

    // Update state with agent results
    state.agents.ra.status = 'completed'
    state.agents.ep.status = 'completed'
    state.agents.cd.status = 'completed'
    state.agents.vas.status = 'completed'

    state.requirements = parsedResults.ra.requirements
    state.exploration = parsedResults.ep.exploration
    state.plan = parsedResults.ep.plan
    state.changes = parsedResults.cd.changes
    state.test_results = parsedResults.vas.test_results

    state.completed_phases.push(...['ra', 'ep', 'cd', 'vas'])
    state.updated_at = getUtc8ISOString()

    // Save state
    Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))

    // 7. Check for issues and determine next iteration
    const hasIssues = parsedResults.vas.test_results?.passed === false ||
      parsedResults.cd.issues?.length > 0

    if (hasIssues && iteration < maxIterations) {
      console.log('Issues detected, preparing for next iteration...')

      // Generate feedback for agents
      const feedback = generateFeedback(parsedResults)

      // Send feedback to relevant agents
      if (feedback.ra) {
        send_input({
          id: agents.ra,
          message: feedback.ra
        })
      }

      if (feedback.cd) {
        send_input({
          id: agents.cd,
          message: feedback.cd
        })
      }

      // Wait for updates
      const updatedResults = wait({
        ids: [agents.ra, agents.cd].filter(Boolean),
        timeout_ms: 900000
      })

      console.log('Agents updated, continuing...')

    } else if (!hasIssues) {
      console.log('All phases completed successfully')
      continueLoop = false

    } else if (iteration >= maxIterations) {
      console.log(`Reached maximum iterations (${maxIterations})`)
      continueLoop = false
    }
  }

  // 8. Generate unified summary
  console.log('Generating final summary...')
  generateFinalSummary(cycleId, state)

  // 9. Update final state
  state.status = 'completed'
  state.completed_at = getUtc8ISOString()
  Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))

  // 10. Cleanup
  Object.values(agents).forEach(id => {
    try {
      close_agent({ id })
    } catch (e) {
      console.warn(`Failed to close agent ${id}`)
    }
  })

  console.log('\n=== Parallel Dev Cycle Orchestrator Finished ===')

  return {
    status: 'completed',
    cycle_id: cycleId,
    iterations: iteration,
    final_state: state
  }
}
```
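
For orientation, the two entry points this flow supports might be invoked like this (a sketch; the argument values are illustrative):

```javascript
// Start a brand-new cycle from a task description
runOrchestrator({ task: 'Implement OAuth authentication' })

// Resume an existing cycle with an extension for the next iteration
runOrchestrator({
  cycleId: 'cycle-v1-20260122T100000-abc123',
  extension: 'Also add multi-factor authentication'
})
```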

## Agent Spawning Functions

### Spawn RA Agent

```javascript
function spawnRAAgent(cycleId, state, progressDir) {
  return spawn_agent({
    message: `
## TASK ASSIGNMENT

### MANDATORY FIRST STEPS (Agent Execute)
1. **Read role definition**: ~/.codex/agents/requirements-analyst.md
2. Read: .workflow/project-tech.json (if exists)
3. Read: .workflow/project-guidelines.json (if exists)
4. Read: .workflow/.cycle/${cycleId}.progress/coordination/feedback.md (if exists)

---

## CYCLE CONTEXT

- **Cycle ID**: ${cycleId}
- **Progress Dir**: ${progressDir}/ra/
- **Current Iteration**: ${state.current_iteration}
- **Task Description**: ${state.description}

## CURRENT REQUIREMENTS STATE

${state.requirements ? JSON.stringify(state.requirements, null, 2) : 'No previous requirements'}

## YOUR ROLE

Requirements Analyst - Analyze and refine requirements throughout the cycle.

## RESPONSIBILITIES

1. Analyze initial task description
2. Generate comprehensive requirements specification
3. Identify edge cases and implicit requirements
4. Track requirement changes across iterations
5. Maintain requirements.md and changes.log

## DELIVERABLES

Write files to ${progressDir}/ra/:
- requirements.md: Full requirements specification
- edge-cases.md: Edge case analysis
- changes.log: NDJSON format change tracking

## OUTPUT FORMAT

\`\`\`
PHASE_RESULT:
- phase: ra
- status: success | failed
- files_written: [list]
- summary: one-line summary
- issues: []
\`\`\`
`
  })
}
```

### Spawn EP Agent

```javascript
function spawnEPAgent(cycleId, state, progressDir) {
  return spawn_agent({
    message: `
## TASK ASSIGNMENT

### MANDATORY FIRST STEPS (Agent Execute)
1. **Read role definition**: ~/.codex/agents/exploration-planner.md
2. Read: .workflow/project-tech.json
3. Read: .workflow/project-guidelines.json
4. Read: ${progressDir}/ra/requirements.md

---

## CYCLE CONTEXT

- **Cycle ID**: ${cycleId}
- **Progress Dir**: ${progressDir}/ep/
- **Requirements**: See requirements.md
- **Current Plan**: ${state.plan ? 'Existing' : 'None - first iteration'}

## YOUR ROLE

Exploration & Planning Agent - Explore architecture and generate implementation plan.

## RESPONSIBILITIES

1. Explore codebase architecture
2. Map integration points
3. Design implementation approach
4. Generate plan.json with task breakdown
5. Update or iterate on existing plan

## DELIVERABLES

Write files to ${progressDir}/ep/:
- exploration.md: Codebase exploration findings
- architecture.md: Architecture design
- plan.json: Implementation plan (structured)

## OUTPUT FORMAT

\`\`\`
PHASE_RESULT:
- phase: ep
- status: success | failed
- files_written: [list]
- summary: one-line summary
- plan_version: X.Y.Z
\`\`\`
`
  })
}
```

### Spawn CD Agent

```javascript
function spawnCDAgent(cycleId, state, progressDir) {
  return spawn_agent({
    message: `
## TASK ASSIGNMENT

### MANDATORY FIRST STEPS (Agent Execute)
1. **Read role definition**: ~/.codex/agents/code-developer.md
2. Read: ${progressDir}/ep/plan.json
3. Read: ${progressDir}/ra/requirements.md

---

## CYCLE CONTEXT

- **Cycle ID**: ${cycleId}
- **Progress Dir**: ${progressDir}/cd/
- **Plan Version**: ${state.plan?.version || 'N/A'}
- **Previous Changes**: ${state.changes?.length || 0} files

## YOUR ROLE

Code Developer - Implement features based on plan and requirements.

## RESPONSIBILITIES

1. Implement features from plan
2. Track code changes
3. Handle integration issues
4. Maintain code quality
5. Report implementation progress and issues

## DELIVERABLES

Write files to ${progressDir}/cd/:
- implementation.md: Implementation progress and decisions
- changes.log: NDJSON format, each line: {file, action, timestamp}
- issues.md: Development issues and blockers

## OUTPUT FORMAT

\`\`\`
PHASE_RESULT:
- phase: cd
- status: success | failed | partial
- files_changed: [count]
- summary: one-line summary
- blockers: []
\`\`\`
`
  })
}
```

### Spawn VAS Agent

```javascript
function spawnVASAgent(cycleId, state, progressDir) {
  return spawn_agent({
    message: `
## TASK ASSIGNMENT

### MANDATORY FIRST STEPS (Agent Execute)
1. **Read role definition**: ~/.codex/agents/validation-archivist.md
2. Read: ${progressDir}/cd/changes.log

---

## CYCLE CONTEXT

- **Cycle ID**: ${cycleId}
- **Progress Dir**: ${progressDir}/vas/
- **Changes Count**: ${state.changes?.length || 0}
- **Iteration**: ${state.current_iteration}

## YOUR ROLE

Validation & Archival Specialist - Validate quality and create documentation.

## RESPONSIBILITIES

1. Run tests on implemented features
2. Generate coverage reports
3. Create archival documentation
4. Summarize cycle results
5. Generate version history

## DELIVERABLES

Write files to ${progressDir}/vas/:
- validation.md: Test validation results
- test-results.json: Detailed test results
- coverage.md: Coverage report
- summary.md: Cycle summary and recommendations

## OUTPUT FORMAT

\`\`\`
PHASE_RESULT:
- phase: vas
- status: success | failed
- test_pass_rate: X%
- coverage: X%
- issues: []
\`\`\`
`
  })
}
```

## Result Parsing

```javascript
function parseAgentOutputs(agentOutputs) {
  const results = {
    ra: parseOutput(agentOutputs.ra, 'ra'),
    ep: parseOutput(agentOutputs.ep, 'ep'),
    cd: parseOutput(agentOutputs.cd, 'cd'),
    vas: parseOutput(agentOutputs.vas, 'vas')
  }
  return results
}

function parseOutput(output, agent) {
  const result = {
    agent: agent,
    status: 'unknown',
    data: {}
  }

  // Parse PHASE_RESULT block
  const match = output.match(/PHASE_RESULT:\s*([\s\S]*?)(?:\n\n|$)/)
  if (match) {
    const lines = match[1].split('\n')
    for (const line of lines) {
      const m = line.match(/^-\s*(\w+):\s*(.+)$/)
      if (m) {
        result[m[1]] = m[2].trim()
      }
    }
  }

  return result
}
```
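
To make the contract concrete, here is what `parseOutput` yields for a typical agent reply (a sketch; the sample text follows the PHASE_RESULT format above):

```javascript
const sample = `PHASE_RESULT:
- phase: vas
- status: success
- test_pass_rate: 92%
- coverage: 87%`

const parsed = parseOutput(sample, 'vas')
// parsed.phase === 'vas', parsed.status === 'success',
// parsed.test_pass_rate === '92%'  (note: parsed values are strings)
```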

## Feedback Generation

```javascript
function generateFeedback(parsedResults) {
  const feedback = {}

  // Check VAS results (PHASE_RESULT values are strings, e.g. "92%")
  const passRate = parseInt(parsedResults.vas.test_pass_rate, 10)
  if (passRate < 100) {
    feedback.cd = `
## FEEDBACK FROM VALIDATION

Test pass rate: ${passRate}%

## ISSUES TO FIX

${parsedResults.vas.data.issues || 'See test-results.json for details'}

## NEXT STEP

Fix failing tests and update implementation.md with resolution.
`
  }

  // Check CD blockers
  if (parsedResults.cd.blockers?.length > 0) {
    feedback.ra = `
## FEEDBACK FROM DEVELOPMENT

Blockers encountered:
${parsedResults.cd.blockers.map(b => `- ${b}`).join('\n')}

## NEXT STEP

Clarify requirements or identify alternative approaches.
Update requirements.md if needed.
`
  }

  return feedback
}
```

## Summary Generation

```javascript
function generateFinalSummary(cycleId, state) {
  const summaryFile = `.workflow/.cycle/${cycleId}.progress/coordination/summary.md`

  const summary = `# Cycle Summary - ${cycleId}

## Metadata
- Cycle ID: ${cycleId}
- Started: ${state.created_at}
- Completed: ${state.completed_at}
- Iterations: ${state.current_iteration}
- Status: ${state.status}

## Phase Results
- Requirements Analysis: ✓ Completed
- Exploration & Planning: ✓ Completed
- Code Development: ✓ Completed
- Validation & Archival: ✓ Completed

## Key Deliverables
- Requirements: ${state.requirements ? '✓' : '✗'}
- Architecture Plan: ${state.plan ? '✓' : '✗'}
- Code Changes: ${state.changes?.length || 0} files
- Test Results: ${state.test_results?.pass_rate || '0'}% passing

## Generated Files
- .workflow/.cycle/${cycleId}.progress/ra/requirements.md
- .workflow/.cycle/${cycleId}.progress/ep/plan.json
- .workflow/.cycle/${cycleId}.progress/cd/changes.log
- .workflow/.cycle/${cycleId}.progress/vas/summary.md

## Continuation Instructions

To extend this cycle:

\`\`\`bash
/parallel-dev-cycle --cycle-id=${cycleId} --extend="New requirement or feedback"
\`\`\`

This will spawn agents for iteration ${state.current_iteration + 1}.
`

  Write(summaryFile, summary)
}
```

## Control Signal Checking

```javascript
function checkControlSignals(cycleId) {
  const state = readCycleState(cycleId)

  switch (state?.status) {
    case 'paused':
      return { continue: false, action: 'pause_exit' }
    case 'failed':
      return { continue: false, action: 'stop_exit' }
    case 'running':
      return { continue: true, action: 'continue' }
    default:
      return { continue: false, action: 'stop_exit' }
  }
}
```

## Error Recovery Strategies

| Error Type | Recovery |
|------------|----------|
| Agent timeout | send_input requesting convergence |
| State corrupted | Rebuild from progress markdown files |
| Agent failed | Re-spawn agent with previous context (sketched below) |
| Conflicting results | Orchestrator sends reconciliation request |
| Missing files | RA/EP agents identify and request clarification |
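
As one concrete reading of the "Agent failed" row, a re-spawn helper could reuse the spawn functions above (a sketch; error handling is elided):

```javascript
// Sketch: re-spawn a failed agent with its previous context. The spawn
// message already points the agent at its progress directory, so the
// replacement picks up from the files the failed run left behind.
function respawnAgent(name, cycleId, state, progressDir) {
  const spawners = { ra: spawnRAAgent, ep: spawnEPAgent, cd: spawnCDAgent, vas: spawnVASAgent }
  state.agents[name].status = 'running'
  state.agents[name].error = undefined
  return spawners[name](cycleId, state, progressDir)
}
```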

## Codex Best Practices Applied

1. **Single Orchestrator**: One main agent manages all phases
2. **Parallel Workers**: Four specialized agents execute simultaneously
3. **Batch wait()**: Wait for all agents with `wait({ ids: [...] })`
4. **Deep Interaction**: Use send_input for iteration and refinement
5. **Delayed close_agent**: Only after all phases and iterations complete
6. **Role Path Passing**: Each agent reads its own role definition
7. **Persistent Context**: Cycle state shared across all agents

436
.codex/skills/parallel-dev-cycle/phases/state-schema.md
Normal file
@@ -0,0 +1,436 @@

# State Schema - Parallel Dev Cycle

Unified cycle state structure for multi-agent coordination and iteration support.

## State File Location

**Location**: `.workflow/.cycle/{cycleId}.json` (unified state, all agents access)

**Format**: JSON

## Cycle State Interface

```typescript
interface CycleState {
  // =====================================================
  // CORE METADATA
  // =====================================================

  cycle_id: string              // Unique cycle identifier
  title: string                 // Task title (first 100 chars)
  description: string           // Full task description
  task_history: string[]        // All task descriptions across iterations

  // =====================================================
  // STATUS & TIMING
  // =====================================================

  status: 'created' | 'running' | 'paused' | 'completed' | 'failed'
  created_at: string            // ISO8601 format
  updated_at: string            // ISO8601 format
  completed_at?: string         // ISO8601 format

  max_iterations: number        // Maximum iteration limit
  current_iteration: number     // Current iteration count
  failure_reason?: string       // If failed, why

  // =====================================================
  // MULTI-AGENT TRACKING
  // =====================================================

  agents: {
    ra: AgentState              // Requirements Analyst
    ep: AgentState              // Exploration Planner
    cd: AgentState              // Code Developer
    vas: AgentState             // Validation Archivist
  }

  // =====================================================
  // PHASE TRACKING
  // =====================================================

  current_phase: 'init' | 'ra' | 'ep' | 'cd' | 'vas' | 'aggregation' | 'complete'
  completed_phases: string[]
  phase_errors: Array<{
    phase: string
    error: string
    timestamp: string
  }>

  // =====================================================
  // SHARED CONTEXT (Populated by agents)
  // =====================================================

  requirements?: {
    version: string             // e.g., "1.0.0", "1.1.0"
    specification: string       // Full spec from requirements.md
    edge_cases: string[]
    last_updated: string
  }

  exploration?: {
    version: string
    architecture_summary: string
    integration_points: string[]
    identified_risks: string[]
    last_updated: string
  }

  plan?: {
    version: string
    tasks: PlanTask[]
    total_estimated_effort: string
    critical_path: string[]
    last_updated: string
  }

  changes?: {
    total_files: number
    changes: ChangeLog[]
    iteration_markers: Record<number, string>  // Iteration timestamps
  }

  test_results?: {
    version: string
    pass_rate: number           // 0-100
    coverage: number            // 0-100
    failed_tests: string[]
    total_tests: number
    last_run: string
  }

  // =====================================================
  // ITERATION TRACKING
  // =====================================================

  iterations: IterationRecord[]

  // =====================================================
  // COORDINATION DATA
  // =====================================================

  coordination: {
    feedback_log: FeedbackEntry[]
    pending_decisions: Decision[]
    blockers: Blocker[]
  }
}

// =====================================================
// SUPPORTING TYPES
// =====================================================

interface AgentState {
  status: 'idle' | 'running' | 'waiting' | 'completed' | 'failed'
  started_at?: string
  completed_at?: string
  output_files: string[]
  last_message?: string
  error?: string
  iterations_completed: number
}

interface PlanTask {
  id: string                    // e.g., "TASK-001"
  description: string
  effort: 'small' | 'medium' | 'large'
  depends_on: string[]
  status: 'pending' | 'in_progress' | 'completed' | 'blocked'
  assigned_to?: string          // Agent name
  files: string[]
}

interface ChangeLog {
  timestamp: string
  file: string
  action: 'create' | 'modify' | 'delete'
  iteration: number
  agent: string                 // Which agent made the change
  description: string
}

interface IterationRecord {
  number: number
  extension?: string            // User feedback/extension for this iteration
  started_at: string
  completed_at: string
  agent_results: Record<string, {
    status: string
    files_modified: number
  }>
  issues_found: string[]
  resolved: boolean
}

interface FeedbackEntry {
  timestamp: string
  source: string                // Agent or 'user'
  target: string                // Recipient agent
  content: string
  type: 'requirement_update' | 'bug_report' | 'issue_fix' | 'clarification'
}

interface Decision {
  id: string
  description: string
  options: string[]
  made_by?: string
  chosen_option?: string
  status: 'pending' | 'made' | 'implemented'
}

interface Blocker {
  id: string
  description: string
  reported_by: string
  status: 'open' | 'resolved' | 'workaround'
  resolution?: string
}
```

## Initial State (New Cycle)

When creating a new cycle:

```json
{
  "cycle_id": "cycle-v1-20260122T100000-abc123",
  "title": "Implement OAuth authentication",
  "description": "Add OAuth2 login support with Google and GitHub providers",
  "task_history": [
    "Implement OAuth authentication"
  ],
  "status": "created",
  "created_at": "2026-01-22T10:00:00+08:00",
  "updated_at": "2026-01-22T10:00:00+08:00",
  "max_iterations": 5,
  "current_iteration": 0,
  "agents": {
    "ra": { "status": "idle", "output_files": [], "iterations_completed": 0 },
    "ep": { "status": "idle", "output_files": [], "iterations_completed": 0 },
    "cd": { "status": "idle", "output_files": [], "iterations_completed": 0 },
    "vas": { "status": "idle", "output_files": [], "iterations_completed": 0 }
  },
  "current_phase": "init",
  "completed_phases": [],
  "phase_errors": [],
  "iterations": [],
  "coordination": {
    "feedback_log": [],
    "pending_decisions": [],
    "blockers": []
  }
}
```

## State Transitions

### Iteration 1: Initial Execution

```json
{
  "status": "running",
  "current_iteration": 1,
  "current_phase": "ra",
  "agents": {
    "ra": { "status": "running", "started_at": "2026-01-22T10:05:00+08:00" },
    "ep": { "status": "idle" },
    "cd": { "status": "idle" },
    "vas": { "status": "idle" }
  },
  "requirements": {
    "version": "1.0.0",
    "specification": "...",
    "edge_cases": ["OAuth timeout handling", "PKCE validation"],
    "last_updated": "2026-01-22T10:15:00+08:00"
  },
  "iterations": [{
    "number": 1,
    "started_at": "2026-01-22T10:00:00+08:00",
    "agent_results": {
      "ra": { "status": "completed", "files_modified": 3 },
      "ep": { "status": "completed", "files_modified": 2 },
      "cd": { "status": "partial", "files_modified": 5 },
      "vas": { "status": "pending", "files_modified": 0 }
    }
  }]
}
```

### After Phase Completion

```json
{
  "current_phase": "aggregation",
  "completed_phases": ["ra", "ep", "cd", "vas"],
  "plan": {
    "version": "1.0.0",
    "tasks": [
      {
        "id": "TASK-001",
        "description": "Setup OAuth application credentials",
        "effort": "small",
        "status": "completed",
        "files": ["src/config/oauth.ts"]
      }
    ]
  },
  "changes": {
    "total_files": 12,
    "iteration_markers": {
      "1": "2026-01-22T10:30:00+08:00"
    }
  },
  "test_results": {
    "version": "1.0.0",
    "pass_rate": 85,
    "coverage": 78,
    "failed_tests": ["test: OAuth timeout retry"],
    "total_tests": 20
  }
}
```

### Iteration 2: User Extension

User provides feedback: "Also add multi-factor authentication"

```json
{
  "status": "running",
  "current_iteration": 2,
  "task_history": [
    "Implement OAuth authentication",
    "Also add multi-factor authentication"
  ],
  "description": "Add OAuth2 login support with Google and GitHub providers\n\n--- ITERATION 2 ---\nAlso add multi-factor authentication",
  "agents": {
    "ra": { "status": "running", "iterations_completed": 1 },
    "ep": { "status": "idle", "iterations_completed": 1 },
    "cd": { "status": "idle", "iterations_completed": 1 },
    "vas": { "status": "idle", "iterations_completed": 1 }
  },
  "requirements": {
    "version": "1.1.0",
    "specification": "...",
    "last_updated": "2026-01-22T11:00:00+08:00"
  },
  "iterations": [
    { "number": 1, "completed_at": "..." },
    {
      "number": 2,
      "extension": "Also add multi-factor authentication",
      "started_at": "2026-01-22T10:45:00+08:00",
      "agent_results": {}
    }
  ],
  "coordination": {
    "feedback_log": [{
      "timestamp": "2026-01-22T10:45:00+08:00",
      "source": "user",
      "target": "ra",
      "content": "Add multi-factor authentication to requirements",
      "type": "requirement_update"
    }]
  }
}
```

## Version Tracking

Each component tracks its version:

- **Requirements**: `1.0.0` → `1.1.0` → `1.2.0` (each iteration)
- **Plan**: `1.0.0` → `1.1.0` (updated based on requirements)
- **Code**: Changes appended with iteration markers
- **Tests**: Results tracked per iteration

## File Sync Protocol

State changes trigger file writes (a sync helper for the first rule is sketched after this table):

| State Change | File Sync |
|--------------|-----------|
| `requirements` updated | `.progress/ra/requirements.md` + version bump |
| `plan` updated | `.progress/ep/plan.json` + version bump |
| `changes` appended | `.progress/cd/changes.log` + iteration marker |
| `test_results` updated | `.progress/vas/test-results.json` + version bump |
| Full iteration done | `.progress/coordination/timeline.md` appended |
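
A minimal sketch of one sync rule, assuming the `Write` pseudo-API and the paths from the table (the helper name is illustrative):

```javascript
// Sketch: after the RA agent updates state.requirements, mirror the
// specification to its progress file and record the update time.
function syncRequirements(cycleId, state) {
  const file = `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`
  Write(file, state.requirements.specification)
  state.requirements.last_updated = getUtc8ISOString()
}
```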
|
||||
|
||||
## Control Signal Checking
|
||||
|
||||
Agents check status before each action:
|
||||
|
||||
```javascript
|
||||
function checkControlSignals(cycleId) {
|
||||
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
|
||||
|
||||
if (state.status === 'paused') {
|
||||
return { continue: false, action: 'pause' }
|
||||
}
|
||||
if (state.status === 'failed') {
|
||||
return { continue: false, action: 'stop' }
|
||||
}
|
||||
if (state.status === 'running') {
|
||||
return { continue: true, action: 'continue' }
|
||||
}
|
||||
|
||||
return { continue: false, action: 'unknown' }
|
||||
}
|
||||
```
|
||||
|
||||
## State Persistence
|
||||
|
||||
### Write Operations
|
||||
|
||||
After each agent completes or phase transitions:
|
||||
|
||||
```javascript
|
||||
Write(
|
||||
`.workflow/.cycle/${cycleId}.json`,
|
||||
JSON.stringify(state, null, 2)
|
||||
)
|
||||
```
|
||||
|
||||
### Read Operations
|
||||
|
||||
Agents always read fresh state before executing:
|
||||
|
||||
```javascript
|
||||
const currentState = JSON.parse(
|
||||
Read(`.workflow/.cycle/${cycleId}.json`)
|
||||
)
|
||||
```

## State Rebuild (Recovery)

If the master state is corrupted, rebuild it from the per-agent output files:

```javascript
function rebuildState(cycleId) {
  const progressDir = `.workflow/.cycle/${cycleId}.progress`

  // Read each agent's output files
  const raMarkdown = Read(`${progressDir}/ra/requirements.md`)
  const epPlanJson = Read(`${progressDir}/ep/plan.json`)
  const cdChanges = Read(`${progressDir}/cd/changes.log`)
  const vasResults = Read(`${progressDir}/vas/test-results.json`)

  // Reconstruct state from files
  return {
    requirements: parseMarkdown(raMarkdown),
    plan: JSON.parse(epPlanJson),
    changes: parseNDJSON(cdChanges),
    test_results: JSON.parse(vasResults)
  }
}
```
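
`parseNDJSON` above, and the `readNDJSON`/`appendNDJSON` helpers used elsewhere in this document, are assumed utilities; a minimal sketch on top of the same `Read`/`Write` tools:

```javascript
// Sketch: NDJSON helpers over the Read/Write tools used in this document
function parseNDJSON(text) {
  return text.split('\n').filter(line => line.trim()).map(line => JSON.parse(line))
}

function readNDJSON(path) {
  return parseNDJSON(Read(path))
}

function appendNDJSON(path, record) {
  // Read-then-write append; acceptable for a single-writer orchestrator
  const existing = exists(path) ? Read(path) : ''
  Write(path, existing + JSON.stringify(record) + '\n')
}
```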

## Best Practices

1. **Immutable Reads**: Never modify state during a read
2. **Version Bumps**: Increment the version on each iteration
3. **Timestamp Accuracy**: Use UTC+8 consistently
4. **Append-Only Logs**: Never delete history
5. **Atomic Writes**: Write complete state, not partial updates (see the sketch below)
6. **Coordination Tracking**: Log all inter-agent communication
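
For practice 5, one common way to keep state writes atomic is the temp-file-plus-rename pattern; a sketch, assuming Node's `fs` is available:

```javascript
const fs = require('fs')

// Sketch: atomic full-state write via temp file + rename
function writeStateAtomic(path, state) {
  const tmp = `${path}.tmp`
  fs.writeFileSync(tmp, JSON.stringify(state, null, 2))
  fs.renameSync(tmp, path) // rename is atomic on the same filesystem
}
```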

@@ -0,0 +1,423 @@
# Agent Communication Optimization

Optimize the agent communication mechanism: pass short references to output files instead of the content itself.

## Background

In a multi-agent system, passing complete file contents leads to:
- Oversized message bodies
- Increased context usage
- Inefficient communication
- Context gaps creeping in easily

**Optimization**: Use file path references and let agents read the files they need on their own.

## Optimization Principles

### Principle 1: File References, Not Content Transfer

❌ **Wrong** (passing content):
```javascript
send_input({
  id: agents.cd,
  message: `
Requirements:
${requirements_content}   // full content - wastes space

Plan:
${plan_json}              // full JSON - duplicated information
`
})
```

✅ **Right** (referencing files):
```javascript
send_input({
  id: agents.cd,
  message: `
## Feedback from Validation

Test failures found. Review these outputs:

## Reference
- Requirements: .workflow/.cycle/${cycleId}.progress/ra/requirements.md (v1.0.0)
- Plan: .workflow/.cycle/${cycleId}.progress/ep/plan.json (v1.0.0)
- Test Results: .workflow/.cycle/${cycleId}.progress/vas/test-results.json

## Issues Found
${summary_of_issues}   // only pass the summary

## Actions Required
1. Fix OAuth token refresh (test line 45)
2. Update implementation.md with fixes
`
})
```

### Principle 2: Summaries, Not Full Text

❌ **Wrong**:
```text
// Pass all file contents
RA output: "requirements.md (2000 lines) + edge-cases.md (1000 lines) + changes.log (500 lines)"

EP reads: parses all of it in full (wastes tokens)
```

✅ **Right**:
```text
// Pass only the key summary
RA output:
- 10 functional requirements
- 5 non-functional requirements
- 8 edge cases
- File paths for the full documents

EP reads: summary first + full files only when needed (efficient)
```

### Principle 3: File Version Tracking

Every reference must carry a version:

```javascript
send_input({
  id: agents.cd,
  message: `
Requirements: .workflow/.cycle/${cycleId}.progress/ra/requirements.md (v1.1.0)
                                                                       ^^^^^^^ version number

Plan: .workflow/.cycle/${cycleId}.progress/ep/plan.json (v1.0.0)
                                                         ^^^^^^^ version number
`
})
```

**Benefits**:
- Avoids acting on stale information
- Version mismatches are detected automatically
- Supports iteration across multiple versions

## Implementation Patterns

### Pattern 1: Notify + Reference

An agent announces its output to other agents instead of passing the content:

```javascript
// RA output summary
const raSummary = {
  requirements_count: 10,
  edge_cases_count: 8,
  version: "1.0.0",
  output_file: `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`,
  key_requirements: [
    "FR-001: OAuth authentication",
    "FR-002: Multi-provider support",
    "..." // titles only, no full content
  ]
}

// Update state so other agents can read it
state.requirements = {
  version: raSummary.version,
  output_file: raSummary.output_file,
  summary: raSummary.key_requirements
}

// EP agent reads from state
const requiredDetails = state.requirements
const outputFile = requiredDetails.output_file
const requirements = Read(outputFile) // EP reads the full markdown file itself
```

### Pattern 2: Feedback Notification

When sending feedback, the orchestrator passes only a summary and line numbers:

```javascript
// ❌ Wrong: pass the complete test results
send_input({
  id: agents.cd,
  message: `
Test Results:
${entire_test_results_json}   // full JSON - far too large
`
})

// ✅ Right: file reference + issue summary
send_input({
  id: agents.cd,
  message: `
## Test Failures

Full results: .workflow/.cycle/${cycleId}.progress/vas/test-results.json (v1.0.0)

## Quick Summary
- Failed: oauth-refresh (line 45, expected token refresh)
- Failed: concurrent-login (line 78, race condition)

## Fix Instructions
1. Review test cases at referenced lines
2. Fix implementation
3. Re-run tests
4. Update implementation.md

Reference previous file paths if you need full details.
`
})
```

### Pattern 3: Dependency Chain

Agents obtain their dependencies through file references:

```javascript
// EP agent: read the RA output path from state
const raOutputPath = state.requirements?.output_file
if (raOutputPath && exists(raOutputPath)) {
  const requirements = Read(raOutputPath)
  // use requirements to generate the plan
}

// CD agent: read the EP output path from state
const epPlanPath = state.plan?.output_file
if (epPlanPath && exists(epPlanPath)) {
  const plan = JSON.parse(Read(epPlanPath))
  // implement features according to the plan
}

// VAS agent: read the CD output path from state
const cdChangesPath = state.changes?.output_file
if (cdChangesPath && exists(cdChangesPath)) {
  const changes = readNDJSON(cdChangesPath)
  // generate tests from the changes
}
```

## State File Reference Structure

The optimized state file should contain file paths rather than content:

```json
{
  "cycle_id": "cycle-v1-20260122-abc123",

  "requirements": {
    "version": "1.0.0",
    "output_files": {
      "specification": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ra/requirements.md",
      "edge_cases": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ra/edge-cases.md",
      "changes_log": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ra/changes.log"
    },
    "summary": {
      "functional_requirements": 10,
      "edge_cases": 8,
      "constraints": 5
    }
  },

  "exploration": {
    "version": "1.0.0",
    "output_files": {
      "exploration": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ep/exploration.md",
      "architecture": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ep/architecture.md"
    },
    "summary": {
      "key_components": ["Auth Module", "User Service"],
      "integration_points": 5,
      "identified_risks": 3
    }
  },

  "plan": {
    "version": "1.0.0",
    "output_file": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/ep/plan.json",
    "summary": {
      "total_tasks": 8,
      "critical_path": ["TASK-001", "TASK-003", "TASK-004"],
      "estimated_hours": 16
    }
  },

  "implementation": {
    "version": "1.0.0",
    "output_files": {
      "progress": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/implementation.md",
      "changes": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/changes.log",
      "issues": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/cd/issues.md"
    },
    "summary": {
      "tasks_completed": 3,
      "files_modified": 5,
      "blockers": 0
    }
  },

  "validation": {
    "version": "1.0.0",
    "output_files": {
      "validation": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/vas/validation.md",
      "test_results": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/vas/test-results.json",
      "coverage": ".workflow/.cycle/cycle-v1-20260122-abc123.progress/vas/coverage.md"
    },
    "summary": {
      "pass_rate": 92,
      "coverage": 87,
      "failures": 4
    }
  }
}
```
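
With this structure, a dependent agent only needs a tiny accessor to go from state to file content; a sketch, reusing the `Read`/`exists` helpers assumed throughout:

```javascript
// Sketch: resolve one of a component's output files from state and read it
function readAgentOutput(state, component, name) {
  const entry = state[component]
  if (!entry) throw new Error(`No state entry for ${component}`)
  const path = entry.output_file || entry.output_files?.[name]
  if (!path || !exists(path)) throw new Error(`Missing output file for ${component}/${name}`)
  return { version: entry.version, content: Read(path) }
}

// e.g. const { version, content } = readAgentOutput(state, 'requirements', 'specification')
```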

## Agent Communication Template Optimization

### Before: Full Content Transfer

```javascript
send_input({
  id: agents.cd,
  message: `
## Requirements (Complete Content)

${fs.readFileSync(requirementsFile, 'utf8')}   // 2000+ lines

## Plan (Complete JSON)

${fs.readFileSync(planFile, 'utf8')}           // 1000+ lines

## Test Results (Complete)

${fs.readFileSync(testResultsFile, 'utf8')}    // 500+ lines

## Your Task

Fix the implementation...
`   // total message body: 4000+ lines
})
```

### After: File Reference + Summary

```javascript
send_input({
  id: agents.cd,
  message: `
## Test Failures - Action Required

Full Test Report: .workflow/.cycle/${cycleId}.progress/vas/test-results.json (v1.0.0)

## Summary of Failures
- oauth-refresh: Expected token refresh, got error (test line 45)
- concurrent-login: Race condition in session writes (test line 78)

## Implementation Reference
- Current Code: .workflow/.cycle/${cycleId}.progress/cd/implementation.md (v1.0.0)
- Code Changes: .workflow/.cycle/${cycleId}.progress/cd/changes.log (v1.0.0)

## Action Required
1. Review failing tests in referenced test results file
2. Fix root causes (race condition, token handling)
3. Update implementation.md with fixes
4. Re-run tests

## Context
- Requirement: .workflow/.cycle/${cycleId}.progress/ra/requirements.md (v1.0.0)
- Plan: .workflow/.cycle/${cycleId}.progress/ep/plan.json (v1.0.0)

Output PHASE_RESULT when complete.
`   // total message body: <500 lines, efficient transfer
})
```

## Versioning Best Practices

### Version Mismatch Detection

```javascript
function validateVersionConsistency(state) {
  const versions = {
    ra: state.requirements?.version,
    ep: state.plan?.version,
    cd: state.implementation?.version,
    vas: state.validation?.version
  }

  // Check version consistency
  const allVersions = Object.values(versions).filter(v => v)
  const unique = new Set(allVersions)

  if (unique.size > 1) {
    console.warn('Version mismatch detected:')
    console.warn(versions)
    // Report the version diff and let the orchestrator decide whether to continue
  }

  return unique.size === 1
}
```

### File Existence Check

```javascript
function validateReferences(state, cycleId) {
  const checks = []

  // Verify that every referenced file exists
  for (const [agent, data] of Object.entries(state)) {
    if (data?.output_files) {
      for (const [name, path] of Object.entries(data.output_files)) {
        if (!fs.existsSync(path)) {
          checks.push({
            agent: agent,
            file: name,
            path: path,
            status: 'missing'
          })
        }
      }
    }
  }

  return checks
}
```

## Benefits Summary

| Aspect | Improvement |
|--------|-------------|
| Message size | Reduced 80-90% |
| Token usage | Reduced 60-70% |
| Read speed | No redundant content to parse |
| Version control | Clear version tracking |
| Context clarity | No version confusion |
| Maintainability | File changes don't require message changes |

## Migration Steps

### Step 1: Update the State Structure

```jsonc
// From this:
"requirements": "full content"

// To this:
"requirements": {
  "version": "1.0.0",
  "output_file": "path/to/file",
  "summary": {...}
}
```

### Step 2: Update Communication Templates

Change every `send_input` message to reference paths.

### Step 3: Agents Read For Themselves

Agents automatically read the files they need from the referenced paths.

### Step 4: Test Version Detection

Make sure a warning is raised when versions do not match.

406
.codex/skills/parallel-dev-cycle/specs/coordination-protocol.md
Normal file

@@ -0,0 +1,406 @@

# Coordination Protocol - Multi-Agent Communication

Inter-agent communication protocols and patterns for the parallel-dev-cycle skill.

## Overview

The coordination protocol enables four parallel agents (RA, EP, CD, VAS) to communicate efficiently while maintaining clear responsibilities and avoiding conflicts.

## Communication Channels

### 1. Shared State File (Primary)

**Location**: `.workflow/.cycle/{cycleId}.json`

**Access Pattern**:
- **Agents**: READ ONLY - check dependencies and status
- **Orchestrator**: READ-WRITE - updates state after each phase

```javascript
// Every agent: Read state to check dependencies
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
const canProceed = checkDependencies(state)

// Agent outputs PHASE_RESULT (reports to orchestrator, does NOT write state directly)
console.log("PHASE_RESULT: ...")

// Only the orchestrator writes to the state file after receiving PHASE_RESULT
// Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(updatedState, null, 2))
```

**Protocol**:
- Only the orchestrator writes to the state file (no concurrent writes, no lock needed)
- Agents read state to understand dependencies
- Timestamp all orchestrator updates in ISO8601 format
- Never delete existing data, only append

### 2. Progress Markdown Files (Async Log)

**Location**: `.workflow/.cycle/{cycleId}.progress/{agent}/`

Each agent writes progress to dedicated markdown files:

| Agent | Main Documents (Rewrite) | Logs (Append-Only) |
|-------|--------------------------|-------------------|
| RA | requirements.md | changes.log |
| EP | exploration.md, architecture.md, plan.json | changes.log |
| CD | implementation.md, issues.md | changes.log, debug-log.ndjson |
| VAS | validation.md, summary.md, test-results.json | changes.log |

**Protocol**:
- **Main documents**: Complete rewrite per iteration, archived to `history/`
- **Log files**: Append-only (changes.log, debug-log.ndjson) - never delete
- **Version synchronization**: All main documents share the same version (e.g., all v1.1.0 in iteration 2)
- Include a timestamp on each update

### 3. Orchestrator send_input (Synchronous)

**When**: The orchestrator needs to send feedback or corrections

```javascript
// Example: CD agent receives test failure feedback
send_input({
  id: agents.cd,
  message: `
## FEEDBACK FROM VALIDATION

Test failures detected: ${failures}

## REQUIRED ACTION

Fix the following:
${actionItems}

## NEXT STEP
Update implementation.md with fixes, then re-run tests.
Output PHASE_RESULT when complete.
`
})
```

**Protocol**:
- Only the orchestrator initiates send_input
- Clear action items and expected output
- Single message per iteration (no rapid-fire sends)

### 4. Coordination Log

**Location**: `.workflow/.cycle/{cycleId}.progress/coordination/`

Centralized log for inter-agent decisions and communication:

**feedback.md**:
```markdown
# Feedback & Coordination Log - Version X.Y.Z

## Timeline
- [10:00:00] Orchestrator: Created cycle
- [10:05:00] RA: Requirements analysis started
- [10:10:00] RA: Requirements completed, v1.0.0
- [10:10:01] EP: Starting exploration (depends on RA output)
- [10:15:00] EP: Architecture designed, plan.json v1.0.0
- [10:15:01] CD: Starting implementation (depends on EP plan)
- [10:30:00] CD: Implementation progressing, found blocker
- [10:31:00] RA: Clarified requirement after CD blocker
- [10:31:01] CD: Continuing with clarification
- [10:40:00] CD: Implementation complete
- [10:40:01] VAS: Starting validation
- [10:45:00] VAS: Testing complete, found failures
- [10:45:01] Orchestrator: Sending feedback to CD
- [10:46:00] CD: Fixed issues
- [10:50:00] VAS: Re-validation, all passing
- [10:50:01] Orchestrator: Cycle complete

## Decision Records
- [10:31:00] RA Clarification: OAuth optional vs required?
  - Decision: Optional (can use password)
  - Rationale: More flexible for users
  - Impact: Affects FR-003 implementation

## Blockers & Resolutions
- [10:30:00] Blocker: Database migration for existing users
  - Reported by: CD
  - Resolution: Set oauth_id = null for existing users
  - Status: Resolved

## Cross-Agent Dependencies
- EP depends on: RA requirements (v1.0.0)
- CD depends on: EP plan (v1.0.0)
- VAS depends on: CD code changes
```

## Message Formats

### Agent Status Update

Each agent's status is recorded in state:

```json
{
  "agents": {
    "ra": {
      "status": "completed",
      "started_at": "2026-01-22T10:05:00+08:00",
      "completed_at": "2026-01-22T10:15:00+08:00",
      "output_files": [
        ".workflow/.cycle/cycle-xxx.progress/ra/requirements.md",
        ".workflow/.cycle/cycle-xxx.progress/ra/edge-cases.md",
        ".workflow/.cycle/cycle-xxx.progress/ra/changes.log"
      ],
      "iterations_completed": 1
    }
  }
}
```

### Feedback Message Format

When the orchestrator sends feedback via send_input:

```text
## FEEDBACK FROM [Agent Name]

[Summary of findings or issues]

## REFERENCED OUTPUT
File: [path to agent output]
Version: [X.Y.Z]

## REQUIRED ACTION

1. [Action 1 with specific details]
2. [Action 2 with specific details]

## SUCCESS CRITERIA

- [ ] Item 1
- [ ] Item 2

## NEXT STEP
[What agent should do next]
Output PHASE_RESULT when complete.

## CONTEXT

Previous iteration: [N]
Current iteration: [N+1]
```

### Phase Result Format

Every agent outputs PHASE_RESULT:

```text
PHASE_RESULT:
- phase: [ra|ep|cd|vas]
- status: success | failed | partial
- files_written: [list of files]
- summary: [one-line summary]
- [agent-specific fields]
- issues: [list of issues if any]

PHASE_DETAILS:
[Additional details or metrics]
```
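
On the orchestrator side, this block has to be pulled out of the agent's raw output. A minimal sketch, assuming the `- key: value` line shape shown above:

```javascript
// Sketch: extract the PHASE_RESULT block from raw agent output
function parsePhaseResult(output) {
  const match = output.match(/PHASE_RESULT:\s*\n([\s\S]*?)(?=\n\s*PHASE_DETAILS:|$)/)
  if (!match) return null

  const result = {}
  for (const line of match[1].split('\n')) {
    const m = line.match(/^- (\w+): (.+)$/)
    if (m) result[m[1]] = m[2]
  }
  return result
}

// e.g. parsePhaseResult(rawOutput).status === 'success'
```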

## Dependency Resolution

**Execution Model**: All four agents are spawned in parallel, but execution blocks based on dependencies. The orchestrator manages dependency resolution via shared state.

### Build Order (Default)

```
RA (Requirements) → EP (Planning) → CD (Development) → VAS (Validation)
        ↓                ↓                 ↓                  ↓
     Block EP         Block CD         Block VAS       Block completion
```

**Explanation**:
- All agents spawned simultaneously
- Each agent checks dependencies in shared state before proceeding
- Blocked agents wait for dependency completion
- Orchestrator uses `send_input` to notify dependent agents when ready

### Parallel Opportunities

Some phases can run in parallel:

```
RA + FrontendCode    (independent)
EP + RA              (not blocking)
CD.Task1 + CD.Task2  (if no dependencies)
```

### Dependency Tracking

The state file tracks dependencies:

```json
{
  "agents": {
    "ep": {
      "depends_on": ["ra"],
      "ready": true,      // RA completed
      "can_start": true
    },
    "cd": {
      "depends_on": ["ep"],
      "ready": true,      // EP completed
      "can_start": true
    },
    "vas": {
      "depends_on": ["cd"],
      "ready": false,     // CD not yet complete
      "can_start": false
    }
  }
}
```
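
The `checkDependencies` call from the shared-state example earlier can then be a straight lookup over this structure; a sketch:

```javascript
// Sketch: has every dependency of this agent completed?
function checkDependencies(state, agentId) {
  const deps = state.agents[agentId]?.depends_on || []
  return deps.every(dep => state.agents[dep]?.status === 'completed')
}

// e.g. checkDependencies(state, 'vas') stays false until CD completes
```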

## Iteration Flow with Communication

### Iteration 1: Initial Execution

```
Time   Agent  Action                       State Update
──────────────────────────────────────────────────────
10:00  Init   Create cycle                 status: running
10:05  RA     Start analysis               agents.ra.status: running
10:10  RA     Complete (v1.0.0)            agents.ra.status: completed
10:10  EP     Start planning               agents.ep.status: running
              (depends on RA completion)
10:15  EP     Complete (v1.0.0)            agents.ep.status: completed
10:15  CD     Start development            agents.cd.status: running
              (depends on EP completion)
10:30  CD     Found blocker                coordination.blockers.add()
10:31  RA     Clarify blocker              requirements.v1.1.0 created
10:35  CD     Continue (with fix)          agents.cd.status: running
10:40  CD     Complete                     agents.cd.status: completed
10:40  VAS    Start validation             agents.vas.status: running
              (depends on CD completion)
10:45  VAS    Tests failing                coordination.feedback_log.add()
10:45  Orch   Send feedback                agents.cd.message: "Fix these tests"
10:46  CD     Resume (send_input)          agents.cd.status: running
10:48  CD     Fix complete                 agents.cd.status: completed
10:50  VAS    Re-validate                  agents.vas.status: running
10:55  VAS    All pass                     agents.vas.status: completed
11:00  Orch   Complete cycle               status: completed
```

## Conflict Resolution

### Conflict Type 1: Unclear Requirement

**Scenario**: CD needs clarification on FR-X

**Resolution Flow**:
1. CD reports the blocker in issues.md
2. Orchestrator extracts the blocker
3. Orchestrator sends a message to RA
4. RA updates requirements with the clarification
5. RA outputs a new requirements.md (v1.1.0)
6. Orchestrator sends a message to CD with the clarification
7. CD resumes and continues

### Conflict Type 2: Test Failure

**Scenario**: VAS finds test failures

**Resolution Flow**:
1. VAS reports failures in validation.md
2. VAS outputs test-results.json with details
3. Orchestrator extracts the failure details
4. Orchestrator categorizes the failures
5. If blocking: orchestrator sends them to CD/RA for fixes
6. CD/RA fix and report completion
7. Orchestrator tells CD/VAS to retry
8. VAS re-validates

### Conflict Type 3: Plan Mismatch

**Scenario**: CD realizes the plan's tasks are incomplete

**Resolution Flow**:
1. CD reports in issues.md
2. Orchestrator extracts the issue
3. Orchestrator sends it to EP to revise the plan
4. EP updates plan.json (v1.1.0)
5. EP adds new tasks or dependencies
6. Orchestrator sends the updated plan to CD
7. CD implements the remaining tasks

## Escalation Path

For issues that block resolution:

```
Agent Issue
    ↓
Agent reports blocker
    ↓
Orchestrator analyzes
    ↓
Can fix automatically?
 ├─ Yes: send_input to agent with fix
 └─ No: Escalate to user
    ↓
User provides guidance
    ↓
Orchestrator applies guidance
    ↓
Resume agents
```

## Communication Best Practices

1. **Clear Timestamps**: All events timestamped in ISO8601 format
2. **Structured Messages**: Use a consistent format for feedback
3. **Version Tracking**: Always include version numbers
4. **Audit Trail**: Maintain a complete log of decisions
5. **No Direct Agent Communication**: All communication goes through the orchestrator
6. **Document Decisions**: Record why decisions were made
7. **Append-Only Logs**: Never delete history

## State Consistency Rules

1. **Single Writer Per Field**: Only one agent updates each field
   - RA writes: requirements, edge_cases
   - EP writes: exploration, plan
   - CD writes: changes, implementation
   - VAS writes: test_results, summary

2. **Read-Write Serialization**: The orchestrator ensures no conflicts

3. **Version Synchronization**: All versions increment together
   - v1.0.0 → v1.1.0 (all docs updated)

4. **Timestamp Consistency**: All timestamps in the state file use UTC+8

## Monitoring & Debugging

### State Inspection

```javascript
// Check agent status
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
console.log(state.agents) // See status of all agents

// Check for blockers
console.log(state.coordination.blockers)

// Check feedback history
console.log(state.coordination.feedback_log)
```

### Log Analysis

```bash
# Check RA progress
tail .workflow/.cycle/cycle-xxx.progress/ra/changes.log

# Check CD changes
grep "TASK-001" .workflow/.cycle/cycle-xxx.progress/cd/changes.log

# Check coordination timeline
tail -50 .workflow/.cycle/cycle-xxx.progress/coordination/feedback.md
```

331
.codex/skills/parallel-dev-cycle/specs/versioning-strategy.md
Normal file

@@ -0,0 +1,331 @@

# Document Versioning Strategy

Document version management strategy: Complete Rewrite + Archive History.

## Recommended Approach: Complete Rewrite + Archive History

For each iteration, **completely rewrite** the main document and automatically archive the old version to the `history/` directory.

### File Structure

```
.workflow/.cycle/cycle-v1-20260122-abc123.progress/
├── ra/
│   ├── requirements.md        # v1.2.0 (current version, complete rewrite)
│   ├── edge-cases.md          # v1.2.0 (current version, complete rewrite)
│   ├── changes.log            # NDJSON complete change history (append-only)
│   └── history/
│       ├── requirements-v1.0.0.md  (archived)
│       ├── requirements-v1.1.0.md  (archived)
│       ├── edge-cases-v1.0.0.md    (archived)
│       └── edge-cases-v1.1.0.md    (archived)
├── ep/
│   ├── exploration.md         # v1.2.0 (current)
│   ├── architecture.md        # v1.2.0 (current)
│   ├── plan.json              # v1.2.0 (current)
│   └── history/
│       ├── plan-v1.0.0.json
│       └── plan-v1.1.0.json
├── cd/
│   ├── implementation.md      # v1.2.0 (current)
│   ├── changes.log            # NDJSON complete history
│   ├── debug-log.ndjson       # Debug hypothesis tracking
│   ├── issues.md              # Current unresolved issues
│   └── history/
│       ├── implementation-v1.0.0.md
│       └── implementation-v1.1.0.md
└── vas/
    ├── validation.md          # v1.2.0 (current)
    ├── test-results.json      # v1.2.0 (current)
    ├── summary.md             # v1.2.0 (current)
    └── history/
        ├── validation-v1.0.0.md
        └── test-results-v1.0.0.json
```

## Optimized Document Template

### Requirements.md (Complete Rewrite Version)

```markdown
# Requirements Specification - v1.2.0

## Document Metadata
| Field | Value |
|-------|-------|
| Version | 1.2.0 |
| Previous | 1.1.0 (Added Google OAuth) |
| Changes | Added MFA, GitHub provider |
| Date | 2026-01-23T10:00:00+08:00 |
| Cycle | cycle-v1-20260122-abc123 |
| Iteration | 3 |

---

## Functional Requirements

### FR-001: OAuth Authentication
**Description**: Users can log in using OAuth providers.

**Supported Providers**: Google, GitHub

**Priority**: High

**Status**: ✓ Implemented (v1.0.0), Enhanced (v1.1.0, v1.2.0)

**Success Criteria**:
- User can click provider button
- Redirect to provider
- Return with valid token
- Session created

---

### FR-002: Multi-Provider Support
**Description**: System supports multiple OAuth providers simultaneously.

**Providers**:
- Google (v1.1.0)
- GitHub (v1.2.0)

**Priority**: High

**Status**: ✓ Implemented

---

### FR-003: Multi-Factor Authentication
**Description**: Optional MFA for enhanced security.

**Method**: TOTP (Time-based One-Time Password)

**Priority**: Medium

**Status**: 🆕 New in v1.2.0

**Success Criteria**:
- User can enable MFA in settings
- TOTP QR code generated
- Verification on login

---

## Non-Functional Requirements

### NFR-001: Performance
Response time < 500ms for all OAuth flows.

**Status**: ✓ Met (v1.0.0)

---

## Edge Cases

### EC-001: OAuth Provider Timeout
**Scenario**: Provider doesn't respond within 5 seconds

**Expected**: Display error, offer retry

**Status**: ✓ Handled

---

### EC-002: Invalid MFA Code (NEW v1.2.0)
**Scenario**: User enters an incorrect TOTP code

**Expected**: Display error, max 3 attempts, lock after

**Status**: 🔄 To be implemented

---

## Constraints
- Must use existing JWT session management
- No new database servers
- Compatible with existing user table

---

## Assumptions
- Users have access to an authenticator app for MFA
- OAuth providers are always available

---

## Version History Summary

| Version | Date | Summary |
|---------|------|---------|
| 1.0.0 | 2026-01-22 | Initial OAuth login (Google only, implicit) |
| 1.1.0 | 2026-01-22 | + Explicit Google OAuth support |
| 1.2.0 | 2026-01-23 | + GitHub provider, + MFA (current) |

**Detailed History**: See the `history/` directory and `changes.log`
```

### Changes.log (NDJSON - Complete History)

```jsonl
{"timestamp":"2026-01-22T10:00:00+08:00","iteration":1,"version":"1.0.0","action":"create","type":"requirement","id":"FR-001","description":"Initial OAuth requirement"}
{"timestamp":"2026-01-22T10:05:00+08:00","iteration":1,"version":"1.0.0","action":"create","type":"requirement","id":"NFR-001","description":"Performance requirement"}
{"timestamp":"2026-01-22T11:00:00+08:00","iteration":2,"version":"1.1.0","action":"update","type":"requirement","id":"FR-001","description":"Clarified Google OAuth support"}
{"timestamp":"2026-01-22T11:05:00+08:00","iteration":2,"version":"1.1.0","action":"create","type":"requirement","id":"FR-002","description":"Multi-provider support"}
{"timestamp":"2026-01-23T10:00:00+08:00","iteration":3,"version":"1.2.0","action":"create","type":"requirement","id":"FR-003","description":"MFA requirement"}
{"timestamp":"2026-01-23T10:05:00+08:00","iteration":3,"version":"1.2.0","action":"update","type":"requirement","id":"FR-002","description":"Added GitHub provider"}
```

## Implementation Flow

### Agent Workflow (RA Example)

```javascript
// ==================== RA Agent Iteration Flow ====================

// Read current state
const state = JSON.parse(Read(`.workflow/.cycle/${cycleId}.json`))
const currentVersion = state.requirements?.version || "0.0.0"
const iteration = state.current_iteration

// Declared up front so step 5 can use it even on the first iteration
let changesLog = []

// If iterating (an old version exists)
if (currentVersion !== "0.0.0") {
  // 1. Archive the old version
  const oldFile = `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`
  const archiveFile = `.workflow/.cycle/${cycleId}.progress/ra/history/requirements-v${currentVersion}.md`

  Copy(oldFile, archiveFile) // Archive

  // 2. Read the old version (optional, for context understanding)
  const oldRequirements = Read(oldFile)

  // 3. Read the change history
  changesLog = readNDJSON(`.workflow/.cycle/${cycleId}.progress/ra/changes.log`)
}

// 4. Generate the new version number
const newVersion = bumpVersion(currentVersion, 'minor') // 1.1.0 -> 1.2.0

// 5. Generate the new document (complete rewrite)
const newRequirements = generateRequirements({
  version: newVersion,
  previousVersion: currentVersion,
  previousSummary: "Added Google OAuth support",
  currentChanges: "Added MFA and GitHub provider",
  iteration: iteration,
  taskDescription: state.description,
  changesLog: changesLog // For understanding history
})

// 6. Write the new document (overwrite the old one)
Write(`.workflow/.cycle/${cycleId}.progress/ra/requirements.md`, newRequirements)

// 7. Append the change to changes.log
appendNDJSON(`.workflow/.cycle/${cycleId}.progress/ra/changes.log`, {
  timestamp: getUtc8ISOString(),
  iteration: iteration,
  version: newVersion,
  action: "create",
  type: "requirement",
  id: "FR-003",
  description: "Added MFA requirement"
})

// 8. Update state
state.requirements = {
  version: newVersion,
  output_file: `.workflow/.cycle/${cycleId}.progress/ra/requirements.md`,
  summary: {
    functional_requirements: 3,
    edge_cases: 2,
    constraints: 3
  }
}

Write(`.workflow/.cycle/${cycleId}.json`, JSON.stringify(state, null, 2))
```
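
`getUtc8ISOString` is an assumed helper; one way to produce the UTC+8 timestamps used throughout these files:

```javascript
// Sketch: ISO8601 timestamp pinned to UTC+8 (e.g. "2026-01-23T10:00:00.000+08:00")
function getUtc8ISOString() {
  const shifted = new Date(Date.now() + 8 * 60 * 60 * 1000)
  return shifted.toISOString().replace('Z', '+08:00')
}
```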

## Advantages Comparison

| Aspect | Incremental Update | Complete Rewrite + Archive |
|--------|-------------------|---------------------------|
| **Document Conciseness** | ❌ Gets longer | ✅ Always concise |
| **Agent Parsing** | ❌ Must parse history | ✅ Only read current version |
| **Maintenance Complexity** | ❌ High (version marking) | ✅ Low (direct rewrite) |
| **File Size** | ❌ Bloats | ✅ Fixed |
| **History Tracking** | ✅ In main document | ✅ In history/ + changes.log |
| **Human Readability** | ❌ Must skip history | ✅ Direct current view |
| **Token Usage** | ❌ More (read complete history) | ✅ Less (only read current) |

## Archive Strategy

### Auto-Archive Trigger

```javascript
function shouldArchive(currentVersion, state) {
  // Archive on each version update
  return currentVersion !== state.requirements?.version
}

function archiveOldVersion(cycleId, agent, filename, currentVersion) {
  const currentFile = `.workflow/.cycle/${cycleId}.progress/${agent}/${filename}`
  const archiveDir = `.workflow/.cycle/${cycleId}.progress/${agent}/history`
  const archiveFile = `${archiveDir}/${filename.replace('.', `-v${currentVersion}.`)}`

  // Ensure the archive directory exists
  Bash(`mkdir -p ${archiveDir}`)

  // Copy (not move; keep the current file until the new version is written)
  Copy(currentFile, archiveFile)

  console.log(`Archived ${filename} v${currentVersion} to history/`)
}
```

### Cleanup Strategy (Optional)

Keep the most recent N versions and delete older archives:

```javascript
function cleanupArchives(cycleId, agent, keepVersions = 3) {
  const historyDir = `.workflow/.cycle/${cycleId}.progress/${agent}/history`
  const archives = listFiles(historyDir)

  // Sort by version number
  archives.sort((a, b) => compareVersions(extractVersion(a), extractVersion(b)))

  // Delete the oldest versions (keep the most recent N)
  if (archives.length > keepVersions) {
    const toDelete = archives.slice(0, archives.length - keepVersions)
    toDelete.forEach(file => Delete(`${historyDir}/${file}`))
  }
}
```
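
`extractVersion` and `compareVersions` above are assumed helpers; a sketch matching the `name-vX.Y.Z.ext` archive naming used here:

```javascript
// Sketch: version helpers for archive file names like "requirements-v1.1.0.md"
function extractVersion(filename) {
  const m = filename.match(/-v(\d+\.\d+\.\d+)\./)
  return m ? m[1] : '0.0.0'
}

function compareVersions(a, b) {
  const pa = a.split('.').map(Number)
  const pb = b.split('.').map(Number)
  for (let i = 0; i < 3; i++) {
    if (pa[i] !== pb[i]) return pa[i] - pb[i]
  }
  return 0
}
```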

## Importance of Changes.log

Although the main document is completely rewritten, **changes.log (NDJSON) permanently preserves the complete history**:

```bash
# View all changes
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq .

# View the history of a specific requirement
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq 'select(.id=="FR-001")'

# View changes by iteration
cat .workflow/.cycle/cycle-xxx.progress/ra/changes.log | jq 'select(.iteration==2)'
```

This way:
- **Main Document**: Clear and concise (current state)
- **Changes.log**: Complete traceability (all history)
- **History/**: Snapshot backups (view on demand)

## Recommended Implementation

1. ✅ Adopt the "Complete Rewrite" strategy
2. ✅ The main document only keeps a "previous version summary"
3. ✅ Auto-archive to the `history/` directory
4. ✅ Changes.log (NDJSON) preserves the complete history
5. ✅ Optional: keep the most recent 3-5 historical versions

This approach keeps documents concise (agent-friendly) while preserving complete history (audit-friendly).

@@ -60,7 +60,7 @@ These commands orchestrate complex, multi-phase development processes, from plan

| Command | Description |
|---|---|
-| `/workflow:action-plan-verify`| Perform non-destructive cross-artifact consistency and quality analysis of IMPL_PLAN.md and task.json before execution. |
+| `/workflow:plan-verify`| Perform non-destructive cross-artifact consistency and quality analysis of IMPL_PLAN.md and task.json before execution. |

### Code Review Cycle

2
FAQ.md

@@ -665,7 +665,7 @@ CCW ensures dependencies are completed before dependent tasks execute.

2. **Run verification**:
   ```bash
-   /workflow:action-plan-verify
+   /workflow:plan-verify
   ```

3. **Automated reviews**:

@@ -153,7 +153,7 @@ After planning, validate your implementation plan for consistency and completeness

```bash
# After /workflow:plan completes, verify task quality
-/workflow:action-plan-verify
+/workflow:plan-verify

# The command will:
# 1. Check requirements coverage (all requirements have tasks)

@@ -158,7 +158,7 @@

```bash
# After /workflow:plan completes, verify task quality
-/workflow:action-plan-verify
+/workflow:plan-verify

# The command will:
# 1. Check requirements coverage (all requirements have tasks)

43
README.md

@@ -263,6 +263,49 @@ Open Dashboard via `ccw view`, manage indexes and execute searches in **CodexLens**

## 💻 CCW CLI Commands

### 🌟 Recommended Commands (Main Features)

<div align="center">
<table>
<tr><th>Command</th><th>Description</th><th>When to Use</th></tr>
<tr>
<td><b>/ccw</b></td>
<td>Auto workflow orchestrator - analyzes intent, selects workflow level, executes command chain in main process</td>
<td>✅ General tasks, auto workflow selection, quick development</td>
</tr>
<tr>
<td><b>/ccw-coordinator</b></td>
<td>Manual orchestrator - recommends command chains, executes via external CLI with state persistence</td>
<td>🔧 Complex multi-step workflows, custom chains, resumable sessions</td>
</tr>
</table>
</div>

**Quick Examples**:

```bash
# /ccw - Auto workflow selection (Main Process)
/ccw "Add user authentication"        # Auto-selects workflow based on intent
/ccw "Fix memory leak in WebSocket"   # Detects bugfix workflow
/ccw "Implement with TDD"             # Routes to TDD workflow

# /ccw-coordinator - Manual chain orchestration (External CLI)
/ccw-coordinator "Implement OAuth2 system"  # Analyzes → Recommends chain → User confirms → Executes
```

**Key Differences**:

| Aspect | /ccw | /ccw-coordinator |
|--------|------|------------------|
| **Execution** | Main process (SlashCommand) | External CLI (background tasks) |
| **Selection** | Auto intent-based | Manual chain confirmation |
| **State** | TodoWrite tracking | Persistent state.json |
| **Use Case** | General tasks, quick dev | Complex chains, resumable |

---

### Other CLI Commands

```bash
ccw install   # Install workflow files
ccw view      # Open dashboard

43
README_CN.md

@@ -263,6 +263,49 @@ codexlens index /path/to/project

## 💻 CCW CLI Commands

### 🌟 Recommended Commands (Core Features)

<div align="center">
<table>
<tr><th>Command</th><th>Description</th><th>When to Use</th></tr>
<tr>
<td><b>/ccw</b></td>
<td>Auto workflow orchestrator - analyzes intent, auto-selects the workflow level, executes the command chain in the main process</td>
<td>✅ General tasks, automatic workflow selection, quick development</td>
</tr>
<tr>
<td><b>/ccw-coordinator</b></td>
<td>Manual orchestrator - recommends command chains, executes via external CLI, persists state</td>
<td>🔧 Complex multi-step workflows, custom chains, resumable sessions</td>
</tr>
</table>
</div>

**Quick Examples**:

```bash
# /ccw - Auto workflow selection (main process)
/ccw "Add user authentication"        # Auto-selects workflow based on intent
/ccw "Fix memory leak in WebSocket"   # Detected as a bugfix workflow
/ccw "Implement with TDD"             # Routes to the TDD workflow

# /ccw-coordinator - Manual chain orchestration (external CLI)
/ccw-coordinator "Implement OAuth2 system"  # Analyze → Recommend chain → User confirms → Execute
```

**Key Differences**:

| Aspect | /ccw | /ccw-coordinator |
|--------|------|------------------|
| **Execution** | Main process (SlashCommand) | External CLI (background tasks) |
| **Selection** | Auto intent-based | Manual chain confirmation |
| **State** | TodoWrite tracking | Persistent state.json |
| **Use Case** | General tasks, quick dev | Complex chains, resumable |

---

### Other CLI Commands

```bash
ccw install   # Install workflow files
ccw view      # Open Dashboard

@@ -310,7 +310,7 @@ Return: Summary + Next Steps

```bash
/workflow:plan "task description"   # Complete planning
-/workflow:action-plan-verify        # Verify plan (recommended)
+/workflow:plan-verify               # Verify plan (recommended)
/workflow:execute                   # Execute
/workflow:review                    # (optional) Review
```

@@ -354,7 +354,7 @@ Phase 6: TDD Structure Validation

```bash
/workflow:tdd-plan "feature description"  # TDD planning
-/workflow:action-plan-verify              # Verify (recommended)
+/workflow:plan-verify                     # Verify (recommended)
/workflow:execute                         # Execute (follow Red-Green-Refactor)
/workflow:tdd-verify                      # Verify TDD compliance
```

@@ -454,7 +454,7 @@ Phase 3: Synthesis Integration

```bash
/workflow:brainstorm:auto-parallel "topic" [--count N] [--style-skill package]
/workflow:plan --session {sessionId}  # Plan based on brainstorm results
-/workflow:action-plan-verify          # Verify
+/workflow:plan-verify                 # Verify
/workflow:execute                     # Execute
```

@@ -309,7 +309,7 @@ Return: Summary + Next Steps

```bash
/workflow:plan "task description"   # Complete planning
-/workflow:action-plan-verify        # Verify plan (recommended)
+/workflow:plan-verify               # Verify plan (recommended)
/workflow:execute                   # Execute
/workflow:review                    # (optional) Review
```

@@ -353,7 +353,7 @@ Phase 6: TDD Structure Validation

```bash
/workflow:tdd-plan "feature description"  # TDD planning
-/workflow:action-plan-verify              # Verify (recommended)
+/workflow:plan-verify                     # Verify (recommended)
/workflow:execute                         # Execute (follow Red-Green-Refactor)
/workflow:tdd-verify                      # Verify TDD compliance
```

@@ -453,7 +453,7 @@ Phase 3: Synthesis Integration

```bash
/workflow:brainstorm:auto-parallel "topic" [--count N] [--style-skill package]
/workflow:plan --session {sessionId}  # Plan based on brainstorm results
-/workflow:action-plan-verify          # Verify
+/workflow:plan-verify                 # Verify
/workflow:execute                     # Execute
```

@@ -233,6 +233,28 @@ const ISSUES_DIR = '.workflow/issues';

// ============ Storage Layer (JSONL) ============

/**
 * Cached project root to avoid repeated git command execution
 */
let cachedProjectRoot: string | null = null;

/**
 * Clear cached project root (for testing)
 */
export function clearProjectRootCache(): void {
  cachedProjectRoot = null;
}

/**
 * Debug logging helper (enabled via CCW_DEBUG=true)
 */
const DEBUG = process.env.CCW_DEBUG === 'true';
function debugLog(msg: string): void {
  if (DEBUG) {
    console.log(`[ccw:worktree] ${msg}`);
  }
}

/**
 * Normalize path for comparison (handles Windows case sensitivity)
 */

@@ -271,7 +293,32 @@ function resolveMainRepoFromGitFile(gitFilePath: string): string | null {
 * This ensures .workflow/issues/ is always accessed from the main repo.
 */
function getProjectRoot(): string {
  // First, try to detect if we're in a git worktree using git commands
  // Return cached result if available
  if (cachedProjectRoot) {
    debugLog(`Using cached project root: ${cachedProjectRoot}`);
    return cachedProjectRoot;
  }

  debugLog(`Detecting project root from cwd: ${process.cwd()}`);

  // Priority 1: Check CCW_MAIN_REPO environment variable
  const envMainRepo = process.env.CCW_MAIN_REPO;
  if (envMainRepo) {
    debugLog(`Found CCW_MAIN_REPO env: ${envMainRepo}`);
    const hasWorkflow = existsSync(join(envMainRepo, '.workflow'));
    const hasGit = existsSync(join(envMainRepo, '.git'));

    if (hasWorkflow || hasGit) {
      debugLog(`CCW_MAIN_REPO validated (workflow=${hasWorkflow}, git=${hasGit})`);
      cachedProjectRoot = envMainRepo;
      return envMainRepo;
    } else {
      console.warn('[ccw] CCW_MAIN_REPO is set but path is invalid (no .workflow or .git)');
      console.warn(`[ccw] Path: ${envMainRepo}`);
    }
  }

  // Priority 2: Try to detect if we're in a git worktree using git commands
  try {
    // Get the common git directory (points to main repo's .git)
    const gitCommonDir = execSync('git rev-parse --git-common-dir', {

@@ -287,6 +334,9 @@ function getProjectRoot(): string {
      timeout: EXEC_TIMEOUTS.GIT_QUICK,
    }).trim();

    debugLog(`Git common dir: ${gitCommonDir}`);
    debugLog(`Git dir: ${gitDir}`);

    // Normalize paths for comparison (Windows case insensitive)
    const normalizedCommon = normalizePath(gitCommonDir);
    const normalizedGit = normalizePath(gitDir);

@@ -298,8 +348,12 @@ function getProjectRoot(): string {
      // .git directory's parent is the repo root
      const mainRepoRoot = resolve(absoluteCommonDir, '..');

      debugLog(`Detected worktree, main repo: ${mainRepoRoot}`);

      // Verify .workflow or .git exists in main repo
      if (existsSync(join(mainRepoRoot, '.workflow')) || existsSync(join(mainRepoRoot, '.git'))) {
        debugLog(`Main repo validated, returning: ${mainRepoRoot}`);
        cachedProjectRoot = mainRepoRoot;
        return mainRepoRoot;
      }
    }

@@ -307,10 +361,11 @@ function getProjectRoot(): string {
    if (isExecTimeoutError(err)) {
      console.warn(`[issue] git rev-parse timed out after ${EXEC_TIMEOUTS.GIT_QUICK}ms; falling back to filesystem detection`);
    }
    debugLog(`Git command failed, falling back to filesystem detection`);
    // Git command failed - fall through to manual detection
  }

  // Standard detection with worktree file support: walk up to find .workflow or .git
  // Priority 3: Standard detection with worktree file support: walk up to find .workflow or .git
  let dir = process.cwd();
  while (dir !== resolve(dir, '..')) {
    const gitPath = join(dir, '.git');

@@ -322,22 +377,45 @@ function getProjectRoot(): string {
      if (gitStat.isFile()) {
        // .git is a file - this is a worktree, try to resolve main repo
        const mainRepo = resolveMainRepoFromGitFile(gitPath);
        if (mainRepo && existsSync(join(mainRepo, '.workflow'))) {
          return mainRepo;
        debugLog(`Parsed .git file, main repo: ${mainRepo}`);

        if (mainRepo) {
          // Verify main repo has .git directory (always true for main repo)
          // Don't require .workflow - it may not exist yet in a new repo
          const hasGit = existsSync(join(mainRepo, '.git'));
          const hasWorkflow = existsSync(join(mainRepo, '.workflow'));

          if (hasGit || hasWorkflow) {
            if (!hasWorkflow) {
              console.warn('[ccw] Worktree detected but main repo has no .workflow directory');
              console.warn(`[ccw] Main repo: ${mainRepo}`);
              console.warn('[ccw] Issue commands may fail until .workflow is created');
              console.warn('[ccw] Set CCW_MAIN_REPO environment variable to override detection');
            }
            debugLog(`Main repo validated via .git file (git=${hasGit}, workflow=${hasWorkflow})`);
            cachedProjectRoot = mainRepo;
            return mainRepo;
          }
        }
        // If main repo doesn't have .workflow, fall back to current worktree
      }
    } catch {
      // stat failed, continue with normal logic
      debugLog(`Failed to stat ${gitPath}, continuing`);
    }
  }

  if (existsSync(join(dir, '.workflow')) || existsSync(gitPath)) {
    debugLog(`Found project root at: ${dir}`);
    cachedProjectRoot = dir;
    return dir;
  }
  dir = resolve(dir, '..');
}
return process.cwd();

debugLog(`No project root found, using cwd: ${process.cwd()}`);
const fallback = process.cwd();
cachedProjectRoot = fallback;
return fallback;
}

function getIssuesDir(): string {

@@ -1220,6 +1298,81 @@ async function solutionAction(issueId: string | undefined, options: IssueOptions
  }
}

/**
 * solutions - Batch query solutions for multiple issues
 * Usage: ccw issue solutions --status planned --brief
 */
async function solutionsAction(options: IssueOptions): Promise<void> {
  // Get issues filtered by status
  const issues = readIssues();
  let targetIssues = issues;

  if (options.status) {
    const statuses = options.status.split(',').map((s: string) => s.trim());
    targetIssues = issues.filter((i: Issue) => statuses.includes(i.status));
  }

  // Filter to only issues with bound_solution_id
  const boundIssues = targetIssues.filter((i: Issue) => i.bound_solution_id);

  if (boundIssues.length === 0) {
    if (options.json || options.brief) {
      console.log('[]');
    } else {
      console.log(chalk.yellow('No bound solutions found'));
    }
    return;
  }

  // Collect solutions for all bound issues
  const allSolutions: Array<{
    issue_id: string;
    solution_id: string;
    is_bound: boolean;
    task_count: number;
    files_touched: string[];
    priority?: number;
  }> = [];

  for (const issue of boundIssues) {
    const solutions = readSolutions(issue.id);
    const boundSolution = solutions.find(s => s.id === issue.bound_solution_id);

    if (boundSolution) {
      const filesTouched = new Set<string>();
      for (const task of boundSolution.tasks) {
        if (task.modification_points) {
          for (const mp of task.modification_points) {
            if (mp.file) filesTouched.add(mp.file);
          }
        }
      }

      allSolutions.push({
        issue_id: issue.id,
        solution_id: boundSolution.id,
        is_bound: true,
        task_count: boundSolution.tasks.length,
        files_touched: Array.from(filesTouched),
        priority: issue.priority
      });
    }
  }

  // Brief mode: already minimal
  if (options.brief || options.json) {
    console.log(JSON.stringify(allSolutions, null, 2));
    return;
  }

  // Human-readable output
  console.log(chalk.bold.cyan(`\nBound Solutions (${allSolutions.length}):\n`));
  for (const sol of allSolutions) {
    console.log(`${chalk.green('◉')} ${sol.issue_id} → ${sol.solution_id}`);
    console.log(chalk.gray(`  Tasks: ${sol.task_count}, Files: ${sol.files_touched.length}`));
  }
}

/**
 * init - Initialize a new issue (manual ID)
 */

@@ -2832,6 +2985,9 @@ export async function issueCommand(
    case 'solution':
      await solutionAction(argsArray[0], options);
      break;
    case 'solutions':
      await solutionsAction(options);
      break;
    case 'init':
      await initAction(argsArray[0], options);
      break;

1
ccw/src/core/routes/.gitignore
vendored
Normal file

@@ -0,0 +1 @@
.ace-tool/

@@ -35,6 +35,7 @@ import {
import {
  loadClaudeCliTools,
  ensureClaudeCliTools,
+  ensureClaudeCliToolsAsync,
  saveClaudeCliTools,
  loadClaudeCliSettings,
  saveClaudeCliSettings,

@@ -329,16 +330,18 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {

  // API: Get all API endpoints (for --tool custom --model <id>)
  if (pathname === '/api/cli/endpoints' && req.method === 'GET') {
-    try {
-      // Use ensureClaudeCliTools to auto-create config if missing
-      const config = ensureClaudeCliTools(initialPath);
-      const endpoints = getApiEndpointsFromTools(config);
-      res.writeHead(200, { 'Content-Type': 'application/json' });
-      res.end(JSON.stringify({ endpoints }));
-    } catch (err) {
-      res.writeHead(500, { 'Content-Type': 'application/json' });
-      res.end(JSON.stringify({ error: (err as Error).message }));
-    }
+    (async () => {
+      try {
+        // Use ensureClaudeCliToolsAsync to auto-create config with availability sync
+        const config = await ensureClaudeCliToolsAsync(initialPath);
+        const endpoints = getApiEndpointsFromTools(config);
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ endpoints }));
+      } catch (err) {
+        res.writeHead(500, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ error: (err as Error).message }));
+      }
+    })();
    return true;
  }

@@ -820,21 +823,23 @@ export async function handleCliRoutes(ctx: RouteContext): Promise<boolean> {

  // API: Get CLI Tools Config from .claude/cli-tools.json (with fallback to global)
  if (pathname === '/api/cli/tools-config' && req.method === 'GET') {
-    try {
-      // Use ensureClaudeCliTools to auto-create config if missing
-      const toolsConfig = ensureClaudeCliTools(initialPath);
-      const settingsConfig = loadClaudeCliSettings(initialPath);
-      const info = getClaudeCliToolsInfo(initialPath);
-      res.writeHead(200, { 'Content-Type': 'application/json' });
-      res.end(JSON.stringify({
-        tools: toolsConfig,
-        settings: settingsConfig,
-        _configInfo: info
-      }));
-    } catch (err) {
-      res.writeHead(500, { 'Content-Type': 'application/json' });
-      res.end(JSON.stringify({ error: (err as Error).message }));
-    }
+    (async () => {
+      try {
+        // Use ensureClaudeCliToolsAsync to auto-create config with availability sync
+        const toolsConfig = await ensureClaudeCliToolsAsync(initialPath);
+        const settingsConfig = loadClaudeCliSettings(initialPath);
+        const info = getClaudeCliToolsInfo(initialPath);
+        res.writeHead(200, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({
+          tools: toolsConfig,
+          settings: settingsConfig,
+          _configInfo: info
+        }));
+      } catch (err) {
+        res.writeHead(500, { 'Content-Type': 'application/json' });
+        res.end(JSON.stringify({ error: (err as Error).message }));
+      }
+    })();
    return true;
  }

```diff
@@ -299,7 +299,7 @@ async function refreshWorkspace() {
   });

   [...(data.liteTasks?.litePlan || []), ...(data.liteTasks?.liteFix || [])].forEach(s => {
-    const sessionKey = `lite-${s.session_id}`.replace(/[^a-zA-Z0-9-]/g, '-');
+    const sessionKey = `lite-${s.type}-${s.id}`.replace(/[^a-zA-Z0-9-]/g, '-');
     liteTaskDataStore[sessionKey] = s;
   });
```
```diff
@@ -823,7 +823,7 @@ async function refreshWorkspaceData(newData) {
   });

   [...(newData.liteTasks?.litePlan || []), ...(newData.liteTasks?.liteFix || [])].forEach(s => {
-    const key = `lite-${s.session_id}`.replace(/[^a-zA-Z0-9-]/g, '-');
+    const key = `lite-${s.type}-${s.id}`.replace(/[^a-zA-Z0-9-]/g, '-');
     liteTaskDataStore[key] = s;
   });
```
```diff
@@ -726,7 +726,7 @@ function getWorkflowGraphData(workflow) {
     { data: { id: 'start', label: ht('help.workflows.planFull.start') } },
     { data: { id: 'cli-analyze', label: ht('help.workflows.planFull.cliAnalyze') } },
     { data: { id: 'plan', label: '/workflow:plan' } },
-    { data: { id: 'verify', label: '/workflow:action-plan-verify' } },
+    { data: { id: 'verify', label: '/workflow:plan-verify' } },
     { data: { id: 'execute', label: '/workflow:execute' } },
     { data: { id: 'test', label: '/workflow:test-gen' } },
     { data: { id: 'review', label: '/workflow:review' } },
```
```diff
@@ -2817,7 +2817,7 @@ function showLiteTaskDetailPage(sessionKey) {
       </div>

       <!-- Tab Content -->
-      <div class="detail-tab-content" id="liteDetailTabContent">
+      <div class="detail-tab-content active" id="liteDetailTabContent">
         ${renderLiteTasksTab(session, tasks, completed, inProgress, pending)}
       </div>
     </div>
```
```diff
@@ -107,7 +107,7 @@ function showSessionDetailPage(sessionKey) {
       </div>

       <!-- Tab Content -->
-      <div class="detail-tab-content" id="detailTabContent">
+      <div class="detail-tab-content active" id="detailTabContent">
         ${renderTasksTab(session, tasks, completed, inProgress, pending)}
       </div>
     </div>
```
```diff
@@ -418,6 +418,56 @@ export function ensureClaudeCliTools(projectDir: string, createInProject: boolea
   }
 }

+/**
+ * Async version of ensureClaudeCliTools with automatic availability sync
+ * Creates default config in global ~/.claude directory and syncs with actual tool availability
+ * @param projectDir - Project directory path (used for reading existing project config)
+ * @param createInProject - DEPRECATED: Always creates in global dir. Kept for backward compatibility.
+ * @returns The config that was created/exists
+ */
+export async function ensureClaudeCliToolsAsync(projectDir: string, createInProject: boolean = false): Promise<ClaudeCliToolsConfig & { _source?: string }> {
+  const resolved = resolveConfigPath(projectDir);
+
+  if (resolved.source !== 'default') {
+    // Config exists, load and return it
+    return loadClaudeCliTools(projectDir);
+  }
+
+  // Config doesn't exist - create in global directory only
+  debugLog('[claude-cli-tools] Config not found, creating default cli-tools.json in ~/.claude');
+
+  const defaultConfig: ClaudeCliToolsConfig = { ...DEFAULT_TOOLS_CONFIG };
+
+  // Always create in global directory (user-level config), respecting CCW_DATA_DIR
+  const claudeHome = process.env.CCW_DATA_DIR
+    ? path.join(process.env.CCW_DATA_DIR, '.claude')
+    : path.join(os.homedir(), '.claude');
+  if (!fs.existsSync(claudeHome)) {
+    fs.mkdirSync(claudeHome, { recursive: true });
+  }
+  const globalPath = getGlobalConfigPath();
+  try {
+    fs.writeFileSync(globalPath, JSON.stringify(defaultConfig, null, 2), 'utf-8');
+    debugLog(`[claude-cli-tools] Created default config at: ${globalPath}`);
+
+    // Auto-sync with actual tool availability on first creation
+    try {
+      debugLog('[claude-cli-tools] Auto-syncing tool availability on first creation...');
+      const syncResult = await syncBuiltinToolsAvailability(projectDir);
+      debugLog(`[claude-cli-tools] Auto-sync completed: enabled=[${syncResult.changes.enabled.join(', ')}], disabled=[${syncResult.changes.disabled.join(', ')}]`);
+      return { ...syncResult.config, _source: 'global' };
+    } catch (syncErr) {
+      console.warn('[claude-cli-tools] Failed to auto-sync availability:', syncErr);
+      // Return default config if sync fails
+      return { ...defaultConfig, _source: 'global' };
+    }
+  } catch (err) {
+    console.error('[claude-cli-tools] Failed to create global config:', err);
+    return { ...defaultConfig, _source: 'default' };
+  }
+}
+
 /**
  * Load CLI tools configuration from global ~/.claude/cli-tools.json
  * Falls back to default config if not found.
```
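For orientation, a minimal sketch of how a caller might use this new async variant at startup so that first-run config creation and the availability sync finish before routes serve requests. The module path and the `warmCliToolsConfig` helper are illustrative assumptions, not part of this diff:

```ts
// Illustrative sketch only - the import path is an assumption.
import { ensureClaudeCliToolsAsync } from './claude-cli-tools.js';

async function warmCliToolsConfig(projectDir: string): Promise<void> {
  // Awaiting here means the default config is written (and synced with
  // actual tool availability) before any route handler reads it.
  const config = await ensureClaudeCliToolsAsync(projectDir);
  console.log(`[startup] cli-tools config source: ${config._source ?? 'project'}`);
}
```

Note that the route handlers above deliberately do not await the IIFE; they return `true` immediately and let the async work complete the HTTP response.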
```diff
@@ -83,8 +83,8 @@ function findLocalPackagePath(packageName: string): string | null {
     possiblePaths.push(join(cwdParent, packageName));
   }

+  // First pass: prefer non-node_modules paths (development environment)
   for (const localPath of possiblePaths) {
-    // Skip paths inside node_modules
     if (isInsideNodeModules(localPath)) {
       continue;
     }
@@ -94,8 +94,12 @@ function findLocalPackagePath(packageName: string): string | null {
     }
   }

-  if (!isDevEnvironment()) {
-    console.log(`[CodexLens] Running from node_modules - will try PyPI for ${packageName}`);
+  // Second pass: allow node_modules paths (NPM global install)
+  for (const localPath of possiblePaths) {
+    if (existsSync(join(localPath, 'pyproject.toml'))) {
+      console.log(`[CodexLens] Found ${packageName} in node_modules at: ${localPath}`);
+      return localPath;
+    }
   }

   return null;
```
```diff
@@ -666,14 +670,26 @@ async function bootstrapWithUv(gpuMode: GpuMode = 'cpu'): Promise<BootstrapResul

   if (!codexLensPath) {
     // codex-lens is a local-only package, not published to PyPI
+    // Generate dynamic paths for error message (cross-platform)
+    const possiblePaths = [
+      join(process.cwd(), 'codex-lens'),
+      join(__dirname, '..', '..', '..', 'codex-lens'),
+      join(homedir(), 'codex-lens'),
+    ];
+    const cwd = process.cwd();
+    const cwdParent = dirname(cwd);
+    if (cwdParent !== cwd) {
+      possiblePaths.push(join(cwdParent, 'codex-lens'));
+    }
+    const pathsList = possiblePaths.map(p => `  - ${p}`).join('\n');

     const errorMsg = `Cannot find codex-lens directory for local installation.\n\n` +
       `codex-lens is a local development package (not published to PyPI) and must be installed from local files.\n\n` +
       `To fix this:\n` +
-      `1. Ensure the 'codex-lens' directory exists in your project root\n` +
-      `   Expected location: D:\\Claude_dms3\\codex-lens\n` +
-      `2. Verify pyproject.toml exists: D:\\Claude_dms3\\codex-lens\\pyproject.toml\n` +
-      `3. Run ccw from the correct working directory (e.g., D:\\Claude_dms3)\n` +
-      `4. Or manually install: cd D:\\Claude_dms3\\codex-lens && pip install -e .[${extras.join(',')}]`;
+      `1. Ensure 'codex-lens' directory exists at one of these locations:\n${pathsList}\n` +
+      `2. Verify pyproject.toml exists in the codex-lens directory\n` +
+      `3. Run ccw from the correct working directory\n` +
+      `4. Or manually install: cd /path/to/codex-lens && pip install -e .[${extras.join(',')}]`;
     return { success: false, error: errorMsg };
   }

@@ -740,13 +756,26 @@ async function installSemanticWithUv(gpuMode: GpuMode = 'cpu'): Promise<Bootstra
   // Install with extras - UV handles dependency conflicts automatically
   if (!codexLensPath) {
     // codex-lens is a local-only package, not published to PyPI
+    // Generate dynamic paths for error message (cross-platform)
+    const possiblePaths = [
+      join(process.cwd(), 'codex-lens'),
+      join(__dirname, '..', '..', '..', 'codex-lens'),
+      join(homedir(), 'codex-lens'),
+    ];
+    const cwd = process.cwd();
+    const cwdParent = dirname(cwd);
+    if (cwdParent !== cwd) {
+      possiblePaths.push(join(cwdParent, 'codex-lens'));
+    }
+    const pathsList = possiblePaths.map(p => `  - ${p}`).join('\n');

     const errorMsg = `Cannot find codex-lens directory for local installation.\n\n` +
       `codex-lens is a local development package (not published to PyPI) and must be installed from local files.\n\n` +
       `To fix this:\n` +
-      `1. Ensure the 'codex-lens' directory exists in your project root\n` +
-      `2. Verify pyproject.toml exists in codex-lens directory\n` +
+      `1. Ensure 'codex-lens' directory exists at one of these locations:\n${pathsList}\n` +
+      `2. Verify pyproject.toml exists in the codex-lens directory\n` +
       `3. Run ccw from the correct working directory\n` +
-      `4. Or manually install: cd codex-lens && pip install -e .[${extras.join(',')}]`;
+      `4. Or manually install: cd /path/to/codex-lens && pip install -e .[${extras.join(',')}]`;
     return { success: false, error: errorMsg };
   }
```
ccw/src/tools/command-registry.test.ts (new file, 669 lines)

```ts
/**
 * CommandRegistry Tests
 *
 * Test coverage:
 * - YAML header parsing
 * - Command metadata extraction
 * - Directory detection (relative and home)
 * - Caching mechanism
 * - Batch operations
 * - Categorization
 * - Error handling
 */

import { CommandRegistry, createCommandRegistry, getAllCommandsSync, getCommandSync } from './command-registry';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// Mock fs module
jest.mock('fs');
jest.mock('os');

describe('CommandRegistry', () => {
  const mockReadFileSync = fs.readFileSync as jest.MockedFunction<typeof fs.readFileSync>;
  const mockExistsSync = fs.existsSync as jest.MockedFunction<typeof fs.existsSync>;
  const mockReaddirSync = fs.readdirSync as jest.MockedFunction<typeof fs.readdirSync>;
  const mockStatSync = fs.statSync as jest.MockedFunction<typeof fs.statSync>;
  const mockHomedir = os.homedir as jest.MockedFunction<typeof os.homedir>;

  // Sample YAML headers
  const sampleLitePlanYaml = `---
name: lite-plan
description: Quick planning for simple features
argument-hint: "\"feature description\""
allowed-tools: Task(*), Read(*), Write(*), Bash(*)
---

# Content here`;

  const sampleExecuteYaml = `---
name: execute
description: Execute implementation from plan
argument-hint: "--resume-session=\"WFS-xxx\""
allowed-tools: Task(*), Bash(*)
---

# Content here`;

  const sampleTestYaml = `---
name: test-cycle-execute
description: Run tests and fix failures
argument-hint: "--session=\"WFS-xxx\""
allowed-tools: Task(*), Bash(*)
---

# Content here`;

  const sampleReviewYaml = `---
name: review
description: Code review workflow
argument-hint: "--session=\"WFS-xxx\""
allowed-tools: Task(*), Read(*)
---

# Content here`;

  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('constructor & directory detection', () => {
    it('should use provided command directory', () => {
      const customDir = '/custom/path';
      const registry = new CommandRegistry(customDir);

      expect((registry as any).commandDir).toBe(customDir);
    });

    it('should auto-detect relative .claude/commands/workflow directory', () => {
      mockExistsSync.mockImplementation((path: string) => {
        return path === '.claude/commands/workflow';
      });

      const registry = new CommandRegistry();

      expect((registry as any).commandDir).toBe('.claude/commands/workflow');
      expect(mockExistsSync).toHaveBeenCalledWith('.claude/commands/workflow');
    });

    it('should auto-detect home directory ~/.claude/commands/workflow', () => {
      mockExistsSync.mockImplementation((checkPath: string) => {
        return checkPath === path.join('/home/user', '.claude', 'commands', 'workflow');
      });
      mockHomedir.mockReturnValue('/home/user');

      const registry = new CommandRegistry();

      expect((registry as any).commandDir).toBe(
        path.join('/home/user', '.claude', 'commands', 'workflow')
      );
    });

    it('should return null if no command directory found', () => {
      mockExistsSync.mockReturnValue(false);
      mockHomedir.mockReturnValue('/home/user');

      const registry = new CommandRegistry();

      expect((registry as any).commandDir).toBeNull();
    });
  });

  describe('parseYamlHeader', () => {
    it('should parse simple YAML header with Unix line endings', () => {
      const yaml = `---
name: test-command
description: Test description
argument-hint: "\"test\""
allowed-tools: Task(*), Read(*)
---

Content here`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toEqual({
        name: 'test-command',
        description: 'Test description',
        'argument-hint': '"test"',
        'allowed-tools': 'Task(*), Read(*)'
      });
    });

    it('should parse YAML header with Windows line endings (\\r\\n)', () => {
      const yaml = `---\r\nname: test-command\r\ndescription: Test\r\n---`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toEqual({
        name: 'test-command',
        description: 'Test'
      });
    });

    it('should handle quoted values', () => {
      const yaml = `---
name: "cmd"
description: 'double quoted'
---`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toEqual({
        name: 'cmd',
        description: 'double quoted'
      });
    });

    it('should parse allowed-tools and trim spaces', () => {
      const yaml = `---
name: test
allowed-tools: Task(*), Read(*) , Write(*), Bash(*)
---`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result['allowed-tools']).toBe('Task(*), Read(*), Write(*), Bash(*)');
    });

    it('should skip comments and empty lines', () => {
      const yaml = `---
# This is a comment
name: test-command

# Another comment
description: Test

---`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toEqual({
        name: 'test-command',
        description: 'Test'
      });
    });

    it('should return null for missing YAML markers', () => {
      const yaml = `name: test-command
description: Test`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toBeNull();
    });

    it('should return null for malformed YAML', () => {
      const yaml = `---
invalid yaml content without colons
---`;

      const registry = new CommandRegistry('/fake/path');
      const result = (registry as any).parseYamlHeader(yaml);

      expect(result).toEqual({});
    });
  });

  describe('getCommand', () => {
    it('should get command metadata by name', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockImplementation((checkPath: string) => {
        return checkPath === path.join(cmdDir, 'lite-plan.md');
      });
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('lite-plan');

      expect(result).toEqual({
        name: 'lite-plan',
        command: '/workflow:lite-plan',
        description: 'Quick planning for simple features',
        argumentHint: '"feature description"',
        allowedTools: ['Task(*)', 'Read(*)', 'Write(*)', 'Bash(*)'],
        filePath: path.join(cmdDir, 'lite-plan.md')
      });
    });

    it('should normalize /workflow: prefix', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('/workflow:lite-plan');

      expect(result?.name).toBe('lite-plan');
    });

    it('should use cache for repeated requests', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);

      registry.getCommand('lite-plan');
      registry.getCommand('lite-plan');

      // readFileSync should only be called once due to cache
      expect(mockReadFileSync).toHaveBeenCalledTimes(1);
    });

    it('should return null if command file not found', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(false);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('nonexistent');

      expect(result).toBeNull();
    });

    it('should return null if no command directory', () => {
      mockExistsSync.mockReturnValue(false);
      mockHomedir.mockReturnValue('/home/user');

      const registry = new CommandRegistry();
      const result = registry.getCommand('lite-plan');

      expect(result).toBeNull();
    });

    it('should return null if YAML header is invalid', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue('No YAML header here');

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('lite-plan');

      expect(result).toBeNull();
    });

    it('should parse allowedTools correctly', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(sampleExecuteYaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('execute');

      expect(result?.allowedTools).toEqual(['Task(*)', 'Bash(*)']);
    });

    it('should handle empty allowedTools', () => {
      const yaml = `---
name: minimal-cmd
description: Minimal command
---`;
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(yaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('minimal-cmd');

      expect(result?.allowedTools).toEqual([]);
    });
  });

  describe('getCommands', () => {
    it('should get multiple commands', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        if (filePath.includes('execute')) return sampleExecuteYaml;
        return '';
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommands(['lite-plan', 'execute', 'nonexistent']);

      expect(result.size).toBe(2);
      expect(result.has('/workflow:lite-plan')).toBe(true);
      expect(result.has('/workflow:execute')).toBe(true);
    });

    it('should skip nonexistent commands', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(false);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommands(['nonexistent1', 'nonexistent2']);

      expect(result.size).toBe(0);
    });
  });

  describe('getAllCommandsSummary', () => {
    it('should get all commands summary', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md', 'execute.md', 'test.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        if (filePath.includes('execute')) return sampleExecuteYaml;
        if (filePath.includes('test')) return sampleTestYaml;
        return '';
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(3);
      expect(result.get('/workflow:lite-plan')).toEqual({
        name: 'lite-plan',
        description: 'Quick planning for simple features'
      });
    });

    it('should skip directories', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['file.md', 'directory'] as any);
      mockStatSync.mockImplementation((filePath: string) => ({
        isDirectory: () => filePath.includes('directory')
      } as any));
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      // Only file.md should be processed
      expect(mockReadFileSync).toHaveBeenCalledTimes(1);
    });

    it('should skip files with invalid YAML headers', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['valid.md', 'invalid.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('valid')) return sampleLitePlanYaml;
        return 'No YAML header';
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(1);
    });

    it('should return empty map if no command directory', () => {
      mockExistsSync.mockReturnValue(false);
      mockHomedir.mockReturnValue('/home/user');

      const registry = new CommandRegistry();
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(0);
    });

    it('should handle directory read errors gracefully', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockImplementation(() => {
        throw new Error('Permission denied');
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(0);
    });
  });

  describe('getAllCommandsByCategory', () => {
    it('should categorize commands by name patterns', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md', 'execute.md', 'test-cycle-execute.md', 'review.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        if (filePath.includes('execute')) return sampleExecuteYaml;
        if (filePath.includes('test')) return sampleTestYaml;
        if (filePath.includes('review')) return sampleReviewYaml;
        return '';
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsByCategory();

      expect(result.planning.length).toBe(1);
      expect(result.execution.length).toBe(1);
      expect(result.testing.length).toBe(1);
      expect(result.review.length).toBe(1);
      expect(result.other.length).toBe(0);

      expect(result.planning[0].name).toBe('lite-plan');
      expect(result.execution[0].name).toBe('execute');
    });

    it('should handle commands matching multiple patterns', () => {
      const yamlMultiMatch = `---
name: test-plan
description: TDD planning
allowed-tools: Task(*)
---`;

      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['test-plan.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockReturnValue(yamlMultiMatch);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsByCategory();

      // Should match 'plan' pattern (planning)
      expect(result.planning.length).toBe(1);
    });
  });

  describe('toJSON', () => {
    it('should serialize cached commands to JSON', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);
      registry.getCommand('lite-plan');

      const json = registry.toJSON();

      expect(json['/workflow:lite-plan']).toEqual({
        name: 'lite-plan',
        command: '/workflow:lite-plan',
        description: 'Quick planning for simple features',
        argumentHint: '"feature description"',
        allowedTools: ['Task(*)', 'Read(*)', 'Write(*)', 'Bash(*)'],
        filePath: path.join(cmdDir, 'lite-plan.md')
      });
    });

    it('should only include cached commands', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        return sampleExecuteYaml;
      });

      const registry = new CommandRegistry(cmdDir);
      registry.getCommand('lite-plan');
      // Don't call getCommand for 'execute'

      const json = registry.toJSON();

      expect(Object.keys(json).length).toBe(1);
      expect(json['/workflow:lite-plan']).toBeDefined();
      expect(json['/workflow:execute']).toBeUndefined();
    });
  });

  describe('exported functions', () => {
    it('createCommandRegistry should create new instance', () => {
      mockExistsSync.mockReturnValue(true);

      const registry = createCommandRegistry('/custom/path');

      expect((registry as any).commandDir).toBe('/custom/path');
    });

    it('getAllCommandsSync should return all commands', () => {
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);
      mockHomedir.mockReturnValue('/home/user');

      const result = getAllCommandsSync();

      expect(result.size).toBeGreaterThanOrEqual(1);
    });

    it('getCommandSync should return specific command', () => {
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);
      mockHomedir.mockReturnValue('/home/user');

      const result = getCommandSync('lite-plan');

      expect(result?.name).toBe('lite-plan');
    });
  });

  describe('edge cases', () => {
    it('should handle file read errors', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReadFileSync.mockImplementation(() => {
        throw new Error('File read error');
      });

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('lite-plan');

      expect(result).toBeNull();
    });

    it('should handle YAML parsing errors', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      // Return something that will cause parsing to fail
      mockReadFileSync.mockReturnValue('---\ninvalid: : : yaml\n---');

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getCommand('lite-plan');

      // Should return null since name is not in result
      expect(result).toBeNull();
    });

    it('should handle empty command directory', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue([] as any);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(0);
    });

    it('should handle non-md files in command directory', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md', 'readme.txt', '.gitignore'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockReturnValue(sampleLitePlanYaml);

      const registry = new CommandRegistry(cmdDir);
      const result = registry.getAllCommandsSummary();

      expect(result.size).toBe(1);
    });
  });

  describe('integration tests', () => {
    it('should work with full workflow', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md', 'execute.md', 'test-cycle-execute.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        if (filePath.includes('execute')) return sampleExecuteYaml;
        if (filePath.includes('test')) return sampleTestYaml;
        return '';
      });

      const registry = new CommandRegistry(cmdDir);

      // Get all summary
      const summary = registry.getAllCommandsSummary();
      expect(summary.size).toBe(3);

      // Get by category
      const byCategory = registry.getAllCommandsByCategory();
      expect(byCategory.planning.length).toBe(1);
      expect(byCategory.execution.length).toBe(1);
      expect(byCategory.testing.length).toBe(1);

      // Get specific command
      const cmd = registry.getCommand('lite-plan');
      expect(cmd?.name).toBe('lite-plan');

      // Get multiple commands
      const multiple = registry.getCommands(['lite-plan', 'execute']);
      expect(multiple.size).toBe(2);

      // Convert to JSON
      const json = registry.toJSON();
      expect(Object.keys(json).length).toBeGreaterThan(0);
    });

    it('should maintain cache across operations', () => {
      const cmdDir = '/workflows';
      mockExistsSync.mockReturnValue(true);
      mockReaddirSync.mockReturnValue(['lite-plan.md', 'execute.md'] as any);
      mockStatSync.mockReturnValue({ isDirectory: () => false } as any);
      mockReadFileSync.mockImplementation((filePath: string) => {
        if (filePath.includes('lite-plan')) return sampleLitePlanYaml;
        return sampleExecuteYaml;
      });

      const registry = new CommandRegistry(cmdDir);

      // First call
      registry.getCommand('lite-plan');
      const initialCallCount = mockReadFileSync.mock.calls.length;

      // getAllCommandsSummary will read all files
      registry.getAllCommandsSummary();
      const afterSummaryCallCount = mockReadFileSync.mock.calls.length;

      // Second getCommand should use cache
      registry.getCommand('lite-plan');
      const finalCallCount = mockReadFileSync.mock.calls.length;

      // lite-plan.md should only be read twice:
      // 1. Initial getCommand
      // 2. getAllCommandsSummary (must read all files)
      // Not again in second getCommand due to cache
      expect(finalCallCount).toBe(afterSummaryCallCount);
    });
  });
});
```
ccw/src/tools/command-registry.ts (new file, 308 lines)

```ts
/**
 * Command Registry Tool
 *
 * Features:
 * 1. Scan and parse YAML headers from command files
 * 2. Read from global ~/.claude/commands/workflow directory
 * 3. Support on-demand extraction (not full scan)
 * 4. Cache parsed metadata for performance
 */

import { existsSync, readdirSync, readFileSync, statSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';

export interface CommandMetadata {
  name: string;
  command: string;
  description: string;
  argumentHint: string;
  allowedTools: string[];
  filePath: string;
}

export interface CommandSummary {
  name: string;
  description: string;
}

export class CommandRegistry {
  private commandDir: string | null;
  private cache: Map<string, CommandMetadata>;

  constructor(commandDir?: string) {
    this.cache = new Map();

    if (commandDir) {
      this.commandDir = commandDir;
    } else {
      this.commandDir = this.findCommandDir();
    }
  }

  /**
   * Auto-detect ~/.claude/commands/workflow directory
   */
  private findCommandDir(): string | null {
    // Try relative to current working directory
    const relativePath = join('.claude', 'commands', 'workflow');
    if (existsSync(relativePath)) {
      return relativePath;
    }

    // Try user home directory
    const homeDir = homedir();
    const homeCommandDir = join(homeDir, '.claude', 'commands', 'workflow');
    if (existsSync(homeCommandDir)) {
      return homeCommandDir;
    }

    return null;
  }

  /**
   * Parse YAML header (simplified version)
   *
   * Limitations:
   * - Only supports simple key: value pairs (single-line values)
   * - No support for multi-line values, nested objects, complex lists
   * - allowed-tools field converts comma-separated strings to arrays
   */
  private parseYamlHeader(content: string): Record<string, any> | null {
    // Handle Windows line endings (\r\n)
    const match = content.match(/^---[\r\n]+([\s\S]*?)[\r\n]+---/);
    if (!match) return null;

    const yamlContent = match[1];
    const result: Record<string, any> = {};

    try {
      const lines = yamlContent.split(/[\r\n]+/);
      for (const line of lines) {
        const trimmed = line.trim();
        if (!trimmed || trimmed.startsWith('#')) continue; // Skip empty lines and comments

        const colonIndex = trimmed.indexOf(':');
        if (colonIndex === -1) continue;

        const key = trimmed.substring(0, colonIndex).trim();
        let value = trimmed.substring(colonIndex + 1).trim();

        if (!key) continue; // Skip invalid lines

        // Remove quotes (single or double)
        let cleanValue = value.replace(/^["']|["']$/g, '');

        // Special handling for allowed-tools field: convert to array
        // Supports format: "Read, Write, Bash" or "Read,Write,Bash"
        if (key === 'allowed-tools') {
          cleanValue = cleanValue
            .split(',')
            .map(t => t.trim())
            .filter(t => t)
            .join(','); // Keep as comma-separated for now, will convert in getCommand
        }

        result[key] = cleanValue;
      }
    } catch (error) {
      const err = error as Error;
      console.error('YAML parsing error:', err.message);
      return null;
    }

    return result;
  }

  /**
   * Get single command metadata
   * @param commandName Command name (e.g., "lite-plan" or "/workflow:lite-plan")
   * @returns Command metadata or null
   */
  public getCommand(commandName: string): CommandMetadata | null {
    if (!this.commandDir) {
      console.error('ERROR: ~/.claude/commands/workflow directory not found');
      return null;
    }

    // Normalize command name
    const normalized = commandName.startsWith('/workflow:')
      ? commandName.substring('/workflow:'.length)
      : commandName;

    // Check cache
    const cached = this.cache.get(normalized);
    if (cached) {
      return cached;
    }

    // Read command file
    const filePath = join(this.commandDir, `${normalized}.md`);
    if (!existsSync(filePath)) {
      return null;
    }

    try {
      const content = readFileSync(filePath, 'utf-8');
      const header = this.parseYamlHeader(content);

      if (header && header.name) {
        const toolsStr = header['allowed-tools'] || '';
        const allowedTools = toolsStr
          .split(',')
          .map((t: string) => t.trim())
          .filter((t: string) => t);

        const result: CommandMetadata = {
          name: header.name,
          command: `/workflow:${header.name}`,
          description: header.description || '',
          argumentHint: header['argument-hint'] || '',
          allowedTools: allowedTools,
          filePath: filePath
        };

        // Cache result
        this.cache.set(normalized, result);
        return result;
      }
    } catch (error) {
      const err = error as Error;
      console.error(`Failed to read command ${filePath}:`, err.message);
    }

    return null;
  }

  /**
   * Get multiple commands metadata
   * @param commandNames Array of command names
   * @returns Map of command metadata
   */
  public getCommands(commandNames: string[]): Map<string, CommandMetadata> {
    const result = new Map<string, CommandMetadata>();

    for (const name of commandNames) {
      const cmd = this.getCommand(name);
      if (cmd) {
        result.set(cmd.command, cmd);
      }
    }

    return result;
  }

  /**
   * Get all commands' names and descriptions
   * @returns Map of command names to summaries
   */
  public getAllCommandsSummary(): Map<string, CommandSummary> {
    const result = new Map<string, CommandSummary>();

    if (!this.commandDir) {
      return result;
    }

    try {
      const files = readdirSync(this.commandDir);

      for (const file of files) {
        if (!file.endsWith('.md')) continue;

        const filePath = join(this.commandDir, file);
        const stat = statSync(filePath);

        if (stat.isDirectory()) continue;

        try {
          const content = readFileSync(filePath, 'utf-8');
          const header = this.parseYamlHeader(content);

          if (header && header.name) {
            const commandName = `/workflow:${header.name}`;
            result.set(commandName, {
              name: header.name,
              description: header.description || ''
            });
          }
        } catch (error) {
          // Skip files that fail to read
          continue;
        }
      }
    } catch (error) {
      // Return empty map if directory read fails
      return result;
    }

    return result;
  }

  /**
   * Get all commands organized by category/tags
   */
  public getAllCommandsByCategory(): Record<string, CommandMetadata[]> {
    const summary = this.getAllCommandsSummary();
    const result: Record<string, CommandMetadata[]> = {
      planning: [],
      execution: [],
      testing: [],
      review: [],
      other: []
    };

    for (const [cmdName] of summary) {
      const cmd = this.getCommand(cmdName);
      if (cmd) {
        // Categorize based on command name patterns
        if (cmd.name.includes('plan')) {
          result.planning.push(cmd);
        } else if (cmd.name.includes('execute')) {
          result.execution.push(cmd);
        } else if (cmd.name.includes('test')) {
          result.testing.push(cmd);
        } else if (cmd.name.includes('review')) {
          result.review.push(cmd);
        } else {
          result.other.push(cmd);
        }
      }
    }

    return result;
  }

  /**
   * Convert to JSON for serialization
   */
  public toJSON(): Record<string, any> {
    const result: Record<string, CommandMetadata> = {};
    for (const [key, value] of this.cache) {
      result[`/workflow:${key}`] = value;
    }
    return result;
  }
}

/**
 * Export function for direct usage
 */
export function createCommandRegistry(commandDir?: string): CommandRegistry {
  return new CommandRegistry(commandDir);
}

/**
 * Export function to get all commands
 */
export function getAllCommandsSync(): Map<string, CommandSummary> {
  const registry = new CommandRegistry();
  return registry.getAllCommandsSummary();
}

/**
 * Export function to get specific command
 */
export function getCommandSync(name: string): CommandMetadata | null {
  const registry = new CommandRegistry();
  return registry.getCommand(name);
}
```
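A quick usage sketch of the registry above. The explicit directory path is illustrative; omitting the constructor argument falls back to auto-detection of `.claude/commands/workflow` and `~/.claude/commands/workflow`:

```ts
import { CommandRegistry } from './command-registry';

// Point at an explicit command directory, or omit the argument
// to use the registry's built-in directory detection.
const registry = new CommandRegistry('.claude/commands/workflow');

// Single lookup - the '/workflow:' prefix is normalized away and
// the parsed metadata is cached for repeated calls.
const cmd = registry.getCommand('/workflow:lite-plan');
console.log(cmd?.description, cmd?.allowedTools);

// Lightweight listing of every command's name and description.
for (const [name, summary] of registry.getAllCommandsSummary()) {
  console.log(`${name} - ${summary.description}`);
}
```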
```diff
@@ -378,3 +378,7 @@ export { registerTool };

 // Export ToolSchema type
 export type { ToolSchema };
+
+// Export CommandRegistry for direct import
+export { CommandRegistry, createCommandRegistry, getAllCommandsSync, getCommandSync } from './command-registry.js';
+export type { CommandMetadata, CommandSummary } from './command-registry.js';
```

```diff
@@ -19,5 +19,5 @@
     "noEmit": false
   },
   "include": ["src/**/*"],
-  "exclude": ["src/templates/**/*", "node_modules", "dist"]
+  "exclude": ["src/templates/**/*", "src/**/*.test.ts", "node_modules", "dist"]
 }
```
codex-lens/LICENSE (new file, 21 lines)

```text
MIT License

Copyright (c) 2024 CodexLens Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
codex-lens/README.md (new file, 59 lines)

````markdown
# CodexLens

CodexLens is a multi-modal code analysis platform designed to provide comprehensive code understanding and analysis capabilities.

## Features

- **Multi-language Support**: Analyze code in Python, JavaScript, TypeScript and more using Tree-sitter parsers
- **Semantic Search**: Find relevant code snippets using semantic understanding with fastembed and HNSWLIB
- **Code Parsing**: Advanced code structure parsing with tree-sitter
- **Flexible Architecture**: Modular design for easy extension and customization

## Installation

### Basic Installation

```bash
pip install codex-lens
```

### With Semantic Search

```bash
pip install codex-lens[semantic]
```

### With GPU Acceleration (NVIDIA CUDA)

```bash
pip install codex-lens[semantic-gpu]
```

### With DirectML (Windows - NVIDIA/AMD/Intel)

```bash
pip install codex-lens[semantic-directml]
```

### With All Optional Features

```bash
pip install codex-lens[full]
```

## Requirements

- Python >= 3.10
- See `pyproject.toml` for detailed dependency list

## Development

This project uses setuptools for building and packaging.

## License

MIT License

## Authors

CodexLens Contributors
````
codex-lens/build/lib/codexlens/__init__.py (new file, 28 lines)

```python
"""CodexLens package."""

from __future__ import annotations

from . import config, entities, errors
from .config import Config
from .entities import IndexedFile, SearchResult, SemanticChunk, Symbol
from .errors import CodexLensError, ConfigError, ParseError, SearchError, StorageError

__version__ = "0.1.0"

__all__ = [
    "__version__",
    "config",
    "entities",
    "errors",
    "Config",
    "IndexedFile",
    "SearchResult",
    "SemanticChunk",
    "Symbol",
    "CodexLensError",
    "ConfigError",
    "ParseError",
    "StorageError",
    "SearchError",
]
```

codex-lens/build/lib/codexlens/__main__.py (new file, 14 lines)

```python
"""Module entrypoint for `python -m codexlens`."""

from __future__ import annotations

from codexlens.cli import app


def main() -> None:
    app()


if __name__ == "__main__":
    main()
```
codex-lens/build/lib/codexlens/api/__init__.py (new file, 88 lines)

```python
"""Codexlens Public API Layer.

This module exports all public API functions and dataclasses for the
codexlens LSP-like functionality.

Dataclasses (from models.py):
    - CallInfo: Call relationship information
    - MethodContext: Method context with call relationships
    - FileContextResult: File context result with method summaries
    - DefinitionResult: Definition lookup result
    - ReferenceResult: Reference lookup result
    - GroupedReferences: References grouped by definition
    - SymbolInfo: Symbol information for workspace search
    - HoverInfo: Hover information for a symbol
    - SemanticResult: Semantic search result

Utility functions (from utils.py):
    - resolve_project: Resolve and validate project root path
    - normalize_relationship_type: Normalize relationship type to canonical form
    - rank_by_proximity: Rank results by file path proximity

Example:
    >>> from codexlens.api import (
    ...     DefinitionResult,
    ...     resolve_project,
    ...     normalize_relationship_type
    ... )
    >>> project = resolve_project("/path/to/project")
    >>> rel_type = normalize_relationship_type("calls")
    >>> print(rel_type)
    'call'
"""

from __future__ import annotations

# Dataclasses
from .models import (
    CallInfo,
    MethodContext,
    FileContextResult,
    DefinitionResult,
    ReferenceResult,
    GroupedReferences,
    SymbolInfo,
    HoverInfo,
    SemanticResult,
)

# Utility functions
from .utils import (
    resolve_project,
    normalize_relationship_type,
    rank_by_proximity,
    rank_by_score,
)

# API functions
from .definition import find_definition
from .symbols import workspace_symbols
from .hover import get_hover
from .file_context import file_context
from .references import find_references
from .semantic import semantic_search

__all__ = [
    # Dataclasses
    "CallInfo",
    "MethodContext",
    "FileContextResult",
    "DefinitionResult",
    "ReferenceResult",
    "GroupedReferences",
    "SymbolInfo",
    "HoverInfo",
    "SemanticResult",
    # Utility functions
    "resolve_project",
    "normalize_relationship_type",
    "rank_by_proximity",
    "rank_by_score",
    # API functions
    "find_definition",
    "workspace_symbols",
    "get_hover",
    "file_context",
    "find_references",
    "semantic_search",
]
```
codex-lens/build/lib/codexlens/api/definition.py (new file, 126 lines)

```python
"""find_definition API implementation.

This module provides the find_definition() function for looking up
symbol definitions with a 3-stage fallback strategy.
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import List, Optional

from ..entities import Symbol
from ..storage.global_index import GlobalSymbolIndex
from ..storage.registry import RegistryStore
from ..errors import IndexNotFoundError
from .models import DefinitionResult
from .utils import resolve_project, rank_by_proximity

logger = logging.getLogger(__name__)


def find_definition(
    project_root: str,
    symbol_name: str,
    symbol_kind: Optional[str] = None,
    file_context: Optional[str] = None,
    limit: int = 10
) -> List[DefinitionResult]:
    """Find definition locations for a symbol.

    Uses a 3-stage fallback strategy:
    1. Exact match with kind filter
    2. Exact match without kind filter
    3. Prefix match

    Args:
        project_root: Project root directory (for index location)
        symbol_name: Name of the symbol to find
        symbol_kind: Optional symbol kind filter (class, function, etc.)
        file_context: Optional file path for proximity ranking
        limit: Maximum number of results to return

    Returns:
        List of DefinitionResult sorted by proximity if file_context provided

    Raises:
        IndexNotFoundError: If project is not indexed
    """
    project_path = resolve_project(project_root)

    # Get project info from registry
    registry = RegistryStore()
    project_info = registry.get_project(project_path)
    if project_info is None:
        raise IndexNotFoundError(f"Project not indexed: {project_path}")

    # Open global symbol index
    index_db = project_info.index_root / "_global_symbols.db"
    if not index_db.exists():
        raise IndexNotFoundError(f"Global symbol index not found: {index_db}")

    global_index = GlobalSymbolIndex(str(index_db), project_info.id)

    # Stage 1: Exact match with kind filter
    results = _search_with_kind(global_index, symbol_name, symbol_kind, limit)
    if results:
        logger.debug(f"Stage 1 (exact+kind): Found {len(results)} results for {symbol_name}")
        return _rank_and_convert(results, file_context)

    # Stage 2: Exact match without kind (if kind was specified)
    if symbol_kind:
        results = _search_with_kind(global_index, symbol_name, None, limit)
        if results:
            logger.debug(f"Stage 2 (exact): Found {len(results)} results for {symbol_name}")
            return _rank_and_convert(results, file_context)

    # Stage 3: Prefix match
    results = global_index.search(
        name=symbol_name,
        kind=None,
        limit=limit,
        prefix_mode=True
    )
    if results:
        logger.debug(f"Stage 3 (prefix): Found {len(results)} results for {symbol_name}")
        return _rank_and_convert(results, file_context)

    logger.debug(f"No definitions found for {symbol_name}")
    return []


def _search_with_kind(
    global_index: GlobalSymbolIndex,
    symbol_name: str,
    symbol_kind: Optional[str],
    limit: int
) -> List[Symbol]:
    """Search for symbols with optional kind filter."""
    return global_index.search(
        name=symbol_name,
        kind=symbol_kind,
        limit=limit,
        prefix_mode=False
    )


def _rank_and_convert(
    symbols: List[Symbol],
    file_context: Optional[str]
) -> List[DefinitionResult]:
    """Convert symbols to DefinitionResult and rank by proximity."""
    results = [
        DefinitionResult(
            name=sym.name,
            kind=sym.kind,
            file_path=sym.file or "",
            line=sym.range[0] if sym.range else 1,
            end_line=sym.range[1] if sym.range else 1,
            signature=None,  # Could extract from file if needed
            container=None,  # Could extract from parent symbol
            score=1.0
        )
        for sym in symbols
    ]
    return rank_by_proximity(results, file_context)
```
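A short usage sketch for the API above. The project path and symbol names are placeholders, and the project must already be indexed or `IndexNotFoundError` is raised:

```python
from codexlens.api import find_definition
from codexlens.errors import IndexNotFoundError

try:
    # Stage 1 tries an exact name+kind match, stage 2 drops the kind
    # filter, and stage 3 falls back to prefix matching.
    results = find_definition(
        project_root="/path/to/project",  # placeholder path
        symbol_name="GlobalSymbolIndex",
        symbol_kind="class",
        file_context="src/storage/global_index.py",  # enables proximity ranking
        limit=5,
    )
    for r in results:
        print(f"{r.name} ({r.kind}) {r.file_path}:{r.line}")
except IndexNotFoundError as err:
    print(f"Index the project first: {err}")
```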
271
codex-lens/build/lib/codexlens/api/file_context.py
Normal file
271
codex-lens/build/lib/codexlens/api/file_context.py
Normal file
@@ -0,0 +1,271 @@
|
||||
"""file_context API implementation.
|
||||
|
||||
This module provides the file_context() function for retrieving
|
||||
method call graphs from a source file.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
from ..entities import Symbol
|
||||
from ..storage.global_index import GlobalSymbolIndex
|
||||
from ..storage.dir_index import DirIndexStore
|
||||
from ..storage.registry import RegistryStore
|
||||
from ..errors import IndexNotFoundError
|
||||
from .models import (
|
||||
FileContextResult,
|
||||
MethodContext,
|
||||
CallInfo,
|
||||
)
|
||||
from .utils import resolve_project, normalize_relationship_type
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def file_context(
|
||||
project_root: str,
|
||||
file_path: str,
|
||||
include_calls: bool = True,
|
||||
include_callers: bool = True,
|
||||
max_depth: int = 1,
|
||||
format: str = "brief"
|
||||
) -> FileContextResult:
|
||||
"""Get method call context for a code file.
|
||||
|
||||
Retrieves all methods/functions in the file along with their
|
||||
outgoing calls and incoming callers.
|
||||
|
||||
Args:
|
||||
project_root: Project root directory (for index location)
|
||||
file_path: Path to the code file to analyze
|
||||
include_calls: Whether to include outgoing calls
|
||||
include_callers: Whether to include incoming callers
|
||||
max_depth: Call chain depth (V1 only supports 1)
|
||||
format: Output format (brief | detailed | tree)
|
||||
|
||||
Returns:
|
||||
FileContextResult with method contexts and summary
|
||||
|
||||
Raises:
|
||||
IndexNotFoundError: If project is not indexed
|
||||
FileNotFoundError: If file does not exist
|
||||
ValueError: If max_depth > 1 (V1 limitation)
|
||||
"""
|
||||
# V1 limitation: only depth=1 supported
|
||||
if max_depth > 1:
|
||||
raise ValueError(
|
||||
f"max_depth > 1 not supported in V1. "
|
||||
f"Requested: {max_depth}, supported: 1"
|
||||
)
|
||||
|
||||
project_path = resolve_project(project_root)
|
||||
file_path_resolved = Path(file_path).resolve()
|
||||
|
||||
# Validate file exists
|
||||
if not file_path_resolved.exists():
|
||||
raise FileNotFoundError(f"File not found: {file_path_resolved}")
|
||||
|
||||
# Get project info from registry
|
||||
registry = RegistryStore()
|
||||
project_info = registry.get_project(project_path)
|
||||
if project_info is None:
|
||||
raise IndexNotFoundError(f"Project not indexed: {project_path}")
|
||||
|
||||
# Open global symbol index
|
||||
index_db = project_info.index_root / "_global_symbols.db"
|
||||
if not index_db.exists():
|
||||
raise IndexNotFoundError(f"Global symbol index not found: {index_db}")
|
||||
|
||||
global_index = GlobalSymbolIndex(str(index_db), project_info.id)
|
||||
|
||||
# Get all symbols in the file
|
||||
symbols = global_index.get_file_symbols(str(file_path_resolved))
|
||||
|
||||
# Filter to functions, methods, and classes
|
||||
method_symbols = [
|
||||
s for s in symbols
|
||||
if s.kind in ("function", "method", "class")
|
||||
]
|
||||
|
||||
logger.debug(f"Found {len(method_symbols)} methods in {file_path}")
|
||||
|
||||
# Try to find dir_index for relationship queries
|
||||
dir_index = _find_dir_index(project_info, file_path_resolved)
|
||||
|
||||
# Build method contexts
|
||||
methods: List[MethodContext] = []
|
||||
outgoing_resolved = True
|
||||
incoming_resolved = True
|
||||
targets_resolved = True
|
||||
|
||||
for symbol in method_symbols:
|
||||
calls: List[CallInfo] = []
|
||||
callers: List[CallInfo] = []
|
||||
|
||||
if include_calls and dir_index:
|
||||
try:
|
||||
outgoing = dir_index.get_outgoing_calls(
|
||||
str(file_path_resolved),
|
||||
symbol.name
|
||||
)
|
||||
for target_name, rel_type, line, target_file in outgoing:
|
||||
calls.append(CallInfo(
|
||||
symbol_name=target_name,
|
||||
file_path=target_file,
|
||||
line=line,
|
||||
relationship=normalize_relationship_type(rel_type)
|
||||
))
|
||||
if target_file is None:
|
||||
targets_resolved = False
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get outgoing calls: {e}")
|
||||
outgoing_resolved = False
|
||||
|
||||
if include_callers and dir_index:
|
||||
try:
|
||||
incoming = dir_index.get_incoming_calls(symbol.name)
|
||||
for source_name, rel_type, line, source_file in incoming:
|
||||
callers.append(CallInfo(
|
||||
symbol_name=source_name,
|
||||
file_path=source_file,
|
||||
line=line,
|
||||
relationship=normalize_relationship_type(rel_type)
|
||||
))
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get incoming calls: {e}")
|
||||
incoming_resolved = False
|
||||
|
||||
methods.append(MethodContext(
|
||||
name=symbol.name,
|
||||
kind=symbol.kind,
|
||||
line_range=symbol.range if symbol.range else (1, 1),
|
||||
signature=None, # Could extract from source
|
||||
calls=calls,
|
||||
callers=callers
|
||||
))
|
||||
|
||||
# Detect language from file extension
|
||||
language = _detect_language(file_path_resolved)
|
||||
|
||||
# Generate summary
|
||||
summary = _generate_summary(file_path_resolved, methods, format)
|
||||
|
||||
return FileContextResult(
|
||||
file_path=str(file_path_resolved),
|
||||
language=language,
|
||||
methods=methods,
|
||||
summary=summary,
|
||||
discovery_status={
|
||||
"outgoing_resolved": outgoing_resolved,
|
||||
"incoming_resolved": incoming_resolved,
|
||||
"targets_resolved": targets_resolved
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _find_dir_index(project_info, file_path: Path) -> Optional[DirIndexStore]:
|
||||
"""Find the dir_index that contains the file.
|
||||
|
||||
Args:
|
||||
project_info: Project information from registry
|
||||
file_path: Path to the file
|
||||
|
||||
Returns:
|
||||
DirIndexStore if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
# Look for _index.db in file's directory or parent directories
|
||||
current = file_path.parent
|
||||
while current != current.parent:
|
||||
index_db = current / "_index.db"
|
||||
if index_db.exists():
|
||||
return DirIndexStore(str(index_db))
|
||||
|
||||
# Also check in project's index_root
|
||||
relative = current.relative_to(project_info.source_root)
|
||||
index_in_cache = project_info.index_root / relative / "_index.db"
|
||||
if index_in_cache.exists():
|
||||
return DirIndexStore(str(index_in_cache))
|
||||
|
||||
current = current.parent
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to find dir_index: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _detect_language(file_path: Path) -> str:
|
||||
"""Detect programming language from file extension.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file
|
||||
|
||||
Returns:
|
||||
Language name
|
||||
"""
|
||||
ext_map = {
|
||||
".py": "python",
|
||||
".js": "javascript",
|
||||
".ts": "typescript",
|
||||
".jsx": "javascript",
|
||||
".tsx": "typescript",
|
||||
".go": "go",
|
||||
".rs": "rust",
|
||||
".java": "java",
|
||||
".c": "c",
|
||||
".cpp": "cpp",
|
||||
".h": "c",
|
||||
".hpp": "cpp",
|
||||
}
|
||||
return ext_map.get(file_path.suffix.lower(), "unknown")
|
||||
|
||||
|
||||
def _generate_summary(
|
||||
file_path: Path,
|
||||
methods: List[MethodContext],
|
||||
format: str
|
||||
) -> str:
|
||||
"""Generate human-readable summary of file context.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file
|
||||
methods: List of method contexts
|
||||
format: Output format (brief | detailed | tree)
|
||||
|
||||
Returns:
|
||||
Markdown-formatted summary
|
||||
"""
|
||||
lines = [f"## {file_path.name} ({len(methods)} methods)\n"]
|
||||
|
||||
for method in methods:
|
||||
start, end = method.line_range
|
||||
lines.append(f"### {method.name} (line {start}-{end})")
|
||||
|
||||
if method.calls:
|
||||
calls_str = ", ".join(
|
||||
f"{c.symbol_name} ({c.file_path or 'unresolved'}:{c.line})"
|
||||
if format == "detailed"
|
||||
else c.symbol_name
|
||||
for c in method.calls
|
||||
)
|
||||
lines.append(f"- Calls: {calls_str}")
|
||||
|
||||
if method.callers:
|
||||
callers_str = ", ".join(
|
||||
f"{c.symbol_name} ({c.file_path}:{c.line})"
|
||||
if format == "detailed"
|
||||
else c.symbol_name
|
||||
for c in method.callers
|
||||
)
|
||||
lines.append(f"- Called by: {callers_str}")
|
||||
|
||||
if not method.calls and not method.callers:
|
||||
lines.append("- (no call relationships)")
|
||||
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
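For orientation, a minimal usage sketch of this API; the paths are hypothetical and the project is assumed to have been indexed already:

```python
# Hypothetical usage; assumes /path/to/project was indexed by codexlens beforehand.
from codexlens.api.file_context import file_context

result = file_context("/path/to/project", "/path/to/project/src/auth.py")
for m in result.methods:
    print(f"{m.name}: {len(m.calls)} calls, {len(m.callers)} callers")
print(result.discovery_status)  # flags show whether call edges were resolved
```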
codex-lens/build/lib/codexlens/api/hover.py (new file, 148 lines)
@@ -0,0 +1,148 @@
"""get_hover API implementation.
|
||||
|
||||
This module provides the get_hover() function for retrieving
|
||||
detailed hover information for symbols.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from ..entities import Symbol
|
||||
from ..storage.global_index import GlobalSymbolIndex
|
||||
from ..storage.registry import RegistryStore
|
||||
from ..errors import IndexNotFoundError
|
||||
from .models import HoverInfo
|
||||
from .utils import resolve_project
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_hover(
|
||||
project_root: str,
|
||||
symbol_name: str,
|
||||
file_path: Optional[str] = None
|
||||
) -> Optional[HoverInfo]:
|
||||
"""Get detailed hover information for a symbol.
|
||||
|
||||
Args:
|
||||
project_root: Project root directory (for index location)
|
||||
symbol_name: Name of the symbol to look up
|
||||
file_path: Optional file path to disambiguate when symbol
|
||||
appears in multiple files
|
||||
|
||||
Returns:
|
||||
HoverInfo if symbol found, None otherwise
|
||||
|
||||
Raises:
|
||||
IndexNotFoundError: If project is not indexed
|
||||
"""
|
||||
project_path = resolve_project(project_root)
|
||||
|
||||
# Get project info from registry
|
||||
registry = RegistryStore()
|
||||
project_info = registry.get_project(project_path)
|
||||
if project_info is None:
|
||||
raise IndexNotFoundError(f"Project not indexed: {project_path}")
|
||||
|
||||
# Open global symbol index
|
||||
index_db = project_info.index_root / "_global_symbols.db"
|
||||
if not index_db.exists():
|
||||
raise IndexNotFoundError(f"Global symbol index not found: {index_db}")
|
||||
|
||||
global_index = GlobalSymbolIndex(str(index_db), project_info.id)
|
||||
|
||||
# Search for the symbol
|
||||
results = global_index.search(
|
||||
name=symbol_name,
|
||||
kind=None,
|
||||
limit=50,
|
||||
prefix_mode=False
|
||||
)
|
||||
|
||||
if not results:
|
||||
logger.debug(f"No hover info found for {symbol_name}")
|
||||
return None
|
||||
|
||||
# If file_path provided, filter to that file
|
||||
if file_path:
|
||||
file_path_resolved = str(Path(file_path).resolve())
|
||||
matching = [s for s in results if s.file == file_path_resolved]
|
||||
if matching:
|
||||
results = matching
|
||||
|
||||
# Take the first result
|
||||
symbol = results[0]
|
||||
|
||||
# Build hover info
|
||||
return HoverInfo(
|
||||
name=symbol.name,
|
||||
kind=symbol.kind,
|
||||
signature=_extract_signature(symbol),
|
||||
documentation=_extract_documentation(symbol),
|
||||
file_path=symbol.file or "",
|
||||
line_range=symbol.range if symbol.range else (1, 1),
|
||||
type_info=_extract_type_info(symbol)
|
||||
)
|
||||
|
||||
|
||||
def _extract_signature(symbol: Symbol) -> str:
|
||||
"""Extract signature from symbol.
|
||||
|
||||
For now, generates a basic signature based on kind and name.
|
||||
In a full implementation, this would parse the actual source code.
|
||||
|
||||
Args:
|
||||
symbol: The symbol to extract signature from
|
||||
|
||||
Returns:
|
||||
Signature string
|
||||
"""
|
||||
if symbol.kind == "function":
|
||||
return f"def {symbol.name}(...)"
|
||||
elif symbol.kind == "method":
|
||||
return f"def {symbol.name}(self, ...)"
|
||||
elif symbol.kind == "class":
|
||||
return f"class {symbol.name}"
|
||||
elif symbol.kind == "variable":
|
||||
return symbol.name
|
||||
elif symbol.kind == "constant":
|
||||
return f"{symbol.name} = ..."
|
||||
else:
|
||||
return f"{symbol.kind} {symbol.name}"
|
||||
|
||||
|
||||
def _extract_documentation(symbol: Symbol) -> Optional[str]:
|
||||
"""Extract documentation from symbol.
|
||||
|
||||
In a full implementation, this would parse docstrings from source.
|
||||
For now, returns None.
|
||||
|
||||
Args:
|
||||
symbol: The symbol to extract documentation from
|
||||
|
||||
Returns:
|
||||
Documentation string if available, None otherwise
|
||||
"""
|
||||
# Would need to read source file and parse docstring
|
||||
# For V1, return None
|
||||
return None
|
||||
|
||||
|
||||
def _extract_type_info(symbol: Symbol) -> Optional[str]:
|
||||
"""Extract type information from symbol.
|
||||
|
||||
In a full implementation, this would parse type annotations.
|
||||
For now, returns None.
|
||||
|
||||
Args:
|
||||
symbol: The symbol to extract type info from
|
||||
|
||||
Returns:
|
||||
Type info string if available, None otherwise
|
||||
"""
|
||||
# Would need to parse type annotations from source
|
||||
# For V1, return None
|
||||
return None
|
||||
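A quick sketch of the disambiguation behavior; the project path and symbol name are hypothetical:

```python
# Hypothetical usage; file_path narrows the lookup when the name is ambiguous.
from codexlens.api.hover import get_hover

info = get_hover("/path/to/project", "parse_config",
                 file_path="/path/to/project/src/config.py")
if info:
    print(info.signature)  # e.g. "def parse_config(...)" (V1 placeholder signature)
    print(info.to_dict())  # None-valued optional fields are dropped
```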
codex-lens/build/lib/codexlens/api/models.py (new file, 281 lines)
@@ -0,0 +1,281 @@
"""API dataclass definitions for codexlens LSP API.
|
||||
|
||||
This module defines all result dataclasses used by the public API layer,
|
||||
following the patterns established in mcp/schema.py.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from typing import List, Optional, Dict, Tuple
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.2: file_context dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class CallInfo:
|
||||
"""Call relationship information.
|
||||
|
||||
Attributes:
|
||||
symbol_name: Name of the called/calling symbol
|
||||
file_path: Target file path (may be None if unresolved)
|
||||
line: Line number of the call
|
||||
relationship: Type of relationship (call | import | inheritance)
|
||||
"""
|
||||
symbol_name: str
|
||||
file_path: Optional[str]
|
||||
line: int
|
||||
relationship: str # call | import | inheritance
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
return {k: v for k, v in asdict(self).items() if v is not None}
|
||||
|
||||
|
||||
@dataclass
|
||||
class MethodContext:
|
||||
"""Method context with call relationships.
|
||||
|
||||
Attributes:
|
||||
name: Method/function name
|
||||
kind: Symbol kind (function | method | class)
|
||||
line_range: Start and end line numbers
|
||||
signature: Function signature (if available)
|
||||
calls: List of outgoing calls
|
||||
callers: List of incoming calls
|
||||
"""
|
||||
name: str
|
||||
kind: str # function | method | class
|
||||
line_range: Tuple[int, int]
|
||||
signature: Optional[str]
|
||||
calls: List[CallInfo] = field(default_factory=list)
|
||||
callers: List[CallInfo] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
result = {
|
||||
"name": self.name,
|
||||
"kind": self.kind,
|
||||
"line_range": list(self.line_range),
|
||||
"calls": [c.to_dict() for c in self.calls],
|
||||
"callers": [c.to_dict() for c in self.callers],
|
||||
}
|
||||
if self.signature is not None:
|
||||
result["signature"] = self.signature
|
||||
return result
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileContextResult:
|
||||
"""File context result with method summaries.
|
||||
|
||||
Attributes:
|
||||
file_path: Path to the analyzed file
|
||||
language: Programming language
|
||||
methods: List of method contexts
|
||||
summary: Human-readable summary
|
||||
discovery_status: Status flags for call resolution
|
||||
"""
|
||||
file_path: str
|
||||
language: str
|
||||
methods: List[MethodContext]
|
||||
summary: str
|
||||
discovery_status: Dict[str, bool] = field(default_factory=lambda: {
|
||||
"outgoing_resolved": False,
|
||||
"incoming_resolved": True,
|
||||
"targets_resolved": False
|
||||
})
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for JSON serialization."""
|
||||
return {
|
||||
"file_path": self.file_path,
|
||||
"language": self.language,
|
||||
"methods": [m.to_dict() for m in self.methods],
|
||||
"summary": self.summary,
|
||||
"discovery_status": self.discovery_status,
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.3: find_definition dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class DefinitionResult:
|
||||
"""Definition lookup result.
|
||||
|
||||
Attributes:
|
||||
name: Symbol name
|
||||
kind: Symbol kind (class, function, method, etc.)
|
||||
file_path: File where symbol is defined
|
||||
line: Start line number
|
||||
end_line: End line number
|
||||
signature: Symbol signature (if available)
|
||||
container: Containing class/module (if any)
|
||||
score: Match score for ranking
|
||||
"""
|
||||
name: str
|
||||
kind: str
|
||||
file_path: str
|
||||
line: int
|
||||
end_line: int
|
||||
signature: Optional[str] = None
|
||||
container: Optional[str] = None
|
||||
score: float = 1.0
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
return {k: v for k, v in asdict(self).items() if v is not None}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.4: find_references dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class ReferenceResult:
|
||||
"""Reference lookup result.
|
||||
|
||||
Attributes:
|
||||
file_path: File containing the reference
|
||||
line: Line number
|
||||
column: Column number
|
||||
context_line: The line of code containing the reference
|
||||
relationship: Type of reference (call | import | type_annotation | inheritance)
|
||||
"""
|
||||
file_path: str
|
||||
line: int
|
||||
column: int
|
||||
context_line: str
|
||||
relationship: str # call | import | type_annotation | inheritance
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary."""
|
||||
return asdict(self)
|
||||
|
||||
|
||||
@dataclass
|
||||
class GroupedReferences:
|
||||
"""References grouped by definition.
|
||||
|
||||
Used when a symbol has multiple definitions (e.g., overloads).
|
||||
|
||||
Attributes:
|
||||
definition: The definition this group refers to
|
||||
references: List of references to this definition
|
||||
"""
|
||||
definition: DefinitionResult
|
||||
references: List[ReferenceResult] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary."""
|
||||
return {
|
||||
"definition": self.definition.to_dict(),
|
||||
"references": [r.to_dict() for r in self.references],
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.5: workspace_symbols dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class SymbolInfo:
|
||||
"""Symbol information for workspace search.
|
||||
|
||||
Attributes:
|
||||
name: Symbol name
|
||||
kind: Symbol kind
|
||||
file_path: File where symbol is defined
|
||||
line: Line number
|
||||
container: Containing class/module (if any)
|
||||
score: Match score for ranking
|
||||
"""
|
||||
name: str
|
||||
kind: str
|
||||
file_path: str
|
||||
line: int
|
||||
container: Optional[str] = None
|
||||
score: float = 1.0
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
return {k: v for k, v in asdict(self).items() if v is not None}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.6: get_hover dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class HoverInfo:
|
||||
"""Hover information for a symbol.
|
||||
|
||||
Attributes:
|
||||
name: Symbol name
|
||||
kind: Symbol kind
|
||||
signature: Symbol signature
|
||||
documentation: Documentation string (if available)
|
||||
file_path: File where symbol is defined
|
||||
line_range: Start and end line numbers
|
||||
type_info: Type information (if available)
|
||||
"""
|
||||
name: str
|
||||
kind: str
|
||||
signature: str
|
||||
documentation: Optional[str]
|
||||
file_path: str
|
||||
line_range: Tuple[int, int]
|
||||
type_info: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
result = {
|
||||
"name": self.name,
|
||||
"kind": self.kind,
|
||||
"signature": self.signature,
|
||||
"file_path": self.file_path,
|
||||
"line_range": list(self.line_range),
|
||||
}
|
||||
if self.documentation is not None:
|
||||
result["documentation"] = self.documentation
|
||||
if self.type_info is not None:
|
||||
result["type_info"] = self.type_info
|
||||
return result
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Section 4.7: semantic_search dataclasses
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class SemanticResult:
|
||||
"""Semantic search result.
|
||||
|
||||
Attributes:
|
||||
symbol_name: Name of the matched symbol
|
||||
kind: Symbol kind
|
||||
file_path: File where symbol is defined
|
||||
line: Line number
|
||||
vector_score: Vector similarity score (None if not available)
|
||||
structural_score: Structural match score (None if not available)
|
||||
fusion_score: Combined fusion score
|
||||
snippet: Code snippet
|
||||
match_reason: Explanation of why this matched (optional)
|
||||
"""
|
||||
symbol_name: str
|
||||
kind: str
|
||||
file_path: str
|
||||
line: int
|
||||
vector_score: Optional[float]
|
||||
structural_score: Optional[float]
|
||||
fusion_score: float
|
||||
snippet: str
|
||||
match_reason: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary, filtering None values."""
|
||||
return {k: v for k, v in asdict(self).items() if v is not None}
|
||||
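The to_dict() methods compose, so a whole result tree serializes in one call; a small sketch with made-up values:

```python
# Hypothetical values; shows the nested to_dict() chain and None filtering.
import json

call = CallInfo(symbol_name="save", file_path=None, line=42, relationship="call")
method = MethodContext(name="update", kind="method", line_range=(10, 55),
                       signature=None, calls=[call], callers=[])
print(json.dumps(method.to_dict(), indent=2))
# file_path and signature are omitted from the output because they are None
```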
codex-lens/build/lib/codexlens/api/references.py (new file, 345 lines)
@@ -0,0 +1,345 @@
"""Find references API for codexlens.
|
||||
|
||||
This module implements the find_references() function that wraps
|
||||
ChainSearchEngine.search_references() with grouped result structure
|
||||
for multi-definition symbols.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Dict
|
||||
|
||||
from .models import (
|
||||
DefinitionResult,
|
||||
ReferenceResult,
|
||||
GroupedReferences,
|
||||
)
|
||||
from .utils import (
|
||||
resolve_project,
|
||||
normalize_relationship_type,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _read_line_from_file(file_path: str, line: int) -> str:
|
||||
"""Read a specific line from a file.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file
|
||||
line: Line number (1-based)
|
||||
|
||||
Returns:
|
||||
The line content, stripped of trailing whitespace.
|
||||
Returns empty string if file cannot be read or line doesn't exist.
|
||||
"""
|
||||
try:
|
||||
path = Path(file_path)
|
||||
if not path.exists():
|
||||
return ""
|
||||
|
||||
with path.open("r", encoding="utf-8", errors="replace") as f:
|
||||
for i, content in enumerate(f, 1):
|
||||
if i == line:
|
||||
return content.rstrip()
|
||||
return ""
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to read line %d from %s: %s", line, file_path, exc)
|
||||
return ""
|
||||
|
||||
|
||||
def _transform_to_reference_result(
|
||||
raw_ref: "RawReferenceResult",
|
||||
) -> ReferenceResult:
|
||||
"""Transform raw ChainSearchEngine reference to API ReferenceResult.
|
||||
|
||||
Args:
|
||||
raw_ref: Raw reference result from ChainSearchEngine
|
||||
|
||||
Returns:
|
||||
API ReferenceResult with context_line and normalized relationship
|
||||
"""
|
||||
# Read the actual line from the file
|
||||
context_line = _read_line_from_file(raw_ref.file_path, raw_ref.line)
|
||||
|
||||
# Normalize relationship type
|
||||
relationship = normalize_relationship_type(raw_ref.relationship_type)
|
||||
|
||||
return ReferenceResult(
|
||||
file_path=raw_ref.file_path,
|
||||
line=raw_ref.line,
|
||||
column=raw_ref.column,
|
||||
context_line=context_line,
|
||||
relationship=relationship,
|
||||
)
|
||||
|
||||
|
||||
def find_references(
|
||||
project_root: str,
|
||||
symbol_name: str,
|
||||
symbol_kind: Optional[str] = None,
|
||||
include_definition: bool = True,
|
||||
group_by_definition: bool = True,
|
||||
limit: int = 100,
|
||||
) -> List[GroupedReferences]:
|
||||
"""Find all reference locations for a symbol.
|
||||
|
||||
Multi-definition case returns grouped results to resolve ambiguity.
|
||||
|
||||
This function wraps ChainSearchEngine.search_references() and groups
|
||||
the results by definition location. Each GroupedReferences contains
|
||||
a definition and all references that point to it.
|
||||
|
||||
Args:
|
||||
project_root: Project root directory path
|
||||
symbol_name: Name of the symbol to find references for
|
||||
symbol_kind: Optional symbol kind filter (e.g., 'function', 'class')
|
||||
include_definition: Whether to include the definition location
|
||||
in the result (default True)
|
||||
group_by_definition: Whether to group references by definition.
|
||||
If False, returns a single group with all references.
|
||||
(default True)
|
||||
limit: Maximum number of references to return (default 100)
|
||||
|
||||
Returns:
|
||||
List of GroupedReferences. Each group contains:
|
||||
- definition: The DefinitionResult for this symbol definition
|
||||
- references: List of ReferenceResult pointing to this definition
|
||||
|
||||
Raises:
|
||||
ValueError: If project_root does not exist or is not a directory
|
||||
|
||||
Examples:
|
||||
>>> refs = find_references("/path/to/project", "authenticate")
|
||||
>>> for group in refs:
|
||||
... print(f"Definition: {group.definition.file_path}:{group.definition.line}")
|
||||
... for ref in group.references:
|
||||
... print(f" Reference: {ref.file_path}:{ref.line} ({ref.relationship})")
|
||||
|
||||
Note:
|
||||
Reference relationship types are normalized:
|
||||
- 'calls' -> 'call'
|
||||
- 'imports' -> 'import'
|
||||
- 'inherits' -> 'inheritance'
|
||||
"""
|
||||
# Validate and resolve project root
|
||||
project_path = resolve_project(project_root)
|
||||
|
||||
# Import here to avoid circular imports
|
||||
from codexlens.config import Config
|
||||
from codexlens.storage.registry import RegistryStore
|
||||
from codexlens.storage.path_mapper import PathMapper
|
||||
from codexlens.storage.global_index import GlobalSymbolIndex
|
||||
from codexlens.search.chain_search import ChainSearchEngine
|
||||
from codexlens.search.chain_search import ReferenceResult as RawReferenceResult
|
||||
from codexlens.entities import Symbol
|
||||
|
||||
# Initialize infrastructure
|
||||
config = Config()
|
||||
registry = RegistryStore()
|
||||
mapper = PathMapper(config.index_dir)
|
||||
|
||||
# Create chain search engine
|
||||
engine = ChainSearchEngine(registry, mapper, config=config)
|
||||
|
||||
try:
|
||||
# Step 1: Find definitions for the symbol
|
||||
definitions: List[DefinitionResult] = []
|
||||
|
||||
if include_definition or group_by_definition:
|
||||
# Search for symbol definitions
|
||||
symbols = engine.search_symbols(
|
||||
name=symbol_name,
|
||||
source_path=project_path,
|
||||
kind=symbol_kind,
|
||||
)
|
||||
|
||||
# Convert Symbol to DefinitionResult
|
||||
for sym in symbols:
|
||||
# Only include exact name matches for definitions
|
||||
if sym.name != symbol_name:
|
||||
continue
|
||||
|
||||
# Optionally filter by kind
|
||||
if symbol_kind and sym.kind != symbol_kind:
|
||||
continue
|
||||
|
||||
definitions.append(DefinitionResult(
|
||||
name=sym.name,
|
||||
kind=sym.kind,
|
||||
file_path=sym.file or "",
|
||||
line=sym.range[0] if sym.range else 1,
|
||||
end_line=sym.range[1] if sym.range else 1,
|
||||
signature=None, # Not available from Symbol
|
||||
container=None, # Not available from Symbol
|
||||
score=1.0,
|
||||
))
|
||||
|
||||
# Step 2: Get all references using ChainSearchEngine
|
||||
raw_references = engine.search_references(
|
||||
symbol_name=symbol_name,
|
||||
source_path=project_path,
|
||||
depth=-1,
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
# Step 3: Transform raw references to API ReferenceResult
|
||||
api_references: List[ReferenceResult] = []
|
||||
for raw_ref in raw_references:
|
||||
api_ref = _transform_to_reference_result(raw_ref)
|
||||
api_references.append(api_ref)
|
||||
|
||||
# Step 4: Group references by definition
|
||||
if group_by_definition and definitions:
|
||||
return _group_references_by_definition(
|
||||
definitions=definitions,
|
||||
references=api_references,
|
||||
include_definition=include_definition,
|
||||
)
|
||||
else:
|
||||
# Return single group with placeholder definition or first definition
|
||||
if definitions:
|
||||
definition = definitions[0]
|
||||
else:
|
||||
# Create placeholder definition when no definition found
|
||||
definition = DefinitionResult(
|
||||
name=symbol_name,
|
||||
kind=symbol_kind or "unknown",
|
||||
file_path="",
|
||||
line=0,
|
||||
end_line=0,
|
||||
signature=None,
|
||||
container=None,
|
||||
score=0.0,
|
||||
)
|
||||
|
||||
return [GroupedReferences(
|
||||
definition=definition,
|
||||
references=api_references,
|
||||
)]
|
||||
|
||||
finally:
|
||||
engine.close()
|
||||
|
||||
|
||||
def _group_references_by_definition(
|
||||
definitions: List[DefinitionResult],
|
||||
references: List[ReferenceResult],
|
||||
include_definition: bool = True,
|
||||
) -> List[GroupedReferences]:
|
||||
"""Group references by their likely definition.
|
||||
|
||||
Uses file proximity heuristic to assign references to definitions.
|
||||
References in the same file or directory as a definition are
|
||||
assigned to that definition.
|
||||
|
||||
Args:
|
||||
definitions: List of definition locations
|
||||
references: List of reference locations
|
||||
include_definition: Whether to include definition in results
|
||||
|
||||
Returns:
|
||||
List of GroupedReferences with references assigned to definitions
|
||||
"""
|
||||
import os
|
||||
|
||||
if not definitions:
|
||||
return []
|
||||
|
||||
if len(definitions) == 1:
|
||||
# Single definition - all references belong to it
|
||||
return [GroupedReferences(
|
||||
definition=definitions[0],
|
||||
references=references,
|
||||
)]
|
||||
|
||||
# Multiple definitions - group by proximity
|
||||
groups: Dict[int, List[ReferenceResult]] = {
|
||||
i: [] for i in range(len(definitions))
|
||||
}
|
||||
|
||||
for ref in references:
|
||||
# Find the closest definition by file proximity
|
||||
best_def_idx = 0
|
||||
best_score = -1
|
||||
|
||||
for i, defn in enumerate(definitions):
|
||||
score = _proximity_score(ref.file_path, defn.file_path)
|
||||
if score > best_score:
|
||||
best_score = score
|
||||
best_def_idx = i
|
||||
|
||||
groups[best_def_idx].append(ref)
|
||||
|
||||
# Build result groups
|
||||
result: List[GroupedReferences] = []
|
||||
for i, defn in enumerate(definitions):
|
||||
# Skip definitions with no references if not including definition itself
|
||||
if not include_definition and not groups[i]:
|
||||
continue
|
||||
|
||||
result.append(GroupedReferences(
|
||||
definition=defn,
|
||||
references=groups[i],
|
||||
))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _proximity_score(ref_path: str, def_path: str) -> int:
|
||||
"""Calculate proximity score between two file paths.
|
||||
|
||||
Args:
|
||||
ref_path: Reference file path
|
||||
def_path: Definition file path
|
||||
|
||||
Returns:
|
||||
Proximity score (higher = closer):
|
||||
- Same file: 1000
|
||||
- Same directory: 100
|
||||
- Otherwise: common path prefix length
|
||||
"""
|
||||
import os
|
||||
|
||||
if not ref_path or not def_path:
|
||||
return 0
|
||||
|
||||
# Normalize paths
|
||||
ref_path = os.path.normpath(ref_path)
|
||||
def_path = os.path.normpath(def_path)
|
||||
|
||||
# Same file
|
||||
if ref_path == def_path:
|
||||
return 1000
|
||||
|
||||
ref_dir = os.path.dirname(ref_path)
|
||||
def_dir = os.path.dirname(def_path)
|
||||
|
||||
# Same directory
|
||||
if ref_dir == def_dir:
|
||||
return 100
|
||||
|
||||
# Common path prefix
|
||||
try:
|
||||
common = os.path.commonpath([ref_path, def_path])
|
||||
return len(common)
|
||||
except ValueError:
|
||||
# No common path (different drives on Windows)
|
||||
return 0
|
||||
|
||||
|
||||
# Type alias for the raw reference from ChainSearchEngine
|
||||
class RawReferenceResult:
|
||||
"""Type stub for ChainSearchEngine.ReferenceResult.
|
||||
|
||||
This is only used for type hints and is replaced at runtime
|
||||
by the actual import.
|
||||
"""
|
||||
file_path: str
|
||||
line: int
|
||||
column: int
|
||||
context: str
|
||||
relationship_type: str
|
||||
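The proximity tiers are easiest to see with concrete paths; these POSIX paths are hypothetical:

```python
# Hypothetical paths illustrating _proximity_score's three tiers.
_proximity_score("/a/b/use.py", "/a/b/use.py")  # 1000 (same file)
_proximity_score("/a/b/use.py", "/a/b/def.py")  # 100  (same directory)
_proximity_score("/a/b/use.py", "/a/x/def.py")  # 2    (length of common prefix "/a")
```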
codex-lens/build/lib/codexlens/api/semantic.py (new file, 471 lines)
@@ -0,0 +1,471 @@
"""Semantic search API with RRF fusion.
|
||||
|
||||
This module provides the semantic_search() function for combining
|
||||
vector, structural, and keyword search with configurable fusion strategies.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
from .models import SemanticResult
|
||||
from .utils import resolve_project
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def semantic_search(
|
||||
project_root: str,
|
||||
query: str,
|
||||
mode: str = "fusion",
|
||||
vector_weight: float = 0.5,
|
||||
structural_weight: float = 0.3,
|
||||
keyword_weight: float = 0.2,
|
||||
fusion_strategy: str = "rrf",
|
||||
kind_filter: Optional[List[str]] = None,
|
||||
limit: int = 20,
|
||||
include_match_reason: bool = False,
|
||||
) -> List[SemanticResult]:
|
||||
"""Semantic search - combining vector and structural search.
|
||||
|
||||
This function provides a high-level API for semantic code search,
|
||||
combining vector similarity, structural (symbol + relationships),
|
||||
and keyword-based search methods with configurable fusion.
|
||||
|
||||
Args:
|
||||
project_root: Project root directory
|
||||
query: Natural language query
|
||||
mode: Search mode
|
||||
- vector: Vector search only
|
||||
- structural: Structural search only (symbol + relationships)
|
||||
- fusion: Fusion search (default)
|
||||
vector_weight: Vector search weight [0, 1] (default 0.5)
|
||||
structural_weight: Structural search weight [0, 1] (default 0.3)
|
||||
keyword_weight: Keyword search weight [0, 1] (default 0.2)
|
||||
fusion_strategy: Fusion strategy (maps to chain_search.py)
|
||||
- rrf: Reciprocal Rank Fusion (recommended, default)
|
||||
- staged: Staged cascade -> staged_cascade_search
|
||||
- binary: Binary rerank cascade -> binary_cascade_search
|
||||
- hybrid: Hybrid cascade -> hybrid_cascade_search
|
||||
kind_filter: Symbol type filter (e.g., ["function", "class"])
|
||||
limit: Max return count (default 20)
|
||||
include_match_reason: Generate match reason (heuristic, not LLM)
|
||||
|
||||
Returns:
|
||||
Results sorted by fusion_score
|
||||
|
||||
Degradation:
|
||||
- No vector index: vector_score=None, uses FTS + structural search
|
||||
- No relationship data: structural_score=None, vector search only
|
||||
|
||||
Examples:
|
||||
>>> results = semantic_search(
|
||||
... "/path/to/project",
|
||||
... "authentication handler",
|
||||
... mode="fusion",
|
||||
... fusion_strategy="rrf"
|
||||
... )
|
||||
>>> for r in results:
|
||||
... print(f"{r.symbol_name}: {r.fusion_score:.3f}")
|
||||
"""
|
||||
# Validate and resolve project path
|
||||
project_path = resolve_project(project_root)
|
||||
|
||||
# Normalize weights to sum to 1.0
|
||||
total_weight = vector_weight + structural_weight + keyword_weight
|
||||
if total_weight > 0:
|
||||
vector_weight = vector_weight / total_weight
|
||||
structural_weight = structural_weight / total_weight
|
||||
keyword_weight = keyword_weight / total_weight
|
||||
else:
|
||||
# Default to equal weights if all zero
|
||||
vector_weight = structural_weight = keyword_weight = 1.0 / 3.0
|
||||
|
||||
# Initialize search infrastructure
|
||||
try:
|
||||
from codexlens.config import Config
|
||||
from codexlens.storage.registry import RegistryStore
|
||||
from codexlens.storage.path_mapper import PathMapper
|
||||
from codexlens.search.chain_search import ChainSearchEngine, SearchOptions
|
||||
except ImportError as exc:
|
||||
logger.error("Failed to import search dependencies: %s", exc)
|
||||
return []
|
||||
|
||||
# Load config
|
||||
config = Config.load()
|
||||
|
||||
# Get or create registry and mapper
|
||||
try:
|
||||
registry = RegistryStore.default()
|
||||
mapper = PathMapper(registry)
|
||||
except Exception as exc:
|
||||
logger.error("Failed to initialize search infrastructure: %s", exc)
|
||||
return []
|
||||
|
||||
# Build search options based on mode
|
||||
search_options = _build_search_options(
|
||||
mode=mode,
|
||||
vector_weight=vector_weight,
|
||||
structural_weight=structural_weight,
|
||||
keyword_weight=keyword_weight,
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
# Execute search based on fusion_strategy
|
||||
try:
|
||||
with ChainSearchEngine(registry, mapper, config=config) as engine:
|
||||
chain_result = _execute_search(
|
||||
engine=engine,
|
||||
query=query,
|
||||
source_path=project_path,
|
||||
fusion_strategy=fusion_strategy,
|
||||
options=search_options,
|
||||
limit=limit,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.error("Search execution failed: %s", exc)
|
||||
return []
|
||||
|
||||
# Transform results to SemanticResult
|
||||
semantic_results = _transform_results(
|
||||
results=chain_result.results,
|
||||
mode=mode,
|
||||
vector_weight=vector_weight,
|
||||
structural_weight=structural_weight,
|
||||
keyword_weight=keyword_weight,
|
||||
kind_filter=kind_filter,
|
||||
include_match_reason=include_match_reason,
|
||||
query=query,
|
||||
)
|
||||
|
||||
return semantic_results[:limit]
|
||||
|
||||
|
||||
def _build_search_options(
|
||||
mode: str,
|
||||
vector_weight: float,
|
||||
structural_weight: float,
|
||||
keyword_weight: float,
|
||||
limit: int,
|
||||
) -> "SearchOptions":
|
||||
"""Build SearchOptions based on mode and weights.
|
||||
|
||||
Args:
|
||||
mode: Search mode (vector, structural, fusion)
|
||||
vector_weight: Vector search weight
|
||||
structural_weight: Structural search weight
|
||||
keyword_weight: Keyword search weight
|
||||
limit: Result limit
|
||||
|
||||
Returns:
|
||||
Configured SearchOptions
|
||||
"""
|
||||
from codexlens.search.chain_search import SearchOptions
|
||||
|
||||
# Default options
|
||||
options = SearchOptions(
|
||||
total_limit=limit * 2, # Fetch extra for filtering
|
||||
limit_per_dir=limit,
|
||||
include_symbols=True, # Always include symbols for structural
|
||||
)
|
||||
|
||||
if mode == "vector":
|
||||
# Pure vector mode
|
||||
options.hybrid_mode = True
|
||||
options.enable_vector = True
|
||||
options.pure_vector = True
|
||||
options.enable_fuzzy = False
|
||||
elif mode == "structural":
|
||||
# Structural only - use FTS + symbols
|
||||
options.hybrid_mode = True
|
||||
options.enable_vector = False
|
||||
options.enable_fuzzy = True
|
||||
options.include_symbols = True
|
||||
else:
|
||||
# Fusion mode (default)
|
||||
options.hybrid_mode = True
|
||||
options.enable_vector = vector_weight > 0
|
||||
options.enable_fuzzy = keyword_weight > 0
|
||||
options.include_symbols = structural_weight > 0
|
||||
|
||||
# Set custom weights for RRF
|
||||
if options.enable_vector and keyword_weight > 0:
|
||||
options.hybrid_weights = {
|
||||
"vector": vector_weight,
|
||||
"exact": keyword_weight * 0.7,
|
||||
"fuzzy": keyword_weight * 0.3,
|
||||
}
|
||||
|
||||
return options
|
||||
|
||||
|
||||
def _execute_search(
|
||||
engine: "ChainSearchEngine",
|
||||
query: str,
|
||||
source_path: Path,
|
||||
fusion_strategy: str,
|
||||
options: "SearchOptions",
|
||||
limit: int,
|
||||
) -> "ChainSearchResult":
|
||||
"""Execute search using appropriate strategy.
|
||||
|
||||
Maps fusion_strategy to ChainSearchEngine methods:
|
||||
- rrf: Standard hybrid search with RRF fusion
|
||||
- staged: staged_cascade_search
|
||||
- binary: binary_cascade_search
|
||||
- hybrid: hybrid_cascade_search
|
||||
|
||||
Args:
|
||||
engine: ChainSearchEngine instance
|
||||
query: Search query
|
||||
source_path: Project root path
|
||||
fusion_strategy: Strategy name
|
||||
options: Search options
|
||||
limit: Result limit
|
||||
|
||||
Returns:
|
||||
ChainSearchResult from the search
|
||||
"""
|
||||
from codexlens.search.chain_search import ChainSearchResult
|
||||
|
||||
if fusion_strategy == "staged":
|
||||
# Use staged cascade search (4-stage pipeline)
|
||||
return engine.staged_cascade_search(
|
||||
query=query,
|
||||
source_path=source_path,
|
||||
k=limit,
|
||||
coarse_k=limit * 5,
|
||||
options=options,
|
||||
)
|
||||
elif fusion_strategy == "binary":
|
||||
# Use binary cascade search (binary coarse + dense fine)
|
||||
return engine.binary_cascade_search(
|
||||
query=query,
|
||||
source_path=source_path,
|
||||
k=limit,
|
||||
coarse_k=limit * 5,
|
||||
options=options,
|
||||
)
|
||||
elif fusion_strategy == "hybrid":
|
||||
# Use hybrid cascade search (FTS+SPLADE+Vector + cross-encoder)
|
||||
return engine.hybrid_cascade_search(
|
||||
query=query,
|
||||
source_path=source_path,
|
||||
k=limit,
|
||||
coarse_k=limit * 5,
|
||||
options=options,
|
||||
)
|
||||
else:
|
||||
# Default: rrf - Standard search with RRF fusion
|
||||
return engine.search(
|
||||
query=query,
|
||||
source_path=source_path,
|
||||
options=options,
|
||||
)
|
||||
|
||||
|
||||
def _transform_results(
|
||||
results: List,
|
||||
mode: str,
|
||||
vector_weight: float,
|
||||
structural_weight: float,
|
||||
keyword_weight: float,
|
||||
kind_filter: Optional[List[str]],
|
||||
include_match_reason: bool,
|
||||
query: str,
|
||||
) -> List[SemanticResult]:
|
||||
"""Transform ChainSearchEngine results to SemanticResult.
|
||||
|
||||
Args:
|
||||
results: List of SearchResult objects
|
||||
mode: Search mode
|
||||
vector_weight: Vector weight used
|
||||
structural_weight: Structural weight used
|
||||
keyword_weight: Keyword weight used
|
||||
kind_filter: Optional symbol kind filter
|
||||
include_match_reason: Whether to generate match reasons
|
||||
query: Original query (for match reason generation)
|
||||
|
||||
Returns:
|
||||
List of SemanticResult objects
|
||||
"""
|
||||
semantic_results = []
|
||||
|
||||
for result in results:
|
||||
# Extract symbol info
|
||||
symbol_name = getattr(result, "symbol_name", None)
|
||||
symbol_kind = getattr(result, "symbol_kind", None)
|
||||
start_line = getattr(result, "start_line", None)
|
||||
|
||||
# Use symbol object if available
|
||||
if hasattr(result, "symbol") and result.symbol:
|
||||
symbol_name = symbol_name or result.symbol.name
|
||||
symbol_kind = symbol_kind or result.symbol.kind
|
||||
if hasattr(result.symbol, "range") and result.symbol.range:
|
||||
start_line = start_line or result.symbol.range[0]
|
||||
|
||||
# Filter by kind if specified
|
||||
if kind_filter and symbol_kind:
|
||||
if symbol_kind.lower() not in [k.lower() for k in kind_filter]:
|
||||
continue
|
||||
|
||||
# Determine scores based on mode and metadata
|
||||
metadata = getattr(result, "metadata", {}) or {}
|
||||
fusion_score = result.score
|
||||
|
||||
# Try to extract source scores from metadata
|
||||
source_scores = metadata.get("source_scores", {})
|
||||
vector_score: Optional[float] = None
|
||||
structural_score: Optional[float] = None
|
||||
|
||||
if mode == "vector":
|
||||
# In pure vector mode, the main score is the vector score
|
||||
vector_score = result.score
|
||||
structural_score = None
|
||||
elif mode == "structural":
|
||||
# In structural mode, no vector score
|
||||
vector_score = None
|
||||
structural_score = result.score
|
||||
else:
|
||||
# Fusion mode - try to extract individual scores
|
||||
if "vector" in source_scores:
|
||||
vector_score = source_scores["vector"]
|
||||
elif metadata.get("fusion_method") == "simple_weighted":
|
||||
# From weighted fusion
|
||||
vector_score = source_scores.get("vector")
|
||||
|
||||
# Structural score approximation (from exact/fuzzy FTS)
|
||||
fts_scores = []
|
||||
if "exact" in source_scores:
|
||||
fts_scores.append(source_scores["exact"])
|
||||
if "fuzzy" in source_scores:
|
||||
fts_scores.append(source_scores["fuzzy"])
|
||||
if "splade" in source_scores:
|
||||
fts_scores.append(source_scores["splade"])
|
||||
|
||||
if fts_scores:
|
||||
structural_score = max(fts_scores)
|
||||
|
||||
# Build snippet
|
||||
snippet = getattr(result, "excerpt", "") or getattr(result, "content", "")
|
||||
if len(snippet) > 500:
|
||||
snippet = snippet[:500] + "..."
|
||||
|
||||
# Generate match reason if requested
|
||||
match_reason = None
|
||||
if include_match_reason:
|
||||
match_reason = _generate_match_reason(
|
||||
query=query,
|
||||
symbol_name=symbol_name,
|
||||
symbol_kind=symbol_kind,
|
||||
snippet=snippet,
|
||||
vector_score=vector_score,
|
||||
structural_score=structural_score,
|
||||
)
|
||||
|
||||
semantic_result = SemanticResult(
|
||||
symbol_name=symbol_name or Path(result.path).stem,
|
||||
kind=symbol_kind or "unknown",
|
||||
file_path=result.path,
|
||||
line=start_line or 1,
|
||||
vector_score=vector_score,
|
||||
structural_score=structural_score,
|
||||
fusion_score=fusion_score,
|
||||
snippet=snippet,
|
||||
match_reason=match_reason,
|
||||
)
|
||||
|
||||
semantic_results.append(semantic_result)
|
||||
|
||||
# Sort by fusion_score descending
|
||||
semantic_results.sort(key=lambda r: r.fusion_score, reverse=True)
|
||||
|
||||
return semantic_results
|
||||
|
||||
|
||||
def _generate_match_reason(
|
||||
query: str,
|
||||
symbol_name: Optional[str],
|
||||
symbol_kind: Optional[str],
|
||||
snippet: str,
|
||||
vector_score: Optional[float],
|
||||
structural_score: Optional[float],
|
||||
) -> str:
|
||||
"""Generate human-readable match reason heuristically.
|
||||
|
||||
This is a simple heuristic-based approach, not LLM-powered.
|
||||
|
||||
Args:
|
||||
query: Original search query
|
||||
symbol_name: Symbol name if available
|
||||
symbol_kind: Symbol kind if available
|
||||
snippet: Code snippet
|
||||
vector_score: Vector similarity score
|
||||
structural_score: Structural match score
|
||||
|
||||
Returns:
|
||||
Human-readable explanation string
|
||||
"""
|
||||
reasons = []
|
||||
|
||||
# Check for direct name match
|
||||
query_lower = query.lower()
|
||||
query_words = set(query_lower.split())
|
||||
|
||||
if symbol_name:
|
||||
name_lower = symbol_name.lower()
|
||||
# Direct substring match
|
||||
if query_lower in name_lower or name_lower in query_lower:
|
||||
reasons.append(f"Symbol name '{symbol_name}' matches query")
|
||||
# Word overlap
|
||||
name_words = set(_split_camel_case(symbol_name).lower().split())
|
||||
overlap = query_words & name_words
|
||||
if overlap and not reasons:
|
||||
reasons.append(f"Symbol name contains: {', '.join(overlap)}")
|
||||
|
||||
# Check snippet for keyword matches
|
||||
snippet_lower = snippet.lower()
|
||||
matching_words = [w for w in query_words if w in snippet_lower and len(w) > 2]
|
||||
if matching_words and len(reasons) < 2:
|
||||
reasons.append(f"Code contains keywords: {', '.join(matching_words[:3])}")
|
||||
|
||||
# Add score-based reasoning
|
||||
if vector_score is not None and vector_score > 0.7:
|
||||
reasons.append("High semantic similarity")
|
||||
elif vector_score is not None and vector_score > 0.5:
|
||||
reasons.append("Moderate semantic similarity")
|
||||
|
||||
if structural_score is not None and structural_score > 0.8:
|
||||
reasons.append("Strong structural match")
|
||||
|
||||
# Symbol kind context
|
||||
if symbol_kind and len(reasons) < 3:
|
||||
reasons.append(f"Matched {symbol_kind}")
|
||||
|
||||
if not reasons:
|
||||
reasons.append("Partial relevance based on content analysis")
|
||||
|
||||
return "; ".join(reasons[:3])
|
||||
|
||||
|
||||
def _split_camel_case(name: str) -> str:
|
||||
"""Split camelCase and PascalCase to words.
|
||||
|
||||
Args:
|
||||
name: Symbol name in camelCase or PascalCase
|
||||
|
||||
Returns:
|
||||
Space-separated words
|
||||
"""
|
||||
import re
|
||||
|
||||
# Insert space before uppercase letters
|
||||
result = re.sub(r"([a-z])([A-Z])", r"\1 \2", name)
|
||||
# Insert space before uppercase followed by lowercase
|
||||
result = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1 \2", result)
|
||||
# Replace underscores with spaces
|
||||
result = result.replace("_", " ")
|
||||
|
||||
return result
|
||||
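Because the weights are renormalized to sum to 1.0, callers can pass any non-negative ratios. A hypothetical call; the project path and query are placeholders:

```python
# Hypothetical usage; weights 2:1:1 normalize to 0.5/0.25/0.25 before fusion.
from codexlens.api.semantic import semantic_search

results = semantic_search(
    "/path/to/project",
    "token refresh logic",
    vector_weight=2.0,
    structural_weight=1.0,
    keyword_weight=1.0,
    include_match_reason=True,
)
for r in results[:5]:
    print(f"{r.symbol_name} ({r.kind}) {r.fusion_score:.3f}: {r.match_reason}")
```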
codex-lens/build/lib/codexlens/api/symbols.py (new file, 146 lines)
@@ -0,0 +1,146 @@
"""workspace_symbols API implementation.
|
||||
|
||||
This module provides the workspace_symbols() function for searching
|
||||
symbols across the entire workspace with prefix matching.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import fnmatch
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
from ..entities import Symbol
|
||||
from ..storage.global_index import GlobalSymbolIndex
|
||||
from ..storage.registry import RegistryStore
|
||||
from ..errors import IndexNotFoundError
|
||||
from .models import SymbolInfo
|
||||
from .utils import resolve_project
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def workspace_symbols(
|
||||
project_root: str,
|
||||
query: str,
|
||||
kind_filter: Optional[List[str]] = None,
|
||||
file_pattern: Optional[str] = None,
|
||||
limit: int = 50
|
||||
) -> List[SymbolInfo]:
|
||||
"""Search for symbols across the entire workspace.
|
||||
|
||||
Uses prefix matching for efficient searching.
|
||||
|
||||
Args:
|
||||
project_root: Project root directory (for index location)
|
||||
query: Search query (prefix match)
|
||||
kind_filter: Optional list of symbol kinds to include
|
||||
(e.g., ["class", "function"])
|
||||
file_pattern: Optional glob pattern to filter by file path
|
||||
(e.g., "*.py", "src/**/*.ts")
|
||||
limit: Maximum number of results to return
|
||||
|
||||
Returns:
|
||||
List of SymbolInfo sorted by score
|
||||
|
||||
Raises:
|
||||
IndexNotFoundError: If project is not indexed
|
||||
"""
|
||||
project_path = resolve_project(project_root)
|
||||
|
||||
# Get project info from registry
|
||||
registry = RegistryStore()
|
||||
project_info = registry.get_project(project_path)
|
||||
if project_info is None:
|
||||
raise IndexNotFoundError(f"Project not indexed: {project_path}")
|
||||
|
||||
# Open global symbol index
|
||||
index_db = project_info.index_root / "_global_symbols.db"
|
||||
if not index_db.exists():
|
||||
raise IndexNotFoundError(f"Global symbol index not found: {index_db}")
|
||||
|
||||
global_index = GlobalSymbolIndex(str(index_db), project_info.id)
|
||||
|
||||
# Search with prefix matching
|
||||
# If kind_filter has multiple kinds, we need to search for each
|
||||
all_results: List[Symbol] = []
|
||||
|
||||
if kind_filter and len(kind_filter) > 0:
|
||||
# Search for each kind separately
|
||||
for kind in kind_filter:
|
||||
results = global_index.search(
|
||||
name=query,
|
||||
kind=kind,
|
||||
limit=limit,
|
||||
prefix_mode=True
|
||||
)
|
||||
all_results.extend(results)
|
||||
else:
|
||||
# Search without kind filter
|
||||
all_results = global_index.search(
|
||||
name=query,
|
||||
kind=None,
|
||||
limit=limit,
|
||||
prefix_mode=True
|
||||
)
|
||||
|
||||
logger.debug(f"Found {len(all_results)} symbols matching '{query}'")
|
||||
|
||||
# Apply file pattern filter if specified
|
||||
if file_pattern:
|
||||
all_results = [
|
||||
sym for sym in all_results
|
||||
if sym.file and fnmatch.fnmatch(sym.file, file_pattern)
|
||||
]
|
||||
logger.debug(f"After file filter '{file_pattern}': {len(all_results)} symbols")
|
||||
|
||||
# Convert to SymbolInfo and sort by relevance
|
||||
symbols = [
|
||||
SymbolInfo(
|
||||
name=sym.name,
|
||||
kind=sym.kind,
|
||||
file_path=sym.file or "",
|
||||
line=sym.range[0] if sym.range else 1,
|
||||
container=None, # Could extract from parent
|
||||
score=_calculate_score(sym.name, query)
|
||||
)
|
||||
for sym in all_results
|
||||
]
|
||||
|
||||
# Sort by score (exact matches first)
|
||||
symbols.sort(key=lambda s: s.score, reverse=True)
|
||||
|
||||
return symbols[:limit]
|
||||
|
||||
|
||||
def _calculate_score(symbol_name: str, query: str) -> float:
|
||||
"""Calculate relevance score for a symbol match.
|
||||
|
||||
Scoring:
|
||||
- Exact match: 1.0
|
||||
- Prefix match: 0.8 + 0.2 * (query_len / symbol_len)
|
||||
- Case-insensitive match: 0.6
|
||||
|
||||
Args:
|
||||
symbol_name: The matched symbol name
|
||||
query: The search query
|
||||
|
||||
Returns:
|
||||
Score between 0.0 and 1.0
|
||||
"""
|
||||
if symbol_name == query:
|
||||
return 1.0
|
||||
|
||||
if symbol_name.lower() == query.lower():
|
||||
return 0.9
|
||||
|
||||
if symbol_name.startswith(query):
|
||||
ratio = len(query) / len(symbol_name)
|
||||
return 0.8 + 0.2 * ratio
|
||||
|
||||
if symbol_name.lower().startswith(query.lower()):
|
||||
ratio = len(query) / len(symbol_name)
|
||||
return 0.6 + 0.2 * ratio
|
||||
|
||||
return 0.5
|
||||
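Concretely, for a query of "auth" the scorer yields the following; the symbol names are hypothetical:

```python
# Worked examples of _calculate_score; symbol names are hypothetical.
_calculate_score("auth", "auth")          # 1.0   exact match
_calculate_score("Auth", "auth")          # 0.9   case-insensitive exact
_calculate_score("authenticate", "auth")  # 0.8 + 0.2 * (4/12) ≈ 0.867
_calculate_score("Authenticate", "auth")  # 0.6 + 0.2 * (4/12) ≈ 0.667
```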
codex-lens/build/lib/codexlens/api/utils.py (new file, 153 lines)
@@ -0,0 +1,153 @@
"""Utility functions for the codexlens API.
|
||||
|
||||
This module provides helper functions for:
|
||||
- Project resolution
|
||||
- Relationship type normalization
|
||||
- Result ranking by proximity
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, TypeVar, Callable
|
||||
|
||||
from .models import DefinitionResult


# Type variable for generic ranking
T = TypeVar('T')


def resolve_project(project_root: str) -> Path:
    """Resolve and validate project root path.

    Args:
        project_root: Path to project root (relative or absolute)

    Returns:
        Resolved absolute Path

    Raises:
        ValueError: If path does not exist or is not a directory
    """
    path = Path(project_root).resolve()
    if not path.exists():
        raise ValueError(f"Project root does not exist: {path}")
    if not path.is_dir():
        raise ValueError(f"Project root is not a directory: {path}")
    return path


# Relationship type normalization mapping
_RELATIONSHIP_NORMALIZATION = {
    # Plural to singular
    "calls": "call",
    "imports": "import",
    "inherits": "inheritance",
    "uses": "use",
    # Already normalized (passthrough)
    "call": "call",
    "import": "import",
    "inheritance": "inheritance",
    "use": "use",
    "type_annotation": "type_annotation",
}


def normalize_relationship_type(relationship: str) -> str:
    """Normalize relationship type to canonical form.

    Converts plural forms and variations to standard singular forms:
    - 'calls' -> 'call'
    - 'imports' -> 'import'
    - 'inherits' -> 'inheritance'
    - 'uses' -> 'use'

    Args:
        relationship: Raw relationship type string

    Returns:
        Normalized relationship type

    Examples:
        >>> normalize_relationship_type('calls')
        'call'
        >>> normalize_relationship_type('inherits')
        'inheritance'
        >>> normalize_relationship_type('call')
        'call'
    """
    return _RELATIONSHIP_NORMALIZATION.get(relationship.lower(), relationship)


def rank_by_proximity(
    results: List[DefinitionResult],
    file_context: Optional[str] = None
) -> List[DefinitionResult]:
    """Rank results by file path proximity to context.

    V1 Implementation: Uses path-based proximity scoring.

    Scoring algorithm:
    1. Same directory: highest score (100)
    2. Otherwise: length of common path prefix

    Args:
        results: List of definition results to rank
        file_context: Reference file path for proximity calculation.
            If None, returns results unchanged.

    Returns:
        Results sorted by proximity score (highest first)

    Examples:
        >>> results = [
        ...     DefinitionResult(name="foo", kind="function",
        ...                      file_path="/a/b/c.py", line=1, end_line=10),
        ...     DefinitionResult(name="foo", kind="function",
        ...                      file_path="/a/x/y.py", line=1, end_line=10),
        ... ]
        >>> ranked = rank_by_proximity(results, "/a/b/test.py")
        >>> ranked[0].file_path
        '/a/b/c.py'
    """
    if not file_context or not results:
        return results

    def proximity_score(result: DefinitionResult) -> int:
        """Calculate proximity score for a result."""
        result_dir = os.path.dirname(result.file_path)
        context_dir = os.path.dirname(file_context)

        # Same directory gets highest score
        if result_dir == context_dir:
            return 100

        # Otherwise, score by common path prefix length
        try:
            common = os.path.commonpath([result.file_path, file_context])
            return len(common)
        except ValueError:
            # No common path (different drives on Windows)
            return 0

    return sorted(results, key=proximity_score, reverse=True)


def rank_by_score(
    results: List[T],
    score_fn: Callable[[T], float],
    reverse: bool = True
) -> List[T]:
    """Generic ranking function by custom score.

    Args:
        results: List of items to rank
        score_fn: Function to extract score from item
        reverse: If True, highest scores first (default)

    Returns:
        Sorted list
    """
    return sorted(results, key=score_fn, reverse=reverse)
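A minimal usage sketch of the two ranking helpers above, assuming they are importable from this module; the `DefinitionResult` stub is hypothetical and mirrors only the fields the docstring examples use:

```python
from dataclasses import dataclass

# Hypothetical stand-in for codexlens' DefinitionResult; only the fields
# exercised by rank_by_proximity / rank_by_score are modeled here.
@dataclass
class DefinitionResult:
    name: str
    kind: str
    file_path: str
    line: int
    end_line: int

results = [
    DefinitionResult("foo", "function", "/a/x/y.py", 1, 10),
    DefinitionResult("foo", "function", "/a/b/c.py", 1, 40),
]

# Same-directory match wins: the fixed score of 100 beats any common-prefix length.
ranked = rank_by_proximity(results, file_context="/a/b/test.py")
assert ranked[0].file_path == "/a/b/c.py"

# Generic ranking: longest definition span first.
longest_first = rank_by_score(results, score_fn=lambda r: r.end_line - r.line)
```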
27
codex-lens/build/lib/codexlens/cli/__init__.py
Normal file
@@ -0,0 +1,27 @@
"""CLI package for CodexLens."""

from __future__ import annotations

import sys
import os

# Force UTF-8 encoding for Windows console
# This ensures Chinese characters display correctly instead of GBK garbled text
if sys.platform == "win32":
    # Set environment variable for Python I/O encoding
    os.environ.setdefault("PYTHONIOENCODING", "utf-8")

    # Reconfigure stdout/stderr to use UTF-8 if possible
    try:
        if hasattr(sys.stdout, "reconfigure"):
            sys.stdout.reconfigure(encoding="utf-8", errors="replace")
        if hasattr(sys.stderr, "reconfigure"):
            sys.stderr.reconfigure(encoding="utf-8", errors="replace")
    except Exception:
        # Fallback: some environments don't support reconfigure
        pass

from .commands import app

__all__ = ["app"]
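A quick sanity check of the guard above (a hypothetical snippet, not part of the package):

```python
import sys
import codexlens.cli  # noqa: F401  # importing triggers the win32 UTF-8 guard

# On win32 consoles that support reconfigure this prints "utf-8";
# on other platforms the guard is skipped and the native encoding remains.
print(sys.stdout.encoding)
```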
4494
codex-lens/build/lib/codexlens/cli/commands.py
Normal file
File diff suppressed because it is too large
2001
codex-lens/build/lib/codexlens/cli/embedding_manager.py
Normal file
File diff suppressed because it is too large
1026
codex-lens/build/lib/codexlens/cli/model_manager.py
Normal file
File diff suppressed because it is too large
135
codex-lens/build/lib/codexlens/cli/output.py
Normal file
@@ -0,0 +1,135 @@
"""Rich and JSON output helpers for CodexLens CLI."""

from __future__ import annotations

import json
import sys
from dataclasses import asdict, is_dataclass
from pathlib import Path
from typing import Any, Iterable, Mapping, Sequence

from rich.console import Console
from rich.table import Table
from rich.text import Text

from codexlens.entities import SearchResult, Symbol

# Force UTF-8 encoding for Windows console to properly display Chinese text
# Use force_terminal=True and legacy_windows=False to avoid GBK encoding issues
console = Console(force_terminal=True, legacy_windows=False)


def _to_jsonable(value: Any) -> Any:
    if value is None:
        return None
    if hasattr(value, "model_dump"):
        return value.model_dump()
    if is_dataclass(value):
        return asdict(value)
    if isinstance(value, Path):
        return str(value)
    if isinstance(value, Mapping):
        return {k: _to_jsonable(v) for k, v in value.items()}
    if isinstance(value, (list, tuple, set)):
        return [_to_jsonable(v) for v in value]
    return value


def print_json(*, success: bool, result: Any = None, error: str | None = None, **kwargs: Any) -> None:
    """Print JSON output with optional additional fields.

    Args:
        success: Whether the operation succeeded
        result: Result data (used when success=True)
        error: Error message (used when success=False)
        **kwargs: Additional fields to include in the payload (e.g., code, details)
    """
    payload: dict[str, Any] = {"success": success}
    if success:
        payload["result"] = _to_jsonable(result)
    else:
        payload["error"] = error or "Unknown error"
    # Include additional error details if provided
    for key, value in kwargs.items():
        payload[key] = _to_jsonable(value)
    console.print_json(json.dumps(payload, ensure_ascii=False))


def render_search_results(
    results: Sequence[SearchResult], *, title: str = "Search Results", verbose: bool = False
) -> None:
    """Render search results with optional source tags in verbose mode.

    Args:
        results: Search results to display
        title: Table title
        verbose: If True, show search source tags ([E], [F], [V]) and fusion scores
    """
    table = Table(title=title, show_lines=False)

    if verbose:
        # Verbose mode: show source tags
        table.add_column("Source", style="dim", width=6, justify="center")

    table.add_column("Path", style="cyan", no_wrap=True)
    table.add_column("Score", style="magenta", justify="right")
    table.add_column("Excerpt", style="white")

    for res in results:
        excerpt = res.excerpt or ""
        score_str = f"{res.score:.3f}"

        if verbose:
            # Extract search source tag if available
            source = getattr(res, "search_source", None)
            source_tag = ""
            if source == "exact":
                source_tag = "[E]"
            elif source == "fuzzy":
                source_tag = "[F]"
            elif source == "vector":
                source_tag = "[V]"
            elif source == "fusion":
                source_tag = "[RRF]"
            table.add_row(source_tag, res.path, score_str, excerpt)
        else:
            table.add_row(res.path, score_str, excerpt)

    console.print(table)


def render_symbols(symbols: Sequence[Symbol], *, title: str = "Symbols") -> None:
    table = Table(title=title)
    table.add_column("Name", style="green")
    table.add_column("Kind", style="yellow")
    table.add_column("Range", style="white", justify="right")

    for sym in symbols:
        start, end = sym.range
        table.add_row(sym.name, sym.kind, f"{start}-{end}")

    console.print(table)


def render_status(stats: Mapping[str, Any]) -> None:
    table = Table(title="Index Status")
    table.add_column("Metric", style="cyan")
    table.add_column("Value", style="white")

    for key, value in stats.items():
        if isinstance(value, Mapping):
            value_text = ", ".join(f"{k}:{v}" for k, v in value.items())
        elif isinstance(value, (list, tuple)):
            value_text = ", ".join(str(v) for v in value)
        else:
            value_text = str(value)
        table.add_row(str(key), value_text)

    console.print(table)


def render_file_inspect(path: str, language: str, symbols: Iterable[Symbol]) -> None:
    header = Text.assemble(("File: ", "bold"), (path, "cyan"), (" Language: ", "bold"), (language, "green"))
    console.print(header)
    render_symbols(list(symbols), title="Discovered Symbols")
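A sketch of the payload shapes `print_json` emits, assuming the module above is importable as `codexlens.cli.output`; the field values are hypothetical:

```python
from codexlens.cli.output import print_json

# Success path: payload is {"success": true, "result": ...}
print_json(success=True, result={"files_indexed": 42})

# Failure path: payload is {"success": false, "error": ...} plus any extra
# keyword fields, which are passed through _to_jsonable as well.
print_json(success=False, error="index not found", code="E_NO_INDEX")
```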
692
codex-lens/build/lib/codexlens/config.py
Normal file
@@ -0,0 +1,692 @@
"""Configuration system for CodexLens."""

from __future__ import annotations

import json
import logging
import os
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
from typing import Any, Dict, List, Optional

from .errors import ConfigError


# Workspace-local directory name
WORKSPACE_DIR_NAME = ".codexlens"

# Settings file name
SETTINGS_FILE_NAME = "settings.json"

# SPLADE index database name (centralized storage)
SPLADE_DB_NAME = "_splade.db"

# Dense vector storage names (centralized storage)
VECTORS_HNSW_NAME = "_vectors.hnsw"
VECTORS_META_DB_NAME = "_vectors_meta.db"
BINARY_VECTORS_MMAP_NAME = "_binary_vectors.mmap"

log = logging.getLogger(__name__)


def _default_global_dir() -> Path:
    """Get global CodexLens data directory."""
    env_override = os.getenv("CODEXLENS_DATA_DIR")
    if env_override:
        return Path(env_override).expanduser().resolve()
    return (Path.home() / ".codexlens").resolve()


def find_workspace_root(start_path: Path) -> Optional[Path]:
    """Find the workspace root by looking for .codexlens directory.

    Searches from start_path upward to find an existing .codexlens directory.
    Returns None if not found.
    """
    current = start_path.resolve()

    # Search up to filesystem root
    while current != current.parent:
        workspace_dir = current / WORKSPACE_DIR_NAME
        if workspace_dir.is_dir():
            return current
        current = current.parent

    # Check root as well
    workspace_dir = current / WORKSPACE_DIR_NAME
    if workspace_dir.is_dir():
        return current

    return None
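A short sketch of the upward discovery above; the paths are hypothetical and assume `/repo/.codexlens` exists on disk:

```python
from pathlib import Path

# Any start path nested under the workspace resolves to its root;
# a path outside any workspace yields None.
root = find_workspace_root(Path("/repo/src/pkg/module"))  # -> Path("/repo")
outside = find_workspace_root(Path("/tmp/unrelated"))     # -> None
```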
@dataclass
class Config:
    """Runtime configuration for CodexLens.

    - data_dir: Base directory for all persistent CodexLens data.
    - venv_path: Optional virtualenv used for language tooling.
    - supported_languages: Language IDs and their associated file extensions.
    - parsing_rules: Per-language parsing and chunking hints.
    """

    data_dir: Path = field(default_factory=_default_global_dir)
    venv_path: Path = field(default_factory=lambda: _default_global_dir() / "venv")
    supported_languages: Dict[str, Dict[str, Any]] = field(
        default_factory=lambda: {
            # Source code languages (category: "code")
            "python": {"extensions": [".py"], "tree_sitter_language": "python", "category": "code"},
            "javascript": {"extensions": [".js", ".jsx"], "tree_sitter_language": "javascript", "category": "code"},
            "typescript": {"extensions": [".ts", ".tsx"], "tree_sitter_language": "typescript", "category": "code"},
            "java": {"extensions": [".java"], "tree_sitter_language": "java", "category": "code"},
            "go": {"extensions": [".go"], "tree_sitter_language": "go", "category": "code"},
            "zig": {"extensions": [".zig"], "tree_sitter_language": "zig", "category": "code"},
            "objective-c": {"extensions": [".m", ".mm"], "tree_sitter_language": "objc", "category": "code"},
            "c": {"extensions": [".c", ".h"], "tree_sitter_language": "c", "category": "code"},
            "cpp": {"extensions": [".cc", ".cpp", ".hpp", ".cxx"], "tree_sitter_language": "cpp", "category": "code"},
            "rust": {"extensions": [".rs"], "tree_sitter_language": "rust", "category": "code"},
        }
    )
    parsing_rules: Dict[str, Dict[str, Any]] = field(
        default_factory=lambda: {
            "default": {
                "max_chunk_chars": 4000,
                "max_chunk_lines": 200,
                "overlap_lines": 20,
            }
        }
    )

    llm_enabled: bool = False
    llm_tool: str = "gemini"
    llm_timeout_ms: int = 300000
    llm_batch_size: int = 5

    # Hybrid chunker configuration
    hybrid_max_chunk_size: int = 2000  # Max characters per chunk before LLM refinement
    hybrid_llm_refinement: bool = False  # Enable LLM-based semantic boundary refinement

    # Embedding configuration
    embedding_backend: str = "fastembed"  # "fastembed" (local) or "litellm" (API)
    embedding_model: str = "code"  # For fastembed: profile (fast/code/multilingual/balanced)
    # For litellm: model name from config (e.g., "qwen3-embedding")
    embedding_use_gpu: bool = True  # For fastembed: whether to use GPU acceleration

    # SPLADE sparse retrieval configuration
    enable_splade: bool = False  # Disable SPLADE by default (slow ~360ms, use FTS instead)
    splade_model: str = "naver/splade-cocondenser-ensembledistil"
    splade_threshold: float = 0.01  # Min weight to store in index
    splade_onnx_path: Optional[str] = None  # Custom ONNX model path

    # FTS fallback (enabled by default, toggleable via --use-fts)
    use_fts_fallback: bool = True  # Use FTS for sparse search (fast, SPLADE disabled)

    # Indexing/search optimizations
    global_symbol_index_enabled: bool = True  # Enable project-wide symbol index fast path
    enable_merkle_detection: bool = True  # Enable content-hash based incremental indexing

    # Graph expansion (search-time, uses precomputed neighbors)
    enable_graph_expansion: bool = False
    graph_expansion_depth: int = 2

    # Optional search reranking (disabled by default)
    enable_reranking: bool = False
    reranking_top_k: int = 50
    symbol_boost_factor: float = 1.5

    # Optional cross-encoder reranking (second stage; requires optional reranker deps)
    enable_cross_encoder_rerank: bool = False
    reranker_backend: str = "onnx"
    reranker_model: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"
    reranker_top_k: int = 50
    reranker_max_input_tokens: int = 8192  # Maximum tokens for reranker API batching
    reranker_chunk_type_weights: Optional[Dict[str, float]] = None  # Weights for chunk types: {"code": 1.0, "docstring": 0.7}
    reranker_test_file_penalty: float = 0.0  # Penalty for test files (0.0-1.0, e.g., 0.2 = 20% reduction)

    # Chunk stripping configuration (for semantic embedding)
    chunk_strip_comments: bool = True  # Strip comments from code chunks
    chunk_strip_docstrings: bool = True  # Strip docstrings from code chunks

    # Cascade search configuration (two-stage retrieval)
    enable_cascade_search: bool = False  # Enable cascade search (coarse + fine ranking)
    cascade_coarse_k: int = 100  # Number of coarse candidates from first stage
    cascade_fine_k: int = 10  # Number of final results after reranking
    cascade_strategy: str = "binary"  # "binary" (fast binary+dense) or "hybrid" (FTS+SPLADE+Vector+CrossEncoder)

    # Staged cascade search configuration (4-stage pipeline)
    staged_coarse_k: int = 200  # Number of coarse candidates from Stage 1 binary search
    staged_lsp_depth: int = 2  # LSP relationship expansion depth in Stage 2
    staged_clustering_strategy: str = "auto"  # "auto", "hdbscan", "dbscan", "frequency", "noop"
    staged_clustering_min_size: int = 3  # Minimum cluster size for Stage 3 grouping
    enable_staged_rerank: bool = True  # Enable optional cross-encoder reranking in Stage 4

    # RRF fusion configuration
    fusion_method: str = "rrf"  # "simple" (weighted sum) or "rrf" (reciprocal rank fusion)
    rrf_k: int = 60  # RRF constant (default 60)

    # Category-based filtering to separate code/doc results
    enable_category_filter: bool = True  # Enable code/doc result separation

    # Multi-endpoint configuration for litellm backend
    embedding_endpoints: List[Dict[str, Any]] = field(default_factory=list)
    # List of endpoint configs: [{"model": "...", "api_key": "...", "api_base": "...", "weight": 1.0}]
    embedding_pool_enabled: bool = False  # Enable high availability pool for embeddings
    embedding_strategy: str = "latency_aware"  # round_robin, latency_aware, weighted_random
    embedding_cooldown: float = 60.0  # Default cooldown seconds for rate-limited endpoints

    # Reranker multi-endpoint configuration
    reranker_pool_enabled: bool = False  # Enable high availability pool for reranker
    reranker_strategy: str = "latency_aware"  # round_robin, latency_aware, weighted_random
    reranker_cooldown: float = 60.0  # Default cooldown seconds for rate-limited endpoints

    # API concurrency settings
    api_max_workers: int = 4  # Max concurrent API calls for embedding/reranking
    api_batch_size: int = 8  # Batch size for API requests
    api_batch_size_dynamic: bool = False  # Enable dynamic batch size calculation
    api_batch_size_utilization_factor: float = 0.8  # Use 80% of model token capacity
    api_batch_size_max: int = 2048  # Absolute upper limit for batch size
    chars_per_token_estimate: int = 4  # Characters per token estimation ratio

    def __post_init__(self) -> None:
        try:
            self.data_dir = self.data_dir.expanduser().resolve()
            self.venv_path = self.venv_path.expanduser().resolve()
            self.data_dir.mkdir(parents=True, exist_ok=True)
        except PermissionError as exc:
            raise ConfigError(
                f"Permission denied initializing paths (data_dir={self.data_dir}, venv_path={self.venv_path}) "
                f"[{type(exc).__name__}]: {exc}"
            ) from exc
        except OSError as exc:
            raise ConfigError(
                f"Filesystem error initializing paths (data_dir={self.data_dir}, venv_path={self.venv_path}) "
                f"[{type(exc).__name__}]: {exc}"
            ) from exc
        except Exception as exc:
            raise ConfigError(
                f"Unexpected error initializing paths (data_dir={self.data_dir}, venv_path={self.venv_path}) "
                f"[{type(exc).__name__}]: {exc}"
            ) from exc

    @cached_property
    def cache_dir(self) -> Path:
        """Directory for transient caches."""
        return self.data_dir / "cache"

    @cached_property
    def index_dir(self) -> Path:
        """Directory where index artifacts are stored."""
        return self.data_dir / "index"

    @cached_property
    def db_path(self) -> Path:
        """Default SQLite index path."""
        return self.index_dir / "codexlens.db"

    def ensure_runtime_dirs(self) -> None:
        """Create standard runtime directories if missing."""
        for directory in (self.cache_dir, self.index_dir):
            try:
                directory.mkdir(parents=True, exist_ok=True)
            except PermissionError as exc:
                raise ConfigError(
                    f"Permission denied creating directory {directory} [{type(exc).__name__}]: {exc}"
                ) from exc
            except OSError as exc:
                raise ConfigError(
                    f"Filesystem error creating directory {directory} [{type(exc).__name__}]: {exc}"
                ) from exc
            except Exception as exc:
                raise ConfigError(
                    f"Unexpected error creating directory {directory} [{type(exc).__name__}]: {exc}"
                ) from exc

    def language_for_path(self, path: str | Path) -> str | None:
        """Infer a supported language ID from a file path."""
        extension = Path(path).suffix.lower()
        for language_id, spec in self.supported_languages.items():
            extensions: List[str] = spec.get("extensions", [])
            if extension in extensions:
                return language_id
        return None

    def category_for_path(self, path: str | Path) -> str | None:
        """Get file category ('code' or 'doc') from a file path."""
        language = self.language_for_path(path)
        if language is None:
            return None
        spec = self.supported_languages.get(language, {})
        return spec.get("category")

    def rules_for_language(self, language_id: str) -> Dict[str, Any]:
        """Get parsing rules for a specific language, falling back to defaults."""
        return {**self.parsing_rules.get("default", {}), **self.parsing_rules.get(language_id, {})}

    @cached_property
    def settings_path(self) -> Path:
        """Path to the settings file."""
        return self.data_dir / SETTINGS_FILE_NAME

    def save_settings(self) -> None:
        """Save embedding and other settings to file."""
        embedding_config = {
            "backend": self.embedding_backend,
            "model": self.embedding_model,
            "use_gpu": self.embedding_use_gpu,
            "pool_enabled": self.embedding_pool_enabled,
            "strategy": self.embedding_strategy,
            "cooldown": self.embedding_cooldown,
        }
        # Include multi-endpoint config if present
        if self.embedding_endpoints:
            embedding_config["endpoints"] = self.embedding_endpoints

        settings = {
            "embedding": embedding_config,
            "llm": {
                "enabled": self.llm_enabled,
                "tool": self.llm_tool,
                "timeout_ms": self.llm_timeout_ms,
                "batch_size": self.llm_batch_size,
            },
            "reranker": {
                "enabled": self.enable_cross_encoder_rerank,
                "backend": self.reranker_backend,
                "model": self.reranker_model,
                "top_k": self.reranker_top_k,
                "max_input_tokens": self.reranker_max_input_tokens,
                "pool_enabled": self.reranker_pool_enabled,
                "strategy": self.reranker_strategy,
                "cooldown": self.reranker_cooldown,
            },
            "cascade": {
                "strategy": self.cascade_strategy,
                "coarse_k": self.cascade_coarse_k,
                "fine_k": self.cascade_fine_k,
            },
            "api": {
                "max_workers": self.api_max_workers,
                "batch_size": self.api_batch_size,
                "batch_size_dynamic": self.api_batch_size_dynamic,
                "batch_size_utilization_factor": self.api_batch_size_utilization_factor,
                "batch_size_max": self.api_batch_size_max,
                "chars_per_token_estimate": self.chars_per_token_estimate,
            },
        }
        with open(self.settings_path, "w", encoding="utf-8") as f:
            json.dump(settings, f, indent=2)

    def load_settings(self) -> None:
        """Load settings from file if exists."""
        if not self.settings_path.exists():
            return

        try:
            with open(self.settings_path, "r", encoding="utf-8") as f:
                settings = json.load(f)

            # Load embedding settings
            embedding = settings.get("embedding", {})
            if "backend" in embedding:
                backend = embedding["backend"]
                # Support 'api' as alias for 'litellm'
                if backend == "api":
                    backend = "litellm"
                if backend in {"fastembed", "litellm"}:
                    self.embedding_backend = backend
                else:
                    log.warning(
                        "Invalid embedding backend in %s: %r (expected 'fastembed' or 'litellm')",
                        self.settings_path,
                        embedding["backend"],
                    )
            if "model" in embedding:
                self.embedding_model = embedding["model"]
            if "use_gpu" in embedding:
                self.embedding_use_gpu = embedding["use_gpu"]

            # Load multi-endpoint configuration
            if "endpoints" in embedding:
                self.embedding_endpoints = embedding["endpoints"]
            if "pool_enabled" in embedding:
                self.embedding_pool_enabled = embedding["pool_enabled"]
            if "strategy" in embedding:
                self.embedding_strategy = embedding["strategy"]
            if "cooldown" in embedding:
                self.embedding_cooldown = embedding["cooldown"]

            # Load LLM settings
            llm = settings.get("llm", {})
            if "enabled" in llm:
                self.llm_enabled = llm["enabled"]
            if "tool" in llm:
                self.llm_tool = llm["tool"]
            if "timeout_ms" in llm:
                self.llm_timeout_ms = llm["timeout_ms"]
            if "batch_size" in llm:
                self.llm_batch_size = llm["batch_size"]

            # Load reranker settings
            reranker = settings.get("reranker", {})
            if "enabled" in reranker:
                self.enable_cross_encoder_rerank = reranker["enabled"]
            if "backend" in reranker:
                backend = reranker["backend"]
                if backend in {"fastembed", "onnx", "api", "litellm", "legacy"}:
                    self.reranker_backend = backend
                else:
                    log.warning(
                        "Invalid reranker backend in %s: %r (expected 'fastembed', 'onnx', 'api', 'litellm', or 'legacy')",
                        self.settings_path,
                        backend,
                    )
            if "model" in reranker:
                self.reranker_model = reranker["model"]
            if "top_k" in reranker:
                self.reranker_top_k = reranker["top_k"]
            if "max_input_tokens" in reranker:
                self.reranker_max_input_tokens = reranker["max_input_tokens"]
            if "pool_enabled" in reranker:
                self.reranker_pool_enabled = reranker["pool_enabled"]
            if "strategy" in reranker:
                self.reranker_strategy = reranker["strategy"]
            if "cooldown" in reranker:
                self.reranker_cooldown = reranker["cooldown"]

            # Load cascade settings
            cascade = settings.get("cascade", {})
            if "strategy" in cascade:
                strategy = cascade["strategy"]
                if strategy in {"binary", "hybrid", "binary_rerank", "dense_rerank"}:
                    self.cascade_strategy = strategy
                else:
                    log.warning(
                        "Invalid cascade strategy in %s: %r (expected 'binary', 'hybrid', 'binary_rerank', or 'dense_rerank')",
                        self.settings_path,
                        strategy,
                    )
            if "coarse_k" in cascade:
                self.cascade_coarse_k = cascade["coarse_k"]
            if "fine_k" in cascade:
                self.cascade_fine_k = cascade["fine_k"]

            # Load API settings
            api = settings.get("api", {})
            if "max_workers" in api:
                self.api_max_workers = api["max_workers"]
            if "batch_size" in api:
                self.api_batch_size = api["batch_size"]
            if "batch_size_dynamic" in api:
                self.api_batch_size_dynamic = api["batch_size_dynamic"]
            if "batch_size_utilization_factor" in api:
                self.api_batch_size_utilization_factor = api["batch_size_utilization_factor"]
            if "batch_size_max" in api:
                self.api_batch_size_max = api["batch_size_max"]
            if "chars_per_token_estimate" in api:
                self.chars_per_token_estimate = api["chars_per_token_estimate"]
        except Exception as exc:
            log.warning(
                "Failed to load settings from %s (%s): %s",
                self.settings_path,
                type(exc).__name__,
                exc,
            )

        # Apply .env overrides (highest priority)
        self._apply_env_overrides()

    def _apply_env_overrides(self) -> None:
        """Apply environment variable overrides from .env file.

        Priority: default → settings.json → .env (highest)

        Supported variables (with or without CODEXLENS_ prefix):
            EMBEDDING_MODEL: Override embedding model/profile
            EMBEDDING_BACKEND: Override embedding backend (fastembed/litellm)
            EMBEDDING_POOL_ENABLED: Enable embedding high availability pool
            EMBEDDING_STRATEGY: Load balance strategy for embedding
            EMBEDDING_COOLDOWN: Rate limit cooldown for embedding
            RERANKER_MODEL: Override reranker model
            RERANKER_BACKEND: Override reranker backend
            RERANKER_ENABLED: Override reranker enabled state (true/false)
            RERANKER_POOL_ENABLED: Enable reranker high availability pool
            RERANKER_STRATEGY: Load balance strategy for reranker
            RERANKER_COOLDOWN: Rate limit cooldown for reranker
        """
        from .env_config import load_global_env

        env_vars = load_global_env()
        if not env_vars:
            return

        def get_env(key: str) -> str | None:
            """Get env var with or without CODEXLENS_ prefix."""
            # Check prefixed version first (Dashboard format), then unprefixed
            return env_vars.get(f"CODEXLENS_{key}") or env_vars.get(key)

        # Embedding overrides
        embedding_model = get_env("EMBEDDING_MODEL")
        if embedding_model:
            self.embedding_model = embedding_model
            log.debug("Overriding embedding_model from .env: %s", self.embedding_model)

        embedding_backend = get_env("EMBEDDING_BACKEND")
        if embedding_backend:
            backend = embedding_backend.lower()
            # Support 'api' as alias for 'litellm'
            if backend == "api":
                backend = "litellm"
            if backend in {"fastembed", "litellm"}:
                self.embedding_backend = backend
                log.debug("Overriding embedding_backend from .env: %s", backend)
            else:
                log.warning("Invalid EMBEDDING_BACKEND in .env: %r", embedding_backend)

        embedding_pool = get_env("EMBEDDING_POOL_ENABLED")
        if embedding_pool:
            value = embedding_pool.lower()
            self.embedding_pool_enabled = value in {"true", "1", "yes", "on"}
            log.debug("Overriding embedding_pool_enabled from .env: %s", self.embedding_pool_enabled)

        embedding_strategy = get_env("EMBEDDING_STRATEGY")
        if embedding_strategy:
            strategy = embedding_strategy.lower()
            if strategy in {"round_robin", "latency_aware", "weighted_random"}:
                self.embedding_strategy = strategy
                log.debug("Overriding embedding_strategy from .env: %s", strategy)
            else:
                log.warning("Invalid EMBEDDING_STRATEGY in .env: %r", embedding_strategy)

        embedding_cooldown = get_env("EMBEDDING_COOLDOWN")
        if embedding_cooldown:
            try:
                self.embedding_cooldown = float(embedding_cooldown)
                log.debug("Overriding embedding_cooldown from .env: %s", self.embedding_cooldown)
            except ValueError:
                log.warning("Invalid EMBEDDING_COOLDOWN in .env: %r", embedding_cooldown)

        # Reranker overrides
        reranker_model = get_env("RERANKER_MODEL")
        if reranker_model:
            self.reranker_model = reranker_model
            log.debug("Overriding reranker_model from .env: %s", self.reranker_model)

        reranker_backend = get_env("RERANKER_BACKEND")
        if reranker_backend:
            backend = reranker_backend.lower()
            if backend in {"fastembed", "onnx", "api", "litellm", "legacy"}:
                self.reranker_backend = backend
                log.debug("Overriding reranker_backend from .env: %s", backend)
            else:
                log.warning("Invalid RERANKER_BACKEND in .env: %r", reranker_backend)

        reranker_enabled = get_env("RERANKER_ENABLED")
        if reranker_enabled:
            value = reranker_enabled.lower()
            self.enable_cross_encoder_rerank = value in {"true", "1", "yes", "on"}
            log.debug("Overriding reranker_enabled from .env: %s", self.enable_cross_encoder_rerank)

        reranker_pool = get_env("RERANKER_POOL_ENABLED")
        if reranker_pool:
            value = reranker_pool.lower()
            self.reranker_pool_enabled = value in {"true", "1", "yes", "on"}
            log.debug("Overriding reranker_pool_enabled from .env: %s", self.reranker_pool_enabled)

        reranker_strategy = get_env("RERANKER_STRATEGY")
        if reranker_strategy:
            strategy = reranker_strategy.lower()
            if strategy in {"round_robin", "latency_aware", "weighted_random"}:
                self.reranker_strategy = strategy
                log.debug("Overriding reranker_strategy from .env: %s", strategy)
            else:
                log.warning("Invalid RERANKER_STRATEGY in .env: %r", reranker_strategy)

        reranker_cooldown = get_env("RERANKER_COOLDOWN")
        if reranker_cooldown:
            try:
                self.reranker_cooldown = float(reranker_cooldown)
                log.debug("Overriding reranker_cooldown from .env: %s", self.reranker_cooldown)
            except ValueError:
                log.warning("Invalid RERANKER_COOLDOWN in .env: %r", reranker_cooldown)

        reranker_max_tokens = get_env("RERANKER_MAX_INPUT_TOKENS")
        if reranker_max_tokens:
            try:
                self.reranker_max_input_tokens = int(reranker_max_tokens)
                log.debug("Overriding reranker_max_input_tokens from .env: %s", self.reranker_max_input_tokens)
            except ValueError:
                log.warning("Invalid RERANKER_MAX_INPUT_TOKENS in .env: %r", reranker_max_tokens)

        # Reranker tuning from environment
        test_penalty = get_env("RERANKER_TEST_FILE_PENALTY")
        if test_penalty:
            try:
                self.reranker_test_file_penalty = float(test_penalty)
                log.debug("Overriding reranker_test_file_penalty from .env: %s", self.reranker_test_file_penalty)
            except ValueError:
                log.warning("Invalid RERANKER_TEST_FILE_PENALTY in .env: %r", test_penalty)

        docstring_weight = get_env("RERANKER_DOCSTRING_WEIGHT")
        if docstring_weight:
            try:
                weight = float(docstring_weight)
                self.reranker_chunk_type_weights = {"code": 1.0, "docstring": weight}
                log.debug("Overriding reranker docstring weight from .env: %s", weight)
            except ValueError:
                log.warning("Invalid RERANKER_DOCSTRING_WEIGHT in .env: %r", docstring_weight)

        # Chunk stripping from environment
        strip_comments = get_env("CHUNK_STRIP_COMMENTS")
        if strip_comments:
            self.chunk_strip_comments = strip_comments.lower() in ("true", "1", "yes")
            log.debug("Overriding chunk_strip_comments from .env: %s", self.chunk_strip_comments)

        strip_docstrings = get_env("CHUNK_STRIP_DOCSTRINGS")
        if strip_docstrings:
            self.chunk_strip_docstrings = strip_docstrings.lower() in ("true", "1", "yes")
            log.debug("Overriding chunk_strip_docstrings from .env: %s", self.chunk_strip_docstrings)

    @classmethod
    def load(cls) -> "Config":
        """Load config with settings from file."""
        config = cls()
        config.load_settings()
        return config
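A sketch of the three-layer precedence implemented above (default → settings.json → .env); the file contents in the comments are hypothetical:

```python
# ~/.codexlens/settings.json (hypothetical):
#   {"embedding": {"backend": "fastembed", "model": "balanced"}}
#
# ~/.codexlens/.env (hypothetical, wins over settings.json):
#   EMBEDDING_MODEL=code
#   CODEXLENS_EMBEDDING_BACKEND=api   # 'api' is aliased to 'litellm'

config = Config.load()
# config.embedding_model   == "code"     (.env override applied)
# config.embedding_backend == "litellm"  (alias resolved; CODEXLENS_ prefix accepted)
```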
@dataclass
class WorkspaceConfig:
    """Workspace-local configuration for CodexLens.

    Stores index data in project/.codexlens/ directory.
    """

    workspace_root: Path

    def __post_init__(self) -> None:
        self.workspace_root = Path(self.workspace_root).resolve()

    @property
    def codexlens_dir(self) -> Path:
        """The .codexlens directory in workspace root."""
        return self.workspace_root / WORKSPACE_DIR_NAME

    @property
    def db_path(self) -> Path:
        """SQLite index path for this workspace."""
        return self.codexlens_dir / "index.db"

    @property
    def cache_dir(self) -> Path:
        """Cache directory for this workspace."""
        return self.codexlens_dir / "cache"

    @property
    def env_path(self) -> Path:
        """Path to workspace .env file."""
        return self.codexlens_dir / ".env"

    def load_env(self, *, override: bool = False) -> int:
        """Load .env file and apply to os.environ.

        Args:
            override: If True, override existing environment variables

        Returns:
            Number of variables applied
        """
        from .env_config import apply_workspace_env
        return apply_workspace_env(self.workspace_root, override=override)

    def get_api_config(self, prefix: str) -> dict:
        """Get API configuration from environment.

        Args:
            prefix: Environment variable prefix (e.g., "RERANKER", "EMBEDDING")

        Returns:
            Dictionary with api_key, api_base, model, etc.
        """
        from .env_config import get_api_config
        return get_api_config(prefix, workspace_root=self.workspace_root)

    def initialize(self) -> None:
        """Create the .codexlens directory structure."""
        try:
            self.codexlens_dir.mkdir(parents=True, exist_ok=True)
            self.cache_dir.mkdir(parents=True, exist_ok=True)

            # Create .gitignore to exclude cache but keep index
            gitignore_path = self.codexlens_dir / ".gitignore"
            if not gitignore_path.exists():
                gitignore_path.write_text(
                    "# CodexLens workspace data\n"
                    "cache/\n"
                    "*.log\n"
                    ".env\n"  # Exclude .env from git
                )
        except Exception as exc:
            raise ConfigError(f"Failed to initialize workspace at {self.codexlens_dir}: {exc}") from exc

    def exists(self) -> bool:
        """Check if workspace is already initialized."""
        return self.codexlens_dir.is_dir() and self.db_path.exists()

    @classmethod
    def from_path(cls, path: Path) -> Optional["WorkspaceConfig"]:
        """Create WorkspaceConfig from a path by finding workspace root.

        Returns None if no workspace found.
        """
        root = find_workspace_root(path)
        if root is None:
            return None
        return cls(workspace_root=root)

    @classmethod
    def create_at(cls, path: Path) -> "WorkspaceConfig":
        """Create a new workspace at the given path."""
        config = cls(workspace_root=path)
        config.initialize()
        return config
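A typical lifecycle for the workspace config above; the paths are hypothetical:

```python
from pathlib import Path

ws = WorkspaceConfig.create_at(Path("/repo"))  # creates /repo/.codexlens, cache/, .gitignore
print(ws.db_path)                              # /repo/.codexlens/index.db

# Later, from anywhere inside the project tree:
found = WorkspaceConfig.from_path(Path("/repo/src/deep/module"))
if found is not None:
    applied = found.load_env()  # applies .env vars without clobbering os.environ
```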
128
codex-lens/build/lib/codexlens/entities.py
Normal file
@@ -0,0 +1,128 @@
"""Pydantic entity models for CodexLens."""

from __future__ import annotations

import math
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field, field_validator


class Symbol(BaseModel):
    """A code symbol discovered in a file."""

    name: str = Field(..., min_length=1)
    kind: str = Field(..., min_length=1)
    range: Tuple[int, int] = Field(..., description="(start_line, end_line), 1-based inclusive")
    file: Optional[str] = Field(default=None, description="Full path to the file containing this symbol")

    @field_validator("range")
    @classmethod
    def validate_range(cls, value: Tuple[int, int]) -> Tuple[int, int]:
        if len(value) != 2:
            raise ValueError("range must be a (start_line, end_line) tuple")
        start_line, end_line = value
        if start_line < 1 or end_line < 1:
            raise ValueError("range lines must be >= 1")
        if end_line < start_line:
            raise ValueError("end_line must be >= start_line")
        return value


class SemanticChunk(BaseModel):
    """A semantically meaningful chunk of content, optionally embedded."""

    content: str = Field(..., min_length=1)
    embedding: Optional[List[float]] = Field(default=None, description="Vector embedding for semantic search")
    metadata: Dict[str, Any] = Field(default_factory=dict)
    id: Optional[int] = Field(default=None, description="Database row ID")
    file_path: Optional[str] = Field(default=None, description="Source file path")

    @field_validator("embedding")
    @classmethod
    def validate_embedding(cls, value: Optional[List[float]]) -> Optional[List[float]]:
        if value is None:
            return value
        if not value:
            raise ValueError("embedding cannot be empty when provided")
        norm = math.sqrt(sum(x * x for x in value))
        epsilon = 1e-10
        if norm < epsilon:
            raise ValueError("embedding cannot be a zero vector")
        return value


class IndexedFile(BaseModel):
    """An indexed source file with symbols and optional semantic chunks."""

    path: str = Field(..., min_length=1)
    language: str = Field(..., min_length=1)
    symbols: List[Symbol] = Field(default_factory=list)
    chunks: List[SemanticChunk] = Field(default_factory=list)
    relationships: List["CodeRelationship"] = Field(default_factory=list)

    @field_validator("path", "language")
    @classmethod
    def strip_and_validate_nonempty(cls, value: str) -> str:
        cleaned = value.strip()
        if not cleaned:
            raise ValueError("value cannot be blank")
        return cleaned


class RelationshipType(str, Enum):
    """Types of code relationships."""
    CALL = "calls"
    INHERITS = "inherits"
    IMPORTS = "imports"


class CodeRelationship(BaseModel):
    """A relationship between code symbols (e.g., function calls, inheritance)."""

    source_symbol: str = Field(..., min_length=1, description="Name of source symbol")
    target_symbol: str = Field(..., min_length=1, description="Name of target symbol")
    relationship_type: RelationshipType = Field(..., description="Type of relationship (call, inherits, etc.)")
    source_file: str = Field(..., min_length=1, description="File path containing source symbol")
    target_file: Optional[str] = Field(default=None, description="File path containing target (None if same file)")
    source_line: int = Field(..., ge=1, description="Line number where relationship occurs (1-based)")


class AdditionalLocation(BaseModel):
    """A pointer to another location where a similar result was found.

    Used for grouping search results with similar scores and content,
    where the primary result is stored in SearchResult and secondary
    locations are stored in this model.
    """

    path: str = Field(..., min_length=1)
    score: float = Field(..., ge=0.0)
    start_line: Optional[int] = Field(default=None, description="Start line of the result (1-based)")
    end_line: Optional[int] = Field(default=None, description="End line of the result (1-based)")
    symbol_name: Optional[str] = Field(default=None, description="Name of matched symbol")


class SearchResult(BaseModel):
    """A unified search result for lexical or semantic search."""

    path: str = Field(..., min_length=1)
    score: float = Field(..., ge=0.0)
    excerpt: Optional[str] = None
    content: Optional[str] = Field(default=None, description="Full content of matched code block")
    symbol: Optional[Symbol] = None
    chunk: Optional[SemanticChunk] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)

    # Additional context for complete code blocks
    start_line: Optional[int] = Field(default=None, description="Start line of code block (1-based)")
    end_line: Optional[int] = Field(default=None, description="End line of code block (1-based)")
    symbol_name: Optional[str] = Field(default=None, description="Name of matched symbol/function/class")
    symbol_kind: Optional[str] = Field(default=None, description="Kind of symbol (function/class/method)")

    # Field for grouping similar results
    additional_locations: List["AdditionalLocation"] = Field(
        default_factory=list,
        description="Other locations for grouped results with similar scores and content."
    )
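A short sketch of the validators above in action, assuming the models are importable from this module:

```python
from pydantic import ValidationError

sym = Symbol(name="resolve_project", kind="function", range=(12, 30))

try:
    Symbol(name="bad", kind="function", range=(30, 12))  # end before start
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # "... end_line must be >= start_line"

# Zero vectors are rejected; a non-empty, non-zero embedding passes.
chunk = SemanticChunk(content="def foo(): ...", embedding=[0.1, 0.2, 0.3])
```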
304
codex-lens/build/lib/codexlens/env_config.py
Normal file
@@ -0,0 +1,304 @@
"""Environment configuration loader for CodexLens.

Loads .env files from workspace .codexlens directory with fallback to project root.
Provides unified access to API configurations.

Priority order:
1. Environment variables (already set)
2. .codexlens/.env (workspace-local)
3. .env (project root)
"""

from __future__ import annotations

import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional

log = logging.getLogger(__name__)

# Supported environment variables with descriptions
ENV_VARS = {
    # Reranker configuration (overrides settings.json)
    "RERANKER_MODEL": "Reranker model name (overrides settings.json)",
    "RERANKER_BACKEND": "Reranker backend: fastembed, onnx, api, litellm, legacy",
    "RERANKER_ENABLED": "Enable reranker: true/false",
    "RERANKER_API_KEY": "API key for reranker service (SiliconFlow/Cohere/Jina)",
    "RERANKER_API_BASE": "Base URL for reranker API (overrides provider default)",
    "RERANKER_PROVIDER": "Reranker provider: siliconflow, cohere, jina",
    "RERANKER_POOL_ENABLED": "Enable reranker high availability pool: true/false",
    "RERANKER_STRATEGY": "Reranker load balance strategy: round_robin, latency_aware, weighted_random",
    "RERANKER_COOLDOWN": "Reranker rate limit cooldown in seconds",
    # Embedding configuration (overrides settings.json)
    "EMBEDDING_MODEL": "Embedding model/profile name (overrides settings.json)",
    "EMBEDDING_BACKEND": "Embedding backend: fastembed, litellm",
    "EMBEDDING_API_KEY": "API key for embedding service",
    "EMBEDDING_API_BASE": "Base URL for embedding API",
    "EMBEDDING_POOL_ENABLED": "Enable embedding high availability pool: true/false",
    "EMBEDDING_STRATEGY": "Embedding load balance strategy: round_robin, latency_aware, weighted_random",
    "EMBEDDING_COOLDOWN": "Embedding rate limit cooldown in seconds",
    # LiteLLM configuration
    "LITELLM_API_KEY": "API key for LiteLLM",
    "LITELLM_API_BASE": "Base URL for LiteLLM",
    "LITELLM_MODEL": "LiteLLM model name",
    # General configuration
    "CODEXLENS_DATA_DIR": "Custom data directory path",
    "CODEXLENS_DEBUG": "Enable debug mode (true/false)",
    # Chunking configuration
    "CHUNK_STRIP_COMMENTS": "Strip comments from code chunks for embedding: true/false (default: true)",
    "CHUNK_STRIP_DOCSTRINGS": "Strip docstrings from code chunks for embedding: true/false (default: true)",
    # Reranker tuning
    "RERANKER_TEST_FILE_PENALTY": "Penalty for test files in reranking: 0.0-1.0 (default: 0.0)",
    "RERANKER_DOCSTRING_WEIGHT": "Weight for docstring chunks in reranking: 0.0-1.0 (default: 1.0)",
}


def _parse_env_line(line: str) -> tuple[str, str] | None:
    """Parse a single .env line, returning (key, value) or None."""
    line = line.strip()

    # Skip empty lines and comments
    if not line or line.startswith("#"):
        return None

    # Handle export prefix
    if line.startswith("export "):
        line = line[7:].strip()

    # Split on first =
    if "=" not in line:
        return None

    key, _, value = line.partition("=")
    key = key.strip()
    value = value.strip()

    # Remove surrounding quotes
    if len(value) >= 2:
        if (value.startswith('"') and value.endswith('"')) or \
           (value.startswith("'") and value.endswith("'")):
            value = value[1:-1]

    return key, value
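The parser above handles comments, `export` prefixes, and quoting; a few illustrative cases (assuming `_parse_env_line` is importable from this module):

```python
assert _parse_env_line("# comment") is None
assert _parse_env_line("export RERANKER_ENABLED=true") == ("RERANKER_ENABLED", "true")
assert _parse_env_line('EMBEDDING_MODEL="code"') == ("EMBEDDING_MODEL", "code")  # quotes stripped
assert _parse_env_line("not a kv pair") is None
```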
def load_env_file(env_path: Path) -> Dict[str, str]:
    """Load environment variables from a .env file.

    Args:
        env_path: Path to .env file

    Returns:
        Dictionary of environment variables
    """
    if not env_path.is_file():
        return {}

    env_vars: Dict[str, str] = {}

    try:
        content = env_path.read_text(encoding="utf-8")
        for line in content.splitlines():
            result = _parse_env_line(line)
            if result:
                key, value = result
                env_vars[key] = value
    except Exception as exc:
        log.warning("Failed to load .env file %s: %s", env_path, exc)

    return env_vars


def _get_global_data_dir() -> Path:
    """Get global CodexLens data directory."""
    env_override = os.environ.get("CODEXLENS_DATA_DIR")
    if env_override:
        return Path(env_override).expanduser().resolve()
    return (Path.home() / ".codexlens").resolve()


def load_global_env() -> Dict[str, str]:
    """Load environment variables from global ~/.codexlens/.env file.

    Returns:
        Dictionary of environment variables from global config
    """
    global_env_path = _get_global_data_dir() / ".env"
    if global_env_path.is_file():
        env_vars = load_env_file(global_env_path)
        log.debug("Loaded %d vars from global %s", len(env_vars), global_env_path)
        return env_vars
    return {}


def load_workspace_env(workspace_root: Path | None = None) -> Dict[str, str]:
    """Load environment variables from workspace .env files.

    Priority (later overrides earlier):
    1. Global ~/.codexlens/.env (lowest priority)
    2. Project root .env
    3. .codexlens/.env (highest priority)

    Args:
        workspace_root: Workspace root directory. If None, uses current directory.

    Returns:
        Merged dictionary of environment variables
    """
    if workspace_root is None:
        workspace_root = Path.cwd()

    workspace_root = Path(workspace_root).resolve()

    env_vars: Dict[str, str] = {}

    # Load from global ~/.codexlens/.env (lowest priority)
    global_vars = load_global_env()
    if global_vars:
        env_vars.update(global_vars)

    # Load from project root .env (medium priority)
    root_env = workspace_root / ".env"
    if root_env.is_file():
        loaded = load_env_file(root_env)
        env_vars.update(loaded)
        log.debug("Loaded %d vars from %s", len(loaded), root_env)

    # Load from .codexlens/.env (highest priority)
    codexlens_env = workspace_root / ".codexlens" / ".env"
    if codexlens_env.is_file():
        loaded = load_env_file(codexlens_env)
        env_vars.update(loaded)
        log.debug("Loaded %d vars from %s", len(loaded), codexlens_env)

    return env_vars


def apply_workspace_env(workspace_root: Path | None = None, *, override: bool = False) -> int:
    """Load .env files and apply to os.environ.

    Args:
        workspace_root: Workspace root directory
        override: If True, override existing environment variables

    Returns:
        Number of variables applied
    """
    env_vars = load_workspace_env(workspace_root)
    applied = 0

    for key, value in env_vars.items():
        if override or key not in os.environ:
            os.environ[key] = value
            applied += 1
            log.debug("Applied env var: %s", key)

    return applied


def get_env(key: str, default: str | None = None, *, workspace_root: Path | None = None) -> str | None:
    """Get environment variable with .env file fallback.

    Priority:
    1. os.environ (already set)
    2. .codexlens/.env
    3. .env
    4. default value

    Args:
        key: Environment variable name
        default: Default value if not found
        workspace_root: Workspace root for .env file lookup

    Returns:
        Value or default
    """
    # Check os.environ first
    if key in os.environ:
        return os.environ[key]

    # Load from .env files
    env_vars = load_workspace_env(workspace_root)
    if key in env_vars:
        return env_vars[key]

    return default


def get_api_config(
    prefix: str,
    *,
    workspace_root: Path | None = None,
    defaults: Dict[str, Any] | None = None,
) -> Dict[str, Any]:
    """Get API configuration from environment.

    Loads {PREFIX}_API_KEY, {PREFIX}_API_BASE, {PREFIX}_MODEL, etc.

    Args:
        prefix: Environment variable prefix (e.g., "RERANKER", "EMBEDDING")
        workspace_root: Workspace root for .env file lookup
        defaults: Default values

    Returns:
        Dictionary with api_key, api_base, model, etc.
    """
    defaults = defaults or {}

    config: Dict[str, Any] = {}

    # Standard API config fields
    field_mapping = {
        "api_key": f"{prefix}_API_KEY",
        "api_base": f"{prefix}_API_BASE",
        "model": f"{prefix}_MODEL",
        "provider": f"{prefix}_PROVIDER",
        "timeout": f"{prefix}_TIMEOUT",
    }

    for field, env_key in field_mapping.items():
        value = get_env(env_key, workspace_root=workspace_root)
        if value is not None:
            # Type conversion for specific fields
            if field == "timeout":
                try:
                    config[field] = float(value)
                except ValueError:
                    pass
            else:
                config[field] = value
        elif field in defaults:
            config[field] = defaults[field]

    return config


def generate_env_example() -> str:
    """Generate .env.example content with all supported variables.

    Returns:
        String content for .env.example file
    """
    lines = [
        "# CodexLens Environment Configuration",
        "# Copy this file to .codexlens/.env and fill in your values",
        "",
    ]

    # Group by prefix
    groups: Dict[str, list] = {}
    for key, desc in ENV_VARS.items():
        prefix = key.split("_")[0]
        if prefix not in groups:
            groups[prefix] = []
        groups[prefix].append((key, desc))

    for prefix, items in groups.items():
        lines.append(f"# {prefix} Configuration")
        for key, desc in items:
            lines.append(f"# {desc}")
            lines.append(f"# {key}=")
        lines.append("")

    return "\n".join(lines)
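Pulling it together, a sketch of how a reranker backend might read its API settings through `get_api_config` above; the environment values are hypothetical:

```python
import os

os.environ["RERANKER_API_KEY"] = "sk-example"    # hypothetical value
os.environ["RERANKER_PROVIDER"] = "siliconflow"
os.environ["RERANKER_TIMEOUT"] = "30"

cfg = get_api_config("RERANKER", defaults={"model": "cross-encoder/ms-marco-MiniLM-L-6-v2"})
# cfg == {"api_key": "sk-example", "provider": "siliconflow",
#         "timeout": 30.0,  # converted to float
#         "model": "cross-encoder/ms-marco-MiniLM-L-6-v2"}  # from defaults
```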
Some files were not shown because too many files have changed in this diff