feat(skills): implement enable/disable functionality for skills

- Added new API endpoints to enable and disable skills.
- Introduced logic to manage disabled skills, including loading and saving configurations.
- Enhanced skills routes to return lists of disabled skills.
- Updated frontend to display disabled skills and allow toggling their status.
- Added internationalization support for new skill status messages.
- Created JSON schemas for plan verification agent and findings.
- Defined new types for skill management in TypeScript.
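A minimal sketch of the disabled-skills bookkeeping described above (the config path, file shape, and function names are assumptions for illustration, not the committed code):

```javascript
// Illustrative sketch only: persist a set of disabled skill IDs (path and names are assumed).
const fs = require('fs')
const path = require('path')

const CONFIG_PATH = path.join(process.env.HOME || '.', '.claude', 'skills-config.json') // assumed location

function loadDisabledSkills() {
  try {
    return new Set(JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf8')).disabledSkills || [])
  } catch {
    return new Set() // missing or unreadable config means nothing is disabled
  }
}

function saveDisabledSkills(disabled) {
  fs.mkdirSync(path.dirname(CONFIG_PATH), { recursive: true })
  fs.writeFileSync(CONFIG_PATH, JSON.stringify({ disabledSkills: [...disabled] }, null, 2))
}

function setSkillEnabled(skillId, enabled) {
  const disabled = loadDisabledSkills()
  if (enabled) disabled.delete(skillId)
  else disabled.add(skillId)
  saveDisabledSkills(disabled)
  return { skillId, enabled }
}
```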
catlog22
2026-01-28 00:49:39 +08:00
parent 8d178feaac
commit 7a40f16235
35 changed files with 1123 additions and 2016 deletions


@@ -50,11 +50,26 @@ Interactive orchestration tool: analyze task → discover commands → recommend
| **Code Review (Session)** | review-session-cycle → review-fix | Complete review cycle and apply fixes | Fixed code |
| **Code Review (Module)** | review-module-cycle → review-fix | Module review cycle and apply fixes | Fixed code |
**Issue Units**:
| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Issue Workflow** | discover → plan → queue → execute | Complete issue lifecycle | Completed issues |
| **Rapid-to-Issue** | lite-plan → convert-to-plan → queue → execute | Bridge lite workflow to issue workflow | Completed issues |
**With-File Units** (documented units):
| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Brainstorm With File** | brainstorm-with-file | Multi-perspective ideation with documentation | brainstorm.md |
| **Debug With File** | debug-with-file | Hypothesis-driven debugging with documentation | understanding.md |
| **Analyze With File** | analyze-with-file | Collaborative analysis with documentation | discussion.md |
### Command-to-Unit Mapping (commands → atomic units)
| Command | Can Precede | Atomic Units |
|---------|-----------|--------------|
| lite-plan | lite-execute | Quick Implementation |
| lite-plan | lite-execute, convert-to-plan | Quick Implementation, Rapid-to-Issue |
| multi-cli-plan | lite-execute | Multi-CLI Planning |
| lite-fix | lite-execute | Bug Fix |
| plan | plan-verify, execute | Full Planning + Execution, Verified Planning + Execution |
@@ -65,6 +80,13 @@ Interactive orchestration tool: analyze task → discover commands → recommend
| review-session-cycle | review-fix | Code Review (Session) |
| review-module-cycle | review-fix | Code Review (Module) |
| test-fix-gen | test-cycle-execute | Test Validation |
| issue:discover | issue:plan | Issue Workflow |
| issue:plan | issue:queue | Issue Workflow |
| convert-to-plan | issue:queue | Rapid-to-Issue |
| issue:queue | issue:execute | Issue Workflow, Rapid-to-Issue |
| brainstorm-with-file | (standalone) | Brainstorm With File |
| debug-with-file | (standalone) | Debug With File |
| analyze-with-file | (standalone) | Analyze With File |
### Atomic Group Rules
@@ -105,6 +127,13 @@ function detectTaskType(text) {
if (/测试失败|test fail|fix test|failing test/.test(text)) return 'test-fix';
if (/generate test|写测试|add test|补充测试/.test(text)) return 'test-gen';
if (/review|审查|code review/.test(text)) return 'review';
// Issue workflow patterns
if (/issues?.*batch|batch.*issues?|批量.*issue|issue.*批量/.test(text)) return 'issue-batch';
if (/issue workflow|structured workflow|queue|multi-stage|转.*issue|issue.*流程/.test(text)) return 'issue-transition';
// With-File workflow patterns
if (/brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking/.test(text)) return 'brainstorm-file';
if (/debug.*document|hypothesis.*debug|深度调试|假设.*验证|systematic debug/.test(text)) return 'debug-file';
if (/analyze.*document|collaborative analysis|协作分析|深度.*理解/.test(text)) return 'analyze-file';
if (/不确定|explore|研究|what if|brainstorm|权衡/.test(text)) return 'brainstorm';
if (/多视角|比较方案|cross-verify|multi-cli/.test(text)) return 'multi-cli';
return 'feature'; // Default
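// Example classifications (illustrative, assuming no earlier pattern in this function matched first):
//   detectTaskType('批量处理 issue')          // → 'issue-batch'
//   detectTaskType('头脑风暴: 通知系统重构')    // → 'brainstorm-file'
//   detectTaskType('深度调试 WebSocket 断开')  // → 'debug-file'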
@@ -285,6 +314,66 @@ const commandPorts = {
output: ['review-verified'], // Output port: review passed
tags: ['review'],
atomic_group: 'code-review' // Atomic unit: bound to review-fix
},
// Issue workflow commands
'issue:discover': {
name: 'issue:discover',
input: ['codebase'], // Input port: codebase
output: ['pending-issues'], // Output port: pending issues
tags: ['issue'],
atomic_group: 'issue-workflow' // Atomic unit: discover → plan → queue → execute
},
'issue:plan': {
name: 'issue:plan',
input: ['pending-issues'], // Input port: pending issues
output: ['issue-plans'], // Output port: issue plans
tags: ['issue'],
atomic_group: 'issue-workflow'
},
'issue:queue': {
name: 'issue:queue',
input: ['issue-plans', 'converted-plan'], // Accepts output from issue:plan or convert-to-plan
output: ['execution-queue'], // Output port: execution queue
tags: ['issue'],
atomic_groups: ['issue-workflow', 'rapid-to-issue']
},
'issue:execute': {
name: 'issue:execute',
input: ['execution-queue'], // Input port: execution queue
output: ['completed-issues'], // Output port: completed issues
tags: ['issue'],
atomic_groups: ['issue-workflow', 'rapid-to-issue']
},
'issue:convert-to-plan': {
name: 'issue:convert-to-plan',
input: ['plan'], // Input port: lite-plan output
output: ['converted-plan'], // Output port: converted issue plan
tags: ['issue', 'planning'],
atomic_group: 'rapid-to-issue' // Atomic unit: lite-plan → convert-to-plan → queue → execute
},
// With-File workflows (documented exploration with multi-CLI collaboration)
'brainstorm-with-file': {
name: 'brainstorm-with-file',
input: ['exploration-topic'], // Input port: exploration topic
output: ['brainstorm-document'], // Output port: brainstorm.md + synthesized conclusions
tags: ['brainstorm', 'with-file'],
note: 'Self-contained workflow with multi-round diverge-converge cycles'
},
'debug-with-file': {
name: 'debug-with-file',
input: ['bug-report'], // Input port: bug report
output: ['understanding-document'], // Output port: understanding.md + fix
tags: ['bugfix', 'with-file'],
note: 'Self-contained workflow with hypothesis-driven iteration'
},
'analyze-with-file': {
name: 'analyze-with-file',
input: ['analysis-topic'], // Input port: analysis topic
output: ['discussion-document'], // Output port: discussion.md + conclusions
tags: ['analysis', 'with-file'],
note: 'Self-contained workflow with multi-round discussion'
}
};
```
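A chain is well-formed when each command's output port is accepted as an input port by the next command. A minimal sketch of that check over the `commandPorts` table above (helper name and usage are illustrative):

```javascript
// Illustrative: verify that consecutive commands in a chain connect via matching ports.
function isChainConnected(chain, ports = commandPorts) {
  for (let i = 0; i < chain.length - 1; i++) {
    const current = ports[chain[i]]
    const next = ports[chain[i + 1]]
    if (!current || !next) return false
    // Connected when some output port of the current command is an input port of the next.
    if (!current.output.some(port => next.input.includes(port))) return false
  }
  return true
}

// isChainConnected(['issue:discover', 'issue:plan', 'issue:queue', 'issue:execute']) // → true
```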
@@ -306,14 +395,21 @@ async function recommendCommandChain(analysis) {
// Port flows for each task type
function determinePortFlow(taskType, constraints) {
const flows = {
'bugfix': { inputPort: 'bug-report', outputPort: constraints?.includes('skip-tests') ? 'fixed-code' : 'test-passed' },
'tdd': { inputPort: 'requirement', outputPort: 'tdd-verified' },
'test-fix': { inputPort: 'failing-tests', outputPort: 'test-passed' },
'test-gen': { inputPort: 'code', outputPort: 'test-passed' },
'review': { inputPort: 'code', outputPort: 'review-verified' },
'brainstorm': { inputPort: 'exploration-topic', outputPort: 'test-passed' },
'multi-cli': { inputPort: 'requirement', outputPort: 'test-passed' },
'feature': { inputPort: 'requirement', outputPort: constraints?.includes('skip-tests') ? 'code' : 'test-passed' }
'bugfix': { inputPort: 'bug-report', outputPort: constraints?.includes('skip-tests') ? 'fixed-code' : 'test-passed' },
'tdd': { inputPort: 'requirement', outputPort: 'tdd-verified' },
'test-fix': { inputPort: 'failing-tests', outputPort: 'test-passed' },
'test-gen': { inputPort: 'code', outputPort: 'test-passed' },
'review': { inputPort: 'code', outputPort: 'review-verified' },
'brainstorm': { inputPort: 'exploration-topic', outputPort: 'test-passed' },
'multi-cli': { inputPort: 'requirement', outputPort: 'test-passed' },
// Issue workflow types
'issue-batch': { inputPort: 'codebase', outputPort: 'completed-issues' },
'issue-transition': { inputPort: 'requirement', outputPort: 'completed-issues' },
// With-File workflow types
'brainstorm-file': { inputPort: 'exploration-topic', outputPort: 'brainstorm-document' },
'debug-file': { inputPort: 'bug-report', outputPort: 'understanding-document' },
'analyze-file': { inputPort: 'analysis-topic', outputPort: 'discussion-document' },
'feature': { inputPort: 'requirement', outputPort: constraints?.includes('skip-tests') ? 'code' : 'test-passed' }
};
return flows[taskType] || flows['feature'];
}
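// Example lookups (illustrative; values come from the flows table above):
//   determinePortFlow('issue-batch')   // → { inputPort: 'codebase', outputPort: 'completed-issues' }
//   determinePortFlow('debug-file')    // → { inputPort: 'bug-report', outputPort: 'understanding-document' }
//   determinePortFlow('unknown-type')  // falls back to the 'feature' flow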
@@ -553,6 +649,34 @@ function formatCommand(cmd, previousResults, analysis) {
} else if (name.includes('test') || name.includes('review') || name.includes('verify')) {
const latest = previousResults.filter(r => r.session_id).pop();
if (latest?.session_id) prompt += ` --session="${latest.session_id}"`;
// Issue workflow commands
} else if (name === 'issue:discover') {
// No parameters needed - discovers from codebase
prompt = `/issue:discover -y`;
} else if (name === 'issue:plan') {
prompt = `/issue:plan -y --all-pending`;
} else if (name === 'issue:queue') {
prompt = `/issue:queue -y`;
} else if (name === 'issue:execute') {
prompt = `/issue:execute -y --queue auto`;
} else if (name === 'issue:convert-to-plan' || name === 'convert-to-plan') {
// Convert latest lite-plan to issue plan
prompt = `/issue:convert-to-plan -y --latest-lite-plan`;
// With-File workflows (self-contained)
} else if (name === 'brainstorm-with-file') {
prompt = `/workflow:brainstorm-with-file -y "${analysis.goal}"`;
} else if (name === 'debug-with-file') {
prompt = `/workflow:debug-with-file -y "${analysis.goal}"`;
} else if (name === 'analyze-with-file') {
prompt = `/workflow:analyze-with-file -y "${analysis.goal}"`;
}
return prompt;
@@ -904,7 +1028,7 @@ break; // ⚠️ STOP HERE - DO NOT use TaskOutput polling
## Available Commands
All from `~/.claude/commands/workflow/`:
All from `~/.claude/commands/workflow/` and `~/.claude/commands/issue/`:
**Planning**: lite-plan, plan, multi-cli-plan, plan-verify, tdd-plan
**Execution**: lite-execute, execute, develop-with-file
@@ -916,6 +1040,8 @@ All from `~/.claude/commands/workflow/`:
**Session Management**: session:start, session:resume, session:complete, session:solidify, session:list
**Tools**: context-gather, test-context-gather, task-generate, conflict-resolution, action-plan-verify
**Utility**: clean, init, replan
**Issue Workflow**: issue:discover, issue:plan, issue:queue, issue:execute, issue:convert-to-plan
**With-File Workflows**: brainstorm-with-file, debug-with-file, analyze-with-file
### Testing Commands Distinction
@@ -944,5 +1070,10 @@ All from `~/.claude/commands/workflow/`:
| **review** | Code →【review-* → review-fix】→ Fixed code →【test-fix-gen → test-cycle-execute】→ Tests passed | Code Review + Testing |
| **brainstorm** | Exploration topic → brainstorm → analysis →【plan → plan-verify】→ execute → test | Exploration + Planning + Execution |
| **multi-cli** | Requirement → multi-cli-plan → comparative analysis → lite-execute → test | Multi-Perspective + Testing |
| **issue-batch** | Codebase →【discover → plan → queue → execute】→ Completed issues | Issue Workflow |
| **issue-transition** | Requirement →【lite-plan → convert-to-plan → queue → execute】→ Completed issues | Rapid-to-Issue |
| **brainstorm-file** | Topic → brainstorm-with-file → brainstorm.md (self-contained) | Brainstorm With File |
| **debug-file** | Bug report → debug-with-file → understanding.md (self-contained) | Debug With File |
| **analyze-file** | Analysis topic → analyze-with-file → discussion.md (self-contained) | Analyze With File |
Use `CommandRegistry.getAllCommandsSummary()` to discover all commands dynamically.


@@ -67,11 +67,15 @@ function analyzeIntent(input) {
function detectTaskType(text) {
const patterns = {
'bugfix-hotfix': /urgent|production|critical/ && /fix|bug/,
// With-File workflows (documented exploration with multi-CLI collaboration)
'brainstorm': /brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking|multi-perspective.*think|compare perspectives|探索.*可能/,
'debug-file': /debug.*document|hypothesis.*debug|troubleshoot.*track|investigate.*log|调试.*记录|假设.*验证|systematic debug|深度调试/,
'analyze-file': /analyze.*document|explore.*concept|understand.*architecture|investigate.*discuss|collaborative analysis|分析.*讨论|深度.*理解|协作.*分析/,
// Standard workflows
'bugfix': /fix|bug|error|crash|fail|debug/,
'issue-batch': /issues?|batch/ && /fix|resolve/,
'issue-transition': /issue workflow|structured workflow|queue|multi-stage/,
'exploration': /uncertain|explore|research|what if/,
'multi-perspective': /multi-perspective|compare|cross-verify/,
'quick-task': /quick|simple|small/ && /feature|function/,
'ui-design': /ui|design|component|style/,
'tdd': /tdd|test-driven|test first/,
@@ -112,6 +116,11 @@ async function clarifyRequirements(analysis) {
function selectWorkflow(analysis) {
const levelMap = {
'bugfix-hotfix': { level: 2, flow: 'bugfix.hotfix' },
// With-File workflows (documented exploration with multi-CLI collaboration)
'brainstorm': { level: 4, flow: 'brainstorm-with-file' }, // Multi-perspective ideation
'debug-file': { level: 3, flow: 'debug-with-file' }, // Hypothesis-driven debugging
'analyze-file': { level: 3, flow: 'analyze-with-file' }, // Collaborative analysis
// Standard workflows
'bugfix': { level: 2, flow: 'bugfix.standard' },
'issue-batch': { level: 'Issue', flow: 'issue' },
'issue-transition': { level: 2.5, flow: 'rapid-to-issue' }, // Bridge workflow
@@ -191,6 +200,22 @@ function buildCommandChain(workflow, analysis) {
{ cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' }
],
// With-File workflows (documented exploration with multi-CLI collaboration)
'brainstorm-with-file': [
{ cmd: '/workflow:brainstorm-with-file', args: `"${analysis.goal}"` }
// Note: Has built-in post-completion options (create plan, create issue, deep analysis)
],
'debug-with-file': [
{ cmd: '/workflow:debug-with-file', args: `"${analysis.goal}"` }
// Note: Self-contained with hypothesis-driven iteration and Gemini validation
],
'analyze-with-file': [
{ cmd: '/workflow:analyze-with-file', args: `"${analysis.goal}"` }
// Note: Self-contained with multi-round discussion and CLI exploration
],
// Level 3 - Standard
'coupled': [
// Unit: Verified Planning【plan → plan-verify】
@@ -422,6 +447,9 @@ Phase 5: Execute Command Chain
| "Add API endpoint" | feature (low) | 2 |【lite-plan → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Fix login timeout" | bugfix | 2 |【lite-fix → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Use issue workflow" | issue-transition | 2.5 |【lite-plan → convert-to-plan】→ queue → execute |
| "头脑风暴: 通知系统重构" | brainstorm | 4 | brainstorm-with-file → (built-in post-completion) |
| "深度调试 WebSocket 连接断开" | debug-file | 3 | debug-with-file → (hypothesis iteration) |
| "协作分析: 认证架构优化" | analyze-file | 3 | analyze-with-file → (multi-round discussion) |
| "OAuth2 system" | feature (high) | 3 |【plan → plan-verify】→ execute →【review-session-cycle → review-fix】→【test-fix-gen → test-cycle-execute】|
| "Implement with TDD" | tdd | 3 |【tdd-plan → execute】→ tdd-verify |
| "Uncertain: real-time arch" | exploration | 4 | brainstorm:auto-parallel →【plan → plan-verify】→ execute →【test-fix-gen → test-cycle-execute】|
@@ -465,6 +493,29 @@ todos = [
---
## With-File Workflows
**With-File workflows** provide documented exploration with multi-CLI collaboration. They are self-contained and generate comprehensive session artifacts.
| Workflow | Purpose | Key Features | Output Folder |
|----------|---------|--------------|---------------|
| **brainstorm-with-file** | Multi-perspective ideation | Gemini/Codex/Claude perspectives, diverge-converge cycles | `.workflow/.brainstorm/` |
| **debug-with-file** | Hypothesis-driven debugging | Gemini validation, understanding evolution, NDJSON logging | `.workflow/.debug/` |
| **analyze-with-file** | Collaborative analysis | Multi-round Q&A, CLI exploration, documented discussions | `.workflow/.analysis/` |
**Detection Keywords**:
- **brainstorm**: 头脑风暴, 创意, 发散思维, multi-perspective, compare perspectives
- **debug-file**: 深度调试, 假设验证, systematic debug, hypothesis debug
- **analyze-file**: 协作分析, 深度理解, collaborative analysis, explore concept
**Characteristics**:
1. **Self-Contained**: Each workflow handles its own iteration loop
2. **Documented Process**: Creates evolving documents (brainstorm.md, understanding.md, discussion.md)
3. **Multi-CLI**: Uses Gemini/Codex/Claude for different perspectives
4. **Built-in Post-Completion**: Offers follow-up options (create plan, issue, etc.)
---
## Type Comparison: ccw vs ccw-coordinator
| Aspect | ccw | ccw-coordinator |
@@ -496,4 +547,9 @@ ccw "Implement user registration with TDD"
# Exploratory task
ccw "Uncertain about architecture for real-time notifications"
# With-File workflows (documented exploration with multi-CLI collaboration)
ccw "头脑风暴: 用户通知系统重新设计" # → brainstorm-with-file
ccw "深度调试: 系统随机崩溃问题" # → debug-with-file
ccw "协作分析: 理解现有认证架构的设计决策" # → analyze-with-file
```

File diff suppressed because it is too large.


@@ -170,652 +170,152 @@ Create internal representations (do not include raw artifacts in output):
#### Phase 4.1: Launch Unified Verification Agent
**Single Agent, Multi-Dimensional Analysis**:
```javascript
Task(
subagent_type="cli-explore-agent",
run_in_background=false, // ⚠️ MANDATORY: Must wait for results
run_in_background=false,
description="Multi-dimensional plan verification",
prompt=`
## Plan Verification Task
Execute comprehensive verification across dimensions A-H, using Gemini CLI for semantic analysis.
### MANDATORY FIRST STEPS
1. Read: ${session_file} (user intent/context)
2. Read: ${IMPL_PLAN} (implementation plan)
3. Glob: ${task_dir}/*.json (all task JSON files)
4. Glob: ${SYNTHESIS_DIR}/*/analysis.md (role analysis documents)
5. Read: \~/.claude/workflows/cli-templates/schemas/verify-json-schema.json (output schema reference)
1. Read: ~/.claude/workflows/cli-templates/schemas/plan-verify-agent-schema.json (dimensions & rules)
2. Read: ~/.claude/workflows/cli-templates/schemas/verify-json-schema.json (output schema)
3. Read: ${session_file} (user intent)
4. Read: ${IMPL_PLAN} (implementation plan)
5. Glob: ${task_dir}/*.json (task files)
6. Glob: ${SYNTHESIS_DIR}/*/analysis.md (role analyses)
### Output Location
${process_dir}/verification-findings.json
### Execution Flow
### Verification Dimensions
**Load schema → Execute tiered CLI analysis → Aggregate findings → Write JSON**
#### Dimension A: User Intent Alignment (CRITICAL - Tier 1)
- Goal Alignment: IMPL_PLAN objectives match user's original intent
- Scope Drift: Plan covers user's stated scope without unauthorized expansion
- Success Criteria Match: Plan's success criteria reflect user's expectations
- Intent Conflicts: Tasks contradicting user's original objectives
FOR each tier in [1, 2, 3, 4]:
- Load tier config from plan-verify-agent-schema.json
- Execute: ccw cli -p "PURPOSE: Verify dimensions {tier.dimensions}
TASK: {tier.checks from schema}
CONTEXT: @${session_dir}/**/*
EXPECTED: Findings JSON with dimension, severity, location, summary, recommendation
CONSTRAINTS: Limit {tier.limit} findings
" --tool gemini --mode analysis --rule {tier.rule}
- Parse findings, check early exit condition
- IF tier == 1 AND critical_count > 0: skip tier 3-4
#### Dimension B: Requirements Coverage Analysis (CRITICAL - Tier 1)
- Orphaned Requirements: Requirements in synthesis with zero associated tasks
- Unmapped Tasks: Tasks with no clear requirement linkage
- NFR Coverage Gaps: Non-functional requirements not reflected in tasks
#### Dimension C: Consistency Validation (CRITICAL - Tier 1)
- Requirement Conflicts: Tasks contradicting synthesis requirements
- Architecture Drift: IMPL_PLAN architecture not matching synthesis ADRs
- Terminology Drift: Same concept named differently across artifacts
- Data Model Inconsistency: Tasks referencing entities/fields not in synthesis
#### Dimension D: Dependency Integrity (HIGH - Tier 2)
- Circular Dependencies: Cyclic task dependencies
- Missing Dependencies: Task requires outputs from another task but no explicit dependency
- Broken Dependencies: Task depends on non-existent task ID
- Logical Ordering Issues: Implementation tasks before foundational setup
#### Dimension E: Synthesis Alignment (HIGH - Tier 2)
- Priority Conflicts: High-priority synthesis requirements mapped to low-priority tasks
- Success Criteria Mismatch: IMPL_PLAN success criteria not covering synthesis acceptance criteria
- Risk Mitigation Gaps: Critical risks without corresponding mitigation tasks
#### Dimension F: Task Specification Quality (MEDIUM - Tier 3)
- Ambiguous Focus Paths: Tasks with vague/missing focus_paths
- Underspecified Acceptance: Tasks without clear acceptance criteria
- Missing Artifacts References: Tasks not referencing brainstorming artifacts
- Weak Flow Control: Tasks without clear implementation_approach or pre_analysis
- Missing Target Files: Tasks without flow_control.target_files
#### Dimension G: Duplication Detection (LOW - Tier 4)
- Overlapping Task Scope: Multiple tasks with nearly identical descriptions
- Redundant Requirements Coverage: Same requirement covered by multiple tasks
#### Dimension H: Feasibility Assessment (LOW - Tier 4)
- Complexity Misalignment: Task marked "simple" but requires multiple file modifications
- Resource Conflicts: Parallel tasks requiring same resources/files
- Skill Gap Risks: Tasks requiring unavailable team skills
### CLI Analysis Execution
**Execute Tier 1 Analysis (All Dimensions)**:
\`\`\`bash
ccw cli -p "PURPOSE: Multi-dimensional plan verification for Tier 1 (user intent, coverage, consistency)
TASK:
• Verify user original intent matches IMPL_PLAN objectives (dimension A)
• Check all synthesis requirements have corresponding tasks (dimension B)
• Identify conflicts between tasks and synthesis decisions (dimension C)
• Find orphaned requirements or unmapped tasks
CONTEXT: @${session_dir}/**/* | Memory: Verification session WFS-${session_id}
EXPECTED: Findings JSON array with: dimension, severity, location, summary, recommendation
CONSTRAINTS: Focus on CRITICAL issues only | Identify all intent misalignments
" --tool gemini --mode analysis --rule analysis-review-architecture
\`\`\`
**If CRITICAL findings == 0, continue to Tier 2**:
\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 2 (dependencies and synthesis alignment)
TASK:
• Detect circular or broken task dependencies (dimension D)
• Identify priority conflicts between synthesis and tasks (dimension E)
• Check risk mitigation coverage
CONTEXT: @${session_dir}/**/* | Previous: Tier 1 verified, no critical issues
EXPECTED: Findings JSON with dimension D-E results
CONSTRAINTS: Limit to 15 HIGH severity findings
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
\`\`\`
**If High findings <= 15, continue to Tier 3**:
\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 3 (task specification quality)
TASK:
• Check for missing or vague acceptance criteria (dimension F)
• Validate flow control specifications in tasks
• Identify missing artifact references
CONTEXT: @${task_dir}/**/* @${IMPL_PLAN}
EXPECTED: Findings JSON with dimension F results
CONSTRAINTS: Limit to 20 MEDIUM severity findings
" --tool gemini --mode analysis --rule analysis-analyze-code-patterns
\`\`\`
**If Medium findings <= 20, execute Tier 4**:
\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 4 (duplication and feasibility)
TASK:
• Detect overlapping task scopes (dimension G)
• Assess complexity alignment and resource conflicts (dimension H)
CONTEXT: @${task_dir}/**/*
EXPECTED: Findings JSON with dimension G-H results
CONSTRAINTS: Limit to 15 LOW severity findings
" --tool gemini --mode analysis --rule analysis-analyze-code-patterns
\`\`\`
### Severity Assignment
**CRITICAL**:
- Violates user's original intent (goal misalignment, scope drift)
- Violates synthesis authority (requirement conflict)
- Core requirement with zero coverage
- Circular dependencies
- Broken dependencies
**HIGH**:
- NFR coverage gaps
- Priority conflicts
- Missing risk mitigation tasks
- Ambiguous acceptance criteria
**MEDIUM**:
- Terminology drift
- Missing artifacts references
- Weak flow control
- Logical ordering issues
**LOW**:
- Style/wording improvements
- Minor redundancy not affecting execution
### Output Schema
JSON findings array (reference from step 5 above):
\`\`\`json
{
"session_id": "${session_id}",
"timestamp": "2025-01-27T...",
"verification_tiers_completed": ["Tier 1", "Tier 2"],
"findings": [
{
"id": "C1",
"dimension": "A",
"dimension_name": "User Intent Alignment",
"severity": "CRITICAL",
"location": ["${IMPL_PLAN}:L45", "synthesis:FR-03"],
"summary": "User goal: add user profiles, but IMPL_PLAN focuses on authentication",
"recommendation": "Update IMPL_PLAN to include profile management tasks"
},
{
"id": "H1",
"dimension": "D",
"dimension_name": "Dependency Integrity",
"severity": "HIGH",
"location": ["task:IMPL-2.3"],
"summary": "Depends on non-existent IMPL-2.4",
"recommendation": "Fix depends_on reference or remove dependency"
}
],
"summary": {
"critical_count": 2,
"high_count": 3,
"medium_count": 5,
"low_count": 8,
"total_findings": 18,
"coverage_percentage": 92,
"recommendation": "PROCEED_WITH_FIXES"
}
}
\`\`\`
### Success Criteria
- [ ] All Tier 1 findings identified (no early exit)
- [ ] Tier 2-4 executed in sequence (skipped only by token budget exhaustion)
- [ ] Each finding includes: dimension, severity, location, recommendation
- [ ] Findings aggregated in single JSON output file
- [ ] Agent returns completion summary with quality gate recommendation
### Return Output
Write: \`${process_dir}/verification-findings.json\`
Return: 2-3 sentence summary with quality gate decision (BLOCK_EXECUTION / PROCEED_WITH_FIXES / PROCEED_WITH_CAUTION / PROCEED)
### Output
Write: ${process_dir}/verification-findings.json (follow verify-json-schema.json)
Return: Quality gate decision + 2-3 sentence summary
`
)
```
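The tiered execution flow described in the prompt above could be driven directly from `plan-verify-agent-schema.json`; a minimal sketch (session directory, CLI output parsing, and error handling are assumptions):

```javascript
// Sketch of the tier loop: load tier config, run one CLI analysis per tier, honor the early-exit rule.
const fs = require('fs')
const os = require('os')
const { execSync } = require('child_process')

const schema = JSON.parse(fs.readFileSync(
  `${os.homedir()}/.claude/workflows/cli-templates/schemas/plan-verify-agent-schema.json`, 'utf8'))
const sessionDir = process.env.SESSION_DIR || '.workflow/session'  // assumed
const allFindings = []
let skipLowTiers = false

for (const tierId of ['1', '2', '3', '4']) {
  if (skipLowTiers && (tierId === '3' || tierId === '4')) continue
  const tier = schema.tiers[tierId]
  const checks = tier.dimensions.flatMap(d => schema.dimensions[d].checks).join('; ')
  const prompt = [
    `PURPOSE: Verify dimensions ${tier.dimensions.join(', ')}`,
    `TASK: ${checks}`,
    `CONTEXT: @${sessionDir}/**/*`,
    `EXPECTED: Findings JSON with dimension, severity, location, summary, recommendation`,
    `CONSTRAINTS: Limit ${tier.limit ?? 'all'} findings`
  ].join('\n')
  const output = execSync(
    `ccw cli -p ${JSON.stringify(prompt)} --tool gemini --mode analysis --rule ${tier.rule}`,
    { encoding: 'utf8' })
  const findings = JSON.parse(output)  // assumes the CLI prints a findings JSON array
  allFindings.push(...findings)
  // Early exit (token_budget): CRITICAL findings in Tier 1 skip Tiers 3-4.
  if (tierId === '1' && findings.some(f => f.severity === 'CRITICAL')) skipLowTiers = true
}
```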
---
#### Phase 4.2: Parse and Aggregate Agent Results
#### Phase 4.2: Load and Organize Findings
```javascript
// Load agent findings
const findings = JSON.parse(Read(\`${process_dir}/verification-findings.json\`))
// Organize by severity
const bySeverity = {
CRITICAL: findings.findings.filter(f => f.severity === 'CRITICAL'),
HIGH: findings.findings.filter(f => f.severity === 'HIGH'),
MEDIUM: findings.findings.filter(f => f.severity === 'MEDIUM'),
LOW: findings.findings.filter(f => f.severity === 'LOW')
}
// Determine quality gate
const recommendation =
bySeverity.CRITICAL.length > 0 ? 'BLOCK_EXECUTION' :
bySeverity.HIGH.length > 0 ? 'PROCEED_WITH_FIXES' :
bySeverity.MEDIUM.length > 0 ? 'PROCEED_WITH_CAUTION' :
'PROCEED'
```
### 5. Generate Human-Readable Report
**Report Generation**: Transform agent findings JSON into comprehensive Markdown report.
**Step 5.1: Load Agent Findings**
```javascript
// Load verification findings from agent
const findingsData = JSON.parse(Read(`${process_dir}/verification-findings.json`))
// Extract key metrics
const { session_id, timestamp, verification_tiers_completed, findings, summary } = findingsData
// Load findings (single parse for all subsequent use)
const data = JSON.parse(Read(`${process_dir}/verification-findings.json`))
const { session_id, timestamp, verification_tiers_completed, findings, summary } = data
const { critical_count, high_count, medium_count, low_count, total_findings, coverage_percentage, recommendation } = summary
// Organize findings by severity
const bySeverity = {
CRITICAL: findings.filter(f => f.severity === 'CRITICAL'),
HIGH: findings.filter(f => f.severity === 'HIGH'),
MEDIUM: findings.filter(f => f.severity === 'MEDIUM'),
LOW: findings.filter(f => f.severity === 'LOW')
}
// Group by severity and dimension
const bySeverity = Object.groupBy(findings, f => f.severity)
const byDimension = Object.groupBy(findings, f => f.dimension)
// Organize findings by dimension
const byDimension = findings.reduce((acc, f) => {
acc[f.dimension] = acc[f.dimension] || []
acc[f.dimension].push(f)
return acc
}, {})
// Dimension metadata (from schema)
const DIMS = {
A: "User Intent Alignment", B: "Requirements Coverage", C: "Consistency Validation",
D: "Dependency Integrity", E: "Synthesis Alignment", F: "Task Specification Quality",
G: "Duplication Detection", H: "Feasibility Assessment"
}
```
**Step 5.2: Generate Markdown Report**
### 5. Generate Report
Output a Markdown report with the following structure:
```javascript
// Helper: render dimension section
const renderDimension = (dim) => {
const items = byDimension[dim] || []
return items.length > 0
? items.map(f => `### ${f.id}: ${f.summary}\n- **Severity**: ${f.severity}\n- **Location**: ${f.location.join(', ')}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
: `> ✅ No ${DIMS[dim]} issues detected.`
}
```markdown
// Helper: render severity section
const renderSeverity = (severity, impact) => {
const items = bySeverity[severity] || []
return items.length > 0
? items.map(f => `#### ${f.id}: ${f.summary}\n- **Dimension**: ${f.dimension_name}\n- **Location**: ${f.location.join(', ')}\n- **Impact**: ${impact}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
: `> ✅ No ${severity.toLowerCase()}-severity issues detected.`
}
// Build Markdown report
const fullReport = `
# Plan Verification Report
**Session**: WFS-${session_id}
**Generated**: ${timestamp}
**Verification Tiers Completed**: ${verification_tiers_completed.join(', ')}
**Artifacts Analyzed**: role analysis documents, IMPL_PLAN.md, ${task_files_count} task files
**Session**: WFS-${session_id} | **Generated**: ${timestamp}
**Tiers Completed**: ${verification_tiers_completed.join(', ')}
---
## Executive Summary
### Quality Gate Decision
| Metric | Value | Status |
|--------|-------|--------|
| Overall Risk Level | ${critical_count > 0 ? 'CRITICAL' : high_count > 0 ? 'HIGH' : medium_count > 0 ? 'MEDIUM' : 'LOW'} | ${critical_count > 0 ? '🔴' : high_count > 0 ? '🟠' : medium_count > 0 ? '🟡' : '🟢'} |
| Critical Issues | ${critical_count} | 🔴 |
| High Issues | ${high_count} | 🟠 |
| Medium Issues | ${medium_count} | 🟡 |
| Low Issues | ${low_count} | 🟢 |
| Requirements Coverage | ${coverage_percentage}% | ${coverage_percentage >= 90 ? '🟢' : coverage_percentage >= 75 ? '🟡' : '🔴'} |
| Risk Level | ${critical_count > 0 ? 'CRITICAL' : high_count > 0 ? 'HIGH' : medium_count > 0 ? 'MEDIUM' : 'LOW'} | ${critical_count > 0 ? '🔴' : high_count > 0 ? '🟠' : medium_count > 0 ? '🟡' : '🟢'} |
| Critical/High/Medium/Low | ${critical_count}/${high_count}/${medium_count}/${low_count} | |
| Coverage | ${coverage_percentage}% | ${coverage_percentage >= 90 ? '🟢' : coverage_percentage >= 75 ? '🟡' : '🔴'} |
### Recommendation
**${recommendation}**
**Decision Rationale**:
${
recommendation === 'BLOCK_EXECUTION' ?
`Critical issues detected that violate core requirements or user intent. Must be resolved before implementation.` :
recommendation === 'PROCEED_WITH_FIXES' ?
`No critical issues, but high-severity concerns exist. Recommended to fix before execution to ensure quality.` :
recommendation === 'PROCEED_WITH_CAUTION' ?
`Medium-severity issues detected. May proceed but address concerns during/after implementation.` :
`No significant issues detected. Safe to proceed with implementation.`
}
**Quality Gate Criteria**:
- **BLOCK_EXECUTION**: Critical issues > 0 (must fix before proceeding)
- **PROCEED_WITH_FIXES**: Critical = 0, High > 0 (fix recommended before execution)
- **PROCEED_WITH_CAUTION**: Critical = 0, High = 0, Medium > 0 (proceed with awareness)
- **PROCEED**: Only Low issues or None (safe to execute)
**Recommendation**: **${recommendation}**
---
## Findings Summary
| ID | Dimension | Severity | Location(s) | Summary | Recommendation |
|----|-----------|----------|-------------|---------|----------------|
${findings.map(f => `| ${f.id} | ${f.dimension_name} | ${f.severity} | ${f.location.join(', ')} | ${f.summary} | ${f.recommendation} |`).join('\n')}
(IDs prefixed by severity initial: C/H/M/L + number)
| ID | Dimension | Severity | Location | Summary |
|----|-----------|----------|----------|---------|
${findings.map(f => `| ${f.id} | ${f.dimension_name} | ${f.severity} | ${f.location.join(', ')} | ${f.summary} |`).join('\n')}
---
## User Intent Alignment Analysis (Dimension A)
## Analysis by Dimension
${
byDimension['A'] && byDimension['A'].length > 0 ?
byDimension['A'].map(f => `
### ${f.summary}
**Severity**: ${f.severity}
**Location**: ${f.location.join(', ')}
**Issue Description**:
${f.summary}
**Recommendation**:
${f.recommendation}
`).join('\n') :
`> ✅ No user intent alignment issues detected. IMPL_PLAN objectives and scope match user's original intent.`
}
${['A','B','C','D','E','F','G','H'].map(d => `### ${d}. ${DIMS[d]}\n\n${renderDimension(d)}`).join('\n\n---\n\n')}
---
## Requirements Coverage Analysis (Dimension B)
## Findings by Severity
### Coverage Metrics
### CRITICAL (${critical_count})
${renderSeverity('CRITICAL', 'Blocks execution')}
| Metric | Value |
|--------|-------|
| Overall Coverage | ${coverage_percentage}% |
| Total Findings | ${byDimension['B']?.length || 0} |
### HIGH (${high_count})
${renderSeverity('HIGH', 'Fix before execution recommended')}
### Findings
### MEDIUM (${medium_count})
${renderSeverity('MEDIUM', 'Address during/after implementation')}
${
byDimension['B'] && byDimension['B'].length > 0 ?
byDimension['B'].map(f => `
#### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ All synthesis requirements have corresponding tasks. No coverage gaps detected.`
}
### LOW (${low_count})
${renderSeverity('LOW', 'Optional improvement')}
---
## Consistency Validation (Dimension C)
## Next Steps
${
byDimension['C'] && byDimension['C'].length > 0 ?
byDimension['C'].map(f => `
### ${f.id}: ${f.summary}
${recommendation === 'BLOCK_EXECUTION' ? '🛑 **BLOCK**: Fix critical issues → Re-verify' :
recommendation === 'PROCEED_WITH_FIXES' ? '⚠️ **FIX RECOMMENDED**: Address high issues → Re-verify or Execute' :
'✅ **READY**: Proceed to /workflow:execute'}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No consistency issues detected. Tasks align with synthesis requirements and architecture.`
}
---
## Dependency Integrity (Dimension D)
${
byDimension['D'] && byDimension['D'].length > 0 ?
byDimension['D'].map(f => `
### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No dependency issues detected. All task dependencies are valid and logically ordered.`
}
---
## Synthesis Alignment (Dimension E)
${
byDimension['E'] && byDimension['E'].length > 0 ?
byDimension['E'].map(f => `
### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No synthesis alignment issues. Task priorities and success criteria match synthesis specifications.`
}
---
## Task Specification Quality (Dimension F)
${
byDimension['F'] && byDimension['F'].length > 0 ?
byDimension['F'].map(f => `
### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ All tasks have clear specifications with proper focus_paths, acceptance criteria, and flow control.`
}
---
## Duplication Detection (Dimension G)
${
byDimension['G'] && byDimension['G'].length > 0 ?
byDimension['G'].map(f => `
### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No duplicate task scopes detected. All tasks have distinct responsibilities.`
}
---
## Feasibility Assessment (Dimension H)
${
byDimension['H'] && byDimension['H'].length > 0 ?
byDimension['H'].map(f => `
### ${f.id}: ${f.summary}
- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No feasibility concerns. Task complexity assessments and resource allocations are appropriate.`
}
---
## Detailed Findings by Severity
### CRITICAL Issues (${critical_count})
${
bySeverity.CRITICAL.length > 0 ?
bySeverity.CRITICAL.map(f => `
#### ${f.id}: ${f.summary}
- **Dimension**: ${f.dimension_name} (${f.dimension})
- **Location**: ${f.location.join(', ')}
- **Impact**: Blocks execution - must be resolved before implementation
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No critical issues detected.`
}
### HIGH Issues (${high_count})
${
bySeverity.HIGH.length > 0 ?
bySeverity.HIGH.map(f => `
#### ${f.id}: ${f.summary}
- **Dimension**: ${f.dimension_name} (${f.dimension})
- **Location**: ${f.location.join(', ')}
- **Impact**: Significant quality concern - recommended to fix before execution
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No high-severity issues detected.`
}
### MEDIUM Issues (${medium_count})
${
bySeverity.MEDIUM.length > 0 ?
bySeverity.MEDIUM.map(f => `
#### ${f.id}: ${f.summary}
- **Dimension**: ${f.dimension_name} (${f.dimension})
- **Location**: ${f.location.join(', ')}
- **Impact**: Quality improvement opportunity - address during/after implementation
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No medium-severity issues detected.`
}
### LOW Issues (${low_count})
${
bySeverity.LOW.length > 0 ?
bySeverity.LOW.map(f => `
#### ${f.id}: ${f.summary}
- **Dimension**: ${f.dimension_name} (${f.dimension})
- **Location**: ${f.location.join(', ')}
- **Impact**: Minor improvement - optional
- **Recommendation**: ${f.recommendation}
`).join('\n') :
`> ✅ No low-severity issues detected.`
}
---
## Metrics Summary
| Metric | Value |
|--------|-------|
| Requirements Coverage | ${coverage_percentage}% |
| Total Findings | ${total_findings} |
| Critical Issues | ${critical_count} |
| High Issues | ${high_count} |
| Medium Issues | ${medium_count} |
| Low Issues | ${low_count} |
| Verification Tiers Completed | ${verification_tiers_completed.join(', ')} |
---
## Remediation Recommendations
### Priority Order
1. **CRITICAL** - Must fix before proceeding
2. **HIGH** - Fix before execution
3. **MEDIUM** - Fix during or after implementation
4. **LOW** - Optional improvements
### Next Steps
Based on the quality gate recommendation (**${recommendation}**):
${
recommendation === 'BLOCK_EXECUTION' ?
`
**🛑 BLOCK EXECUTION**
You must resolve all CRITICAL issues before proceeding with implementation:
1. Review each critical issue in detail (see section "CRITICAL Issues" above)
2. Determine remediation approach:
- Modify IMPL_PLAN.md for goal/scope conflicts
- Update task.json for requirement misalignments
- Add new tasks for coverage gaps
- Fix dependencies for circular/broken references
3. Apply fixes systematically
4. Re-run verification to confirm resolution: \`/workflow:plan-verify --session ${session_id}\`
` :
recommendation === 'PROCEED_WITH_FIXES' ?
`
**⚠️ PROCEED WITH FIXES RECOMMENDED**
No critical issues detected, but HIGH issues exist. Recommended workflow:
1. Review high-priority issues (see section "HIGH Issues" above)
2. Apply fixes before execution for optimal results:
- Use IMPL_PLAN.md for architecture/priority misalignments
- Update task.json for specification improvements
- Add missing dependencies or risk mitigation tasks
3. Re-run verification to confirm resolution: \`/workflow:plan-verify --session ${session_id}\`
4. Proceed to implementation when ready
` :
recommendation === 'PROCEED_WITH_CAUTION' ?
`
**✅ PROCEED WITH CAUTION**
Only MEDIUM issues detected. You may proceed with implementation:
- Review medium-severity issues (see section "MEDIUM Issues" above)
- Address concerns during or after implementation
- Maintain awareness of identified concerns
- Schedule remediation for future improvement cycles
` :
`
**✅ PROCEED**
No significant issues detected. Safe to execute implementation workflow:
- Requirements fully covered
- User intent aligned
- Dependencies valid and logically ordered
- All tasks properly specified
- Ready for immediate execution
Re-verify: \`/workflow:plan-verify --session ${session_id}\`
Execute: \`/workflow:execute --resume-session="${session_id}"\`
`
}
---
**Report End**
\`\`\`
### 6. Save and Display Report
**Step 6.1: Generate Complete Markdown Report**
```javascript
// Build complete report from template above using findings data
const fullReport = \`
# Plan Verification Report
... [complete markdown template generated above] ...
\`
// Write report to file
const reportPath = \`${process_dir}/PLAN_VERIFICATION.md\`
Write(reportPath, fullReport)
// Write report
Write(`${process_dir}/PLAN_VERIFICATION.md`, fullReport)
console.log(`✅ Report: ${process_dir}/PLAN_VERIFICATION.md\n📊 ${recommendation} | C:${critical_count} H:${high_count} M:${medium_count} L:${low_count} | Coverage:${coverage_percentage}%`)
```
**Step 6.2: Display Summary to User**
```javascript
console.log(\`
=== Plan Verification Complete ===
Report saved to: ${reportPath}
Quality Gate: \${recommendation}
Critical: \${critical_count} | High: \${high_count} | Medium: \${medium_count} | Low: \${low_count}
Coverage: \${coverage_percentage}%
Next: Review full report at ${reportPath} for detailed findings and recommendations
\`)
```
**Step 6.3: Next Step Selection**
### 6. Next Step Selection
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')


@@ -1,303 +0,0 @@
# CCW Loop Skill
A stateless iterative development loop workflow supporting three phases (Develop, Debug, Validate), each of which records its progress in its own files.
## Overview
CCW Loop is an autonomous-mode Skill that drives a file-based stateless loop to help developers complete development tasks systematically.
### Core Features
1. **Stateless loop**: every run reads state from files instead of relying on memory
2. **File-driven**: all progress is recorded in Markdown files, so it is auditable and reviewable
3. **Gemini-assisted**: key decision points use CLI tools for deep analysis
4. **Resumable**: the loop can be continued after an interruption at any point
5. **Dual mode**: supports interactive and automatic looping
### The Three Phases
- **Develop**: task breakdown → code implementation → progress logging
- **Debug**: hypothesis generation → evidence collection → root-cause analysis → fix verification
- **Validate**: test execution → coverage check → quality assessment
## Installation
Included in `.claude/skills/ccw-loop/`; no additional installation is required.
## Usage
### 基本用法
```bash
# Start a new loop
/ccw-loop "Implement user authentication"
# Resume an existing loop
/ccw-loop --resume LOOP-auth-2026-01-22
# Automatic loop mode
/ccw-loop --auto "Fix the login bug and add tests"
```
### Interactive Flow
```
1. Start: /ccw-loop "task description"
2. Initialize: the task is analyzed automatically and a subtask list is generated
3. Show menu:
   - 📝 Continue development (Develop)
   - 🔍 Start debugging (Debug)
   - ✅ Run validation (Validate)
   - 📊 View details (Status)
   - 🏁 Complete the loop (Complete)
   - 🚪 Quit (Exit)
4. Execute the selected action
5. Repeat steps 3-4 until the loop is complete
```
### Automatic Loop Flow
```
Develop (all tasks) → Debug (if needed) → Validate → Done
```
## Directory Structure
```
.workflow/.loop/{session-id}/
├── meta.json              # session metadata (immutable)
├── state.json             # current state (updated on every run)
├── summary.md             # completion report (generated at the end)
├── develop/
│   ├── progress.md        # development progress timeline
│   ├── tasks.json         # task list
│   └── changes.log        # code change log (NDJSON)
├── debug/
│   ├── understanding.md   # understanding evolution document
│   ├── hypotheses.json    # hypothesis history
│   └── debug.log          # debug log (NDJSON)
└── validate/
    ├── validation.md      # validation report
    ├── test-results.json  # test results
    └── coverage.json      # coverage data
```
## Action Reference
| Action | Description | Trigger |
|--------|-------------|---------|
| action-init | Initialize the session | First launch |
| action-menu | Show the action menu | Every iteration in interactive mode |
| action-develop-with-file | Execute development tasks | Pending tasks exist |
| action-debug-with-file | Hypothesis-driven debugging | Debugging is needed |
| action-validate-with-file | Run test validation | Validation is needed |
| action-complete | Finish and generate the report | All tasks completed |
See [specs/action-catalog.md](specs/action-catalog.md) for details.
## CLI Integration
CCW Loop invokes CLI tools at key decision points:
### Task Breakdown (action-init)
```bash
ccw cli -p "PURPOSE: Break down the development task..."
--tool gemini
--mode analysis
--rule planning-breakdown-task-steps
```
### Code Implementation (action-develop)
```bash
ccw cli -p "PURPOSE: Implement the feature code..."
--tool gemini
--mode write
--rule development-implement-feature
```
### Hypothesis Generation (action-debug - exploration)
```bash
ccw cli -p "PURPOSE: Generate debugging hypotheses..."
--tool gemini
--mode analysis
--rule analysis-diagnose-bug-root-cause
```
### Evidence Analysis (action-debug - analysis)
```bash
ccw cli -p "PURPOSE: Analyze debug log evidence..."
--tool gemini
--mode analysis
--rule analysis-diagnose-bug-root-cause
```
### Quality Assessment (action-validate)
```bash
ccw cli -p "PURPOSE: Analyze test results and coverage..."
--tool gemini
--mode analysis
--rule analysis-review-code-quality
```
## State Management
### State Schema
See [phases/state-schema.md](phases/state-schema.md).
### State Transitions
```
pending → running → completed
user_exit
failed
```
### State Recovery
If `state.json` becomes corrupted, it can be rebuilt from the other files:
- develop/tasks.json → develop.*
- debug/hypotheses.json → debug.*
- validate/test-results.json → validate.*
## Examples
### Example 1: Feature Development
```bash
# 1. Start the loop
/ccw-loop "Add user profile page"
# 2. The system initializes and generates tasks:
# - task-001: Create profile component
# - task-002: Add API endpoints
# - task-003: Implement tests
# 3. Choose "Continue development"
# → Execute task-001 (Gemini-assisted implementation)
# → Update progress.md
# 4. Repeat development until all tasks are complete
# 5. Choose "Run validation"
# → Run the tests
# → Check coverage
# → Generate validation.md
# 6. Choose "Complete the loop"
# → Generate summary.md
# → Ask whether to expand into an Issue
```
### Example 2: Bug Fix
```bash
# 1. Start the loop
/ccw-loop "Fix login timeout issue"
# 2. Choose "Start debugging"
# → Enter the bug description: "Login times out after 30s"
# → Gemini generates hypotheses (H1, H2, H3)
# → Add NDJSON logging
# → Prompt to reproduce the bug
# 3. Reproduce the bug (by using the application)
# 4. Choose "Start debugging" again
# → Parse debug.log
# → Gemini analyzes the evidence
# → H2 is confirmed as the root cause
# → Generate the fix
# → Update understanding.md
# 5. Choose "Run validation"
# → Tests pass
# 6. Done
```
## Templates
- [progress-template.md](templates/progress-template.md): development progress document template
- [understanding-template.md](templates/understanding-template.md): debugging understanding document template
- [validation-template.md](templates/validation-template.md): validation report template
## Specifications
- [loop-requirements.md](specs/loop-requirements.md): loop requirements specification
- [action-catalog.md](specs/action-catalog.md): action catalog
## Integration
### Dashboard Integration
CCW Loop integrates with the Dashboard Loop Monitor:
- The Dashboard creates a Loop → triggers this Skill
- state.json → shown in the Dashboard in real time
- The task list is synchronized in both directions
- Control buttons map to actions
### Issue System Integration
After completion, the loop can be expanded into an Issue:
- Dimensions: test, enhance, refactor, doc
- Automatically invokes `/issue:new`
- Context is filled in automatically
## Error Handling
| Situation | Handling |
|-----------|----------|
| Session does not exist | Create a new session |
| state.json corrupted | Rebuild from files |
| CLI tool fails | Fall back to manual mode |
| Tests fail | Loop back to develop/debug |
| >10 iterations | Warn the user and suggest splitting the task |
## Limitations
1. **Single session**: only one active session is allowed at a time
2. **Iteration limit**: no more than 10 iterations are recommended
3. **CLI dependency**: some features depend on the Gemini CLI being available
4. **Test framework**: requires test scripts defined in package.json
## Troubleshooting
### Q: How do I view the current session status?
A: Choose "View details (Status)" from the menu.
### Q: How do I resume an interrupted session?
A: Use the `--resume` flag:
```bash
/ccw-loop --resume LOOP-xxx-2026-01-22
```
### Q: What happens if a CLI tool fails?
A: The Skill automatically falls back to manual mode and prompts the user for manual input.
### Q: How do I add a custom action?
A: See the "Action Extensions" section of [specs/action-catalog.md](specs/action-catalog.md).
## Contributing
To add a new feature:
1. Create an action file in `phases/actions/`
2. Update the orchestrator decision logic
3. Add it to action-catalog.md
4. Update action-menu.md
## License
MIT
---
**Version**: 1.0.0
**Last Updated**: 2026-01-22
**Author**: CCW Team


@@ -0,0 +1,47 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Plan Verification Agent Schema",
"description": "Defines dimensions, severity rules, and CLI templates for plan verification agent",
"dimensions": {
"A": { "name": "User Intent Alignment", "tier": 1, "severity": "CRITICAL",
"checks": ["Goal Alignment", "Scope Drift", "Success Criteria Match", "Intent Conflicts"] },
"B": { "name": "Requirements Coverage", "tier": 1, "severity": "CRITICAL",
"checks": ["Orphaned Requirements", "Unmapped Tasks", "NFR Coverage Gaps"] },
"C": { "name": "Consistency Validation", "tier": 1, "severity": "CRITICAL",
"checks": ["Requirement Conflicts", "Architecture Drift", "Terminology Drift", "Data Model Inconsistency"] },
"D": { "name": "Dependency Integrity", "tier": 2, "severity": "HIGH",
"checks": ["Circular Dependencies", "Missing Dependencies", "Broken Dependencies", "Logical Ordering"] },
"E": { "name": "Synthesis Alignment", "tier": 2, "severity": "HIGH",
"checks": ["Priority Conflicts", "Success Criteria Mismatch", "Risk Mitigation Gaps"] },
"F": { "name": "Task Specification Quality", "tier": 3, "severity": "MEDIUM",
"checks": ["Ambiguous Focus Paths", "Underspecified Acceptance", "Missing Artifacts", "Weak Flow Control"] },
"G": { "name": "Duplication Detection", "tier": 4, "severity": "LOW",
"checks": ["Overlapping Task Scope", "Redundant Coverage"] },
"H": { "name": "Feasibility Assessment", "tier": 4, "severity": "LOW",
"checks": ["Complexity Misalignment", "Resource Conflicts", "Skill Gap Risks"] }
},
"tiers": {
"1": { "dimensions": ["A", "B", "C"], "priority": "CRITICAL", "limit": null, "rule": "analysis-review-architecture" },
"2": { "dimensions": ["D", "E"], "priority": "HIGH", "limit": 15, "rule": "analysis-diagnose-bug-root-cause" },
"3": { "dimensions": ["F"], "priority": "MEDIUM", "limit": 20, "rule": "analysis-analyze-code-patterns" },
"4": { "dimensions": ["G", "H"], "priority": "LOW", "limit": 15, "rule": "analysis-analyze-code-patterns" }
},
"severity_rules": {
"CRITICAL": ["User intent violation", "Synthesis authority violation", "Zero coverage", "Circular/broken deps"],
"HIGH": ["NFR gaps", "Priority conflicts", "Missing risk mitigation"],
"MEDIUM": ["Terminology drift", "Missing refs", "Weak flow control"],
"LOW": ["Style improvements", "Minor redundancy"]
},
"quality_gate": {
"BLOCK_EXECUTION": { "condition": "critical > 0", "emoji": "🛑" },
"PROCEED_WITH_FIXES": { "condition": "critical == 0 && high > 0", "emoji": "⚠️" },
"PROCEED_WITH_CAUTION": { "condition": "critical == 0 && high == 0 && medium > 0", "emoji": "✅" },
"PROCEED": { "condition": "only low or none", "emoji": "✅" }
},
"token_budget": { "total_findings": 50, "early_exit": "CRITICAL > 0 in Tier 1 → skip Tier 3-4" }
}


@@ -0,0 +1,158 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Plan Verification Findings Schema",
"description": "Schema for plan verification findings output from cli-explore-agent",
"type": "object",
"required": [
"session_id",
"timestamp",
"verification_tiers_completed",
"findings",
"summary"
],
"properties": {
"session_id": {
"type": "string",
"description": "Workflow session ID (e.g., WFS-20250127-143000)",
"pattern": "^WFS-[0-9]{8}-[0-9]{6}$"
},
"timestamp": {
"type": "string",
"description": "ISO 8601 timestamp when verification was completed",
"format": "date-time"
},
"verification_tiers_completed": {
"type": "array",
"description": "List of verification tiers completed (e.g., ['Tier 1', 'Tier 2'])",
"items": {
"type": "string",
"enum": ["Tier 1", "Tier 2", "Tier 3", "Tier 4"]
},
"minItems": 1,
"maxItems": 4
},
"findings": {
"type": "array",
"description": "Array of all findings across all dimensions",
"items": {
"type": "object",
"required": [
"id",
"dimension",
"dimension_name",
"severity",
"location",
"summary",
"recommendation"
],
"properties": {
"id": {
"type": "string",
"description": "Unique finding ID prefixed by severity (C1, H1, M1, L1)",
"pattern": "^[CHML][0-9]+$"
},
"dimension": {
"type": "string",
"description": "Verification dimension identifier",
"enum": ["A", "B", "C", "D", "E", "F", "G", "H"]
},
"dimension_name": {
"type": "string",
"description": "Human-readable dimension name",
"enum": [
"User Intent Alignment",
"Requirements Coverage Analysis",
"Consistency Validation",
"Dependency Integrity",
"Synthesis Alignment",
"Task Specification Quality",
"Duplication Detection",
"Feasibility Assessment"
]
},
"severity": {
"type": "string",
"description": "Severity level of the finding",
"enum": ["CRITICAL", "HIGH", "MEDIUM", "LOW"]
},
"location": {
"type": "array",
"description": "Array of locations where issue was found (e.g., 'IMPL_PLAN.md:L45', 'task:IMPL-1.2', 'synthesis:FR-03')",
"items": {
"type": "string"
},
"minItems": 1
},
"summary": {
"type": "string",
"description": "Concise summary of the issue (1-2 sentences)",
"minLength": 10,
"maxLength": 500
},
"recommendation": {
"type": "string",
"description": "Actionable recommendation to resolve the issue",
"minLength": 10,
"maxLength": 500
}
}
}
},
"summary": {
"type": "object",
"description": "Aggregate summary of verification results",
"required": [
"critical_count",
"high_count",
"medium_count",
"low_count",
"total_findings",
"coverage_percentage",
"recommendation"
],
"properties": {
"critical_count": {
"type": "integer",
"description": "Number of critical severity findings",
"minimum": 0
},
"high_count": {
"type": "integer",
"description": "Number of high severity findings",
"minimum": 0
},
"medium_count": {
"type": "integer",
"description": "Number of medium severity findings",
"minimum": 0
},
"low_count": {
"type": "integer",
"description": "Number of low severity findings",
"minimum": 0
},
"total_findings": {
"type": "integer",
"description": "Total number of findings",
"minimum": 0
},
"coverage_percentage": {
"type": "number",
"description": "Percentage of synthesis requirements covered by tasks (0-100)",
"minimum": 0,
"maximum": 100
},
"recommendation": {
"type": "string",
"description": "Quality gate recommendation",
"enum": [
"BLOCK_EXECUTION",
"PROCEED_WITH_FIXES",
"PROCEED_WITH_CAUTION",
"PROCEED"
]
}
}
}
}
}
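A findings file can be checked against this schema with any JSON Schema draft-07 validator; a minimal sketch using Ajv (package availability and file names are assumptions):

```javascript
// Validate a verification-findings.json file against the findings schema (illustrative paths).
const fs = require('fs')
const Ajv = require('ajv')
const addFormats = require('ajv-formats')

const schema = JSON.parse(fs.readFileSync('verify-json-schema.json', 'utf8'))
const findings = JSON.parse(fs.readFileSync('verification-findings.json', 'utf8'))

const ajv = new Ajv({ allErrors: true })
addFormats(ajv)  // provides the "date-time" format used by the timestamp field
const validate = ajv.compile(schema)

if (!validate(findings)) {
  console.error(validate.errors)  // e.g. a session_id pattern mismatch or a missing summary count
  process.exitCode = 1
} else {
  console.log('verification-findings.json conforms to the schema')
}
```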