mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-02-05 01:50:27 +08:00
feat(skills): implement enable/disable functionality for skills
- Added new API endpoints to enable and disable skills.
- Introduced logic to manage disabled skills, including loading and saving configurations.
- Enhanced skills routes to return lists of disabled skills.
- Updated frontend to display disabled skills and allow toggling their status.
- Added internationalization support for new skill status messages.
- Created JSON schemas for the plan verification agent and its findings.
- Defined new TypeScript types for skill management.
@@ -50,11 +50,26 @@ Interactive orchestration tool: analyze task → discover commands → recommend

| **Code Review (Session)** | review-session-cycle → review-fix | Complete review cycle and apply fixes | Fixed code |
| **Code Review (Module)** | review-module-cycle → review-fix | Module review cycle and apply fixes | Fixed code |

**Issue Units**:

| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Issue Workflow** | discover → plan → queue → execute | Complete issue lifecycle | Completed issues |
| **Rapid-to-Issue** | lite-plan → convert-to-plan → queue → execute | Bridge lite workflow to issue workflow | Completed issues |

**With-File Units** (documented units):

| Unit Name | Commands | Purpose | Output |
|-----------|----------|---------|--------|
| **Brainstorm With File** | brainstorm-with-file | Multi-perspective ideation with documentation | brainstorm.md |
| **Debug With File** | debug-with-file | Hypothesis-driven debugging with documentation | understanding.md |
| **Analyze With File** | analyze-with-file | Collaborative analysis with documentation | discussion.md |
### Command-to-Unit Mapping (mapping commands to atomic units)

| Command | Can Precede | Atomic Units |
|---------|-----------|--------------|
| lite-plan | lite-execute | Quick Implementation |
| lite-plan | lite-execute, convert-to-plan | Quick Implementation, Rapid-to-Issue |
| multi-cli-plan | lite-execute | Multi-CLI Planning |
| lite-fix | lite-execute | Bug Fix |
| plan | plan-verify, execute | Full Planning + Execution, Verified Planning + Execution |
@@ -65,6 +80,13 @@ Interactive orchestration tool: analyze task → discover commands → recommend
| review-session-cycle | review-fix | Code Review (Session) |
| review-module-cycle | review-fix | Code Review (Module) |
| test-fix-gen | test-cycle-execute | Test Validation |
| issue:discover | issue:plan | Issue Workflow |
| issue:plan | issue:queue | Issue Workflow |
| convert-to-plan | issue:queue | Rapid-to-Issue |
| issue:queue | issue:execute | Issue Workflow, Rapid-to-Issue |
| brainstorm-with-file | (standalone) | Brainstorm With File |
| debug-with-file | (standalone) | Debug With File |
| analyze-with-file | (standalone) | Analyze With File |

### Atomic Group Rules
@@ -105,6 +127,13 @@ function detectTaskType(text) {
  if (/测试失败|test fail|fix test|failing test/.test(text)) return 'test-fix';
  if (/generate test|写测试|add test|补充测试/.test(text)) return 'test-gen';
  if (/review|审查|code review/.test(text)) return 'review';
  // Issue workflow patterns
  if (/issues?.*batch|batch.*issues?|批量.*issue|issue.*批量/.test(text)) return 'issue-batch';
  if (/issue workflow|structured workflow|queue|multi-stage|转.*issue|issue.*流程/.test(text)) return 'issue-transition';
  // With-File workflow patterns
  if (/brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking/.test(text)) return 'brainstorm-file';
  if (/debug.*document|hypothesis.*debug|深度调试|假设.*验证|systematic debug/.test(text)) return 'debug-file';
  if (/analyze.*document|collaborative analysis|协作分析|深度.*理解/.test(text)) return 'analyze-file';
  if (/不确定|explore|研究|what if|brainstorm|权衡/.test(text)) return 'brainstorm';
  if (/多视角|比较方案|cross-verify|multi-cli/.test(text)) return 'multi-cli';
  return 'feature'; // Default
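A small usage sketch of the classifier above (illustrative only: the call site and example string are assumptions, and branches of the function outside this hunk may match first):

```javascript
// Sketch: classify free-form task text (hypothetical call site).
// Routing also depends on branches of detectTaskType outside this hunk,
// so the expected value is indicative only.
const type = detectTaskType('头脑风暴: redesign the notification system');
console.log(type); // expected 'brainstorm-file' given the patterns shown above
```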
@@ -285,6 +314,66 @@ const commandPorts = {
    output: ['review-verified'],   // Output port: review passed
    tags: ['review'],
    atomic_group: 'code-review'    // Atomic unit: bound to review-fix
  },

  // Issue workflow commands
  'issue:discover': {
    name: 'issue:discover',
    input: ['codebase'],           // Input port: codebase
    output: ['pending-issues'],    // Output port: pending issues
    tags: ['issue'],
    atomic_group: 'issue-workflow' // Atomic unit: discover → plan → queue → execute
  },
  'issue:plan': {
    name: 'issue:plan',
    input: ['pending-issues'],     // Input port: pending issues
    output: ['issue-plans'],       // Output port: issue plans
    tags: ['issue'],
    atomic_group: 'issue-workflow'
  },
  'issue:queue': {
    name: 'issue:queue',
    input: ['issue-plans', 'converted-plan'],  // Accepts output from issue:plan or convert-to-plan
    output: ['execution-queue'],   // Output port: execution queue
    tags: ['issue'],
    atomic_groups: ['issue-workflow', 'rapid-to-issue']
  },
  'issue:execute': {
    name: 'issue:execute',
    input: ['execution-queue'],    // Input port: execution queue
    output: ['completed-issues'],  // Output port: completed issues
    tags: ['issue'],
    atomic_groups: ['issue-workflow', 'rapid-to-issue']
  },
  'issue:convert-to-plan': {
    name: 'issue:convert-to-plan',
    input: ['plan'],               // Input port: lite-plan output
    output: ['converted-plan'],    // Output port: converted issue plan
    tags: ['issue', 'planning'],
    atomic_group: 'rapid-to-issue' // Atomic unit: lite-plan → convert-to-plan → queue → execute
  },

  // With-File workflows (documented exploration with multi-CLI collaboration)
  'brainstorm-with-file': {
    name: 'brainstorm-with-file',
    input: ['exploration-topic'],  // Input port: exploration topic
    output: ['brainstorm-document'],  // Output port: brainstorm.md + synthesized conclusions
    tags: ['brainstorm', 'with-file'],
    note: 'Self-contained workflow with multi-round diverge-converge cycles'
  },
  'debug-with-file': {
    name: 'debug-with-file',
    input: ['bug-report'],         // Input port: bug report
    output: ['understanding-document'],  // Output port: understanding.md + fix
    tags: ['bugfix', 'with-file'],
    note: 'Self-contained workflow with hypothesis-driven iteration'
  },
  'analyze-with-file': {
    name: 'analyze-with-file',
    input: ['analysis-topic'],     // Input port: analysis topic
    output: ['discussion-document'],  // Output port: discussion.md + conclusions
    tags: ['analysis', 'with-file'],
    note: 'Self-contained workflow with multi-round discussion'
  }
};
```
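The `input`/`output` arrays act as ports: a chain is valid when each command's declared input can be fed by the previous command's output. A minimal validation sketch under that reading; the helper name and example chain are assumptions, not part of the command source:

```javascript
// Sketch: check that consecutive commands in a chain are port-compatible.
function isChainCompatible(chain, ports = commandPorts) {
  for (let i = 1; i < chain.length; i++) {
    const prev = ports[chain[i - 1]];
    const curr = ports[chain[i]];
    if (!prev || !curr) return false;                       // unknown command
    const connected = curr.input.some(p => prev.output.includes(p));
    if (!connected) return false;                           // no matching port
  }
  return true;
}

// e.g. isChainCompatible(['issue:plan', 'issue:queue', 'issue:execute']) → true
// ('issue-plans' feeds issue:queue, 'execution-queue' feeds issue:execute)
```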
@@ -306,14 +395,21 @@ async function recommendCommandChain(analysis) {

// Port flow for each task type
function determinePortFlow(taskType, constraints) {
  const flows = {
    'bugfix': { inputPort: 'bug-report', outputPort: constraints?.includes('skip-tests') ? 'fixed-code' : 'test-passed' },
    'tdd': { inputPort: 'requirement', outputPort: 'tdd-verified' },
    'test-fix': { inputPort: 'failing-tests', outputPort: 'test-passed' },
    'test-gen': { inputPort: 'code', outputPort: 'test-passed' },
    'review': { inputPort: 'code', outputPort: 'review-verified' },
    'brainstorm': { inputPort: 'exploration-topic', outputPort: 'test-passed' },
    'multi-cli': { inputPort: 'requirement', outputPort: 'test-passed' },
    'feature': { inputPort: 'requirement', outputPort: constraints?.includes('skip-tests') ? 'code' : 'test-passed' }
    'bugfix': { inputPort: 'bug-report', outputPort: constraints?.includes('skip-tests') ? 'fixed-code' : 'test-passed' },
    'tdd': { inputPort: 'requirement', outputPort: 'tdd-verified' },
    'test-fix': { inputPort: 'failing-tests', outputPort: 'test-passed' },
    'test-gen': { inputPort: 'code', outputPort: 'test-passed' },
    'review': { inputPort: 'code', outputPort: 'review-verified' },
    'brainstorm': { inputPort: 'exploration-topic', outputPort: 'test-passed' },
    'multi-cli': { inputPort: 'requirement', outputPort: 'test-passed' },
    // Issue workflow types
    'issue-batch': { inputPort: 'codebase', outputPort: 'completed-issues' },
    'issue-transition': { inputPort: 'requirement', outputPort: 'completed-issues' },
    // With-File workflow types
    'brainstorm-file': { inputPort: 'exploration-topic', outputPort: 'brainstorm-document' },
    'debug-file': { inputPort: 'bug-report', outputPort: 'understanding-document' },
    'analyze-file': { inputPort: 'analysis-topic', outputPort: 'discussion-document' },
    'feature': { inputPort: 'requirement', outputPort: constraints?.includes('skip-tests') ? 'code' : 'test-passed' }
  };
  return flows[taskType] || flows['feature'];
}
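A short usage sketch of `determinePortFlow`; the constraint values shown are assumptions for illustration:

```javascript
// Sketch: resolve the port flow for a detected task type.
const flow = determinePortFlow('issue-transition', []);
// → { inputPort: 'requirement', outputPort: 'completed-issues' }

// 'skip-tests' relaxes the output port; unknown types fall back to 'feature'.
const quick = determinePortFlow('feature', ['skip-tests']);
// → quick.outputPort === 'code' instead of 'test-passed'
```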
@@ -553,6 +649,34 @@ function formatCommand(cmd, previousResults, analysis) {
  } else if (name.includes('test') || name.includes('review') || name.includes('verify')) {
    const latest = previousResults.filter(r => r.session_id).pop();
    if (latest?.session_id) prompt += ` --session="${latest.session_id}"`;

  // Issue workflow commands
  } else if (name === 'issue:discover') {
    // No parameters needed - discovers from codebase
    prompt = `/issue:discover -y`;

  } else if (name === 'issue:plan') {
    prompt = `/issue:plan -y --all-pending`;

  } else if (name === 'issue:queue') {
    prompt = `/issue:queue -y`;

  } else if (name === 'issue:execute') {
    prompt = `/issue:execute -y --queue auto`;

  } else if (name === 'issue:convert-to-plan' || name === 'convert-to-plan') {
    // Convert latest lite-plan to issue plan
    prompt = `/issue:convert-to-plan -y --latest-lite-plan`;

  // With-File workflows (self-contained)
  } else if (name === 'brainstorm-with-file') {
    prompt = `/workflow:brainstorm-with-file -y "${analysis.goal}"`;

  } else if (name === 'debug-with-file') {
    prompt = `/workflow:debug-with-file -y "${analysis.goal}"`;

  } else if (name === 'analyze-with-file') {
    prompt = `/workflow:analyze-with-file -y "${analysis.goal}"`;
  }

  return prompt;
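A hedged sketch of how a recommended chain might be rendered into concrete prompts with `formatCommand`; the chain items are shown as bare names and the `analysis` object is an assumption, while the real caller may pass richer command objects:

```javascript
// Sketch: turn a recommended chain into slash-command prompts.
// Chain items, analysis object, and result list are illustrative assumptions.
const analysis = { goal: 'Bridge the lite plan into the issue workflow' };
const chain = ['issue:convert-to-plan', 'issue:queue', 'issue:execute'];
const previousResults = []; // filled in as earlier commands complete

const prompts = chain.map(cmd => formatCommand(cmd, previousResults, analysis));
// Expected, given the branches above:
// ['/issue:convert-to-plan -y --latest-lite-plan', '/issue:queue -y', '/issue:execute -y --queue auto']
```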
@@ -904,7 +1028,7 @@ break; // ⚠️ STOP HERE - DO NOT use TaskOutput polling

## Available Commands

All from `~/.claude/commands/workflow/`:
All from `~/.claude/commands/workflow/` and `~/.claude/commands/issue/`:

**Planning**: lite-plan, plan, multi-cli-plan, plan-verify, tdd-plan
**Execution**: lite-execute, execute, develop-with-file
@@ -916,6 +1040,8 @@ All from `~/.claude/commands/workflow/`:
**Session Management**: session:start, session:resume, session:complete, session:solidify, session:list
**Tools**: context-gather, test-context-gather, task-generate, conflict-resolution, action-plan-verify
**Utility**: clean, init, replan
**Issue Workflow**: issue:discover, issue:plan, issue:queue, issue:execute, issue:convert-to-plan
**With-File Workflows**: brainstorm-with-file, debug-with-file, analyze-with-file

### Testing Commands Distinction

@@ -944,5 +1070,10 @@ All from `~/.claude/commands/workflow/`:
| **review** | Code →【review-* → review-fix】→ fixed code →【test-fix-gen → test-cycle-execute】→ tests passed | Code Review + Testing |
| **brainstorm** | Exploration topic → brainstorm → analysis →【plan → plan-verify】→ execute → test | Exploration + Planning + Execution |
| **multi-cli** | Requirement → multi-cli-plan → comparative analysis → lite-execute → test | Multi-Perspective + Testing |
| **issue-batch** | Codebase →【discover → plan → queue → execute】→ completed issues | Issue Workflow |
| **issue-transition** | Requirement →【lite-plan → convert-to-plan → queue → execute】→ completed issues | Rapid-to-Issue |
| **brainstorm-file** | Topic → brainstorm-with-file → brainstorm.md (self-contained) | Brainstorm With File |
| **debug-file** | Bug report → debug-with-file → understanding.md (self-contained) | Debug With File |
| **analyze-file** | Analysis topic → analyze-with-file → discussion.md (self-contained) | Analyze With File |

Use `CommandRegistry.getAllCommandsSummary()` to discover all commands dynamically.
@@ -67,11 +67,15 @@ function analyzeIntent(input) {
function detectTaskType(text) {
  const patterns = {
    'bugfix-hotfix': /urgent|production|critical/ && /fix|bug/,
    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm': /brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking|multi-perspective.*think|compare perspectives|探索.*可能/,
    'debug-file': /debug.*document|hypothesis.*debug|troubleshoot.*track|investigate.*log|调试.*记录|假设.*验证|systematic debug|深度调试/,
    'analyze-file': /analyze.*document|explore.*concept|understand.*architecture|investigate.*discuss|collaborative analysis|分析.*讨论|深度.*理解|协作.*分析/,
    // Standard workflows
    'bugfix': /fix|bug|error|crash|fail|debug/,
    'issue-batch': /issues?|batch/ && /fix|resolve/,
    'issue-transition': /issue workflow|structured workflow|queue|multi-stage/,
    'exploration': /uncertain|explore|research|what if/,
    'multi-perspective': /multi-perspective|compare|cross-verify/,
    'quick-task': /quick|simple|small/ && /feature|function/,
    'ui-design': /ui|design|component|style/,
    'tdd': /tdd|test-driven|test first/,
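Note that entries such as `/urgent|production|critical/ && /fix|bug/` read as "both must match", but `&&` between two regex literals in plain JavaScript simply evaluates to the second literal. A hedged sketch of how such compound patterns could be evaluated explicitly; the helper is an assumption, not part of the skill:

```javascript
// Sketch: treat an array of regexes as "all must match", a single regex as-is.
function matchesPattern(pattern, text) {
  const regexes = Array.isArray(pattern) ? pattern : [pattern];
  return regexes.every(re => re.test(text));
}

// e.g. defining the hotfix pattern as an array instead of `&&`:
const hotfix = [/urgent|production|critical/, /fix|bug/];
matchesPattern(hotfix, 'urgent: fix production crash'); // → true
matchesPattern(hotfix, 'urgent question');              // → false
```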
@@ -112,6 +116,11 @@ async function clarifyRequirements(analysis) {
function selectWorkflow(analysis) {
  const levelMap = {
    'bugfix-hotfix': { level: 2, flow: 'bugfix.hotfix' },
    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm': { level: 4, flow: 'brainstorm-with-file' },    // Multi-perspective ideation
    'debug-file': { level: 3, flow: 'debug-with-file' },         // Hypothesis-driven debugging
    'analyze-file': { level: 3, flow: 'analyze-with-file' },     // Collaborative analysis
    // Standard workflows
    'bugfix': { level: 2, flow: 'bugfix.standard' },
    'issue-batch': { level: 'Issue', flow: 'issue' },
    'issue-transition': { level: 2.5, flow: 'rapid-to-issue' },  // Bridge workflow
@@ -191,6 +200,22 @@ function buildCommandChain(workflow, analysis) {
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' }
    ],

    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm-with-file': [
      { cmd: '/workflow:brainstorm-with-file', args: `"${analysis.goal}"` }
      // Note: Has built-in post-completion options (create plan, create issue, deep analysis)
    ],

    'debug-with-file': [
      { cmd: '/workflow:debug-with-file', args: `"${analysis.goal}"` }
      // Note: Self-contained with hypothesis-driven iteration and Gemini validation
    ],

    'analyze-with-file': [
      { cmd: '/workflow:analyze-with-file', args: `"${analysis.goal}"` }
      // Note: Self-contained with multi-round discussion and CLI exploration
    ],

    // Level 3 - Standard
    'coupled': [
      // Unit: Verified Planning【plan → plan-verify】
@@ -422,6 +447,9 @@ Phase 5: Execute Command Chain
| "Add API endpoint" | feature (low) | 2 |【lite-plan → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Fix login timeout" | bugfix | 2 |【lite-fix → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Use issue workflow" | issue-transition | 2.5 |【lite-plan → convert-to-plan】→ queue → execute |
| "头脑风暴: 通知系统重构" (Brainstorm: notification system refactor) | brainstorm | 4 | brainstorm-with-file → (built-in post-completion) |
| "深度调试 WebSocket 连接断开" (Deep debug: WebSocket disconnects) | debug-file | 3 | debug-with-file → (hypothesis iteration) |
| "协作分析: 认证架构优化" (Collaborative analysis: auth architecture optimization) | analyze-file | 3 | analyze-with-file → (multi-round discussion) |
| "OAuth2 system" | feature (high) | 3 |【plan → plan-verify】→ execute →【review-session-cycle → review-fix】→【test-fix-gen → test-cycle-execute】|
| "Implement with TDD" | tdd | 3 |【tdd-plan → execute】→ tdd-verify |
| "Uncertain: real-time arch" | exploration | 4 | brainstorm:auto-parallel →【plan → plan-verify】→ execute →【test-fix-gen → test-cycle-execute】|
@@ -465,6 +493,29 @@ todos = [

---

## With-File Workflows

**With-File workflows** provide documented exploration with multi-CLI collaboration. They are self-contained and generate comprehensive session artifacts.

| Workflow | Purpose | Key Features | Output Folder |
|----------|---------|--------------|---------------|
| **brainstorm-with-file** | Multi-perspective ideation | Gemini/Codex/Claude perspectives, diverge-converge cycles | `.workflow/.brainstorm/` |
| **debug-with-file** | Hypothesis-driven debugging | Gemini validation, understanding evolution, NDJSON logging | `.workflow/.debug/` |
| **analyze-with-file** | Collaborative analysis | Multi-round Q&A, CLI exploration, documented discussions | `.workflow/.analysis/` |

**Detection Keywords**:
- **brainstorm**: 头脑风暴, 创意, 发散思维, multi-perspective, compare perspectives
- **debug-file**: 深度调试, 假设验证, systematic debug, hypothesis debug
- **analyze-file**: 协作分析, 深度理解, collaborative analysis, explore concept

**Characteristics**:
1. **Self-Contained**: Each workflow handles its own iteration loop
2. **Documented Process**: Creates evolving documents (brainstorm.md, understanding.md, discussion.md)
3. **Multi-CLI**: Uses Gemini/Codex/Claude for different perspectives
4. **Built-in Post-Completion**: Offers follow-up options (create plan, issue, etc.)

---

## Type Comparison: ccw vs ccw-coordinator

| Aspect | ccw | ccw-coordinator |
@@ -496,4 +547,9 @@ ccw "Implement user registration with TDD"

# Exploratory task
ccw "Uncertain about architecture for real-time notifications"

# With-File workflows (documented exploration with multi-CLI collaboration)
ccw "头脑风暴: 用户通知系统重新设计"        # "Brainstorm: redesign the user notification system" → brainstorm-with-file
ccw "深度调试: 系统随机崩溃问题"            # "Deep debug: random system crashes" → debug-with-file
ccw "协作分析: 理解现有认证架构的设计决策"   # "Collaborative analysis: understand the design decisions behind the existing auth architecture" → analyze-with-file
```
@@ -170,652 +170,152 @@ Create internal representations (do not include raw artifacts in output):

#### Phase 4.1: Launch Unified Verification Agent

**Single Agent, Multi-Dimensional Analysis**:

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false, // ⚠️ MANDATORY: Must wait for results
  run_in_background=false,
  description="Multi-dimensional plan verification",
  prompt=`
## Plan Verification Task

Execute comprehensive verification across dimensions A-H, using Gemini CLI for semantic analysis.

### MANDATORY FIRST STEPS
1. Read: ${session_file} (user intent/context)
2. Read: ${IMPL_PLAN} (implementation plan)
3. Glob: ${task_dir}/*.json (all task JSON files)
4. Glob: ${SYNTHESIS_DIR}/*/analysis.md (role analysis documents)
5. Read: ~/.claude/workflows/cli-templates/schemas/verify-json-schema.json (output schema reference)
1. Read: ~/.claude/workflows/cli-templates/schemas/plan-verify-agent-schema.json (dimensions & rules)
2. Read: ~/.claude/workflows/cli-templates/schemas/verify-json-schema.json (output schema)
3. Read: ${session_file} (user intent)
4. Read: ${IMPL_PLAN} (implementation plan)
5. Glob: ${task_dir}/*.json (task files)
6. Glob: ${SYNTHESIS_DIR}/*/analysis.md (role analyses)
### Output Location
${process_dir}/verification-findings.json

### Execution Flow

### Verification Dimensions
**Load schema → Execute tiered CLI analysis → Aggregate findings → Write JSON**

#### Dimension A: User Intent Alignment (CRITICAL - Tier 1)
- Goal Alignment: IMPL_PLAN objectives match user's original intent
- Scope Drift: Plan covers user's stated scope without unauthorized expansion
- Success Criteria Match: Plan's success criteria reflect user's expectations
- Intent Conflicts: Tasks contradicting user's original objectives

FOR each tier in [1, 2, 3, 4]:
  - Load tier config from plan-verify-agent-schema.json
  - Execute: ccw cli -p "PURPOSE: Verify dimensions {tier.dimensions}
    TASK: {tier.checks from schema}
    CONTEXT: @${session_dir}/**/*
    EXPECTED: Findings JSON with dimension, severity, location, summary, recommendation
    CONSTRAINTS: Limit {tier.limit} findings
    " --tool gemini --mode analysis --rule {tier.rule}
  - Parse findings, check early exit condition
  - IF tier == 1 AND critical_count > 0: skip tier 3-4
#### Dimension B: Requirements Coverage Analysis (CRITICAL - Tier 1)
- Orphaned Requirements: Requirements in synthesis with zero associated tasks
- Unmapped Tasks: Tasks with no clear requirement linkage
- NFR Coverage Gaps: Non-functional requirements not reflected in tasks

#### Dimension C: Consistency Validation (CRITICAL - Tier 1)
- Requirement Conflicts: Tasks contradicting synthesis requirements
- Architecture Drift: IMPL_PLAN architecture not matching synthesis ADRs
- Terminology Drift: Same concept named differently across artifacts
- Data Model Inconsistency: Tasks referencing entities/fields not in synthesis

#### Dimension D: Dependency Integrity (HIGH - Tier 2)
- Circular Dependencies: Cyclic task dependencies
- Missing Dependencies: Task requires outputs from another task but no explicit dependency
- Broken Dependencies: Task depends on non-existent task ID
- Logical Ordering Issues: Implementation tasks before foundational setup

#### Dimension E: Synthesis Alignment (HIGH - Tier 2)
- Priority Conflicts: High-priority synthesis requirements mapped to low-priority tasks
- Success Criteria Mismatch: IMPL_PLAN success criteria not covering synthesis acceptance criteria
- Risk Mitigation Gaps: Critical risks without corresponding mitigation tasks

#### Dimension F: Task Specification Quality (MEDIUM - Tier 3)
- Ambiguous Focus Paths: Tasks with vague/missing focus_paths
- Underspecified Acceptance: Tasks without clear acceptance criteria
- Missing Artifacts References: Tasks not referencing brainstorming artifacts
- Weak Flow Control: Tasks without clear implementation_approach or pre_analysis
- Missing Target Files: Tasks without flow_control.target_files

#### Dimension G: Duplication Detection (LOW - Tier 4)
- Overlapping Task Scope: Multiple tasks with nearly identical descriptions
- Redundant Requirements Coverage: Same requirement covered by multiple tasks

#### Dimension H: Feasibility Assessment (LOW - Tier 4)
- Complexity Misalignment: Task marked "simple" but requires multiple file modifications
- Resource Conflicts: Parallel tasks requiring same resources/files
- Skill Gap Risks: Tasks requiring unavailable team skills
### CLI Analysis Execution

**Execute Tier 1 Analysis (All Dimensions)**:

\`\`\`bash
ccw cli -p "PURPOSE: Multi-dimensional plan verification for Tier 1 (user intent, coverage, consistency)
TASK:
• Verify user original intent matches IMPL_PLAN objectives (dimension A)
• Check all synthesis requirements have corresponding tasks (dimension B)
• Identify conflicts between tasks and synthesis decisions (dimension C)
• Find orphaned requirements or unmapped tasks
CONTEXT: @${session_dir}/**/* | Memory: Verification session WFS-${session_id}
EXPECTED: Findings JSON array with: dimension, severity, location, summary, recommendation
CONSTRAINTS: Focus on CRITICAL issues only | Identify all intent misalignments
" --tool gemini --mode analysis --rule analysis-review-architecture
\`\`\`

**If CRITICAL findings == 0, continue to Tier 2**:

\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 2 (dependencies and synthesis alignment)
TASK:
• Detect circular or broken task dependencies (dimension D)
• Identify priority conflicts between synthesis and tasks (dimension E)
• Check risk mitigation coverage
CONTEXT: @${session_dir}/**/* | Previous: Tier 1 verified, no critical issues
EXPECTED: Findings JSON with dimension D-E results
CONSTRAINTS: Limit to 15 HIGH severity findings
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
\`\`\`

**If High findings <= 15, continue to Tier 3**:

\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 3 (task specification quality)
TASK:
• Check for missing or vague acceptance criteria (dimension F)
• Validate flow control specifications in tasks
• Identify missing artifact references
CONTEXT: @${task_dir}/**/* @${IMPL_PLAN}
EXPECTED: Findings JSON with dimension F results
CONSTRAINTS: Limit to 20 MEDIUM severity findings
" --tool gemini --mode analysis --rule analysis-analyze-code-patterns
\`\`\`

**If Medium findings <= 20, execute Tier 4**:

\`\`\`bash
ccw cli -p "PURPOSE: Plan verification for Tier 4 (duplication and feasibility)
TASK:
• Detect overlapping task scopes (dimension G)
• Assess complexity alignment and resource conflicts (dimension H)
CONTEXT: @${task_dir}/**/*
EXPECTED: Findings JSON with dimension G-H results
CONSTRAINTS: Limit to 15 LOW severity findings
" --tool gemini --mode analysis --rule analysis-analyze-code-patterns
\`\`\`
### Severity Assignment

**CRITICAL**:
- Violates user's original intent (goal misalignment, scope drift)
- Violates synthesis authority (requirement conflict)
- Core requirement with zero coverage
- Circular dependencies
- Broken dependencies

**HIGH**:
- NFR coverage gaps
- Priority conflicts
- Missing risk mitigation tasks
- Ambiguous acceptance criteria

**MEDIUM**:
- Terminology drift
- Missing artifacts references
- Weak flow control
- Logical ordering issues

**LOW**:
- Style/wording improvements
- Minor redundancy not affecting execution
### Output Schema

JSON findings array (reference from step 5 above):

\`\`\`json
{
  "session_id": "${session_id}",
  "timestamp": "2025-01-27T...",
  "verification_tiers_completed": ["Tier 1", "Tier 2"],
  "findings": [
    {
      "id": "C1",
      "dimension": "A",
      "dimension_name": "User Intent Alignment",
      "severity": "CRITICAL",
      "location": ["${IMPL_PLAN}:L45", "synthesis:FR-03"],
      "summary": "User goal: add user profiles, but IMPL_PLAN focuses on authentication",
      "recommendation": "Update IMPL_PLAN to include profile management tasks"
    },
    {
      "id": "H1",
      "dimension": "D",
      "dimension_name": "Dependency Integrity",
      "severity": "HIGH",
      "location": ["task:IMPL-2.3"],
      "summary": "Depends on non-existent IMPL-2.4",
      "recommendation": "Fix depends_on reference or remove dependency"
    }
  ],
  "summary": {
    "critical_count": 2,
    "high_count": 3,
    "medium_count": 5,
    "low_count": 8,
    "total_findings": 18,
    "coverage_percentage": 92,
    "recommendation": "BLOCK_EXECUTION"
  }
}
\`\`\`
### Success Criteria

- [ ] All Tier 1 findings identified (no early exit)
- [ ] Tier 2-4 executed in sequence (skipped only by token budget exhaustion)
- [ ] Each finding includes: dimension, severity, location, recommendation
- [ ] Findings aggregated in single JSON output file
- [ ] Agent returns completion summary with quality gate recommendation

### Return Output

Write: \`${process_dir}/verification-findings.json\`

Return: 2-3 sentence summary with quality gate decision (BLOCK_EXECUTION / PROCEED_WITH_FIXES / PROCEED_WITH_CAUTION / PROCEED)

### Output
Write: ${process_dir}/verification-findings.json (follow verify-json-schema.json)
Return: Quality gate decision + 2-3 sentence summary
`
)
```
---

#### Phase 4.2: Parse and Aggregate Agent Results
#### Phase 4.2: Load and Organize Findings

```javascript
// Load agent findings
const findings = JSON.parse(Read(`${process_dir}/verification-findings.json`))

// Organize by severity
const bySeverity = {
  CRITICAL: findings.findings.filter(f => f.severity === 'CRITICAL'),
  HIGH: findings.findings.filter(f => f.severity === 'HIGH'),
  MEDIUM: findings.findings.filter(f => f.severity === 'MEDIUM'),
  LOW: findings.findings.filter(f => f.severity === 'LOW')
}

// Determine quality gate
const recommendation =
  bySeverity.CRITICAL.length > 0 ? 'BLOCK_EXECUTION' :
  bySeverity.HIGH.length > 0 ? 'PROCEED_WITH_FIXES' :
  bySeverity.MEDIUM.length > 0 ? 'PROCEED_WITH_CAUTION' :
  'PROCEED'
```
### 5. Generate Human-Readable Report

**Report Generation**: Transform agent findings JSON into comprehensive Markdown report.

**Step 5.1: Load Agent Findings**

```javascript
// Load verification findings from agent
const findingsData = JSON.parse(Read(`${process_dir}/verification-findings.json`))

// Extract key metrics
const { session_id, timestamp, verification_tiers_completed, findings, summary } = findingsData
// Load findings (single parse for all subsequent use)
const data = JSON.parse(Read(`${process_dir}/verification-findings.json`))
const { session_id, timestamp, verification_tiers_completed, findings, summary } = data
const { critical_count, high_count, medium_count, low_count, total_findings, coverage_percentage, recommendation } = summary

// Organize findings by severity
const bySeverity = {
  CRITICAL: findings.filter(f => f.severity === 'CRITICAL'),
  HIGH: findings.filter(f => f.severity === 'HIGH'),
  MEDIUM: findings.filter(f => f.severity === 'MEDIUM'),
  LOW: findings.filter(f => f.severity === 'LOW')
}
// Group by severity and dimension
const bySeverity = Object.groupBy(findings, f => f.severity)
const byDimension = Object.groupBy(findings, f => f.dimension)

// Organize findings by dimension
const byDimension = findings.reduce((acc, f) => {
  acc[f.dimension] = acc[f.dimension] || []
  acc[f.dimension].push(f)
  return acc
}, {})
// Dimension metadata (from schema)
const DIMS = {
  A: "User Intent Alignment", B: "Requirements Coverage", C: "Consistency Validation",
  D: "Dependency Integrity", E: "Synthesis Alignment", F: "Task Specification Quality",
  G: "Duplication Detection", H: "Feasibility Assessment"
}
```
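`Object.groupBy`, used in the newer Step 5.1 code, requires a recent runtime (Node.js 21+ / ES2024). A hedged fallback sketch for older runtimes; the helper name is an assumption:

```javascript
// Sketch: groupBy fallback for runtimes without Object.groupBy (pre-ES2024).
const groupBy = (items, keyFn) =>
  items.reduce((acc, item) => {
    const key = keyFn(item);
    (acc[key] ??= []).push(item);
    return acc;
  }, {});

// Drop-in for the report step:
// const bySeverity = groupBy(findings, f => f.severity)
// const byDimension = groupBy(findings, f => f.dimension)
```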
**Step 5.2: Generate Markdown Report**
### 5. Generate Report

Output a Markdown report with the following structure:
```javascript
// Helper: render dimension section
const renderDimension = (dim) => {
  const items = byDimension[dim] || []
  return items.length > 0
    ? items.map(f => `### ${f.id}: ${f.summary}\n- **Severity**: ${f.severity}\n- **Location**: ${f.location.join(', ')}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
    : `> ✅ No ${DIMS[dim]} issues detected.`
}

```markdown
// Helper: render severity section
const renderSeverity = (severity, impact) => {
  const items = bySeverity[severity] || []
  return items.length > 0
    ? items.map(f => `#### ${f.id}: ${f.summary}\n- **Dimension**: ${f.dimension_name}\n- **Location**: ${f.location.join(', ')}\n- **Impact**: ${impact}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
    : `> ✅ No ${severity.toLowerCase()}-severity issues detected.`
}

// Build Markdown report
const fullReport = `
# Plan Verification Report

**Session**: WFS-${session_id}
**Generated**: ${timestamp}
**Verification Tiers Completed**: ${verification_tiers_completed.join(', ')}
**Artifacts Analyzed**: role analysis documents, IMPL_PLAN.md, ${task_files_count} task files
**Session**: WFS-${session_id} | **Generated**: ${timestamp}
**Tiers Completed**: ${verification_tiers_completed.join(', ')}

---

## Executive Summary

### Quality Gate Decision

| Metric | Value | Status |
|--------|-------|--------|
| Overall Risk Level | ${critical_count > 0 ? 'CRITICAL' : high_count > 0 ? 'HIGH' : medium_count > 0 ? 'MEDIUM' : 'LOW'} | ${critical_count > 0 ? '🔴' : high_count > 0 ? '🟠' : medium_count > 0 ? '🟡' : '🟢'} |
| Critical Issues | ${critical_count} | 🔴 |
| High Issues | ${high_count} | 🟠 |
| Medium Issues | ${medium_count} | 🟡 |
| Low Issues | ${low_count} | 🟢 |
| Requirements Coverage | ${coverage_percentage}% | ${coverage_percentage >= 90 ? '🟢' : coverage_percentage >= 75 ? '🟡' : '🔴'} |
| Risk Level | ${critical_count > 0 ? 'CRITICAL' : high_count > 0 ? 'HIGH' : medium_count > 0 ? 'MEDIUM' : 'LOW'} | ${critical_count > 0 ? '🔴' : high_count > 0 ? '🟠' : medium_count > 0 ? '🟡' : '🟢'} |
| Critical/High/Medium/Low | ${critical_count}/${high_count}/${medium_count}/${low_count} | |
| Coverage | ${coverage_percentage}% | ${coverage_percentage >= 90 ? '🟢' : coverage_percentage >= 75 ? '🟡' : '🔴'} |
### Recommendation

**${recommendation}**

**Decision Rationale**:
${
  recommendation === 'BLOCK_EXECUTION' ?
    `Critical issues detected that violate core requirements or user intent. Must be resolved before implementation.` :
  recommendation === 'PROCEED_WITH_FIXES' ?
    `No critical issues, but high-severity concerns exist. Recommended to fix before execution to ensure quality.` :
  recommendation === 'PROCEED_WITH_CAUTION' ?
    `Medium-severity issues detected. May proceed but address concerns during/after implementation.` :
    `No significant issues detected. Safe to proceed with implementation.`
}

**Quality Gate Criteria**:
- **BLOCK_EXECUTION**: Critical issues > 0 (must fix before proceeding)
- **PROCEED_WITH_FIXES**: Critical = 0, High > 0 (fix recommended before execution)
- **PROCEED_WITH_CAUTION**: Critical = 0, High = 0, Medium > 0 (proceed with awareness)
- **PROCEED**: Only Low issues or none (safe to execute)
**Recommendation**: **${recommendation}**

---

## Findings Summary

| ID | Dimension | Severity | Location(s) | Summary | Recommendation |
|----|-----------|----------|-------------|---------|----------------|
${findings.map(f => `| ${f.id} | ${f.dimension_name} | ${f.severity} | ${f.location.join(', ')} | ${f.summary} | ${f.recommendation} |`).join('\n')}

(IDs prefixed by severity initial: C/H/M/L + number)
| ID | Dimension | Severity | Location | Summary |
|----|-----------|----------|----------|---------|
${findings.map(f => `| ${f.id} | ${f.dimension_name} | ${f.severity} | ${f.location.join(', ')} | ${f.summary} |`).join('\n')}

---

## User Intent Alignment Analysis (Dimension A)
## Analysis by Dimension

${
  byDimension['A'] && byDimension['A'].length > 0 ?
    byDimension['A'].map(f => `
### ${f.summary}

**Severity**: ${f.severity}
**Location**: ${f.location.join(', ')}

**Issue Description**:
${f.summary}

**Recommendation**:
${f.recommendation}
`).join('\n') :
    `> ✅ No user intent alignment issues detected. IMPL_PLAN objectives and scope match user's original intent.`
}
${['A','B','C','D','E','F','G','H'].map(d => `### ${d}. ${DIMS[d]}\n\n${renderDimension(d)}`).join('\n\n---\n\n')}
---

## Requirements Coverage Analysis (Dimension B)
## Findings by Severity

### Coverage Metrics
### CRITICAL (${critical_count})
${renderSeverity('CRITICAL', 'Blocks execution')}

| Metric | Value |
|--------|-------|
| Overall Coverage | ${coverage_percentage}% |
| Total Findings | ${byDimension['B']?.length || 0} |
### HIGH (${high_count})
${renderSeverity('HIGH', 'Fix before execution recommended')}

### Findings
### MEDIUM (${medium_count})
${renderSeverity('MEDIUM', 'Address during/after implementation')}

${
  byDimension['B'] && byDimension['B'].length > 0 ?
    byDimension['B'].map(f => `
#### ${f.id}: ${f.summary}

- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
    `> ✅ All synthesis requirements have corresponding tasks. No coverage gaps detected.`
}
### LOW (${low_count})
${renderSeverity('LOW', 'Optional improvement')}

---

## Consistency Validation (Dimension C)
## Next Steps

${
  byDimension['C'] && byDimension['C'].length > 0 ?
    byDimension['C'].map(f => `
### ${f.id}: ${f.summary}
${recommendation === 'BLOCK_EXECUTION' ? '🛑 **BLOCK**: Fix critical issues → Re-verify' :
  recommendation === 'PROCEED_WITH_FIXES' ? '⚠️ **FIX RECOMMENDED**: Address high issues → Re-verify or Execute' :
  '✅ **READY**: Proceed to /workflow:execute'}

- **Severity**: ${f.severity}
- **Location**: ${f.location.join(', ')}
- **Recommendation**: ${f.recommendation}
`).join('\n') :
    `> ✅ No consistency issues detected. Tasks align with synthesis requirements and architecture.`
}
---
|
||||
|
||||
## Dependency Integrity (Dimension D)
|
||||
|
||||
${
|
||||
byDimension['D'] && byDimension['D'].length > 0 ?
|
||||
byDimension['D'].map(f => `
|
||||
### ${f.id}: ${f.summary}
|
||||
|
||||
- **Severity**: ${f.severity}
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No dependency issues detected. All task dependencies are valid and logically ordered.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Synthesis Alignment (Dimension E)
|
||||
|
||||
${
|
||||
byDimension['E'] && byDimension['E'].length > 0 ?
|
||||
byDimension['E'].map(f => `
|
||||
### ${f.id}: ${f.summary}
|
||||
|
||||
- **Severity**: ${f.severity}
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No synthesis alignment issues. Task priorities and success criteria match synthesis specifications.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Task Specification Quality (Dimension F)
|
||||
|
||||
${
|
||||
byDimension['F'] && byDimension['F'].length > 0 ?
|
||||
byDimension['F'].map(f => `
|
||||
### ${f.id}: ${f.summary}
|
||||
|
||||
- **Severity**: ${f.severity}
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ All tasks have clear specifications with proper focus_paths, acceptance criteria, and flow control.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Duplication Detection (Dimension G)
|
||||
|
||||
${
|
||||
byDimension['G'] && byDimension['G'].length > 0 ?
|
||||
byDimension['G'].map(f => `
|
||||
### ${f.id}: ${f.summary}
|
||||
|
||||
- **Severity**: ${f.severity}
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No duplicate task scopes detected. All tasks have distinct responsibilities.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Feasibility Assessment (Dimension H)
|
||||
|
||||
${
|
||||
byDimension['H'] && byDimension['H'].length > 0 ?
|
||||
byDimension['H'].map(f => `
|
||||
### ${f.id}: ${f.summary}
|
||||
|
||||
- **Severity**: ${f.severity}
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No feasibility concerns. Task complexity assessments and resource allocations are appropriate.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Detailed Findings by Severity
|
||||
|
||||
### CRITICAL Issues (${critical_count})
|
||||
|
||||
${
|
||||
bySeverity.CRITICAL.length > 0 ?
|
||||
bySeverity.CRITICAL.map(f => `
|
||||
#### ${f.id}: ${f.summary}
|
||||
|
||||
- **Dimension**: ${f.dimension_name} (${f.dimension})
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Impact**: Blocks execution - must be resolved before implementation
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No critical issues detected.`
|
||||
}
|
||||
|
||||
### HIGH Issues (${high_count})
|
||||
|
||||
${
|
||||
bySeverity.HIGH.length > 0 ?
|
||||
bySeverity.HIGH.map(f => `
|
||||
#### ${f.id}: ${f.summary}
|
||||
|
||||
- **Dimension**: ${f.dimension_name} (${f.dimension})
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Impact**: Significant quality concern - recommended to fix before execution
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No high-severity issues detected.`
|
||||
}
|
||||
|
||||
### MEDIUM Issues (${medium_count})
|
||||
|
||||
${
|
||||
bySeverity.MEDIUM.length > 0 ?
|
||||
bySeverity.MEDIUM.map(f => `
|
||||
#### ${f.id}: ${f.summary}
|
||||
|
||||
- **Dimension**: ${f.dimension_name} (${f.dimension})
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Impact**: Quality improvement opportunity - address during/after implementation
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No medium-severity issues detected.`
|
||||
}
|
||||
|
||||
### LOW Issues (${low_count})
|
||||
|
||||
${
|
||||
bySeverity.LOW.length > 0 ?
|
||||
bySeverity.LOW.map(f => `
|
||||
#### ${f.id}: ${f.summary}
|
||||
|
||||
- **Dimension**: ${f.dimension_name} (${f.dimension})
|
||||
- **Location**: ${f.location.join(', ')}
|
||||
- **Impact**: Minor improvement - optional
|
||||
- **Recommendation**: ${f.recommendation}
|
||||
`).join('\n') :
|
||||
`> ✅ No low-severity issues detected.`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
## Metrics Summary
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Requirements Coverage | ${coverage_percentage}% |
|
||||
| Total Findings | ${total_findings} |
|
||||
| Critical Issues | ${critical_count} |
|
||||
| High Issues | ${high_count} |
|
||||
| Medium Issues | ${medium_count} |
|
||||
| Low Issues | ${low_count} |
|
||||
| Verification Tiers Completed | ${verification_tiers_completed.join(', ')} |
|
||||
|
||||
---
|
||||
|
||||
## Remediation Recommendations
|
||||
|
||||
### Priority Order
|
||||
|
||||
1. **CRITICAL** - Must fix before proceeding
|
||||
2. **HIGH** - Fix before execution
|
||||
3. **MEDIUM** - Fix during or after implementation
|
||||
4. **LOW** - Optional improvements
|
||||
|
||||
### Next Steps
|
||||
|
||||
Based on the quality gate recommendation (**${recommendation}**):
|
||||
|
||||
${
|
||||
recommendation === 'BLOCK_EXECUTION' ?
|
||||
`
|
||||
**🛑 BLOCK EXECUTION**
|
||||
|
||||
You must resolve all CRITICAL issues before proceeding with implementation:
|
||||
|
||||
1. Review each critical issue in detail (see section "CRITICAL Issues" above)
|
||||
2. Determine remediation approach:
|
||||
- Modify IMPL_PLAN.md for goal/scope conflicts
|
||||
- Update task.json for requirement misalignments
|
||||
- Add new tasks for coverage gaps
|
||||
- Fix dependencies for circular/broken references
|
||||
3. Apply fixes systematically
|
||||
4. Re-run verification to confirm resolution: \`/workflow:plan-verify --session ${session_id}\`
|
||||
` :
|
||||
recommendation === 'PROCEED_WITH_FIXES' ?
|
||||
`
|
||||
**⚠️ PROCEED WITH FIXES RECOMMENDED**
|
||||
|
||||
No critical issues detected, but HIGH issues exist. Recommended workflow:
|
||||
|
||||
1. Review high-priority issues (see section "HIGH Issues" above)
|
||||
2. Apply fixes before execution for optimal results:
|
||||
- Use IMPL_PLAN.md for architecture/priority misalignments
|
||||
- Update task.json for specification improvements
|
||||
- Add missing dependencies or risk mitigation tasks
|
||||
3. Re-run verification to confirm resolution: \`/workflow:plan-verify --session ${session_id}\`
|
||||
4. Proceed to implementation when ready
|
||||
` :
|
||||
recommendation === 'PROCEED_WITH_CAUTION' ?
|
||||
`
|
||||
**✅ PROCEED WITH CAUTION**
|
||||
|
||||
Only MEDIUM issues detected. You may proceed with implementation:
|
||||
|
||||
- Review medium-severity issues (see section "MEDIUM Issues" above)
|
||||
- Address concerns during or after implementation
|
||||
- Maintain awareness of identified concerns
|
||||
- Schedule remediation for future improvement cycles
|
||||
` :
|
||||
`
|
||||
**✅ PROCEED**
|
||||
|
||||
No significant issues detected. Safe to execute implementation workflow:
|
||||
|
||||
- Requirements fully covered
|
||||
- User intent aligned
|
||||
- Dependencies valid and logically ordered
|
||||
- All tasks properly specified
|
||||
- Ready for immediate execution
|
||||
Re-verify: \`/workflow:plan-verify --session ${session_id}\`
|
||||
Execute: \`/workflow:execute --resume-session="${session_id}"\`
|
||||
`
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
**Report End**
|
||||
\`\`\`
|
||||
|
||||
### 6. Save and Display Report

**Step 6.1: Generate Complete Markdown Report**

```javascript
// Build complete report from template above using findings data
const fullReport = `
# Plan Verification Report
... [complete markdown template generated above] ...
`

// Write report to file
const reportPath = `${process_dir}/PLAN_VERIFICATION.md`
Write(reportPath, fullReport)
// Write report
Write(`${process_dir}/PLAN_VERIFICATION.md`, fullReport)
console.log(`✅ Report: ${process_dir}/PLAN_VERIFICATION.md\n📊 ${recommendation} | C:${critical_count} H:${high_count} M:${medium_count} L:${low_count} | Coverage:${coverage_percentage}%`)
```

**Step 6.2: Display Summary to User**

```javascript
console.log(`
=== Plan Verification Complete ===
Report saved to: ${reportPath}

Quality Gate: ${recommendation}
Critical: ${critical_count} | High: ${high_count} | Medium: ${medium_count} | Low: ${low_count}
Coverage: ${coverage_percentage}%

Next: Review full report at ${reportPath} for detailed findings and recommendations
`)
```

**Step 6.3: Next Step Selection**
### 6. Next Step Selection

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
@@ -1,303 +0,0 @@
# CCW Loop Skill

A stateless, iterative development-loop workflow supporting three phases (Develop, Debug, Validate), each of which records its progress in its own files.

## Overview

CCW Loop is an autonomous-mode Skill that uses a file-driven, stateless loop to help developers complete development tasks systematically.

### Core Features

1. **Stateless loop**: Each run reads state from files; nothing depends on in-memory state
2. **File-driven**: All progress is recorded in Markdown files, so it is auditable and reviewable
3. **Gemini-assisted**: Key decision points use CLI tools for deep analysis
4. **Resumable**: The loop can be continued after any interruption
5. **Dual mode**: Supports both interactive and automatic looping

### Three Phases

- **Develop**: Task breakdown → code implementation → progress recording
- **Debug**: Hypothesis generation → evidence collection → root-cause analysis → fix verification
- **Validate**: Test execution → coverage check → quality assessment

## Installation

Included in `.claude/skills/ccw-loop/`; no additional installation is needed.

## Usage

### Basic Usage

```bash
# Start a new loop
/ccw-loop "Implement user authentication"

# Resume an existing loop
/ccw-loop --resume LOOP-auth-2026-01-22

# Automatic loop mode
/ccw-loop --auto "Fix the login bug and add tests"
```
### Interactive Flow

```
1. Start: /ccw-loop "task description"
2. Initialize: the task is analyzed automatically and a sub-task list is generated
3. Show the menu:
   - 📝 Continue development (Develop)
   - 🔍 Start debugging (Debug)
   - ✅ Run validation (Validate)
   - 📊 View details (Status)
   - 🏁 Complete the loop (Complete)
   - 🚪 Exit (Exit)
4. Execute the selected action
5. Repeat steps 3-4 until complete
```

### Automatic Loop Flow

```
Develop (all tasks) → Debug (if needed) → Validate → Complete
```

## Directory Structure

```
.workflow/.loop/{session-id}/
├── meta.json              # Session metadata (immutable)
├── state.json             # Current state (updated on every run)
├── summary.md             # Completion report (generated at the end)
├── develop/
│   ├── progress.md        # Development progress timeline
│   ├── tasks.json         # Task list
│   └── changes.log        # Code change log (NDJSON)
├── debug/
│   ├── understanding.md   # Understanding-evolution document
│   ├── hypotheses.json    # Hypothesis history
│   └── debug.log          # Debug log (NDJSON)
└── validate/
    ├── validation.md      # Validation report
    ├── test-results.json  # Test results
    └── coverage.json      # Coverage data
```
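`changes.log` and `debug.log` are NDJSON files, one JSON object per line. A hedged sketch of appending an entry; the field names are illustrative assumptions, not a documented format:

```javascript
// Sketch: append one NDJSON record to develop/changes.log.
// Field names (ts, task, file, action) are illustrative assumptions.
import { appendFileSync } from 'node:fs';

function logChange(sessionDir, entry) {
  const record = { ts: new Date().toISOString(), ...entry };
  appendFileSync(`${sessionDir}/develop/changes.log`, JSON.stringify(record) + '\n');
}

logChange('.workflow/.loop/LOOP-auth-2026-01-22', {
  task: 'task-001',
  file: 'src/auth/login.ts',
  action: 'modified'
});
```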
## Action Reference

| Action | Description | Trigger |
|--------|-------------|---------|
| action-init | Initialize the session | First launch |
| action-menu | Show the action menu | Every iteration in interactive mode |
| action-develop-with-file | Execute development tasks | Pending tasks exist |
| action-debug-with-file | Hypothesis-driven debugging | Debugging is needed |
| action-validate-with-file | Run test validation | Validation is needed |
| action-complete | Finish and generate the report | All tasks completed |

See [specs/action-catalog.md](specs/action-catalog.md) for details.

## CLI Integration

CCW Loop integrates CLI tools at key decision points:

### Task Breakdown (action-init)
```bash
ccw cli -p "PURPOSE: Break down the development task..."
  --tool gemini
  --mode analysis
  --rule planning-breakdown-task-steps
```

### Code Implementation (action-develop)
```bash
ccw cli -p "PURPOSE: Implement the feature code..."
  --tool gemini
  --mode write
  --rule development-implement-feature
```

### Hypothesis Generation (action-debug - explore)
```bash
ccw cli -p "PURPOSE: Generate debugging hypotheses..."
  --tool gemini
  --mode analysis
  --rule analysis-diagnose-bug-root-cause
```

### Evidence Analysis (action-debug - analyze)
```bash
ccw cli -p "PURPOSE: Analyze debug log evidence..."
  --tool gemini
  --mode analysis
  --rule analysis-diagnose-bug-root-cause
```

### Quality Assessment (action-validate)
```bash
ccw cli -p "PURPOSE: Analyze test results and coverage..."
  --tool gemini
  --mode analysis
  --rule analysis-review-code-quality
```

## State Management

### State Schema

See [phases/state-schema.md](phases/state-schema.md)

### State Transitions

```
pending → running → completed
↓
user_exit
↓
failed
```

### State Recovery

If `state.json` is corrupted, it can be rebuilt from the other files (see the sketch below):
- develop/tasks.json → develop.*
- debug/hypotheses.json → debug.*
- validate/test-results.json → validate.*
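A hedged sketch of the rebuild idea, reconstructing a minimal `state.json` from the per-phase files listed above; the exact state shape is an assumption, not the documented schema:

```javascript
// Sketch: rebuild a minimal state.json from per-phase artifacts.
// The state fields shown here are assumptions, not the documented schema.
import { readFileSync, writeFileSync, existsSync } from 'node:fs';

function rebuildState(sessionDir) {
  const read = (p) => existsSync(`${sessionDir}/${p}`)
    ? JSON.parse(readFileSync(`${sessionDir}/${p}`, 'utf8'))
    : null;

  const state = {
    develop: { tasks: read('develop/tasks.json') ?? [] },
    debug: { hypotheses: read('debug/hypotheses.json') ?? [] },
    validate: { results: read('validate/test-results.json') ?? null }
  };
  writeFileSync(`${sessionDir}/state.json`, JSON.stringify(state, null, 2));
  return state;
}
```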
## Examples

### Example 1: Feature Development

```bash
# 1. Start the loop
/ccw-loop "Add user profile page"

# 2. The system initializes and generates tasks:
# - task-001: Create profile component
# - task-002: Add API endpoints
# - task-003: Implement tests

# 3. Choose "Continue development"
# → executes task-001 (Gemini-assisted implementation)
# → updates progress.md

# 4. Keep developing until all tasks are done

# 5. Choose "Run validation"
# → run tests
# → check coverage
# → generate validation.md

# 6. Choose "Complete the loop"
# → generate summary.md
# → ask whether to expand into an Issue
```

### Example 2: Bug Fix

```bash
# 1. Start the loop
/ccw-loop "Fix login timeout issue"

# 2. Choose "Start debugging"
# → enter the bug description: "Login times out after 30s"
# → Gemini generates hypotheses (H1, H2, H3)
# → add NDJSON logging
# → prompt to reproduce the bug

# 3. Reproduce the bug (by operating the application)

# 4. Choose "Start debugging" again
# → parse debug.log
# → Gemini analyzes the evidence
# → H2 confirmed as root cause
# → generate the fix
# → update understanding.md

# 5. Choose "Run validation"
# → tests pass

# 6. Done
```
## Templates

- [progress-template.md](templates/progress-template.md): Development progress document template
- [understanding-template.md](templates/understanding-template.md): Debugging understanding document template
- [validation-template.md](templates/validation-template.md): Validation report template

## Specifications

- [loop-requirements.md](specs/loop-requirements.md): Loop requirements specification
- [action-catalog.md](specs/action-catalog.md): Action catalog

## Integration

### Dashboard Integration

CCW Loop integrates with the Dashboard Loop Monitor:
- Dashboard creates a Loop → triggers this Skill
- state.json → displayed live in the Dashboard
- Task lists sync in both directions
- Control buttons map to actions

### Issue System Integration

After completion, the loop can be expanded into an Issue:
- Dimensions: test, enhance, refactor, doc
- Automatically invokes `/issue:new`
- Context is filled in automatically

## Error Handling

| Situation | Handling |
|-----------|----------|
| Session does not exist | Create a new session |
| state.json corrupted | Rebuild from files |
| CLI tool failure | Fall back to manual mode |
| Test failure | Loop back to develop/debug |
| >10 iterations | Warn the user and suggest splitting the task |

## Limitations

1. **Single-session limit**: Only one active session at a time
2. **Iteration limit**: No more than 10 iterations is recommended
3. **CLI dependency**: Some features depend on Gemini CLI availability
4. **Test framework**: Requires test scripts defined in package.json

## Troubleshooting

### Q: How do I view the current session status?

A: Choose "View details (Status)" from the menu

### Q: How do I resume an interrupted session?

A: Use the `--resume` flag:
```bash
/ccw-loop --resume LOOP-xxx-2026-01-22
```

### Q: What if the CLI tool fails?

A: The Skill automatically degrades to manual mode and prompts for manual input

### Q: How do I add a custom action?

A: See the "Action Extensions" section of [specs/action-catalog.md](specs/action-catalog.md)

## Contributing

To add new functionality:
1. Create an action file under `phases/actions/`
2. Update the orchestrator decision logic
3. Add it to action-catalog.md
4. Update action-menu.md

## License

MIT

---

**Version**: 1.0.0
**Last Updated**: 2026-01-22
**Author**: CCW Team
@@ -0,0 +1,47 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Plan Verification Agent Schema",
  "description": "Defines dimensions, severity rules, and CLI templates for plan verification agent",

  "dimensions": {
    "A": { "name": "User Intent Alignment", "tier": 1, "severity": "CRITICAL",
      "checks": ["Goal Alignment", "Scope Drift", "Success Criteria Match", "Intent Conflicts"] },
    "B": { "name": "Requirements Coverage", "tier": 1, "severity": "CRITICAL",
      "checks": ["Orphaned Requirements", "Unmapped Tasks", "NFR Coverage Gaps"] },
    "C": { "name": "Consistency Validation", "tier": 1, "severity": "CRITICAL",
      "checks": ["Requirement Conflicts", "Architecture Drift", "Terminology Drift", "Data Model Inconsistency"] },
    "D": { "name": "Dependency Integrity", "tier": 2, "severity": "HIGH",
      "checks": ["Circular Dependencies", "Missing Dependencies", "Broken Dependencies", "Logical Ordering"] },
    "E": { "name": "Synthesis Alignment", "tier": 2, "severity": "HIGH",
      "checks": ["Priority Conflicts", "Success Criteria Mismatch", "Risk Mitigation Gaps"] },
    "F": { "name": "Task Specification Quality", "tier": 3, "severity": "MEDIUM",
      "checks": ["Ambiguous Focus Paths", "Underspecified Acceptance", "Missing Artifacts", "Weak Flow Control"] },
    "G": { "name": "Duplication Detection", "tier": 4, "severity": "LOW",
      "checks": ["Overlapping Task Scope", "Redundant Coverage"] },
    "H": { "name": "Feasibility Assessment", "tier": 4, "severity": "LOW",
      "checks": ["Complexity Misalignment", "Resource Conflicts", "Skill Gap Risks"] }
  },

  "tiers": {
    "1": { "dimensions": ["A", "B", "C"], "priority": "CRITICAL", "limit": null, "rule": "analysis-review-architecture" },
    "2": { "dimensions": ["D", "E"], "priority": "HIGH", "limit": 15, "rule": "analysis-diagnose-bug-root-cause" },
    "3": { "dimensions": ["F"], "priority": "MEDIUM", "limit": 20, "rule": "analysis-analyze-code-patterns" },
    "4": { "dimensions": ["G", "H"], "priority": "LOW", "limit": 15, "rule": "analysis-analyze-code-patterns" }
  },

  "severity_rules": {
    "CRITICAL": ["User intent violation", "Synthesis authority violation", "Zero coverage", "Circular/broken deps"],
    "HIGH": ["NFR gaps", "Priority conflicts", "Missing risk mitigation"],
    "MEDIUM": ["Terminology drift", "Missing refs", "Weak flow control"],
    "LOW": ["Style improvements", "Minor redundancy"]
  },

  "quality_gate": {
    "BLOCK_EXECUTION": { "condition": "critical > 0", "emoji": "🛑" },
    "PROCEED_WITH_FIXES": { "condition": "critical == 0 && high > 0", "emoji": "⚠️" },
    "PROCEED_WITH_CAUTION": { "condition": "critical == 0 && high == 0 && medium > 0", "emoji": "✅" },
    "PROCEED": { "condition": "only low or none", "emoji": "✅" }
  },

  "token_budget": { "total_findings": 50, "early_exit": "CRITICAL > 0 in Tier 1 → skip Tier 3-4" }
}

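The `quality_gate` conditions above are stored as strings; a minimal TypeScript sketch of how an agent might evaluate them from the findings summary counts (the function name and input shape are assumptions, not part of the schema):

```typescript
type GateRecommendation = 'BLOCK_EXECUTION' | 'PROCEED_WITH_FIXES' | 'PROCEED_WITH_CAUTION' | 'PROCEED';

interface SeverityCounts {
  critical: number;
  high: number;
  medium: number;
  low: number;
}

// Mirrors the quality_gate table: CRITICAL blocks execution, HIGH requires fixes,
// MEDIUM proceeds with caution, anything else proceeds.
function evaluateQualityGate(counts: SeverityCounts): GateRecommendation {
  if (counts.critical > 0) return 'BLOCK_EXECUTION';
  if (counts.high > 0) return 'PROCEED_WITH_FIXES';
  if (counts.medium > 0) return 'PROCEED_WITH_CAUTION';
  return 'PROCEED';
}
```
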
158  .claude/workflows/cli-templates/schemas/verify-json-schema.json  Normal file
@@ -0,0 +1,158 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Plan Verification Findings Schema",
|
||||
"description": "Schema for plan verification findings output from cli-explore-agent",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"session_id",
|
||||
"timestamp",
|
||||
"verification_tiers_completed",
|
||||
"findings",
|
||||
"summary"
|
||||
],
|
||||
"properties": {
|
||||
"session_id": {
|
||||
"type": "string",
|
||||
"description": "Workflow session ID (e.g., WFS-20250127-143000)",
|
||||
"pattern": "^WFS-[0-9]{8}-[0-9]{6}$"
|
||||
},
|
||||
"timestamp": {
|
||||
"type": "string",
|
||||
"description": "ISO 8601 timestamp when verification was completed",
|
||||
"format": "date-time"
|
||||
},
|
||||
"verification_tiers_completed": {
|
||||
"type": "array",
|
||||
"description": "List of verification tiers completed (e.g., ['Tier 1', 'Tier 2'])",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": ["Tier 1", "Tier 2", "Tier 3", "Tier 4"]
|
||||
},
|
||||
"minItems": 1,
|
||||
"maxItems": 4
|
||||
},
|
||||
"findings": {
|
||||
"type": "array",
|
||||
"description": "Array of all findings across all dimensions",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"dimension",
|
||||
"dimension_name",
|
||||
"severity",
|
||||
"location",
|
||||
"summary",
|
||||
"recommendation"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique finding ID prefixed by severity (C1, H1, M1, L1)",
|
||||
"pattern": "^[CHML][0-9]+$"
|
||||
},
|
||||
"dimension": {
|
||||
"type": "string",
|
||||
"description": "Verification dimension identifier",
|
||||
"enum": ["A", "B", "C", "D", "E", "F", "G", "H"]
|
||||
},
|
||||
"dimension_name": {
|
||||
"type": "string",
|
||||
"description": "Human-readable dimension name",
|
||||
"enum": [
|
||||
"User Intent Alignment",
|
||||
"Requirements Coverage Analysis",
|
||||
"Consistency Validation",
|
||||
"Dependency Integrity",
|
||||
"Synthesis Alignment",
|
||||
"Task Specification Quality",
|
||||
"Duplication Detection",
|
||||
"Feasibility Assessment"
|
||||
]
|
||||
},
|
||||
"severity": {
|
||||
"type": "string",
|
||||
"description": "Severity level of the finding",
|
||||
"enum": ["CRITICAL", "HIGH", "MEDIUM", "LOW"]
|
||||
},
|
||||
"location": {
|
||||
"type": "array",
|
||||
"description": "Array of locations where issue was found (e.g., 'IMPL_PLAN.md:L45', 'task:IMPL-1.2', 'synthesis:FR-03')",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"minItems": 1
|
||||
},
|
||||
"summary": {
|
||||
"type": "string",
|
||||
"description": "Concise summary of the issue (1-2 sentences)",
|
||||
"minLength": 10,
|
||||
"maxLength": 500
|
||||
},
|
||||
"recommendation": {
|
||||
"type": "string",
|
||||
"description": "Actionable recommendation to resolve the issue",
|
||||
"minLength": 10,
|
||||
"maxLength": 500
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"summary": {
|
||||
"type": "object",
|
||||
"description": "Aggregate summary of verification results",
|
||||
"required": [
|
||||
"critical_count",
|
||||
"high_count",
|
||||
"medium_count",
|
||||
"low_count",
|
||||
"total_findings",
|
||||
"coverage_percentage",
|
||||
"recommendation"
|
||||
],
|
||||
"properties": {
|
||||
"critical_count": {
|
||||
"type": "integer",
|
||||
"description": "Number of critical severity findings",
|
||||
"minimum": 0
|
||||
},
|
||||
"high_count": {
|
||||
"type": "integer",
|
||||
"description": "Number of high severity findings",
|
||||
"minimum": 0
|
||||
},
|
||||
"medium_count": {
|
||||
"type": "integer",
|
||||
"description": "Number of medium severity findings",
|
||||
"minimum": 0
|
||||
},
|
||||
"low_count": {
|
||||
"type": "integer",
|
||||
"description": "Number of low severity findings",
|
||||
"minimum": 0
|
||||
},
|
||||
"total_findings": {
|
||||
"type": "integer",
|
||||
"description": "Total number of findings",
|
||||
"minimum": 0
|
||||
},
|
||||
"coverage_percentage": {
|
||||
"type": "number",
|
||||
"description": "Percentage of synthesis requirements covered by tasks (0-100)",
|
||||
"minimum": 0,
|
||||
"maximum": 100
|
||||
},
|
||||
"recommendation": {
|
||||
"type": "string",
|
||||
"description": "Quality gate recommendation",
|
||||
"enum": [
|
||||
"BLOCK_EXECUTION",
|
||||
"PROCEED_WITH_FIXES",
|
||||
"PROCEED_WITH_CAUTION",
|
||||
"PROCEED"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
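For orientation, here is a minimal findings document that satisfies the required fields of the schema above; the concrete values (requirement IDs, counts, locations) are illustrative only:

```typescript
// Illustrative payload shaped to match verify-json-schema.json.
const exampleFindings = {
  session_id: 'WFS-20250127-143000',
  timestamp: '2025-01-27T14:45:00Z',
  verification_tiers_completed: ['Tier 1', 'Tier 2'],
  findings: [
    {
      id: 'C1',
      dimension: 'B',
      dimension_name: 'Requirements Coverage Analysis',
      severity: 'CRITICAL',
      location: ['IMPL_PLAN.md:L45', 'synthesis:FR-03'],
      summary: 'FR-03 has no corresponding implementation task.',
      recommendation: 'Add a task covering FR-03 or mark the requirement as out of scope.'
    }
  ],
  summary: {
    critical_count: 1,
    high_count: 0,
    medium_count: 0,
    low_count: 0,
    total_findings: 1,
    coverage_percentage: 92.5,
    recommendation: 'BLOCK_EXECUTION'
  }
};
```
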
@@ -2,51 +2,26 @@
|
||||
* Skills Routes Module
|
||||
* Handles all Skills-related API endpoints
|
||||
*/
|
||||
import { readFileSync, existsSync, readdirSync, statSync, unlinkSync, promises as fsPromises } from 'fs';
|
||||
import { readFileSync, existsSync, readdirSync, statSync, unlinkSync, renameSync, writeFileSync, mkdirSync, cpSync, rmSync, promises as fsPromises } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { executeCliTool } from '../../tools/cli-executor.js';
|
||||
import { SmartContentFormatter } from '../../tools/cli-output-converter.js';
|
||||
import { validatePath as validateAllowedPath } from '../../utils/path-validator.js';
|
||||
import type { RouteContext } from './types.js';
|
||||
|
||||
type SkillLocation = 'project' | 'user';
|
||||
|
||||
interface ParsedSkillFrontmatter {
|
||||
name: string;
|
||||
description: string;
|
||||
version: string | null;
|
||||
allowedTools: string[];
|
||||
content: string;
|
||||
}
|
||||
|
||||
interface SkillSummary {
|
||||
name: string;
|
||||
folderName: string;
|
||||
description: string;
|
||||
version: string | null;
|
||||
allowedTools: string[];
|
||||
location: SkillLocation;
|
||||
path: string;
|
||||
supportingFiles: string[];
|
||||
}
|
||||
|
||||
interface SkillsConfig {
|
||||
projectSkills: SkillSummary[];
|
||||
userSkills: SkillSummary[];
|
||||
}
|
||||
|
||||
interface SkillInfo {
|
||||
name: string;
|
||||
description: string;
|
||||
version: string | null;
|
||||
allowedTools: string[];
|
||||
supportingFiles: string[];
|
||||
}
|
||||
|
||||
type SkillFolderValidation =
|
||||
| { valid: true; errors: string[]; skillInfo: SkillInfo }
|
||||
| { valid: false; errors: string[]; skillInfo: null };
|
||||
import type {
|
||||
SkillLocation,
|
||||
ParsedSkillFrontmatter,
|
||||
SkillSummary,
|
||||
SkillsConfig,
|
||||
SkillInfo,
|
||||
SkillFolderValidation,
|
||||
DisabledSkillInfo,
|
||||
DisabledSkillsConfig,
|
||||
DisabledSkillSummary,
|
||||
ExtendedSkillsConfig,
|
||||
SkillOperationResult
|
||||
} from '../../types/skill-types.js';
|
||||
|
||||
type GenerationType = 'description' | 'template';
|
||||
|
||||
@@ -65,6 +40,260 @@ function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
|
||||
// ========== Skills Helper Functions ==========
|
||||
|
||||
// ========== Disabled Skills Helper Functions ==========
|
||||
|
||||
/**
|
||||
* Get disabled skills directory path
|
||||
*/
|
||||
function getDisabledSkillsDir(location: SkillLocation, projectPath: string): string {
|
||||
if (location === 'project') {
|
||||
return join(projectPath, '.claude', '.disabled-skills');
|
||||
}
|
||||
return join(homedir(), '.claude', '.disabled-skills');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get disabled skills config file path
|
||||
*/
|
||||
function getDisabledSkillsConfigPath(location: SkillLocation, projectPath: string): string {
|
||||
if (location === 'project') {
|
||||
return join(projectPath, '.claude', 'disabled-skills.json');
|
||||
}
|
||||
return join(homedir(), '.claude', 'disabled-skills.json');
|
||||
}
|
||||
|
||||
/**
|
||||
* Load disabled skills configuration
|
||||
*/
|
||||
function loadDisabledSkillsConfig(location: SkillLocation, projectPath: string): DisabledSkillsConfig {
|
||||
const configPath = getDisabledSkillsConfigPath(location, projectPath);
|
||||
try {
|
||||
if (existsSync(configPath)) {
|
||||
const content = readFileSync(configPath, 'utf8');
|
||||
const config = JSON.parse(content);
|
||||
return { skills: config.skills || {} };
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`[Skills] Failed to load disabled skills config: ${error}`);
|
||||
}
|
||||
return { skills: {} };
|
||||
}
|
||||
|
||||
/**
|
||||
* Save disabled skills configuration
|
||||
*/
|
||||
function saveDisabledSkillsConfig(location: SkillLocation, projectPath: string, config: DisabledSkillsConfig): void {
|
||||
const configPath = getDisabledSkillsConfigPath(location, projectPath);
|
||||
const configDir = join(configPath, '..');
|
||||
|
||||
if (!existsSync(configDir)) {
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
}
|
||||
|
||||
writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf8');
|
||||
}
|
||||
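The two helpers above round-trip a small JSON file on disk; a sketch of its shape, using the `DisabledSkillsConfig` type introduced in this commit (the skill name and reason below are hypothetical):

```typescript
import type { DisabledSkillsConfig } from '../../types/skill-types.js';

// Example contents of .claude/disabled-skills.json (hypothetical entry):
const example: DisabledSkillsConfig = {
  skills: {
    'my-legacy-skill': {
      disabledAt: '2026-01-22T08:15:00.000Z',
      reason: 'Replaced by a newer skill'
    }
  }
};
```
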
|
||||
/**
|
||||
* Move directory with fallback to copy-delete
|
||||
*/
|
||||
function moveDirectory(source: string, target: string): void {
|
||||
try {
|
||||
// Try atomic rename first
|
||||
renameSync(source, target);
|
||||
} catch (error: unknown) {
|
||||
const err = error as NodeJS.ErrnoException;
|
||||
// If rename fails (cross-filesystem, permission issues), fallback to copy-delete
|
||||
if (err.code === 'EXDEV' || err.code === 'EPERM' || err.code === 'EBUSY') {
|
||||
cpSync(source, target, { recursive: true, force: true });
|
||||
rmSync(source, { recursive: true, force: true });
|
||||
} else {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable a skill by moving it to disabled directory
|
||||
*/
|
||||
async function disableSkill(
|
||||
skillName: string,
|
||||
location: SkillLocation,
|
||||
projectPath: string,
|
||||
initialPath: string,
|
||||
reason?: string
|
||||
): Promise<SkillOperationResult> {
|
||||
try {
|
||||
// Validate skill name
|
||||
if (skillName.includes('/') || skillName.includes('\\') || skillName.includes('..')) {
|
||||
return { success: false, message: 'Invalid skill name', status: 400 };
|
||||
}
|
||||
|
||||
// Get source directory
|
||||
let skillsDir: string;
|
||||
if (location === 'project') {
|
||||
try {
|
||||
const validatedProjectPath = await validateAllowedPath(projectPath, { mustExist: true, allowedDirectories: [initialPath] });
|
||||
skillsDir = join(validatedProjectPath, '.claude', 'skills');
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err);
|
||||
return { success: false, message: message.includes('Access denied') ? 'Access denied' : 'Invalid path', status: 403 };
|
||||
}
|
||||
} else {
|
||||
skillsDir = join(homedir(), '.claude', 'skills');
|
||||
}
|
||||
|
||||
const sourceDir = join(skillsDir, skillName);
|
||||
if (!existsSync(sourceDir)) {
|
||||
return { success: false, message: 'Skill not found', status: 404 };
|
||||
}
|
||||
|
||||
// Get target directory
|
||||
const disabledDir = getDisabledSkillsDir(location, projectPath);
|
||||
if (!existsSync(disabledDir)) {
|
||||
mkdirSync(disabledDir, { recursive: true });
|
||||
}
|
||||
|
||||
const targetDir = join(disabledDir, skillName);
|
||||
if (existsSync(targetDir)) {
|
||||
return { success: false, message: 'Skill already exists in disabled directory', status: 409 };
|
||||
}
|
||||
|
||||
// Move skill to disabled directory
|
||||
moveDirectory(sourceDir, targetDir);
|
||||
|
||||
// Update config
|
||||
const config = loadDisabledSkillsConfig(location, projectPath);
|
||||
config.skills[skillName] = {
|
||||
disabledAt: new Date().toISOString(),
|
||||
reason
|
||||
};
|
||||
saveDisabledSkillsConfig(location, projectPath, config);
|
||||
|
||||
return { success: true, message: 'Skill disabled', skillName, location };
|
||||
} catch (error) {
|
||||
return { success: false, message: (error as Error).message, status: 500 };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable a skill by moving it back from disabled directory
|
||||
*/
|
||||
async function enableSkill(
|
||||
skillName: string,
|
||||
location: SkillLocation,
|
||||
projectPath: string,
|
||||
initialPath: string
|
||||
): Promise<SkillOperationResult> {
|
||||
try {
|
||||
// Validate skill name
|
||||
if (skillName.includes('/') || skillName.includes('\\') || skillName.includes('..')) {
|
||||
return { success: false, message: 'Invalid skill name', status: 400 };
|
||||
}
|
||||
|
||||
// Get source directory (disabled)
|
||||
const disabledDir = getDisabledSkillsDir(location, projectPath);
|
||||
const sourceDir = join(disabledDir, skillName);
|
||||
if (!existsSync(sourceDir)) {
|
||||
return { success: false, message: 'Disabled skill not found', status: 404 };
|
||||
}
|
||||
|
||||
// Get target directory (skills)
|
||||
let skillsDir: string;
|
||||
if (location === 'project') {
|
||||
try {
|
||||
const validatedProjectPath = await validateAllowedPath(projectPath, { mustExist: true, allowedDirectories: [initialPath] });
|
||||
skillsDir = join(validatedProjectPath, '.claude', 'skills');
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err);
|
||||
return { success: false, message: message.includes('Access denied') ? 'Access denied' : 'Invalid path', status: 403 };
|
||||
}
|
||||
} else {
|
||||
skillsDir = join(homedir(), '.claude', 'skills');
|
||||
}
|
||||
|
||||
if (!existsSync(skillsDir)) {
|
||||
mkdirSync(skillsDir, { recursive: true });
|
||||
}
|
||||
|
||||
const targetDir = join(skillsDir, skillName);
|
||||
if (existsSync(targetDir)) {
|
||||
return { success: false, message: 'Skill already exists in skills directory', status: 409 };
|
||||
}
|
||||
|
||||
// Move skill back to skills directory
|
||||
moveDirectory(sourceDir, targetDir);
|
||||
|
||||
// Update config
|
||||
const config = loadDisabledSkillsConfig(location, projectPath);
|
||||
delete config.skills[skillName];
|
||||
saveDisabledSkillsConfig(location, projectPath, config);
|
||||
|
||||
return { success: true, message: 'Skill enabled', skillName, location };
|
||||
} catch (error) {
|
||||
return { success: false, message: (error as Error).message, status: 500 };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of disabled skills
|
||||
*/
|
||||
function getDisabledSkillsList(location: SkillLocation, projectPath: string): DisabledSkillSummary[] {
|
||||
const disabledDir = getDisabledSkillsDir(location, projectPath);
|
||||
const config = loadDisabledSkillsConfig(location, projectPath);
|
||||
const result: DisabledSkillSummary[] = [];
|
||||
|
||||
if (!existsSync(disabledDir)) {
|
||||
return result;
|
||||
}
|
||||
|
||||
try {
|
||||
const skills = readdirSync(disabledDir, { withFileTypes: true });
|
||||
for (const skill of skills) {
|
||||
if (skill.isDirectory()) {
|
||||
const skillMdPath = join(disabledDir, skill.name, 'SKILL.md');
|
||||
if (existsSync(skillMdPath)) {
|
||||
const content = readFileSync(skillMdPath, 'utf8');
|
||||
const parsed = parseSkillFrontmatter(content);
|
||||
const skillDir = join(disabledDir, skill.name);
|
||||
const supportingFiles = getSupportingFiles(skillDir);
|
||||
const disabledInfo = config.skills[skill.name] || { disabledAt: new Date().toISOString() };
|
||||
|
||||
result.push({
|
||||
name: parsed.name || skill.name,
|
||||
folderName: skill.name,
|
||||
description: parsed.description,
|
||||
version: parsed.version,
|
||||
allowedTools: parsed.allowedTools,
|
||||
location,
|
||||
path: skillDir,
|
||||
supportingFiles,
|
||||
disabledAt: disabledInfo.disabledAt,
|
||||
reason: disabledInfo.reason
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`[Skills] Failed to read disabled skills: ${error}`);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get extended skills config including disabled skills
|
||||
*/
|
||||
function getExtendedSkillsConfig(projectPath: string): ExtendedSkillsConfig {
|
||||
const baseConfig = getSkillsConfig(projectPath);
|
||||
return {
|
||||
...baseConfig,
|
||||
disabledProjectSkills: getDisabledSkillsList('project', projectPath),
|
||||
disabledUserSkills: getDisabledSkillsList('user', projectPath)
|
||||
};
|
||||
}
|
||||
|
||||
// ========== Active Skills Helper Functions ==========
|
||||
|
||||
/**
|
||||
* Parse skill frontmatter (YAML header)
|
||||
* @param {string} content - Skill file content
|
||||
@@ -660,15 +889,23 @@ Create a new Claude Code skill with the following specifications:
|
||||
export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
const { pathname, url, req, res, initialPath, handlePostRequest, broadcastToClients } = ctx;
|
||||
|
||||
// API: Get all skills (project and user)
|
||||
if (pathname === '/api/skills') {
|
||||
// API: Get all skills (project and user) - with optional extended format
|
||||
if (pathname === '/api/skills' && req.method === 'GET') {
|
||||
const projectPathParam = url.searchParams.get('path') || initialPath;
|
||||
const includeDisabled = url.searchParams.get('includeDisabled') === 'true';
|
||||
|
||||
try {
|
||||
const validatedProjectPath = await validateAllowedPath(projectPathParam, { mustExist: true, allowedDirectories: [initialPath] });
|
||||
const skillsData = getSkillsConfig(validatedProjectPath);
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify(skillsData));
|
||||
|
||||
if (includeDisabled) {
|
||||
const extendedData = getExtendedSkillsConfig(validatedProjectPath);
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify(extendedData));
|
||||
} else {
|
||||
const skillsData = getSkillsConfig(validatedProjectPath);
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify(skillsData));
|
||||
}
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err);
|
||||
const status = message.includes('Access denied') ? 403 : 400;
|
||||
@@ -679,6 +916,73 @@ export async function handleSkillsRoutes(ctx: RouteContext): Promise<boolean> {
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Get disabled skills list
|
||||
if (pathname === '/api/skills/disabled' && req.method === 'GET') {
|
||||
const projectPathParam = url.searchParams.get('path') || initialPath;
|
||||
|
||||
try {
|
||||
const validatedProjectPath = await validateAllowedPath(projectPathParam, { mustExist: true, allowedDirectories: [initialPath] });
|
||||
const disabledProjectSkills = getDisabledSkillsList('project', validatedProjectPath);
|
||||
const disabledUserSkills = getDisabledSkillsList('user', validatedProjectPath);
|
||||
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ disabledProjectSkills, disabledUserSkills }));
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err);
|
||||
const status = message.includes('Access denied') ? 403 : 400;
|
||||
res.writeHead(status, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: status === 403 ? 'Access denied' : 'Invalid path', disabledProjectSkills: [], disabledUserSkills: [] }));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Disable a skill
|
||||
if (pathname.match(/^\/api\/skills\/[^/]+\/disable$/) && req.method === 'POST') {
|
||||
const pathParts = pathname.split('/');
|
||||
const skillName = decodeURIComponent(pathParts[3]);
|
||||
|
||||
handlePostRequest(req, res, async (body) => {
|
||||
if (!isRecord(body)) {
|
||||
return { error: 'Invalid request body', status: 400 };
|
||||
}
|
||||
|
||||
const locationValue = body.location;
|
||||
const projectPathParam = typeof body.projectPath === 'string' ? body.projectPath : undefined;
|
||||
const reason = typeof body.reason === 'string' ? body.reason : undefined;
|
||||
|
||||
if (locationValue !== 'project' && locationValue !== 'user') {
|
||||
return { error: 'Location is required (project or user)' };
|
||||
}
|
||||
|
||||
const projectPath = projectPathParam || initialPath;
|
||||
return disableSkill(skillName, locationValue, projectPath, initialPath, reason);
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
// API: Enable a skill
|
||||
if (pathname.match(/^\/api\/skills\/[^/]+\/enable$/) && req.method === 'POST') {
|
||||
const pathParts = pathname.split('/');
|
||||
const skillName = decodeURIComponent(pathParts[3]);
|
||||
|
||||
handlePostRequest(req, res, async (body) => {
|
||||
if (!isRecord(body)) {
|
||||
return { error: 'Invalid request body', status: 400 };
|
||||
}
|
||||
|
||||
const locationValue = body.location;
|
||||
const projectPathParam = typeof body.projectPath === 'string' ? body.projectPath : undefined;
|
||||
|
||||
if (locationValue !== 'project' && locationValue !== 'user') {
|
||||
return { error: 'Location is required (project or user)' };
|
||||
}
|
||||
|
||||
const projectPath = projectPathParam || initialPath;
|
||||
return enableSkill(skillName, locationValue, projectPath, initialPath);
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
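A minimal sketch of how a client might drive the disable endpoint and then read back the disabled list (skill name and paths are placeholders; error handling is simplified):

```typescript
// Disable a project-level skill, then list what is currently disabled.
async function disableThenList(skillName: string, projectPath: string): Promise<void> {
  const res = await fetch(`/api/skills/${encodeURIComponent(skillName)}/disable`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ location: 'project', projectPath, reason: 'temporarily unused' })
  });
  if (!res.ok) throw new Error(`Disable failed with status ${res.status}`);

  const listRes = await fetch(`/api/skills/disabled?path=${encodeURIComponent(projectPath)}`);
  const { disabledProjectSkills } = await listRes.json();
  console.log(disabledProjectSkills.map((s: { folderName: string }) => s.folderName));
}
```
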
// API: List skill directory contents
|
||||
if (pathname.match(/^\/api\/skills\/[^/]+\/dir$/) && req.method === 'GET') {
|
||||
const pathParts = pathname.split('/');
|
||||
|
||||
@@ -1588,6 +1588,18 @@ const i18n = {
|
||||
'skills.generate': 'Generate',
|
||||
'skills.cliGenerateInfo': 'AI will generate a complete skill based on your description',
|
||||
'skills.cliGenerateTimeHint': 'Generation may take a few minutes depending on complexity',
|
||||
'skills.disable': 'Disable',
|
||||
'skills.enable': 'Enable',
|
||||
'skills.disabled': 'Disabled',
|
||||
'skills.enabled': 'Enabled',
|
||||
'skills.disabledSkills': 'Disabled Skills',
|
||||
'skills.disabledAt': 'Disabled at',
|
||||
'skills.enableConfirm': 'Are you sure you want to enable the skill "{name}"?',
|
||||
'skills.disableConfirm': 'Are you sure you want to disable the skill "{name}"?',
|
||||
'skills.noDisabledSkills': 'No disabled skills',
|
||||
'skills.toggleError': 'Failed to toggle skill status',
|
||||
'skills.enableSuccess': 'Skill "{name}" enabled successfully',
|
||||
'skills.disableSuccess': 'Skill "{name}" disabled successfully',
|
||||
|
||||
// Rules
|
||||
'nav.rules': 'Rules',
|
||||
@@ -4212,6 +4224,18 @@ const i18n = {
|
||||
'skills.generate': '生成',
|
||||
'skills.cliGenerateInfo': 'AI 将根据你的描述生成完整的技能',
|
||||
'skills.cliGenerateTimeHint': '生成时间取决于复杂度,可能需要几分钟',
|
||||
'skills.disable': '禁用',
|
||||
'skills.enable': '启用',
|
||||
'skills.disabled': '已禁用',
|
||||
'skills.enabled': '已启用',
|
||||
'skills.disabledSkills': '已禁用的技能',
|
||||
'skills.disabledAt': '禁用时间',
|
||||
'skills.enableConfirm': '确定要启用技能 "{name}" 吗?',
|
||||
'skills.disableConfirm': '确定要禁用技能 "{name}" 吗?',
|
||||
'skills.noDisabledSkills': '没有已禁用的技能',
|
||||
'skills.toggleError': '切换技能状态失败',
|
||||
'skills.enableSuccess': '技能 "{name}" 启用成功',
|
||||
'skills.disableSuccess': '技能 "{name}" 禁用成功',
|
||||
|
||||
// Rules
|
||||
'nav.rules': '规则',
|
||||
|
||||
@@ -4,10 +4,13 @@
|
||||
// ========== Skills State ==========
|
||||
var skillsData = {
|
||||
projectSkills: [],
|
||||
userSkills: []
|
||||
userSkills: [],
|
||||
disabledProjectSkills: [],
|
||||
disabledUserSkills: []
|
||||
};
|
||||
var selectedSkill = null;
|
||||
var skillsLoading = false;
|
||||
var showDisabledSkills = false;
|
||||
|
||||
// ========== Main Render Function ==========
|
||||
async function renderSkillsManager() {
|
||||
@@ -36,18 +39,20 @@ async function renderSkillsManager() {
|
||||
async function loadSkillsData() {
|
||||
skillsLoading = true;
|
||||
try {
|
||||
const response = await fetch('/api/skills?path=' + encodeURIComponent(projectPath));
|
||||
const response = await fetch('/api/skills?path=' + encodeURIComponent(projectPath) + '&includeDisabled=true');
|
||||
if (!response.ok) throw new Error('Failed to load skills');
|
||||
const data = await response.json();
|
||||
skillsData = {
|
||||
projectSkills: data.projectSkills || [],
|
||||
userSkills: data.userSkills || []
|
||||
userSkills: data.userSkills || [],
|
||||
disabledProjectSkills: data.disabledProjectSkills || [],
|
||||
disabledUserSkills: data.disabledUserSkills || []
|
||||
};
|
||||
// Update badge
|
||||
updateSkillsBadge();
|
||||
} catch (err) {
|
||||
console.error('Failed to load skills:', err);
|
||||
skillsData = { projectSkills: [], userSkills: [] };
|
||||
skillsData = { projectSkills: [], userSkills: [], disabledProjectSkills: [], disabledUserSkills: [] };
|
||||
} finally {
|
||||
skillsLoading = false;
|
||||
}
|
||||
@@ -67,6 +72,9 @@ function renderSkillsView() {
|
||||
|
||||
const projectSkills = skillsData.projectSkills || [];
|
||||
const userSkills = skillsData.userSkills || [];
|
||||
const disabledProjectSkills = skillsData.disabledProjectSkills || [];
|
||||
const disabledUserSkills = skillsData.disabledUserSkills || [];
|
||||
const totalDisabled = disabledProjectSkills.length + disabledUserSkills.length;
|
||||
|
||||
container.innerHTML = `
|
||||
<div class="skills-manager">
|
||||
@@ -109,7 +117,7 @@ function renderSkillsView() {
|
||||
</div>
|
||||
` : `
|
||||
<div class="skills-grid grid gap-3">
|
||||
${projectSkills.map(skill => renderSkillCard(skill, 'project')).join('')}
|
||||
${projectSkills.map(skill => renderSkillCard(skill, 'project', false)).join('')}
|
||||
</div>
|
||||
`}
|
||||
</div>
|
||||
@@ -133,11 +141,48 @@ function renderSkillsView() {
|
||||
</div>
|
||||
` : `
|
||||
<div class="skills-grid grid gap-3">
|
||||
${userSkills.map(skill => renderSkillCard(skill, 'user')).join('')}
|
||||
${userSkills.map(skill => renderSkillCard(skill, 'user', false)).join('')}
|
||||
</div>
|
||||
`}
|
||||
</div>
|
||||
|
||||
<!-- Disabled Skills Section -->
|
||||
${totalDisabled > 0 ? `
|
||||
<div class="skills-section mb-6">
|
||||
<div class="flex items-center justify-between mb-4 cursor-pointer" onclick="toggleDisabledSkillsSection()">
|
||||
<div class="flex items-center gap-2">
|
||||
<i data-lucide="${showDisabledSkills ? 'chevron-down' : 'chevron-right'}" class="w-5 h-5 text-muted-foreground transition-transform"></i>
|
||||
<i data-lucide="eye-off" class="w-5 h-5 text-muted-foreground"></i>
|
||||
<h3 class="text-lg font-semibold text-muted-foreground">${t('skills.disabledSkills')}</h3>
|
||||
</div>
|
||||
<span class="text-sm text-muted-foreground">${totalDisabled} ${t('skills.skillsCount')}</span>
|
||||
</div>
|
||||
|
||||
${showDisabledSkills ? `
|
||||
${disabledProjectSkills.length > 0 ? `
|
||||
<div class="mb-4">
|
||||
<div class="flex items-center gap-2 mb-2">
|
||||
<span class="text-xs px-2 py-0.5 bg-muted text-muted-foreground rounded-full">${t('skills.projectSkills')}</span>
|
||||
</div>
|
||||
<div class="skills-grid grid gap-3">
|
||||
${disabledProjectSkills.map(skill => renderSkillCard(skill, 'project', true)).join('')}
|
||||
</div>
|
||||
</div>
|
||||
` : ''}
|
||||
${disabledUserSkills.length > 0 ? `
|
||||
<div>
|
||||
<div class="flex items-center gap-2 mb-2">
|
||||
<span class="text-xs px-2 py-0.5 bg-muted text-muted-foreground rounded-full">${t('skills.userSkills')}</span>
|
||||
</div>
|
||||
<div class="skills-grid grid gap-3">
|
||||
${disabledUserSkills.map(skill => renderSkillCard(skill, 'user', true)).join('')}
|
||||
</div>
|
||||
</div>
|
||||
` : ''}
|
||||
` : ''}
|
||||
</div>
|
||||
` : ''}
|
||||
|
||||
<!-- Skill Detail Panel -->
|
||||
${selectedSkill ? renderSkillDetailPanel(selectedSkill) : ''}
|
||||
</div>
|
||||
@@ -147,19 +192,19 @@ function renderSkillsView() {
|
||||
if (typeof lucide !== 'undefined') lucide.createIcons();
|
||||
}
|
||||
|
||||
function renderSkillCard(skill, location) {
|
||||
function renderSkillCard(skill, location, isDisabled = false) {
|
||||
const hasAllowedTools = skill.allowedTools && skill.allowedTools.length > 0;
|
||||
const hasSupportingFiles = skill.supportingFiles && skill.supportingFiles.length > 0;
|
||||
const locationIcon = location === 'project' ? 'folder' : 'user';
|
||||
const locationClass = location === 'project' ? 'text-primary' : 'text-indigo';
|
||||
const locationBg = location === 'project' ? 'bg-primary/10' : 'bg-indigo/10';
|
||||
const folderName = skill.folderName || skill.name;
|
||||
const cardOpacity = isDisabled ? 'opacity-60' : '';
|
||||
|
||||
return `
|
||||
<div class="skill-card bg-card border border-border rounded-lg p-4 hover:shadow-md transition-all cursor-pointer"
|
||||
onclick="showSkillDetail('${escapeHtml(folderName)}', '${location}')">
|
||||
<div class="skill-card bg-card border border-border rounded-lg p-4 hover:shadow-md transition-all ${cardOpacity}">
|
||||
<div class="flex items-start justify-between mb-3">
|
||||
<div class="flex items-center gap-3">
|
||||
<div class="flex items-center gap-3 cursor-pointer" onclick="showSkillDetail('${escapeHtml(folderName)}', '${location}')">
|
||||
<div class="w-10 h-10 ${locationBg} rounded-lg flex items-center justify-center">
|
||||
<i data-lucide="sparkles" class="w-5 h-5 ${locationClass}"></i>
|
||||
</div>
|
||||
@@ -168,27 +213,39 @@ function renderSkillCard(skill, location) {
|
||||
${skill.version ? `<span class="text-xs text-muted-foreground">v${escapeHtml(skill.version)}</span>` : ''}
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-center gap-1">
|
||||
<div class="flex items-center gap-2">
|
||||
<span class="inline-flex items-center px-2 py-0.5 text-xs font-medium rounded-full ${locationBg} ${locationClass}">
|
||||
<i data-lucide="${locationIcon}" class="w-3 h-3 mr-1"></i>
|
||||
${location}
|
||||
</span>
|
||||
<button class="p-1.5 rounded-lg transition-colors ${isDisabled ? 'text-green-600 hover:bg-green-100' : 'text-amber-600 hover:bg-amber-100'}"
|
||||
onclick="event.stopPropagation(); toggleSkillEnabled('${escapeHtml(folderName)}', '${location}', ${!isDisabled})"
|
||||
title="${isDisabled ? t('skills.enable') : t('skills.disable')}">
|
||||
<i data-lucide="${isDisabled ? 'toggle-left' : 'toggle-right'}" class="w-4 h-4"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p class="text-sm text-muted-foreground mb-3 line-clamp-2">${escapeHtml(skill.description || t('skills.noDescription'))}</p>
|
||||
<p class="text-sm text-muted-foreground mb-3 line-clamp-2 cursor-pointer" onclick="showSkillDetail('${escapeHtml(folderName)}', '${location}')">${escapeHtml(skill.description || t('skills.noDescription'))}</p>
|
||||
|
||||
<div class="flex items-center gap-3 text-xs text-muted-foreground">
|
||||
${hasAllowedTools ? `
|
||||
<span class="flex items-center gap-1">
|
||||
<i data-lucide="lock" class="w-3 h-3"></i>
|
||||
${skill.allowedTools.length} ${t('skills.tools')}
|
||||
</span>
|
||||
` : ''}
|
||||
${hasSupportingFiles ? `
|
||||
<span class="flex items-center gap-1">
|
||||
<i data-lucide="file-text" class="w-3 h-3"></i>
|
||||
${skill.supportingFiles.length} ${t('skills.files')}
|
||||
<div class="flex items-center justify-between text-xs text-muted-foreground">
|
||||
<div class="flex items-center gap-3">
|
||||
${hasAllowedTools ? `
|
||||
<span class="flex items-center gap-1">
|
||||
<i data-lucide="lock" class="w-3 h-3"></i>
|
||||
${skill.allowedTools.length} ${t('skills.tools')}
|
||||
</span>
|
||||
` : ''}
|
||||
${hasSupportingFiles ? `
|
||||
<span class="flex items-center gap-1">
|
||||
<i data-lucide="file-text" class="w-3 h-3"></i>
|
||||
${skill.supportingFiles.length} ${t('skills.files')}
|
||||
</span>
|
||||
` : ''}
|
||||
</div>
|
||||
${isDisabled && skill.disabledAt ? `
|
||||
<span class="text-xs text-muted-foreground/70">
|
||||
${t('skills.disabledAt')}: ${formatDisabledDate(skill.disabledAt)}
|
||||
</span>
|
||||
` : ''}
|
||||
</div>
|
||||
@@ -373,6 +430,61 @@ function editSkill(skillName, location) {
|
||||
}
|
||||
}
|
||||
|
||||
// ========== Enable/Disable Skills Functions ==========
|
||||
|
||||
async function toggleSkillEnabled(skillName, location, currentlyEnabled) {
|
||||
const action = currentlyEnabled ? 'disable' : 'enable';
|
||||
const confirmMessage = currentlyEnabled
|
||||
? t('skills.disableConfirm', { name: skillName })
|
||||
: t('skills.enableConfirm', { name: skillName });
|
||||
|
||||
if (!confirm(confirmMessage)) return;
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/skills/' + encodeURIComponent(skillName) + '/' + action, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ location, projectPath })
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.message || 'Operation failed');
|
||||
}
|
||||
|
||||
// Close detail panel if open
|
||||
selectedSkill = null;
|
||||
|
||||
// Reload skills data
|
||||
await loadSkillsData();
|
||||
renderSkillsView();
|
||||
|
||||
if (window.showToast) {
|
||||
const message = currentlyEnabled ? t('skills.disabled') : t('skills.enabled');
|
||||
showToast(message, 'success');
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to toggle skill:', err);
|
||||
if (window.showToast) {
|
||||
showToast(err.message || t('skills.toggleError'), 'error');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function toggleDisabledSkillsSection() {
|
||||
showDisabledSkills = !showDisabledSkills;
|
||||
renderSkillsView();
|
||||
}
|
||||
|
||||
function formatDisabledDate(isoString) {
|
||||
try {
|
||||
const date = new Date(isoString);
|
||||
return date.toLocaleDateString() + ' ' + date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
|
||||
} catch {
|
||||
return isoString;
|
||||
}
|
||||
}
|
||||
|
||||
// ========== Create Skill Modal ==========
|
||||
var skillCreateState = {
|
||||
mode: 'import', // 'import' or 'cli-generate'
|
||||
|
||||
@@ -280,11 +280,12 @@ export async function handler(params: Record<string, unknown>): Promise<ToolResu
|
||||
// Scan directory
|
||||
const { info: structureInfo, folderType } = scanDirectoryStructure(targetPath);
|
||||
|
||||
// Calculate output path
|
||||
// Calculate output path (relative for display, absolute for CLI prompt)
|
||||
const outputPath = calculateOutputPath(targetPath, projectName, process.cwd());
|
||||
const absOutputPath = resolve(process.cwd(), outputPath);
|
||||
|
||||
// Ensure output directory exists
|
||||
mkdirSync(outputPath, { recursive: true });
|
||||
mkdirSync(absOutputPath, { recursive: true });
|
||||
|
||||
// Build prompt based on strategy
|
||||
let prompt: string;
|
||||
@@ -304,7 +305,7 @@ Generate documentation files:
|
||||
- API.md: Code API documentation
|
||||
- README.md: Module overview and usage
|
||||
|
||||
Output directory: ${outputPath}
|
||||
Output directory: ${absOutputPath}
|
||||
|
||||
Template Guidelines:
|
||||
${templateContent}`;
|
||||
@@ -318,7 +319,7 @@ Read: @*/API.md @*/README.md
|
||||
Generate documentation file:
|
||||
- README.md: Navigation overview of subdirectories
|
||||
|
||||
Output directory: ${outputPath}
|
||||
Output directory: ${absOutputPath}
|
||||
|
||||
Template Guidelines:
|
||||
${templateContent}`;
|
||||
@@ -327,12 +328,13 @@ ${templateContent}`;
|
||||
|
||||
case 'project-readme':
|
||||
templateContent = loadTemplate('project-readme');
|
||||
const projectDocsDir = resolve(process.cwd(), '.workflow', 'docs', projectName);
|
||||
prompt = `Read all module documentation:
|
||||
@.workflow/docs/${projectName}/**/API.md
|
||||
@.workflow/docs/${projectName}/**/README.md
|
||||
|
||||
Generate project-level documentation:
|
||||
- README.md in .workflow/docs/${projectName}/
|
||||
- README.md in ${projectDocsDir}/
|
||||
|
||||
Template Guidelines:
|
||||
${templateContent}`;
|
||||
@@ -340,6 +342,7 @@ ${templateContent}`;
|
||||
|
||||
case 'project-architecture':
|
||||
templateContent = loadTemplate('project-architecture');
|
||||
const projectArchDir = resolve(process.cwd(), '.workflow', 'docs', projectName);
|
||||
prompt = `Read project documentation:
|
||||
@.workflow/docs/${projectName}/README.md
|
||||
@.workflow/docs/${projectName}/**/API.md
|
||||
@@ -348,13 +351,14 @@ Generate:
|
||||
- ARCHITECTURE.md: System design documentation
|
||||
- EXAMPLES.md: Usage examples
|
||||
|
||||
Output directory: .workflow/docs/${projectName}/
|
||||
Output directory: ${projectArchDir}/
|
||||
|
||||
Template Guidelines:
|
||||
${templateContent}`;
|
||||
break;
|
||||
|
||||
case 'http-api':
|
||||
const apiDocsDir = resolve(process.cwd(), '.workflow', 'docs', projectName, 'api');
|
||||
prompt = `Read API route files:
|
||||
@**/routes/**/*.ts @**/routes/**/*.js
|
||||
@**/api/**/*.ts @**/api/**/*.js
|
||||
@@ -362,7 +366,7 @@ ${templateContent}`;
|
||||
Generate HTTP API documentation:
|
||||
- api/README.md: REST API endpoints documentation
|
||||
|
||||
Output directory: .workflow/docs/${projectName}/api/`;
|
||||
Output directory: ${apiDocsDir}/`;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
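The change above swaps the relative `outputPath` for `absOutputPath` in the generated prompts; a small illustration of why resolving against `process.cwd()` matters before handing the path to an external CLI (the example path is hypothetical):

```typescript
import { resolve } from 'path';

// Suppose calculateOutputPath returned a repo-relative path such as:
const outputPath = '.workflow/docs/my-project/module-a'; // hypothetical value

// Resolving against the current working directory pins it to an absolute location,
// so the prompt stays unambiguous regardless of where the CLI process runs:
const absOutputPath = resolve(process.cwd(), outputPath);
// e.g. /home/user/repo/.workflow/docs/my-project/module-a
```
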
118  ccw/src/types/skill-types.ts  Normal file
@@ -0,0 +1,118 @@
|
||||
/**
|
||||
* Skill Types Definition
|
||||
* Types for skill management including enable/disable functionality
|
||||
*/
|
||||
|
||||
/**
|
||||
* Skill location type
|
||||
*/
|
||||
export type SkillLocation = 'project' | 'user';
|
||||
|
||||
/**
|
||||
* Information about a disabled skill
|
||||
*/
|
||||
export interface DisabledSkillInfo {
|
||||
/** When the skill was disabled */
|
||||
disabledAt: string;
|
||||
/** Optional reason for disabling */
|
||||
reason?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for disabled skills
|
||||
* Stored in disabled-skills.json
|
||||
*/
|
||||
export interface DisabledSkillsConfig {
|
||||
/** Map of skill name to disabled info */
|
||||
skills: Record<string, DisabledSkillInfo>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of a skill operation (enable/disable)
|
||||
*/
|
||||
export interface SkillOperationResult {
|
||||
success: boolean;
|
||||
message?: string;
|
||||
skillName?: string;
|
||||
location?: SkillLocation;
|
||||
status?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Summary information for an active skill
|
||||
*/
|
||||
export interface SkillSummary {
|
||||
/** Skill name from SKILL.md frontmatter */
|
||||
name: string;
|
||||
/** Folder name (actual directory name) */
|
||||
folderName: string;
|
||||
/** Skill description */
|
||||
description: string;
|
||||
/** Skill version if specified */
|
||||
version: string | null;
|
||||
/** Allowed tools list */
|
||||
allowedTools: string[];
|
||||
/** Skill location (project or user) */
|
||||
location: SkillLocation;
|
||||
/** Full path to skill directory */
|
||||
path: string;
|
||||
/** Supporting files in the skill folder */
|
||||
supportingFiles: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Summary information for a disabled skill
|
||||
*/
|
||||
export interface DisabledSkillSummary extends SkillSummary {
|
||||
/** When the skill was disabled */
|
||||
disabledAt: string;
|
||||
/** Optional reason for disabling */
|
||||
reason?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Skills configuration for active skills only (backward compatible)
|
||||
*/
|
||||
export interface SkillsConfig {
|
||||
projectSkills: SkillSummary[];
|
||||
userSkills: SkillSummary[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Extended skills configuration including disabled skills
|
||||
*/
|
||||
export interface ExtendedSkillsConfig extends SkillsConfig {
|
||||
/** Disabled project skills */
|
||||
disabledProjectSkills: DisabledSkillSummary[];
|
||||
/** Disabled user skills */
|
||||
disabledUserSkills: DisabledSkillSummary[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Parsed skill frontmatter from SKILL.md
|
||||
*/
|
||||
export interface ParsedSkillFrontmatter {
|
||||
name: string;
|
||||
description: string;
|
||||
version: string | null;
|
||||
allowedTools: string[];
|
||||
content: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Skill info extracted from validation
|
||||
*/
|
||||
export interface SkillInfo {
|
||||
name: string;
|
||||
description: string;
|
||||
version: string | null;
|
||||
allowedTools: string[];
|
||||
supportingFiles: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Skill folder validation result
|
||||
*/
|
||||
export type SkillFolderValidation =
|
||||
| { valid: true; errors: string[]; skillInfo: SkillInfo }
|
||||
| { valid: false; errors: string[]; skillInfo: null };
|
||||