mirror of
https://github.com/catlog22/Claude-Code-Workflow.git
synced 2026-03-01 15:03:57 +08:00
Compare commits
33 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a753327acc | ||
|
|
f61a3da957 | ||
|
|
b0fb899675 | ||
|
|
0a49dc0675 | ||
|
|
096fc1c380 | ||
|
|
29f0a6cdb8 | ||
|
|
e83414abf3 | ||
|
|
e42597b1bc | ||
|
|
67b2129f3c | ||
|
|
19fb4d86c7 | ||
|
|
65763c76e9 | ||
|
|
4a89f626fc | ||
|
|
b0bfdb907a | ||
|
|
39bbf960c2 | ||
|
|
5e35da32e8 | ||
|
|
84610661ca | ||
|
|
cd54c10256 | ||
|
|
c3ddf7e322 | ||
|
|
ab65caec45 | ||
|
|
3788ba1268 | ||
|
|
d14a710797 | ||
|
|
92ac9897f2 | ||
|
|
67e78b132c | ||
|
|
0a37bc3eaf | ||
|
|
604f89b7aa | ||
|
|
46989dcbad | ||
|
|
4763edb0e4 | ||
|
|
7d349a64fb | ||
|
|
22a98b7b6c | ||
|
|
a59a86fb9b | ||
|
|
101417e028 | ||
|
|
902ee8528a | ||
|
|
54f15b6bda |
@@ -301,7 +301,7 @@ Document known constraints that affect planning:
|
||||
|
||||
[Continue for all major feature groups]
|
||||
|
||||
**Note**: Detailed task breakdown into executable work items is handled by `/workflow:plan` → `IMPL_PLAN.md`
|
||||
**Note**: Detailed task breakdown into executable work items is handled by `/workflow-plan` → `IMPL_PLAN.md`
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ Core requirements, objectives, technical approach summary (2-3 paragraphs max).
|
||||
|
||||
**Quality Gates**:
|
||||
- concept-verify: ✅ Passed (0 ambiguities remaining) | ⏭️ Skipped (user decision) | ⏳ Pending
|
||||
- plan-verify: ⏳ Pending (recommended before /workflow:execute)
|
||||
- plan-verify: ⏳ Pending (recommended before /workflow-execute)
|
||||
|
||||
**Context Package Summary**:
|
||||
- **Focus Paths**: {list key directories from context-package.json}
|
||||
|
||||
@@ -711,7 +711,7 @@ All workflows use the same file structure definition regardless of complexity. *
|
||||
│ ├── [.chat/] # CLI interaction sessions (created when analysis is run)
|
||||
│ │ ├── chat-*.md # Saved chat sessions
|
||||
│ │ └── analysis-*.md # Analysis results
|
||||
│ ├── [.process/] # Planning analysis results (created by /workflow:plan)
|
||||
│ ├── [.process/] # Planning analysis results (created by /workflow-plan)
|
||||
│ │ └── ANALYSIS_RESULTS.md # Analysis results and planning artifacts
|
||||
│ ├── IMPL_PLAN.md # Planning document (REQUIRED)
|
||||
│ ├── TODO_LIST.md # Progress tracking (REQUIRED)
|
||||
@@ -783,7 +783,7 @@ All workflows use the same file structure definition regardless of complexity. *
|
||||
**Examples**:
|
||||
|
||||
*Workflow Commands (lightweight):*
|
||||
- `/workflow:lite-plan "feature idea"` (exploratory) → `.scratchpad/lite-plan-feature-idea-20250105-143110.md`
|
||||
- `/workflow-lite-plan "feature idea"` (exploratory) → `.scratchpad/lite-plan-feature-idea-20250105-143110.md`
|
||||
- `/workflow:lite-fix "bug description"` (bug fixing) → `.scratchpad/lite-fix-bug-20250105-143130.md`
|
||||
|
||||
> **Note**: Direct CLI commands (`/cli:analyze`, `/cli:execute`, etc.) have been replaced by semantic invocation and workflow commands.
|
||||
|
||||
@@ -455,7 +455,7 @@ function buildCliCommand(task, cliTool, cliPrompt) {
|
||||
|
||||
**Auto-Check Workflow Context**:
|
||||
- Verify session context paths are provided in agent prompt
|
||||
- If missing, request session context from workflow:execute
|
||||
- If missing, request session context from workflow-execute
|
||||
- Never assume default paths without explicit session context
|
||||
|
||||
### 5. Problem-Solving
|
||||
|
||||
@@ -237,7 +237,7 @@ After Phase 4 completes, determine Phase 5 variant:
|
||||
### Phase 5-L: Loop Completion (when inner_loop=true AND more same-prefix tasks pending)
|
||||
|
||||
1. **TaskUpdate**: Mark current task `completed`
|
||||
2. **Message Bus**: Log completion
|
||||
2. **Message Bus**: Log completion with verification evidence
|
||||
```
|
||||
mcp__ccw-tools__team_msg(
|
||||
operation="log",
|
||||
@@ -245,7 +245,7 @@ After Phase 4 completes, determine Phase 5 variant:
|
||||
from=<role>,
|
||||
to="coordinator",
|
||||
type=<message_types.success>,
|
||||
summary="[<role>] <task-id> complete. <brief-summary>",
|
||||
summary="[<role>] <task-id> complete. <brief-summary>. Verified: <verification_method>",
|
||||
ref=<artifact-path>
|
||||
)
|
||||
```
|
||||
@@ -283,7 +283,7 @@ After Phase 4 completes, determine Phase 5 variant:
|
||||
| Condition | Action |
|
||||
|-----------|--------|
|
||||
| Same-prefix successor (inner loop role) | Do NOT spawn — main agent handles via inner loop |
|
||||
| 1 ready task, simple linear successor, different prefix | Spawn directly via `Task(run_in_background: true)` |
|
||||
| 1 ready task, simple linear successor, different prefix | Spawn directly via `Task(run_in_background: true)` + log `fast_advance` to message bus |
|
||||
| Multiple ready tasks (parallel window) | SendMessage to coordinator (needs orchestration) |
|
||||
| No ready tasks + others running | SendMessage to coordinator (status update) |
|
||||
| No ready tasks + nothing running | SendMessage to coordinator (pipeline may be complete) |
|
||||
@@ -311,6 +311,23 @@ inner_loop: <true|false based on successor role>`
|
||||
})
|
||||
```
|
||||
|
||||
### Fast-Advance Notification
|
||||
|
||||
After spawning a successor via fast-advance, MUST log to message bus:
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg(
|
||||
operation="log",
|
||||
team=<session_id>,
|
||||
from=<role>,
|
||||
to="coordinator",
|
||||
type="fast_advance",
|
||||
summary="[<role>] fast-advanced <completed-task-id> → spawned <successor-role> for <successor-task-id>"
|
||||
)
|
||||
```
|
||||
|
||||
This is a passive log entry (NOT a SendMessage). Coordinator reads it on next callback to reconcile `active_workers`.
|
||||
|
||||
### SendMessage Format
|
||||
|
||||
```
|
||||
@@ -320,8 +337,10 @@ SendMessage(team_name=<team_name>, recipient="coordinator", message="[<role>] <f
|
||||
**Final report contents**:
|
||||
- Tasks completed (count + list)
|
||||
- Artifacts produced (paths)
|
||||
- Files modified (paths + before/after evidence from Phase 4 verification)
|
||||
- Discuss results (verdicts + ratings)
|
||||
- Key decisions (from context_accumulator)
|
||||
- Verification summary (methods used, pass/fail status)
|
||||
- Any warnings or issues
|
||||
|
||||
---
|
||||
@@ -385,6 +404,48 @@ Write discoveries to corresponding wisdom files:
|
||||
|
||||
---
|
||||
|
||||
## Knowledge Transfer
|
||||
|
||||
### Upstream Context Loading (Phase 2)
|
||||
|
||||
When executing Phase 2 of a role-spec, the worker MUST load available cross-role context:
|
||||
|
||||
| Source | Path | Load Method |
|
||||
|--------|------|-------------|
|
||||
| Upstream artifacts | `<session>/artifacts/*.md` | Read files listed in task description or dependency chain |
|
||||
| Shared memory | `<session>/shared-memory.json` | Read and parse JSON |
|
||||
| Wisdom | `<session>/wisdom/*.md` | Read all wisdom files |
|
||||
| Exploration cache | `<session>/explorations/cache-index.json` | Check before new explorations |
|
||||
|
||||
### Downstream Context Publishing (Phase 4)
|
||||
|
||||
After Phase 4 verification, the worker MUST publish its contributions:
|
||||
|
||||
1. **Artifact**: Write deliverable to `<session>/artifacts/<prefix>-<task-id>-<name>.md`
|
||||
2. **shared-memory.json**: Read-merge-write under role namespace
|
||||
```json
|
||||
{ "<role>": { "key_findings": [...], "decisions": [...], "files_modified": [...] } }
|
||||
```
|
||||
3. **Wisdom**: Append new patterns to `learnings.md`, decisions to `decisions.md`, issues to `issues.md`
|
||||
|
||||
### Inner Loop Context Accumulator
|
||||
|
||||
For `inner_loop: true` roles, `context_accumulator` is maintained in-memory:
|
||||
|
||||
```
|
||||
context_accumulator.append({
|
||||
task: "<task-id>",
|
||||
artifact: "<output-path>",
|
||||
key_decisions: [...],
|
||||
summary: "<brief>",
|
||||
files_modified: [...]
|
||||
})
|
||||
```
|
||||
|
||||
Pass the full accumulator to each subsequent task's Phase 3 subagent as `## Prior Context`.
|
||||
|
||||
---
|
||||
|
||||
## Message Bus Protocol
|
||||
|
||||
Always use `mcp__ccw-tools__team_msg` for logging. Parameters:
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -18,25 +18,17 @@ Main process orchestrator: intent analysis → workflow selection → command ch
|
||||
| `workflow-lite-plan` | explore → plan → confirm → execute |
|
||||
| `workflow-plan` | session → context → convention → gen → verify/replan |
|
||||
| `workflow-execute` | session discovery → task processing → commit |
|
||||
| `workflow-tdd` | 6-phase TDD plan → verify |
|
||||
| `workflow-tdd-plan` | 6-phase TDD plan → verify |
|
||||
| `workflow-test-fix` | session → context → analysis → gen → cycle |
|
||||
| `workflow-multi-cli-plan` | ACE context → CLI discussion → plan → execute |
|
||||
| `review-cycle` | session/module review → fix orchestration |
|
||||
| `brainstorm` | auto/single-role → artifacts → analysis → synthesis |
|
||||
| `spec-generator` | product-brief → PRD → architecture → epics |
|
||||
| `workflow:collaborative-plan-with-file` | understanding agent → parallel agents → plan-note.md |
|
||||
| `workflow:req-plan-with-file` | requirement decomposition → issue creation → execution-plan.json |
|
||||
| `workflow:roadmap-with-file` | strategic requirement roadmap → issue creation → execution-plan.json |
|
||||
| `workflow:integration-test-cycle` | explore → test dev → test-fix cycle → reflection |
|
||||
| `workflow:refactor-cycle` | tech debt discovery → prioritize → execute → validate |
|
||||
| `team-planex` | planner + executor wave pipeline(边规划边执行)|
|
||||
| `team-iterdev` | 迭代开发团队(planner → developer → reviewer 循环)|
|
||||
| `team-lifecycle` | 全生命周期团队(spec → impl → test)|
|
||||
| `team-issue` | issue 解决团队(discover → plan → execute)|
|
||||
| `team-testing` | 测试团队(strategy → generate → execute → analyze)|
|
||||
| `team-quality-assurance` | QA 团队(scout → strategist → generator → executor → analyst)|
|
||||
| `team-brainstorm` | 团队头脑风暴(facilitator → participants → synthesizer)|
|
||||
| `team-uidesign` | UI 设计团队(designer → implementer dual-track)|
|
||||
|
||||
独立命令(仍使用 colon 格式):workflow:brainstorm-with-file, workflow:debug-with-file, workflow:analyze-with-file, workflow:collaborative-plan-with-file, workflow:req-plan-with-file, workflow:integration-test-cycle, workflow:refactor-cycle, workflow:unified-execute-with-file, workflow:clean, workflow:init, workflow:init-guidelines, workflow:ui-design:*, issue:*, workflow:session:*
|
||||
| `team-planex` | planner + executor wave pipeline(适合大量零散 issue 或 roadmap 产出的清晰 issue,实现 0→1 开发)|
|
||||
|
||||
## Core Concept: Self-Contained Skills (自包含 Skill)
|
||||
|
||||
@@ -53,22 +45,17 @@ Main process orchestrator: intent analysis → workflow selection → command ch
|
||||
|---------|-------|------|
|
||||
| 轻量 Plan+Execute | `workflow-lite-plan` | 内部完成 plan→execute |
|
||||
| 标准 Planning | `workflow-plan` → `workflow-execute` | plan 和 execute 是独立 Skill |
|
||||
| TDD Planning | `workflow-tdd` → `workflow-execute` | tdd-plan 和 execute 是独立 Skill |
|
||||
| TDD Planning | `workflow-tdd-plan` → `workflow-execute` | tdd-plan 和 execute 是独立 Skill |
|
||||
| 规格驱动 | `spec-generator` → `workflow-plan` → `workflow-execute` | 规格文档驱动完整开发 |
|
||||
| 测试流水线 | `workflow-test-fix` | 内部完成 gen→cycle |
|
||||
| 代码审查 | `review-cycle` | 内部完成 review→fix |
|
||||
| 多CLI协作 | `workflow-multi-cli-plan` | ACE context → CLI discussion → plan → execute |
|
||||
| 协作规划 | `workflow:collaborative-plan-with-file` | 多 agent 协作生成 plan-note.md |
|
||||
| 需求路线图 | `workflow:req-plan-with-file` | 需求拆解→issue 创建→执行计划 |
|
||||
| 分析→规划 | `workflow:analyze-with-file` → `workflow-lite-plan` | 协作分析产物自动传递给 lite-plan |
|
||||
| 头脑风暴→规划 | `workflow:brainstorm-with-file` → `workflow-lite-plan` | 头脑风暴产物自动传递给 lite-plan |
|
||||
| 协作规划 | `workflow:collaborative-plan-with-file` → `workflow:unified-execute-with-file` | 多 agent 协作规划→通用执行 |
|
||||
| 需求路线图 | `workflow:roadmap-with-file` → `team-planex` | 需求拆解→issue 创建→wave pipeline 执行 |
|
||||
| 集成测试循环 | `workflow:integration-test-cycle` | 自迭代集成测试闭环 |
|
||||
| 重构循环 | `workflow:refactor-cycle` | 技术债务发现→重构→验证 |
|
||||
| 团队 Plan+Execute | `team-planex` | 2 人团队 wave pipeline,边规划边执行 |
|
||||
| 团队迭代开发 | `team-iterdev` | 多角色迭代开发闭环 |
|
||||
| 团队全生命周期 | `team-lifecycle` | spec→impl→test 全流程 |
|
||||
| 团队 Issue | `team-issue` | 多角色协作 issue 解决 |
|
||||
| 团队测试 | `team-testing` | 多角色测试流水线 |
|
||||
| 团队 QA | `team-quality-assurance` | 多角色质量保障闭环 |
|
||||
| 团队头脑风暴 | `team-brainstorm` | 多角色协作头脑风暴 |
|
||||
| 团队 UI 设计 | `team-uidesign` | dual-track 设计+实现 |
|
||||
|
||||
## Execution Model
|
||||
|
||||
@@ -136,27 +123,21 @@ function analyzeIntent(input) {
|
||||
function detectTaskType(text) {
|
||||
const patterns = {
|
||||
'bugfix-hotfix': /urgent|production|critical/ && /fix|bug/,
|
||||
// With-File workflows (documented exploration with multi-CLI collaboration)
|
||||
// With-File workflows (documented exploration → auto chain to lite-plan)
|
||||
'brainstorm': /brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking|multi-perspective.*think|compare perspectives|探索.*可能/,
|
||||
'brainstorm-to-issue': /brainstorm.*issue|头脑风暴.*issue|idea.*issue|想法.*issue|从.*头脑风暴|convert.*brainstorm/,
|
||||
'debug-file': /debug.*document|hypothesis.*debug|troubleshoot.*track|investigate.*log|调试.*记录|假设.*验证|systematic debug|深度调试/,
|
||||
'analyze-file': /analyze.*document|explore.*concept|understand.*architecture|investigate.*discuss|collaborative analysis|分析.*讨论|深度.*理解|协作.*分析/,
|
||||
'collaborative-plan': /collaborative.*plan|协作.*规划|多人.*规划|multi.*agent.*plan|Plan Note|分工.*规划/,
|
||||
'req-plan': /roadmap|需求.*规划|需求.*拆解|requirement.*plan|req.*plan|progressive.*plan|路线.*图/,
|
||||
'roadmap': /roadmap|需求.*规划|需求.*拆解|requirement.*plan|progressive.*plan|路线.*图/,
|
||||
'spec-driven': /spec.*gen|specification|PRD|产品需求|产品文档|产品规格/,
|
||||
// Cycle workflows (self-iterating with reflection)
|
||||
'integration-test': /integration.*test|集成测试|端到端.*测试|e2e.*test|integration.*cycle/,
|
||||
'refactor': /refactor|重构|tech.*debt|技术债务/,
|
||||
// Team workflows (multi-role collaboration, explicit "team" keyword required)
|
||||
// Team workflows (kept: team-planex only)
|
||||
'team-planex': /team.*plan.*exec|team.*planex|团队.*规划.*执行|并行.*规划.*执行|wave.*pipeline/,
|
||||
'team-iterdev': /team.*iter|team.*iterdev|迭代.*开发.*团队|iterative.*dev.*team/,
|
||||
'team-lifecycle': /team.*lifecycle|全生命周期|full.*lifecycle|spec.*impl.*test.*team/,
|
||||
'team-issue': /team.*issue.*resolv|团队.*issue|team.*resolve.*issue/,
|
||||
'team-testing': /team.*test|测试团队|comprehensive.*test.*team|全面.*测试.*团队/,
|
||||
'team-qa': /team.*qa|quality.*assurance.*team|QA.*团队|质量.*保障.*团队|团队.*质量/,
|
||||
'team-brainstorm': /team.*brainstorm|团队.*头脑风暴|team.*ideation|多人.*头脑风暴/,
|
||||
'team-uidesign': /team.*ui.*design|UI.*设计.*团队|dual.*track.*design|团队.*UI/,
|
||||
// Standard workflows
|
||||
'multi-cli-plan': /multi.*cli|多.*CLI|多模型.*协作|multi.*model.*collab/,
|
||||
'multi-cli': /multi.*cli|多.*CLI|多模型.*协作|multi.*model.*collab/,
|
||||
'bugfix': /fix|bug|error|crash|fail|debug/,
|
||||
'issue-batch': /issues?|batch/ && /fix|resolve/,
|
||||
'issue-transition': /issue workflow|structured workflow|queue|multi-stage/,
|
||||
@@ -165,6 +146,7 @@ function detectTaskType(text) {
|
||||
'ui-design': /ui|design|component|style/,
|
||||
'tdd': /tdd|test-driven|test first/,
|
||||
'test-fix': /test fail|fix test|failing test/,
|
||||
'test-gen': /generate test|写测试|add test|补充测试/,
|
||||
'review': /review|code review/,
|
||||
'documentation': /docs|documentation|readme/
|
||||
};
|
||||
@@ -202,34 +184,29 @@ async function clarifyRequirements(analysis) {
|
||||
function selectWorkflow(analysis) {
|
||||
const levelMap = {
|
||||
'bugfix-hotfix': { level: 2, flow: 'bugfix.hotfix' },
|
||||
// With-File workflows (documented exploration with multi-CLI collaboration)
|
||||
'brainstorm': { level: 4, flow: 'brainstorm-with-file' }, // Multi-perspective ideation
|
||||
'brainstorm-to-issue': { level: 4, flow: 'brainstorm-to-issue' }, // Brainstorm → Issue workflow
|
||||
'debug-file': { level: 3, flow: 'debug-with-file' }, // Hypothesis-driven debugging
|
||||
'analyze-file': { level: 3, flow: 'analyze-with-file' }, // Collaborative analysis
|
||||
// With-File workflows → auto chain to lite-plan
|
||||
'brainstorm': { level: 4, flow: 'brainstorm-to-plan' }, // brainstorm-with-file → lite-plan
|
||||
'brainstorm-to-issue': { level: 4, flow: 'brainstorm-to-issue' }, // Brainstorm → Issue workflow
|
||||
'debug-file': { level: 3, flow: 'debug-with-file' }, // Hypothesis-driven debugging (standalone)
|
||||
'analyze-file': { level: 3, flow: 'analyze-to-plan' }, // analyze-with-file → lite-plan
|
||||
'collaborative-plan': { level: 3, flow: 'collaborative-plan' }, // Multi-agent collaborative planning
|
||||
'req-plan': { level: 4, flow: 'req-plan' }, // Requirement-level roadmap planning
|
||||
'roadmap': { level: 4, flow: 'roadmap' }, // roadmap → team-planex
|
||||
'spec-driven': { level: 4, flow: 'spec-driven' }, // spec-generator → plan → execute
|
||||
// Cycle workflows (self-iterating with reflection)
|
||||
'integration-test': { level: 3, flow: 'integration-test-cycle' }, // Self-iterating integration test
|
||||
'refactor': { level: 3, flow: 'refactor-cycle' }, // Tech debt discovery and refactoring
|
||||
// Team workflows (multi-role collaboration)
|
||||
'integration-test': { level: 3, flow: 'integration-test-cycle' },
|
||||
'refactor': { level: 3, flow: 'refactor-cycle' },
|
||||
// Team workflows (kept: team-planex only)
|
||||
'team-planex': { level: 'Team', flow: 'team-planex' },
|
||||
'team-iterdev': { level: 'Team', flow: 'team-iterdev' },
|
||||
'team-lifecycle': { level: 'Team', flow: 'team-lifecycle' },
|
||||
'team-issue': { level: 'Team', flow: 'team-issue' },
|
||||
'team-testing': { level: 'Team', flow: 'team-testing' },
|
||||
'team-qa': { level: 'Team', flow: 'team-qa' },
|
||||
'team-brainstorm': { level: 'Team', flow: 'team-brainstorm' },
|
||||
'team-uidesign': { level: 'Team', flow: 'team-uidesign' },
|
||||
// Standard workflows
|
||||
'multi-cli-plan': { level: 3, flow: 'multi-cli-plan' }, // Multi-CLI collaborative planning
|
||||
'multi-cli': { level: 3, flow: 'multi-cli-plan' },
|
||||
'bugfix': { level: 2, flow: 'bugfix.standard' },
|
||||
'issue-batch': { level: 'Issue', flow: 'issue' },
|
||||
'issue-transition': { level: 2.5, flow: 'rapid-to-issue' }, // Bridge workflow
|
||||
'issue-transition': { level: 2.5, flow: 'rapid-to-issue' },
|
||||
'exploration': { level: 4, flow: 'full' },
|
||||
'quick-task': { level: 2, flow: 'rapid' },
|
||||
'ui-design': { level: analysis.complexity === 'high' ? 4 : 3, flow: 'ui' },
|
||||
'tdd': { level: 3, flow: 'tdd' },
|
||||
'test-gen': { level: 3, flow: 'test-gen' },
|
||||
'test-fix': { level: 3, flow: 'test-fix-gen' },
|
||||
'review': { level: 3, flow: 'review-cycle-fix' },
|
||||
'documentation': { level: 2, flow: 'docs' },
|
||||
@@ -281,18 +258,15 @@ function buildCommandChain(workflow, analysis) {
|
||||
{ cmd: 'workflow-lite-plan', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
// With-File workflows (documented exploration with multi-CLI collaboration)
|
||||
'brainstorm-with-file': [
|
||||
{ cmd: 'workflow:brainstorm-with-file', args: `"${analysis.goal}"` }
|
||||
// Note: Has built-in post-completion options (create plan, create issue, deep analysis)
|
||||
// With-File → Auto Chain to lite-plan
|
||||
'analyze-to-plan': [
|
||||
{ cmd: 'workflow:analyze-with-file', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-lite-plan', args: '' } // auto receives analysis artifacts (discussion.md)
|
||||
],
|
||||
|
||||
// Brainstorm-to-Issue workflow (bridge from brainstorm to issue execution)
|
||||
'brainstorm-to-issue': [
|
||||
// Note: Assumes brainstorm session already exists, or run brainstorm first
|
||||
{ cmd: 'issue:from-brainstorm', args: `SESSION="${extractBrainstormSession(analysis)}" --auto` },
|
||||
{ cmd: 'issue:queue', args: '' },
|
||||
{ cmd: 'issue:execute', args: '--queue auto' }
|
||||
'brainstorm-to-plan': [
|
||||
{ cmd: 'workflow:brainstorm-with-file', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-lite-plan', args: '' } // auto receives brainstorm artifacts (brainstorm.md)
|
||||
],
|
||||
|
||||
'debug-with-file': [
|
||||
@@ -300,32 +274,22 @@ function buildCommandChain(workflow, analysis) {
|
||||
// Note: Self-contained with hypothesis-driven iteration and Gemini validation
|
||||
],
|
||||
|
||||
'analyze-with-file': [
|
||||
{ cmd: 'workflow:analyze-with-file', args: `"${analysis.goal}"` }
|
||||
// Note: Self-contained with multi-round discussion and CLI exploration
|
||||
// Brainstorm-to-Issue workflow (bridge from brainstorm to issue execution)
|
||||
'brainstorm-to-issue': [
|
||||
{ cmd: 'issue:from-brainstorm', args: `SESSION="${extractBrainstormSession(analysis)}" --auto` },
|
||||
{ cmd: 'issue:queue', args: '' },
|
||||
{ cmd: 'issue:execute', args: '--queue auto' }
|
||||
],
|
||||
|
||||
// Universal Plan+Execute
|
||||
'collaborative-plan': [
|
||||
{ cmd: 'workflow:collaborative-plan-with-file', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow:unified-execute-with-file', args: '' }
|
||||
// Note: Plan Note → unified execution engine
|
||||
],
|
||||
|
||||
'req-plan': [
|
||||
{ cmd: 'workflow:req-plan-with-file', args: `"${analysis.goal}"` },
|
||||
'roadmap': [
|
||||
{ cmd: 'workflow:roadmap-with-file', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'team-planex', args: '' }
|
||||
// Note: Requirement decomposition → issue creation → team-planex wave execution
|
||||
],
|
||||
|
||||
// Cycle workflows (self-iterating with reflection)
|
||||
'integration-test-cycle': [
|
||||
{ cmd: 'workflow:integration-test-cycle', args: `"${analysis.goal}"` }
|
||||
// Note: Self-contained explore → test → fix cycle with reflection
|
||||
],
|
||||
|
||||
'refactor-cycle': [
|
||||
{ cmd: 'workflow:refactor-cycle', args: `"${analysis.goal}"` }
|
||||
// Note: Self-contained tech debt discovery → refactor → validate
|
||||
],
|
||||
|
||||
// Level 3 - Standard
|
||||
@@ -338,11 +302,25 @@ function buildCommandChain(workflow, analysis) {
|
||||
])
|
||||
],
|
||||
|
||||
// Level 4 - Spec-Driven Full Pipeline
|
||||
'spec-driven': [
|
||||
{ cmd: 'spec-generator', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-plan', args: '' },
|
||||
{ cmd: 'workflow-execute', args: '' },
|
||||
...(analysis.constraints?.includes('skip-tests') ? [] : [
|
||||
{ cmd: 'workflow-test-fix', args: '' }
|
||||
])
|
||||
],
|
||||
|
||||
'tdd': [
|
||||
{ cmd: 'workflow-tdd', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-tdd-plan', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-execute', args: '' }
|
||||
],
|
||||
|
||||
'test-gen': [
|
||||
{ cmd: 'workflow-test-fix', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'test-fix-gen': [
|
||||
{ cmd: 'workflow-test-fix', args: `"${analysis.goal}"` }
|
||||
],
|
||||
@@ -360,7 +338,7 @@ function buildCommandChain(workflow, analysis) {
|
||||
{ cmd: 'workflow-execute', args: '' }
|
||||
],
|
||||
|
||||
// Level 4 - Full
|
||||
// Level 4 - Full Exploration
|
||||
'full': [
|
||||
{ cmd: 'brainstorm', args: `"${analysis.goal}"` },
|
||||
{ cmd: 'workflow-plan', args: '' },
|
||||
@@ -370,6 +348,15 @@ function buildCommandChain(workflow, analysis) {
|
||||
])
|
||||
],
|
||||
|
||||
// Cycle workflows (self-iterating with reflection)
|
||||
'integration-test-cycle': [
|
||||
{ cmd: 'workflow:integration-test-cycle', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'refactor-cycle': [
|
||||
{ cmd: 'workflow:refactor-cycle', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
// Issue Workflow
|
||||
'issue': [
|
||||
{ cmd: 'issue:discover', args: '' },
|
||||
@@ -378,37 +365,9 @@ function buildCommandChain(workflow, analysis) {
|
||||
{ cmd: 'issue:execute', args: '' }
|
||||
],
|
||||
|
||||
// Team Workflows (multi-role collaboration, self-contained)
|
||||
// Team Workflows (kept: team-planex only)
|
||||
'team-planex': [
|
||||
{ cmd: 'team-planex', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-iterdev': [
|
||||
{ cmd: 'team-iterdev', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-lifecycle': [
|
||||
{ cmd: 'team-lifecycle', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-issue': [
|
||||
{ cmd: 'team-issue', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-testing': [
|
||||
{ cmd: 'team-testing', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-qa': [
|
||||
{ cmd: 'team-quality-assurance', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-brainstorm': [
|
||||
{ cmd: 'team-brainstorm', args: `"${analysis.goal}"` }
|
||||
],
|
||||
|
||||
'team-uidesign': [
|
||||
{ cmd: 'team-uidesign', args: `"${analysis.goal}"` }
|
||||
]
|
||||
};
|
||||
|
||||
@@ -607,7 +566,7 @@ Phase 1: Analyze Intent
|
||||
+-- If clarity < 2 -> Phase 1.5: Clarify Requirements
|
||||
|
|
||||
Phase 2: Select Workflow & Build Chain
|
||||
|-- Map task_type -> Level (1/2/3/4/Issue)
|
||||
|-- Map task_type -> Level (2/3/4/Issue/Team)
|
||||
|-- Select flow based on complexity
|
||||
+-- Build command chain (Skill-based)
|
||||
|
|
||||
@@ -639,26 +598,20 @@ Phase 5: Execute Command Chain
|
||||
| "Add API endpoint" | feature (low) | 2 | workflow-lite-plan → workflow-test-fix |
|
||||
| "Fix login timeout" | bugfix | 2 | workflow-lite-plan → workflow-test-fix |
|
||||
| "Use issue workflow" | issue-transition | 2.5 | workflow-lite-plan(plan-only) → convert-to-plan → queue → execute |
|
||||
| "头脑风暴: 通知系统重构" | brainstorm | 4 | workflow:brainstorm-with-file |
|
||||
| "从头脑风暴创建 issue" | brainstorm-to-issue | 4 | issue:from-brainstorm → issue:queue → issue:execute |
|
||||
| "协作分析: 认证架构" | analyze-file | 3 | analyze-with-file → workflow-lite-plan |
|
||||
| "深度调试 WebSocket" | debug-file | 3 | workflow:debug-with-file |
|
||||
| "协作分析: 认证架构优化" | analyze-file | 3 | workflow:analyze-with-file |
|
||||
| "头脑风暴: 通知系统" | brainstorm | 4 | brainstorm-with-file → workflow-lite-plan |
|
||||
| "从头脑风暴创建 issue" | brainstorm-to-issue | 4 | issue:from-brainstorm → issue:queue → issue:execute |
|
||||
| "协作规划: 实时通知系统" | collaborative-plan | 3 | collaborative-plan-with-file → unified-execute-with-file |
|
||||
| "需求规划: OAuth + 2FA" | req-plan | 4 | req-plan-with-file → team-planex |
|
||||
| "需求路线图: OAuth + 2FA" | roadmap | 4 | roadmap-with-file → team-planex |
|
||||
| "specification: 用户系统" | spec-driven | 4 | spec-generator → workflow-plan → workflow-execute → workflow-test-fix |
|
||||
| "集成测试: 支付流程" | integration-test | 3 | workflow:integration-test-cycle |
|
||||
| "重构 auth 模块" | refactor | 3 | workflow:refactor-cycle |
|
||||
| "multi-cli plan: API设计" | multi-cli-plan | 3 | workflow-multi-cli-plan → workflow-test-fix |
|
||||
| "OAuth2 system" | feature (high) | 3 | workflow-plan → workflow-execute → review-cycle → workflow-test-fix |
|
||||
| "Implement with TDD" | tdd | 3 | workflow-tdd → workflow-execute |
|
||||
| "Implement with TDD" | tdd | 3 | workflow-tdd-plan → workflow-execute |
|
||||
| "Uncertain: real-time" | exploration | 4 | brainstorm → workflow-plan → workflow-execute → workflow-test-fix |
|
||||
| "team planex: 用户系统" | team-planex | Team | team-planex |
|
||||
| "迭代开发团队: 支付模块" | team-iterdev | Team | team-iterdev |
|
||||
| "全生命周期: 通知服务" | team-lifecycle | Team | team-lifecycle |
|
||||
| "team resolve issue #42" | team-issue | Team | team-issue |
|
||||
| "测试团队: 全面测试认证" | team-testing | Team | team-testing |
|
||||
| "QA 团队: 质量保障支付" | team-qa | Team | team-quality-assurance |
|
||||
| "团队头脑风暴: API 设计" | team-brainstorm | Team | team-brainstorm |
|
||||
| "团队 UI 设计: 仪表盘" | team-uidesign | Team | team-uidesign |
|
||||
|
||||
---
|
||||
|
||||
@@ -668,10 +621,11 @@ Phase 5: Execute Command Chain
|
||||
2. **Intent-Driven** - Auto-select workflow based on task intent
|
||||
3. **Skill-Based Chaining** - Build command chain by composing independent Skills
|
||||
4. **Self-Contained Skills** - 每个 Skill 内部处理完整流水线,是天然的最小执行单元
|
||||
5. **Progressive Clarification** - Low clarity triggers clarification phase
|
||||
6. **TODO Tracking** - Use CCW prefix to isolate workflow todos
|
||||
7. **Error Handling** - Retry/skip/abort at Skill level
|
||||
8. **User Control** - Optional user confirmation at each phase
|
||||
5. **Auto Chain** - With-File 产物自动传递给下游 Skill(如 analyze → lite-plan)
|
||||
6. **Progressive Clarification** - Low clarity triggers clarification phase
|
||||
7. **TODO Tracking** - Use CCW prefix to isolate workflow todos
|
||||
8. **Error Handling** - Retry/skip/abort at Skill level
|
||||
9. **User Control** - Optional user confirmation at each phase
|
||||
|
||||
---
|
||||
|
||||
@@ -715,114 +669,51 @@ todos = [
|
||||
"complexity": "medium"
|
||||
},
|
||||
"command_chain": [
|
||||
{
|
||||
"index": 0,
|
||||
"command": "workflow-lite-plan",
|
||||
"status": "completed"
|
||||
},
|
||||
{
|
||||
"index": 1,
|
||||
"command": "workflow-test-fix",
|
||||
"status": "running"
|
||||
}
|
||||
{ "index": 0, "command": "workflow-lite-plan", "status": "completed" },
|
||||
{ "index": 1, "command": "workflow-test-fix", "status": "running" }
|
||||
],
|
||||
"current_index": 1
|
||||
}
|
||||
```
|
||||
|
||||
**Status Values**:
|
||||
- `running`: Workflow executing commands
|
||||
- `completed`: All commands finished
|
||||
- `failed`: User aborted or unrecoverable error
|
||||
- `error`: Command execution failed (during error handling)
|
||||
|
||||
**Command Status Values**:
|
||||
- `pending`: Not started
|
||||
- `running`: Currently executing
|
||||
- `completed`: Successfully finished
|
||||
- `failed`: Execution failed
|
||||
**Status Values**: `running` | `completed` | `failed` | `error`
|
||||
**Command Status Values**: `pending` | `running` | `completed` | `failed`
|
||||
|
||||
---
|
||||
|
||||
## With-File Workflows
|
||||
|
||||
**With-File workflows** provide documented exploration with multi-CLI collaboration. They are self-contained and generate comprehensive session artifacts.
|
||||
**With-File workflows** provide documented exploration with multi-CLI collaboration. They generate comprehensive session artifacts and can auto-chain to lite-plan for implementation.
|
||||
|
||||
| Workflow | Purpose | Key Features | Output Folder |
|
||||
|----------|---------|--------------|---------------|
|
||||
| **brainstorm-with-file** | Multi-perspective ideation | Gemini/Codex/Claude perspectives, diverge-converge cycles | `.workflow/.brainstorm/` |
|
||||
| **debug-with-file** | Hypothesis-driven debugging | Gemini validation, understanding evolution, NDJSON logging | `.workflow/.debug/` |
|
||||
| **analyze-with-file** | Collaborative analysis | Multi-round Q&A, CLI exploration, documented discussions | `.workflow/.analysis/` |
|
||||
| **collaborative-plan-with-file** | Multi-agent collaborative planning | Understanding agent + parallel agents, Plan Note shared doc | `.workflow/.planning/` |
|
||||
| **req-plan-with-file** | Requirement roadmap planning | Requirement decomposition, issue creation, execution-plan.json | `.workflow/.planning/` |
|
||||
| Workflow | Purpose | Auto Chain | Output Folder |
|
||||
|----------|---------|------------|---------------|
|
||||
| **brainstorm-with-file** | Multi-perspective ideation | → workflow-lite-plan (auto) | `.workflow/.brainstorm/` |
|
||||
| **debug-with-file** | Hypothesis-driven debugging | Standalone (self-contained) | `.workflow/.debug/` |
|
||||
| **analyze-with-file** | Collaborative analysis | → workflow-lite-plan (auto) | `.workflow/.analysis/` |
|
||||
| **collaborative-plan-with-file** | Multi-agent collaborative planning | → unified-execute-with-file | `.workflow/.planning/` |
|
||||
| **roadmap-with-file** | Strategic requirement roadmap | → team-planex | `.workflow/.planning/` |
|
||||
|
||||
**Auto Chain Mechanism**: When `analyze-with-file` or `brainstorm-with-file` completes, its artifacts (discussion.md / brainstorm.md) are automatically passed to `workflow-lite-plan` as context input. No user intervention needed.
|
||||
|
||||
**Detection Keywords**:
|
||||
- **brainstorm**: 头脑风暴, 创意, 发散思维, multi-perspective, compare perspectives
|
||||
- **debug-file**: 深度调试, 假设验证, systematic debug, hypothesis debug
|
||||
- **analyze-file**: 协作分析, 深度理解, collaborative analysis, explore concept
|
||||
- **collaborative-plan**: 协作规划, 多人规划, collaborative plan, multi-agent plan, Plan Note
|
||||
- **req-plan**: roadmap, 需求规划, 需求拆解, requirement plan, progressive plan
|
||||
|
||||
**Characteristics**:
|
||||
1. **Self-Contained**: Each workflow handles its own iteration loop
|
||||
2. **Documented Process**: Creates evolving documents (brainstorm.md, understanding.md, discussion.md)
|
||||
3. **Multi-CLI**: Uses Gemini/Codex/Claude for different perspectives
|
||||
4. **Built-in Post-Completion**: Offers follow-up options (create plan, issue, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Team Workflows
|
||||
|
||||
**Team workflows** provide multi-role collaboration for complex tasks. Each team skill is self-contained with internal role routing via `--role=xxx`.
|
||||
|
||||
| Workflow | Roles | Pipeline | Use Case |
|
||||
|----------|-------|----------|----------|
|
||||
| **team-planex** | planner + executor | wave pipeline(边规划边执行)| 需要并行规划和执行的任务 |
|
||||
| **team-iterdev** | planner → developer → reviewer | 迭代开发循环 | 需要多轮迭代的开发任务 |
|
||||
| **team-lifecycle** | spec → impl → test | 全生命周期 | 从需求到测试的完整流程 |
|
||||
| **team-issue** | discover → plan → execute | issue 解决 | 多角色协作解决 issue |
|
||||
| **team-testing** | strategy → generate → execute → analyze | 测试流水线 | 全面测试覆盖 |
|
||||
| **team-quality-assurance** | scout → strategist → generator → executor → analyst | QA 闭环 | 质量保障全流程 |
|
||||
| **team-brainstorm** | facilitator → participants → synthesizer | 团队头脑风暴 | 多角色协作头脑风暴 |
|
||||
| **team-uidesign** | designer → implementer | dual-track 设计+实现 | UI 设计与实现并行 |
|
||||
|
||||
**Detection Keywords**:
|
||||
- **team-planex**: team planex, 团队规划执行, wave pipeline
|
||||
- **team-iterdev**: team iterdev, 迭代开发团队, iterative dev team
|
||||
- **team-lifecycle**: team lifecycle, 全生命周期, full lifecycle
|
||||
- **team-issue**: team issue, 团队 issue, team resolve issue
|
||||
- **team-testing**: team test, 测试团队, comprehensive test team
|
||||
- **team-qa**: team qa, QA 团队, 质量保障团队
|
||||
- **team-brainstorm**: team brainstorm, 团队头脑风暴, team ideation
|
||||
- **team-uidesign**: team ui design, UI 设计团队, dual track design
|
||||
|
||||
**Characteristics**:
|
||||
1. **Self-Contained**: Each team skill handles internal role coordination
|
||||
2. **Role-Based Routing**: All roles invoke the same skill with `--role=xxx`
|
||||
3. **Shared Memory**: Roles communicate via shared-memory.json and message bus
|
||||
4. **Auto Mode Support**: All team skills support `-y`/`--yes` for skip confirmations
|
||||
- **roadmap**: roadmap, 需求规划, 需求拆解, requirement plan, progressive plan
|
||||
- **spec-driven**: specification, PRD, 产品需求, 产品文档
|
||||
|
||||
---
|
||||
|
||||
## Cycle Workflows
|
||||
|
||||
**Cycle workflows** provide self-iterating development cycles with reflection-driven strategy adjustment. Each cycle is autonomous with built-in test-fix loops and quality gates.
|
||||
**Cycle workflows** provide self-iterating development cycles with reflection-driven strategy adjustment.
|
||||
|
||||
| Workflow | Pipeline | Key Features | Output Folder |
|
||||
|----------|----------|--------------|---------------|
|
||||
| **integration-test-cycle** | explore → test dev → test-fix → reflection | Self-iterating with max-iterations, auto continue | `.workflow/.test-cycle/` |
|
||||
| **refactor-cycle** | discover → prioritize → execute → validate | Multi-dimensional analysis, regression validation | `.workflow/.refactor-cycle/` |
|
||||
|
||||
**Detection Keywords**:
|
||||
- **integration-test**: integration test, 集成测试, 端到端测试, e2e test
|
||||
- **refactor**: refactor, 重构, tech debt, 技术债务
|
||||
|
||||
**Characteristics**:
|
||||
1. **Self-Iterating**: Autonomous test-fix loops until quality gate passes
|
||||
2. **Reflection-Driven**: Strategy adjusts based on previous iteration results
|
||||
3. **Continue Support**: `--continue` flag to resume interrupted sessions
|
||||
4. **Auto Mode Support**: `-y`/`--yes` for fully autonomous execution
|
||||
|
||||
---
|
||||
|
||||
## Utility Commands
|
||||
@@ -831,10 +722,11 @@ todos = [
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `workflow:unified-execute-with-file` | Universal execution engine - consumes plan output from collaborative-plan, req-plan, brainstorm |
|
||||
| `workflow:unified-execute-with-file` | Universal execution engine - consumes plan output from collaborative-plan, roadmap, brainstorm |
|
||||
| `workflow:clean` | Intelligent code cleanup - mainline detection, stale artifact removal |
|
||||
| `workflow:init` | Initialize `.workflow/project-tech.json` with project analysis |
|
||||
| `workflow:init-guidelines` | Interactive wizard to fill `specs/*.md` |
|
||||
| `workflow:status` | Generate on-demand views for project overview and workflow tasks |
|
||||
|
||||
---
|
||||
|
||||
@@ -848,9 +740,6 @@ todos = [
|
||||
/ccw -y "Add user authentication"
|
||||
/ccw --yes "Fix memory leak in WebSocket handler"
|
||||
|
||||
# Complex requirement (triggers clarification)
|
||||
/ccw "Optimize system performance"
|
||||
|
||||
# Bug fix
|
||||
/ccw "Fix memory leak in WebSocket handler"
|
||||
|
||||
@@ -863,35 +752,31 @@ todos = [
|
||||
# Multi-CLI collaborative planning
|
||||
/ccw "multi-cli plan: 支付网关API设计" # → workflow-multi-cli-plan → workflow-test-fix
|
||||
|
||||
# With-File workflows (documented exploration with multi-CLI collaboration)
|
||||
/ccw "头脑风暴: 用户通知系统重新设计" # → brainstorm-with-file
|
||||
# With-File workflows → auto chain to lite-plan
|
||||
/ccw "协作分析: 理解现有认证架构的设计决策" # → analyze-with-file → workflow-lite-plan
|
||||
/ccw "头脑风暴: 用户通知系统重新设计" # → brainstorm-with-file → workflow-lite-plan
|
||||
/ccw "深度调试: 系统随机崩溃问题" # → debug-with-file (standalone)
|
||||
/ccw "从头脑风暴 BS-通知系统-2025-01-28 创建 issue" # → brainstorm-to-issue (bridge)
|
||||
/ccw "深度调试: 系统随机崩溃问题" # → debug-with-file
|
||||
/ccw "协作分析: 理解现有认证架构的设计决策" # → analyze-with-file
|
||||
|
||||
# Team workflows (multi-role collaboration)
|
||||
/ccw "team planex: 用户认证系统" # → team-planex (planner + executor wave pipeline)
|
||||
/ccw "迭代开发团队: 支付模块重构" # → team-iterdev (planner → developer → reviewer)
|
||||
/ccw "全生命周期: 通知服务开发" # → team-lifecycle (spec → impl → test)
|
||||
/ccw "team resolve issue #42" # → team-issue (discover → plan → execute)
|
||||
/ccw "测试团队: 全面测试认证模块" # → team-testing (strategy → generate → execute → analyze)
|
||||
/ccw "QA 团队: 质量保障支付流程" # → team-quality-assurance (scout → strategist → generator → executor → analyst)
|
||||
/ccw "团队头脑风暴: API 网关设计" # → team-brainstorm (facilitator → participants → synthesizer)
|
||||
/ccw "团队 UI 设计: 管理后台仪表盘" # → team-uidesign (designer → implementer dual-track)
|
||||
# Spec-driven full pipeline
|
||||
/ccw "specification: 用户认证系统产品文档" # → spec-generator → workflow-plan → workflow-execute → workflow-test-fix
|
||||
|
||||
# Collaborative planning & requirement workflows
|
||||
/ccw "协作规划: 实时通知系统架构" # → collaborative-plan-with-file → unified-execute
|
||||
/ccw "需求规划: 用户认证 OAuth + 2FA" # → req-plan-with-file → team-planex
|
||||
/ccw "roadmap: 数据导出功能路线图" # → req-plan-with-file → team-planex
|
||||
/ccw "需求路线图: 用户认证 OAuth + 2FA" # → roadmap-with-file → team-planex
|
||||
/ccw "roadmap: 数据导出功能路线图" # → roadmap-with-file → team-planex
|
||||
|
||||
# Team workflows (kept: team-planex)
|
||||
/ccw "team planex: 用户认证系统" # → team-planex (planner + executor wave pipeline)
|
||||
|
||||
# Cycle workflows (self-iterating)
|
||||
/ccw "集成测试: 支付流程端到端" # → integration-test-cycle
|
||||
/ccw "重构 auth 模块的技术债务" # → refactor-cycle
|
||||
/ccw "tech debt: 清理支付服务" # → refactor-cycle
|
||||
|
||||
# Utility commands (invoked directly, not auto-routed)
|
||||
# /workflow:unified-execute-with-file # 通用执行引擎(消费 plan 输出)
|
||||
# /workflow:clean # 智能代码清理
|
||||
# /workflow:init # 初始化项目状态
|
||||
# /workflow:init-guidelines # 交互式填充项目规范
|
||||
# /workflow:status # 项目概览和工作流状态
|
||||
```
|
||||
|
||||
@@ -33,7 +33,7 @@ Creates tool-specific configuration directories:
|
||||
- `.gemini/settings.json`:
|
||||
```json
|
||||
{
|
||||
"contextfilename": ["CLAUDE.md","GEMINI.md"]
|
||||
"contextfilename": "CLAUDE.md"
|
||||
}
|
||||
```
|
||||
|
||||
@@ -41,7 +41,7 @@ Creates tool-specific configuration directories:
|
||||
- `.qwen/settings.json`:
|
||||
```json
|
||||
{
|
||||
"contextfilename": ["CLAUDE.md","QWEN.md"]
|
||||
"contextfilename": "CLAUDE.md"
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -107,24 +107,24 @@ async function selectCommandCategory() {
|
||||
async function selectCommand(category) {
|
||||
const commandOptions = {
|
||||
'Planning': [
|
||||
{ label: "/workflow:lite-plan", description: "Lightweight merged-mode planning" },
|
||||
{ label: "/workflow:plan", description: "Full planning with architecture design" },
|
||||
{ label: "/workflow:multi-cli-plan", description: "Multi-CLI collaborative planning (Gemini+Codex+Claude)" },
|
||||
{ label: "/workflow:tdd-plan", description: "TDD workflow planning with Red-Green-Refactor" },
|
||||
{ label: "/workflow-lite-plan", description: "Lightweight merged-mode planning" },
|
||||
{ label: "/workflow-plan", description: "Full planning with architecture design" },
|
||||
{ label: "/workflow-multi-cli-plan", description: "Multi-CLI collaborative planning (Gemini+Codex+Claude)" },
|
||||
{ label: "/workflow-tdd-plan", description: "TDD workflow planning with Red-Green-Refactor" },
|
||||
{ label: "/workflow:quick-plan-with-file", description: "Rapid planning with minimal docs" },
|
||||
{ label: "/workflow:plan-verify", description: "Verify plan against requirements" },
|
||||
{ label: "/workflow-plan-verify", description: "Verify plan against requirements" },
|
||||
{ label: "/workflow:replan", description: "Update plan and execute changes" }
|
||||
],
|
||||
'Execution': [
|
||||
{ label: "/workflow:lite-execute", description: "Execute from in-memory plan" },
|
||||
{ label: "/workflow:execute", description: "Execute from planning session" },
|
||||
{ label: "/workflow-execute", description: "Execute from planning session" },
|
||||
{ label: "/workflow:unified-execute-with-file", description: "Universal execution engine" }
|
||||
],
|
||||
'Testing': [
|
||||
{ label: "/workflow:test-fix-gen", description: "Generate test tasks for specific issues" },
|
||||
{ label: "/workflow:test-cycle-execute", description: "Execute iterative test-fix cycle (>=95% pass)" },
|
||||
{ label: "/workflow-test-fix", description: "Generate test tasks for specific issues" },
|
||||
{ label: "/workflow-test-fix", description: "Execute iterative test-fix cycle (>=95% pass)" },
|
||||
{ label: "/workflow:test-gen", description: "Generate comprehensive test suite" },
|
||||
{ label: "/workflow:tdd-verify", description: "Verify TDD workflow compliance" }
|
||||
{ label: "/workflow-tdd-verify", description: "Verify TDD workflow compliance" }
|
||||
],
|
||||
'Review': [
|
||||
{ label: "/workflow:review-session-cycle", description: "Session-based multi-dimensional code review" },
|
||||
@@ -133,7 +133,7 @@ async function selectCommand(category) {
|
||||
{ label: "/workflow:review", description: "Post-implementation review" }
|
||||
],
|
||||
'Bug Fix': [
|
||||
{ label: "/workflow:lite-plan", description: "Lightweight bug diagnosis and fix (with --bugfix flag)" },
|
||||
{ label: "/workflow-lite-plan", description: "Lightweight bug diagnosis and fix (with --bugfix flag)" },
|
||||
{ label: "/workflow:debug-with-file", description: "Hypothesis-driven debugging with documentation" }
|
||||
],
|
||||
'Brainstorm': [
|
||||
@@ -303,10 +303,10 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Quick implementation with testing",
|
||||
"level": 2,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:lite-plan", "args": "\"{{goal}}\"", "unit": "quick-implementation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create lightweight implementation plan" },
|
||||
{ "cmd": "/workflow-lite-plan", "args": "\"{{goal}}\"", "unit": "quick-implementation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create lightweight implementation plan" },
|
||||
{ "cmd": "/workflow:lite-execute", "args": "--in-memory", "unit": "quick-implementation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Execute implementation based on plan" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test tasks" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle until pass rate >= 95%" }
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test tasks" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle until pass rate >= 95%" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -318,13 +318,13 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Full workflow with verification, review, and testing",
|
||||
"level": 3,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:plan", "args": "\"{{goal}}\"", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create detailed implementation plan" },
|
||||
{ "cmd": "/workflow:plan-verify", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify plan against requirements" },
|
||||
{ "cmd": "/workflow:execute", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute implementation" },
|
||||
{ "cmd": "/workflow-plan", "args": "\"{{goal}}\"", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create detailed implementation plan" },
|
||||
{ "cmd": "/workflow-plan-verify", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify plan against requirements" },
|
||||
{ "cmd": "/workflow-execute", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute implementation" },
|
||||
{ "cmd": "/workflow:review-session-cycle", "unit": "code-review", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Multi-dimensional code review" },
|
||||
{ "cmd": "/workflow:review-cycle-fix", "unit": "code-review", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Fix review findings" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test tasks" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle" }
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test tasks" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -336,10 +336,10 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Bug diagnosis and fix with testing",
|
||||
"level": 2,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:lite-plan", "args": "--bugfix \"{{goal}}\"", "unit": "bug-fix", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Diagnose and plan bug fix" },
|
||||
{ "cmd": "/workflow-lite-plan", "args": "--bugfix \"{{goal}}\"", "unit": "bug-fix", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Diagnose and plan bug fix" },
|
||||
{ "cmd": "/workflow:lite-execute", "args": "--in-memory", "unit": "bug-fix", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Execute bug fix" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate regression tests" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Verify fix with tests" }
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate regression tests" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Verify fix with tests" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -351,7 +351,7 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Urgent production bug fix (no tests)",
|
||||
"level": 2,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:lite-plan", "args": "--hotfix \"{{goal}}\"", "unit": "standalone", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Emergency hotfix mode" }
|
||||
{ "cmd": "/workflow-lite-plan", "args": "--hotfix \"{{goal}}\"", "unit": "standalone", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Emergency hotfix mode" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -363,9 +363,9 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Test-driven development with Red-Green-Refactor",
|
||||
"level": 3,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:tdd-plan", "args": "\"{{goal}}\"", "unit": "tdd-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create TDD task chain" },
|
||||
{ "cmd": "/workflow:execute", "unit": "tdd-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute TDD cycle" },
|
||||
{ "cmd": "/workflow:tdd-verify", "unit": "standalone", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify TDD compliance" }
|
||||
{ "cmd": "/workflow-tdd-plan", "args": "\"{{goal}}\"", "unit": "tdd-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create TDD task chain" },
|
||||
{ "cmd": "/workflow-execute", "unit": "tdd-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute TDD cycle" },
|
||||
{ "cmd": "/workflow-tdd-verify", "unit": "standalone", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify TDD compliance" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -379,8 +379,8 @@ async function defineSteps(templateDesign) {
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:review-session-cycle", "unit": "code-review", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Multi-dimensional code review" },
|
||||
{ "cmd": "/workflow:review-cycle-fix", "unit": "code-review", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Fix review findings" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate tests for fixes" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Verify fixes pass tests" }
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate tests for fixes" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Verify fixes pass tests" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -392,8 +392,8 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Fix failing tests",
|
||||
"level": 3,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:test-fix-gen", "args": "\"{{goal}}\"", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test fix tasks" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle" }
|
||||
{ "cmd": "/workflow-test-fix", "args": "\"{{goal}}\"", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate test fix tasks" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test-fix cycle" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -420,7 +420,7 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Bridge lightweight planning to issue workflow",
|
||||
"level": 2,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:lite-plan", "args": "\"{{goal}}\"", "unit": "rapid-to-issue", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create lightweight plan" },
|
||||
{ "cmd": "/workflow-lite-plan", "args": "\"{{goal}}\"", "unit": "rapid-to-issue", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create lightweight plan" },
|
||||
{ "cmd": "/issue:convert-to-plan", "args": "--latest-lite-plan -y", "unit": "rapid-to-issue", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Convert to issue plan" },
|
||||
{ "cmd": "/issue:queue", "unit": "rapid-to-issue", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Form execution queue" },
|
||||
{ "cmd": "/issue:execute", "args": "--queue auto", "unit": "rapid-to-issue", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute issue queue" }
|
||||
@@ -486,11 +486,11 @@ async function defineSteps(templateDesign) {
|
||||
"level": 4,
|
||||
"steps": [
|
||||
{ "cmd": "/brainstorm", "args": "\"{{goal}}\"", "unit": "standalone", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Unified brainstorming with multi-perspective exploration" },
|
||||
{ "cmd": "/workflow:plan", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create detailed plan from brainstorm" },
|
||||
{ "cmd": "/workflow:plan-verify", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify plan quality" },
|
||||
{ "cmd": "/workflow:execute", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute implementation" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate comprehensive tests" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test cycle" }
|
||||
{ "cmd": "/workflow-plan", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Create detailed plan from brainstorm" },
|
||||
{ "cmd": "/workflow-plan-verify", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Verify plan quality" },
|
||||
{ "cmd": "/workflow-execute", "unit": "verified-planning-execution", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute implementation" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate comprehensive tests" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test cycle" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -502,10 +502,10 @@ async function defineSteps(templateDesign) {
|
||||
"description": "Multi-CLI collaborative planning with cross-verification",
|
||||
"level": 3,
|
||||
"steps": [
|
||||
{ "cmd": "/workflow:multi-cli-plan", "args": "\"{{goal}}\"", "unit": "multi-cli-planning", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Gemini+Codex+Claude collaborative planning" },
|
||||
{ "cmd": "/workflow-multi-cli-plan", "args": "\"{{goal}}\"", "unit": "multi-cli-planning", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Gemini+Codex+Claude collaborative planning" },
|
||||
{ "cmd": "/workflow:lite-execute", "args": "--in-memory", "unit": "multi-cli-planning", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Execute converged plan" },
|
||||
{ "cmd": "/workflow:test-fix-gen", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate tests" },
|
||||
{ "cmd": "/workflow:test-cycle-execute", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test cycle" }
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "mainprocess" }, "contextHint": "Generate tests" },
|
||||
{ "cmd": "/workflow-test-fix", "unit": "test-validation", "execution": { "type": "slash-command", "mode": "async" }, "contextHint": "Execute test cycle" }
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -656,9 +656,9 @@ async function generateTemplate(design, steps, outputPath) {
|
||||
→ Level: 3 (Standard)
|
||||
→ Steps: Customize
|
||||
→ Step 1: /brainstorm (standalone, mainprocess)
|
||||
→ Step 2: /workflow:plan (verified-planning-execution, mainprocess)
|
||||
→ Step 3: /workflow:plan-verify (verified-planning-execution, mainprocess)
|
||||
→ Step 4: /workflow:execute (verified-planning-execution, async)
|
||||
→ Step 2: /workflow-plan (verified-planning-execution, mainprocess)
|
||||
→ Step 3: /workflow-plan-verify (verified-planning-execution, mainprocess)
|
||||
→ Step 4: /workflow-execute (verified-planning-execution, async)
|
||||
→ Step 5: /workflow:review-session-cycle (code-review, mainprocess)
|
||||
→ Step 6: /workflow:review-cycle-fix (code-review, mainprocess)
|
||||
→ Done
|
||||
|
||||
@@ -252,6 +252,17 @@ await updateDiscoveryState(outputDir, {
|
||||
const hasHighPriority = issues.some(i => i.priority === 'critical' || i.priority === 'high');
|
||||
const hasMediumFindings = prioritizedFindings.some(f => f.priority === 'medium');
|
||||
|
||||
// Auto mode: auto-select recommended action
|
||||
if (autoYes) {
|
||||
if (hasHighPriority) {
|
||||
await appendJsonl('.workflow/issues/issues.jsonl', issues);
|
||||
console.log(`Exported ${issues.length} issues. Run /issue:plan to continue.`);
|
||||
} else {
|
||||
console.log('Discovery complete. No significant issues found.');
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
await AskUserQuestion({
|
||||
questions: [{
|
||||
question: `Discovery complete: ${issues.length} issues generated, ${prioritizedFindings.length} total findings. What would you like to do next?`,
|
||||
|
||||
@@ -152,6 +152,12 @@ if (!QUEUE_ID) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Auto mode: auto-select if exactly one active queue
|
||||
if (autoYes && activeQueues.length === 1) {
|
||||
QUEUE_ID = activeQueues[0].id;
|
||||
console.log(`Auto-selected queue: ${QUEUE_ID}`);
|
||||
} else {
|
||||
|
||||
// Display and prompt user
|
||||
console.log('\nAvailable Queues:');
|
||||
console.log('ID'.padEnd(22) + 'Status'.padEnd(12) + 'Progress'.padEnd(12) + 'Issues');
|
||||
@@ -176,6 +182,7 @@ if (!QUEUE_ID) {
|
||||
});
|
||||
|
||||
QUEUE_ID = answer['Queue'];
|
||||
} // end else (multi-queue prompt)
|
||||
}
|
||||
|
||||
console.log(`\n## Executing Queue: ${QUEUE_ID}\n`);
|
||||
@@ -203,6 +210,13 @@ console.log(`
|
||||
- Parallel in batch 1: ${dag.parallel_batches[0]?.length || 0}
|
||||
`);
|
||||
|
||||
// Auto mode: use recommended defaults (Codex + Execute + Worktree)
|
||||
if (autoYes) {
|
||||
var executor = 'codex';
|
||||
var isDryRun = false;
|
||||
var useWorktree = true;
|
||||
} else {
|
||||
|
||||
// Interactive selection via AskUserQuestion
|
||||
const answer = AskUserQuestion({
|
||||
questions: [
|
||||
@@ -237,9 +251,10 @@ const answer = AskUserQuestion({
|
||||
]
|
||||
});
|
||||
|
||||
const executor = answer['Executor'].toLowerCase().split(' ')[0]; // codex|gemini|agent
|
||||
const isDryRun = answer['Mode'].includes('Dry-run');
|
||||
const useWorktree = answer['Worktree'].includes('Yes');
|
||||
var executor = answer['Executor'].toLowerCase().split(' ')[0]; // codex|gemini|agent
|
||||
var isDryRun = answer['Mode'].includes('Dry-run');
|
||||
var useWorktree = answer['Worktree'].includes('Yes');
|
||||
} // end else (interactive selection)
|
||||
|
||||
// Dry run mode
|
||||
if (isDryRun) {
|
||||
@@ -451,27 +466,33 @@ if (refreshedDag.ready_count > 0) {
|
||||
if (useWorktree && refreshedDag.ready_count === 0 && refreshedDag.completed_count === refreshedDag.total) {
|
||||
console.log('\n## All Solutions Completed - Worktree Cleanup');
|
||||
|
||||
const answer = AskUserQuestion({
|
||||
questions: [{
|
||||
question: `Queue complete. What to do with worktree branch "${worktreeBranch}"?`,
|
||||
header: 'Merge',
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: 'Create PR (Recommended)', description: 'Push branch and create pull request' },
|
||||
{ label: 'Merge to main', description: 'Merge all commits and cleanup worktree' },
|
||||
{ label: 'Keep branch', description: 'Cleanup worktree, keep branch for manual handling' }
|
||||
]
|
||||
}]
|
||||
});
|
||||
// Auto mode: Create PR (recommended)
|
||||
if (autoYes) {
|
||||
var mergeAction = 'Create PR';
|
||||
} else {
|
||||
const answer = AskUserQuestion({
|
||||
questions: [{
|
||||
question: `Queue complete. What to do with worktree branch "${worktreeBranch}"?`,
|
||||
header: 'Merge',
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: 'Create PR (Recommended)', description: 'Push branch and create pull request' },
|
||||
{ label: 'Merge to main', description: 'Merge all commits and cleanup worktree' },
|
||||
{ label: 'Keep branch', description: 'Cleanup worktree, keep branch for manual handling' }
|
||||
]
|
||||
}]
|
||||
});
|
||||
var mergeAction = answer['Merge'];
|
||||
}
|
||||
|
||||
const repoRoot = Bash('git rev-parse --show-toplevel').trim();
|
||||
|
||||
if (answer['Merge'].includes('Create PR')) {
|
||||
if (mergeAction.includes('Create PR')) {
|
||||
Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`);
|
||||
Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution - all solutions completed" --head "${worktreeBranch}"`);
|
||||
Bash(`git worktree remove "${worktreePath}"`);
|
||||
console.log(`PR created for branch: ${worktreeBranch}`);
|
||||
} else if (answer['Merge'].includes('Merge to main')) {
|
||||
} else if (mergeAction.includes('Merge to main')) {
|
||||
// Check main is clean
|
||||
const mainDirty = Bash('git status --porcelain').trim();
|
||||
if (mainDirty) {
|
||||
|
||||
@@ -154,8 +154,8 @@ Phase 6: Bind Solution
|
||||
├─ Update issue status to 'planned'
|
||||
└─ Returns: SOL-{issue-id}-{uid}
|
||||
|
||||
Phase 7: Next Steps
|
||||
└─ Offer: Form queue | Convert another idea | View details | Done
|
||||
Phase 7: Next Steps (skip in auto mode)
|
||||
└─ Auto mode: complete directly | Interactive: Form queue | Convert another | Done
|
||||
```
|
||||
|
||||
## Context Enrichment Logic
|
||||
|
||||
@@ -263,6 +263,14 @@ for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
|
||||
for (const pending of pendingSelections) {
|
||||
if (pending.solutions.length === 0) continue;
|
||||
|
||||
// Auto mode: auto-bind first (highest-ranked) solution
|
||||
if (autoYes) {
|
||||
const solId = pending.solutions[0].id;
|
||||
Bash(`ccw issue bind ${pending.issue_id} ${solId}`);
|
||||
console.log(`✓ ${pending.issue_id}: ${solId} bound (auto)`);
|
||||
continue;
|
||||
}
|
||||
|
||||
const options = pending.solutions.slice(0, 4).map(sol => ({
|
||||
label: `${sol.id} (${sol.task_count} tasks)`,
|
||||
description: sol.description || sol.approach || 'No description'
|
||||
|
||||
@@ -273,6 +273,17 @@ const allClarifications = results.flatMap((r, i) =>
|
||||
```javascript
|
||||
if (allClarifications.length > 0) {
|
||||
for (const clarification of allClarifications) {
|
||||
// Auto mode: use recommended resolution (first option)
|
||||
if (autoYes) {
|
||||
const autoAnswer = clarification.options[0]?.label || 'skip';
|
||||
Task(
|
||||
subagent_type="issue-queue-agent",
|
||||
resume=clarification.agent_id,
|
||||
prompt=`Conflict ${clarification.conflict_id} resolved: ${autoAnswer}`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Present to user via AskUserQuestion
|
||||
const answer = AskUserQuestion({
|
||||
questions: [{
|
||||
@@ -345,6 +356,14 @@ ccw issue queue list --brief
|
||||
|
||||
**AskUserQuestion:**
|
||||
```javascript
|
||||
// Auto mode: merge into existing queue
|
||||
if (autoYes) {
|
||||
Bash(`ccw issue queue merge ${newQueueId} --queue ${activeQueueId}`);
|
||||
Bash(`ccw issue queue delete ${newQueueId}`);
|
||||
console.log(`Auto-merged new queue into ${activeQueueId}`);
|
||||
return;
|
||||
}
|
||||
|
||||
AskUserQuestion({
|
||||
questions: [{
|
||||
question: "Active queue exists. How would you like to proceed?",
|
||||
|
||||
@@ -632,6 +632,14 @@ Why is config value None during update?
|
||||
|
||||
**Auto-sync**: 执行 `/workflow:session:sync -y "{summary}"` 更新 specs/*.md + project-tech。
|
||||
|
||||
```javascript
|
||||
// Auto mode: skip expansion question, complete session directly
|
||||
if (autoYes) {
|
||||
console.log('Debug session complete. Auto mode: skipping expansion.');
|
||||
return;
|
||||
}
|
||||
```
|
||||
|
||||
完成后询问用户是否扩展为issue(test/enhance/refactor/doc),选中项调用 `/issue:new "{summary} - {dimension}"`
|
||||
|
||||
---
|
||||
|
||||
@@ -222,7 +222,7 @@ if (skipSpecs) {
|
||||
Next steps:
|
||||
- Use /workflow:init-specs to create individual specs
|
||||
- Use /workflow:init-guidelines to configure specs interactively
|
||||
- Use /workflow:plan to start planning
|
||||
- Use /workflow-plan to start planning
|
||||
`);
|
||||
return;
|
||||
}
|
||||
@@ -260,7 +260,7 @@ Next steps:
|
||||
- Use /workflow:init-specs to create individual specs
|
||||
- Use /workflow:init-guidelines to configure specs interactively
|
||||
- Use ccw spec load to import specs from external sources
|
||||
- Use /workflow:plan to start planning
|
||||
- Use /workflow-plan to start planning
|
||||
`);
|
||||
}
|
||||
} else {
|
||||
@@ -271,7 +271,7 @@ Next steps:
|
||||
- Use /workflow:init-specs to create additional specs
|
||||
- Use /workflow:init-guidelines --reset to reconfigure
|
||||
- Use /workflow:session:solidify to add individual rules
|
||||
- Use /workflow:plan to start planning
|
||||
- Use /workflow-plan to start planning
|
||||
`);
|
||||
}
|
||||
```
|
||||
|
||||
@@ -923,7 +923,7 @@ Single evolving state file — each phase writes its section:
|
||||
- Already have a completed implementation session (WFS-*)
|
||||
- Only need unit/component level tests
|
||||
|
||||
**Use `workflow-tdd` skill when:**
|
||||
**Use `workflow-tdd-plan` skill when:**
|
||||
- Building new features with test-first approach
|
||||
- Red-Green-Refactor cycle
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ Closed-loop tech debt lifecycle: **Discover → Assess → Plan → Refactor →
|
||||
|
||||
**vs Existing Commands**:
|
||||
- **workflow:lite-fix**: Single bug fix, no systematic debt analysis
|
||||
- **workflow:plan + execute**: Generic implementation, no debt-aware prioritization or regression validation
|
||||
- **workflow-plan + execute**: Generic implementation, no debt-aware prioritization or regression validation
|
||||
- **This command**: Full debt lifecycle — discovery through multi-dimensional scan, prioritized execution with per-item regression validation
|
||||
|
||||
### Value Proposition
|
||||
|
||||
@@ -534,7 +534,7 @@ ${selectedMode === 'progressive' ? `**Progressive Mode**:
|
||||
| Scenario | Recommended Command |
|
||||
|----------|-------------------|
|
||||
| Strategic planning, need issue tracking | `/workflow:roadmap-with-file` |
|
||||
| Quick task breakdown, immediate execution | `/workflow:lite-plan` |
|
||||
| Quick task breakdown, immediate execution | `/workflow-lite-plan` |
|
||||
| Collaborative multi-agent planning | `/workflow:collaborative-plan-with-file` |
|
||||
| Full specification documents | `spec-generator` skill |
|
||||
| Code implementation from existing plan | `/workflow:lite-execute` |
|
||||
|
||||
@@ -57,5 +57,5 @@ Session WFS-user-auth resumed
|
||||
- Status: active
|
||||
- Paused at: 2025-09-15T14:30:00Z
|
||||
- Resumed at: 2025-09-15T15:45:00Z
|
||||
- Ready for: /workflow:execute
|
||||
- Ready for: /workflow-execute
|
||||
```
|
||||
@@ -27,7 +27,7 @@ The `--type` parameter classifies sessions for CCW dashboard organization:
|
||||
|------|-------------|-------------|
|
||||
| `workflow` | Standard implementation (default) | `workflow-plan` skill |
|
||||
| `review` | Code review sessions | `review-cycle` skill |
|
||||
| `tdd` | TDD-based development | `workflow-tdd` skill |
|
||||
| `tdd` | TDD-based development | `workflow-tdd-plan` skill |
|
||||
| `test` | Test generation/fix sessions | `workflow-test-fix` skill |
|
||||
| `docs` | Documentation sessions | `memory-manage` skill |
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: design-sync
|
||||
description: Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow:plan consumption
|
||||
description: Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow-plan consumption
|
||||
argument-hint: --session <session_id> [--selected-prototypes "<list>"]
|
||||
allowed-tools: Read(*), Write(*), Edit(*), TodoWrite(*), Glob(*), Bash(*)
|
||||
---
|
||||
@@ -351,10 +351,10 @@ Updated artifacts:
|
||||
✓ {role_count} role analysis.md files - Design system references
|
||||
✓ ui-designer/design-system-reference.md - Design system reference guide
|
||||
|
||||
Design system assets ready for /workflow:plan:
|
||||
Design system assets ready for /workflow-plan:
|
||||
- design-tokens.json | style-guide.md | {prototype_count} reference prototypes
|
||||
|
||||
Next: /workflow:plan [--agent] "<task description>"
|
||||
Next: /workflow-plan [--agent] "<task description>"
|
||||
The plan phase will automatically discover and utilize the design system.
|
||||
```
|
||||
|
||||
@@ -394,7 +394,7 @@ Next: /workflow:plan [--agent] "<task description>"
|
||||
@../../{design_id}/prototypes/{prototype}.html
|
||||
```
|
||||
|
||||
## Integration with /workflow:plan
|
||||
## Integration with /workflow-plan
|
||||
|
||||
After this update, `workflow-plan` skill will discover design assets through:
|
||||
|
||||
|
||||
@@ -606,7 +606,7 @@ Total workflow time: ~{estimate_total_time()} minutes
|
||||
|
||||
{IF session_id:
|
||||
2. Create implementation tasks:
|
||||
/workflow:plan --session {session_id}
|
||||
/workflow-plan --session {session_id}
|
||||
|
||||
3. Generate tests (if needed):
|
||||
/workflow:test-gen {session_id}
|
||||
@@ -741,5 +741,5 @@ Design Quality:
|
||||
- Design token driven
|
||||
- {generated_count} assembled prototypes
|
||||
|
||||
Next: [/workflow:execute] OR [Open compare.html → /workflow:plan]
|
||||
Next: [/workflow-execute] OR [Open compare.html → /workflow-plan]
|
||||
```
|
||||
|
||||
@@ -43,19 +43,19 @@ function getExistingCommandSources() {
|
||||
// These commands were migrated to skills but references were never updated
|
||||
const COMMAND_TO_SKILL_MAP = {
|
||||
// workflow commands → skills
|
||||
'/workflow:plan': 'workflow-plan',
|
||||
'/workflow:execute': 'workflow-execute',
|
||||
'/workflow:lite-plan': 'workflow-lite-plan',
|
||||
'/workflow-plan': 'workflow-plan',
|
||||
'/workflow-execute': 'workflow-execute',
|
||||
'/workflow-lite-plan': 'workflow-lite-plan',
|
||||
'/workflow:lite-execute': 'workflow-lite-plan', // lite-execute is part of lite-plan skill
|
||||
'/workflow:lite-fix': 'workflow-lite-plan', // lite-fix is part of lite-plan skill
|
||||
'/workflow:multi-cli-plan': 'workflow-multi-cli-plan',
|
||||
'/workflow:plan-verify': 'workflow-plan', // plan-verify is a phase of workflow-plan
|
||||
'/workflow-multi-cli-plan': 'workflow-multi-cli-plan',
|
||||
'/workflow-plan-verify': 'workflow-plan', // plan-verify is a phase of workflow-plan
|
||||
'/workflow:replan': 'workflow-plan', // replan is a phase of workflow-plan
|
||||
'/workflow:tdd-plan': 'workflow-tdd',
|
||||
'/workflow:tdd-verify': 'workflow-tdd', // tdd-verify is a phase of workflow-tdd
|
||||
'/workflow:test-fix-gen': 'workflow-test-fix',
|
||||
'/workflow-tdd-plan': 'workflow-tdd-plan',
|
||||
'/workflow-tdd-verify': 'workflow-tdd-plan', // tdd-verify is a phase of workflow-tdd-plan
|
||||
'/workflow-test-fix': 'workflow-test-fix',
|
||||
'/workflow:test-gen': 'workflow-test-fix',
|
||||
'/workflow:test-cycle-execute': 'workflow-test-fix',
|
||||
'/workflow-test-fix': 'workflow-test-fix',
|
||||
'/workflow:review': 'review-cycle',
|
||||
'/workflow:review-session-cycle': 'review-cycle',
|
||||
'/workflow:review-module-cycle': 'review-cycle',
|
||||
@@ -70,8 +70,8 @@ const COMMAND_TO_SKILL_MAP = {
|
||||
'/workflow:tools:context-gather': 'workflow-plan',
|
||||
'/workflow:tools:conflict-resolution': 'workflow-plan',
|
||||
'/workflow:tools:task-generate-agent': 'workflow-plan',
|
||||
'/workflow:tools:task-generate-tdd': 'workflow-tdd',
|
||||
'/workflow:tools:tdd-coverage-analysis': 'workflow-tdd',
|
||||
'/workflow:tools:task-generate-tdd': 'workflow-tdd-plan',
|
||||
'/workflow:tools:tdd-coverage-analysis': 'workflow-tdd-plan',
|
||||
'/workflow:tools:test-concept-enhanced': 'workflow-test-fix',
|
||||
'/workflow:tools:test-context-gather': 'workflow-test-fix',
|
||||
'/workflow:tools:test-task-generate': 'workflow-test-fix',
|
||||
@@ -319,17 +319,17 @@ function fixBrokenReferences() {
|
||||
// Pattern: `/ command:name` references that point to non-existent commands
|
||||
// These are documentation references - update to point to skill names
|
||||
const proseRefFixes = {
|
||||
'`/workflow:plan`': '`workflow-plan` skill',
|
||||
'`/workflow:execute`': '`workflow-execute` skill',
|
||||
'`/workflow-plan`': '`workflow-plan` skill',
|
||||
'`/workflow-execute`': '`workflow-execute` skill',
|
||||
'`/workflow:lite-execute`': '`workflow-lite-plan` skill',
|
||||
'`/workflow:lite-fix`': '`workflow-lite-plan` skill',
|
||||
'`/workflow:plan-verify`': '`workflow-plan` skill (plan-verify phase)',
|
||||
'`/workflow-plan-verify`': '`workflow-plan` skill (plan-verify phase)',
|
||||
'`/workflow:replan`': '`workflow-plan` skill (replan phase)',
|
||||
'`/workflow:tdd-plan`': '`workflow-tdd` skill',
|
||||
'`/workflow:tdd-verify`': '`workflow-tdd` skill (tdd-verify phase)',
|
||||
'`/workflow:test-fix-gen`': '`workflow-test-fix` skill',
|
||||
'`/workflow-tdd-plan`': '`workflow-tdd-plan` skill',
|
||||
'`/workflow-tdd-verify`': '`workflow-tdd-plan` skill (tdd-verify phase)',
|
||||
'`/workflow-test-fix`': '`workflow-test-fix` skill',
|
||||
'`/workflow:test-gen`': '`workflow-test-fix` skill',
|
||||
'`/workflow:test-cycle-execute`': '`workflow-test-fix` skill',
|
||||
'`/workflow-test-fix`': '`workflow-test-fix` skill',
|
||||
'`/workflow:review`': '`review-cycle` skill',
|
||||
'`/workflow:review-session-cycle`': '`review-cycle` skill',
|
||||
'`/workflow:review-module-cycle`': '`review-cycle` skill',
|
||||
@@ -346,8 +346,8 @@ function fixBrokenReferences() {
|
||||
'`/workflow:tools:task-generate`': '`workflow-plan` skill (task-generate phase)',
|
||||
'`/workflow:ui-design:auto`': '`/workflow:ui-design:explore-auto`',
|
||||
'`/workflow:ui-design:update`': '`/workflow:ui-design:generate`',
|
||||
'`/workflow:multi-cli-plan`': '`workflow-multi-cli-plan` skill',
|
||||
'`/workflow:lite-plan`': '`workflow-lite-plan` skill',
|
||||
'`/workflow-multi-cli-plan`': '`workflow-multi-cli-plan` skill',
|
||||
'`/workflow-lite-plan`': '`workflow-lite-plan` skill',
|
||||
'`/cli:plan`': '`workflow-lite-plan` skill',
|
||||
'`/test-cycle-execute`': '`workflow-test-fix` skill',
|
||||
};
|
||||
|
||||
@@ -123,7 +123,7 @@
|
||||
| **命令调用语法** | 转换为 Phase 文件的相对路径 | `/workflow:session:start` → `phases/01-session-discovery.md` |
|
||||
| **命令路径引用** | 转换为 Skill 目录内路径 | `commands/workflow/tools/` → `phases/` |
|
||||
| **跨命令引用** | 转换为 Phase 间文件引用 | `workflow-plan` skill (context-gather phase) → `phases/02-context-gathering.md` |
|
||||
| **命令参数说明** | 移除或转为 Phase Prerequisites | `usage: /workflow:plan [session-id]` → Phase Prerequisites 中说明 |
|
||||
| **命令参数说明** | 移除或转为 Phase Prerequisites | `usage: /workflow-plan [session-id]` → Phase Prerequisites 中说明 |
|
||||
|
||||
**转换示例**:
|
||||
|
||||
|
||||
@@ -373,7 +373,7 @@ Initial → Phase 1 Mode Routing (completed)
|
||||
- `/workflow:session:start` - Start a new workflow session (optional, brainstorm creates its own)
|
||||
|
||||
**Follow-ups** (after brainstorm completes):
|
||||
- `/workflow:plan --session {sessionId}` - Generate implementation plan
|
||||
- `/workflow-plan --session {sessionId}` - Generate implementation plan
|
||||
- `/workflow:brainstorm:synthesis --session {sessionId}` - Run synthesis standalone (if skipped)
|
||||
|
||||
## Reference Information
|
||||
|
||||
@@ -469,7 +469,7 @@ ${selected_roles.length > 1 ? `
|
||||
- Run synthesis: /brainstorm --session ${session_id} (auto mode)
|
||||
` : `
|
||||
- Clarify insights: /brainstorm --session ${session_id} (auto mode)
|
||||
- Generate plan: /workflow:plan --session ${session_id}
|
||||
- Generate plan: /workflow-plan --session ${session_id}
|
||||
`}
|
||||
```
|
||||
|
||||
|
||||
@@ -744,7 +744,7 @@ Write(context_pkg_path, JSON.stringify(context_pkg))
|
||||
**Changelog**: .brainstorming/synthesis-changelog.md
|
||||
|
||||
### Next Steps
|
||||
PROCEED: `/workflow:plan --session {session-id}`
|
||||
PROCEED: `/workflow-plan --session {session-id}`
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
@@ -341,7 +341,7 @@
|
||||
},
|
||||
{
|
||||
"name": "execute",
|
||||
"command": "/workflow:execute",
|
||||
"command": "/workflow-execute",
|
||||
"description": "Coordinate agent execution for workflow tasks with automatic session discovery, parallel task processing, and status tracking",
|
||||
"arguments": "[-y|--yes] [--resume-session=\\\"session-id\\\"]",
|
||||
"category": "workflow",
|
||||
@@ -396,7 +396,7 @@
|
||||
},
|
||||
{
|
||||
"name": "lite-plan",
|
||||
"command": "/workflow:lite-plan",
|
||||
"command": "/workflow-lite-plan",
|
||||
"description": "Lightweight interactive planning workflow with in-memory planning, code exploration, and execution execute to lite-execute after user confirmation",
|
||||
"arguments": "[-y|--yes] [-e|--explore] \\\"task description\\\"|file.md",
|
||||
"category": "workflow",
|
||||
@@ -406,8 +406,8 @@
|
||||
"source": "../../commands/workflow/lite-plan.md"
|
||||
},
|
||||
{
|
||||
"name": "workflow:multi-cli-plan",
|
||||
"command": "/workflow:multi-cli-plan",
|
||||
"name": "workflow-multi-cli-plan",
|
||||
"command": "/workflow-multi-cli-plan",
|
||||
"description": "Multi-CLI collaborative planning workflow with ACE context gathering and iterative cross-verification. Uses cli-discuss-agent for Gemini+Codex+Claude analysis to converge on optimal execution plan.",
|
||||
"arguments": "[-y|--yes] <task description> [--max-rounds=3] [--tools=gemini,codex] [--mode=parallel|serial]",
|
||||
"category": "workflow",
|
||||
@@ -418,7 +418,7 @@
|
||||
},
|
||||
{
|
||||
"name": "plan-verify",
|
||||
"command": "/workflow:plan-verify",
|
||||
"command": "/workflow-plan-verify",
|
||||
"description": "Perform READ-ONLY verification analysis between IMPL_PLAN.md, task JSONs, and brainstorming artifacts. Generates structured report with quality gate recommendation. Does NOT modify any files.",
|
||||
"arguments": "[optional: --session session-id]",
|
||||
"category": "workflow",
|
||||
@@ -429,7 +429,7 @@
|
||||
},
|
||||
{
|
||||
"name": "plan",
|
||||
"command": "/workflow:plan",
|
||||
"command": "/workflow-plan",
|
||||
"description": "5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs",
|
||||
"arguments": "[-y|--yes] \\\"text description\\\"|file.md",
|
||||
"category": "workflow",
|
||||
@@ -550,7 +550,7 @@
|
||||
},
|
||||
{
|
||||
"name": "tdd-plan",
|
||||
"command": "/workflow:tdd-plan",
|
||||
"command": "/workflow-tdd-plan",
|
||||
"description": "TDD workflow planning with Red-Green-Refactor task chain generation, test-first development structure, and cycle tracking",
|
||||
"arguments": "\\\"feature description\\\"|file.md",
|
||||
"category": "workflow",
|
||||
@@ -561,7 +561,7 @@
|
||||
},
|
||||
{
|
||||
"name": "tdd-verify",
|
||||
"command": "/workflow:tdd-verify",
|
||||
"command": "/workflow-tdd-verify",
|
||||
"description": "Verify TDD workflow compliance against Red-Green-Refactor cycles. Generates quality report with coverage analysis and quality gate recommendation. Orchestrates sub-commands for comprehensive validation.",
|
||||
"arguments": "[optional: --session WFS-session-id]",
|
||||
"category": "workflow",
|
||||
@@ -572,7 +572,7 @@
|
||||
},
|
||||
{
|
||||
"name": "test-cycle-execute",
|
||||
"command": "/workflow:test-cycle-execute",
|
||||
"command": "/workflow-test-fix",
|
||||
"description": "Execute test-fix workflow with dynamic task generation and iterative fix cycles until test pass rate >= 95% or max iterations reached. Uses @cli-planning-agent for failure analysis and task generation.",
|
||||
"arguments": "[--resume-session=\\\"session-id\\\"] [--max-iterations=N]",
|
||||
"category": "workflow",
|
||||
@@ -583,7 +583,7 @@
|
||||
},
|
||||
{
|
||||
"name": "test-fix-gen",
|
||||
"command": "/workflow:test-fix-gen",
|
||||
"command": "/workflow-test-fix",
|
||||
"description": "Create test-fix workflow session from session ID, description, or file path with test strategy generation and task planning",
|
||||
"arguments": "(source-session-id | \\\"feature description\\\" | /path/to/file.md)",
|
||||
"category": "workflow",
|
||||
@@ -716,7 +716,7 @@
|
||||
{
|
||||
"name": "design-sync",
|
||||
"command": "/workflow:ui-design:design-sync",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow:plan consumption",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow-plan consumption",
|
||||
"arguments": "--session <session_id> [--selected-prototypes \"<list>\"]",
|
||||
"category": "workflow",
|
||||
"subcategory": "ui-design",
|
||||
|
||||
@@ -277,7 +277,7 @@
|
||||
{
|
||||
"name": "design-sync",
|
||||
"command": "/workflow:ui-design:design-sync",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow:plan consumption",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow-plan consumption",
|
||||
"arguments": "--session <session_id> [--selected-prototypes \"<list>\"]",
|
||||
"category": "workflow",
|
||||
"subcategory": "ui-design",
|
||||
|
||||
@@ -310,7 +310,7 @@
|
||||
{
|
||||
"name": "design-sync",
|
||||
"command": "/workflow:ui-design:design-sync",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow:plan consumption",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow-plan consumption",
|
||||
"arguments": "--session <session_id> [--selected-prototypes \"<list>\"]",
|
||||
"category": "workflow",
|
||||
"subcategory": "ui-design",
|
||||
|
||||
@@ -282,7 +282,7 @@
|
||||
{
|
||||
"name": "design-sync",
|
||||
"command": "/workflow:ui-design:design-sync",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow:plan consumption",
|
||||
"description": "Synchronize finalized design system references to brainstorming artifacts, preparing them for /workflow-plan consumption",
|
||||
"arguments": "--session <session_id> [--selected-prototypes \"<list>\"]",
|
||||
"category": "workflow",
|
||||
"subcategory": "ui-design",
|
||||
|
||||
@@ -142,70 +142,70 @@ def analyze_agent_file(file_path: Path) -> Dict[str, Any]:
|
||||
def build_command_relationships() -> Dict[str, Any]:
|
||||
"""Build command relationship mappings."""
|
||||
return {
|
||||
"workflow:plan": {
|
||||
"workflow-plan": {
|
||||
"calls_internally": ["workflow:session:start", "workflow:tools:context-gather", "workflow:tools:conflict-resolution", "workflow:tools:task-generate-agent"],
|
||||
"next_steps": ["workflow:plan-verify", "workflow:status", "workflow:execute"],
|
||||
"alternatives": ["workflow:tdd-plan"],
|
||||
"next_steps": ["workflow-plan-verify", "workflow:status", "workflow-execute"],
|
||||
"alternatives": ["workflow-tdd-plan"],
|
||||
"prerequisites": []
|
||||
},
|
||||
"workflow:tdd-plan": {
|
||||
"workflow-tdd-plan": {
|
||||
"calls_internally": ["workflow:session:start", "workflow:tools:context-gather", "workflow:tools:task-generate-tdd"],
|
||||
"next_steps": ["workflow:tdd-verify", "workflow:status", "workflow:execute"],
|
||||
"alternatives": ["workflow:plan"],
|
||||
"next_steps": ["workflow-tdd-verify", "workflow:status", "workflow-execute"],
|
||||
"alternatives": ["workflow-plan"],
|
||||
"prerequisites": []
|
||||
},
|
||||
"workflow:execute": {
|
||||
"prerequisites": ["workflow:plan", "workflow:tdd-plan"],
|
||||
"workflow-execute": {
|
||||
"prerequisites": ["workflow-plan", "workflow-tdd-plan"],
|
||||
"related": ["workflow:status", "workflow:resume"],
|
||||
"next_steps": ["workflow:review", "workflow:tdd-verify"]
|
||||
"next_steps": ["workflow:review", "workflow-tdd-verify"]
|
||||
},
|
||||
"workflow:plan-verify": {
|
||||
"prerequisites": ["workflow:plan"],
|
||||
"next_steps": ["workflow:execute"],
|
||||
"workflow-plan-verify": {
|
||||
"prerequisites": ["workflow-plan"],
|
||||
"next_steps": ["workflow-execute"],
|
||||
"related": ["workflow:status"]
|
||||
},
|
||||
"workflow:tdd-verify": {
|
||||
"prerequisites": ["workflow:execute"],
|
||||
"workflow-tdd-verify": {
|
||||
"prerequisites": ["workflow-execute"],
|
||||
"related": ["workflow:tools:tdd-coverage-analysis"]
|
||||
},
|
||||
"workflow:session:start": {
|
||||
"next_steps": ["workflow:plan", "workflow:execute"],
|
||||
"next_steps": ["workflow-plan", "workflow-execute"],
|
||||
"related": ["workflow:session:list", "workflow:session:resume"]
|
||||
},
|
||||
"workflow:session:resume": {
|
||||
"alternatives": ["workflow:resume"],
|
||||
"related": ["workflow:session:list", "workflow:status"]
|
||||
},
|
||||
"workflow:lite-plan": {
|
||||
"workflow-lite-plan": {
|
||||
"calls_internally": ["workflow:lite-execute"],
|
||||
"next_steps": ["workflow:lite-execute", "workflow:status"],
|
||||
"alternatives": ["workflow:plan"],
|
||||
"alternatives": ["workflow-plan"],
|
||||
"prerequisites": []
|
||||
},
|
||||
"workflow:lite-fix": {
|
||||
"next_steps": ["workflow:lite-execute", "workflow:status"],
|
||||
"alternatives": ["workflow:lite-plan"],
|
||||
"related": ["workflow:test-cycle-execute"]
|
||||
"alternatives": ["workflow-lite-plan"],
|
||||
"related": ["workflow-test-fix"]
|
||||
},
|
||||
"workflow:lite-execute": {
|
||||
"prerequisites": ["workflow:lite-plan", "workflow:lite-fix"],
|
||||
"related": ["workflow:execute", "workflow:status"]
|
||||
"prerequisites": ["workflow-lite-plan", "workflow:lite-fix"],
|
||||
"related": ["workflow-execute", "workflow:status"]
|
||||
},
|
||||
"workflow:review-session-cycle": {
|
||||
"prerequisites": ["workflow:execute"],
|
||||
"prerequisites": ["workflow-execute"],
|
||||
"next_steps": ["workflow:review-fix"],
|
||||
"related": ["workflow:review-module-cycle"]
|
||||
},
|
||||
"workflow:review-fix": {
|
||||
"prerequisites": ["workflow:review-module-cycle", "workflow:review-session-cycle"],
|
||||
"related": ["workflow:test-cycle-execute"]
|
||||
"related": ["workflow-test-fix"]
|
||||
},
|
||||
"memory:docs": {
|
||||
"calls_internally": ["workflow:session:start", "workflow:tools:context-gather"],
|
||||
"next_steps": ["workflow:execute"]
|
||||
"next_steps": ["workflow-execute"]
|
||||
},
|
||||
"memory:skill-memory": {
|
||||
"next_steps": ["workflow:plan", "cli:analyze"],
|
||||
"next_steps": ["workflow-plan", "cli:analyze"],
|
||||
"related": ["memory:load-skill-memory"]
|
||||
}
|
||||
}
|
||||
@@ -213,11 +213,11 @@ def build_command_relationships() -> Dict[str, Any]:
|
||||
def identify_essential_commands(all_commands: List[Dict]) -> List[Dict]:
|
||||
"""Identify the most essential commands for beginners."""
|
||||
essential_names = [
|
||||
"workflow:lite-plan", "workflow:lite-fix", "workflow:plan",
|
||||
"workflow:execute", "workflow:status", "workflow:session:start",
|
||||
"workflow-lite-plan", "workflow:lite-fix", "workflow-plan",
|
||||
"workflow-execute", "workflow:status", "workflow:session:start",
|
||||
"workflow:review-session-cycle", "cli:analyze", "cli:chat",
|
||||
"memory:docs", "workflow:brainstorm:artifacts",
|
||||
"workflow:plan-verify", "workflow:resume", "version"
|
||||
"workflow-plan-verify", "workflow:resume", "version"
|
||||
]
|
||||
|
||||
essential = []
|
||||
|
||||
@@ -67,9 +67,9 @@ spec-generator/
|
||||
## Handoff
|
||||
|
||||
After Phase 6, choose execution path:
|
||||
- `workflow:lite-plan` - Execute per Epic
|
||||
- `workflow-lite-plan` - Execute per Epic
|
||||
- `workflow:req-plan-with-file` - Roadmap decomposition
|
||||
- `workflow:plan` - Full planning
|
||||
- `workflow-plan` - Full planning
|
||||
- `issue:new` - Create issues per Epic
|
||||
|
||||
## Design Principles
|
||||
|
||||
@@ -210,7 +210,7 @@ AskUserQuestion({
|
||||
options: [
|
||||
{
|
||||
label: "Execute via lite-plan",
|
||||
description: "Start implementing with /workflow:lite-plan, one Epic at a time"
|
||||
description: "Start implementing with /workflow-lite-plan, one Epic at a time"
|
||||
},
|
||||
{
|
||||
label: "Create roadmap",
|
||||
@@ -218,7 +218,7 @@ AskUserQuestion({
|
||||
},
|
||||
{
|
||||
label: "Full planning",
|
||||
description: "Detailed planning with /workflow:plan for the full scope"
|
||||
description: "Detailed planning with /workflow-plan for the full scope"
|
||||
},
|
||||
{
|
||||
label: "Create Issues",
|
||||
@@ -242,7 +242,7 @@ if (selection === "Execute via lite-plan") {
|
||||
const epicContent = Read(firstMvpFile);
|
||||
const title = extractTitle(epicContent); // First # heading
|
||||
const description = extractSection(epicContent, "Description");
|
||||
Skill(skill="workflow:lite-plan", args=`"${title}: ${description}"`)
|
||||
Skill(skill="workflow-lite-plan", args=`"${title}: ${description}"`)
|
||||
}
|
||||
|
||||
if (selection === "Full planning" || selection === "Create roadmap") {
|
||||
@@ -368,7 +368,7 @@ ${extractSection(epicContent, "Architecture")}
|
||||
// → context-package.json.brainstorm_artifacts populated
|
||||
// → action-planning-agent loads guidance_specification (P1) + feature_index (P2)
|
||||
if (selection === "Full planning") {
|
||||
Skill(skill="workflow:plan", args=`"${structuredDesc}"`)
|
||||
Skill(skill="workflow-plan", args=`"${structuredDesc}"`)
|
||||
} else {
|
||||
Skill(skill="workflow:req-plan-with-file", args=`"${extractGoal(specSummary)}"`)
|
||||
}
|
||||
|
||||
@@ -58,6 +58,27 @@ Each capability produces default output artifacts:
|
||||
| tester | Test results | `<session>/artifacts/test-report.md` |
|
||||
| planner | Execution plan | `<session>/artifacts/execution-plan.md` |
|
||||
|
||||
### Step 2.5: Key File Inference
|
||||
|
||||
For each task, infer relevant files based on capability type and task keywords:
|
||||
|
||||
| Capability | File Inference Strategy |
|
||||
|------------|------------------------|
|
||||
| researcher | Extract domain keywords → map to likely directories (e.g., "auth" → `src/auth/**`, `middleware/auth.ts`) |
|
||||
| developer | Extract feature/module keywords → map to source files (e.g., "payment" → `src/payments/**`, `types/payment.ts`) |
|
||||
| designer | Look for architecture/config keywords → map to config/schema files |
|
||||
| analyst | Extract target keywords → map to files under analysis |
|
||||
| tester | Extract test target keywords → map to source + test files |
|
||||
| writer | Extract documentation target → map to relevant source files for context |
|
||||
| planner | No specific files (planning is abstract) |
|
||||
|
||||
**Inference rules:**
|
||||
- Extract nouns and verbs from task description
|
||||
- Match against common directory patterns (src/, lib/, components/, services/, utils/)
|
||||
- Include related type definition files (types/, *.d.ts)
|
||||
- For "fix bug" tasks, include error-prone areas (error handlers, validation)
|
||||
- For "implement feature" tasks, include similar existing features as reference
|
||||
|
||||
### Step 3: Dependency Graph Construction
|
||||
|
||||
Build a DAG of work streams using natural ordering tiers:
|
||||
@@ -90,16 +111,26 @@ Apply merging rules to reduce role count (cap at 5).
|
||||
|
||||
### Step 6: Role-Spec Metadata Assignment
|
||||
|
||||
For each role, determine frontmatter fields:
|
||||
For each role, determine frontmatter and generation hints:
|
||||
|
||||
| Field | Derivation |
|
||||
|-------|------------|
|
||||
| `prefix` | From capability prefix (e.g., RESEARCH, DRAFT, IMPL) |
|
||||
| `inner_loop` | `true` if role has 2+ serial same-prefix tasks |
|
||||
| `subagents` | Inferred from responsibility type: orchestration -> [explore], code-gen (docs) -> [explore], validation -> [] |
|
||||
| `subagents` | Suggested, not mandatory — coordinator may adjust based on task needs |
|
||||
| `pattern_hint` | Reference pattern name from role-spec-template (research/document/code/analysis/validation) — guides coordinator's Phase 2-4 composition, NOT a rigid template selector |
|
||||
| `output_type` | `artifact` (new files in session/artifacts/) / `codebase` (modify existing project files) / `mixed` (both) — determines verification strategy in Behavioral Traits |
|
||||
| `message_types.success` | `<prefix>_complete` |
|
||||
| `message_types.error` | `error` |
|
||||
|
||||
**output_type derivation**:
|
||||
|
||||
| Task Signal | output_type | Example |
|
||||
|-------------|-------------|---------|
|
||||
| "write report", "analyze", "research" | `artifact` | New analysis-report.md in session |
|
||||
| "update docs", "modify code", "fix bug" | `codebase` | Modify existing project files |
|
||||
| "implement feature + write summary" | `mixed` | Code changes + implementation summary |
|
||||
|
||||
## Phase 4: Output
|
||||
|
||||
Write `<session-folder>/task-analysis.json`:
|
||||
@@ -113,7 +144,22 @@ Write `<session-folder>/task-analysis.json`:
|
||||
"prefix": "RESEARCH",
|
||||
"responsibility_type": "orchestration",
|
||||
"tasks": [
|
||||
{ "id": "RESEARCH-001", "description": "..." }
|
||||
{
|
||||
"id": "RESEARCH-001",
|
||||
"goal": "What this task achieves and why",
|
||||
"steps": [
|
||||
"step 1: specific action with clear verb",
|
||||
"step 2: specific action with clear verb",
|
||||
"step 3: specific action with clear verb"
|
||||
],
|
||||
"key_files": [
|
||||
"src/path/to/relevant.ts",
|
||||
"src/path/to/other.ts"
|
||||
],
|
||||
"upstream_artifacts": [],
|
||||
"success_criteria": "Measurable completion condition",
|
||||
"constraints": "Scope limits, focus areas"
|
||||
}
|
||||
],
|
||||
"artifacts": ["research-findings.md"]
|
||||
}
|
||||
@@ -132,6 +178,8 @@ Write `<session-folder>/task-analysis.json`:
|
||||
"inner_loop": false,
|
||||
"role_spec_metadata": {
|
||||
"subagents": ["explore"],
|
||||
"pattern_hint": "research",
|
||||
"output_type": "artifact",
|
||||
"message_types": {
|
||||
"success": "research_complete",
|
||||
"error": "error"
|
||||
|
||||
@@ -26,7 +26,21 @@ Create task chains from dynamic dependency graphs. Builds pipelines from the tas
|
||||
TaskCreate({
|
||||
subject: "<PREFIX>-<NNN>",
|
||||
owner: "<role-name>",
|
||||
description: "<task description from task-analysis>\nSession: <session-folder>\nScope: <scope>\nInnerLoop: <true|false>\nRoleSpec: <session-folder>/role-specs/<role-name>.md",
|
||||
description: "PURPOSE: <goal> | Success: <success_criteria>
|
||||
TASK:
|
||||
- <step 1>
|
||||
- <step 2>
|
||||
- <step 3>
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Upstream artifacts: <artifact-1.md>, <artifact-2.md>
|
||||
- Key files: <file1>, <file2>
|
||||
- Shared memory: <session>/shared-memory.json
|
||||
EXPECTED: <deliverable path> + <quality criteria>
|
||||
CONSTRAINTS: <scope limits>
|
||||
---
|
||||
InnerLoop: <true|false>
|
||||
RoleSpec: <session-folder>/role-specs/<role-name>.md",
|
||||
blockedBy: [<dependency-list from graph>],
|
||||
status: "pending"
|
||||
})
|
||||
@@ -37,16 +51,34 @@ TaskCreate({
|
||||
|
||||
### Task Description Template
|
||||
|
||||
Every task description includes session path, inner loop flag, and role-spec path:
|
||||
Every task description includes structured fields for clarity:
|
||||
|
||||
```
|
||||
<task description>
|
||||
Session: <session-folder>
|
||||
Scope: <scope>
|
||||
PURPOSE: <goal from task-analysis.json#tasks[].goal> | Success: <success_criteria from task-analysis.json#tasks[].success_criteria>
|
||||
TASK:
|
||||
- <step 1 from task-analysis.json#tasks[].steps[]>
|
||||
- <step 2 from task-analysis.json#tasks[].steps[]>
|
||||
- <step 3 from task-analysis.json#tasks[].steps[]>
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Upstream artifacts: <comma-separated list from task-analysis.json#tasks[].upstream_artifacts[]>
|
||||
- Key files: <comma-separated list from task-analysis.json#tasks[].key_files[]>
|
||||
- Shared memory: <session>/shared-memory.json
|
||||
EXPECTED: <artifact path from task-analysis.json#capabilities[].artifacts[]> + <quality criteria based on capability type>
|
||||
CONSTRAINTS: <constraints from task-analysis.json#tasks[].constraints>
|
||||
---
|
||||
InnerLoop: <true|false>
|
||||
RoleSpec: <session-folder>/role-specs/<role-name>.md
|
||||
```
|
||||
|
||||
**Field Mapping**:
|
||||
- `PURPOSE`: From `task-analysis.json#capabilities[].tasks[].goal` + `success_criteria`
|
||||
- `TASK`: From `task-analysis.json#capabilities[].tasks[].steps[]`
|
||||
- `CONTEXT.Upstream artifacts`: From `task-analysis.json#capabilities[].tasks[].upstream_artifacts[]`
|
||||
- `CONTEXT.Key files`: From `task-analysis.json#capabilities[].tasks[].key_files[]`
|
||||
- `EXPECTED`: From `task-analysis.json#capabilities[].artifacts[]` + quality criteria
|
||||
- `CONSTRAINTS`: From `task-analysis.json#capabilities[].tasks[].constraints`
|
||||
|
||||
### InnerLoop Flag Rules
|
||||
|
||||
| Condition | InnerLoop |
|
||||
|
||||
@@ -60,10 +60,12 @@ Receive callback from [<role>]
|
||||
+- None completed -> STOP
|
||||
```
|
||||
|
||||
**Fast-advance note**: A worker may have already spawned its successor via fast-advance. When processing a callback:
|
||||
1. Check if the expected next task is already `in_progress` (fast-advanced)
|
||||
2. If yes -> skip spawning that task, update active_workers to include the fast-advanced worker
|
||||
3. If no -> normal handleSpawnNext
|
||||
**Fast-advance reconciliation**: A worker may have already spawned its successor via fast-advance. When processing any callback or resume:
|
||||
1. Read recent `fast_advance` messages from team_msg (type="fast_advance")
|
||||
2. For each fast_advance message: add the spawned successor to `active_workers` if not already present
|
||||
3. Check if the expected next task is already `in_progress` (fast-advanced)
|
||||
4. If yes -> skip spawning that task (already running)
|
||||
5. If no -> normal handleSpawnNext
|
||||
|
||||
---
|
||||
|
||||
@@ -262,6 +264,13 @@ handleCallback / handleResume detects:
|
||||
4. -> handleSpawnNext (will re-spawn the task normally)
|
||||
```
|
||||
|
||||
### Fast-Advance State Sync
|
||||
|
||||
On every coordinator wake (handleCallback, handleResume, handleCheck):
|
||||
1. Read team_msg entries with `type="fast_advance"` since last coordinator wake
|
||||
2. For each entry: sync `active_workers` with the spawned successor
|
||||
3. This ensures coordinator's state reflects fast-advance decisions even before the successor's callback arrives
|
||||
|
||||
### Consensus-Blocked Handling
|
||||
|
||||
```
|
||||
|
||||
@@ -182,13 +182,15 @@ Regardless of complexity score or role count, coordinator MUST:
|
||||
|
||||
4. **Call TeamCreate** with team name derived from session ID
|
||||
|
||||
5. **Read `specs/role-spec-template.md`** + task-analysis.json
|
||||
5. **Read `specs/role-spec-template.md`** for Behavioral Traits + Reference Patterns
|
||||
|
||||
6. **For each role in task-analysis.json#roles**:
|
||||
- Fill role-spec template with:
|
||||
- YAML frontmatter: role, prefix, inner_loop, subagents, message_types
|
||||
- Phase 2-4 content from responsibility type reference sections in template
|
||||
- Task-specific instructions from task description
|
||||
- Fill YAML frontmatter: role, prefix, inner_loop, subagents, message_types
|
||||
- **Compose Phase 2-4 content** (NOT copy from template):
|
||||
- Phase 2: Derive input sources and context loading steps from **task description + upstream dependencies**
|
||||
- Phase 3: Describe **execution goal** (WHAT to achieve) from task description — do NOT prescribe specific subagent or tool
|
||||
- Phase 4: Combine **Behavioral Traits** (from template) + **output_type** (from task analysis) to compose verification steps
|
||||
- Reference Patterns may guide phase structure, but task description determines specific content
|
||||
- Write generated role-spec to `<session>/role-specs/<role-name>.md`
|
||||
|
||||
7. **Register roles** in team-session.json#roles (with `role_spec` path instead of `role_file`)
|
||||
|
||||
@@ -63,233 +63,117 @@ message_types:
|
||||
| `<placeholder>` notation | Use angle brackets for variable substitution |
|
||||
| Reference subagents by name | team-worker resolves invocation from its delegation templates |
|
||||
|
||||
## Phase 2-4 Content by Responsibility Type
|
||||
## Behavioral Traits
|
||||
|
||||
Select the matching section based on `responsibility_type` from task analysis.
|
||||
All dynamically generated role-specs MUST embed these traits into Phase 4. Coordinator copies this section verbatim into every generated role-spec as a Phase 4 appendix.
|
||||
|
||||
### orchestration
|
||||
**Design principle**: Constrain behavioral characteristics (accuracy, feedback, quality gates), NOT specific actions (which tool, which subagent, which path). Tasks are diverse — the coordinator composes task-specific Phase 2-3 instructions, while these traits ensure execution quality regardless of task type.
|
||||
|
||||
**Phase 2: Context Assessment**
|
||||
### Accuracy — outputs must be verifiable
|
||||
|
||||
- Files claimed as **created** → Read to confirm file exists and has content
|
||||
- Files claimed as **modified** → Read to confirm content actually changed
|
||||
- Analysis claimed as **complete** → artifact file exists in `<session>/artifacts/`
|
||||
|
||||
### Feedback Contract — completion report must include evidence
|
||||
|
||||
Phase 4 must produce a verification summary with these fields:
|
||||
|
||||
| Field | When Required | Content |
|
||||
|-------|---------------|---------|
|
||||
| `files_produced` | New files created | Path list |
|
||||
| `files_modified` | Existing files changed | Path + before/after line count |
|
||||
| `artifacts_written` | Always | Paths in `<session>/artifacts/` |
|
||||
| `verification_method` | Always | How verified: Read confirm / syntax check / diff |
|
||||
|
||||
### Quality Gate — verify before reporting complete
|
||||
|
||||
- Phase 4 MUST verify Phase 3's **actual output** (not planned output)
|
||||
- Verification fails → retry Phase 3 (max 2 retries)
|
||||
- Still fails → report `partial_completion` with details, NOT `completed`
|
||||
- Update `shared-memory.json` with key findings after verification passes
|
||||
|
||||
### Error Protocol
|
||||
|
||||
- Primary approach fails → try alternative (different subagent / different tool)
|
||||
- 2 retries exhausted → escalate to coordinator with failure details
|
||||
- NEVER: skip verification and report completed
|
||||
|
||||
---
|
||||
|
||||
## Reference Patterns
|
||||
|
||||
Coordinator MAY reference these patterns when composing Phase 2-4 content for a role-spec. These are **structural guidance, not mandatory templates**. The task description determines specific behavior — patterns only suggest common phase structures.
|
||||
|
||||
### Research / Exploration
|
||||
|
||||
- Phase 2: Define exploration scope + load prior knowledge from shared-memory and wisdom
|
||||
- Phase 3: Explore via subagents, direct tool calls, or codebase search — approach chosen by agent
|
||||
- Phase 4: Verify findings documented (Behavioral Traits) + update shared-memory
|
||||
|
||||
### Document / Content
|
||||
|
||||
- Phase 2: Load upstream artifacts + read target files (if modifying existing docs)
|
||||
- Phase 3: Create new documents OR modify existing documents — determined by task, not template
|
||||
- Phase 4: Verify documents exist with expected content (Behavioral Traits) + update shared-memory
|
||||
|
||||
### Code Implementation
|
||||
|
||||
- Phase 2: Load design/spec artifacts from upstream
|
||||
- Phase 3: Implement code changes — subagent choice and approach determined by task complexity
|
||||
- Phase 4: Syntax check + file verification (Behavioral Traits) + update shared-memory
|
||||
|
||||
### Analysis / Audit
|
||||
|
||||
- Phase 2: Load analysis targets (artifacts or source files)
|
||||
- Phase 3: Multi-dimension analysis — perspectives and depth determined by task
|
||||
- Phase 4: Verify report exists + severity classification (Behavioral Traits) + update shared-memory
|
||||
|
||||
### Validation / Testing
|
||||
|
||||
- Phase 2: Detect test framework + identify changed files from upstream
|
||||
- Phase 3: Run test-fix cycle — iteration count and strategy determined by task
|
||||
- Phase 4: Verify pass rate + coverage (Behavioral Traits) + update shared-memory
|
||||
|
||||
---
|
||||
|
||||
## Knowledge Transfer Protocol
|
||||
|
||||
How context flows between roles. Coordinator MUST reference this when composing Phase 2 of any role-spec.
|
||||
|
||||
### Transfer Channels
|
||||
|
||||
| Channel | Scope | Mechanism | When to Use |
|
||||
|---------|-------|-----------|-------------|
|
||||
| **Artifacts** | Producer -> Consumer | Write to `<session>/artifacts/<name>.md`, consumer reads in Phase 2 | Structured deliverables (reports, plans, specs) |
|
||||
| **shared-memory.json** | Cross-role | Read-merge-write `<session>/shared-memory.json` | Key findings, decisions, metadata (small, structured data) |
|
||||
| **Wisdom** | Cross-task | Append to `<session>/wisdom/{learnings,decisions,conventions,issues}.md` | Patterns, conventions, risks discovered during execution |
|
||||
| **context_accumulator** | Intra-role (inner loop) | In-memory array, passed to each subsequent task in same-prefix loop | Prior task summaries within same role's inner loop |
|
||||
| **Exploration cache** | Cross-role | `<session>/explorations/cache-index.json` + per-angle JSON | Codebase discovery results, prevents duplicate exploration |
|
||||
|
||||
### Phase 2 Context Loading (role-spec must specify)
|
||||
|
||||
Every generated role-spec Phase 2 MUST declare which upstream sources to load:
|
||||
|
||||
```
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Task description | From TaskGet | Yes |
|
||||
| Shared memory | <session>/shared-memory.json | No |
|
||||
| Prior artifacts | <session>/artifacts/ | No |
|
||||
| Wisdom | <session>/wisdom/ | No |
|
||||
|
||||
Loading steps:
|
||||
1. Extract session path from task description
|
||||
2. Read shared-memory.json for cross-role context
|
||||
3. Read prior artifacts (if any from upstream tasks)
|
||||
2. Read upstream artifacts: <list which artifacts from which upstream role>
|
||||
3. Read shared-memory.json for cross-role decisions
|
||||
4. Load wisdom files for accumulated knowledge
|
||||
5. Optionally call explore subagent for codebase context
|
||||
5. For inner_loop roles: load context_accumulator from prior tasks
|
||||
6. Check exploration cache before running new explorations
|
||||
```
|
||||
|
||||
**Phase 3: Subagent Execution**
|
||||
### shared-memory.json Usage Convention
|
||||
|
||||
```
|
||||
Delegate to appropriate subagent based on task:
|
||||
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
run_in_background: false,
|
||||
description: "<task-type> for <task-id>",
|
||||
prompt: "## Task
|
||||
- <task description>
|
||||
- Session: <session-folder>
|
||||
## Context
|
||||
<prior artifacts + shared memory + explore results>
|
||||
## Expected Output
|
||||
Write artifact to: <session>/artifacts/<artifact-name>.md
|
||||
Return JSON summary: { artifact_path, summary, key_decisions[], warnings[] }"
|
||||
})
|
||||
```
|
||||
|
||||
**Phase 4: Result Aggregation**
|
||||
|
||||
```
|
||||
1. Verify subagent output artifact exists
|
||||
2. Read artifact, validate structure/completeness
|
||||
3. Update shared-memory.json with key findings
|
||||
4. Write insights to wisdom/ files
|
||||
```
|
||||
|
||||
### code-gen (docs)
|
||||
|
||||
**Phase 2: Load Prior Context**
|
||||
|
||||
```
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Task description | From TaskGet | Yes |
|
||||
| Prior artifacts | <session>/artifacts/ from upstream | Conditional |
|
||||
| Shared memory | <session>/shared-memory.json | No |
|
||||
|
||||
Loading steps:
|
||||
1. Extract session path from task description
|
||||
2. Read upstream artifacts
|
||||
3. Read shared-memory.json for cross-role context
|
||||
```
|
||||
|
||||
**Phase 3: Document Generation**
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "universal-executor",
|
||||
run_in_background: false,
|
||||
description: "Generate <doc-type> for <task-id>",
|
||||
prompt: "## Task
|
||||
- Generate: <document type>
|
||||
- Session: <session-folder>
|
||||
## Prior Context
|
||||
<upstream artifacts + shared memory>
|
||||
## Expected Output
|
||||
Write document to: <session>/artifacts/<doc-name>.md
|
||||
Return JSON: { artifact_path, summary, key_decisions[], warnings[] }"
|
||||
})
|
||||
```
|
||||
|
||||
**Phase 4: Structure Validation**
|
||||
|
||||
```
|
||||
1. Verify document artifact exists
|
||||
2. Check document has expected sections
|
||||
3. Validate no placeholder text remains
|
||||
4. Update shared-memory.json with document metadata
|
||||
```
|
||||
|
||||
### code-gen (code)
|
||||
|
||||
**Phase 2: Load Plan/Specs**
|
||||
|
||||
```
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Task description | From TaskGet | Yes |
|
||||
| Plan/design artifacts | <session>/artifacts/ | Conditional |
|
||||
| Shared memory | <session>/shared-memory.json | No |
|
||||
|
||||
Loading steps:
|
||||
1. Extract session path from task description
|
||||
2. Read plan/design artifacts from upstream
|
||||
3. Load shared-memory.json for implementation context
|
||||
```
|
||||
|
||||
**Phase 3: Code Implementation**
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "code-developer",
|
||||
run_in_background: false,
|
||||
description: "Implement <task-id>",
|
||||
prompt: "## Task
|
||||
- <implementation description>
|
||||
- Session: <session-folder>
|
||||
## Plan/Design Context
|
||||
<upstream artifacts>
|
||||
## Expected Output
|
||||
Implement code changes.
|
||||
Write summary to: <session>/artifacts/implementation-summary.md
|
||||
Return JSON: { artifact_path, summary, files_changed[], warnings[] }"
|
||||
})
|
||||
```
|
||||
|
||||
**Phase 4: Syntax Validation**
|
||||
|
||||
```
|
||||
1. Run syntax check (tsc --noEmit or equivalent)
|
||||
2. Verify all planned files exist
|
||||
3. If validation fails -> attempt auto-fix (max 2 attempts)
|
||||
4. Write implementation summary to artifacts/
|
||||
```
|
||||
|
||||
### read-only
|
||||
|
||||
**Phase 2: Target Loading**
|
||||
|
||||
```
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Task description | From TaskGet | Yes |
|
||||
| Target artifacts/files | From task description or upstream | Yes |
|
||||
| Shared memory | <session>/shared-memory.json | No |
|
||||
|
||||
Loading steps:
|
||||
1. Extract session path and target files from task description
|
||||
2. Read target artifacts or source files for analysis
|
||||
3. Load shared-memory.json for context
|
||||
```
|
||||
|
||||
**Phase 3: Multi-Dimension Analysis**
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
run_in_background: false,
|
||||
description: "Analyze <target> for <task-id>",
|
||||
prompt: "## Task
|
||||
- Analyze: <target description>
|
||||
- Dimensions: <analysis dimensions from coordinator>
|
||||
- Session: <session-folder>
|
||||
## Target Content
|
||||
<artifact content or file content>
|
||||
## Expected Output
|
||||
Write report to: <session>/artifacts/analysis-report.md
|
||||
Return JSON: { artifact_path, summary, findings[], severity_counts }"
|
||||
})
|
||||
```
|
||||
|
||||
**Phase 4: Severity Classification**
|
||||
|
||||
```
|
||||
1. Verify analysis report exists
|
||||
2. Classify findings by severity (Critical/High/Medium/Low)
|
||||
3. Update shared-memory.json with key findings
|
||||
4. Write issues to wisdom/issues.md
|
||||
```
|
||||
|
||||
### validation
|
||||
|
||||
**Phase 2: Environment Detection**
|
||||
|
||||
```
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Task description | From TaskGet | Yes |
|
||||
| Implementation artifacts | Upstream code changes | Yes |
|
||||
|
||||
Loading steps:
|
||||
1. Detect test framework from project files
|
||||
2. Get changed files from implementation
|
||||
3. Identify test command and coverage tool
|
||||
```
|
||||
|
||||
**Phase 3: Test-Fix Cycle**
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "test-fix-agent",
|
||||
run_in_background: false,
|
||||
description: "Test-fix for <task-id>",
|
||||
prompt: "## Task
|
||||
- Run tests and fix failures
|
||||
- Session: <session-folder>
|
||||
- Max iterations: 5
|
||||
## Changed Files
|
||||
<from upstream implementation>
|
||||
## Expected Output
|
||||
Write report to: <session>/artifacts/test-report.md
|
||||
Return JSON: { artifact_path, pass_rate, coverage, remaining_failures[] }"
|
||||
})
|
||||
```
|
||||
|
||||
**Phase 4: Result Analysis**
|
||||
|
||||
```
|
||||
1. Check pass rate >= 95%
|
||||
2. Check coverage meets threshold
|
||||
3. Generate test report with pass/fail counts
|
||||
4. Update shared-memory.json with test results
|
||||
```
|
||||
- **Read-merge-write**: Read current content -> merge new keys -> write back (NOT overwrite)
|
||||
- **Namespaced keys**: Each role writes under its own namespace: `{ "<role_name>": { ... } }`
|
||||
- **Small data only**: Key findings, decision summaries, metadata. NOT full documents
|
||||
- **Example**:
|
||||
```json
|
||||
{
|
||||
"researcher": { "key_findings": [...], "scope": "..." },
|
||||
"writer": { "documents_created": [...], "style_decisions": [...] },
|
||||
"developer": { "files_changed": [...], "patterns_used": [...] }
|
||||
}
|
||||
```
|
||||
|
||||
@@ -192,7 +192,9 @@ Beat Cycle (single beat)
|
||||
Fast-Advance (skips coordinator for simple linear successors)
|
||||
======================================================================
|
||||
[Worker A] Phase 5 complete
|
||||
+- 1 ready task? simple successor? --> spawn team-worker B directly
|
||||
+- 1 ready task? simple successor?
|
||||
| --> spawn team-worker B directly
|
||||
| --> log fast_advance to message bus (coordinator syncs on next wake)
|
||||
+- complex case? --> SendMessage to coordinator
|
||||
======================================================================
|
||||
```
|
||||
|
||||
@@ -80,18 +80,40 @@ GC loop (max 2 rounds): QA-FE verdict=NEEDS_FIX -> create DEV-FE-002 + QA-FE-002
|
||||
|
||||
### Task Description Template
|
||||
|
||||
Every task description includes session, scope, and metadata:
|
||||
Every task description uses structured format for clarity:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "<TASK-ID>",
|
||||
owner: "<role>",
|
||||
description: "<task description>\nSession: <session-folder>\nScope: <scope>\nInlineDiscuss: <DISCUSS-NNN or none>\nInnerLoop: <true|false>",
|
||||
description: "PURPOSE: <what this task achieves> | Success: <measurable completion criteria>
|
||||
TASK:
|
||||
- <step 1: specific action>
|
||||
- <step 2: specific action>
|
||||
- <step 3: specific action>
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Scope: <scope>
|
||||
- Upstream artifacts: <artifact-1.md>, <artifact-2.md>
|
||||
- Key files: <file1>, <file2> (if applicable)
|
||||
- Shared memory: <session>/shared-memory.json
|
||||
EXPECTED: <deliverable path> + <quality criteria>
|
||||
CONSTRAINTS: <scope limits, focus areas>
|
||||
---
|
||||
InlineDiscuss: <DISCUSS-NNN or none>
|
||||
InnerLoop: <true|false>",
|
||||
blockedBy: [<dependency-list>],
|
||||
status: "pending"
|
||||
})
|
||||
```
|
||||
|
||||
**Field Guidelines**:
|
||||
- **PURPOSE**: Clear goal statement + success criteria
|
||||
- **TASK**: 2-5 actionable steps with specific verbs
|
||||
- **CONTEXT**: Session path, scope, upstream artifacts, relevant files
|
||||
- **EXPECTED**: Output artifact path + quality requirements
|
||||
- **CONSTRAINTS**: Scope boundaries, focus areas, exclusions
|
||||
|
||||
**InnerLoop Flag Rules**:
|
||||
|
||||
| Role | InnerLoop |
|
||||
@@ -107,13 +129,22 @@ TaskCreate({
|
||||
TaskCreate({
|
||||
subject: "<ORIGINAL-ID>-R1",
|
||||
owner: "<same-role-as-original>",
|
||||
description: "<revision-type> revision of <ORIGINAL-ID>.\n
|
||||
Session: <session-folder>\n
|
||||
Original artifact: <artifact-path>\n
|
||||
User feedback: <feedback-text or 'system-initiated'>\n
|
||||
Revision scope: <targeted|full>\n
|
||||
InlineDiscuss: <same-discuss-round-as-original>\n
|
||||
InnerLoop: <true|false based on role>",
|
||||
description: "PURPOSE: <revision-type> revision of <ORIGINAL-ID> | Success: Address feedback and pass quality checks
|
||||
TASK:
|
||||
- Review original artifact and feedback
|
||||
- Apply targeted fixes to weak areas
|
||||
- Validate against quality criteria
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Original artifact: <artifact-path>
|
||||
- User feedback: <feedback-text or 'system-initiated'>
|
||||
- Revision scope: <targeted|full>
|
||||
- Shared memory: <session>/shared-memory.json
|
||||
EXPECTED: Updated artifact at <artifact-path> + revision summary
|
||||
CONSTRAINTS: <revision scope limits>
|
||||
---
|
||||
InlineDiscuss: <same-discuss-round-as-original>
|
||||
InnerLoop: <true|false based on role>",
|
||||
status: "pending",
|
||||
blockedBy: [<predecessor-R1 if cascaded>]
|
||||
})
|
||||
@@ -138,14 +169,23 @@ TaskCreate({
|
||||
TaskCreate({
|
||||
subject: "IMPROVE-<dimension>-001",
|
||||
owner: "writer",
|
||||
description: "Quality improvement: <dimension>.\n
|
||||
Session: <session-folder>\n
|
||||
Current score: <X>%\n
|
||||
Target: 80%\n
|
||||
Readiness report: <session>/spec/readiness-report.md\n
|
||||
Weak areas: <extracted-from-report>\n
|
||||
Strategy: <from-dimension-strategy-table>\n
|
||||
InnerLoop: true",
|
||||
description: "PURPOSE: Improve <dimension> quality from <X>% to 80% | Success: Pass quality threshold
|
||||
TASK:
|
||||
- Review readiness report weak areas
|
||||
- Apply dimension-specific improvement strategy
|
||||
- Validate improvements against criteria
|
||||
CONTEXT:
|
||||
- Session: <session-folder>
|
||||
- Current score: <X>%
|
||||
- Target: 80%
|
||||
- Readiness report: <session>/spec/readiness-report.md
|
||||
- Weak areas: <extracted-from-report>
|
||||
- Strategy: <from-dimension-strategy-table>
|
||||
- Shared memory: <session>/shared-memory.json
|
||||
EXPECTED: Improved artifacts + quality improvement summary
|
||||
CONSTRAINTS: Focus on <dimension> only
|
||||
---
|
||||
InnerLoop: true",
|
||||
status: "pending"
|
||||
})
|
||||
```
|
||||
|
||||
@@ -59,7 +59,12 @@ Receive callback from [<role>]
|
||||
+- None completed -> STOP
|
||||
```
|
||||
|
||||
**Fast-advance awareness**: Check if next task is already `in_progress` (fast-advanced by worker). If yes -> skip spawning, update active_workers.
|
||||
**Fast-advance reconciliation**: When processing any callback or resume:
|
||||
1. Read recent `fast_advance` messages from team_msg (type="fast_advance")
|
||||
2. For each: add spawned successor to `active_workers` if not already present
|
||||
3. Check if expected next task is already `in_progress` (fast-advanced)
|
||||
4. If yes -> skip spawning (already running)
|
||||
5. If no -> normal handleSpawnNext
|
||||
|
||||
---
|
||||
|
||||
@@ -205,6 +210,13 @@ Detect orphaned in_progress task (no active_worker):
|
||||
+- Reset to pending -> handleSpawnNext
|
||||
```
|
||||
|
||||
### Fast-Advance State Sync
|
||||
|
||||
On every coordinator wake (handleCallback, handleResume, handleCheck):
|
||||
1. Read team_msg entries with `type="fast_advance"` since last coordinator wake
|
||||
2. For each entry: sync `active_workers` with the spawned successor
|
||||
3. This ensures coordinator's state reflects fast-advance decisions even before the successor's callback arrives
|
||||
|
||||
### Consensus-Blocked Handling
|
||||
|
||||
```
|
||||
|
||||
@@ -1,155 +1,105 @@
|
||||
---
|
||||
name: team-planex
|
||||
description: Unified team skill for plan-and-execute pipeline. 2-member team (planner + executor) with wave pipeline for concurrent planning and execution. All roles invoke this skill with --role arg. Triggers on "team planex".
|
||||
description: Unified team skill for plan-and-execute pipeline. Uses team-worker agent architecture with role-spec files for domain logic. Coordinator orchestrates pipeline, workers are team-worker agents. Triggers on "team planex".
|
||||
allowed-tools: TeamCreate(*), TeamDelete(*), SendMessage(*), TaskCreate(*), TaskUpdate(*), TaskList(*), TaskGet(*), Task(*), AskUserQuestion(*), Read(*), Write(*), Edit(*), Bash(*), Glob(*), Grep(*)
|
||||
---
|
||||
|
||||
# Team PlanEx
|
||||
|
||||
2 成员边规划边执行团队。通过逐 Issue 节拍流水线实现 planner 和 executor 并行工作:planner 每完成一个 issue 的 solution 后立即创建 EXEC-* 任务(含中间产物文件路径),executor 从文件加载 solution 开始实现。所有成员通过 `--role=xxx` 路由。
|
||||
Unified team skill: plan-and-execute pipeline for issue-based development. Built on **team-worker agent architecture** — all worker roles share a single agent definition with role-specific Phase 2-4 loaded from markdown specs.
|
||||
|
||||
## Architecture Overview
|
||||
> **Note**: This skill has its own coordinator implementation (`roles/coordinator/role.md`), independent of `team-lifecycle-v5`. It follows the same v5 architectural patterns (team-worker agents, role-specs, Spawn-and-Stop) but with a simplified 2-role pipeline (planner + executor) tailored for plan-and-execute workflows.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────┐
|
||||
│ Skill(skill="team-planex", args="--role=xxx") │
|
||||
└────────────────┬─────────────────────────────┘
|
||||
│ Role Router
|
||||
┌───────┴───────┐
|
||||
↓ ↓
|
||||
┌─────────┐ ┌──────────┐
|
||||
│ planner │ │ executor │
|
||||
│ PLAN-* │ │ EXEC-* │
|
||||
└─────────┘ └──────────┘
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Skill(skill="team-planex", args="需求描述") │
|
||||
└──────────────────┬──────────────────────────┘
|
||||
│ Always → coordinator
|
||||
↓
|
||||
┌──────────────┐
|
||||
│ coordinator │ Phase 1-5 + dispatch/monitor commands
|
||||
└───┬──────┬───┘
|
||||
│ │
|
||||
↓ ↓
|
||||
┌──────────┐ ┌──────────┐
|
||||
│ planner │ │ executor │ team-worker agents
|
||||
│ PLAN-* │ │ EXEC-* │ with role-spec injection
|
||||
└──────────┘ └──────────┘
|
||||
```
|
||||
|
||||
**设计原则**: 只有 2 个角色,没有独立 coordinator。SKILL.md 入口承担轻量编排(创建团队、派发初始任务链),然后 planner 担任 lead 角色持续推进。
|
||||
|
||||
## Role Router
|
||||
|
||||
This skill is **coordinator-only**. Workers do NOT invoke this skill — they are spawned as `team-worker` agents directly.
|
||||
|
||||
### Input Parsing
|
||||
|
||||
Parse `$ARGUMENTS` to extract `--role`. If absent -> Orchestration Mode (SKILL.md as lightweight coordinator).
|
||||
Parse `$ARGUMENTS`. No `--role` needed — always routes to coordinator.
|
||||
|
||||
Optional flags: `--team` (default: "planex"), `--exec` (execution method), `-y`/`--yes` (auto mode).
|
||||
Optional flags: `--exec` (execution method), `-y`/`--yes` (auto mode).
|
||||
|
||||
### Role Registry
|
||||
|
||||
| Role | File | Task Prefix | Type | Compact |
|
||||
|------|------|-------------|------|---------|
|
||||
| planner | [roles/planner.md](roles/planner.md) | PLAN-* | pipeline (lead) | **压缩后必须重读** |
|
||||
| executor | [roles/executor.md](roles/executor.md) | EXEC-* | pipeline | 压缩后必须重读 |
|
||||
|
||||
> **COMPACT PROTECTION**: 角色文件是执行文档,不是参考资料。当 context compression 发生后,角色指令仅剩摘要时,**必须立即 `Read` 对应 role.md 重新加载后再继续执行**。不得基于摘要执行任何 Phase。
|
||||
| Role | Spec | Task Prefix | Type | Inner Loop |
|
||||
|------|------|-------------|------|------------|
|
||||
| coordinator | [roles/coordinator/role.md](roles/coordinator/role.md) | (none) | orchestrator | - |
|
||||
| planner | [role-specs/planner.md](role-specs/planner.md) | PLAN-* | pipeline | true |
|
||||
| executor | [role-specs/executor.md](role-specs/executor.md) | EXEC-* | pipeline | true |
|
||||
|
||||
### Dispatch
|
||||
|
||||
1. Extract `--role` from arguments
|
||||
2. If no `--role` -> Orchestration Mode (SKILL.md as lightweight coordinator)
|
||||
3. Look up role in registry -> Read the role file -> Execute its phases
|
||||
4. Unknown role -> Error with available role list: planner, executor
|
||||
Always route to coordinator. Coordinator reads `roles/coordinator/role.md` and executes its phases.
|
||||
|
||||
## Input Types
|
||||
### Orchestration Mode
|
||||
|
||||
支持 3 种输入方式(通过 args 传入 planner):
|
||||
User provides task description.
|
||||
|
||||
| 输入类型 | 格式 | 示例 |
|
||||
|----------|------|------|
|
||||
| Issue IDs | 直接传入 ID | `--role=planner ISS-20260215-001 ISS-20260215-002` |
|
||||
| 需求文本 | `--text '...'` | `--role=planner --text '实现用户认证模块'` |
|
||||
| Plan 文件 | `--plan path` | `--role=planner --plan plan/2026-02-15-auth.md` |
|
||||
**Invocation**: `Skill(skill="team-planex", args="<task-description>")`
|
||||
|
||||
## Shared Infrastructure
|
||||
**Lifecycle**:
|
||||
```
|
||||
User provides task description
|
||||
-> coordinator Phase 1-3: Parse input -> TeamCreate -> Create task chain (dispatch)
|
||||
-> coordinator Phase 4: spawn planner worker (background) -> STOP
|
||||
-> Worker (team-worker agent) executes -> SendMessage callback -> coordinator advances
|
||||
-> Loop until pipeline complete -> Phase 5 report + completion action
|
||||
```
|
||||
|
||||
### Role Isolation Rules
|
||||
**User Commands** (wake paused coordinator):
|
||||
|
||||
#### Output Tagging(强制)
|
||||
|
||||
所有角色的输出(SendMessage、team_msg)必须带 `[role_name]` 标识前缀。
|
||||
|
||||
#### Planner 边界
|
||||
|
||||
| 允许 | 禁止 |
|
||||
|------|------|
|
||||
| 需求拆解 (issue 创建) | 直接编写/修改代码 |
|
||||
| 方案设计 (issue-plan-agent) | 调用 code-developer |
|
||||
| 冲突检查 (inline files_touched) | 运行测试 |
|
||||
| 创建 EXEC-* 任务 | git commit |
|
||||
| 监控进度 (消息总线) | |
|
||||
|
||||
#### Executor 边界
|
||||
|
||||
| 允许 | 禁止 |
|
||||
|------|------|
|
||||
| 处理 EXEC-* 前缀的任务 | 创建 issue |
|
||||
| 调用 code-developer 实现 | 修改 solution/queue |
|
||||
| 运行测试验证 | 为 planner 创建 PLAN-* 任务 |
|
||||
| git commit 提交 | 直接与用户交互 (AskUserQuestion) |
|
||||
| SendMessage 给 planner | |
|
||||
|
||||
### Team Configuration
|
||||
|
||||
| Key | Value |
|
||||
|-----|-------|
|
||||
| name | planex |
|
||||
| sessionDir | `.workflow/.team/PEX-{slug}-{date}/` |
|
||||
| artifactsDir | `.workflow/.team/PEX-{slug}-{date}/artifacts/` |
|
||||
| issueDataDir | `.workflow/issues/` |
|
||||
|
||||
### Message Bus
|
||||
|
||||
每次 SendMessage 前,先调用 `mcp__ccw-tools__team_msg` 记录:
|
||||
|
||||
- 参数: operation="log", team=`<session-id>`, from=`<role>`, to=`<target-role>`, type=`<type>`, summary="[`<role>`] `<summary>`", ref=`<file_path>`
|
||||
- **注意**: `team` 必须是 **session ID** (如 `PEX-project-2026-02-27`), 不是 team name. 从任务描述的 `Session:` 字段提取.
|
||||
- **CLI fallback**: 当 MCP 不可用时 -> `ccw team log --team <session-id> --from <role> --to <target> --type <type> --summary "[<role>] ..." --json`
|
||||
|
||||
**Message types by role**:
|
||||
|
||||
| Role | Types |
|
||||
|------|-------|
|
||||
| planner | `wave_ready`, `issue_ready`, `all_planned`, `error` |
|
||||
| executor | `impl_complete`, `impl_failed`, `wave_done`, `error` |
|
||||
|
||||
### Task Lifecycle (Both Roles)
|
||||
|
||||
每个 worker 启动后执行相同的任务发现流程:
|
||||
|
||||
1. 调用 `TaskList()` 获取所有任务
|
||||
2. 筛选: subject 匹配本角色前缀 + owner 是本角色 + status 为 pending + blockedBy 为空
|
||||
3. 无任务 -> idle 等待
|
||||
4. 有任务 -> `TaskGet` 获取详情 -> `TaskUpdate` 标记 in_progress
|
||||
5. Phase 2-4: Role-specific (see roles/{role}.md)
|
||||
6. Phase 5: Report + Loop
|
||||
|
||||
**Resume Artifact Check** (防止恢复后重复产出):
|
||||
- 检查本任务的输出产物是否已存在
|
||||
- 产物完整 -> 跳到 Phase 5 报告完成
|
||||
- 产物不完整或不存在 -> 正常执行 Phase 2-4
|
||||
| Command | Action |
|
||||
|---------|--------|
|
||||
| `check` / `status` | Output execution status graph, no advancement |
|
||||
| `resume` / `continue` | Check worker states, advance next step |
|
||||
| `add <issue-ids or --text '...' or --plan path>` | Append new tasks to planner queue |
|
||||
|
||||
---
|
||||
|
||||
## Wave Pipeline (逐 Issue 节拍)
|
||||
## Command Execution Protocol
|
||||
|
||||
```
|
||||
Issue 1: planner 规划 solution -> 写中间产物 -> 冲突检查 -> 创建 EXEC-* -> issue_ready
|
||||
↓ (executor 立即开始)
|
||||
Issue 2: planner 规划 solution -> 写中间产物 -> 冲突检查 -> 创建 EXEC-* -> issue_ready
|
||||
↓ (executor 并行消费)
|
||||
Issue N: ...
|
||||
Final: planner 发送 all_planned -> executor 完成剩余 EXEC-* -> 结束
|
||||
```
|
||||
When coordinator needs to execute a command (dispatch, monitor):
|
||||
|
||||
**节拍规则**:
|
||||
- planner 每完成一个 issue 的 solution 后,**立即**创建 EXEC-* 任务并发送 `issue_ready` 信号
|
||||
- solution 写入中间产物文件(`artifacts/solutions/{issueId}.json`),EXEC-* 任务包含 `solution_file` 路径
|
||||
- executor 从文件加载 solution(无需再调 `ccw issue solution`),fallback 兼容旧模式
|
||||
- planner 不等待 executor,持续推进下一个 issue
|
||||
- 当 planner 发送 `all_planned` 消息后,executor 完成所有剩余任务即可结束
|
||||
1. **Read the command file**: `roles/coordinator/commands/<command-name>.md`
|
||||
2. **Follow the workflow** defined in the command file (Phase 2-4 structure)
|
||||
3. **Commands are inline execution guides** - NOT separate agents or subprocesses
|
||||
4. **Execute synchronously** - complete the command workflow before proceeding
|
||||
|
||||
---
|
||||
|
||||
## Input Types
|
||||
|
||||
支持 3 种输入方式:
|
||||
|
||||
| 输入类型 | 格式 | 示例 |
|
||||
|----------|------|------|
|
||||
| Issue IDs | 直接传入 ID | `ISS-20260215-001 ISS-20260215-002` |
|
||||
| 需求文本 | `--text '...'` | `--text '实现用户认证模块'` |
|
||||
| Plan 文件 | `--plan path` | `--plan plan/2026-02-15-auth.md` |
|
||||
|
||||
## Execution Method Selection
|
||||
|
||||
在编排模式或直接调用 executor 前,**必须先确定执行方式**。支持 3 种执行后端:
|
||||
支持 3 种执行后端:
|
||||
|
||||
| Executor | 后端 | 适用场景 |
|
||||
|----------|------|----------|
|
||||
@@ -159,251 +109,150 @@ Final: planner 发送 all_planned -> executor 完成剩余 EXEC-* -> 结束
|
||||
|
||||
### Selection Decision Table
|
||||
|
||||
| Condition | Execution Method | Code Review |
|
||||
|-----------|-----------------|-------------|
|
||||
| `--exec=agent` specified | Agent | Skip |
|
||||
| `--exec=codex` specified | Codex | Skip |
|
||||
| `--exec=gemini` specified | Gemini | Skip |
|
||||
| `-y` or `--yes` flag present | Auto (default Agent) | Skip |
|
||||
| No flags (interactive) | AskUserQuestion -> user choice | AskUserQuestion -> user choice |
|
||||
| Auto + task_count <= 3 | Agent | Skip |
|
||||
| Auto + task_count > 3 | Codex | Skip |
|
||||
|
||||
### Interactive Prompt (no flags)
|
||||
|
||||
当无 `-y`/`--yes` 且无 `--exec` 时,通过 AskUserQuestion 交互选择:
|
||||
|
||||
- **执行方式选项**: Agent / Codex / Gemini / Auto
|
||||
- **代码审查选项**: Skip / Gemini Review / Codex Review / Agent Review
|
||||
|
||||
### 通过 args 指定
|
||||
|
||||
```bash
|
||||
# 显式指定
|
||||
Skill(skill="team-planex", args="--exec=codex ISS-xxx")
|
||||
Skill(skill="team-planex", args="--exec=agent --text '简单功能'")
|
||||
|
||||
# Auto 模式(跳过交互,-y 或 --yes)
|
||||
Skill(skill="team-planex", args="-y --text '添加日志'")
|
||||
```
|
||||
| Condition | Execution Method |
|
||||
|-----------|-----------------|
|
||||
| `--exec=agent` specified | Agent |
|
||||
| `--exec=codex` specified | Codex |
|
||||
| `--exec=gemini` specified | Gemini |
|
||||
| `-y` or `--yes` flag present | Auto (default Agent) |
|
||||
| No flags (interactive) | AskUserQuestion -> user choice |
|
||||
| Auto + task_count <= 3 | Agent |
|
||||
| Auto + task_count > 3 | Codex |
|
||||
|
||||
---
|
||||
|
||||
## Orchestration Mode
|
||||
## Coordinator Spawn Template
|
||||
|
||||
当不带 `--role` 调用时,SKILL.md 进入轻量编排模式(无独立 coordinator 角色,SKILL.md 自身承担编排)。
|
||||
### v5 Worker Spawn (all roles)
|
||||
|
||||
**Invocation**: `Skill(skill="team-planex", args="任务描述")`
|
||||
|
||||
**Lifecycle**:
|
||||
|
||||
```
|
||||
用户提供任务描述
|
||||
-> SKILL.md 解析输入(Issue IDs / 需求文本 / Plan 文件)
|
||||
-> 初始化 sessionDir + artifacts 目录
|
||||
-> 执行方式选择(见 Execution Method Selection)
|
||||
-> 创建 PLAN-001 任务(owner: planner)
|
||||
-> Spawn planner agent (后台)
|
||||
-> Spawn executor agent (后台)
|
||||
-> 返回(planner lead 后续推进)
|
||||
```
|
||||
|
||||
**User Commands** (唤醒 / 检查状态):
|
||||
|
||||
| Command | Action |
|
||||
|---------|--------|
|
||||
| `check` / `status` | 输出执行状态图,不推进 |
|
||||
| `resume` / `continue` | 检查 worker 状态,推进下一步 |
|
||||
| `add <issue-ids or --text '...' or --plan path>` | 追加新任务到 planner 队列,不影响已有任务 |
|
||||
|
||||
**`add` 命令处理逻辑**:
|
||||
|
||||
1. 解析输入(Issue IDs / `--text` / `--plan`)
|
||||
2. 获取当前最大 PLAN-* 序号(`TaskList` 筛选 `PLAN-*` prefix),计算下一个序号 N
|
||||
3. `TaskCreate({ subject: "PLAN-00N: ...", owner: "planner", status: "pending" })`,description 写入新 issue IDs 或需求文本
|
||||
4. 若 planner 已发送 `all_planned`(检查 team_msg 日志),额外 `SendMessage` 通知 planner 有新任务,使其重新进入 Loop Check
|
||||
5. 若 executor 已退出等待,同样发送消息唤醒 executor 继续轮询 `EXEC-*` 任务
|
||||
|
||||
### Coordinator Spawn Template
|
||||
|
||||
SKILL.md 编排模式 spawn workers 时使用后台模式 (Spawn-and-Go):
|
||||
|
||||
**Planner Spawn**:
|
||||
When coordinator spawns workers, use `team-worker` agent with role-spec path:
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: "Spawn planner worker",
|
||||
subagent_type: "team-worker",
|
||||
description: "Spawn <role> worker",
|
||||
team_name: <team-name>,
|
||||
name: "planner",
|
||||
name: "<role>",
|
||||
run_in_background: true,
|
||||
prompt: `你是 team "<team-name>" 的 PLANNER。
|
||||
prompt: `## Role Assignment
|
||||
role: <role>
|
||||
role_spec: .claude/skills/team-planex/role-specs/<role>.md
|
||||
session: <session-folder>
|
||||
session_id: <session-id>
|
||||
team_name: <team-name>
|
||||
requirement: <task-description>
|
||||
inner_loop: <true|false>
|
||||
execution_method: <agent|codex|gemini>
|
||||
|
||||
## 首要指令
|
||||
你的所有工作必须通过调用 Skill 获取角色定义后执行:
|
||||
Skill(skill="team-planex", args="--role=planner")
|
||||
|
||||
当前输入: <planner-input>
|
||||
Session: <session-dir>
|
||||
|
||||
## 执行配置
|
||||
executor 的执行方式: <execution-method>
|
||||
创建 EXEC-* 任务时,description 中包含:
|
||||
execution_method: <method>
|
||||
code_review: <review-tool>
|
||||
|
||||
## 中间产物(必须)
|
||||
每个 issue 的 solution 写入: <session-dir>/artifacts/solutions/{issueId}.json
|
||||
EXEC-* 任务 description 必须包含 solution_file 字段指向该文件
|
||||
每完成一个 issue 立即发送 issue_ready 消息并创建 EXEC-* 任务
|
||||
|
||||
## 角色准则
|
||||
- 只处理 PLAN-* 任务,不执行其他角色工作
|
||||
- 所有输出带 [planner] 标识前缀
|
||||
- 仅与 coordinator 通信
|
||||
- 不使用 TaskCreate 为其他角色创建任务(EXEC-* 除外)
|
||||
- 每次 SendMessage 前先调用 mcp__ccw-tools__team_msg 记录
|
||||
|
||||
## 工作流程
|
||||
1. 调用 Skill -> 获取角色定义和执行逻辑
|
||||
2. 按 role.md 5-Phase 流程执行
|
||||
3. team_msg + SendMessage 结果给 coordinator
|
||||
4. TaskUpdate completed -> 检查下一个任务`
|
||||
Read role_spec file to load Phase 2-4 domain instructions.
|
||||
Execute built-in Phase 1 (task discovery) -> role-spec Phase 2-4 -> built-in Phase 5 (report).`
|
||||
})
|
||||
```
|
||||
|
||||
**Executor Spawn**:
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "general-purpose",
|
||||
description: "Spawn executor worker",
|
||||
team_name: <team-name>,
|
||||
name: "executor",
|
||||
run_in_background: true,
|
||||
prompt: `你是 team "<team-name>" 的 EXECUTOR。
|
||||
|
||||
## 首要指令
|
||||
你的所有工作必须通过调用 Skill 获取角色定义后执行:
|
||||
Skill(skill="team-planex", args="--role=executor")
|
||||
|
||||
## 执行配置
|
||||
默认执行方式: <execution-method>
|
||||
代码审查: <review-tool>
|
||||
(每个 EXEC-* 任务 description 中可能包含 execution_method 覆盖)
|
||||
|
||||
## Solution 加载
|
||||
优先从 EXEC-* 任务 description 中的 solution_file 路径读取 solution JSON 文件
|
||||
无 solution_file 时 fallback 到 ccw issue solution 命令
|
||||
|
||||
## 角色准则
|
||||
- 只处理 EXEC-* 任务,不执行其他角色工作
|
||||
- 所有输出带 [executor] 标识前缀
|
||||
- 根据 execution_method 选择执行后端(Agent/Codex/Gemini)
|
||||
- 仅与 coordinator 通信
|
||||
- 每次 SendMessage 前先调用 mcp__ccw-tools__team_msg 记录
|
||||
|
||||
## 工作流程
|
||||
1. 调用 Skill -> 获取角色定义和执行逻辑
|
||||
2. 按 role.md 5-Phase 流程执行
|
||||
3. team_msg + SendMessage 结果给 coordinator
|
||||
4. TaskUpdate completed -> 检查下一个任务`
|
||||
})
|
||||
```
|
||||
**Inner Loop roles** (planner, executor): Set `inner_loop: true`. The team-worker agent handles the loop internally.
|
||||
|
||||
---
|
||||
|
||||
## Cadence Control
|
||||
## Pipeline Definitions
|
||||
|
||||
**节拍模型**: Wave beat -- planner 持续推进,executor 并行消费。每个 wave = planner 完成一个 issue -> executor 开始实现。
|
||||
### Pipeline Diagram
|
||||
|
||||
```
|
||||
Wave Beat Cycle (逐 Issue 节拍)
|
||||
===================================================================
|
||||
Event SKILL.md (编排) Workers
|
||||
-------------------------------------------------------------------
|
||||
用户调用 ---------> ┌─ 解析输入 ─────────┐
|
||||
│ 初始化 session │
|
||||
│ 选择执行方式 │
|
||||
├─ 创建 PLAN-001 ─────┤
|
||||
│ spawn planner ──────┼──> [Planner] Phase 1-5
|
||||
│ spawn executor ─────┼──> [Executor] Phase 1 (idle)
|
||||
└─ 返回 (编排结束) ───┘ │
|
||||
│
|
||||
Wave 1: Planner: issue-1 solution
|
||||
-> 写产物 -> 创建 EXEC-001
|
||||
-> issue_ready ---------> Executor 开始 EXEC-001
|
||||
Wave 2: Planner: issue-2 solution
|
||||
-> 写产物 -> 创建 EXEC-002
|
||||
-> issue_ready ---------> Executor 并行消费
|
||||
Issue-based beat pipeline (逐 Issue 节拍)
|
||||
═══════════════════════════════════════════════════
|
||||
PLAN-001 ──> [planner] issue-1 solution → EXEC-001
|
||||
issue-2 solution → EXEC-002
|
||||
...
|
||||
issue-N solution → EXEC-00N
|
||||
all_planned signal
|
||||
|
||||
EXEC-001 ──> [executor] implement issue-1
|
||||
EXEC-002 ──> [executor] implement issue-2
|
||||
...
|
||||
Wave N: Planner: all_planned
|
||||
Executor: 完成剩余 EXEC-*
|
||||
===================================================================
|
||||
EXEC-00N ──> [executor] implement issue-N
|
||||
═══════════════════════════════════════════════════
|
||||
```
|
||||
|
||||
**Pipeline 节拍视图**:
|
||||
### Cadence Control
|
||||
|
||||
**Beat model**: Event-driven Spawn-and-Stop. Each beat = coordinator wake -> process callback -> spawn next -> STOP.
|
||||
|
||||
```
|
||||
Wave pipeline (planner lead, executor follows)
|
||||
──────────────────────────────────────────────────────────
|
||||
Wave 1 2 3 ... N Final
|
||||
│ │ │ │ │
|
||||
P:iss-1 P:iss-2 P:iss-3 P:iss-N P:all_planned
|
||||
↓ ↓ ↓ ↓ ↓
|
||||
E:exec1 E:exec2 E:exec3 E:execN E:finish
|
||||
│ │
|
||||
(并行消费,executor 不等 planner 全部完成)
|
||||
|
||||
P=planner E=executor
|
||||
Beat Cycle (Coordinator Spawn-and-Stop)
|
||||
======================================================================
|
||||
Event Coordinator Workers
|
||||
----------------------------------------------------------------------
|
||||
用户调用 ----------> ┌─ Phase 1-3 ──────────┐
|
||||
│ 解析输入 │
|
||||
│ TeamCreate │
|
||||
│ 创建 PLAN-001 │
|
||||
├─ Phase 4 ─────────────┤
|
||||
│ spawn planner ────────┼──> [planner] Phase 1-5
|
||||
└─ STOP (idle) ──────────┘ │
|
||||
│
|
||||
callback <─ planner issue_ready ────────────────────────┘
|
||||
┌─ monitor.handleCallback ─┐
|
||||
│ 检查新 EXEC-* 任务 │
|
||||
│ spawn executor ─────────┼──> [executor] Phase 1-5
|
||||
└─ STOP (idle) ───────────┘ │
|
||||
│
|
||||
callback <─ executor impl_complete ────────┘
|
||||
┌─ monitor.handleCallback ─┐
|
||||
│ 标记完成 │
|
||||
│ 检查下一个 ready task │
|
||||
└─ spawn/STOP ────────────┘
|
||||
======================================================================
|
||||
```
|
||||
|
||||
**检查点 (Checkpoint)**:
|
||||
**Checkpoints**:
|
||||
|
||||
| 触发条件 | 位置 | 行为 |
|
||||
|----------|------|------|
|
||||
| Planner 全部完成 | all_planned 信号 | Executor 完成剩余 EXEC-* 后结束 |
|
||||
| Pipeline 停滞 | 无 ready + 无 running | Planner 检查并 escalate to user |
|
||||
| Executor 阻塞 | Executor blocked > 2 tasks | Planner escalate to user |
|
||||
| Pipeline 停滞 | 无 ready + 无 running | Coordinator escalate to user |
|
||||
| Executor 阻塞 | blocked > 2 tasks | Coordinator escalate to user |
|
||||
|
||||
**Stall 检测**:
|
||||
|
||||
| 检查项 | 条件 | 处理 |
|
||||
|--------|------|------|
|
||||
| Executor 无响应 | in_progress EXEC-* 无回调 | 报告等待中的任务列表 |
|
||||
| Pipeline 死锁 | 无 ready + 无 running + 有 pending | 检查 blockedBy 依赖链 |
|
||||
| Planner 规划失败 | issue planning error | Retry once, then skip to next issue |
|
||||
|
||||
---
|
||||
|
||||
## Task Metadata Registry
|
||||
### Task Metadata Registry
|
||||
|
||||
| Task ID | Role | Phase | Dependencies | Description |
|
||||
|---------|------|-------|-------------|-------------|
|
||||
| PLAN-001 | planner | planning | (none) | 初始规划:需求拆解、issue 创建、方案设计 |
|
||||
| EXEC-001 | executor | execution | PLAN-001 (implicit via issue_ready) | 第一个 issue 的代码实现 |
|
||||
| EXEC-002 | executor | execution | (planner issue_ready) | 第二个 issue 的代码实现 |
|
||||
| EXEC-N | executor | execution | (planner issue_ready) | 第 N 个 issue 的代码实现 |
|
||||
| EXEC-001 | executor | execution | (created by planner at runtime) | 第一个 issue 的代码实现 |
|
||||
| EXEC-N | executor | execution | (created by planner at runtime) | 第 N 个 issue 的代码实现 |
|
||||
|
||||
> 注: EXEC-* 任务由 planner 在运行时逐个创建(逐 Issue 节拍),不预先定义完整任务链。
|
||||
|
||||
---
|
||||
|
||||
## Wisdom Accumulation (所有角色)
|
||||
## Completion Action
|
||||
|
||||
跨任务知识积累。SKILL.md 编排模式在 session 初始化时创建 `wisdom/` 目录。
|
||||
When the pipeline completes (all tasks done, coordinator Phase 5):
|
||||
|
||||
**目录**:
|
||||
```
|
||||
<session-folder>/wisdom/
|
||||
├── learnings.md # 模式和洞察
|
||||
├── decisions.md # 架构和设计决策
|
||||
├── conventions.md # 代码库约定
|
||||
└── issues.md # 已知风险和问题
|
||||
```javascript
|
||||
if (autoYes) {
|
||||
// Auto mode: Archive & Clean without prompting
|
||||
completionAction = "Archive & Clean";
|
||||
} else {
|
||||
AskUserQuestion({
|
||||
questions: [{
|
||||
question: "Team pipeline complete. What would you like to do?",
|
||||
header: "Completion",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Archive & Clean (Recommended)", description: "Archive session, clean up tasks and team resources" },
|
||||
{ label: "Keep Active", description: "Keep session active for follow-up work or inspection" },
|
||||
{ label: "Export Results", description: "Export deliverables to a specified location, then clean" }
|
||||
]
|
||||
}]
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Worker 加载** (Phase 2): 从 task description 提取 `Session: <path>`, 读取 wisdom 目录下各文件。
|
||||
**Worker 贡献** (Phase 4/5): 将本任务发现写入对应 wisdom 文件。
|
||||
| Choice | Action |
|
||||
|--------|--------|
|
||||
| Archive & Clean | Update session status="completed" -> TeamDelete -> output final summary |
|
||||
| Keep Active | Update session status="paused" -> output resume instructions |
|
||||
| Export Results | AskUserQuestion for target path -> copy deliverables -> Archive & Clean |
|
||||
|
||||
---
|
||||
|
||||
@@ -426,14 +275,30 @@ P=planner E=executor
|
||||
|
||||
---
|
||||
|
||||
## Message Bus
|
||||
|
||||
每次 SendMessage 前,先调用 `mcp__ccw-tools__team_msg` 记录:
|
||||
|
||||
- 参数: operation="log", team=`<session-id>`, from=`<role>`, to=`<target-role>`, type=`<type>`, summary="[`<role>`] `<summary>`"
|
||||
- **注意**: `team` 必须是 **session ID** (如 `PEX-project-2026-02-27`), 不是 team name.
|
||||
|
||||
**Message types by role**:
|
||||
|
||||
| Role | Types |
|
||||
|------|-------|
|
||||
| planner | `issue_ready`, `all_planned`, `error` |
|
||||
| executor | `impl_complete`, `impl_failed`, `error` |
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Unknown --role value | Error with available role list: planner, executor |
|
||||
| Missing --role arg | Enter orchestration mode (SKILL.md as lightweight coordinator) |
|
||||
| Role file not found | Error with expected path (roles/{name}.md) |
|
||||
| Planner issue planning failure | Retry once, then report error and skip to next issue |
|
||||
| Executor impl failure | Report to planner, continue with next EXEC-* task |
|
||||
| No EXEC-* tasks yet | Executor idles, polls for new tasks |
|
||||
| Pipeline stall | Planner monitors -- if executor blocked > 2 tasks, escalate to user |
|
||||
| Role spec file not found | Error with expected path (role-specs/<name>.md) |
|
||||
| Command file not found | Fallback to inline execution in coordinator role.md |
|
||||
| team-worker agent unavailable | Error: requires .claude/agents/team-worker.md |
|
||||
| Planner issue planning failure | Retry once, then skip to next issue |
|
||||
| Executor impl failure | Report to coordinator, continue with next EXEC-* task |
|
||||
| Pipeline stall | Coordinator monitors, escalate to user |
|
||||
| Completion action timeout | Default to Keep Active |
|
||||
|
||||
102
.claude/skills/team-planex/role-specs/executor.md
Normal file
102
.claude/skills/team-planex/role-specs/executor.md
Normal file
@@ -0,0 +1,102 @@
|
||||
---
|
||||
prefix: EXEC
|
||||
inner_loop: true
|
||||
message_types:
|
||||
success: impl_complete
|
||||
error: impl_failed
|
||||
---
|
||||
|
||||
# Executor
|
||||
|
||||
Single-issue implementation agent. Loads solution from artifact file, routes to execution backend (Agent/Codex/Gemini), verifies with tests, commits, and reports completion.
|
||||
|
||||
## Phase 2: Task & Solution Loading
|
||||
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Issue ID | Task description `Issue ID:` field | Yes |
|
||||
| Solution file | Task description `Solution file:` field | Yes |
|
||||
| Session folder | Task description `Session:` field | Yes |
|
||||
| Execution method | Task description `Execution method:` field | Yes |
|
||||
| Wisdom | `<session>/wisdom/` | No |
|
||||
|
||||
1. Extract issue ID, solution file path, session folder, execution method
|
||||
2. Load solution JSON from file (file-first)
|
||||
3. If file not found -> fallback: `ccw issue solution <issueId> --json`
|
||||
4. Load wisdom files for conventions and patterns
|
||||
5. Verify solution has required fields: title, tasks
|
||||
|
||||
## Phase 3: Implementation
|
||||
|
||||
### Backend Selection
|
||||
|
||||
| Method | Backend | Agent Type |
|
||||
|--------|---------|------------|
|
||||
| `agent` | code-developer subagent | Inline delegation |
|
||||
| `codex` | `ccw cli --tool codex --mode write` | Background CLI |
|
||||
| `gemini` | `ccw cli --tool gemini --mode write` | Background CLI |
|
||||
|
||||
### Agent Backend
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "code-developer",
|
||||
description: "Implement <issue-title>",
|
||||
prompt: `Issue: <issueId>
|
||||
Title: <solution.title>
|
||||
Solution: <solution JSON>
|
||||
Implement all tasks from the solution plan.`,
|
||||
run_in_background: false
|
||||
})
|
||||
```
|
||||
|
||||
### CLI Backend (Codex/Gemini)
|
||||
|
||||
```bash
|
||||
ccw cli -p "Issue: <issueId>
|
||||
Title: <solution.title>
|
||||
Solution Plan: <solution JSON>
|
||||
Implement all tasks. Follow existing patterns. Run tests." \
|
||||
--tool <codex|gemini> --mode write
|
||||
```
|
||||
|
||||
Wait for CLI completion before proceeding.
|
||||
|
||||
## Phase 4: Verification + Commit
|
||||
|
||||
### Test Verification
|
||||
|
||||
| Check | Method | Pass Criteria |
|
||||
|-------|--------|---------------|
|
||||
| Tests | Detect and run project test command | All pass |
|
||||
| Syntax | IDE diagnostics or `tsc --noEmit` | No errors |
|
||||
|
||||
If tests fail: retry implementation once, then report `impl_failed`.
|
||||
|
||||
### Commit
|
||||
|
||||
```bash
|
||||
git add -A
|
||||
git commit -m "feat(<issueId>): <solution.title>"
|
||||
```
|
||||
|
||||
### Update Issue Status
|
||||
|
||||
```bash
|
||||
ccw issue update <issueId> --status completed
|
||||
```
|
||||
|
||||
### Report
|
||||
|
||||
Send `impl_complete` message to coordinator via team_msg + SendMessage:
|
||||
- summary: `[executor] Implemented <issueId>: <title>`
|
||||
|
||||
## Boundaries
|
||||
|
||||
| Allowed | Prohibited |
|
||||
|---------|-----------|
|
||||
| Load solution from file | Create or modify issues |
|
||||
| Implement via Agent/Codex/Gemini | Modify solution artifacts |
|
||||
| Run tests | Spawn additional agents |
|
||||
| git commit | Direct user interaction |
|
||||
| Update issue status | Create tasks for other roles |
|
||||
111
.claude/skills/team-planex/role-specs/planner.md
Normal file
111
.claude/skills/team-planex/role-specs/planner.md
Normal file
@@ -0,0 +1,111 @@
|
||||
---
|
||||
prefix: PLAN
|
||||
inner_loop: true
|
||||
subagents: [issue-plan-agent]
|
||||
message_types:
|
||||
success: issue_ready
|
||||
error: error
|
||||
---
|
||||
|
||||
# Planner
|
||||
|
||||
Requirement decomposition → issue creation → solution design → EXEC-* task creation. Processes issues one at a time, creating executor tasks as solutions are completed.
|
||||
|
||||
## Phase 2: Context Loading
|
||||
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Input type + raw input | Task description | Yes |
|
||||
| Session folder | Task description `Session:` field | Yes |
|
||||
| Execution method | Task description `Execution method:` field | Yes |
|
||||
| Wisdom | `<session>/wisdom/` | No |
|
||||
|
||||
1. Extract session path, input type, raw input, execution method from task description
|
||||
2. Load wisdom files if available
|
||||
3. Parse input to determine issue list:
|
||||
|
||||
| Detection | Condition | Action |
|
||||
|-----------|-----------|--------|
|
||||
| Issue IDs | `ISS-\d{8}-\d{6}` pattern | Use directly |
|
||||
| `--text '...'` | Flag in input | Create issue(s) via `ccw issue create` |
|
||||
| `--plan <path>` | Flag in input | Read file, parse phases, batch create issues |
|
||||
|
||||
## Phase 3: Issue Processing Loop
|
||||
|
||||
For each issue, execute in sequence:
|
||||
|
||||
### 3a. Generate Solution
|
||||
|
||||
Delegate to `issue-plan-agent` subagent:
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "issue-plan-agent",
|
||||
description: "Plan issue <issueId>",
|
||||
prompt: `issue_ids: ["<issueId>"]
|
||||
project_root: "<project-root>"
|
||||
Generate solution for this issue. Auto-bind single solution.`,
|
||||
run_in_background: false
|
||||
})
|
||||
```
|
||||
|
||||
### 3b. Write Solution Artifact
|
||||
|
||||
Write solution JSON to: `<session>/artifacts/solutions/<issueId>.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"session_id": "<session-id>",
|
||||
"issue_id": "<issueId>",
|
||||
"solution": <solution-from-agent>,
|
||||
"planned_at": "<ISO timestamp>"
|
||||
}
|
||||
```
|
||||
|
||||
### 3c. Check Conflicts
|
||||
|
||||
Extract `files_touched` from solution. Compare against prior solutions in session.
|
||||
Overlapping files -> log warning to `wisdom/issues.md`, continue.
|
||||
|
||||
### 3d. Create EXEC-* Task
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "EXEC-00N: Implement <issue-title>",
|
||||
description: `Implement solution for issue <issueId>.
|
||||
|
||||
Issue ID: <issueId>
|
||||
Solution file: <session>/artifacts/solutions/<issueId>.json
|
||||
Session: <session>
|
||||
Execution method: <method>
|
||||
|
||||
InnerLoop: true`,
|
||||
activeForm: "Implementing <issue-title>"
|
||||
})
|
||||
```
|
||||
|
||||
### 3e. Signal issue_ready
|
||||
|
||||
Send message via team_msg + SendMessage to coordinator:
|
||||
- type: `issue_ready`
|
||||
- summary: `[planner] Solution ready for <issueId>`
|
||||
|
||||
### 3f. Continue Loop
|
||||
|
||||
Process next issue. Do NOT wait for executor.
|
||||
|
||||
## Phase 4: Completion Signal
|
||||
|
||||
After all issues processed:
|
||||
1. Send `all_planned` message to coordinator via team_msg + SendMessage
|
||||
2. Summary: total issues planned, EXEC-* tasks created
|
||||
|
||||
## Boundaries
|
||||
|
||||
| Allowed | Prohibited |
|
||||
|---------|-----------|
|
||||
| Parse input, create issues | Write/modify business code |
|
||||
| Generate solutions (issue-plan-agent) | Run tests |
|
||||
| Write solution artifacts | git commit |
|
||||
| Create EXEC-* tasks | Call code-developer |
|
||||
| Conflict checking | Direct user interaction |
|
||||
@@ -0,0 +1,87 @@
|
||||
# Command: dispatch
|
||||
|
||||
## Purpose
|
||||
|
||||
Create the initial task chain for team-planex pipeline. Creates PLAN-001 for planner. EXEC-* tasks are NOT pre-created — planner creates them at runtime per issue.
|
||||
|
||||
## Phase 2: Context Loading
|
||||
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Input type | Phase 1 requirements | Yes |
|
||||
| Raw input | Phase 1 requirements | Yes |
|
||||
| Session folder | Phase 2 session init | Yes |
|
||||
| Execution method | Phase 1 requirements | Yes |
|
||||
|
||||
## Phase 3: Task Chain Creation
|
||||
|
||||
### Task Creation
|
||||
|
||||
Create a single PLAN-001 task for the planner:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "PLAN-001: Requirement decomposition and solution design",
|
||||
description: `Decompose requirements into issues and generate solutions.
|
||||
|
||||
Input type: <issues|text|plan>
|
||||
Input: <raw-input>
|
||||
Session: <session-folder>
|
||||
Execution method: <agent|codex|gemini>
|
||||
|
||||
## Instructions
|
||||
1. Parse input to get issue list
|
||||
2. For each issue: call issue-plan-agent → write solution artifact
|
||||
3. After each solution: create EXEC-* task (owner: executor) with solution_file path
|
||||
4. After all issues: send all_planned signal
|
||||
|
||||
InnerLoop: true`,
|
||||
activeForm: "Planning requirements"
|
||||
})
|
||||
```
|
||||
|
||||
### EXEC-* Task Template (for planner reference)
|
||||
|
||||
Planner creates EXEC-* tasks at runtime using this template:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "EXEC-00N: Implement <issue-title>",
|
||||
description: `Implement solution for issue <issueId>.
|
||||
|
||||
Issue ID: <issueId>
|
||||
Solution file: <session-folder>/artifacts/solutions/<issueId>.json
|
||||
Session: <session-folder>
|
||||
Execution method: <agent|codex|gemini>
|
||||
|
||||
InnerLoop: true`,
|
||||
activeForm: "Implementing <issue-title>"
|
||||
})
|
||||
```
|
||||
|
||||
### Add Command Task Template
|
||||
|
||||
When coordinator handles `add` command, create additional PLAN tasks:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "PLAN-00N: Additional requirement decomposition",
|
||||
description: `Additional requirements to decompose.
|
||||
|
||||
Input type: <issues|text|plan>
|
||||
Input: <new-input>
|
||||
Session: <session-folder>
|
||||
Execution method: <execution-method>
|
||||
|
||||
InnerLoop: true`,
|
||||
activeForm: "Planning additional requirements"
|
||||
})
|
||||
```
|
||||
|
||||
## Phase 4: Validation
|
||||
|
||||
| Check | Criteria |
|
||||
|-------|----------|
|
||||
| PLAN-001 created | TaskList shows PLAN-001 |
|
||||
| Description complete | Contains Input, Session, Execution method |
|
||||
| No orphans | All tasks have valid owner |
|
||||
163
.claude/skills/team-planex/roles/coordinator/commands/monitor.md
Normal file
163
.claude/skills/team-planex/roles/coordinator/commands/monitor.md
Normal file
@@ -0,0 +1,163 @@
|
||||
# Command: monitor
|
||||
|
||||
## Purpose
|
||||
|
||||
Event-driven pipeline coordination with Spawn-and-Stop pattern. Three wake-up sources: worker callbacks, user `check`, user `resume`.
|
||||
|
||||
## Constants
|
||||
|
||||
| Constant | Value | Description |
|
||||
|----------|-------|-------------|
|
||||
| SPAWN_MODE | background | All workers spawned via `Task(run_in_background: true)` |
|
||||
| ONE_STEP_PER_INVOCATION | true | Coordinator does one operation then STOPS |
|
||||
| WORKER_AGENT | team-worker | All workers are team-worker agents |
|
||||
|
||||
## Phase 2: Context Loading
|
||||
|
||||
| Input | Source | Required |
|
||||
|-------|--------|----------|
|
||||
| Session file | `<session-folder>/team-session.json` | Yes |
|
||||
| Task list | `TaskList()` | Yes |
|
||||
| Active workers | session.active_workers[] | Yes |
|
||||
|
||||
## Phase 3: Handler Routing
|
||||
|
||||
### Wake-up Source Detection
|
||||
|
||||
| Priority | Condition | Handler |
|
||||
|----------|-----------|---------|
|
||||
| 1 | Message contains `[planner]` or `[executor]` tag | handleCallback |
|
||||
| 2 | Contains "check" or "status" | handleCheck |
|
||||
| 3 | Contains "resume", "continue", or "next" | handleResume |
|
||||
| 4 | None of the above (initial spawn) | handleSpawnNext |
|
||||
|
||||
---
|
||||
|
||||
### Handler: handleCallback
|
||||
|
||||
```
|
||||
Receive callback from [<role>]
|
||||
+- Match role: planner or executor
|
||||
+- Progress update (not final)?
|
||||
| +- YES -> Update session -> STOP
|
||||
+- Task status = completed?
|
||||
| +- YES -> remove from active_workers -> update session
|
||||
| | +- role = planner?
|
||||
| | | +- Check for new EXEC-* tasks (planner creates them)
|
||||
| | | +- -> handleSpawnNext (spawn executor for new EXEC-* tasks)
|
||||
| | +- role = executor?
|
||||
| | +- Mark issue done
|
||||
| | +- -> handleSpawnNext (check for more EXEC-* tasks)
|
||||
| +- NO -> progress message -> STOP
|
||||
+- No matching worker found
|
||||
+- Scan all active workers for completed tasks
|
||||
+- Found completed -> process -> handleSpawnNext
|
||||
+- None completed -> STOP
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Handler: handleCheck
|
||||
|
||||
Read-only status report. No advancement.
|
||||
|
||||
```
|
||||
[coordinator] PlanEx Pipeline Status
|
||||
[coordinator] Progress: <completed>/<total> (<percent>%)
|
||||
|
||||
[coordinator] Task Graph:
|
||||
PLAN-001: <status-icon> <summary>
|
||||
EXEC-001: <status-icon> <issue-title>
|
||||
EXEC-002: <status-icon> <issue-title>
|
||||
...
|
||||
|
||||
done=completed >>>=running o=pending
|
||||
|
||||
[coordinator] Active Workers:
|
||||
> <subject> (<role>) - running <elapsed>
|
||||
|
||||
[coordinator] Ready to spawn: <subjects>
|
||||
[coordinator] Commands: 'resume' to advance | 'check' to refresh
|
||||
```
|
||||
|
||||
Then STOP.
|
||||
|
||||
---
|
||||
|
||||
### Handler: handleResume
|
||||
|
||||
```
|
||||
Load active_workers
|
||||
+- No active workers -> handleSpawnNext
|
||||
+- Has active workers -> check each:
|
||||
+- completed -> mark done, log
|
||||
+- in_progress -> still running
|
||||
+- other -> worker failure -> reset to pending
|
||||
After:
|
||||
+- Some completed -> handleSpawnNext
|
||||
+- All running -> report status -> STOP
|
||||
+- All failed -> handleSpawnNext (retry)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Handler: handleSpawnNext
|
||||
|
||||
```
|
||||
Collect task states from TaskList()
|
||||
+- Filter tasks: PLAN-* and EXEC-* prefixes
|
||||
+- readySubjects: pending + not blocked (no blockedBy or all blockedBy completed)
|
||||
+- NONE ready + work in progress -> report waiting -> STOP
|
||||
+- NONE ready + nothing running -> PIPELINE_COMPLETE -> Phase 5
|
||||
+- HAS ready tasks -> for each:
|
||||
+- Inner Loop role AND already has active_worker for that role?
|
||||
| +- YES -> SKIP spawn (existing worker picks up via inner loop)
|
||||
| +- NO -> spawn below
|
||||
+- Determine role from task prefix:
|
||||
| +- PLAN-* -> planner
|
||||
| +- EXEC-* -> executor
|
||||
+- Spawn team-worker:
|
||||
Task({
|
||||
subagent_type: "team-worker",
|
||||
description: "Spawn <role> worker for <subject>",
|
||||
team_name: <team-name>,
|
||||
name: "<role>",
|
||||
run_in_background: true,
|
||||
prompt: `## Role Assignment
|
||||
role: <role>
|
||||
role_spec: .claude/skills/team-planex/role-specs/<role>.md
|
||||
session: <session-folder>
|
||||
session_id: <session-id>
|
||||
team_name: <team-name>
|
||||
requirement: <task-description>
|
||||
inner_loop: true
|
||||
execution_method: <method>`
|
||||
})
|
||||
+- Add to session.active_workers
|
||||
Update session -> output summary -> STOP
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Validation
|
||||
|
||||
| Check | Criteria |
|
||||
|-------|----------|
|
||||
| Session state consistent | active_workers matches in_progress tasks |
|
||||
| No orphaned tasks | Every in_progress has active_worker |
|
||||
| Pipeline completeness | All expected EXEC-* tasks accounted for |
|
||||
|
||||
## Worker Failure Handling
|
||||
|
||||
1. Reset task -> pending via TaskUpdate
|
||||
2. Log via team_msg (type: error)
|
||||
3. Report to user
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Session file not found | Error, suggest re-initialization |
|
||||
| Unknown role callback | Log, scan for other completions |
|
||||
| All workers running on resume | Report status, suggest check later |
|
||||
| Pipeline stall (no ready + no running + has pending) | Check blockedBy chains, report |
|
||||
154
.claude/skills/team-planex/roles/coordinator/role.md
Normal file
154
.claude/skills/team-planex/roles/coordinator/role.md
Normal file
@@ -0,0 +1,154 @@
|
||||
# Coordinator Role
|
||||
|
||||
Orchestrate the team-planex pipeline: parse input, create team, dispatch tasks, monitor progress via Spawn-and-Stop beats. Uses **team-worker agent** for all worker spawns.
|
||||
|
||||
## Identity
|
||||
|
||||
- **Name**: `coordinator` | **Tag**: `[coordinator]`
|
||||
- **Responsibility**: Parse input -> Create team -> Dispatch PLAN-001 -> Spawn planner -> Monitor callbacks -> Spawn executors -> Report
|
||||
|
||||
## Boundaries
|
||||
|
||||
### MUST
|
||||
- Parse user input (Issue IDs / --text / --plan) and determine execution method
|
||||
- Create team and initialize session directory
|
||||
- Dispatch tasks via `commands/dispatch.md`
|
||||
- Monitor progress via `commands/monitor.md` with Spawn-and-Stop pattern
|
||||
- Maintain session state (team-session.json)
|
||||
|
||||
### MUST NOT
|
||||
- Execute planning or implementation work directly (delegate to workers)
|
||||
- Modify solution artifacts or code (workers own their deliverables)
|
||||
- Call implementation subagents (code-developer, etc.) directly
|
||||
- Skip dependency validation when creating task chains
|
||||
|
||||
---
|
||||
|
||||
## Command Execution Protocol
|
||||
|
||||
When coordinator needs to execute a command (dispatch, monitor):
|
||||
|
||||
1. **Read the command file**: `roles/coordinator/commands/<command-name>.md`
|
||||
2. **Follow the workflow** defined in the command file
|
||||
3. **Commands are inline execution guides** - NOT separate agents
|
||||
4. **Execute synchronously** - complete the command workflow before proceeding
|
||||
|
||||
---
|
||||
|
||||
## Entry Router
|
||||
|
||||
When coordinator is invoked, detect invocation type:
|
||||
|
||||
| Detection | Condition | Handler |
|
||||
|-----------|-----------|---------|
|
||||
| Worker callback | Message contains `[planner]` or `[executor]` tag | -> handleCallback (monitor.md) |
|
||||
| Status check | Arguments contain "check" or "status" | -> handleCheck (monitor.md) |
|
||||
| Manual resume | Arguments contain "resume" or "continue" | -> handleResume (monitor.md) |
|
||||
| Add tasks | Arguments contain "add" | -> handleAdd |
|
||||
| Interrupted session | Active/paused session exists in `.workflow/.team/PEX-*` | -> Phase 0 |
|
||||
| New session | None of above | -> Phase 1 |
|
||||
|
||||
For callback/check/resume: load `commands/monitor.md` and execute the appropriate handler, then STOP.
|
||||
|
||||
### handleAdd
|
||||
|
||||
1. Parse new input (Issue IDs / `--text` / `--plan`)
|
||||
2. Get current max PLAN-* sequence from `TaskList`
|
||||
3. `TaskCreate` new PLAN-00N task (owner: planner)
|
||||
4. If planner already sent `all_planned` (check team_msg) -> `SendMessage` to planner to re-enter loop
|
||||
5. STOP
|
||||
|
||||
---
|
||||
|
||||
## Phase 0: Session Resume Check
|
||||
|
||||
1. Scan `.workflow/.team/PEX-*/team-session.json` for sessions with status "active" or "paused"
|
||||
2. No sessions found -> proceed to Phase 1
|
||||
3. Single session found -> resume (Session Reconciliation)
|
||||
4. Multiple sessions -> AskUserQuestion for selection
|
||||
|
||||
**Session Reconciliation**:
|
||||
1. Audit TaskList -> reconcile session state vs task status
|
||||
2. Reset in_progress tasks -> pending (they were interrupted)
|
||||
3. Rebuild team if needed (TeamCreate + spawn needed workers)
|
||||
4. Kick first executable task -> Phase 4
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Input Parsing + Execution Method
|
||||
|
||||
1. **Parse arguments**: Extract input type (Issue IDs / --text / --plan) and optional flags (--exec, -y)
|
||||
|
||||
2. **Determine execution method** (see SKILL.md Selection Decision Table):
|
||||
- Explicit `--exec` flag -> use specified method
|
||||
- `-y` / `--yes` flag -> Auto mode
|
||||
- No flags -> AskUserQuestion for method choice
|
||||
|
||||
3. **Store requirements**: input_type, raw_input, execution_method
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Create Team + Initialize Session
|
||||
|
||||
1. Generate session ID: `PEX-<slug>-<date>`
|
||||
2. Create session folder: `.workflow/.team/<session-id>/`
|
||||
3. Create subdirectories: `artifacts/solutions/`, `wisdom/`
|
||||
4. Call `TeamCreate` with team name (default: "planex")
|
||||
5. Initialize wisdom files (learnings.md, decisions.md, conventions.md, issues.md)
|
||||
6. Write team-session.json:
|
||||
|
||||
```
|
||||
{
|
||||
session_id: "<session-id>",
|
||||
input_type: "<issues|text|plan>",
|
||||
input: "<raw-input>",
|
||||
execution_method: "<agent|codex|gemini>",
|
||||
status: "active",
|
||||
active_workers: [],
|
||||
started_at: "<ISO timestamp>"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Create Task Chain
|
||||
|
||||
Delegate to `commands/dispatch.md`:
|
||||
|
||||
1. Read `roles/coordinator/commands/dispatch.md`
|
||||
2. Execute its workflow to create PLAN-001 task
|
||||
3. PLAN-001 contains input info + execution method in description
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Spawn-and-Stop
|
||||
|
||||
1. Load `commands/monitor.md`
|
||||
2. Execute `handleSpawnNext` to find ready tasks and spawn planner worker
|
||||
3. Output status summary
|
||||
4. **STOP** (idle, wait for worker callback)
|
||||
|
||||
**ONE_STEP_PER_INVOCATION**: true — coordinator does one operation per wake-up, then STOPS.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Report + Completion Action
|
||||
|
||||
When all tasks are complete (monitor.md detects PIPELINE_COMPLETE):
|
||||
|
||||
1. Load session state -> count completed tasks, duration
|
||||
2. List deliverables with output paths
|
||||
3. Update session status -> "completed"
|
||||
4. Execute Completion Action (see SKILL.md)
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error | Resolution |
|
||||
|-------|------------|
|
||||
| Session file not found | Error, suggest re-initialization |
|
||||
| Unknown worker callback | Log, scan for other completions |
|
||||
| Pipeline stall | Check missing tasks, report to user |
|
||||
| Worker crash | Reset task to pending, re-spawn on next beat |
|
||||
| All workers running on resume | Report status, suggest check later |
|
||||
@@ -1,356 +0,0 @@
|
||||
# Executor Role
|
||||
|
||||
Load solution -> Route to backend (Agent/Codex/Gemini) based on execution_method -> Test verification -> Commit. Supports multiple CLI execution backends. Execution method is determined before skill invocation (see SKILL.md Execution Method Selection).
|
||||
|
||||
## Identity
|
||||
|
||||
- **Name**: `executor` | **Tag**: `[executor]`
|
||||
- **Task Prefix**: `EXEC-*`
|
||||
- **Responsibility**: Code implementation (solution -> route to backend -> test -> commit)
|
||||
|
||||
## Boundaries
|
||||
|
||||
### MUST
|
||||
|
||||
- Only process `EXEC-*` prefixed tasks
|
||||
- All output (SendMessage, team_msg, logs) must carry `[executor]` identifier
|
||||
- Select execution backend based on `execution_method` field in EXEC-* task
|
||||
- Notify planner after each issue completes
|
||||
- Continuously poll for new EXEC-* tasks (planner may create new waves anytime)
|
||||
|
||||
### MUST NOT
|
||||
|
||||
- Create issues (planner responsibility)
|
||||
- Modify solution or queue (planner responsibility)
|
||||
- Call issue-plan-agent or issue-queue-agent
|
||||
- Interact directly with user (AskUserQuestion)
|
||||
- Create PLAN-* tasks for planner
|
||||
|
||||
---
|
||||
|
||||
## Toolbox
|
||||
|
||||
### Execution Backends
|
||||
|
||||
| Backend | Tool | Invocation | Mode |
|
||||
|---------|------|------------|------|
|
||||
| `agent` | code-developer subagent | `Task({ subagent_type: "code-developer" })` | Synchronous |
|
||||
| `codex` | Codex CLI | `ccw cli --tool codex --mode write` | Background |
|
||||
| `gemini` | Gemini CLI | `ccw cli --tool gemini --mode write` | Background |
|
||||
|
||||
### Direct Capabilities
|
||||
|
||||
| Tool | Purpose |
|
||||
|------|---------|
|
||||
| `Read` | Read solution plan and queue files |
|
||||
| `Write` | Write implementation artifacts |
|
||||
| `Edit` | Edit source code |
|
||||
| `Bash` | Run tests, git operations, CLI calls |
|
||||
|
||||
### CLI Capabilities
|
||||
|
||||
| CLI Command | Purpose |
|
||||
|-------------|---------|
|
||||
| `ccw issue status <id> --json` | Check issue status |
|
||||
| `ccw issue solution <id> --json` | Load single issue's bound solution (requires issue ID) |
|
||||
| `ccw issue update <id> --status executing` | Update issue status to executing |
|
||||
| `ccw issue update <id> --status completed` | Mark issue as completed |
|
||||
|
||||
---
|
||||
|
||||
## Message Types
|
||||
|
||||
| Type | Direction | Trigger | Description |
|
||||
|------|-----------|---------|-------------|
|
||||
| `impl_complete` | executor -> planner | Implementation and tests pass | Single issue implementation complete |
|
||||
| `impl_failed` | executor -> planner | Implementation failed after retries | Implementation failure |
|
||||
| `wave_done` | executor -> planner | All EXEC tasks in a wave completed | Entire wave complete |
|
||||
| `error` | executor -> planner | Blocking error | Execution error |
|
||||
|
||||
## Message Bus
|
||||
|
||||
Before every SendMessage, log via `mcp__ccw-tools__team_msg`:
|
||||
|
||||
**NOTE**: `team` must be **session ID** (e.g., `PEX-project-2026-02-27`), NOT team name. Extract from `Session:` field in task description.
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
team: <session-id>, // e.g., "PEX-project-2026-02-27", NOT "planex"
|
||||
from: "executor",
|
||||
to: "planner",
|
||||
type: <message-type>,
|
||||
summary: "[executor] <task-prefix> complete: <task-subject>",
|
||||
ref: <artifact-path>
|
||||
})
|
||||
```
|
||||
|
||||
**CLI fallback** (when MCP unavailable):
|
||||
|
||||
```
|
||||
Bash("ccw team log --team <session-id> --from executor --to planner --type <message-type> --summary \"[executor] <task-prefix> complete\" --ref <artifact-path> --json")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Execution (5-Phase)
|
||||
|
||||
### Phase 1: Task Discovery
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 1: Task Discovery
|
||||
|
||||
Standard task discovery flow: TaskList -> filter by prefix `EXEC-*` + owner match + pending + unblocked -> TaskGet -> TaskUpdate in_progress.
|
||||
|
||||
### Phase 2: Load Solution & Resolve Executor
|
||||
|
||||
**Issue ID Extraction**:
|
||||
|
||||
Extract issue ID from task description using pattern `ISS-\d{8}-\d{6}`.
|
||||
|
||||
If no issue ID found:
|
||||
1. Log error via team_msg
|
||||
2. SendMessage error to planner
|
||||
3. TaskUpdate completed
|
||||
4. Return to idle
|
||||
|
||||
**Solution Loading (Dual Mode)**:
|
||||
|
||||
| Mode | Condition | Action |
|
||||
|------|-----------|--------|
|
||||
| File-first | Task description contains `solution_file: <path>` | Read JSON file, extract solution.bound |
|
||||
| CLI fallback | No solution_file field | Call `ccw issue solution <issueId> --json` |
|
||||
|
||||
If no bound solution found:
|
||||
1. Log error via team_msg
|
||||
2. SendMessage error to planner
|
||||
3. TaskUpdate completed
|
||||
4. Return to idle
|
||||
|
||||
**Execution Method Resolution**:
|
||||
|
||||
| Condition | Executor |
|
||||
|-----------|----------|
|
||||
| `execution_method: Agent` in task description | agent |
|
||||
| `execution_method: Codex` in task description | codex |
|
||||
| `execution_method: Gemini` in task description | gemini |
|
||||
| `execution_method: Auto` + task_count <= 3 | agent |
|
||||
| `execution_method: Auto` + task_count > 3 | codex |
|
||||
| Unknown or missing | agent (with warning) |
|
||||
|
||||
**Code Review Resolution**:
|
||||
|
||||
Extract `code_review` from task description. Values: Skip | Gemini Review | Codex Review | Agent Review. Default: Skip.
|
||||
|
||||
**Issue Status Update**:
|
||||
|
||||
```
|
||||
Bash("ccw issue update <issueId> --status executing")
|
||||
```
|
||||
|
||||
### Phase 3: Implementation (Multi-Backend Routing)
|
||||
|
||||
Route to execution backend based on resolved executor.
|
||||
|
||||
#### Option A: Agent Execution
|
||||
|
||||
**When**: executor === 'agent' (simple tasks, task_count <= 3)
|
||||
|
||||
**Tool call**:
|
||||
```
|
||||
Task({
|
||||
subagent_type: "code-developer",
|
||||
run_in_background: false,
|
||||
description: "Implement solution for <issueId>",
|
||||
prompt: <execution-prompt>
|
||||
})
|
||||
```
|
||||
|
||||
Synchronous execution - wait for completion before Phase 4.
|
||||
|
||||
#### Option B: Codex CLI Execution
|
||||
|
||||
**When**: executor === 'codex' (complex tasks, background execution)
|
||||
|
||||
**Tool call**:
|
||||
```
|
||||
Bash("ccw cli -p \"<execution-prompt>\" --tool codex --mode write --id planex-<issueId>", { run_in_background: true })
|
||||
```
|
||||
|
||||
**Resume on failure**:
|
||||
```
|
||||
ccw cli -p "Continue implementation" --resume planex-<issueId> --tool codex --mode write --id planex-<issueId>-retry
|
||||
```
|
||||
|
||||
STOP after spawn - CLI executes in background, wait for task hook callback.
|
||||
|
||||
#### Option C: Gemini CLI Execution
|
||||
|
||||
**When**: executor === 'gemini' (analysis-heavy tasks, background execution)
|
||||
|
||||
**Tool call**:
|
||||
```
|
||||
Bash("ccw cli -p \"<execution-prompt>\" --tool gemini --mode write --id planex-<issueId>", { run_in_background: true })
|
||||
```
|
||||
|
||||
STOP after spawn - CLI executes in background, wait for task hook callback.
|
||||
|
||||
### Execution Prompt Template
|
||||
|
||||
All backends use unified prompt structure:
|
||||
|
||||
```
|
||||
## Issue
|
||||
ID: <issueId>
|
||||
Title: <solution-title>
|
||||
|
||||
## Solution Plan
|
||||
<solution-bound-json>
|
||||
|
||||
## Implementation Requirements
|
||||
|
||||
1. Follow the solution plan tasks in order
|
||||
2. Write clean, minimal code following existing patterns
|
||||
3. Run tests after each significant change
|
||||
4. Ensure all existing tests still pass
|
||||
5. Do NOT over-engineer - implement exactly what the solution specifies
|
||||
|
||||
## Quality Checklist
|
||||
- [ ] All solution tasks implemented
|
||||
- [ ] No TypeScript/linting errors
|
||||
- [ ] Existing tests pass
|
||||
- [ ] New tests added where appropriate
|
||||
- [ ] No security vulnerabilities introduced
|
||||
|
||||
## Project Guidelines
|
||||
@.workflow/specs/*.md
|
||||
```
|
||||
|
||||
### Phase 4: Verify & Commit
|
||||
|
||||
**Test Detection**:
|
||||
|
||||
| Detection | Method |
|
||||
|-----------|--------|
|
||||
| package.json scripts.test | Use `npm test` |
|
||||
| package.json scripts.test:unit | Use `npm run test:unit` |
|
||||
| No test script found | Skip verification, proceed to commit |
|
||||
|
||||
**Test Verification**:
|
||||
|
||||
```
|
||||
Bash("<testCmd> 2>&1 || echo TEST_FAILED")
|
||||
```
|
||||
|
||||
Check output for `TEST_FAILED` or `FAIL` strings.
|
||||
|
||||
**Test Failure Handling**:
|
||||
|
||||
| Condition | Action |
|
||||
|-----------|--------|
|
||||
| Tests failing | Report impl_failed to planner with test output + resume command |
|
||||
| Tests passing | Proceed to code review (if configured) |
|
||||
|
||||
**Code Review (Optional)**:
|
||||
|
||||
| Review Tool | Execution |
|
||||
|-------------|-----------|
|
||||
| Gemini Review | `ccw cli -p "<review-prompt>" --tool gemini --mode analysis --id planex-review-<issueId>` (background) |
|
||||
| Codex Review | `ccw cli --tool codex --mode review --uncommitted` (background, no prompt with target flags) |
|
||||
| Agent Review | Current agent performs inline review against solution convergence criteria |
|
||||
|
||||
**Code Review Prompt**:
|
||||
```
|
||||
PURPOSE: Code review for <issueId> implementation against solution plan
|
||||
TASK: Verify solution convergence criteria | Check test coverage | Analyze code quality | Identify issues
|
||||
MODE: analysis
|
||||
CONTEXT: @**/* | Memory: Review planex execution for <issueId>
|
||||
EXPECTED: Quality assessment with issue identification and recommendations
|
||||
CONSTRAINTS: Focus on solution adherence and code quality | analysis=READ-ONLY
|
||||
```
|
||||
|
||||
**Issue Completion**:
|
||||
|
||||
```
|
||||
Bash("ccw issue update <issueId> --status completed")
|
||||
```
|
||||
|
||||
### Phase 5: Report + Loop
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report
|
||||
|
||||
**Success Report**:
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
team: <session-id>, // e.g., "PEX-project-2026-02-27", NOT "planex"
|
||||
from: "executor",
|
||||
to: "planner",
|
||||
type: "impl_complete",
|
||||
summary: "[executor] Implementation complete for <issueId> via <executor>, tests passing"
|
||||
})
|
||||
|
||||
SendMessage({
|
||||
type: "message",
|
||||
recipient: "planner",
|
||||
content: `## [executor] Implementation Complete
|
||||
|
||||
**Issue**: <issueId>
|
||||
**Executor**: <executor>
|
||||
**Solution**: <solution-id>
|
||||
**Code Review**: <codeReview>
|
||||
**Status**: All tests passing
|
||||
**Issue Status**: Updated to completed`,
|
||||
summary: "[executor] EXEC complete: <issueId> (<executor>)"
|
||||
})
|
||||
|
||||
TaskUpdate({ taskId: <task-id>, status: "completed" })
|
||||
```
|
||||
|
||||
**Loop Check**:
|
||||
|
||||
Query for next `EXEC-*` task with owner=executor, status=pending, blockedBy empty.
|
||||
|
||||
| Condition | Action |
|
||||
|-----------|--------|
|
||||
| Tasks available | Return to Phase 1 for next task |
|
||||
| No tasks + planner sent all_planned | Send wave_done and idle |
|
||||
| No tasks + planner still planning | Idle for more tasks |
|
||||
|
||||
**Wave Done Signal**:
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
team: <session-id>, // e.g., "PEX-project-2026-02-27", NOT "planex"
|
||||
from: "executor",
|
||||
to: "planner",
|
||||
type: "wave_done",
|
||||
summary: "[executor] All EXEC tasks completed"
|
||||
})
|
||||
|
||||
SendMessage({
|
||||
type: "message",
|
||||
recipient: "planner",
|
||||
content: "## [executor] All Tasks Done\n\nAll EXEC-* tasks have been completed. Pipeline finished.",
|
||||
summary: "[executor] wave_done: all complete"
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| No EXEC-* tasks available | Idle, wait for planner to create tasks |
|
||||
| Solution plan not found | Report error to planner |
|
||||
| Unknown execution_method | Fallback to `agent` with warning |
|
||||
| Agent (code-developer) failure | Retry once, then report impl_failed |
|
||||
| CLI (Codex/Gemini) failure | Provide resume command with fixed ID, report impl_failed |
|
||||
| CLI timeout | Use fixed ID `planex-{issueId}` for resume |
|
||||
| Tests failing after implementation | Report impl_failed with test output + resume info |
|
||||
| Issue status update failure | Log warning, continue with report |
|
||||
| Dependency not yet complete | Wait - task is blocked by blockedBy |
|
||||
| All tasks done but planner still planning | Send wave_done, then idle for more |
|
||||
| Critical issue beyond scope | SendMessage error to planner |
|
||||
@@ -1,315 +0,0 @@
|
||||
# Planner Role
|
||||
|
||||
Demand decomposition -> Issue creation -> Solution design -> Conflict check -> EXEC task dispatch. Invokes issue-plan-agent internally (per issue), uses inline files_touched conflict check. Dispatches EXEC-* task immediately after each issue's solution is ready. Planner also serves as lead role (no separate coordinator).
|
||||
|
||||
## Identity
|
||||
|
||||
- **Name**: `planner` | **Tag**: `[planner]`
|
||||
- **Task Prefix**: `PLAN-*`
|
||||
- **Responsibility**: Planning lead (requirement -> issues -> solutions -> queue -> dispatch)
|
||||
|
||||
## Boundaries
|
||||
|
||||
### MUST
|
||||
|
||||
- Only process `PLAN-*` prefixed tasks
|
||||
- All output (SendMessage, team_msg, logs) must carry `[planner]` identifier
|
||||
- Immediately create `EXEC-*` task after completing each issue's solution and send `issue_ready` signal
|
||||
- Continue pushing forward without waiting for executor
|
||||
- Write solution artifacts to `<sessionDir>/artifacts/solutions/{issueId}.json`
|
||||
|
||||
### MUST NOT
|
||||
|
||||
- Directly write/modify business code (executor responsibility)
|
||||
- Call code-developer agent
|
||||
- Run project tests
|
||||
- git commit code changes
|
||||
|
||||
---
|
||||
|
||||
## Toolbox
|
||||
|
||||
### Subagent Capabilities
|
||||
|
||||
| Agent Type | Purpose |
|
||||
|------------|---------|
|
||||
| `issue-plan-agent` | Closed-loop planning: ACE exploration + solution generation + binding (single issue granularity) |
|
||||
|
||||
### CLI Capabilities
|
||||
|
||||
| CLI Command | Purpose |
|
||||
|-------------|---------|
|
||||
| `ccw issue create --data '{"title":"..."}' --json` | Create issue from text |
|
||||
| `ccw issue status <id> --json` | Check issue status |
|
||||
| `ccw issue solution <id> --json` | Get single issue's solutions (requires issue ID) |
|
||||
| `ccw issue solutions --status planned --brief` | Batch list all bound solutions (cross-issue) |
|
||||
| `ccw issue bind <id> <sol-id>` | Bind solution to issue |
|
||||
|
||||
### Skill Capabilities
|
||||
|
||||
| Skill | Purpose |
|
||||
|-------|---------|
|
||||
| `Skill(skill="issue:new", args="--text '...'")` | Create issue from text |
|
||||
|
||||
---
|
||||
|
||||
## Message Types
|
||||
|
||||
| Type | Direction | Trigger | Description |
|
||||
|------|-----------|---------|-------------|
|
||||
| `issue_ready` | planner -> executor | Single issue solution + EXEC task created | Per-issue beat signal |
|
||||
| `wave_ready` | planner -> executor | All issues in a wave dispatched | Wave summary signal |
|
||||
| `all_planned` | planner -> executor | All waves planning complete | Final signal |
|
||||
| `error` | planner -> executor | Blocking error | Planning failure |
|
||||
|
||||
## Message Bus
|
||||
|
||||
Before every SendMessage, log via `mcp__ccw-tools__team_msg`:
|
||||
|
||||
**NOTE**: `team` must be **session ID** (e.g., `PEX-project-2026-02-27`), NOT team name. Extract from `Session:` field in task description.
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
team: <session-id>, // e.g., "PEX-project-2026-02-27", NOT "planex"
|
||||
from: "planner",
|
||||
to: "executor",
|
||||
type: <message-type>,
|
||||
summary: "[planner] <task-prefix> complete: <task-subject>",
|
||||
ref: <artifact-path>
|
||||
})
|
||||
```
|
||||
|
||||
**CLI fallback** (when MCP unavailable):
|
||||
|
||||
```
|
||||
Bash("ccw team log --team <session-id> --from planner --to executor --type <message-type> --summary \"[planner] <task-prefix> complete\" --ref <artifact-path> --json")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Execution (5-Phase)
|
||||
|
||||
### Phase 1: Task Discovery
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 1: Task Discovery
|
||||
|
||||
Standard task discovery flow: TaskList -> filter by prefix `PLAN-*` + owner match + pending + unblocked -> TaskGet -> TaskUpdate in_progress.
|
||||
|
||||
### Phase 2: Input Parsing
|
||||
|
||||
Parse task description and arguments to determine input type.
|
||||
|
||||
**Input Type Detection**:
|
||||
|
||||
| Detection | Condition | Handler |
|
||||
|-----------|-----------|---------|
|
||||
| Issue IDs | Task description contains `ISS-\d{8}-\d{6}` pattern | Path C: Direct to planning |
|
||||
| Text input | Arguments contain `--text '...'` | Path A: Create issue first |
|
||||
| Plan file | Arguments contain `--plan <path>` | Path B: Parse and batch create |
|
||||
| Execution plan JSON | Plan file is `execution-plan.json` from req-plan | Path D: Wave-aware processing |
|
||||
| Description text | None of above | Treat task description as requirement text |
|
||||
|
||||
**Execution Config Extraction**:
|
||||
|
||||
From arguments, extract:
|
||||
- `execution_method`: Agent | Codex | Gemini | Auto (default: Auto)
|
||||
- `code_review`: Skip | Gemini Review | Codex Review | Agent Review (default: Skip)
|
||||
|
||||
### Phase 3: Issue Processing Pipeline
|
||||
|
||||
Execute different processing paths based on input type.
|
||||
|
||||
#### Path A: Text Input -> Create Issue
|
||||
|
||||
**Workflow**:
|
||||
1. Use `issue:new` skill to create issue from text
|
||||
2. Capture created issue ID
|
||||
3. Add to issue list for planning
|
||||
|
||||
**Tool calls**:
|
||||
```
|
||||
Skill(skill="issue:new", args="--text '<requirement-text>'")
|
||||
```
|
||||
|
||||
#### Path B: Plan File -> Batch Create Issues
|
||||
|
||||
**Workflow**:
|
||||
1. Read plan file content
|
||||
2. Parse phases/steps from markdown structure
|
||||
3. For each phase/step, create an issue
|
||||
4. Add all issue IDs to list for planning
|
||||
|
||||
**Plan Parsing Rules**:
|
||||
- Match `## Phase N: Title` or `## Step N: Title` or `### N. Title`
|
||||
- Each match creates one issue with title and description
|
||||
- Fallback: If no phase structure, entire content becomes single issue
|
||||
|
||||
#### Path C: Issue IDs -> Direct Planning
|
||||
|
||||
Issue IDs are ready, proceed directly to solution planning.
|
||||
|
||||
#### Path D: execution-plan.json -> Wave-Aware Processing
|
||||
|
||||
**Workflow**:
|
||||
1. Parse execution-plan.json with waves array
|
||||
2. For each wave, process issues sequentially
|
||||
3. For each issue in wave:
|
||||
- Call issue-plan-agent to generate solution
|
||||
- Write solution artifact to `<sessionDir>/artifacts/solutions/{issueId}.json`
|
||||
- Perform inline conflict check
|
||||
- Create EXEC-* task with solution_file path
|
||||
- Send issue_ready signal
|
||||
4. After each wave completes, send wave_ready signal
|
||||
5. After all waves, send all_planned signal
|
||||
|
||||
**Issue Planning (per issue)**:
|
||||
|
||||
```
|
||||
Task({
|
||||
subagent_type: "issue-plan-agent",
|
||||
run_in_background: false,
|
||||
description: "Plan solution for <issueId>",
|
||||
prompt: `issue_ids: ["<issueId>"]
|
||||
project_root: "<projectRoot>"
|
||||
|
||||
## Requirements
|
||||
- Generate solution for this issue
|
||||
- Auto-bind single solution
|
||||
- Issues come from req-plan decomposition (tags: req-plan)
|
||||
- Respect dependencies: <issue_dependencies>`
|
||||
})
|
||||
```
|
||||
|
||||
**Solution Artifact**:
|
||||
|
||||
```
|
||||
Write({
|
||||
file_path: "<sessionDir>/artifacts/solutions/<issueId>.json",
|
||||
content: JSON.stringify({
|
||||
session_id: <sessionId>,
|
||||
issue_id: <issueId>,
|
||||
...solution,
|
||||
execution_config: { execution_method, code_review },
|
||||
timestamp: <ISO-timestamp>
|
||||
}, null, 2)
|
||||
})
|
||||
```
|
||||
|
||||
**EXEC Task Creation**:
|
||||
|
||||
```
|
||||
TaskCreate({
|
||||
subject: "EXEC-W<waveNum>-<issueId>: Implement <solution-title>",
|
||||
description: `## Execution Task
|
||||
**Wave**: <waveNum>
|
||||
**Issue**: <issueId>
|
||||
**solution_file**: <solutionFile>
|
||||
**execution_method**: <method>
|
||||
**code_review**: <review>`,
|
||||
activeForm: "Implementing <issueId>",
|
||||
owner: "executor"
|
||||
})
|
||||
```
|
||||
|
||||
#### Wave Processing (Path A/B/C Convergence)
|
||||
|
||||
For non-execution-plan inputs, process all issues as a single logical wave:
|
||||
|
||||
**Workflow**:
|
||||
1. For each issue in list:
|
||||
- Call issue-plan-agent
|
||||
- Write solution artifact
|
||||
- Perform inline conflict check
|
||||
- Create EXEC-* task
|
||||
- Send issue_ready signal
|
||||
2. After all issues complete, send wave_ready signal
|
||||
|
||||
### Phase 4: Inline Conflict Check + Dispatch
|
||||
|
||||
Perform conflict detection using files_touched overlap analysis.
|
||||
|
||||
**Conflict Detection Rules**:
|
||||
|
||||
| Condition | Action |
|
||||
|-----------|--------|
|
||||
| File overlap detected | Add blockedBy dependency to previous task |
|
||||
| Explicit dependency in solution.bound.dependencies.on_issues | Add blockedBy to referenced task |
|
||||
| No conflict | No blockedBy, task is immediately executable |
|
||||
|
||||
**Inline Conflict Check Algorithm**:
|
||||
|
||||
1. Get current solution's files_touched (or affected_files)
|
||||
2. For each previously dispatched solution:
|
||||
- Check if any files overlap
|
||||
- If overlap, add previous execTaskId to blockedBy
|
||||
3. Check explicit dependencies from solution.bound.dependencies.on_issues
|
||||
4. Return blockedBy array for TaskUpdate
|
||||
|
||||
**Wave Summary Signal** (after all issues in wave):
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log", team: <session-id>, from: "planner", to: "executor", // team = session ID
|
||||
type: "wave_ready",
|
||||
summary: "[planner] Wave <waveNum> fully dispatched: <issueCount> issues"
|
||||
})
|
||||
|
||||
SendMessage({
|
||||
type: "message", recipient: "executor",
|
||||
content: "## [planner] Wave <waveNum> Complete\nAll issues dispatched, <count> EXEC tasks created.",
|
||||
summary: "[planner] wave_ready: wave <waveNum>"
|
||||
})
|
||||
```
|
||||
|
||||
### Phase 5: Report + Finalize
|
||||
|
||||
> See SKILL.md Shared Infrastructure -> Worker Phase 5: Report
|
||||
|
||||
**Final Signal** (all waves complete):
|
||||
|
||||
```
|
||||
mcp__ccw-tools__team_msg({
|
||||
operation: "log",
|
||||
team: <session-id>, // e.g., "PEX-project-2026-02-27", NOT "planex"
|
||||
from: "planner",
|
||||
to: "executor",
|
||||
type: "all_planned",
|
||||
summary: "[planner] All <waveCount> waves planned, <issueCount> issues total"
|
||||
})
|
||||
|
||||
SendMessage({
|
||||
type: "message",
|
||||
recipient: "executor",
|
||||
content: `## [planner] All Waves Planned
|
||||
|
||||
**Total Waves**: <waveCount>
|
||||
**Total Issues**: <issueCount>
|
||||
**Status**: All planning complete, waiting for executor to finish remaining EXEC-* tasks
|
||||
|
||||
Pipeline complete when executor sends wave_done confirmation.`,
|
||||
summary: "[planner] all_planned: <waveCount> waves, <issueCount> issues"
|
||||
})
|
||||
|
||||
TaskUpdate({ taskId: <task-id>, status: "completed" })
|
||||
```
|
||||
|
||||
**Loop Check**: Query for next `PLAN-*` task with owner=planner, status=pending, blockedBy empty. If found, return to Phase 1.
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| No PLAN-* tasks available | Idle, wait for orchestrator |
|
||||
| Issue creation failure | Retry once with simplified text, then report error |
|
||||
| issue-plan-agent failure | Retry once, then report error and skip to next issue |
|
||||
| Inline conflict check failure | Skip conflict detection, create EXEC task without blockedBy |
|
||||
| Plan file not found | Report error with expected path |
|
||||
| execution-plan.json parse failure | Fallback to plan_file parsing (Path B) |
|
||||
| execution-plan.json missing waves | Report error, suggest re-running req-plan |
|
||||
| Empty input (no issues, no text, no plan) | AskUserQuestion for clarification |
|
||||
| Solution artifact write failure | Log warning, create EXEC task without solution_file (executor fallback) |
|
||||
| Wave partially failed | Report partial success, continue with successful issues |
|
||||
| Critical issue beyond scope | SendMessage error to executor |
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-execute
|
||||
description: Coordinate agent execution for workflow tasks with automatic session discovery, parallel task processing, and status tracking. Triggers on "workflow:execute".
|
||||
description: Coordinate agent execution for workflow tasks with automatic session discovery, parallel task processing, and status tracking. Triggers on "workflow-execute".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
@@ -14,18 +14,18 @@ Orchestrates autonomous workflow execution through systematic task discovery, ag
|
||||
|
||||
```bash
|
||||
# Interactive mode (with confirmations)
|
||||
/workflow:execute
|
||||
/workflow:execute --resume-session="WFS-auth"
|
||||
/workflow-execute
|
||||
/workflow-execute --resume-session="WFS-auth"
|
||||
|
||||
# Auto mode (skip confirmations, use defaults)
|
||||
/workflow:execute --yes
|
||||
/workflow:execute -y
|
||||
/workflow:execute -y --resume-session="WFS-auth"
|
||||
/workflow-execute --yes
|
||||
/workflow-execute -y
|
||||
/workflow-execute -y --resume-session="WFS-auth"
|
||||
|
||||
# With auto-commit (commit after each task completion)
|
||||
/workflow:execute --with-commit
|
||||
/workflow:execute -y --with-commit
|
||||
/workflow:execute -y --with-commit --resume-session="WFS-auth"
|
||||
/workflow-execute --with-commit
|
||||
/workflow-execute -y --with-commit
|
||||
/workflow-execute -y --with-commit --resume-session="WFS-auth"
|
||||
```
|
||||
|
||||
## Auto Mode Defaults
|
||||
@@ -153,7 +153,7 @@ bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | wc -l)
|
||||
**Case A: No Sessions** (count = 0)
|
||||
```
|
||||
ERROR: No active workflow sessions found
|
||||
Run /workflow:plan "task description" to create a session
|
||||
Run /workflow-plan "task description" to create a session
|
||||
```
|
||||
|
||||
**Case B: Single Session** (count = 1)
|
||||
@@ -575,7 +575,7 @@ meta.agent missing → Infer from meta.type:
|
||||
| Error Type | Cause | Recovery Strategy | Max Attempts |
|
||||
|-----------|-------|------------------|--------------|
|
||||
| **Discovery Errors** |
|
||||
| No active session | No sessions in `.workflow/active/` | Create or resume session: `/workflow:plan "project"` | N/A |
|
||||
| No active session | No sessions in `.workflow/active/` | Create or resume session: `/workflow-plan "project"` | N/A |
|
||||
| Multiple sessions | Multiple sessions in `.workflow/active/` | Prompt user selection | N/A |
|
||||
| Corrupted session | Invalid JSON files | Recreate session structure or validate files | N/A |
|
||||
| **Execution Errors** |
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-lite-plan
|
||||
description: Lightweight planning and execution skill - route to lite-plan or lite-execute with prompt enhancement. Triggers on "workflow:lite-plan", "workflow:lite-execute".
|
||||
description: Lightweight planning and execution skill - route to lite-plan or lite-execute with prompt enhancement. Triggers on "workflow-lite-plan", "workflow:lite-execute".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
@@ -41,7 +41,7 @@ const mode = detectMode()
|
||||
|
||||
function detectMode() {
|
||||
if (skillName === 'workflow:lite-execute') return 'execute'
|
||||
return 'plan' // default: workflow:lite-plan
|
||||
return 'plan' // default: workflow-lite-plan
|
||||
}
|
||||
```
|
||||
|
||||
@@ -49,7 +49,7 @@ function detectMode() {
|
||||
|
||||
| Trigger | Mode | Phase Document | Description |
|
||||
|---------|------|----------------|-------------|
|
||||
| `workflow:lite-plan` | plan | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Full planning pipeline (explore → plan → confirm → execute) |
|
||||
| `workflow-lite-plan` | plan | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Full planning pipeline (explore → plan → confirm → execute) |
|
||||
| `workflow:lite-execute` | execute | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Standalone execution (in-memory / prompt / file) |
|
||||
|
||||
## Interactive Preference Collection
|
||||
|
||||
@@ -755,7 +755,7 @@ Read("phases/02-lite-execute.md")
|
||||
| Planning agent failure | Fallback to direct planning by Claude |
|
||||
| Clarification timeout | Use exploration findings as-is |
|
||||
| Confirmation timeout | Save context, display resume instructions |
|
||||
| Modify loop > 3 times | Suggest breaking task or using /workflow:plan |
|
||||
| Modify loop > 3 times | Suggest breaking task or using /workflow-plan |
|
||||
|
||||
## Next Phase
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-multi-cli-plan
|
||||
description: Multi-CLI collaborative planning and execution skill - route to multi-cli-plan or lite-execute with prompt enhancement. Triggers on "workflow:multi-cli-plan", "workflow:lite-execute".
|
||||
description: Multi-CLI collaborative planning and execution skill - route to multi-cli-plan or lite-execute with prompt enhancement. Triggers on "workflow-multi-cli-plan", "workflow:lite-execute".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, mcp__ace-tool__search_context
|
||||
---
|
||||
|
||||
@@ -33,7 +33,7 @@ const mode = detectMode()
|
||||
|
||||
function detectMode() {
|
||||
if (skillName === 'workflow:lite-execute') return 'execute'
|
||||
return 'plan' // default: workflow:multi-cli-plan
|
||||
return 'plan' // default: workflow-multi-cli-plan
|
||||
}
|
||||
```
|
||||
|
||||
@@ -41,7 +41,7 @@ function detectMode() {
|
||||
|
||||
| Trigger | Mode | Phase Document | Description |
|
||||
|---------|------|----------------|-------------|
|
||||
| `workflow:multi-cli-plan` | plan | [phases/01-multi-cli-plan.md](phases/01-multi-cli-plan.md) | Multi-CLI collaborative planning (ACE context → discussion → plan → execute) |
|
||||
| `workflow-multi-cli-plan` | plan | [phases/01-multi-cli-plan.md](phases/01-multi-cli-plan.md) | Multi-CLI collaborative planning (ACE context → discussion → plan → execute) |
|
||||
| `workflow:lite-execute` | execute | [phases/02-lite-execute.md](phases/02-lite-execute.md) | Standalone execution (in-memory / prompt / file) |
|
||||
|
||||
## Interactive Preference Collection
|
||||
@@ -124,7 +124,7 @@ Multi-phase execution (multi-cli-plan → lite-execute) spans long conversations
|
||||
|
||||
## Execution Flow
|
||||
|
||||
### Plan Mode (workflow:multi-cli-plan)
|
||||
### Plan Mode (workflow-multi-cli-plan)
|
||||
|
||||
```
|
||||
1. Collect preferences via AskUserQuestion (autoYes)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Phase 1: Multi-CLI Collaborative Planning
|
||||
|
||||
Complete multi-CLI collaborative planning pipeline with ACE context gathering and iterative cross-verification. This phase document preserves the full content of the original `workflow:multi-cli-plan` command.
|
||||
Complete multi-CLI collaborative planning pipeline with ACE context gathering and iterative cross-verification. This phase document preserves the full content of the original `workflow-multi-cli-plan` command.
|
||||
|
||||
## Auto Mode
|
||||
|
||||
@@ -12,12 +12,12 @@ When `workflowPreferences.autoYes` is true: Auto-approve plan, use recommended s
|
||||
|
||||
```bash
|
||||
# Basic usage
|
||||
/workflow:multi-cli-plan "Implement user authentication"
|
||||
/workflow-multi-cli-plan "Implement user authentication"
|
||||
|
||||
# With options
|
||||
/workflow:multi-cli-plan "Add dark mode support" --max-rounds=3
|
||||
/workflow:multi-cli-plan "Refactor payment module" --tools=gemini,codex,claude
|
||||
/workflow:multi-cli-plan "Fix memory leak" --mode=serial
|
||||
/workflow-multi-cli-plan "Add dark mode support" --max-rounds=3
|
||||
/workflow-multi-cli-plan "Refactor payment module" --tools=gemini,codex,claude
|
||||
/workflow-multi-cli-plan "Fix memory leak" --mode=serial
|
||||
```
|
||||
|
||||
**Context Source**: ACE semantic search + Multi-CLI analysis
|
||||
@@ -585,7 +585,7 @@ TodoWrite({ todos: [
|
||||
|
||||
```bash
|
||||
# Simpler single-round planning
|
||||
/workflow:lite-plan "task description"
|
||||
/workflow-lite-plan "task description"
|
||||
|
||||
# Issue-driven discovery
|
||||
/issue:discover-by-prompt "find issues"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-plan
|
||||
description: Unified planning skill - 4-phase planning workflow, plan verification, and interactive replanning. Triggers on "workflow:plan", "workflow:plan-verify", "workflow:replan".
|
||||
description: Unified planning skill - 4-phase planning workflow, plan verification, and interactive replanning. Triggers on "workflow-plan", "workflow-plan-verify", "workflow:replan".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
@@ -107,9 +107,9 @@ const mode = detectMode(args)
|
||||
|
||||
function detectMode(args) {
|
||||
// Skill trigger determines mode
|
||||
if (skillName === 'workflow:plan-verify') return 'verify'
|
||||
if (skillName === 'workflow-plan-verify') return 'verify'
|
||||
if (skillName === 'workflow:replan') return 'replan'
|
||||
return 'plan' // default: workflow:plan
|
||||
return 'plan' // default: workflow-plan
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -90,11 +90,11 @@ ELSE:
|
||||
SYNTHESIS_AVAILABLE = true
|
||||
|
||||
IF NOT EXISTS(IMPL_PLAN):
|
||||
ERROR: "IMPL_PLAN.md not found. Run /workflow:plan first"
|
||||
ERROR: "IMPL_PLAN.md not found. Run /workflow-plan first"
|
||||
EXIT
|
||||
|
||||
IF TASK_FILES.count == 0:
|
||||
ERROR: "No task JSON files found. Run /workflow:plan first"
|
||||
ERROR: "No task JSON files found. Run /workflow-plan first"
|
||||
EXIT
|
||||
```
|
||||
|
||||
@@ -320,7 +320,7 @@ ${recommendation === 'BLOCK_EXECUTION' ? 'BLOCK: Fix critical issues then re-ver
|
||||
recommendation === 'PROCEED_WITH_FIXES' ? 'FIX RECOMMENDED: Address high issues then re-verify or execute' :
|
||||
'READY: Proceed to Skill(skill="workflow-execute")'}
|
||||
|
||||
Re-verify: \`/workflow:plan-verify --session ${session_id}\`
|
||||
Re-verify: \`/workflow-plan-verify --session ${session_id}\`
|
||||
Execute: \`Skill(skill="workflow-execute", args="--resume-session=${session_id}")\`
|
||||
`
|
||||
|
||||
|
||||
@@ -215,7 +215,7 @@ const workflowConfig = {
|
||||
skillName: "workflow-plan", // kebab-case
|
||||
title: "Workflow Plan", // Human-readable
|
||||
description: "5-phase planning...", // One-line description
|
||||
triggers: ["workflow:plan"], // Trigger phrases
|
||||
triggers: ["workflow-plan"], // Trigger phrases
|
||||
allowedTools: ["Task", "AskUserQuestion", "TodoWrite", "Read", "Write", "Edit", "Bash", "Glob", "Grep", "Skill"],
|
||||
|
||||
// Source information
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-tdd
|
||||
description: Unified TDD workflow skill combining 6-phase TDD planning with Red-Green-Refactor task chain generation, and 4-phase TDD verification with compliance reporting. Triggers on "workflow:tdd-plan", "workflow:tdd-verify".
|
||||
name: workflow-tdd-plan
|
||||
description: Unified TDD workflow skill combining 6-phase TDD planning with Red-Green-Refactor task chain generation, and 4-phase TDD verification with compliance reporting. Triggers on "workflow-tdd-plan", "workflow-tdd-verify".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
@@ -89,8 +89,8 @@ const mode = detectMode(args)
|
||||
|
||||
function detectMode(args) {
|
||||
// Skill trigger determines mode
|
||||
if (skillName === 'workflow:tdd-verify') return 'verify'
|
||||
return 'plan' // default: workflow:tdd-plan
|
||||
if (skillName === 'workflow-tdd-verify') return 'verify'
|
||||
return 'plan' // default: workflow-tdd-plan
|
||||
}
|
||||
```
|
||||
|
||||
@@ -496,7 +496,7 @@ Similar to workflow-plan, a `planning-notes.md` can accumulate context across ph
|
||||
- `phases/07-tdd-verify.md` - Phase 7: Test coverage and cycle analysis (inline)
|
||||
|
||||
**Follow-up Skills**:
|
||||
- `workflow-tdd` skill (tdd-verify phase) - Verify TDD compliance (can also invoke via verify mode)
|
||||
- `workflow-tdd-plan` skill (tdd-verify phase) - Verify TDD compliance (can also invoke via verify mode)
|
||||
- `workflow-plan` skill (plan-verify phase) - Verify plan quality and dependencies
|
||||
- Display session status inline - Review TDD task breakdown
|
||||
- `Skill(skill="workflow-execute")` - Begin TDD implementation
|
||||
@@ -50,7 +50,7 @@ process_dir = session_dir/.process
|
||||
# Check task files exist
|
||||
task_files = Glob(task_dir/*.json)
|
||||
IF task_files.count == 0:
|
||||
ERROR: "No task JSON files found. Run /workflow:tdd-plan first"
|
||||
ERROR: "No task JSON files found. Run /workflow-tdd-plan first"
|
||||
EXIT
|
||||
|
||||
# Check summaries exist (optional but recommended for full analysis)
|
||||
@@ -596,7 +596,7 @@ Next: Review full report for detailed findings
|
||||
|
||||
| Error | Cause | Resolution |
|
||||
|-------|-------|------------|
|
||||
| Task files missing | Incomplete planning | Run /workflow:tdd-plan first |
|
||||
| Task files missing | Incomplete planning | Run /workflow-tdd-plan first |
|
||||
| Invalid JSON | Corrupted task files | Regenerate tasks |
|
||||
| Missing summaries | Tasks not executed | Execute tasks before verify |
|
||||
|
||||
@@ -632,4 +632,4 @@ Next: Review full report for detailed findings
|
||||
| PROCEED_WITH_CAVEATS | `workflow-execute` skill | Start with noted caveats |
|
||||
| REQUIRE_FIXES | Review report, refine tasks | Address issues before proceed |
|
||||
| BLOCK_MERGE | `workflow-plan` skill (replan phase) | Significant restructuring needed |
|
||||
| After implementation | Re-run `workflow-tdd` skill (tdd-verify phase) | Verify post-execution compliance |
|
||||
| After implementation | Re-run `workflow-tdd-plan` skill (tdd-verify phase) | Verify post-execution compliance |
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: workflow-test-fix
|
||||
description: Unified test-fix pipeline combining test generation (session, context, analysis, task gen) with iterative test-cycle execution (adaptive strategy, progressive testing, CLI fallback). Triggers on "workflow:test-fix-gen", "workflow:test-cycle-execute", "test fix workflow".
|
||||
description: Unified test-fix pipeline combining test generation (session, context, analysis, task gen) with iterative test-cycle execution (adaptive strategy, progressive testing, CLI fallback). Triggers on "workflow-test-fix", "test fix workflow".
|
||||
allowed-tools: Skill, Task, AskUserQuestion, TaskCreate, TaskUpdate, TaskList, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
@@ -60,8 +60,8 @@ Task Pipeline (generated in Phase 4, executed in Phase 5):
|
||||
|
||||
Full pipeline and execute-only modes are triggered by skill name routing (see Mode Detection). Workflow preferences (auto mode) are collected interactively via AskUserQuestion before dispatching to phases.
|
||||
|
||||
**Full pipeline** (workflow:test-fix-gen): Task description or session ID as arguments → interactive preference collection → generate + execute pipeline
|
||||
**Execute only** (workflow:test-cycle-execute): Auto-discovers active session → interactive preference collection → execution loop
|
||||
**Full pipeline** (workflow-test-fix): Task description or session ID as arguments → interactive preference collection → generate + execute pipeline
|
||||
**Execute only** (workflow-test-fix): Auto-discovers active session → interactive preference collection → execution loop
|
||||
|
||||
## Interactive Preference Collection
|
||||
|
||||
@@ -109,8 +109,8 @@ Multi-phase test-fix pipeline (Phase 1-5) spans long conversations, especially P
|
||||
|
||||
```
|
||||
Entry Point Detection:
|
||||
├─ /workflow:test-fix-gen → Full Pipeline (Phase 1→2→3→4→Summary→5)
|
||||
└─ /workflow:test-cycle-execute → Execution Only (Phase 5)
|
||||
├─ /workflow-test-fix → Full Pipeline (Phase 1→2→3→4→Summary→5)
|
||||
└─ /workflow-test-fix → Execution Only (Phase 5)
|
||||
|
||||
Phase 1: Session Start (session-start)
|
||||
└─ Ref: phases/01-session-start.md
|
||||
|
||||
@@ -19,13 +19,13 @@ Execute test-fix workflow with dynamic task generation and iterative fix cycles
|
||||
|
||||
```bash
|
||||
# Execute test-fix workflow (auto-discovers active session)
|
||||
/workflow:test-cycle-execute
|
||||
/workflow-test-fix
|
||||
|
||||
# Resume interrupted session
|
||||
/workflow:test-cycle-execute --resume-session="WFS-test-user-auth"
|
||||
/workflow-test-fix --resume-session="WFS-test-user-auth"
|
||||
|
||||
# Custom iteration limit (default: 10)
|
||||
/workflow:test-cycle-execute --max-iterations=15
|
||||
/workflow-test-fix --max-iterations=15
|
||||
```
|
||||
|
||||
**Quality Gate**: Test pass rate >= 95% (criticality-aware) or 100%
|
||||
@@ -60,7 +60,7 @@ Load session, tasks, and iteration state.
|
||||
|
||||
**For full-pipeline entry (from Phase 1-4)**: Use `testSessionId` passed from Phase 4.
|
||||
|
||||
**For direct entry (/workflow:test-cycle-execute)**:
|
||||
**For direct entry (/workflow-test-fix)**:
|
||||
- `--resume-session="WFS-xxx"` → Use specified session
|
||||
- No args → Auto-discover active test session (find `.workflow/active/WFS-test-*`)
|
||||
|
||||
|
||||
896
.claude/skills/workflow-wave-plan/SKILL.md
Normal file
896
.claude/skills/workflow-wave-plan/SKILL.md
Normal file
@@ -0,0 +1,896 @@
|
||||
---
|
||||
name: workflow-wave-plan
|
||||
description: CSV Wave planning and execution - explore via wave, resolve conflicts, execute from CSV with linked exploration context. Triggers on "workflow:wave-plan".
|
||||
argument-hint: "<task description> [--yes|-y] [--concurrency|-c N]"
|
||||
allowed-tools: Task, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
# Workflow Wave Plan
|
||||
|
||||
CSV Wave-based planning and execution. Uses structured CSV state for both exploration and execution, with cross-phase context propagation via `context_from` linking.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Requirement
|
||||
↓
|
||||
┌─ Phase 1: Decompose ─────────────────────┐
|
||||
│ Analyze requirement → explore.csv │
|
||||
│ (1 row per exploration angle) │
|
||||
└────────────────────┬──────────────────────┘
|
||||
↓
|
||||
┌─ Phase 2: Wave Explore ──────────────────┐
|
||||
│ Wave loop: spawn Explore agents │
|
||||
│ → findings/key_files → explore.csv │
|
||||
└────────────────────┬──────────────────────┘
|
||||
↓
|
||||
┌─ Phase 3: Synthesize & Plan ─────────────┐
|
||||
│ Read explore findings → cross-reference │
|
||||
│ → resolve conflicts → tasks.csv │
|
||||
│ (context_from links to E* explore rows) │
|
||||
└────────────────────┬──────────────────────┘
|
||||
↓
|
||||
┌─ Phase 4: Wave Execute ──────────────────┐
|
||||
│ Wave loop: build prev_context from CSV │
|
||||
│ → spawn code-developer agents per wave │
|
||||
│ → results → tasks.csv │
|
||||
└────────────────────┬──────────────────────┘
|
||||
↓
|
||||
┌─ Phase 5: Aggregate ─────────────────────┐
|
||||
│ results.csv + context.md + summary │
|
||||
└───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Context Flow
|
||||
|
||||
```
|
||||
explore.csv tasks.csv
|
||||
┌──────────┐ ┌──────────┐
|
||||
│ E1: arch │──────────→│ T1: setup│ context_from: E1;E2
|
||||
│ findings │ │ prev_ctx │← E1+E2 findings
|
||||
├──────────┤ ├──────────┤
|
||||
│ E2: deps │──────────→│ T2: impl │ context_from: E1;T1
|
||||
│ findings │ │ prev_ctx │← E1+T1 findings
|
||||
├──────────┤ ├──────────┤
|
||||
│ E3: test │──┐ ┌───→│ T3: test │ context_from: E3;T2
|
||||
│ findings │ └───┘ │ prev_ctx │← E3+T2 findings
|
||||
└──────────┘ └──────────┘
|
||||
|
||||
Two context channels:
|
||||
1. Directed: context_from → prev_context (from CSV findings)
|
||||
2. Broadcast: discoveries.ndjson (append-only shared board)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## CSV Schemas
|
||||
|
||||
### explore.csv
|
||||
|
||||
| Column | Type | Set By | Description |
|
||||
|--------|------|--------|-------------|
|
||||
| `id` | string | Decomposer | E1, E2, ... |
|
||||
| `angle` | string | Decomposer | Exploration angle name |
|
||||
| `description` | string | Decomposer | What to explore from this angle |
|
||||
| `focus` | string | Decomposer | Keywords and focus areas |
|
||||
| `deps` | string | Decomposer | Semicolon-separated dep IDs (usually empty) |
|
||||
| `wave` | integer | Wave Engine | Wave number (usually 1) |
|
||||
| `status` | enum | Agent | pending / completed / failed |
|
||||
| `findings` | string | Agent | Discoveries (max 800 chars) |
|
||||
| `key_files` | string | Agent | Relevant files (semicolon-separated) |
|
||||
| `error` | string | Agent | Error message if failed |
|
||||
|
||||
### tasks.csv
|
||||
|
||||
| Column | Type | Set By | Description |
|
||||
|--------|------|--------|-------------|
|
||||
| `id` | string | Planner | T1, T2, ... |
|
||||
| `title` | string | Planner | Task title |
|
||||
| `description` | string | Planner | Self-contained task description — what to implement |
|
||||
| `test` | string | Planner | Test cases: what tests to write and how to verify (unit/integration/edge) |
|
||||
| `acceptance_criteria` | string | Planner | Measurable conditions that define "done" |
|
||||
| `scope` | string | Planner | Target file/directory glob — constrains agent write area, prevents cross-task file conflicts |
|
||||
| `hints` | string | Planner | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Either part is optional |
|
||||
| `execution_directives` | string | Planner | Execution constraints: commands to run for verification, tool restrictions |
|
||||
| `deps` | string | Planner | Dependency task IDs: T1;T2 |
|
||||
| `context_from` | string | Planner | Context source IDs: **E1;E2;T1** |
|
||||
| `wave` | integer | Wave Engine | Wave number (computed from deps) |
|
||||
| `status` | enum | Agent | pending / completed / failed / skipped |
|
||||
| `findings` | string | Agent | Execution findings (max 500 chars) |
|
||||
| `files_modified` | string | Agent | Files modified (semicolon-separated) |
|
||||
| `tests_passed` | boolean | Agent | Whether all defined test cases passed (true/false) |
|
||||
| `acceptance_met` | string | Agent | Summary of which acceptance criteria were met/unmet |
|
||||
| `error` | string | Agent | Error if failed |
|
||||
|
||||
**context_from prefix convention**: `E*` → explore.csv lookup, `T*` → tasks.csv lookup.
|
||||
|
||||
---
|
||||
|
||||
## Session Structure
|
||||
|
||||
```
|
||||
.workflow/.wave-plan/{session-id}/
|
||||
├── explore.csv # Exploration state
|
||||
├── tasks.csv # Execution state
|
||||
├── discoveries.ndjson # Shared discovery board
|
||||
├── explore-results/ # Detailed per-angle results
|
||||
│ ├── E1.json
|
||||
│ └── E2.json
|
||||
├── task-results/ # Detailed per-task results
|
||||
│ ├── T1.json
|
||||
│ └── T2.json
|
||||
├── results.csv # Final results export
|
||||
└── context.md # Full context summary
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Session Initialization
|
||||
|
||||
```javascript
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
// Parse flags
|
||||
const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
|
||||
const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
|
||||
const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
|
||||
|
||||
const requirement = $ARGUMENTS
|
||||
.replace(/--yes|-y|--concurrency\s+\d+|-c\s+\d+/g, '')
|
||||
.trim()
|
||||
|
||||
const slug = requirement.toLowerCase()
|
||||
.replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
|
||||
.substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
|
||||
const sessionId = `wp-${slug}-${dateStr}`
|
||||
const sessionFolder = `.workflow/.wave-plan/${sessionId}`
|
||||
|
||||
Bash(`mkdir -p ${sessionFolder}/explore-results ${sessionFolder}/task-results`)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Decompose → explore.csv
|
||||
|
||||
### 1.1 Analyze Requirement
|
||||
|
||||
```javascript
|
||||
const complexity = analyzeComplexity(requirement)
|
||||
// Low: 1 angle | Medium: 2-3 angles | High: 3-4 angles
|
||||
|
||||
const ANGLE_PRESETS = {
|
||||
architecture: ['architecture', 'dependencies', 'integration-points', 'modularity'],
|
||||
security: ['security', 'auth-patterns', 'dataflow', 'validation'],
|
||||
performance: ['performance', 'bottlenecks', 'caching', 'data-access'],
|
||||
bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'],
|
||||
feature: ['patterns', 'integration-points', 'testing', 'dependencies']
|
||||
}
|
||||
|
||||
// Choose up to `count` exploration angles by classifying the requirement text.
// Preset triggers are checked in priority order; 'feature' is the fallback
// when no trigger matches.
function selectAngles(text, count) {
  const triggers = [
    ['architecture', /refactor|architect|restructure|modular/],
    ['security', /security|auth|permission|access/],
    ['performance', /performance|slow|optimi|cache/],
    ['bugfix', /fix|bug|error|broken/]
  ]
  const hit = triggers.find(([, pattern]) => pattern.test(text))
  const preset = hit ? hit[0] : 'feature'
  return ANGLE_PRESETS[preset].slice(0, count)
}
|
||||
|
||||
const angleCount = complexity === 'High' ? 4 : complexity === 'Medium' ? 3 : 1
|
||||
const angles = selectAngles(requirement, angleCount)
|
||||
```
|
||||
|
||||
### 1.2 Generate explore.csv
|
||||
|
||||
```javascript
|
||||
const header = 'id,angle,description,focus,deps,wave,status,findings,key_files,error'
|
||||
const rows = angles.map((angle, i) => {
|
||||
const id = `E${i + 1}`
|
||||
const desc = `Explore codebase from ${angle} perspective for: ${requirement}`
|
||||
return `"${id}","${angle}","${escCSV(desc)}","${angle}","",1,"pending","","",""`
|
||||
})
|
||||
|
||||
Write(`${sessionFolder}/explore.csv`, [header, ...rows].join('\n'))
|
||||
```
|
||||
|
||||
All exploration rows default to wave 1 (independent parallel). If angle dependencies exist, compute waves.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Wave Explore
|
||||
|
||||
Execute exploration waves using `Task(Explore)` agents.
|
||||
|
||||
### 2.1 Wave Loop
|
||||
|
||||
```javascript
|
||||
const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`))
|
||||
const maxExploreWave = Math.max(...exploreCSV.map(r => parseInt(r.wave)))
|
||||
|
||||
for (let wave = 1; wave <= maxExploreWave; wave++) {
|
||||
const waveRows = exploreCSV.filter(r =>
|
||||
parseInt(r.wave) === wave && r.status === 'pending'
|
||||
)
|
||||
if (waveRows.length === 0) continue
|
||||
|
||||
// Skip rows with failed dependencies
|
||||
const validRows = waveRows.filter(r => {
|
||||
if (!r.deps) return true
|
||||
return r.deps.split(';').filter(Boolean).every(depId => {
|
||||
const dep = exploreCSV.find(d => d.id === depId)
|
||||
return dep && dep.status === 'completed'
|
||||
})
|
||||
})
|
||||
|
||||
waveRows.filter(r => !validRows.includes(r)).forEach(r => {
|
||||
r.status = 'skipped'
|
||||
r.error = 'Dependency failed/skipped'
|
||||
})
|
||||
|
||||
// ★ Spawn ALL explore agents in SINGLE message → parallel execution
|
||||
const results = validRows.map(row =>
|
||||
Task({
|
||||
subagent_type: "Explore",
|
||||
run_in_background: false,
|
||||
description: `Explore: ${row.angle}`,
|
||||
prompt: buildExplorePrompt(row, requirement, sessionFolder)
|
||||
})
|
||||
)
|
||||
|
||||
// Collect results from JSON files → update explore.csv
|
||||
validRows.forEach((row, i) => {
|
||||
const resultPath = `${sessionFolder}/explore-results/${row.id}.json`
|
||||
if (fileExists(resultPath)) {
|
||||
const result = JSON.parse(Read(resultPath))
|
||||
row.status = result.status || 'completed'
|
||||
row.findings = truncate(result.findings, 800)
|
||||
row.key_files = Array.isArray(result.key_files)
|
||||
? result.key_files.join(';')
|
||||
: (result.key_files || '')
|
||||
row.error = result.error || ''
|
||||
} else {
|
||||
// Fallback: parse from agent output text
|
||||
row.status = 'completed'
|
||||
row.findings = truncate(results[i], 800)
|
||||
}
|
||||
})
|
||||
|
||||
writeCSV(`${sessionFolder}/explore.csv`, exploreCSV)
|
||||
}
|
||||
```
|
||||
|
||||
### 2.2 Explore Agent Prompt
|
||||
|
||||
```javascript
|
||||
// Build the prompt for one Explore agent (Phase 2).
// `row` is an explore.csv row (uses row.angle, row.focus, row.id);
// `requirement` is the user's original request; `sessionFolder` is the
// session directory where discoveries.ndjson and explore-results/ live.
// Returns the full prompt text as a single string.
// NOTE(review): the whole body is one template literal — every line below,
// including blank lines, is agent-facing prompt text, not code.
function buildExplorePrompt(row, requirement, sessionFolder) {
  return `## Exploration: ${row.angle}

**Requirement**: ${requirement}
**Focus**: ${row.focus}

### MANDATORY FIRST STEPS
1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
2. Read project context: .workflow/project-tech.json (if exists)

---

## Instructions
Explore the codebase from the **${row.angle}** perspective:
1. Discover relevant files, modules, and patterns
2. Identify integration points and dependencies
3. Note constraints, risks, and conventions
4. Find existing patterns to follow
5. Share discoveries: append findings to ${sessionFolder}/discoveries.ndjson

## Output
Write findings to: ${sessionFolder}/explore-results/${row.id}.json

JSON format:
{
  "status": "completed",
  "findings": "Concise summary of ${row.angle} discoveries (max 800 chars)",
  "key_files": ["relevant/file1.ts", "relevant/file2.ts"],
  "details": {
    "patterns": ["pattern descriptions"],
    "integration_points": [{"file": "path", "description": "..."}],
    "constraints": ["constraint descriptions"],
    "recommendations": ["recommendation descriptions"]
  }
}

Also provide a 2-3 sentence summary.`
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Synthesize & Plan → tasks.csv
|
||||
|
||||
Read exploration findings, cross-reference, resolve conflicts, generate execution tasks.
|
||||
|
||||
### 3.1 Load Explore Results
|
||||
|
||||
```javascript
|
||||
const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`))
|
||||
const completed = exploreCSV.filter(r => r.status === 'completed')
|
||||
|
||||
// Load detailed result JSONs where available
|
||||
const detailedResults = {}
|
||||
completed.forEach(r => {
|
||||
const path = `${sessionFolder}/explore-results/${r.id}.json`
|
||||
if (fileExists(path)) detailedResults[r.id] = JSON.parse(Read(path))
|
||||
})
|
||||
```
|
||||
|
||||
### 3.2 Conflict Resolution Protocol
|
||||
|
||||
Cross-reference findings across all exploration angles:
|
||||
|
||||
```javascript
|
||||
// 1. Identify common files referenced by multiple angles
|
||||
const fileRefs = {}
|
||||
completed.forEach(r => {
|
||||
r.key_files.split(';').filter(Boolean).forEach(f => {
|
||||
if (!fileRefs[f]) fileRefs[f] = []
|
||||
fileRefs[f].push({ angle: r.angle, id: r.id })
|
||||
})
|
||||
})
|
||||
const sharedFiles = Object.entries(fileRefs).filter(([_, refs]) => refs.length > 1)
|
||||
|
||||
// 2. Detect conflicting recommendations
|
||||
// Compare recommendations from different angles for same file/module
|
||||
// Flag contradictions (angle A says "refactor X" vs angle B says "extend X")
|
||||
|
||||
// 3. Resolution rules:
|
||||
// a. Safety first — when approaches conflict, choose safer option
|
||||
// b. Consistency — prefer approaches aligned with existing patterns
|
||||
// c. Scope — prefer minimal-change approaches
|
||||
// d. Document — note all resolved conflicts for transparency
|
||||
|
||||
const synthesis = {
|
||||
sharedFiles,
|
||||
conflicts: detectConflicts(completed, detailedResults),
|
||||
resolutions: [],
|
||||
allKeyFiles: [...new Set(completed.flatMap(r => r.key_files.split(';').filter(Boolean)))]
|
||||
}
|
||||
```
|
||||
|
||||
### 3.3 Generate tasks.csv
|
||||
|
||||
Decompose into execution tasks based on synthesized exploration:
|
||||
|
||||
```javascript
|
||||
// Task decomposition rules:
|
||||
// 1. Group by feature/module (not per-file)
|
||||
// 2. Each description is self-contained (agent sees only its row + prev_context)
|
||||
// 3. deps only when task B requires task A's output
|
||||
// 4. context_from links relevant explore rows (E*) and predecessor tasks (T*)
|
||||
// 5. Prefer parallel (minimize deps)
|
||||
// 6. Use exploration findings: key_files → target files, patterns → references,
|
||||
// integration_points → dependency relationships, constraints → included in description
|
||||
// 7. Each task MUST include: test (how to verify), acceptance_criteria (what defines done)
|
||||
// 8. scope must not overlap between tasks in the same wave
|
||||
// 9. hints = implementation tips + reference files (format: tips || file1;file2)
|
||||
// 10. execution_directives = commands to run for verification, tool restrictions
|
||||
|
||||
const tasks = []
|
||||
// Claude decomposes requirement using exploration synthesis
|
||||
// Example:
|
||||
// tasks.push({ id: 'T1', title: 'Setup types', description: '...', test: 'Verify types compile', acceptance_criteria: 'All interfaces exported', scope: 'src/types/**', hints: 'Follow existing type patterns || src/types/index.ts', execution_directives: 'tsc --noEmit', deps: '', context_from: 'E1;E2' })
|
||||
// tasks.push({ id: 'T2', title: 'Implement core', description: '...', test: 'Unit test: core logic', acceptance_criteria: 'All functions pass tests', scope: 'src/core/**', hints: 'Reuse BaseService || src/services/Base.ts', execution_directives: 'npm test -- --grep core', deps: 'T1', context_from: 'E1;E2;T1' })
|
||||
// tasks.push({ id: 'T3', title: 'Add tests', description: '...', test: 'Integration test suite', acceptance_criteria: '>80% coverage', scope: 'tests/**', hints: 'Follow existing test patterns || tests/auth.test.ts', execution_directives: 'npm test', deps: 'T2', context_from: 'E3;T2' })
|
||||
|
||||
// Compute waves
|
||||
const waves = computeWaves(tasks)
|
||||
tasks.forEach(t => { t.wave = waves[t.id] })
|
||||
|
||||
// Write tasks.csv
|
||||
const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
|
||||
const rows = tasks.map(t =>
|
||||
[t.id, escCSV(t.title), escCSV(t.description), escCSV(t.test), escCSV(t.acceptance_criteria), escCSV(t.scope), escCSV(t.hints), escCSV(t.execution_directives), t.deps, t.context_from, t.wave, 'pending', '', '', '', '', '']
|
||||
.map(v => `"${v}"`).join(',')
|
||||
)
|
||||
|
||||
Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
|
||||
```
|
||||
|
||||
### 3.4 User Confirmation
|
||||
|
||||
```javascript
|
||||
if (!AUTO_YES) {
|
||||
const maxWave = Math.max(...tasks.map(t => t.wave))
|
||||
|
||||
console.log(`
|
||||
## Execution Plan
|
||||
|
||||
Explore: ${completed.length} angles completed
|
||||
Conflicts resolved: ${synthesis.conflicts.length}
|
||||
Tasks: ${tasks.length} across ${maxWave} waves
|
||||
|
||||
${Array.from({length: maxWave}, (_, i) => i + 1).map(w => {
|
||||
const wt = tasks.filter(t => t.wave === w)
|
||||
return `### Wave ${w} (${wt.length} tasks, concurrent)
|
||||
${wt.map(t => ` - [${t.id}] ${t.title} (from: ${t.context_from})`).join('\n')}`
|
||||
}).join('\n')}
|
||||
`)
|
||||
|
||||
AskUserQuestion({
|
||||
questions: [{
|
||||
question: `Proceed with ${tasks.length} tasks across ${maxWave} waves?`,
|
||||
header: "Confirm",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Execute", description: "Proceed with wave execution" },
|
||||
{ label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then re-run` },
|
||||
{ label: "Cancel", description: "Abort" }
|
||||
]
|
||||
}]
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Wave Execute
|
||||
|
||||
Execute tasks from tasks.csv in wave order, with prev_context built from both explore.csv and tasks.csv.
|
||||
|
||||
### 4.1 Wave Loop
|
||||
|
||||
```javascript
|
||||
const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`))
|
||||
const failedIds = new Set()
|
||||
const skippedIds = new Set()
|
||||
|
||||
let tasksCSV = parseCSV(Read(`${sessionFolder}/tasks.csv`))
|
||||
const maxWave = Math.max(...tasksCSV.map(r => parseInt(r.wave)))
|
||||
|
||||
for (let wave = 1; wave <= maxWave; wave++) {
|
||||
// Re-read master CSV (updated by previous wave)
|
||||
tasksCSV = parseCSV(Read(`${sessionFolder}/tasks.csv`))
|
||||
|
||||
const waveRows = tasksCSV.filter(r =>
|
||||
parseInt(r.wave) === wave && r.status === 'pending'
|
||||
)
|
||||
if (waveRows.length === 0) continue
|
||||
|
||||
// Skip on failed dependencies (cascade)
|
||||
const validRows = []
|
||||
for (const row of waveRows) {
|
||||
const deps = (row.deps || '').split(';').filter(Boolean)
|
||||
if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
|
||||
skippedIds.add(row.id)
|
||||
row.status = 'skipped'
|
||||
row.error = 'Dependency failed/skipped'
|
||||
continue
|
||||
}
|
||||
validRows.push(row)
|
||||
}
|
||||
|
||||
if (validRows.length === 0) {
|
||||
writeCSV(`${sessionFolder}/tasks.csv`, tasksCSV)
|
||||
continue
|
||||
}
|
||||
|
||||
// Build prev_context for each row from explore.csv + tasks.csv
|
||||
validRows.forEach(row => {
|
||||
row._prev_context = buildPrevContext(row.context_from, exploreCSV, tasksCSV)
|
||||
})
|
||||
|
||||
// ★ Spawn ALL task agents in SINGLE message → parallel execution
|
||||
const results = validRows.map(row =>
|
||||
Task({
|
||||
subagent_type: "code-developer",
|
||||
run_in_background: false,
|
||||
description: row.title,
|
||||
prompt: buildExecutePrompt(row, requirement, sessionFolder)
|
||||
})
|
||||
)
|
||||
|
||||
// Collect results → update tasks.csv
|
||||
validRows.forEach((row, i) => {
|
||||
const resultPath = `${sessionFolder}/task-results/${row.id}.json`
|
||||
if (fileExists(resultPath)) {
|
||||
const result = JSON.parse(Read(resultPath))
|
||||
row.status = result.status || 'completed'
|
||||
row.findings = truncate(result.findings, 500)
|
||||
row.files_modified = Array.isArray(result.files_modified)
|
||||
? result.files_modified.join(';')
|
||||
: (result.files_modified || '')
|
||||
row.tests_passed = String(result.tests_passed ?? '')
|
||||
row.acceptance_met = result.acceptance_met || ''
|
||||
row.error = result.error || ''
|
||||
} else {
|
||||
row.status = 'completed'
|
||||
row.findings = truncate(results[i], 500)
|
||||
}
|
||||
|
||||
if (row.status === 'failed') failedIds.add(row.id)
|
||||
delete row._prev_context // runtime-only, don't persist
|
||||
})
|
||||
|
||||
writeCSV(`${sessionFolder}/tasks.csv`, tasksCSV)
|
||||
}
|
||||
```
|
||||
|
||||
### 4.2 prev_context Builder
|
||||
|
||||
The key function linking exploration context to execution:
|
||||
|
||||
```javascript
|
||||
// Resolve a task's context_from ID list into a readable context blurb.
// IDs prefixed E* are looked up in explore.csv rows, T* in tasks.csv rows
// (the cross-phase link). Only completed sources with non-empty findings
// contribute; unknown or incomplete IDs are silently ignored. Returns a
// fallback sentence when nothing resolves.
function buildPrevContext(contextFrom, exploreCSV, tasksCSV) {
  if (!contextFrom) return 'No previous context available'

  const lines = []
  for (const sourceId of contextFrom.split(';').filter(Boolean)) {
    if (sourceId.startsWith('E')) {
      // Cross-phase link: exploration findings feed the execution task.
      const hit = exploreCSV.find(r => r.id === sourceId)
      if (!hit || hit.status !== 'completed' || !hit.findings) continue
      lines.push(`[Explore ${hit.angle}] ${hit.findings}`)
      if (hit.key_files) lines.push(`  Key files: ${hit.key_files}`)
    } else if (sourceId.startsWith('T')) {
      // Same-phase link: a predecessor task's results.
      const hit = tasksCSV.find(r => r.id === sourceId)
      if (!hit || hit.status !== 'completed' || !hit.findings) continue
      lines.push(`[Task ${hit.id}: ${hit.title}] ${hit.findings}`)
      if (hit.files_modified) lines.push(`  Modified: ${hit.files_modified}`)
    }
  }

  return lines.length > 0 ? lines.join('\n') : 'No previous context available'
}
|
||||
```
|
||||
|
||||
### 4.3 Execute Agent Prompt
|
||||
|
||||
```javascript
|
||||
// Build the prompt for one code-developer agent executing a tasks.csv row
// (Phase 4). `row` carries the planner columns plus the runtime-only
// `_prev_context` string assembled by buildPrevContext; `requirement` is the
// original user goal; `sessionFolder` is the session directory.
// Returns the full prompt text as a single string.
//
// Fix: the bash fence markers were written as \\\` (an escaped backslash
// followed by an escaped backtick), which emitted literal `\`\`\`bash` into
// the prompt instead of a real ``` fence. They are now \` only.
function buildExecutePrompt(row, requirement, sessionFolder) {
  return `## Task: ${row.title}

**ID**: ${row.id}
**Goal**: ${requirement}
**Scope**: ${row.scope || 'Not specified'}

## Description
${row.description}

### Implementation Hints & Reference Files
${row.hints || 'None'}

> Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance.

### Execution Directives
${row.execution_directives || 'None'}

> Commands to run for verification, tool restrictions, or environment requirements.

### Test Cases
${row.test || 'None specified'}

### Acceptance Criteria
${row.acceptance_criteria || 'None specified'}

## Previous Context (from exploration and predecessor tasks)
${row._prev_context}

### MANDATORY FIRST STEPS
1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
2. Read project context: .workflow/project-tech.json (if exists)

---

## Execution Protocol

1. **Read references**: Parse hints — read all files listed after \`||\` to understand existing patterns
2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
3. **Use context**: Apply previous tasks' findings from prev_context above
4. **Stay in scope**: ONLY create/modify files within ${row.scope || 'project'} — do NOT touch files outside this boundary
5. **Apply hints**: Follow implementation tips from hints (before \`||\`)
6. **Implement**: Execute changes described in the task description
7. **Write tests**: Implement the test cases defined above
8. **Run directives**: Execute commands from execution_directives to verify your work
9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
10. **Share discoveries**: Append exploration findings to shared board:
   \`\`\`bash
   echo '{"ts":"<ISO>","worker":"${row.id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
   \`\`\`
11. **Report result**: Write JSON to output file

## Output
Write results to: ${sessionFolder}/task-results/${row.id}.json

{
  "status": "completed" | "failed",
  "findings": "What was done (max 500 chars)",
  "files_modified": ["file1.ts", "file2.ts"],
  "tests_passed": true | false,
  "acceptance_met": "Summary of which acceptance criteria were met/unmet",
  "error": ""
}

**IMPORTANT**: Set status to "completed" ONLY if:
- All test cases pass
- All acceptance criteria are met
Otherwise set status to "failed" with details in error field.`
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Aggregate
|
||||
|
||||
### 5.1 Generate Results
|
||||
|
||||
```javascript
|
||||
const finalTasks = parseCSV(Read(`${sessionFolder}/tasks.csv`))
|
||||
const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`))
|
||||
|
||||
Bash(`cp "${sessionFolder}/tasks.csv" "${sessionFolder}/results.csv"`)
|
||||
|
||||
const completed = finalTasks.filter(r => r.status === 'completed')
|
||||
const failed = finalTasks.filter(r => r.status === 'failed')
|
||||
const skipped = finalTasks.filter(r => r.status === 'skipped')
|
||||
const maxWave = Math.max(...finalTasks.map(r => parseInt(r.wave)))
|
||||
```
|
||||
|
||||
### 5.2 Generate context.md
|
||||
|
||||
```javascript
|
||||
const contextMd = `# Wave Plan Results
|
||||
|
||||
**Requirement**: ${requirement}
|
||||
**Session**: ${sessionId}
|
||||
**Timestamp**: ${getUtc8ISOString()}
|
||||
|
||||
## Summary
|
||||
|
||||
| Metric | Count |
|
||||
|--------|-------|
|
||||
| Explore Angles | ${exploreCSV.length} |
|
||||
| Total Tasks | ${finalTasks.length} |
|
||||
| Completed | ${completed.length} |
|
||||
| Failed | ${failed.length} |
|
||||
| Skipped | ${skipped.length} |
|
||||
| Waves | ${maxWave} |
|
||||
|
||||
## Exploration Results
|
||||
|
||||
${exploreCSV.map(e => `### ${e.id}: ${e.angle} (${e.status})
|
||||
${e.findings || 'N/A'}
|
||||
Key files: ${e.key_files || 'none'}`).join('\n\n')}
|
||||
|
||||
## Task Results
|
||||
|
||||
${finalTasks.map(t => `### ${t.id}: ${t.title} (${t.status})
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Wave | ${t.wave} |
|
||||
| Scope | ${t.scope || 'none'} |
|
||||
| Dependencies | ${t.deps || 'none'} |
|
||||
| Context From | ${t.context_from || 'none'} |
|
||||
| Tests Passed | ${t.tests_passed || 'N/A'} |
|
||||
| Acceptance Met | ${t.acceptance_met || 'N/A'} |
|
||||
| Error | ${t.error || 'none'} |
|
||||
|
||||
**Description**: ${t.description}
|
||||
|
||||
**Test Cases**: ${t.test || 'N/A'}
|
||||
|
||||
**Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
|
||||
|
||||
**Hints**: ${t.hints || 'N/A'}
|
||||
|
||||
**Execution Directives**: ${t.execution_directives || 'N/A'}
|
||||
|
||||
**Findings**: ${t.findings || 'N/A'}
|
||||
|
||||
**Files Modified**: ${t.files_modified || 'none'}`).join('\n\n---\n\n')}
|
||||
|
||||
## All Modified Files
|
||||
|
||||
${[...new Set(finalTasks.flatMap(t =>
|
||||
(t.files_modified || '').split(';')).filter(Boolean)
|
||||
)].map(f => '- ' + f).join('\n') || 'None'}
|
||||
`
|
||||
|
||||
Write(`${sessionFolder}/context.md`, contextMd)
|
||||
```
|
||||
|
||||
### 5.3 Summary & Next Steps
|
||||
|
||||
```javascript
|
||||
console.log(`
|
||||
## Wave Plan Complete
|
||||
|
||||
Session: ${sessionFolder}
|
||||
Explore: ${exploreCSV.filter(r => r.status === 'completed').length}/${exploreCSV.length} angles
|
||||
Tasks: ${completed.length}/${finalTasks.length} completed, ${failed.length} failed, ${skipped.length} skipped
|
||||
Waves: ${maxWave}
|
||||
|
||||
Files:
|
||||
- explore.csv — exploration state
|
||||
- tasks.csv — execution state
|
||||
- results.csv — final results
|
||||
- context.md — full report
|
||||
- discoveries.ndjson — shared discoveries
|
||||
`)
|
||||
|
||||
if (!AUTO_YES && failed.length > 0) {
|
||||
AskUserQuestion({
|
||||
questions: [{
|
||||
question: `${failed.length} tasks failed. Next action?`,
|
||||
header: "Next Step",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Retry Failed", description: "Reset failed + skipped, re-execute Phase 4" },
|
||||
{ label: "View Report", description: "Display context.md" },
|
||||
{ label: "Done", description: "Complete session" }
|
||||
]
|
||||
}]
|
||||
})
|
||||
// If Retry: reset failed/skipped status to pending, re-run Phase 4
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Utilities
|
||||
|
||||
### Wave Computation (Kahn's BFS)
|
||||
|
||||
```javascript
|
||||
// Assign each task a 1-based wave number using Kahn's algorithm (BFS
// topological traversal): a task's wave is one more than the deepest wave
// among its dependencies, so tasks sharing a wave can run concurrently.
// Dependency IDs that don't match any task are ignored. Throws when a
// dependency cycle is detected. Returns { taskId: waveNumber }.
function computeWaves(tasks) {
  const remaining = {}   // unprocessed dependency count per task
  const dependents = {}  // taskId -> IDs of tasks that depend on it
  const depth = {}       // resulting wave number per task

  for (const task of tasks) {
    remaining[task.id] = 0
    dependents[task.id] = []
    depth[task.id] = 1
  }

  for (const task of tasks) {
    for (const dep of (task.deps || '').split(';').filter(Boolean)) {
      if (dependents[dep]) {
        dependents[dep].push(task.id)
        remaining[task.id]++
      }
    }
  }

  // Seed with dependency-free tasks, then relax depths breadth-first.
  const ready = Object.keys(remaining).filter(id => remaining[id] === 0)
  while (ready.length > 0) {
    const id = ready.shift()
    for (const child of dependents[id]) {
      depth[child] = Math.max(depth[child], depth[id] + 1)
      if (--remaining[child] === 0) ready.push(child)
    }
  }

  // Any task never reaching zero remaining deps sits on a cycle.
  if (Object.values(remaining).some(count => count > 0)) {
    throw new Error('Circular dependency detected')
  }

  return depth
}
|
||||
```
|
||||
|
||||
### CSV Helpers
|
||||
|
||||
```javascript
|
||||
// Escape a value for quoted CSV output: falsy values (null, undefined, '',
// 0, false) become '', and embedded double quotes are doubled per RFC 4180.
function escCSV(s) {
  const text = String(s || '')
  return text.replace(/"/g, '""')
}
|
||||
|
||||
// Parse CSV text (as produced by writeCSV / the planner) into an array of
// row objects keyed by header column name. Missing trailing fields become ''.
//
// Fixes: the original called parseCSVLine, which is defined nowhere in this
// document, and stripped only the surrounding quotes — embedded commas and
// doubled-quote escapes (as written by escCSV) were mishandled. A nested
// quote-aware splitter now handles both.
//
// Limitation: fields must not contain newlines, since records are split
// on '\n' before field parsing.
function parseCSV(content) {
  // Split one CSV record into unquoted field values, honoring quotes
  // and unescaping doubled quotes ("" -> ").
  function splitLine(line) {
    const fields = []
    let current = ''
    let inQuotes = false
    for (let i = 0; i < line.length; i++) {
      const ch = line[i]
      if (ch === '"') {
        if (inQuotes && line[i + 1] === '"') { current += '"'; i++ } // escaped quote
        else inQuotes = !inQuotes // field-boundary quote: toggle, don't emit
      } else if (ch === ',' && !inQuotes) {
        fields.push(current)
        current = ''
      } else {
        current += ch
      }
    }
    fields.push(current)
    return fields
  }

  const lines = content.trim().split('\n')
  // Header row is written unquoted, but tolerate stray quotes anyway.
  const header = lines[0].split(',').map(h => h.replace(/"/g, '').trim())
  return lines.slice(1).filter(l => l.trim()).map(line => {
    const values = splitLine(line)
    const row = {}
    header.forEach((col, i) => { row[col] = values[i] || '' })
    return row
  })
}
|
||||
|
||||
// Serialize row objects back to a CSV file at `path`. Column order follows
// the first row's keys; runtime-only keys (prefixed '_', e.g. _prev_context)
// are dropped so they never persist. Every data field is quoted, with
// embedded quotes escaped via escCSV. No-op when rows is empty.
function writeCSV(path, rows) {
  if (rows.length === 0) return

  const cols = Object.keys(rows[0]).filter(key => !key.startsWith('_'))
  const body = rows.map(row =>
    cols.map(col => `"${escCSV(row[col])}"`).join(',')
  )

  Write(path, [cols.join(','), ...body].join('\n'))
}
|
||||
|
||||
// Truncate a string to at most `max` chars, replacing the tail with '...'
// when cut. Only null/undefined become '' (the previous `s || ''` also
// wiped valid falsy inputs such as 0 and false).
function truncate(s, max) {
  s = String(s ?? '')
  return s.length > max ? s.substring(0, max - 3) + '...' : s
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Discovery Board Protocol
|
||||
|
||||
Shared `discoveries.ndjson` — append-only NDJSON accessible to all agents across all phases.
|
||||
|
||||
**Lifecycle**:
|
||||
- Created by the first agent to write a discovery
|
||||
- Carries over across all phases and waves — never cleared
|
||||
- Agents append via `echo '...' >> discoveries.ndjson`
|
||||
|
||||
**Format**: NDJSON, each line is a self-contained JSON object:
|
||||
|
||||
```jsonl
|
||||
{"ts":"...","worker":"E1","type":"code_pattern","data":{"name":"repo-pattern","file":"src/repos/Base.ts"}}
|
||||
{"ts":"...","worker":"T2","type":"integration_point","data":{"file":"src/auth/index.ts","exports":["auth"]}}
|
||||
```
|
||||
|
||||
**Discovery Types**:
|
||||
|
||||
| type | Dedup Key | Description |
|
||||
|------|-----------|-------------|
|
||||
| `code_pattern` | `data.name` | Reusable code pattern found |
|
||||
| `integration_point` | `data.file` | Module connection point |
|
||||
| `convention` | singleton | Code style conventions |
|
||||
| `blocker` | `data.issue` | Blocking issue encountered |
|
||||
| `tech_stack` | singleton | Project technology stack |
|
||||
| `test_command` | singleton | Test commands discovered |
|
||||
|
||||
**Protocol Rules**:
|
||||
1. Read board before own exploration → skip covered areas
|
||||
2. Write discoveries immediately via `echo >>` → don't batch
|
||||
3. Deduplicate — check existing entries; skip if same type + dedup key exists
|
||||
4. Append-only — never modify or delete existing lines
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error | Resolution |
|
||||
|-------|------------|
|
||||
| Explore agent failure | Mark as failed in explore.csv, exclude from planning |
|
||||
| All explores failed | Fallback: plan directly from requirement without exploration |
|
||||
| Execute agent failure | Mark as failed, skip dependents (cascade) |
|
||||
| Agent timeout | Mark as failed in results, continue with wave |
|
||||
| Circular dependency | Abort wave computation, report cycle |
|
||||
| CSV parse error | Validate CSV format before execution, show line number |
|
||||
| discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
|
||||
|
||||
---
|
||||
|
||||
## Core Rules
|
||||
|
||||
1. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes
|
||||
2. **CSV is Source of Truth**: Read master CSV before each wave, write after
|
||||
3. **Context via CSV**: prev_context built from CSV findings, not from memory
|
||||
4. **E* ↔ T* Linking**: tasks.csv `context_from` references explore.csv rows for cross-phase context
|
||||
5. **Skip on Failure**: Failed dep → skip dependent (cascade)
|
||||
6. **Discovery Board Append-Only**: Never clear or modify discoveries.ndjson
|
||||
7. **Explore Before Execute**: Phase 2 completes before Phase 4 starts
|
||||
8. **DO NOT STOP**: Continuous execution until all waves complete or remaining skipped
|
||||
|
||||
---
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Exploration Angles**: 1 for simple, 3-4 for complex; avoid redundant angles
|
||||
2. **Context Linking**: Link every task to at least one explore row (E*) — exploration was done for a reason
|
||||
3. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism
|
||||
4. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
|
||||
5. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained
|
||||
6. **Non-Overlapping Scopes**: Same-wave tasks must not write to the same files
|
||||
7. **Context From ≠ Deps**: `deps` = execution order constraint; `context_from` = information flow
|
||||
|
||||
---
|
||||
|
||||
## Usage Recommendations
|
||||
|
||||
| Scenario | Recommended Approach |
|
||||
|----------|---------------------|
|
||||
| Complex feature (unclear architecture) | `workflow:wave-plan` — explore first, then plan |
|
||||
| Simple known-pattern task | `$csv-wave-pipeline` — skip exploration, direct execution |
|
||||
| Independent parallel tasks | `$csv-wave-pipeline -c 8` — single wave, max parallelism |
|
||||
| Diamond dependency (A→B,C→D) | `workflow:wave-plan` — 3 waves with context propagation |
|
||||
| Unknown codebase | `workflow:wave-plan` — exploration phase is essential |
|
||||
@@ -457,7 +457,7 @@ function buildCliCommand(task, cliTool, cliPrompt) {
|
||||
|
||||
**Auto-Check Workflow Context**:
|
||||
- Verify session context paths are provided in agent prompt
|
||||
- If missing, request session context from workflow:execute
|
||||
- If missing, request session context from workflow-execute
|
||||
- Never assume default paths without explicit session context
|
||||
|
||||
### 5. Problem-Solving
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
---
|
||||
description: "Interactive pre-flight checklist for workflow:plan. Validates environment, refines task to GOAL/SCOPE/CONTEXT, collects source docs, configures execution preferences, writes prep-package.json, then launches the workflow."
|
||||
description: "Interactive pre-flight checklist for workflow-plan. Validates environment, refines task to GOAL/SCOPE/CONTEXT, collects source docs, configures execution preferences, writes prep-package.json, then launches the workflow."
|
||||
argument-hint: TASK="<task description>" [EXEC_METHOD=agent|cli|hybrid] [CLI_TOOL=codex|gemini|qwen]
|
||||
---
|
||||
|
||||
# Pre-Flight Checklist for Workflow Plan
|
||||
|
||||
You are an interactive preparation assistant. Your job is to ensure everything is ready for an **unattended** `workflow:plan` run with `--yes` mode. Follow each step sequentially. **Ask the user questions when information is missing.** At the end, write `prep-package.json` and invoke the skill.
|
||||
You are an interactive preparation assistant. Your job is to ensure everything is ready for an **unattended** `workflow-plan` run with `--yes` mode. Follow each step sequentially. **Ask the user questions when information is missing.** At the end, write `prep-package.json` and invoke the skill.
|
||||
|
||||
---
|
||||
|
||||
@@ -112,7 +112,7 @@ Display detected sources:
|
||||
|
||||
### 2.1 Scoring
|
||||
|
||||
Score the user's TASK against 5 dimensions, mapped to workflow:plan's GOAL/SCOPE/CONTEXT format.
|
||||
Score the user's TASK against 5 dimensions, mapped to workflow-plan's GOAL/SCOPE/CONTEXT format.
|
||||
Each dimension scores 0-2 (0=missing, 1=vague, 2=clear). **Total minimum: 6/10 to proceed.**
|
||||
|
||||
| # | 维度 | 映射 | 评分标准 |
|
||||
@@ -165,7 +165,7 @@ For dimensions still at score 1 after Q&A, auto-enhance from codebase:
|
||||
|
||||
### 2.5 Assemble Structured Description
|
||||
|
||||
Map to workflow:plan's GOAL/SCOPE/CONTEXT format:
|
||||
Map to workflow-plan's GOAL/SCOPE/CONTEXT format:
|
||||
|
||||
```
|
||||
GOAL: {objective + success criteria}
|
||||
@@ -353,7 +353,7 @@ $workflow-plan-execute --yes --with-commit TASK="$TASK_STRUCTURED"
|
||||
|
||||
Print:
|
||||
```
|
||||
启动 workflow:plan (自动模式)...
|
||||
启动 workflow-plan (自动模式)...
|
||||
prep-package.json → Phase 1 自动加载并校验
|
||||
执行方式: hybrid (codex) + auto-commit
|
||||
```
|
||||
|
||||
@@ -800,7 +800,7 @@ Offer user follow-up actions based on brainstorming results.
|
||||
|
||||
| Option | Purpose | Action |
|
||||
|--------|---------|--------|
|
||||
| **创建实施计划** | Plan implementation of top idea | Launch `workflow:lite-plan` |
|
||||
| **创建实施计划** | Plan implementation of top idea | Launch `workflow-lite-plan` |
|
||||
| **创建Issue** | Track top ideas for later | Launch `issue:new` with ideas |
|
||||
| **深入分析** | Analyze top idea in detail | Launch `workflow:analyze-with-file` |
|
||||
| **导出分享** | Generate shareable report | Create formatted report document |
|
||||
|
||||
906
.codex/skills/csv-wave-pipeline/SKILL.md
Normal file
906
.codex/skills/csv-wave-pipeline/SKILL.md
Normal file
@@ -0,0 +1,906 @@
|
||||
---
|
||||
name: csv-wave-pipeline
|
||||
description: Requirement planning to wave-based CSV execution pipeline. Decomposes requirement into dependency-sorted CSV tasks, computes execution waves, runs wave-by-wave via spawn_agents_on_csv with cross-wave context propagation.
|
||||
argument-hint: "[-y|--yes] [-c|--concurrency N] [--continue] \"requirement description\""
|
||||
allowed-tools: spawn_agents_on_csv, Read, Write, Edit, Bash, Glob, Grep, AskUserQuestion
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-confirm task decomposition, skip interactive validation, use defaults.
|
||||
|
||||
# CSV Wave Pipeline
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
$csv-wave-pipeline "Implement user authentication with OAuth, JWT, and 2FA"
|
||||
$csv-wave-pipeline -c 4 "Refactor payment module with Stripe and PayPal"
|
||||
$csv-wave-pipeline -y "Build notification system with email and SMS"
|
||||
$csv-wave-pipeline --continue "auth-20260228"
|
||||
```
|
||||
|
||||
**Flags**:
|
||||
- `-y, --yes`: Skip all confirmations (auto mode)
|
||||
- `-c, --concurrency N`: Max concurrent agents within each wave (default: 4)
|
||||
- `--continue`: Resume existing session
|
||||
|
||||
**Output Directory**: `.workflow/.csv-wave/{session-id}/`
|
||||
**Core Output**: `tasks.csv` (master state) + `results.csv` (final) + `discoveries.ndjson` (shared exploration) + `context.md` (human-readable report)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Wave-based batch execution using `spawn_agents_on_csv` with **cross-wave context propagation**. Tasks are grouped into dependency waves; each wave executes concurrently, and its results feed into the next wave.
|
||||
|
||||
**Core workflow**: Decompose → Compute Waves → Execute Wave-by-Wave → Aggregate
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ CSV BATCH EXECUTION WORKFLOW │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Phase 1: Requirement → CSV │
|
||||
│ ├─ Parse requirement into subtasks (3-10 tasks) │
|
||||
│ ├─ Identify dependencies (deps column) │
|
||||
│ ├─ Compute dependency waves (topological sort → depth grouping) │
|
||||
│ ├─ Generate tasks.csv with wave column │
|
||||
│ └─ User validates task breakdown (skip if -y) │
|
||||
│ │
|
||||
│ Phase 2: Wave Execution Engine │
|
||||
│ ├─ For each wave (1..N): │
|
||||
│ │ ├─ Build wave CSV (filter rows for this wave) │
|
||||
│ │ ├─ Inject previous wave findings into prev_context column │
|
||||
│ │ ├─ spawn_agents_on_csv(wave CSV) │
|
||||
│ │ ├─ Collect results, merge into master tasks.csv │
|
||||
│ │ └─ Check: any failed? → skip dependents or retry │
|
||||
│ └─ discoveries.ndjson shared across all waves (append-only) │
|
||||
│ │
|
||||
│ Phase 3: Results Aggregation │
|
||||
│ ├─ Export final results.csv │
|
||||
│ ├─ Generate context.md with all findings │
|
||||
│ ├─ Display summary: completed/failed/skipped per wave │
|
||||
│ └─ Offer: view results | retry failed | done │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## CSV Schema
|
||||
|
||||
### tasks.csv (Master State)
|
||||
|
||||
```csv
|
||||
id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error
|
||||
"1","Setup auth module","Create auth directory structure and base files","Verify directory exists and base files export expected interfaces","auth/ dir created; index.ts and types.ts export AuthProvider interface","src/auth/**","Follow monorepo module pattern || package.json;src/shared/types.ts","","","","1","","","","","",""
|
||||
"2","Implement OAuth","Add OAuth provider integration with Google and GitHub","Unit test: mock OAuth callback returns valid token; Integration test: verify redirect URL generation","OAuth login redirects to provider; callback returns JWT; supports Google and GitHub","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth before completion","1","1","2","","","","","",""
|
||||
"3","Add JWT tokens","Implement JWT generation and validation","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","","","","","",""
|
||||
"4","Setup 2FA","Add TOTP-based 2FA with QR code generation","Unit test: TOTP verify with correct code; Test: QR data URL is valid","QR code generates scannable image; TOTP verification succeeds within time window","src/auth/2fa/**","Use speakeasy + qrcode libraries || src/auth/oauth/strategy.ts;src/auth/jwt/token.ts","Run full test suite: npm test","2;3","1;2;3","3","","","","","",""
|
||||
```
|
||||
|
||||
**Columns**:
|
||||
|
||||
| Column | Phase | Description |
|
||||
|--------|-------|-------------|
|
||||
| `id` | Input | Unique task identifier (string) |
|
||||
| `title` | Input | Short task title |
|
||||
| `description` | Input | Detailed task description — what to implement |
|
||||
| `test` | Input | Test cases: what tests to write and how to verify (unit/integration/edge) |
|
||||
| `acceptance_criteria` | Input | Acceptance criteria: measurable conditions that define "done" |
|
||||
| `scope` | Input | Target file/directory glob — constrains agent work area, prevents cross-task file conflicts |
|
||||
| `hints` | Input | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. Before `\|\|` = how to implement; after `\|\|` = existing files to read before starting. Either part is optional |
|
||||
| `execution_directives` | Input | Execution constraints: commands to run for verification, tool restrictions, environment requirements |
|
||||
| `deps` | Input | Semicolon-separated dependency task IDs (empty = no deps) |
|
||||
| `context_from` | Input | Semicolon-separated task IDs whose findings this task needs |
|
||||
| `wave` | Computed | Wave number (computed by topological sort, 1-based) |
|
||||
| `status` | Output | `pending` → `completed` / `failed` / `skipped` |
|
||||
| `findings` | Output | Key discoveries or implementation notes (max 500 chars) |
|
||||
| `files_modified` | Output | Semicolon-separated file paths |
|
||||
| `tests_passed` | Output | Whether all defined test cases passed (true/false) |
|
||||
| `acceptance_met` | Output | Summary of which acceptance criteria were met/unmet |
|
||||
| `error` | Output | Error message if failed (empty if success) |
|
||||
|
||||
### Per-Wave CSV (Temporary)
|
||||
|
||||
Each wave generates a temporary `wave-{N}.csv` with an extra `prev_context` column:
|
||||
|
||||
```csv
|
||||
id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context
|
||||
"2","Implement OAuth","Add OAuth integration","Unit test: mock OAuth callback returns valid token","OAuth login redirects to provider; callback returns JWT","src/auth/oauth/**","Use passport.js strategy pattern || src/auth/index.ts;docs/oauth-flow.md","Run npm test -- --grep oauth","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
|
||||
"3","Add JWT tokens","Implement JWT","Unit test: sign/verify round-trip; Edge test: expired token returns 401","generateToken() returns valid JWT; verifyToken() rejects expired/tampered tokens","src/auth/jwt/**","Use jsonwebtoken library; Set default expiry 1h || src/config/auth.ts","Ensure tsc --noEmit passes","1","1","2","[Task 1] Created auth/ with index.ts and types.ts"
|
||||
```
|
||||
|
||||
The `prev_context` column is built from `context_from` by looking up completed tasks' `findings` in the master CSV.
|
||||
|
||||
---
|
||||
|
||||
## Output Artifacts
|
||||
|
||||
| File | Purpose | Lifecycle |
|
||||
|------|---------|-----------|
|
||||
| `tasks.csv` | Master state — all tasks with status/findings | Updated after each wave |
|
||||
| `wave-{N}.csv` | Per-wave input (temporary) | Created before wave, deleted after |
|
||||
| `results.csv` | Final export of all task results | Created in Phase 3 |
|
||||
| `discoveries.ndjson` | Shared exploration board across all agents | Append-only, carries across waves |
|
||||
| `context.md` | Human-readable execution report | Created in Phase 3 |
|
||||
|
||||
---
|
||||
|
||||
## Session Structure
|
||||
|
||||
```
|
||||
.workflow/.csv-wave/{session-id}/
|
||||
├── tasks.csv # Master state (updated per wave)
|
||||
├── results.csv # Final results export
|
||||
├── discoveries.ndjson # Shared discovery board (all agents)
|
||||
├── context.md # Human-readable report
|
||||
└── wave-{N}.csv # Temporary per-wave input (cleaned up)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation
|
||||
|
||||
### Session Initialization
|
||||
|
||||
```javascript
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
// Parse flags
|
||||
const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
|
||||
const continueMode = $ARGUMENTS.includes('--continue')
|
||||
const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/)
|
||||
const maxConcurrency = concurrencyMatch ? parseInt(concurrencyMatch[1]) : 4
|
||||
|
||||
// Clean requirement text (remove flags)
|
||||
const requirement = $ARGUMENTS
|
||||
.replace(/--yes|-y|--continue|--concurrency\s+\d+|-c\s+\d+/g, '')
|
||||
.trim()
|
||||
|
||||
const slug = requirement.toLowerCase()
|
||||
.replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
|
||||
.substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '')
|
||||
const sessionId = `cwp-${slug}-${dateStr}`
|
||||
const sessionFolder = `.workflow/.csv-wave/${sessionId}`
|
||||
|
||||
// Continue mode: find existing session
|
||||
if (continueMode) {
|
||||
const existing = Bash(`ls -t .workflow/.csv-wave/ 2>/dev/null | head -1`).trim()
|
||||
if (existing) {
|
||||
sessionId = existing
|
||||
sessionFolder = `.workflow/.csv-wave/${sessionId}`
|
||||
// Read existing tasks.csv, find incomplete waves, resume from there
|
||||
const existingCsv = Read(`${sessionFolder}/tasks.csv`)
|
||||
// → jump to Phase 2 with remaining waves
|
||||
}
|
||||
}
|
||||
|
||||
Bash(`mkdir -p ${sessionFolder}`)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Phase 1: Requirement → CSV
|
||||
|
||||
**Objective**: Decompose requirement into tasks, compute dependency waves, generate tasks.csv.
|
||||
|
||||
**Steps**:
|
||||
|
||||
1. **Decompose Requirement**
|
||||
|
||||
```javascript
|
||||
// Use ccw cli to decompose requirement into subtasks
|
||||
Bash({
|
||||
command: `ccw cli -p "PURPOSE: Decompose requirement into 3-10 atomic tasks for batch agent execution. Each task must include implementation description, test cases, and acceptance criteria.
|
||||
TASK:
|
||||
• Parse requirement into independent subtasks
|
||||
• Identify dependencies between tasks (which must complete before others)
|
||||
• Identify context flow (which tasks need previous tasks' findings)
|
||||
• For each task, define concrete test cases (unit/integration/edge)
|
||||
• For each task, define measurable acceptance criteria (what defines 'done')
|
||||
• Each task must be executable by a single agent with file read/write access
|
||||
MODE: analysis
|
||||
CONTEXT: @**/*
|
||||
EXPECTED: JSON object with tasks array. Each task: {id: string, title: string, description: string, test: string, acceptance_criteria: string, scope: string, hints: string, execution_directives: string, deps: string[], context_from: string[]}.
|
||||
- description: what to implement (specific enough for an agent to execute independently)
|
||||
- test: what tests to write and how to verify (e.g. 'Unit test: X returns Y; Edge test: handles Z')
|
||||
- acceptance_criteria: measurable conditions that define done (e.g. 'API returns 200; token expires after 1h')
|
||||
- scope: target file/directory glob (e.g. 'src/auth/**') — tasks in same wave MUST have non-overlapping scopes
|
||||
- hints: implementation tips + reference files, format '<tips> || <ref_file1>;<ref_file2>' (e.g. 'Use strategy pattern || src/base/Strategy.ts;docs/design.md')
|
||||
- execution_directives: commands to run for verification or tool constraints (e.g. 'Run npm test --bail; Ensure tsc passes')
|
||||
- deps: task IDs that must complete first
|
||||
- context_from: task IDs whose findings are needed
|
||||
CONSTRAINTS: 3-10 tasks | Each task is atomic | No circular deps | test and acceptance_criteria must be concrete and verifiable | Same-wave tasks must have non-overlapping scopes
|
||||
|
||||
REQUIREMENT: ${requirement}" --tool gemini --mode analysis --rule planning-breakdown-task-steps`,
|
||||
run_in_background: true
|
||||
})
|
||||
// Wait for CLI completion via hook callback
|
||||
// Parse JSON from CLI output → decomposedTasks[]
|
||||
```
|
||||
|
||||
2. **Compute Waves** (Topological Sort → Depth Grouping)
|
||||
|
||||
```javascript
|
||||
function computeWaves(tasks) {
  // Kahn's BFS topological sort with depth tracking.
  // Input: tasks with `id` and `deps: string[]`.
  // Returns { waveAssignment: Map<id, wave>, maxWave } (waves are 1-based).
  // Throws on circular dependencies.
  const taskMap = new Map(tasks.map(t => [t.id, t]))
  const inDegree = new Map(tasks.map(t => [t.id, 0]))
  const adjList = new Map(tasks.map(t => [t.id, []]))

  for (const task of tasks) {
    for (const dep of task.deps) {
      // Deps referencing unknown task ids are ignored.
      if (taskMap.has(dep)) {
        adjList.get(dep).push(task.id)
        inDegree.set(task.id, inDegree.get(task.id) + 1)
      }
    }
  }

  // BFS-based topological sort with depth tracking
  const queue = [] // [taskId, depth]
  const waveAssignment = new Map()

  for (const [id, deg] of inDegree) {
    if (deg === 0) {
      queue.push([id, 1])
      waveAssignment.set(id, 1)
    }
  }

  let maxWave = 1
  let idx = 0
  while (idx < queue.length) {
    const [current, depth] = queue[idx++]
    for (const next of adjList.get(current)) {
      const newDeg = inDegree.get(next) - 1
      inDegree.set(next, newDeg)
      // Tentative depth = 1 + max over processed predecessors so far.
      const nextDepth = Math.max(waveAssignment.get(next) || 0, depth + 1)
      waveAssignment.set(next, nextDepth)
      if (newDeg === 0) {
        queue.push([next, nextDepth])
        maxWave = Math.max(maxWave, nextDepth)
      }
    }
  }

  // Cycle detection: a task that was never dequeued still has inDegree > 0.
  // (Checking `!waveAssignment.has(id)` alone is insufficient: a cycle member
  // that also has an acyclic predecessor receives a tentative wave entry above
  // yet is never processed, letting the cycle slip through undetected.)
  for (const [id, deg] of inDegree) {
    if (deg > 0) {
      throw new Error(`Circular dependency detected involving task ${id}`)
    }
  }

  return { waveAssignment, maxWave }
}
|
||||
|
||||
const { waveAssignment, maxWave } = computeWaves(decomposedTasks)
|
||||
```
|
||||
|
||||
3. **Generate tasks.csv**
|
||||
|
||||
```javascript
|
||||
const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error'
|
||||
const rows = decomposedTasks.map(task => {
|
||||
const wave = waveAssignment.get(task.id)
|
||||
return [
|
||||
task.id,
|
||||
csvEscape(task.title),
|
||||
csvEscape(task.description),
|
||||
csvEscape(task.test),
|
||||
csvEscape(task.acceptance_criteria),
|
||||
csvEscape(task.scope),
|
||||
csvEscape(task.hints),
|
||||
csvEscape(task.execution_directives),
|
||||
task.deps.join(';'),
|
||||
task.context_from.join(';'),
|
||||
wave,
|
||||
'pending', // status
|
||||
'', // findings
|
||||
'', // files_modified
|
||||
'', // tests_passed
|
||||
'', // acceptance_met
|
||||
'' // error
|
||||
].map(cell => `"${String(cell).replace(/"/g, '""')}"`).join(',')
|
||||
})
|
||||
|
||||
Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n'))
|
||||
```
|
||||
|
||||
4. **User Validation** (skip if AUTO_YES)
|
||||
|
||||
```javascript
|
||||
if (!AUTO_YES) {
|
||||
// Display task breakdown with wave assignment
|
||||
console.log(`\n## Task Breakdown (${decomposedTasks.length} tasks, ${maxWave} waves)\n`)
|
||||
for (let w = 1; w <= maxWave; w++) {
|
||||
const waveTasks = decomposedTasks.filter(t => waveAssignment.get(t.id) === w)
|
||||
console.log(`### Wave ${w} (${waveTasks.length} tasks, concurrent)`)
|
||||
waveTasks.forEach(t => console.log(` - [${t.id}] ${t.title}`))
|
||||
}
|
||||
|
||||
const answer = AskUserQuestion({
|
||||
questions: [{
|
||||
question: "Approve task breakdown?",
|
||||
header: "Validation",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Approve", description: "Proceed with wave execution" },
|
||||
{ label: "Modify", description: `Edit ${sessionFolder}/tasks.csv manually, then --continue` },
|
||||
{ label: "Cancel", description: "Abort" }
|
||||
]
|
||||
}]
|
||||
}) // BLOCKS
|
||||
|
||||
if (answer.Validation === "Modify") {
|
||||
console.log(`Edit: ${sessionFolder}/tasks.csv\nResume: $csv-wave-pipeline --continue`)
|
||||
return
|
||||
} else if (answer.Validation === "Cancel") {
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Success Criteria**:
|
||||
- tasks.csv created with valid schema and wave assignments
|
||||
- No circular dependencies
|
||||
- User approved (or AUTO_YES)
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Wave Execution Engine
|
||||
|
||||
**Objective**: Execute tasks wave-by-wave via `spawn_agents_on_csv`. Each wave sees previous waves' results.
|
||||
|
||||
**Steps**:
|
||||
|
||||
1. **Wave Loop**
|
||||
|
||||
```javascript
|
||||
const failedIds = new Set()
|
||||
const skippedIds = new Set()
|
||||
|
||||
for (let wave = 1; wave <= maxWave; wave++) {
|
||||
console.log(`\n## Wave ${wave}/${maxWave}\n`)
|
||||
|
||||
// 1. Read current master CSV
|
||||
const masterCsv = parseCsv(Read(`${sessionFolder}/tasks.csv`))
|
||||
|
||||
// 2. Filter tasks for this wave
|
||||
const waveTasks = masterCsv.filter(row => parseInt(row.wave) === wave)
|
||||
|
||||
// 3. Skip tasks whose deps failed
|
||||
const executableTasks = []
|
||||
for (const task of waveTasks) {
|
||||
const deps = task.deps.split(';').filter(Boolean)
|
||||
if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) {
|
||||
skippedIds.add(task.id)
|
||||
// Update master CSV: mark as skipped
|
||||
updateMasterCsvRow(sessionFolder, task.id, {
|
||||
status: 'skipped',
|
||||
error: 'Dependency failed or skipped'
|
||||
})
|
||||
console.log(` [${task.id}] ${task.title} → SKIPPED (dependency failed)`)
|
||||
continue
|
||||
}
|
||||
executableTasks.push(task)
|
||||
}
|
||||
|
||||
if (executableTasks.length === 0) {
|
||||
console.log(` No executable tasks in wave ${wave}`)
|
||||
continue
|
||||
}
|
||||
|
||||
// 4. Build prev_context for each task
|
||||
for (const task of executableTasks) {
|
||||
const contextIds = task.context_from.split(';').filter(Boolean)
|
||||
const prevFindings = contextIds
|
||||
.map(id => {
|
||||
const prevRow = masterCsv.find(r => r.id === id)
|
||||
if (prevRow && prevRow.status === 'completed' && prevRow.findings) {
|
||||
return `[Task ${id}: ${prevRow.title}] ${prevRow.findings}`
|
||||
}
|
||||
return null
|
||||
})
|
||||
.filter(Boolean)
|
||||
.join('\n')
|
||||
task.prev_context = prevFindings || 'No previous context available'
|
||||
}
|
||||
|
||||
// 5. Write wave CSV
|
||||
const waveHeader = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,prev_context'
|
||||
const waveRows = executableTasks.map(t =>
|
||||
[t.id, t.title, t.description, t.test, t.acceptance_criteria, t.scope, t.hints, t.execution_directives, t.deps, t.context_from, t.wave, t.prev_context]
|
||||
.map(cell => `"${String(cell).replace(/"/g, '""')}"`)
|
||||
.join(',')
|
||||
)
|
||||
Write(`${sessionFolder}/wave-${wave}.csv`, [waveHeader, ...waveRows].join('\n'))
|
||||
|
||||
// 6. Execute wave
|
||||
console.log(` Executing ${executableTasks.length} tasks (concurrency: ${maxConcurrency})...`)
|
||||
|
||||
const waveResult = spawn_agents_on_csv({
|
||||
csv_path: `${sessionFolder}/wave-${wave}.csv`,
|
||||
id_column: "id",
|
||||
instruction: buildInstructionTemplate(sessionFolder, wave),
|
||||
max_concurrency: maxConcurrency,
|
||||
max_runtime_seconds: 600,
|
||||
output_csv_path: `${sessionFolder}/wave-${wave}-results.csv`,
|
||||
output_schema: {
|
||||
type: "object",
|
||||
properties: {
|
||||
id: { type: "string" },
|
||||
status: { type: "string", enum: ["completed", "failed"] },
|
||||
findings: { type: "string" },
|
||||
files_modified: { type: "array", items: { type: "string" } },
|
||||
tests_passed: { type: "boolean" },
|
||||
acceptance_met: { type: "string" },
|
||||
error: { type: "string" }
|
||||
},
|
||||
required: ["id", "status", "findings", "tests_passed"]
|
||||
}
|
||||
})
|
||||
// ↑ Blocks until all agents in this wave complete
|
||||
|
||||
// 7. Merge results into master CSV
|
||||
const waveResults = parseCsv(Read(`${sessionFolder}/wave-${wave}-results.csv`))
|
||||
for (const result of waveResults) {
|
||||
updateMasterCsvRow(sessionFolder, result.id, {
|
||||
status: result.status,
|
||||
findings: result.findings || '',
|
||||
files_modified: (result.files_modified || []).join(';'),
|
||||
tests_passed: String(result.tests_passed ?? ''),
|
||||
acceptance_met: result.acceptance_met || '',
|
||||
error: result.error || ''
|
||||
})
|
||||
|
||||
if (result.status === 'failed') {
|
||||
failedIds.add(result.id)
|
||||
console.log(` [${result.id}] ${result.title} → FAILED: ${result.error}`)
|
||||
} else {
|
||||
console.log(` [${result.id}] ${result.title} → COMPLETED`)
|
||||
}
|
||||
}
|
||||
|
||||
// 8. Cleanup temporary wave CSV
|
||||
Bash(`rm -f "${sessionFolder}/wave-${wave}.csv"`)
|
||||
|
||||
console.log(` Wave ${wave} done: ${waveResults.filter(r => r.status === 'completed').length} completed, ${waveResults.filter(r => r.status === 'failed').length} failed`)
|
||||
}
|
||||
```
|
||||
|
||||
2. **Instruction Template Builder**
|
||||
|
||||
```javascript
|
||||
function buildInstructionTemplate(sessionFolder, wave) {
|
||||
return `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS
|
||||
1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not)
|
||||
2. Read project context: .workflow/project-tech.json (if exists)
|
||||
|
||||
---
|
||||
|
||||
## Your Task
|
||||
|
||||
**Task ID**: {id}
|
||||
**Title**: {title}
|
||||
**Description**: {description}
|
||||
**Scope**: {scope}
|
||||
|
||||
### Implementation Hints & Reference Files
|
||||
{hints}
|
||||
|
||||
> Format: \`<tips> || <ref_file1>;<ref_file2>\`. Read ALL reference files (after ||) before starting implementation. Apply tips (before ||) as implementation guidance.
|
||||
|
||||
### Execution Directives
|
||||
{execution_directives}
|
||||
|
||||
> Commands to run for verification, tool restrictions, or environment requirements. Follow these constraints during and after implementation.
|
||||
|
||||
### Test Cases
|
||||
{test}
|
||||
|
||||
### Acceptance Criteria
|
||||
{acceptance_criteria}
|
||||
|
||||
### Previous Tasks' Findings (Context)
|
||||
{prev_context}
|
||||
|
||||
---
|
||||
|
||||
## Execution Protocol
|
||||
|
||||
1. **Read references**: Parse {hints} — read all files listed after \`||\` to understand existing patterns
|
||||
2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings
|
||||
3. **Use context**: Apply previous tasks' findings from prev_context above
|
||||
4. **Stay in scope**: ONLY create/modify files within {scope} — do NOT touch files outside this boundary
|
||||
5. **Apply hints**: Follow implementation tips from {hints} (before \`||\`)
|
||||
6. **Execute**: Implement the task as described
|
||||
7. **Write tests**: Implement the test cases defined above
|
||||
8. **Run directives**: Execute commands from {execution_directives} to verify your work
|
||||
9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion
|
||||
10. **Share discoveries**: Append exploration findings to shared board:
|
||||
\`\`\`bash
|
||||
echo '{"ts":"<ISO8601>","worker":"{id}","type":"<type>","data":{...}}' >> ${sessionFolder}/discoveries.ndjson
|
||||
\`\`\`
|
||||
11. **Report result**: Return JSON via report_agent_job_result
|
||||
|
||||
### Discovery Types to Share
|
||||
- \`code_pattern\`: {name, file, description} — reusable patterns found
|
||||
- \`integration_point\`: {file, description, exports[]} — module connection points
|
||||
- \`convention\`: {naming, imports, formatting} — code style conventions
|
||||
- \`blocker\`: {issue, severity, impact} — blocking issues encountered
|
||||
|
||||
---
|
||||
|
||||
## Output (report_agent_job_result)
|
||||
|
||||
Return JSON:
|
||||
{
|
||||
"id": "{id}",
|
||||
"status": "completed" | "failed",
|
||||
"findings": "Key discoveries and implementation notes (max 500 chars)",
|
||||
"files_modified": ["path1", "path2"],
|
||||
"tests_passed": true | false,
|
||||
"acceptance_met": "Summary of which acceptance criteria were met/unmet",
|
||||
"error": ""
|
||||
}
|
||||
|
||||
**IMPORTANT**: Set status to "completed" ONLY if:
|
||||
- All test cases pass
|
||||
- All acceptance criteria are met
|
||||
Otherwise set status to "failed" with details in error field.
|
||||
`
|
||||
}
|
||||
```
|
||||
|
||||
3. **Master CSV Update Helper**
|
||||
|
||||
```javascript
|
||||
function updateMasterCsvRow(sessionFolder, taskId, updates) {
|
||||
const csvPath = `${sessionFolder}/tasks.csv`
|
||||
const content = Read(csvPath)
|
||||
const lines = content.split('\n')
|
||||
const header = lines[0].split(',')
|
||||
|
||||
for (let i = 1; i < lines.length; i++) {
|
||||
const cells = parseCsvLine(lines[i])
|
||||
if (cells[0] === taskId || cells[0] === `"${taskId}"`) {
|
||||
// Update specified columns
|
||||
for (const [col, val] of Object.entries(updates)) {
|
||||
const colIdx = header.indexOf(col)
|
||||
if (colIdx >= 0) {
|
||||
cells[colIdx] = `"${String(val).replace(/"/g, '""')}"`
|
||||
}
|
||||
}
|
||||
lines[i] = cells.join(',')
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
Write(csvPath, lines.join('\n'))
|
||||
}
|
||||
```
|
||||
|
||||
**Success Criteria**:
|
||||
- All waves executed in order
|
||||
- Each wave's results merged into master CSV before next wave starts
|
||||
- Dependent tasks skipped when predecessor failed
|
||||
- discoveries.ndjson accumulated across all waves
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Results Aggregation
|
||||
|
||||
**Objective**: Generate final results and human-readable report.
|
||||
|
||||
**Steps**:
|
||||
|
||||
1. **Export results.csv**
|
||||
|
||||
```javascript
|
||||
const masterCsv = Read(`${sessionFolder}/tasks.csv`)
|
||||
// results.csv = master CSV (already has all results populated)
|
||||
Write(`${sessionFolder}/results.csv`, masterCsv)
|
||||
```
|
||||
|
||||
2. **Generate context.md**
|
||||
|
||||
```javascript
|
||||
const tasks = parseCsv(masterCsv)
|
||||
const completed = tasks.filter(t => t.status === 'completed')
|
||||
const failed = tasks.filter(t => t.status === 'failed')
|
||||
const skipped = tasks.filter(t => t.status === 'skipped')
|
||||
|
||||
const contextContent = `# CSV Batch Execution Report
|
||||
|
||||
**Session**: ${sessionId}
|
||||
**Requirement**: ${requirement}
|
||||
**Completed**: ${getUtc8ISOString()}
|
||||
**Waves**: ${maxWave} | **Concurrency**: ${maxConcurrency}
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Metric | Count |
|
||||
|--------|-------|
|
||||
| Total Tasks | ${tasks.length} |
|
||||
| Completed | ${completed.length} |
|
||||
| Failed | ${failed.length} |
|
||||
| Skipped | ${skipped.length} |
|
||||
| Waves | ${maxWave} |
|
||||
|
||||
---
|
||||
|
||||
## Wave Execution
|
||||
|
||||
${Array.from({ length: maxWave }, (_, i) => i + 1).map(w => {
|
||||
const waveTasks = tasks.filter(t => parseInt(t.wave) === w)
|
||||
return `### Wave ${w}
|
||||
${waveTasks.map(t => `- **[${t.id}] ${t.title}**: ${t.status}${t.tests_passed ? ' ✓tests' : ''}${t.error ? ' — ' + t.error : ''}
|
||||
${t.findings ? 'Findings: ' + t.findings : ''}`).join('\n')}`
|
||||
}).join('\n\n')}
|
||||
|
||||
---
|
||||
|
||||
## Task Details
|
||||
|
||||
${tasks.map(t => `### ${t.id}: ${t.title}
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Status | ${t.status} |
|
||||
| Wave | ${t.wave} |
|
||||
| Scope | ${t.scope || 'none'} |
|
||||
| Dependencies | ${t.deps || 'none'} |
|
||||
| Context From | ${t.context_from || 'none'} |
|
||||
| Tests Passed | ${t.tests_passed || 'N/A'} |
|
||||
| Acceptance Met | ${t.acceptance_met || 'N/A'} |
|
||||
| Error | ${t.error || 'none'} |
|
||||
|
||||
**Description**: ${t.description}
|
||||
|
||||
**Test Cases**: ${t.test || 'N/A'}
|
||||
|
||||
**Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'}
|
||||
|
||||
**Hints**: ${t.hints || 'N/A'}
|
||||
|
||||
**Execution Directives**: ${t.execution_directives || 'N/A'}
|
||||
|
||||
**Findings**: ${t.findings || 'N/A'}
|
||||
|
||||
**Files Modified**: ${t.files_modified || 'none'}
|
||||
`).join('\n---\n')}
|
||||
|
||||
---
|
||||
|
||||
## All Modified Files
|
||||
|
||||
${[...new Set(tasks.flatMap(t => (t.files_modified || '').split(';')).filter(Boolean))].map(f => '- ' + f).join('\n') || 'None'}
|
||||
`
|
||||
|
||||
Write(`${sessionFolder}/context.md`, contextContent)
|
||||
```
|
||||
|
||||
3. **Display Summary**
|
||||
|
||||
```javascript
|
||||
console.log(`
|
||||
## Execution Complete
|
||||
|
||||
- **Session**: ${sessionId}
|
||||
- **Waves**: ${maxWave}
|
||||
- **Completed**: ${completed.length}/${tasks.length}
|
||||
- **Failed**: ${failed.length}
|
||||
- **Skipped**: ${skipped.length}
|
||||
|
||||
**Results**: ${sessionFolder}/results.csv
|
||||
**Report**: ${sessionFolder}/context.md
|
||||
**Discoveries**: ${sessionFolder}/discoveries.ndjson
|
||||
`)
|
||||
```
|
||||
|
||||
4. **Offer Next Steps** (skip if AUTO_YES)
|
||||
|
||||
```javascript
|
||||
if (!AUTO_YES && failed.length > 0) {
|
||||
const answer = AskUserQuestion({
|
||||
questions: [{
|
||||
question: `${failed.length} tasks failed. Next action?`,
|
||||
header: "Next Step",
|
||||
multiSelect: false,
|
||||
options: [
|
||||
{ label: "Retry Failed", description: `Re-execute ${failed.length} failed tasks with updated context` },
|
||||
{ label: "View Report", description: "Display context.md" },
|
||||
{ label: "Done", description: "Complete session" }
|
||||
]
|
||||
}]
|
||||
}) // BLOCKS
|
||||
|
||||
if (answer['Next Step'] === "Retry Failed") {
|
||||
// Reset failed tasks to pending, re-run Phase 2 for their waves
|
||||
for (const task of failed) {
|
||||
updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
|
||||
}
|
||||
    // Reset all skipped tasks so they become eligible again once their retried dependencies complete
|
||||
for (const task of skipped) {
|
||||
updateMasterCsvRow(sessionFolder, task.id, { status: 'pending', error: '' })
|
||||
}
|
||||
// Re-execute Phase 2 (loop will skip already-completed tasks)
|
||||
// → goto Phase 2
|
||||
} else if (answer['Next Step'] === "View Report") {
|
||||
console.log(Read(`${sessionFolder}/context.md`))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Success Criteria**:
|
||||
- results.csv exported
|
||||
- context.md generated
|
||||
- Summary displayed to user
|
||||
|
||||
---
|
||||
|
||||
## Shared Discovery Board Protocol
|
||||
|
||||
All agents across all waves share `discoveries.ndjson`. This eliminates redundant codebase exploration.
|
||||
|
||||
**Lifecycle**:
|
||||
- Created by the first agent to write a discovery
|
||||
- Carries over across waves — never cleared
|
||||
- Agents append via `echo '...' >> discoveries.ndjson`
|
||||
|
||||
**Format**: NDJSON, each line is a self-contained JSON:
|
||||
|
||||
```jsonl
|
||||
{"ts":"2026-02-28T10:00:00+08:00","worker":"1","type":"code_pattern","data":{"name":"repository-pattern","file":"src/repos/Base.ts","description":"Abstract CRUD repository"}}
|
||||
{"ts":"2026-02-28T10:01:00+08:00","worker":"2","type":"integration_point","data":{"file":"src/auth/index.ts","description":"Auth module entry","exports":["authenticate","authorize"]}}
|
||||
```
|
||||
|
||||
**Discovery Types**:
|
||||
|
||||
| type | Dedup Key | Description |
|
||||
|------|-----------|-------------|
|
||||
| `code_pattern` | `data.name` | Reusable code pattern found |
|
||||
| `integration_point` | `data.file` | Module connection point |
|
||||
| `convention` | singleton | Code style conventions |
|
||||
| `blocker` | `data.issue` | Blocking issue encountered |
|
||||
| `tech_stack` | singleton | Project technology stack |
|
||||
| `test_command` | singleton | Test commands discovered |
|
||||
|
||||
**Protocol Rules**:
|
||||
1. Read board before own exploration → skip covered areas
|
||||
2. Write discoveries immediately via `echo >>` → don't batch
|
||||
3. Deduplicate — check existing entries; skip if same type + dedup key exists
|
||||
4. Append-only — never modify or delete existing lines
|
||||
|
||||
---
|
||||
|
||||
## Wave Computation Details
|
||||
|
||||
### Algorithm
|
||||
|
||||
Kahn's BFS topological sort with depth tracking:
|
||||
|
||||
```
|
||||
Input: tasks[] with deps[]
|
||||
Output: waveAssignment (taskId → wave number)
|
||||
|
||||
1. Build in-degree map and adjacency list from deps
|
||||
2. Enqueue all tasks with in-degree 0 at wave 1
|
||||
3. BFS: for each dequeued task at wave W:
|
||||
- For each dependent task D:
|
||||
- Decrement D's in-degree
|
||||
- D.wave = max(D.wave, W + 1)
|
||||
- If D's in-degree reaches 0, enqueue D
|
||||
4. Any task without wave assignment → circular dependency error
|
||||
```
|
||||
|
||||
### Wave Properties
|
||||
|
||||
- **Wave 1**: No dependencies — all tasks in wave 1 are fully independent
|
||||
- **Wave N**: All dependencies are in waves 1..(N-1) — guaranteed completed before wave N starts
|
||||
- **Within a wave**: Tasks are independent of each other → safe for concurrent execution
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
Task A (no deps) → Wave 1
|
||||
Task B (no deps) → Wave 1
|
||||
Task C (deps: A) → Wave 2
|
||||
Task D (deps: A, B) → Wave 2
|
||||
Task E (deps: C, D) → Wave 3
|
||||
|
||||
Execution:
|
||||
Wave 1: [A, B] ← concurrent
|
||||
Wave 2: [C, D] ← concurrent, sees A+B findings
|
||||
Wave 3: [E] ← sees A+B+C+D findings
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Context Propagation Flow
|
||||
|
||||
```
|
||||
Wave 1 agents:
|
||||
├─ Execute tasks (no prev_context)
|
||||
├─ Write findings to report_agent_job_result
|
||||
└─ Append discoveries to discoveries.ndjson
|
||||
|
||||
↓ merge results into master CSV
|
||||
|
||||
Wave 2 agents:
|
||||
├─ Read discoveries.ndjson (exploration sharing)
|
||||
├─ Read prev_context column (wave 1 findings from context_from)
|
||||
├─ Execute tasks with full upstream context
|
||||
├─ Write findings to report_agent_job_result
|
||||
└─ Append new discoveries to discoveries.ndjson
|
||||
|
||||
↓ merge results into master CSV
|
||||
|
||||
Wave 3 agents:
|
||||
├─ Read discoveries.ndjson (accumulated from waves 1+2)
|
||||
├─ Read prev_context column (wave 1+2 findings from context_from)
|
||||
├─ Execute tasks
|
||||
└─ ...
|
||||
```
|
||||
|
||||
**Two context channels**:
|
||||
1. **CSV findings** (structured): `context_from` column → `prev_context` injection — task-specific directed context
|
||||
2. **NDJSON discoveries** (broadcast): `discoveries.ndjson` — general exploration findings available to all
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Error | Resolution |
|
||||
|-------|------------|
|
||||
| Circular dependency | Detect in wave computation, abort with error message |
|
||||
| Agent timeout | Mark as failed in results, continue with wave |
|
||||
| Agent failed | Mark as failed, skip dependent tasks in later waves |
|
||||
| All agents in wave failed | Log error, offer retry or abort |
|
||||
| CSV parse error | Validate CSV format before execution, show line number |
|
||||
| discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries |
|
||||
| Continue mode: no session found | List available sessions, prompt user to select |
|
||||
|
||||
---
|
||||
|
||||
## Core Rules
|
||||
|
||||
1. **Start Immediately**: First action is session initialization, then Phase 1
|
||||
2. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes and results are merged
|
||||
3. **CSV is Source of Truth**: Master tasks.csv holds all state — always read before wave, always write after
|
||||
4. **Context Propagation**: prev_context built from master CSV, not from memory
|
||||
5. **Discovery Board is Append-Only**: Never clear, modify, or recreate discoveries.ndjson
|
||||
6. **Skip on Failure**: If a dependency failed, skip the dependent task (don't attempt)
|
||||
7. **Cleanup Temp Files**: Remove wave-{N}.csv after results are merged
|
||||
8. **DO NOT STOP**: Continuous execution until all waves complete or all remaining tasks are skipped
|
||||
|
||||
---
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism benefit
|
||||
2. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism
|
||||
3. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained
|
||||
4. **Context From ≠ Deps**: `deps` = execution order constraint; `context_from` = information flow. A task can have `context_from` without `deps` (it just reads previous findings but doesn't require them to be done first in its wave)
|
||||
5. **Concurrency Tuning**: `-c 1` for serial execution (maximum context sharing); `-c 8` for I/O-bound tasks
|
||||
|
||||
---
|
||||
|
||||
## Usage Recommendations
|
||||
|
||||
| Scenario | Recommended Approach |
|
||||
|----------|---------------------|
|
||||
| Independent parallel tasks (no deps) | `$csv-wave-pipeline -c 8` — single wave, max parallelism |
|
||||
| Linear pipeline (A→B→C) | `$csv-wave-pipeline -c 1` — 3 waves, serial, full context |
|
||||
| Diamond dependency (A→B,C→D) | `$csv-wave-pipeline` — 3 waves, B+C concurrent in wave 2 |
|
||||
| Complex requirement, unclear tasks | Use `$roadmap-with-file` first for planning, then feed issues here |
|
||||
| Single complex task | Use `$lite-execute` instead |
|
||||
@@ -1,7 +1,6 @@
|
||||
---
|
||||
name: review-cycle
|
||||
description: Unified multi-dimensional code review with automated fix orchestration. Supports session-based (git changes) and module-based (path patterns) review modes with 7-dimension parallel analysis, iterative deep-dive, and automated fix pipeline. Triggers on "workflow:review-cycle", "workflow:review-session-cycle", "workflow:review-module-cycle", "workflow:review-cycle-fix".
|
||||
allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
# Review Cycle
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
name: roadmap-with-file
|
||||
description: Strategic requirement roadmap with iterative decomposition and issue creation. Outputs roadmap.md (human-readable, single source) + issues.jsonl (machine-executable). Handoff to team-planex.
|
||||
argument-hint: "[-y|--yes] [-c|--continue] [-m progressive|direct|auto] \"requirement description\""
|
||||
allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
---
|
||||
name: team-lifecycle
|
||||
description: Full lifecycle orchestrator - spec/impl/test. Spawn-wait-close pipeline with inline discuss subagent, shared explore cache, fast-advance, and consensus severity routing.
|
||||
agents: analyst, writer, planner, executor, tester, reviewer, architect, fe-developer, fe-qa
|
||||
phases: 5
|
||||
---
|
||||
|
||||
# Team Lifecycle Orchestrator
|
||||
New file: .codex/skills/team-planex/SKILL.md (261 lines)
@@ -0,0 +1,261 @@
|
||||
---
|
||||
name: team-planex
|
||||
description: |
|
||||
Inline planning + delegated execution pipeline. Main flow does planning directly,
|
||||
spawns Codex executor per issue immediately. All execution via Codex CLI only.
|
||||
---
|
||||
|
||||
# Team PlanEx (Codex)
|
||||
|
||||
主流程内联规划 + 委托执行。SKILL.md 自身完成规划(不再 spawn planner agent),每完成一个 issue 的 solution 后立即 spawn executor agent 并行实现,无需等待所有规划完成。
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────┐
|
||||
│ SKILL.md (主流程 = 规划 + 节拍控制) │
|
||||
│ │
|
||||
│ Phase 1: 解析输入 + 初始化 session │
|
||||
│ Phase 2: 逐 issue 规划循环 (内联) │
|
||||
│ ├── issue-plan → 写 solution artifact │
|
||||
│ ├── spawn executor agent ────────────┼──> [executor] 实现
|
||||
│ └── continue (不等 executor) │
|
||||
│ Phase 3: 等待所有 executors │
|
||||
│ Phase 4: 汇总报告 │
|
||||
└────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Agent Registry
|
||||
|
||||
| Agent | Role File | Responsibility |
|
||||
|-------|-----------|----------------|
|
||||
| `executor` | `~/.codex/agents/planex-executor.md` | Codex CLI implementation per issue |
|
||||
|
||||
> Executor agent must be deployed to `~/.codex/agents/` before use.
|
||||
> Source: `.codex/skills/team-planex/agents/`
|
||||
|
||||
---
|
||||
|
||||
## Input Parsing
|
||||
|
||||
Supported input types (parse from `$ARGUMENTS`):
|
||||
|
||||
| Type | Detection | Handler |
|
||||
|------|-----------|---------|
|
||||
| Issue IDs | `ISS-\d{8}-\d{6}` regex | Use directly for planning |
|
||||
| Text | `--text '...'` flag | Create issue(s) first via CLI |
|
||||
| Plan file | `--plan <path>` flag | Read file, parse phases, batch create issues |
|
||||
|
||||
### Issue Creation (when needed)
|
||||
|
||||
For `--text` input:
|
||||
```bash
|
||||
ccw issue create --data '{"title":"<title>","description":"<description>"}' --json
|
||||
```
|
||||
|
||||
For `--plan` input:
|
||||
- Match `## Phase N: Title`, `## Step N: Title`, or `### N. Title`
|
||||
- Each match → one issue (title + description from section content)
|
||||
- Fallback: no structure found → entire file as single issue
|
||||
|
||||
---
|
||||
|
||||
## Session Setup
|
||||
|
||||
Before processing issues, initialize session directory:
|
||||
|
||||
```javascript
|
||||
const slug = toSlug(inputDescription).slice(0, 20)
|
||||
const date = new Date().toISOString().slice(0, 10).replace(/-/g, '')
|
||||
const sessionDir = `.workflow/.team/PEX-${slug}-${date}`
|
||||
const artifactsDir = `${sessionDir}/artifacts/solutions`
|
||||
|
||||
Bash(`mkdir -p "${artifactsDir}"`)
|
||||
|
||||
Write({
|
||||
file_path: `${sessionDir}/team-session.json`,
|
||||
content: JSON.stringify({
|
||||
session_id: `PEX-${slug}-${date}`,
|
||||
input_type: inputType,
|
||||
input: rawInput,
|
||||
status: "running",
|
||||
started_at: new Date().toISOString(),
|
||||
executors: []
|
||||
}, null, 2)
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Parse Input + Initialize
|
||||
|
||||
1. Parse `$ARGUMENTS` to determine input type
|
||||
2. Create issues if needed (--text / --plan)
|
||||
3. Collect all issue IDs
|
||||
4. Initialize session directory
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Inline Planning Loop
|
||||
|
||||
For each issue, execute planning inline (no planner agent):
|
||||
|
||||
### 2a. Generate Solution via issue-plan-agent
|
||||
|
||||
```javascript
|
||||
const planAgent = spawn_agent({
|
||||
message: `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS (Agent Execute)
|
||||
1. **Read role definition**: ~/.codex/agents/issue-plan-agent.md (MUST read first)
|
||||
|
||||
---
|
||||
|
||||
issue_ids: ["${issueId}"]
|
||||
project_root: "${projectRoot}"
|
||||
|
||||
## Requirements
|
||||
- Generate solution for this issue
|
||||
- Auto-bind single solution
|
||||
- Output solution JSON when complete
|
||||
`
|
||||
})
|
||||
|
||||
const result = wait({ ids: [planAgent], timeout_ms: 600000 })
|
||||
close_agent({ id: planAgent })
|
||||
```
|
||||
|
||||
### 2b. Write Solution Artifact
|
||||
|
||||
```javascript
|
||||
const solution = parseSolution(result)
|
||||
|
||||
Write({
|
||||
file_path: `${artifactsDir}/${issueId}.json`,
|
||||
content: JSON.stringify({
|
||||
session_id: sessionId,
|
||||
issue_id: issueId,
|
||||
solution: solution,
|
||||
planned_at: new Date().toISOString()
|
||||
}, null, 2)
|
||||
})
|
||||
```
|
||||
|
||||
### 2c. Spawn Executor Immediately
|
||||
|
||||
```javascript
|
||||
const executorId = spawn_agent({
|
||||
message: `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS (Agent Execute)
|
||||
1. **Read role definition**: ~/.codex/agents/planex-executor.md (MUST read first)
|
||||
|
||||
---
|
||||
|
||||
## Issue
|
||||
Issue ID: ${issueId}
|
||||
Solution file: ${artifactsDir}/${issueId}.json
|
||||
Session: ${sessionDir}
|
||||
|
||||
## Execution
|
||||
Load solution from file → implement via Codex CLI → verify tests → commit → report.
|
||||
`
|
||||
})
|
||||
|
||||
executorIds.push(executorId)
|
||||
executorIssueMap[executorId] = issueId
|
||||
```
|
||||
|
||||
### 2d. Continue to Next Issue
|
||||
|
||||
Do NOT wait for executor. Proceed to next issue immediately.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Wait All Executors
|
||||
|
||||
```javascript
|
||||
if (executorIds.length > 0) {
|
||||
const execResults = wait({ ids: executorIds, timeout_ms: 1800000 })
|
||||
|
||||
if (execResults.timed_out) {
|
||||
const pending = executorIds.filter(id => !execResults.status[id]?.completed)
|
||||
if (pending.length > 0) {
|
||||
const pendingIssues = pending.map(id => executorIssueMap[id])
|
||||
Write({
|
||||
file_path: `${sessionDir}/pending-executors.json`,
|
||||
content: JSON.stringify({ pending_issues: pendingIssues, executor_ids: pending }, null, 2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Collect summaries
|
||||
const summaries = executorIds.map(id => ({
|
||||
issue_id: executorIssueMap[id],
|
||||
status: execResults.status[id]?.completed ? 'completed' : 'timeout',
|
||||
output: execResults.status[id]?.completed ?? null
|
||||
}))
|
||||
|
||||
// Cleanup
|
||||
executorIds.forEach(id => {
|
||||
try { close_agent({ id }) } catch { /* already closed */ }
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Report
|
||||
|
||||
```javascript
|
||||
const completed = summaries.filter(s => s.status === 'completed').length
|
||||
const failed = summaries.filter(s => s.status === 'timeout').length
|
||||
|
||||
// Update session
|
||||
Write({
|
||||
file_path: `${sessionDir}/team-session.json`,
|
||||
content: JSON.stringify({
|
||||
...session,
|
||||
status: "completed",
|
||||
completed_at: new Date().toISOString(),
|
||||
results: { total: executorIds.length, completed, failed }
|
||||
}, null, 2)
|
||||
})
|
||||
|
||||
return `
|
||||
## Pipeline Complete
|
||||
|
||||
**Total issues**: ${executorIds.length}
|
||||
**Completed**: ${completed}
|
||||
**Timed out**: ${failed}
|
||||
|
||||
${summaries.map(s => `- ${s.issue_id}: ${s.status}`).join('\n')}
|
||||
|
||||
Session: ${sessionDir}
|
||||
`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## User Commands
|
||||
|
||||
During execution, the user may issue:
|
||||
|
||||
| Command | Action |
|
||||
|---------|--------|
|
||||
| `check` / `status` | Show executor progress summary |
|
||||
| `resume` / `continue` | Urge stalled executor |
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| issue-plan-agent timeout (>10 min) | Skip issue, log error, continue to next |
|
||||
| issue-plan-agent failure | Retry once, then skip with error log |
|
||||
| Solution file not written | Executor reports error, logs to `${sessionDir}/errors.json` |
|
||||
| Executor (Codex CLI) failure | Executor handles resume; logs CLI resume command |
|
||||
| No issues to process | Report error: no issues found |
|
||||
@@ -23,13 +23,13 @@ completion report.
|
||||
|
||||
| Action | Allowed |
|
||||
|--------|---------|
|
||||
| Read solution artifact from disk | ✅ |
|
||||
| Implement via Codex CLI | ✅ |
|
||||
| Run tests for verification | ✅ |
|
||||
| git commit completed work | ✅ |
|
||||
| Create or modify issues | ❌ |
|
||||
| Spawn subagents | ❌ |
|
||||
| Interact with user (AskUserQuestion) | ❌ |
|
||||
| Read solution artifact from disk | Yes |
|
||||
| Implement via Codex CLI | Yes |
|
||||
| Run tests for verification | Yes |
|
||||
| git commit completed work | Yes |
|
||||
| Create or modify issues | No |
|
||||
| Spawn subagents | No |
|
||||
| Interact with user (AskUserQuestion) | No |
|
||||
|
||||
---
|
||||
|
||||
@@ -38,7 +38,6 @@ completion report.
|
||||
### Step 1: Load Context
|
||||
|
||||
After reading role definition:
|
||||
- Run: `ccw spec load --category execution`
|
||||
- Extract issue ID, solution file path, session dir from task message
|
||||
|
||||
### Step 2: Load Solution
|
||||
@@ -52,7 +51,7 @@ const solution = solutionData.solution
|
||||
|
||||
If file not found or invalid:
|
||||
- Log error: `[executor] ERROR: Solution file not found: ${solutionFile}`
|
||||
- Output: `EXEC_FAILED:{issueId}:solution_file_missing`
|
||||
- Output: `EXEC_FAILED:${issueId}:solution_file_missing`
|
||||
- Stop execution
|
||||
|
||||
Verify solution has required fields:
|
||||
@@ -81,10 +80,9 @@ ${JSON.stringify(solution.bound, null, 2)}
|
||||
## Implementation Requirements
|
||||
1. Follow the solution plan tasks in order
|
||||
2. Write clean, minimal code following existing patterns
|
||||
3. Read .workflow/specs/*.md for project conventions
|
||||
4. Run tests after each significant change
|
||||
5. Ensure all existing tests still pass
|
||||
6. Do NOT over-engineer - implement exactly what the solution specifies
|
||||
3. Run tests after each significant change
|
||||
4. Ensure all existing tests still pass
|
||||
5. Do NOT over-engineer - implement exactly what the solution specifies
|
||||
|
||||
## Quality Checklist
|
||||
- [ ] All solution tasks implemented
|
||||
@@ -92,9 +90,6 @@ ${JSON.stringify(solution.bound, null, 2)}
|
||||
- [ ] Existing tests pass
|
||||
- [ ] New tests added where specified in solution
|
||||
- [ ] No security vulnerabilities introduced
|
||||
|
||||
## Project Guidelines
|
||||
@.workflow/specs/*.md
|
||||
PROMPT_EOF
|
||||
)" --tool codex --mode write --id planex-${issueId}
|
||||
```
|
||||
@@ -120,7 +115,6 @@ if (testCmd) {
|
||||
const testResult = Bash(`${testCmd} 2>&1 || echo TEST_FAILED`)
|
||||
|
||||
if (testResult.includes('TEST_FAILED') || testResult.includes('FAIL')) {
|
||||
// Report failure with resume command
|
||||
const resumeCmd = `ccw cli -p "Fix failing tests" --resume planex-${issueId} --tool codex --mode write`
|
||||
|
||||
Write({
|
||||
@@ -179,7 +173,6 @@ EXEC_DONE:${issueId}
|
||||
If Codex CLI execution fails or times out:
|
||||
|
||||
```bash
|
||||
# Resume with same session ID
|
||||
ccw cli -p "Continue implementation from where stopped" \
|
||||
--resume planex-${issueId} \
|
||||
--tool codex --mode write \
|
||||
@@ -194,19 +187,19 @@ Resume command is always logged to `${sessionDir}/errors.json` on any failure.
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Solution file missing | Output `EXEC_FAILED:{id}:solution_file_missing`, stop |
|
||||
| Solution JSON malformed | Output `EXEC_FAILED:{id}:solution_invalid`, stop |
|
||||
| Solution file missing | Output `EXEC_FAILED:<id>:solution_file_missing`, stop |
|
||||
| Solution JSON malformed | Output `EXEC_FAILED:<id>:solution_invalid`, stop |
|
||||
| Issue status update fails | Log warning, continue |
|
||||
| Codex CLI failure | Log resume command to errors.json, output `EXEC_FAILED:{id}:codex_failed` |
|
||||
| Tests failing | Log test output + resume command, output `EXEC_FAILED:{id}:tests_failing` |
|
||||
| Commit fails | Log warning, still output `EXEC_DONE:{id}` (implementation complete) |
|
||||
| Codex CLI failure | Log resume command to errors.json, output `EXEC_FAILED:<id>:codex_failed` |
|
||||
| Tests failing | Log test output + resume command, output `EXEC_FAILED:<id>:tests_failing` |
|
||||
| Commit fails | Log warning, still output `EXEC_DONE:<id>` (implementation complete) |
|
||||
| No test command found | Skip test step, proceed to commit |
|
||||
|
||||
## Key Reminders
|
||||
|
||||
**ALWAYS**:
|
||||
- Output `EXEC_DONE:{issueId}` on its own line when implementation succeeds
|
||||
- Output `EXEC_FAILED:{issueId}:{reason}` on its own line when implementation fails
|
||||
- Output `EXEC_DONE:<issueId>` on its own line when implementation succeeds
|
||||
- Output `EXEC_FAILED:<issueId>:<reason>` on its own line when implementation fails
|
||||
- Log resume command to errors.json on any failure
|
||||
- Use `[executor]` prefix in all status messages
|
||||
|
||||
|
||||
@@ -1,183 +0,0 @@
|
||||
---
|
||||
name: planex-planner
|
||||
description: |
|
||||
PlanEx planner agent. Issue decomposition + solution design with beat protocol.
|
||||
Outputs ISSUE_READY:{id} after each solution, waits for "Continue" signal.
|
||||
Deploy to: ~/.codex/agents/planex-planner.md
|
||||
color: blue
|
||||
---
|
||||
|
||||
# PlanEx Planner
|
||||
|
||||
Requirement decomposition → issue creation → solution design, one issue at a time.
|
||||
Outputs `ISSUE_READY:{issueId}` after each solution and waits for orchestrator to signal
|
||||
"Continue". Only outputs `ALL_PLANNED:{count}` when all issues are processed.
|
||||
|
||||
## Identity
|
||||
|
||||
- **Tag**: `[planner]`
|
||||
- **Beat Protocol**: ISSUE_READY per issue → wait → ALL_PLANNED when done
|
||||
- **Boundary**: Planning only — no code writing, no test running, no git commits
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
| Action | Allowed |
|
||||
|--------|---------|
|
||||
| Parse input (Issue IDs / text / plan file) | ✅ |
|
||||
| Create issues via CLI | ✅ |
|
||||
| Generate solution via issue-plan-agent | ✅ |
|
||||
| Write solution artifacts to disk | ✅ |
|
||||
| Output ISSUE_READY / ALL_PLANNED signals | ✅ |
|
||||
| Write or modify business code | ❌ |
|
||||
| Run tests or git commit | ❌ |
|
||||
|
||||
---
|
||||
|
||||
## CLI Toolbox
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `ccw issue create --data '{"title":"...","description":"..."}' --json` | Create issue |
|
||||
| `ccw issue status <id> --json` | Check issue status |
|
||||
| `ccw issue plan <id>` | Plan single issue (generates solution) |
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow
|
||||
|
||||
### Step 1: Load Context
|
||||
|
||||
After reading role definition, load project context:
|
||||
- Run: `ccw spec load --category planning`
|
||||
- Extract session directory and artifacts directory from task message
|
||||
|
||||
### Step 2: Parse Input
|
||||
|
||||
Determine input type from task message:
|
||||
|
||||
| Detection | Condition | Action |
|
||||
|-----------|-----------|--------|
|
||||
| Issue IDs | `ISS-\d{8}-\d{6}` pattern | Use directly for planning |
|
||||
| `--text '...'` | Flag in message | Create issue(s) first via CLI |
|
||||
| `--plan <path>` | Flag in message | Read file, parse phases, batch create issues |
|
||||
|
||||
**Plan file parsing rules** (when `--plan` is used):
|
||||
- Match `## Phase N: Title`, `## Step N: Title`, or `### N. Title`
|
||||
- Each match → one issue (title + description from section content)
|
||||
- Fallback: no structure found → entire file as single issue
|
||||
|
||||
### Step 3: Issue Processing Loop (Beat Protocol)
|
||||
|
||||
For each issue, execute in sequence:
|
||||
|
||||
#### 3a. Generate Solution
|
||||
|
||||
Use `issue-plan-agent` subagent to generate and bind solution:
|
||||
|
||||
```
|
||||
spawn_agent({
|
||||
message: `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS (Agent Execute)
|
||||
1. **Read role definition**: ~/.codex/agents/issue-plan-agent.md (MUST read first)
|
||||
2. Run: `ccw spec load --category planning`
|
||||
|
||||
---
|
||||
|
||||
issue_ids: ["${issueId}"]
|
||||
project_root: "${projectRoot}"
|
||||
|
||||
## Requirements
|
||||
- Generate solution for this issue
|
||||
- Auto-bind single solution
|
||||
- Output solution JSON when complete
|
||||
`
|
||||
})
|
||||
|
||||
const result = wait({ ids: [agent], timeout_ms: 600000 })
|
||||
close_agent({ id: agent })
|
||||
```
|
||||
|
||||
#### 3b. Write Solution Artifact
|
||||
|
||||
```javascript
|
||||
// Extract solution from issue-plan-agent result
|
||||
const solution = parseSolution(result)
|
||||
|
||||
Write({
|
||||
file_path: `${artifactsDir}/${issueId}.json`,
|
||||
content: JSON.stringify({
|
||||
session_id: sessionId,
|
||||
issue_id: issueId,
|
||||
solution: solution,
|
||||
planned_at: new Date().toISOString()
|
||||
}, null, 2)
|
||||
})
|
||||
```
|
||||
|
||||
#### 3c. Output Beat Signal
|
||||
|
||||
Output EXACTLY (no surrounding text on this line):
|
||||
```
|
||||
ISSUE_READY:{issueId}
|
||||
```
|
||||
|
||||
Then STOP. Do not process next issue. Wait for "Continue" message from orchestrator.
|
||||
|
||||
### Step 4: After All Issues
|
||||
|
||||
When every issue has been processed and confirmed with "Continue":
|
||||
|
||||
Output EXACTLY:
|
||||
```
|
||||
ALL_PLANNED:{totalCount}
|
||||
```
|
||||
|
||||
Where `{totalCount}` is the integer count of issues planned.
|
||||
|
||||
---
|
||||
|
||||
## Issue Creation (when needed)
|
||||
|
||||
For `--text` input:
|
||||
|
||||
```bash
|
||||
ccw issue create --data '{"title":"<title>","description":"<description>"}' --json
|
||||
```
|
||||
|
||||
Parse returned JSON for `id` field → use as issue ID.
|
||||
|
||||
For `--plan` input, create issues one at a time:
|
||||
```bash
|
||||
# For each parsed phase/step:
|
||||
ccw issue create --data '{"title":"<phase-title>","description":"<phase-content>"}' --json
|
||||
```
|
||||
|
||||
Collect all created issue IDs before proceeding to Step 3.
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Issue creation failure | Retry once with simplified text, then report error |
|
||||
| `issue-plan-agent` failure | Retry once, then skip issue with `ISSUE_SKIP:{issueId}:reason` signal |
|
||||
| Plan file not found | Output error immediately, do not proceed |
|
||||
| Artifact write failure | Log warning inline, still output ISSUE_READY (executor will handle missing file) |
|
||||
| "Continue" not received after 5 min | Re-output `ISSUE_READY:{issueId}` once as reminder |
|
||||
|
||||
## Key Reminders
|
||||
|
||||
**ALWAYS**:
|
||||
- Output `ISSUE_READY:{issueId}` on its own line with no surrounding text
|
||||
- Wait after each ISSUE_READY — do NOT auto-continue
|
||||
- Write solution file before outputting ISSUE_READY
|
||||
- Use `[planner]` prefix in all status messages
|
||||
|
||||
**NEVER**:
|
||||
- Output multiple ISSUE_READY signals before waiting for "Continue"
|
||||
- Proceed to next issue without receiving "Continue"
|
||||
- Write or modify any business logic files
|
||||
- Run tests or execute git commands
|
||||
@@ -1,284 +0,0 @@
|
||||
---
|
||||
name: team-planex
|
||||
description: |
|
||||
Beat pipeline: planner decomposes requirements issue-by-issue, orchestrator spawns
|
||||
Codex executor per issue immediately. All execution via Codex CLI only.
|
||||
agents: 2
|
||||
phases: 3
|
||||
---
|
||||
|
||||
# Team PlanEx (Codex)
|
||||
|
||||
逐 Issue 节拍流水线。Planner 每完成一个 issue 的 solution 立即输出 `ISSUE_READY` 信号,Orchestrator 即刻 spawn 独立 Codex executor 并行实现,无需等待 planner 完成全部规划。
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Input (Issue IDs / --text / --plan)
|
||||
→ Orchestrator: parse input → init session → spawn planner
|
||||
→ Beat loop:
|
||||
wait(planner) → ISSUE_READY:{issueId} → spawn_agent(executor)
|
||||
→ send_input(planner, "Continue")
|
||||
→ ALL_PLANNED:{count} → close_agent(planner)
|
||||
→ wait(all executors) → report
|
||||
```
|
||||
|
||||
## Agent Registry
|
||||
|
||||
| Agent | Role File | Responsibility |
|
||||
|-------|-----------|----------------|
|
||||
| `planner` | `~/.codex/agents/planex-planner.md` | Issue decomp → solution design → ISSUE_READY signals |
|
||||
| `executor` | `~/.codex/agents/planex-executor.md` | Codex CLI implementation per issue |
|
||||
|
||||
> Both agents must be deployed to `~/.codex/agents/` before use.
|
||||
> Source: `.codex/skills/team-planex/agents/`
|
||||
|
||||
---
|
||||
|
||||
## Input Parsing
|
||||
|
||||
Supported input types (parse from `$ARGUMENTS`):
|
||||
|
||||
| Type | Detection | Handler |
|
||||
|------|-----------|---------|
|
||||
| Issue IDs | `ISS-\d{8}-\d{6}` regex | Pass directly to planner |
|
||||
| Text | `--text '...'` flag | Planner creates issue(s) first |
|
||||
| Plan file | `--plan <path>` flag | Planner reads file, batch creates issues |
|
||||
|
||||
---
|
||||
|
||||
## Session Setup
|
||||
|
||||
Before spawning agents, initialize session directory:
|
||||
|
||||
```javascript
|
||||
// Generate session slug from input description (max 20 chars, kebab-case)
|
||||
const slug = toSlug(inputDescription).slice(0, 20)
|
||||
const date = new Date().toISOString().slice(0, 10).replace(/-/g, '')
|
||||
const sessionDir = `.workflow/.team/PEX-${slug}-${date}`
|
||||
const artifactsDir = `${sessionDir}/artifacts/solutions`
|
||||
|
||||
Bash(`mkdir -p "${artifactsDir}"`)
|
||||
|
||||
// Write initial session state
|
||||
Write({
|
||||
file_path: `${sessionDir}/team-session.json`,
|
||||
content: JSON.stringify({
|
||||
session_id: `PEX-${slug}-${date}`,
|
||||
input_type: inputType,
|
||||
input: rawInput,
|
||||
status: "running",
|
||||
started_at: new Date().toISOString(),
|
||||
executors: []
|
||||
}, null, 2)
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Spawn Planner
|
||||
|
||||
```javascript
|
||||
const plannerAgent = spawn_agent({
|
||||
message: `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS (Agent Execute)
|
||||
1. **Read role definition**: ~/.codex/agents/planex-planner.md (MUST read first)
|
||||
2. Run: `ccw spec load --category "planning execution"`
|
||||
|
||||
---
|
||||
|
||||
## Session
|
||||
Session directory: ${sessionDir}
|
||||
Artifacts directory: ${artifactsDir}
|
||||
|
||||
## Input
|
||||
${inputType === 'issues' ? `Issue IDs: ${issueIds.join(' ')}` : ''}
|
||||
${inputType === 'text' ? `Requirement: ${requirementText}` : ''}
|
||||
${inputType === 'plan' ? `Plan file: ${planPath}` : ''}
|
||||
|
||||
## Beat Protocol (CRITICAL)
|
||||
Process issues one at a time. After completing each issue's solution:
|
||||
1. Write solution JSON to: ${artifactsDir}/{issueId}.json
|
||||
2. Output EXACTLY this line: ISSUE_READY:{issueId}
|
||||
3. STOP and wait — do NOT continue until you receive "Continue"
|
||||
|
||||
When ALL issues are processed:
|
||||
1. Output EXACTLY: ALL_PLANNED:{totalCount}
|
||||
`
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Beat Loop
|
||||
|
||||
Orchestrator coordinates the planner-executor pipeline:
|
||||
|
||||
```javascript
|
||||
const executorIds = []
|
||||
const executorIssueMap = {}
|
||||
|
||||
while (true) {
|
||||
// Wait for planner beat signal (up to 10 min per issue)
|
||||
const plannerOut = wait({ ids: [plannerAgent], timeout_ms: 600000 })
|
||||
|
||||
// Handle timeout: urge convergence and retry
|
||||
if (plannerOut.timed_out) {
|
||||
send_input({
|
||||
id: plannerAgent,
|
||||
message: "Please output ISSUE_READY:{issueId} for current issue or ALL_PLANNED if done."
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
const output = plannerOut.status[plannerAgent].completed
|
||||
|
||||
// Detect ALL_PLANNED — pipeline complete
|
||||
if (output.includes('ALL_PLANNED')) {
|
||||
const match = output.match(/ALL_PLANNED:(\d+)/)
|
||||
const total = match ? parseInt(match[1]) : executorIds.length
|
||||
close_agent({ id: plannerAgent })
|
||||
break
|
||||
}
|
||||
|
||||
// Detect ISSUE_READY — spawn executor immediately
|
||||
const issueMatch = output.match(/ISSUE_READY:(ISS-\d{8}-\d{6}|[A-Z0-9-]+)/)
|
||||
if (issueMatch) {
|
||||
const issueId = issueMatch[1]
|
||||
const solutionFile = `${artifactsDir}/${issueId}.json`
|
||||
|
||||
const executorId = spawn_agent({
|
||||
message: `
|
||||
## TASK ASSIGNMENT
|
||||
|
||||
### MANDATORY FIRST STEPS (Agent Execute)
|
||||
1. **Read role definition**: ~/.codex/agents/planex-executor.md (MUST read first)
|
||||
2. Run: `ccw spec load --category "planning execution"`
|
||||
|
||||
---
|
||||
|
||||
## Issue
|
||||
Issue ID: ${issueId}
|
||||
Solution file: ${solutionFile}
|
||||
Session: ${sessionDir}
|
||||
|
||||
## Execution
|
||||
Load solution from file → implement via Codex CLI → verify tests → commit → report.
|
||||
`
|
||||
})
|
||||
|
||||
executorIds.push(executorId)
|
||||
executorIssueMap[executorId] = issueId
|
||||
|
||||
// Signal planner to continue to next issue
|
||||
send_input({ id: plannerAgent, message: "Continue with next issue." })
|
||||
continue
|
||||
}
|
||||
|
||||
// Unexpected output: urge convergence
|
||||
send_input({
|
||||
id: plannerAgent,
|
||||
message: "Output ISSUE_READY:{issueId} when solution is ready, or ALL_PLANNED when all done."
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Wait All Executors
|
||||
|
||||
```javascript
|
||||
if (executorIds.length > 0) {
|
||||
// Extended timeout: Codex CLI execution per issue (~10-20 min each)
|
||||
const execResults = wait({ ids: executorIds, timeout_ms: 1800000 })
|
||||
|
||||
if (execResults.timed_out) {
|
||||
const completed = executorIds.filter(id => execResults.status[id]?.completed)
|
||||
const pending = executorIds.filter(id => !execResults.status[id]?.completed)
|
||||
// Log pending issues for manual follow-up
|
||||
if (pending.length > 0) {
|
||||
const pendingIssues = pending.map(id => executorIssueMap[id])
|
||||
Write({
|
||||
file_path: `${sessionDir}/pending-executors.json`,
|
||||
content: JSON.stringify({ pending_issues: pendingIssues, executor_ids: pending }, null, 2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Collect summaries
|
||||
const summaries = executorIds.map(id => ({
|
||||
issue_id: executorIssueMap[id],
|
||||
status: execResults.status[id]?.completed ? 'completed' : 'timeout',
|
||||
output: execResults.status[id]?.completed ?? null
|
||||
}))
|
||||
|
||||
// Cleanup
|
||||
executorIds.forEach(id => {
|
||||
try { close_agent({ id }) } catch { /* already closed */ }
|
||||
})
|
||||
|
||||
// Final report
|
||||
const completed = summaries.filter(s => s.status === 'completed').length
|
||||
const failed = summaries.filter(s => s.status === 'timeout').length
|
||||
|
||||
return `
|
||||
## Pipeline Complete
|
||||
|
||||
**Total issues**: ${executorIds.length}
|
||||
**Completed**: ${completed}
|
||||
**Timed out**: ${failed}
|
||||
|
||||
${summaries.map(s => `- ${s.issue_id}: ${s.status}`).join('\n')}
|
||||
|
||||
Session: ${sessionDir}
|
||||
`
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## User Commands
|
||||
|
||||
During execution, the user may issue:
|
||||
|
||||
| Command | Action |
|
||||
|---------|--------|
|
||||
| `check` / `status` | Show executor progress summary |
|
||||
| `resume` / `continue` | Urge stalled planner or executor |
|
||||
| `add <issue-ids>` | `send_input` to planner with new issue IDs |
|
||||
| `add --text '...'` | `send_input` to planner to create and plan new issue |
|
||||
| `add --plan <path>` | `send_input` to planner to parse and batch create from plan file |
|
||||
|
||||
**`add` handler** (inject mid-execution):
|
||||
|
||||
```javascript
|
||||
// Get current planner agent ID from session state
|
||||
const session = JSON.parse(Read(`${sessionDir}/team-session.json`))
|
||||
const plannerAgentId = session.planner_agent_id // saved during Phase 1
|
||||
|
||||
send_input({
|
||||
id: plannerAgentId,
|
||||
message: `
|
||||
## NEW ISSUES INJECTED
|
||||
${newInput}
|
||||
|
||||
Process these after current issue (or immediately if idle).
|
||||
Follow beat protocol: ISSUE_READY → wait for Continue → next issue.
|
||||
`
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Scenario | Resolution |
|
||||
|----------|------------|
|
||||
| Planner timeout (>10 min per issue) | `send_input` urge convergence, re-enter loop |
|
||||
| Planner never outputs ISSUE_READY | After 3 retries, `close_agent` + report stall |
|
||||
| Solution file not written | Executor reports error, logs to `${sessionDir}/errors.json` |
|
||||
| Executor (Codex CLI) failure | Executor handles resume; logs CLI resume command |
|
||||
| ALL_PLANNED never received | After 60 min total, close planner, wait remaining executors |
|
||||
| No issues to process | AskUserQuestion for clarification |
|
||||
1141
.codex/skills/wave-plan-pipeline/SKILL.md
Normal file
1141
.codex/skills/wave-plan-pipeline/SKILL.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -373,7 +373,7 @@ try {
|
||||
## Related Skills
|
||||
|
||||
**Prerequisite Skills**:
|
||||
- `workflow:plan` or `workflow:execute` - Complete implementation (Session Mode)
|
||||
- `workflow-plan` or `workflow-execute` - Complete implementation (Session Mode)
|
||||
- None for Prompt Mode
|
||||
|
||||
**Phase 1 Agents** (used by phases/01-test-fix-gen.md via spawn_agent):
|
||||
|
||||
105
.github/workflows/docs.yml
vendored
Normal file
105
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
name: Docs Build & Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- develop
|
||||
paths:
|
||||
- 'docs/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- develop
|
||||
paths:
|
||||
- 'docs/**'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: docs
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: docs/package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build
|
||||
run: npm run docs:build
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: docs/.vitepress/dist
|
||||
|
||||
deploy:
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main'
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
|
||||
# Lighthouse CI for PRs
|
||||
lighthouse:
|
||||
if: github.event_name == 'pull_request'
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
defaults:
|
||||
run:
|
||||
working-directory: docs
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: docs/package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build
|
||||
run: npm run docs:build
|
||||
|
||||
- name: Start preview server
|
||||
run: |
|
||||
npm run docs:preview -- --host 127.0.0.1 --port 4173 &
|
||||
npx --yes wait-on http://127.0.0.1:4173/
|
||||
|
||||
- name: Run Lighthouse CI
|
||||
uses: treosh/lighthouse-ci-action@v10
|
||||
with:
|
||||
urls: |
|
||||
http://127.0.0.1:4173/
|
||||
uploadArtifacts: true
|
||||
temporaryPublicStorage: true
|
||||
commentPR: true
|
||||
budgetPath: docs/lighthouse-budget.json
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -138,3 +138,8 @@ ccw/.tmp-ccw-auth-home/
|
||||
|
||||
# Skills library (local only)
|
||||
.claude/skills_lib/
|
||||
|
||||
# Docs site
|
||||
docs/node_modules/
|
||||
docs/.vitepress/dist/
|
||||
docs/.vitepress/cache/
|
||||
|
||||
26
README.md
26
README.md
@@ -93,7 +93,7 @@ CCW uses two types of invocations:
|
||||
|
||||
| Type | Format | Examples |
|
||||
|------|--------|----------|
|
||||
| **Skills** | Trigger phrase (no slash) | `workflow:lite-plan`, `brainstorm`, `workflow:plan` |
|
||||
| **Skills** | Trigger phrase (no slash) | `workflow-lite-plan`, `brainstorm`, `workflow-plan` |
|
||||
| **Commands** | Slash command | `/ccw`, `/workflow/session:start`, `/issue/new` |
|
||||
|
||||
### Choose Your Workflow Skill
|
||||
@@ -101,11 +101,11 @@ CCW uses two types of invocations:
|
||||
<div align="center">
|
||||
<table>
|
||||
<tr><th>Skill Trigger</th><th>Use Case</th></tr>
|
||||
<tr><td><code>workflow:lite-plan</code></td><td>Lightweight planning, single-module features</td></tr>
|
||||
<tr><td><code>workflow:multi-cli-plan</code></td><td>Multi-CLI collaborative analysis</td></tr>
|
||||
<tr><td><code>workflow:plan</code></td><td>Full planning with session persistence</td></tr>
|
||||
<tr><td><code>workflow:tdd-plan</code></td><td>Test-driven development</td></tr>
|
||||
<tr><td><code>workflow:test-fix-gen</code></td><td>Test generation and fix cycles</td></tr>
|
||||
<tr><td><code>workflow-lite-plan</code></td><td>Lightweight planning, single-module features</td></tr>
|
||||
<tr><td><code>workflow-multi-cli-plan</code></td><td>Multi-CLI collaborative analysis</td></tr>
|
||||
<tr><td><code>workflow-plan</code></td><td>Full planning with session persistence</td></tr>
|
||||
<tr><td><code>workflow-tdd-plan</code></td><td>Test-driven development</td></tr>
|
||||
<tr><td><code>workflow-test-fix</code></td><td>Test generation and fix cycles</td></tr>
|
||||
<tr><td><code>brainstorm</code></td><td>Multi-role brainstorming analysis</td></tr>
|
||||
</table>
|
||||
</div>
|
||||
@@ -114,9 +114,9 @@ CCW uses two types of invocations:
|
||||
|
||||
```bash
|
||||
# Skill triggers (no slash - just describe what you want)
|
||||
workflow:lite-plan "Add JWT authentication"
|
||||
workflow:plan "Implement payment gateway integration"
|
||||
workflow:execute
|
||||
workflow-lite-plan "Add JWT authentication"
|
||||
workflow-plan "Implement payment gateway integration"
|
||||
workflow-execute
|
||||
|
||||
# Brainstorming
|
||||
brainstorm "Design real-time collaboration system"
|
||||
@@ -278,9 +278,9 @@ ccw upgrade -a # Upgrade all installations
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Workflow Skills │
|
||||
│ 📝 workflow:lite-plan / workflow:multi-cli-plan (lightweight) │
|
||||
│ 📊 workflow:plan / workflow:tdd-plan (session-based) │
|
||||
│ 🧪 workflow:test-fix-gen / workflow:test-cycle-execute │
|
||||
│ 📝 workflow-lite-plan / workflow-multi-cli-plan (lightweight) │
|
||||
│ 📊 workflow-plan / workflow-tdd-plan (session-based) │
|
||||
│ 🧪 workflow-test-fix / workflow-test-fix │
|
||||
│ 🧠 brainstorm (multi-role analysis) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
@@ -324,7 +324,7 @@ Claude-Code-Workflow/
|
||||
│ └── skills/ # 37 modular skills
|
||||
│ ├── workflow-lite-plan/
|
||||
│ ├── workflow-plan/
|
||||
│ ├── workflow-tdd/
|
||||
│ ├── workflow-tdd-plan/
|
||||
│ ├── workflow-test-fix/
|
||||
│ ├── brainstorm/
|
||||
│ ├── team-*/ # Team coordination skills
|
||||
|
||||
26
README_CN.md
26
README_CN.md
@@ -93,7 +93,7 @@ CCW 使用两种调用方式:
|
||||
|
||||
| 类型 | 格式 | 示例 |
|
||||
|------|------|------|
|
||||
| **Skills** | 触发短语(无斜杠) | `workflow:lite-plan`, `brainstorm`, `workflow:plan` |
|
||||
| **Skills** | 触发短语(无斜杠) | `workflow-lite-plan`, `brainstorm`, `workflow-plan` |
|
||||
| **Commands** | 斜杠命令 | `/ccw`, `/workflow/session:start`, `/issue/new` |
|
||||
|
||||
### 选择工作流 Skill
|
||||
@@ -101,11 +101,11 @@ CCW 使用两种调用方式:
|
||||
<div align="center">
|
||||
<table>
|
||||
<tr><th>Skill 触发词</th><th>使用场景</th></tr>
|
||||
<tr><td><code>workflow:lite-plan</code></td><td>轻量规划、单模块功能</td></tr>
|
||||
<tr><td><code>workflow:multi-cli-plan</code></td><td>多 CLI 协同分析</td></tr>
|
||||
<tr><td><code>workflow:plan</code></td><td>完整规划与会话持久化</td></tr>
|
||||
<tr><td><code>workflow:tdd-plan</code></td><td>测试驱动开发</td></tr>
|
||||
<tr><td><code>workflow:test-fix-gen</code></td><td>测试生成与修复循环</td></tr>
|
||||
<tr><td><code>workflow-lite-plan</code></td><td>轻量规划、单模块功能</td></tr>
|
||||
<tr><td><code>workflow-multi-cli-plan</code></td><td>多 CLI 协同分析</td></tr>
|
||||
<tr><td><code>workflow-plan</code></td><td>完整规划与会话持久化</td></tr>
|
||||
<tr><td><code>workflow-tdd-plan</code></td><td>测试驱动开发</td></tr>
|
||||
<tr><td><code>workflow-test-fix</code></td><td>测试生成与修复循环</td></tr>
|
||||
<tr><td><code>brainstorm</code></td><td>多角色头脑风暴分析</td></tr>
|
||||
</table>
|
||||
</div>
|
||||
@@ -114,9 +114,9 @@ CCW 使用两种调用方式:
|
||||
|
||||
```bash
|
||||
# Skill 触发(无斜杠 - 直接描述你想做什么)
|
||||
workflow:lite-plan "添加 JWT 认证"
|
||||
workflow:plan "实现支付网关集成"
|
||||
workflow:execute
|
||||
workflow-lite-plan "添加 JWT 认证"
|
||||
workflow-plan "实现支付网关集成"
|
||||
workflow-execute
|
||||
|
||||
# 头脑风暴
|
||||
brainstorm "设计实时协作系统"
|
||||
@@ -278,9 +278,9 @@ ccw upgrade -a # 升级所有安装
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 工作流 Skills │
|
||||
│ 📝 workflow:lite-plan / workflow:multi-cli-plan (轻量级) │
|
||||
│ 📊 workflow:plan / workflow:tdd-plan (会话式) │
|
||||
│ 🧪 workflow:test-fix-gen / workflow:test-cycle-execute │
|
||||
│ 📝 workflow-lite-plan / workflow-multi-cli-plan (轻量级) │
|
||||
│ 📊 workflow-plan / workflow-tdd-plan (会话式) │
|
||||
│ 🧪 workflow-test-fix / workflow-test-fix │
|
||||
│ 🧠 brainstorm (多角色分析) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
@@ -324,7 +324,7 @@ Claude-Code-Workflow/
|
||||
│ └── skills/ # 37 个模块化技能
|
||||
│ ├── workflow-lite-plan/
|
||||
│ ├── workflow-plan/
|
||||
│ ├── workflow-tdd/
|
||||
│ ├── workflow-tdd-plan/
|
||||
│ ├── workflow-test-fix/
|
||||
│ ├── brainstorm/
|
||||
│ ├── team-*/ # 团队协调技能
|
||||
|
||||
@@ -21,7 +21,7 @@ CCW uses two types of invocations:
|
||||
|
||||
| Type | Format | Examples |
|
||||
|------|--------|----------|
|
||||
| **Skills** | Trigger phrase (no slash) | `workflow:lite-plan`, `brainstorm`, `workflow:plan` |
|
||||
| **Skills** | Trigger phrase (no slash) | `workflow-lite-plan`, `brainstorm`, `workflow-plan` |
|
||||
| **Commands** | Slash command | `/ccw`, `/workflow/session:start`, `/issue/new` |
|
||||
|
||||
---
|
||||
@@ -32,7 +32,7 @@ CCW uses two types of invocations:
|
||||
|
||||
| Skill Trigger | Purpose | Phases |
|
||||
|---------------|---------|--------|
|
||||
| `workflow:lite-plan` | Lightweight planning with exploration | 5 phases |
|
||||
| `workflow-lite-plan` | Lightweight planning with exploration | 5 phases |
|
||||
| `workflow:lite-execute` | Execute lite-plan output | Execution |
|
||||
|
||||
**5-Phase Interactive Workflow**:
|
||||
@@ -48,7 +48,7 @@ Phase 5: Execution & Tracking
|
||||
|
||||
| Skill Trigger | Purpose |
|
||||
|---------------|---------|
|
||||
| `workflow:multi-cli-plan` | Multi-CLI collaborative analysis |
|
||||
| `workflow-multi-cli-plan` | Multi-CLI collaborative analysis |
|
||||
|
||||
**5-Phase Workflow**:
|
||||
```
|
||||
@@ -63,16 +63,16 @@ Phase 5: Plan Generation
|
||||
|
||||
| Skill Trigger | Purpose | Phases |
|
||||
|---------------|---------|--------|
|
||||
| `workflow:plan` | Full planning with session | 5 phases |
|
||||
| `workflow:plan-verify` | Plan verification | Verification |
|
||||
| `workflow-plan` | Full planning with session | 5 phases |
|
||||
| `workflow-plan-verify` | Plan verification | Verification |
|
||||
| `workflow:replan` | Interactive replanning | Replanning |
|
||||
|
||||
### TDD Workflow
|
||||
|
||||
| Skill Trigger | Purpose |
|
||||
|---------------|---------|
|
||||
| `workflow:tdd-plan` | TDD planning |
|
||||
| `workflow:tdd-verify` | TDD verification |
|
||||
| `workflow-tdd-plan` | TDD planning |
|
||||
| `workflow-tdd-verify` | TDD verification |
|
||||
|
||||
**6-Phase TDD Planning + Red-Green-Refactor**:
|
||||
```
|
||||
@@ -88,8 +88,8 @@ Phase 6: Next cycle
|
||||
|
||||
| Skill Trigger | Purpose |
|
||||
|---------------|---------|
|
||||
| `workflow:test-fix-gen` | Test generation and fix |
|
||||
| `workflow:test-cycle-execute` | Execute test cycles |
|
||||
| `workflow-test-fix` | Test generation and fix |
|
||||
| `workflow-test-fix` | Execute test cycles |
|
||||
|
||||
**Progressive Test Layers (L0-L3)**:
|
||||
|
||||
@@ -232,12 +232,12 @@ Phase 6: Next cycle
|
||||
|
||||
| Skill | Trigger |
|
||||
|-------|---------|
|
||||
| workflow-lite-plan | `workflow:lite-plan`, `workflow:lite-execute` |
|
||||
| workflow-multi-cli-plan | `workflow:multi-cli-plan` |
|
||||
| workflow-plan | `workflow:plan`, `workflow:plan-verify`, `workflow:replan` |
|
||||
| workflow-execute | `workflow:execute` |
|
||||
| workflow-tdd | `workflow:tdd-plan`, `workflow:tdd-verify` |
|
||||
| workflow-test-fix | `workflow:test-fix-gen`, `workflow:test-cycle-execute` |
|
||||
| workflow-lite-plan | `workflow-lite-plan`, `workflow:lite-execute` |
|
||||
| workflow-multi-cli-plan | `workflow-multi-cli-plan` |
|
||||
| workflow-plan | `workflow-plan`, `workflow-plan-verify`, `workflow:replan` |
|
||||
| workflow-execute | `workflow-execute` |
|
||||
| workflow-tdd-plan | `workflow-tdd-plan`, `workflow-tdd-verify` |
|
||||
| workflow-test-fix | `workflow-test-fix`, `workflow-test-fix` |
|
||||
|
||||
### Specialized Skills
|
||||
|
||||
@@ -293,25 +293,25 @@ New System ─┼────────────┼─────
|
||||
Start
|
||||
│
|
||||
├─ Is it a quick fix or config change?
|
||||
│ └─> Yes: workflow:lite-plan
|
||||
│ └─> Yes: workflow-lite-plan
|
||||
│
|
||||
├─ Is it a single module feature?
|
||||
│ └─> Yes: workflow:lite-plan
|
||||
│ └─> Yes: workflow-lite-plan
|
||||
│
|
||||
├─ Does it need multi-CLI analysis?
|
||||
│ └─> Yes: workflow:multi-cli-plan
|
||||
│ └─> Yes: workflow-multi-cli-plan
|
||||
│
|
||||
├─ Is it multi-module with session?
|
||||
│ └─> Yes: workflow:plan
|
||||
│ └─> Yes: workflow-plan
|
||||
│
|
||||
├─ Is it TDD development?
|
||||
│ └─> Yes: workflow:tdd-plan
|
||||
│ └─> Yes: workflow-tdd-plan
|
||||
│
|
||||
├─ Is it test generation?
|
||||
│ └─> Yes: workflow:test-fix-gen
|
||||
│ └─> Yes: workflow-test-fix
|
||||
│
|
||||
└─ Is it architecture/new system?
|
||||
└─> Yes: brainstorm + workflow:plan
|
||||
└─> Yes: brainstorm + workflow-plan
|
||||
```
|
||||
|
||||
---
|
||||
@@ -348,10 +348,10 @@ Start
|
||||
|
||||
| Skill | When to Use |
|
||||
|-------|-------------|
|
||||
| `workflow:lite-plan` | Quick fixes, single features |
|
||||
| `workflow:plan` | Multi-module development |
|
||||
| `workflow-lite-plan` | Quick fixes, single features |
|
||||
| `workflow-plan` | Multi-module development |
|
||||
| `brainstorm` | Architecture, new features |
|
||||
| `workflow:execute` | Execute planned work |
|
||||
| `workflow-execute` | Execute planned work |
|
||||
|
||||
### Most Common Commands
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ CCW 使用两种调用方式:
|
||||
|
||||
| 类型 | 格式 | 示例 |
|
||||
|------|------|------|
|
||||
| **Skills** | 触发短语(无斜杠) | `workflow:lite-plan`, `brainstorm`, `workflow:plan` |
|
||||
| **Skills** | 触发短语(无斜杠) | `workflow-lite-plan`, `brainstorm`, `workflow-plan` |
|
||||
| **Commands** | 斜杠命令 | `/ccw`, `/workflow/session:start`, `/issue/new` |
|
||||
|
||||
---
|
||||
@@ -33,7 +33,7 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill 触发词 | 用途 | 阶段 |
|
||||
|--------------|------|------|
|
||||
| `workflow:lite-plan` | 轻量规划与探索 | 5 阶段 |
|
||||
| `workflow-lite-plan` | 轻量规划与探索 | 5 阶段 |
|
||||
| `workflow:lite-execute` | 执行 lite-plan 输出 | 执行 |
|
||||
|
||||
**5 阶段交互式工作流**:
|
||||
@@ -49,7 +49,7 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill 触发词 | 用途 |
|
||||
|--------------|------|
|
||||
| `workflow:multi-cli-plan` | 多 CLI 协同分析 |
|
||||
| `workflow-multi-cli-plan` | 多 CLI 协同分析 |
|
||||
|
||||
**5 阶段工作流**:
|
||||
```
|
||||
@@ -64,16 +64,16 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill 触发词 | 用途 | 阶段 |
|
||||
|--------------|------|------|
|
||||
| `workflow:plan` | 完整规划与会话 | 5 阶段 |
|
||||
| `workflow:plan-verify` | 规划验证 | 验证 |
|
||||
| `workflow-plan` | 完整规划与会话 | 5 阶段 |
|
||||
| `workflow-plan-verify` | 规划验证 | 验证 |
|
||||
| `workflow:replan` | 交互式重新规划 | 重规划 |
|
||||
|
||||
### TDD 工作流
|
||||
|
||||
| Skill 触发词 | 用途 |
|
||||
|--------------|------|
|
||||
| `workflow:tdd-plan` | TDD 规划 |
|
||||
| `workflow:tdd-verify` | TDD 验证 |
|
||||
| `workflow-tdd-plan` | TDD 规划 |
|
||||
| `workflow-tdd-verify` | TDD 验证 |
|
||||
|
||||
**6 阶段 TDD 规划 + Red-Green-Refactor**:
|
||||
```
|
||||
@@ -89,8 +89,8 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill 触发词 | 用途 |
|
||||
|--------------|------|
|
||||
| `workflow:test-fix-gen` | 测试生成与修复 |
|
||||
| `workflow:test-cycle-execute` | 执行测试循环 |
|
||||
| `workflow-test-fix` | 测试生成与修复 |
|
||||
| `workflow-test-fix` | 执行测试循环 |
|
||||
|
||||
**渐进式测试层级 (L0-L3)**:
|
||||
|
||||
@@ -233,12 +233,12 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill | 触发词 |
|
||||
|-------|--------|
|
||||
| workflow-lite-plan | `workflow:lite-plan`, `workflow:lite-execute` |
|
||||
| workflow-multi-cli-plan | `workflow:multi-cli-plan` |
|
||||
| workflow-plan | `workflow:plan`, `workflow:plan-verify`, `workflow:replan` |
|
||||
| workflow-execute | `workflow:execute` |
|
||||
| workflow-tdd | `workflow:tdd-plan`, `workflow:tdd-verify` |
|
||||
| workflow-test-fix | `workflow:test-fix-gen`, `workflow:test-cycle-execute` |
|
||||
| workflow-lite-plan | `workflow-lite-plan`, `workflow:lite-execute` |
|
||||
| workflow-multi-cli-plan | `workflow-multi-cli-plan` |
|
||||
| workflow-plan | `workflow-plan`, `workflow-plan-verify`, `workflow:replan` |
|
||||
| workflow-execute | `workflow-execute` |
|
||||
| workflow-tdd-plan | `workflow-tdd-plan`, `workflow-tdd-verify` |
|
||||
| workflow-test-fix | `workflow-test-fix`, `workflow-test-fix` |
|
||||
|
||||
### 专项 Skills
|
||||
|
||||
@@ -294,25 +294,25 @@ CCW 使用两种调用方式:
|
||||
开始
|
||||
│
|
||||
├─ 是快速修复或配置变更?
|
||||
│ └─> 是:workflow:lite-plan
|
||||
│ └─> 是:workflow-lite-plan
|
||||
│
|
||||
├─ 是单模块功能?
|
||||
│ └─> 是:workflow:lite-plan
|
||||
│ └─> 是:workflow-lite-plan
|
||||
│
|
||||
├─ 需要多 CLI 分析?
|
||||
│ └─> 是:workflow:multi-cli-plan
|
||||
│ └─> 是:workflow-multi-cli-plan
|
||||
│
|
||||
├─ 是多模块且需要会话?
|
||||
│ └─> 是:workflow:plan
|
||||
│ └─> 是:workflow-plan
|
||||
│
|
||||
├─ 是 TDD 开发?
|
||||
│ └─> 是:workflow:tdd-plan
|
||||
│ └─> 是:workflow-tdd-plan
|
||||
│
|
||||
├─ 是测试生成?
|
||||
│ └─> 是:workflow:test-fix-gen
|
||||
│ └─> 是:workflow-test-fix
|
||||
│
|
||||
└─ 是架构/新系统?
|
||||
└─> 是:brainstorm + workflow:plan
|
||||
└─> 是:brainstorm + workflow-plan
|
||||
```
|
||||
|
||||
---
|
||||
@@ -349,10 +349,10 @@ CCW 使用两种调用方式:
|
||||
|
||||
| Skill | 何时使用 |
|
||||
|-------|----------|
|
||||
| `workflow:lite-plan` | 快速修复、单功能 |
|
||||
| `workflow:plan` | 多模块开发 |
|
||||
| `workflow-lite-plan` | 快速修复、单功能 |
|
||||
| `workflow-plan` | 多模块开发 |
|
||||
| `brainstorm` | 架构、新功能 |
|
||||
| `workflow:execute` | 执行已规划的工作 |
|
||||
| `workflow-execute` | 执行已规划的工作 |
|
||||
|
||||
### 最常用 Commands
|
||||
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
# Codex MCP 功能实现总结
|
||||
|
||||
> **注意**: 此文档描述的是旧的 vanilla JS 前端架构。当前版本 (v7.0+) 使用 React SPA 前端。
|
||||
> 请参考 `ccw/frontend/src/` 目录中的 React 组件。
|
||||
|
||||
## 📝 已完成的修复
|
||||
|
||||
### 1. CCW Tools MCP 卡片样式修复
|
||||
|
||||
**文件**: `ccw/src/templates/dashboard-js/views/mcp-manager.js`
|
||||
**文件**: `ccw/frontend/src/components/McpManager.tsx` (React)
|
||||
|
||||
**修改内容**:
|
||||
- ✅ 卡片边框: `border-primary` → `border-orange-500` (第345行)
|
||||
- ✅ 图标背景: `bg-primary` → `bg-orange-500` (第348行)
|
||||
- ✅ 图标颜色: `text-primary-foreground` → `text-white` (第349行)
|
||||
- ✅ "Available"徽章: `bg-primary/20 text-primary` → `bg-orange-500/20 text-orange-600` (第360行)
|
||||
- ✅ 选择按钮颜色: `text-primary` → `text-orange-500` (第378-379行)
|
||||
- ✅ 安装按钮: `bg-primary` → `bg-orange-500` (第386行、第399行)
|
||||
- ✅ 卡片边框: `border-primary` → `border-orange-500`
|
||||
- ✅ 图标背景: `bg-primary` → `bg-orange-500`
|
||||
- ✅ 图标颜色: `text-primary-foreground` → `text-white`
|
||||
- ✅ "Available"徽章: `bg-primary/20 text-primary` → `bg-orange-500/20 text-orange-600`
|
||||
- ✅ 选择按钮颜色: `text-primary` → `text-orange-500`
|
||||
- ✅ 安装按钮: `bg-primary` → `bg-orange-500`
|
||||
|
||||
**影响范围**: Claude 模式下的 CCW Tools MCP 卡片
|
||||
|
||||
@@ -20,10 +23,10 @@
|
||||
|
||||
### 2. Toast 消息显示时间增强
|
||||
|
||||
**文件**: `ccw/src/templates/dashboard-js/components/navigation.js`
|
||||
**文件**: `ccw/frontend/src/hooks/useToast.ts` (React)
|
||||
|
||||
**修改内容**:
|
||||
- ✅ 显示时间: 2000ms → 3500ms (第300行)
|
||||
- ✅ 显示时间: 2000ms → 3500ms
|
||||
|
||||
**影响范围**: 所有 Toast 消息(MCP 安装、删除、切换等操作反馈)
|
||||
|
||||
@@ -55,38 +58,33 @@ API 请求: POST /api/codex-mcp-add
|
||||
↓
|
||||
前端更新:
|
||||
1. loadMcpConfig() - 重新加载配置
|
||||
2. renderMcpManager() - 重新渲染 UI
|
||||
3. showRefreshToast(...) - 显示成功/失败消息 (3.5秒)
|
||||
2. 状态更新触发 UI 重新渲染
|
||||
3. Toast 显示成功/失败消息 (3.5秒)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📍 关键代码位置
|
||||
|
||||
### 前端
|
||||
### 前端 (React SPA)
|
||||
|
||||
| 功能 | 文件 | 行号 | 说明 |
|
||||
|------|------|------|------|
|
||||
| 复制到 Codex | `components/mcp-manager.js` | 175-177 | `copyClaudeServerToCodex()` 函数 |
|
||||
| 添加到 Codex | `components/mcp-manager.js` | 87-114 | `addCodexMcpServer()` 函数 |
|
||||
| Toast 消息 | `components/navigation.js` | 286-301 | `showRefreshToast()` 函数 |
|
||||
| CCW Tools 样式 | `views/mcp-manager.js` | 342-415 | Claude 模式卡片渲染 |
|
||||
| 其他项目按钮 | `views/mcp-manager.js` | 1015-1020 | "Install to Codex" 按钮 |
|
||||
| 功能 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| MCP 管理 | `ccw/frontend/src/components/McpManager.tsx` | MCP 管理组件 |
|
||||
| Toast 消息 | `ccw/frontend/src/hooks/useToast.ts` | Toast hook |
|
||||
| 复制到 Codex | `ccw/frontend/src/api/mcp.ts` | MCP API 调用 |
|
||||
|
||||
### 后端
|
||||
|
||||
| 功能 | 文件 | 行号 | 说明 |
|
||||
|------|------|------|------|
|
||||
| API 端点 | `core/routes/mcp-routes.ts` | 1001-1010 | `/api/codex-mcp-add` 路由 |
|
||||
| 添加服务器 | `core/routes/mcp-routes.ts` | 251-330 | `addCodexMcpServer()` 函数 |
|
||||
| TOML 序列化 | `core/routes/mcp-routes.ts` | 166-188 | `serializeToml()` 函数 |
|
||||
| 功能 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| API 端点 | `ccw/src/core/routes/mcp-routes.ts` | `/api/codex-mcp-add` 路由 |
|
||||
| 添加服务器 | `ccw/src/core/routes/mcp-routes.ts` | `addCodexMcpServer()` 函数 |
|
||||
| TOML 序列化 | `ccw/src/core/routes/mcp-routes.ts` | `serializeToml()` 函数 |
|
||||
|
||||
### CSS
|
||||
### CSS (Tailwind)
|
||||
|
||||
| 功能 | 文件 | 行号 | 说明 |
|
||||
|------|------|------|------|
|
||||
| Toast 样式 | `dashboard-css/06-cards.css` | 1501-1538 | Toast 容器和类型样式 |
|
||||
| Toast 动画 | `dashboard-css/06-cards.css` | 1540-1551 | 滑入/淡出动画 |
|
||||
Toast 样式使用 Tailwind CSS 内联样式,定义在 React 组件中。
|
||||
|
||||
---
|
||||
|
||||
@@ -220,33 +218,31 @@ API 请求: POST /api/codex-mcp-add
|
||||
|
||||
## 📦 相关文件清单
|
||||
|
||||
### 已修改文件
|
||||
### 前端文件 (React SPA)
|
||||
|
||||
1. `ccw/src/templates/dashboard-js/views/mcp-manager.js`
|
||||
- 修改: CCW Tools 卡片样式(第342-415行)
|
||||
|
||||
2. `ccw/src/templates/dashboard-js/components/navigation.js`
|
||||
- 修改: Toast 显示时间(第300行)
|
||||
1. `ccw/frontend/src/components/McpManager.tsx`
|
||||
- MCP 管理组件(包含 CCW Tools 卡片样式)
|
||||
|
||||
### 核心功能文件(未修改但相关)
|
||||
2. `ccw/frontend/src/hooks/useToast.ts`
|
||||
- Toast 消息 hook(显示时间 3.5秒)
|
||||
|
||||
3. `ccw/src/templates/dashboard-js/components/mcp-manager.js`
|
||||
- 包含: `addCodexMcpServer()`, `copyClaudeServerToCodex()` 函数
|
||||
3. `ccw/frontend/src/api/mcp.ts`
|
||||
- MCP API 调用函数
|
||||
|
||||
### 后端文件
|
||||
|
||||
4. `ccw/src/core/routes/mcp-routes.ts`
|
||||
- 包含: Codex MCP API 端点和后端逻辑
|
||||
- Codex MCP API 端点和后端逻辑
|
||||
|
||||
5. `ccw/src/templates/dashboard-css/06-cards.css`
|
||||
- 包含: Toast 样式定义
|
||||
### 文档
|
||||
|
||||
### 新增文档
|
||||
|
||||
6. `ccw/docs/CODEX_MCP_TESTING_GUIDE.md`
|
||||
5. `ccw/docs/CODEX_MCP_TESTING_GUIDE.md`
|
||||
- 详细测试指南
|
||||
|
||||
7. `ccw/docs/QUICK_TEST_CODEX_MCP.md`
|
||||
6. `ccw/docs/QUICK_TEST_CODEX_MCP.md`
|
||||
- 快速测试步骤
|
||||
|
||||
|
||||
8. `ccw/docs/CODEX_MCP_IMPLEMENTATION_SUMMARY.md`
|
||||
- 本文档
|
||||
|
||||
|
||||
@@ -277,7 +277,7 @@ _____
|
||||
### Toast 消息机制
|
||||
|
||||
**实现位置**:
|
||||
- `ccw/src/templates/dashboard-js/components/navigation.js:286-301`
|
||||
- `ccw/frontend/src/hooks/useToast.ts` (React)
|
||||
- 显示时间:3500ms (3.5秒)
|
||||
- 淡出动画:300ms
|
||||
|
||||
|
||||
@@ -218,8 +218,8 @@ To verify the fix works:
|
||||
## Related Files
|
||||
|
||||
- **Implementation**: `ccw/src/core/routes/graph-routes.ts`
|
||||
- **Frontend**: `ccw/src/templates/dashboard-js/views/graph-explorer.js`
|
||||
- **Styles**: `ccw/src/templates/dashboard-css/14-graph-explorer.css`
|
||||
- **Frontend**: `ccw/frontend/src/components/GraphExplorer.tsx` (React SPA)
|
||||
- **Styles**: Embedded in React components
|
||||
- **API Docs**: `ccw/src/core/routes/graph-routes.md`
|
||||
- **Migration**: `codex-lens/src/codexlens/storage/migrations/migration_005_cleanup_unused_fields.py`
|
||||
|
||||
|
||||
@@ -238,23 +238,22 @@ ccw view
|
||||
rg "handleGraphRoutes" src/
|
||||
```
|
||||
|
||||
2. **检查前端是否包含 graph-explorer 视图**:
|
||||
2. **检查前端是否包含 Graph Explorer 组件**:
|
||||
```bash
|
||||
ls src/templates/dashboard-js/views/graph-explorer.js
|
||||
ls ccw/frontend/src/components/GraphExplorer.tsx
|
||||
```
|
||||
|
||||
3. **检查 dashboard-generator.ts 是否包含 graph explorer**:
|
||||
3. **检查 React 前端是否正确构建**:
|
||||
```bash
|
||||
rg "graph-explorer" src/core/dashboard-generator.ts
|
||||
ls ccw/frontend/dist/index.html
|
||||
```
|
||||
|
||||
### 解决方案
|
||||
|
||||
确保以下文件存在且正确:
|
||||
- `src/core/routes/graph-routes.ts` - API 路由处理
|
||||
- `src/templates/dashboard-js/views/graph-explorer.js` - 前端视图
|
||||
- `src/templates/dashboard-css/14-graph-explorer.css` - 样式
|
||||
- `src/templates/dashboard.html` - 包含 Graph 导航项(line 334)
|
||||
- `ccw/src/core/routes/graph-routes.ts` - API 路由处理
|
||||
- `ccw/frontend/src/components/GraphExplorer.tsx` - React 前端组件
|
||||
- `ccw/frontend/dist/index.html` - 构建后的前端入口
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
<html lang="en" data-theme="light">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
||||
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<!-- Preconnect to Google Fonts for faster font loading -->
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
||||
|
||||
8
ccw/frontend/public/favicon.svg
Normal file
8
ccw/frontend/public/favicon.svg
Normal file
@@ -0,0 +1,8 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none">
|
||||
<!-- Three horizontal lines - line style -->
|
||||
<line x1="3" y1="6" x2="18" y2="6" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
|
||||
<line x1="3" y1="12" x2="15" y2="12" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
|
||||
<line x1="3" y1="18" x2="12" y2="18" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
|
||||
<!-- Status dot - follows theme color -->
|
||||
<circle cx="19" cy="17" r="3" fill="currentColor"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 532 B |
@@ -53,13 +53,18 @@ function QueryInvalidator() {
|
||||
const registerQueryInvalidator = useWorkflowStore((state) => state.registerQueryInvalidator);
|
||||
|
||||
useEffect(() => {
|
||||
// Register callback to invalidate all 'workspace' prefixed queries
|
||||
// Register callback to invalidate all workspace-related queries on workspace switch
|
||||
const callback = () => {
|
||||
queryClient.invalidateQueries({
|
||||
predicate: (query) => {
|
||||
const queryKey = query.queryKey;
|
||||
// Check if the first element of the query key is 'workspace'
|
||||
return Array.isArray(queryKey) && queryKey[0] === 'workspace';
|
||||
if (!Array.isArray(queryKey)) return false;
|
||||
const prefix = queryKey[0];
|
||||
// Invalidate all query families that depend on workspace data
|
||||
return prefix === 'workspace'
|
||||
|| prefix === 'projectOverview'
|
||||
|| prefix === 'workflowStatusCounts'
|
||||
|| prefix === 'dashboardStats';
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -69,6 +69,7 @@ const statusIcons: Record<string, React.ElementType> = {
|
||||
cancelled: XCircle,
|
||||
idle: Clock,
|
||||
initializing: Loader2,
|
||||
ready: CheckCircle2,
|
||||
};
|
||||
|
||||
// Status color mapping
|
||||
@@ -83,6 +84,7 @@ const statusColors: Record<string, string> = {
|
||||
cancelled: 'bg-destructive/20 text-destructive border-destructive/30',
|
||||
idle: 'bg-muted text-muted-foreground border-border',
|
||||
initializing: 'bg-info/20 text-info border-info/30',
|
||||
ready: 'bg-success/20 text-success border-success/30',
|
||||
};
|
||||
|
||||
// Status to i18n key mapping
|
||||
@@ -97,6 +99,7 @@ const statusI18nKeys: Record<string, string> = {
|
||||
cancelled: 'cancelled',
|
||||
idle: 'idle',
|
||||
initializing: 'initializing',
|
||||
ready: 'ready',
|
||||
};
|
||||
|
||||
// Lite task sub-type icons
|
||||
|
||||
@@ -102,7 +102,7 @@ function HomeEmptyState({ className }: HomeEmptyStateProps) {
|
||||
</div>
|
||||
<div className="flex flex-col gap-2 w-full">
|
||||
<code className="px-3 py-2 bg-muted rounded text-xs font-mono text-center">
|
||||
/workflow:plan
|
||||
/workflow-plan
|
||||
</code>
|
||||
<p className="text-xs text-muted-foreground text-center">
|
||||
{formatMessage({ id: 'home.emptyState.noSessions.hint' })}
|
||||
@@ -119,6 +119,10 @@ const sessionStatusColors: Record<string, { bg: string; text: string }> = {
|
||||
in_progress: { bg: 'bg-warning/20', text: 'text-warning' },
|
||||
completed: { bg: 'bg-success/20', text: 'text-success' },
|
||||
paused: { bg: 'bg-slate-400/20', text: 'text-slate-500' },
|
||||
ready: { bg: 'bg-success/20', text: 'text-success' },
|
||||
initialized: { bg: 'bg-info/20', text: 'text-info' },
|
||||
archived: { bg: 'bg-slate-300/20', text: 'text-slate-400' },
|
||||
failed: { bg: 'bg-destructive/20', text: 'text-destructive' },
|
||||
};
|
||||
|
||||
// ---- Mini Stat Card with Sparkline ----
|
||||
|
||||
119
ccw/frontend/src/components/icons/CCWLogo.tsx
Normal file
119
ccw/frontend/src/components/icons/CCWLogo.tsx
Normal file
@@ -0,0 +1,119 @@
|
||||
// ========================================
|
||||
// CCW Logo Component
|
||||
// ========================================
|
||||
// Line-style logo for Claude Code Workflow
|
||||
|
||||
import { useEffect, useState } from 'react';
|
||||
import { cn } from '@/lib/utils';
|
||||
|
||||
interface CCWLogoProps {
|
||||
/** Size of the icon */
|
||||
size?: number;
|
||||
/** Additional class names */
|
||||
className?: string;
|
||||
/** Whether to show the status dot */
|
||||
showDot?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Hook to get reactive theme accent color
|
||||
*/
|
||||
function useThemeAccentColor(): string {
|
||||
const [accentColor, setAccentColor] = useState<string>(() => {
|
||||
if (typeof document === 'undefined') return 'hsl(220, 60%, 65%)';
|
||||
const root = document.documentElement;
|
||||
const accentValue = getComputedStyle(root).getPropertyValue('--accent').trim();
|
||||
return accentValue ? `hsl(${accentValue})` : 'hsl(220, 60%, 65%)';
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
const updateAccentColor = () => {
|
||||
const root = document.documentElement;
|
||||
const accentValue = getComputedStyle(root).getPropertyValue('--accent').trim();
|
||||
setAccentColor(accentValue ? `hsl(${accentValue})` : 'hsl(220, 60%, 65%)');
|
||||
};
|
||||
|
||||
// Initial update
|
||||
updateAccentColor();
|
||||
|
||||
// Watch for theme changes via MutationObserver
|
||||
const observer = new MutationObserver((mutations) => {
|
||||
mutations.forEach((mutation) => {
|
||||
if (mutation.attributeName === 'data-theme') {
|
||||
updateAccentColor();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
observer.observe(document.documentElement, {
|
||||
attributes: true,
|
||||
attributeFilter: ['data-theme'],
|
||||
});
|
||||
|
||||
return () => observer.disconnect();
|
||||
}, []);
|
||||
|
||||
return accentColor;
|
||||
}
|
||||
|
||||
/**
|
||||
* Line-style CCW logo component
|
||||
* Features three horizontal lines with a status dot that follows theme color
|
||||
*/
|
||||
export function CCWLogo({ size = 24, className, showDot = true }: CCWLogoProps) {
|
||||
const accentColor = useThemeAccentColor();
|
||||
|
||||
return (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 24 24"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
className={cn('ccw-logo', className)}
|
||||
style={{ color: accentColor }}
|
||||
aria-label="Claude Code Workflow"
|
||||
>
|
||||
{/* Three horizontal lines - line style */}
|
||||
<line
|
||||
x1="3"
|
||||
y1="6"
|
||||
x2="18"
|
||||
y2="6"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
strokeLinecap="round"
|
||||
/>
|
||||
<line
|
||||
x1="3"
|
||||
y1="12"
|
||||
x2="15"
|
||||
y2="12"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
strokeLinecap="round"
|
||||
/>
|
||||
<line
|
||||
x1="3"
|
||||
y1="18"
|
||||
x2="12"
|
||||
y2="18"
|
||||
stroke="currentColor"
|
||||
strokeWidth="2"
|
||||
strokeLinecap="round"
|
||||
/>
|
||||
|
||||
{/* Status dot - follows theme color via currentColor */}
|
||||
{showDot && (
|
||||
<circle
|
||||
cx="19"
|
||||
cy="17"
|
||||
r="3"
|
||||
fill="currentColor"
|
||||
/>
|
||||
)}
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
export default CCWLogo;
|
||||
6
ccw/frontend/src/components/icons/index.ts
Normal file
6
ccw/frontend/src/components/icons/index.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
// ========================================
|
||||
// Icons Index
|
||||
// ========================================
|
||||
// Custom icon components for CCW Dashboard
|
||||
|
||||
export { CCWLogo } from './CCWLogo';
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user