From 4ce4419ea61712c81f56bcaac4f5deb4ed02a293 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Sat, 7 Feb 2026 15:28:31 +0800 Subject: [PATCH] Remove deprecated issue management skills: issue-discover, issue-new, issue-plan, and issue-queue. These skills have been deleted to streamline the codebase and improve maintainability. --- .../SKILL.md | 352 -------- .../phases/01-interactive-framework.md | 460 ---------- .../phases/02-parallel-role-analysis.md | 704 --------------- .../phases/03-synthesis-integration.md | 405 --------- .claude/skills/workflow-execute/SKILL.md | 639 -------------- .claude/skills/workflow-lite-plan/SKILL.md | 217 ----- .../workflow-lite-plan/phases/01-lite-plan.md | 691 --------------- .../phases/02-multi-cli-plan.md | 570 ------------ .../workflow-lite-plan/phases/03-lite-fix.md | 799 ----------------- .../phases/04-lite-execute.md | 738 ---------------- .claude/skills/workflow-plan/SKILL.md | 367 -------- .../phases/01-session-discovery.md | 281 ------ .../phases/02-context-gathering.md | 427 --------- .../phases/03-conflict-resolution.md | 645 -------------- .../phases/04-task-generation.md | 701 --------------- .../skills/issue-discover-by-prompt/SKILL.md | 365 -------- .codex/skills/issue-discover/SKILL.md | 521 ++++++----- .../issue-discover/phases/01-issue-new.md | 348 ++++++++ .codex/skills/issue-new/SKILL.md | 391 --------- .codex/skills/issue-plan/SKILL.md | 247 ------ .codex/skills/issue-queue/SKILL.md | 299 ------- .codex/skills/issue-resolve/SKILL.md | 343 ++++++++ .../issue-resolve/phases/01-issue-plan.md | 318 +++++++ .codex/skills/workflow-tdd-plan/SKILL.md | 811 ++++++++++++++++++ .../phases/01-test-context-gather.md | 240 ++++++ .../.docusaurus/codeTranslations.json | 85 +- .../default/__mdx-loader-dependency.json | 2 +- ...te-docs-commands-cli-cli-init-mdx-056.json | 6 +- ...ocs-commands-cli-codex-review-mdx-f1b.json | 6 +- ...mands-general-ccw-coordinator-mdx-d55.json | 6 +- ...cs-commands-general-ccw-debug-mdx-97c.json | 6 +- ...ite-docs-commands-general-ccw-mdx-f48.json | 8 +- ...ocs-commands-general-ccw-plan-mdx-04d.json | 6 +- ...ocs-commands-general-ccw-test-mdx-cce.json | 6 +- ...nds-general-codex-coordinator-mdx-f92.json | 6 +- ...-commands-general-flow-create-mdx-fab.json | 6 +- ...ds-issue-issue-convert-to-plan-md-5c7.json | 6 +- ...-commands-issue-issue-discover-md-1e3.json | 6 +- ...s-commands-issue-issue-execute-md-fe8.json | 6 +- ...ds-issue-issue-from-brainstorm-md-2ec.json | 6 +- ...-docs-commands-issue-issue-new-md-4ad.json | 6 +- ...docs-commands-issue-issue-plan-md-a6c.json | 6 +- ...ocs-commands-issue-issue-queue-md-1ba.json | 6 +- ...ommands-memory-memory-compact-mdx-7a1.json | 6 +- ...s-memory-memory-docs-full-cli-mdx-4cc.json | 6 +- ...emory-memory-docs-related-cli-mdx-60e.json | 6 +- ...s-commands-memory-memory-load-mdx-157.json | 6 +- ...nds-memory-memory-update-full-mdx-666.json | 6 +- ...-memory-memory-update-related-mdx-611.json | 6 +- .../.docusaurus/docusaurus.config.mjs | 8 +- ccw/docs-site/.docusaurus/globalData.json | 72 +- ccw/docs-site/.docusaurus/i18n.json | 2 +- ccw/docs-site/.docusaurus/registry.js | 84 +- ccw/docs-site/.docusaurus/routes.js | 175 ++-- .../.docusaurus/routesChunkNames.json | 251 +++--- ccw/docs-site/.docusaurus/site-metadata.json | 4 +- 56 files changed, 2769 insertions(+), 9927 deletions(-) delete mode 100644 .claude/skills/workflow-brainstorm-auto-parallel/SKILL.md delete mode 100644 .claude/skills/workflow-brainstorm-auto-parallel/phases/01-interactive-framework.md delete mode 100644 
.claude/skills/workflow-brainstorm-auto-parallel/phases/02-parallel-role-analysis.md delete mode 100644 .claude/skills/workflow-brainstorm-auto-parallel/phases/03-synthesis-integration.md delete mode 100644 .claude/skills/workflow-execute/SKILL.md delete mode 100644 .claude/skills/workflow-lite-plan/SKILL.md delete mode 100644 .claude/skills/workflow-lite-plan/phases/01-lite-plan.md delete mode 100644 .claude/skills/workflow-lite-plan/phases/02-multi-cli-plan.md delete mode 100644 .claude/skills/workflow-lite-plan/phases/03-lite-fix.md delete mode 100644 .claude/skills/workflow-lite-plan/phases/04-lite-execute.md delete mode 100644 .claude/skills/workflow-plan/SKILL.md delete mode 100644 .claude/skills/workflow-plan/phases/01-session-discovery.md delete mode 100644 .claude/skills/workflow-plan/phases/02-context-gathering.md delete mode 100644 .claude/skills/workflow-plan/phases/03-conflict-resolution.md delete mode 100644 .claude/skills/workflow-plan/phases/04-task-generation.md delete mode 100644 .codex/skills/issue-discover-by-prompt/SKILL.md create mode 100644 .codex/skills/issue-discover/phases/01-issue-new.md delete mode 100644 .codex/skills/issue-new/SKILL.md delete mode 100644 .codex/skills/issue-plan/SKILL.md delete mode 100644 .codex/skills/issue-queue/SKILL.md create mode 100644 .codex/skills/issue-resolve/SKILL.md create mode 100644 .codex/skills/issue-resolve/phases/01-issue-plan.md create mode 100644 .codex/skills/workflow-tdd-plan/SKILL.md create mode 100644 .codex/skills/workflow-tdd-plan/phases/01-test-context-gather.md diff --git a/.claude/skills/workflow-brainstorm-auto-parallel/SKILL.md b/.claude/skills/workflow-brainstorm-auto-parallel/SKILL.md deleted file mode 100644 index 5dacc5f1..00000000 --- a/.claude/skills/workflow-brainstorm-auto-parallel/SKILL.md +++ /dev/null @@ -1,352 +0,0 @@ ---- -name: workflow-brainstorm-auto-parallel -description: Parallel brainstorming automation with dynamic role selection and concurrent execution across multiple perspectives. Triggers on "workflow:brainstorm:auto-parallel". -allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep ---- - -# Workflow Brainstorm Auto-Parallel - -Parallel brainstorming automation orchestrating interactive framework generation, concurrent multi-role analysis, and synthesis integration to produce comprehensive guidance specifications. - -## Architecture Overview - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ Auto-Parallel Orchestrator (SKILL.md) │ -│ → Pure coordinator: Execute phases, parse outputs, manage tasks│ -└───────────────┬─────────────────────────────────────────────────┘ - │ - ┌───────────┼───────────┬───────────┐ - ↓ ↓ ↓ ↓ -┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ -│ Phase 0 │ │ Phase 1 │ │ Phase 2 │ │ Phase 3 │ -│ Parse │ │Framework│ │Parallel │ │Synthesis│ -│ Params │ │Generate │ │ Roles │ │Integrate│ -└─────────┘ └─────────┘ └─────────┘ └─────────┘ - ↓ ↓ ↓ ↓ - count, guidance- N role synthesis- -style-skill specification analyses specification -``` - -## Key Design Principles - -1. **Pure Orchestrator**: Execute phases in sequence (Phase 1, 3 sequential; Phase 2 parallel) -2. **Auto-Continue**: All phases run autonomously without user intervention between phases -3. **Task Attachment Model**: Sub-tasks are attached/collapsed dynamically in TodoWrite -4. **Progressive Phase Loading**: Phase docs are read on-demand when phase executes -5. 
**Parallel Execution**: Phase 2 launches N role agents concurrently - -## Auto Mode - -When `--yes` or `-y`: Auto-select recommended roles, skip all clarification questions, use default answers. - -## Execution Flow - -``` -Parameter Parsing: - ├─ Extract --count N (default: 3, max: 9) - ├─ Extract --style-skill package-name (optional, for ui-designer) - └─ Validate style SKILL package exists - -Phase 1: Interactive Framework Generation - └─ Ref: phases/01-interactive-framework.md - ├─ Tasks attached: Phase 0-5 (Context → Topic → Roles → Questions → Conflicts → Spec) - ├─ Output: guidance-specification.md + workflow-session.json - └─ Parse: selected_roles[], session_id - -Phase 2: Parallel Role Analysis - └─ Ref: phases/02-parallel-role-analysis.md - ├─ Tasks attached: N role agents (concurrent execution) - ├─ For each role: Execute conceptual-planning-agent - ├─ Optional: ui-designer appends --style-skill if provided - └─ Output: [role]/analysis*.md (one per role) - -Phase 3: Synthesis Integration - └─ Ref: phases/03-synthesis-integration.md - ├─ Tasks attached: Load → Analyze → Integrate → Generate - ├─ Input: All role analyses + guidance-specification.md - └─ Output: synthesis-specification.md - -Return: - └─ Summary with session info and next steps -``` - -**Phase Reference Documents** (read on-demand when phase executes): - -| Phase | Document | Purpose | -|-------|----------|---------| -| 1 | [phases/01-interactive-framework.md](phases/01-interactive-framework.md) | Interactive clarification generating confirmed guidance specification through role-based analysis | -| 2 | [phases/02-parallel-role-analysis.md](phases/02-parallel-role-analysis.md) | Unified role-specific analysis generation with interactive context gathering and concurrent execution | -| 3 | [phases/03-synthesis-integration.md](phases/03-synthesis-integration.md) | Cross-role synthesis integration with intelligent Q&A and targeted updates | - -## Core Rules - -1. **Start Immediately**: First action is TodoWrite initialization, second action is parameter parsing -2. **No Preliminary Analysis**: Do not analyze topic before Phase 1 - artifacts handles all analysis -3. **Parse Every Output**: Extract selected_roles from workflow-session.json after Phase 1 -4. **Auto-Continue via TodoList**: Check TodoList status to execute next pending phase automatically -5. **Track Progress**: Update TodoWrite dynamically with task attachment/collapse pattern -6. **Task Attachment Model**: Phase executes **attach** sub-tasks to current workflow. Orchestrator **executes** these attached tasks, then **collapses** them after completion -7. **⚠️ CRITICAL: DO NOT STOP**: Continuous multi-phase workflow. After executing all attached tasks, immediately collapse them and execute next phase -8. **Parallel Execution**: Phase 2 attaches multiple agent tasks simultaneously for concurrent execution -9. 
**Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute - -## Usage - -``` -Trigger: "workflow:brainstorm:auto-parallel" -Input: "" [--count N] [--style-skill package-name] -``` - -**Recommended Structured Format**: -``` -Input: "GOAL: [objective] SCOPE: [boundaries] CONTEXT: [background]" [--count N] [--style-skill package-name] -``` - -**Parameters**: -- `topic` (required): Topic or challenge description (structured format recommended) -- `--count N` (optional): Number of roles to select (default: 3, max: 9) -- `--style-skill package-name` (optional): Style SKILL package to load for ui-designer (located at `.claude/skills/style-{package-name}/`) - -## Data Flow - -### Phase 0 → Phase 1 - -**Input**: -- `topic`: User-provided topic or challenge description -- `count`: Number of roles to select (parsed from --count parameter) -- `style_skill_package`: Style SKILL package name (parsed from --style-skill parameter) - -**Output**: None (in-memory variables) - -### Phase 1 → Phase 2 - -**Input**: `topic`, `count`, `style_skill_package` - -**Output**: -- `session_id`: Workflow session identifier (WFS-{topic-slug}) -- `selected_roles[]`: Array of selected role names -- `guidance-specification.md`: Framework content -- `workflow-session.json`: Session metadata - -**Parsing**: -```javascript -// Read workflow-session.json after Phase 1 -const session_data = Read(".workflow/active/WFS-{topic}/workflow-session.json"); -const selected_roles = session_data.selected_roles; -const session_id = session_data.session_id; -const style_skill_package = session_data.style_skill_package || null; -``` - -### Phase 2 → Phase 3 - -**Input**: `session_id`, `selected_roles[]`, `style_skill_package` - -**Output**: -- `[role]/analysis*.md`: One analysis per selected role -- `.superdesign/design_iterations/`: UI design artifacts (if --style-skill provided) - -**Validation**: -```javascript -// Verify all role analyses created -for (const role of selected_roles) { - const analysis_path = `${brainstorm_dir}/${role}/analysis.md`; - if (!exists(analysis_path)) { - ERROR: `Missing analysis for ${role}`; - } -} -``` - -### Phase 3 → Completion - -**Input**: `session_id`, all role analyses, guidance-specification.md - -**Output**: -- `synthesis-specification.md`: Integrated cross-role analysis - -**Validation**: -```javascript -const synthesis_path = `${brainstorm_dir}/synthesis-specification.md`; -if (!exists(synthesis_path)) { - ERROR: "Synthesis generation failed"; -} -``` - -## TodoWrite Pattern - -**Core Concept**: Dynamic task attachment and collapse for parallel brainstorming workflow with interactive framework generation and concurrent role analysis. - -### Key Principles - -1. **Task Attachment** (when Phase executed): - - Phase's internal tasks are **attached** to orchestrator's TodoWrite - - Phase 1: artifacts attaches 5 internal tasks (Phase 0-5) - - Phase 2: Multiple role-analysis calls attach N role analysis tasks simultaneously - - Phase 3: synthesis attaches internal tasks - - First attached task marked as `in_progress`, others as `pending` - - Orchestrator **executes** these attached tasks (sequentially for Phase 1, 3; in parallel for Phase 2) - -2. 
**Task Collapse** (after sub-tasks complete): - - Remove detailed sub-tasks from TodoWrite - - **Collapse** to high-level phase summary - - Example: Phase 1 sub-tasks collapse to "Phase 1: Interactive Framework Generation: completed" - - Phase 2: Multiple role tasks collapse to "Phase 2: Parallel Role Analysis: completed" - - Phase 3: Synthesis tasks collapse to "Phase 3: Synthesis Integration: completed" - - Maintains clean orchestrator-level view - -3. **Continuous Execution**: - - After collapse, automatically proceed to next pending phase - - No user intervention required between phases - - TodoWrite dynamically reflects current execution state - -**Lifecycle Summary**: Initial pending tasks → Phase 1 executed (artifacts tasks ATTACHED) → Artifacts sub-tasks executed → Phase 1 completed (tasks COLLAPSED) → Phase 2 executed (N role tasks ATTACHED in parallel) → Role analyses executed concurrently → Phase 2 completed (tasks COLLAPSED) → Phase 3 executed (synthesis tasks ATTACHED) → Synthesis sub-tasks executed → Phase 3 completed (tasks COLLAPSED) → Workflow complete. - -### Brainstorming Workflow Specific Features - -- **Phase 1**: Interactive framework generation with user Q&A (Phase attachment) -- **Phase 2**: Parallel role analysis execution with N concurrent agents (Task agent attachments) -- **Phase 3**: Cross-role synthesis integration (Phase attachment) -- **Dynamic Role Count**: `--count N` parameter determines number of Phase 2 parallel tasks (default: 3, max: 9) -- **Mixed Execution**: Sequential (Phase 1, 3) and Parallel (Phase 2) task execution - -### Initial TodoWrite (Workflow Start) - -```json -[ - {"content": "Phase 0: Parameter Parsing", "status": "in_progress", "activeForm": "Parsing parameters"}, - {"content": "Phase 1: Interactive Framework Generation", "status": "pending", "activeForm": "Executing artifacts interactive framework"}, - {"content": "Phase 2: Parallel Role Analysis", "status": "pending", "activeForm": "Executing parallel role analysis"}, - {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"} -] -``` - -### Phase 1 Task Attachment (Artifacts Execution) - -```json -[ - {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing parameters"}, - {"content": "Phase 1: Interactive Framework Generation", "status": "in_progress", "activeForm": "Executing artifacts interactive framework"}, - {"content": " → Phase 0: Context collection", "status": "in_progress", "activeForm": "Collecting context"}, - {"content": " → Phase 1: Topic analysis", "status": "pending", "activeForm": "Analyzing topic"}, - {"content": " → Phase 2: Role selection", "status": "pending", "activeForm": "Selecting roles"}, - {"content": " → Phase 3: Role questions", "status": "pending", "activeForm": "Collecting role questions"}, - {"content": " → Phase 4: Conflict resolution", "status": "pending", "activeForm": "Resolving conflicts"}, - {"content": " → Phase 5: Generate specification", "status": "pending", "activeForm": "Generating specification"}, - {"content": "Phase 2: Parallel Role Analysis", "status": "pending", "activeForm": "Executing parallel role analysis"}, - {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"} -] -``` - -### Phase 1 Task Collapse (Artifacts Completed) - -```json -[ - {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing parameters"}, - {"content": "Phase 1: Interactive Framework 
Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"}, - {"content": "Phase 2: Parallel Role Analysis", "status": "pending", "activeForm": "Executing parallel role analysis"}, - {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"} -] -``` - -### Phase 2 Task Attachment (Parallel Role Execution) - -```json -[ - {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing parameters"}, - {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"}, - {"content": "Phase 2: Parallel Role Analysis", "status": "in_progress", "activeForm": "Executing parallel role analysis"}, - {"content": " → Execute system-architect analysis", "status": "in_progress", "activeForm": "Executing system-architect analysis"}, - {"content": " → Execute ui-designer analysis", "status": "in_progress", "activeForm": "Executing ui-designer analysis"}, - {"content": " → Execute product-manager analysis", "status": "in_progress", "activeForm": "Executing product-manager analysis"}, - {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"} -] -``` - -### Phase 2 Task Collapse (All Roles Completed) - -```json -[ - {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing parameters"}, - {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"}, - {"content": "Phase 2: Parallel Role Analysis", "status": "completed", "activeForm": "Executing parallel role analysis"}, - {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"} -] -``` - -## Session Management - -**⚡ FIRST ACTION**: Check `.workflow/active/` for existing sessions before Phase 1 - -**Multiple Sessions Support**: -- Different Claude instances can have different brainstorming sessions -- If multiple sessions found, prompt user to select -- If single session found, use it -- If no session exists, create `WFS-[topic-slug]` - -**Session Continuity**: -- MUST use selected session for all phases -- Each role's context stored in session directory -- Session isolation: Each session maintains independent state - -## Output Structure - -**Phase 1 Output**: -- `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md` (framework content) -- `.workflow/active/WFS-{topic}/workflow-session.json` (metadata: selected_roles[], topic, timestamps, style_skill_package) - -**Phase 2 Output**: -- `.workflow/active/WFS-{topic}/.brainstorming/{role}/analysis.md` (one per role) -- `.superdesign/design_iterations/` (ui-designer artifacts, if --style-skill provided) - -**Phase 3 Output**: -- `.workflow/active/WFS-{topic}/.brainstorming/synthesis-specification.md` (integrated analysis) - -**⚠️ Storage Separation**: Guidance content in .md files, metadata in .json (no duplication) -**⚠️ Style References**: When --style-skill provided, workflow-session.json stores style_skill_package name, ui-designer loads from `.claude/skills/style-{package-name}/` - -## Available Roles - -- data-architect (数据架构师) -- product-manager (产品经理) -- product-owner (产品负责人) -- scrum-master (敏捷教练) -- subject-matter-expert (领域专家) -- system-architect (系统架构师) -- test-strategist (测试策略师) -- ui-designer (UI 设计师) -- ux-expert (UX 专家) - -**Role Selection**: Handled by Phase 1 
(artifacts) - intelligent recommendation + user selection - -## Error Handling - -- **Role selection failure**: Phase 1 defaults to product-manager with explanation -- **Agent execution failure**: Agent-specific retry with minimal dependencies -- **Template loading issues**: Agent handles graceful degradation -- **Synthesis conflicts**: Phase 3 highlights disagreements without resolution -- **Context overflow protection**: Per-role limits enforced by conceptual-planning-agent - -## Reference Information - -**File Structure**: -``` -.workflow/active/WFS-[topic]/ -├── workflow-session.json # Session metadata ONLY -└── .brainstorming/ - ├── guidance-specification.md # Framework (Phase 1) - ├── {role}/ - │ ├── analysis.md # Main document (with optional @references) - │ └── analysis-{slug}.md # Section documents (max 5) - └── synthesis-specification.md # Integration (Phase 3) -``` - -**Next Steps** (returned to user): -``` -Brainstorming complete for session: {sessionId} -Roles analyzed: {count} -Synthesis: .workflow/active/WFS-{topic}/.brainstorming/synthesis-specification.md - -✅ Next Steps: -1. Phase 3 synthesis (phases/03-synthesis-integration.md) - Optional refinement (if not auto-executed) -2. Planning workflow (workflow-plan/SKILL.md) --session {sessionId} - Generate implementation plan -``` diff --git a/.claude/skills/workflow-brainstorm-auto-parallel/phases/01-interactive-framework.md b/.claude/skills/workflow-brainstorm-auto-parallel/phases/01-interactive-framework.md deleted file mode 100644 index cebc344e..00000000 --- a/.claude/skills/workflow-brainstorm-auto-parallel/phases/01-interactive-framework.md +++ /dev/null @@ -1,460 +0,0 @@ -# Phase 1: Interactive Framework Generation - -> 来源: `commands/workflow/brainstorm/artifacts.md` - -## Overview - -Seven-phase workflow: **Context collection** → **Topic analysis** → **Role selection** → **Role questions** → **Conflict resolution** → **Final check** → **Generate specification** - -All user interactions use AskUserQuestion tool (max 4 questions per call, multi-round). - -**Input**: `"GOAL: [objective] SCOPE: [boundaries] CONTEXT: [background]" [--count N]` -**Output**: `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md` -**Core Principle**: Questions dynamically generated from project context + topic keywords, NOT generic templates - -**Parameters**: -- `topic` (required): Topic or challenge description (structured format recommended) -- `--count N` (optional): Number of roles to select (system recommends N+2 options, default: 3) - ---- - -## Quick Reference - -### Phase Summary - -| Phase | Goal | AskUserQuestion | Storage | -|-------|------|-----------------|---------| -| 0 | Context collection | - | context-package.json | -| 1 | Topic analysis | 2-4 questions | intent_context | -| 2 | Role selection | 1 multi-select | selected_roles | -| 3 | Role questions | 3-4 per role | role_decisions[role] | -| 4 | Conflict resolution | max 4 per round | cross_role_decisions | -| 4.5 | Final check | progressive rounds | additional_decisions | -| 5 | Generate spec | - | guidance-specification.md | - -### AskUserQuestion Pattern - -```javascript -// Single-select (Phase 1, 3, 4) -AskUserQuestion({ - questions: [ - { - question: "{问题文本}", - header: "{短标签}", // max 12 chars - multiSelect: false, - options: [ - { label: "{选项}", description: "{说明和影响}" }, - { label: "{选项}", description: "{说明和影响}" }, - { label: "{选项}", description: "{说明和影响}" } - ] - } - // ... 
max 4 questions per call - ] -}) - -// Multi-select (Phase 2) -AskUserQuestion({ - questions: [{ - question: "请选择 {count} 个角色", - header: "角色选择", - multiSelect: true, - options: [/* max 4 options per call */] - }] -}) -``` - -### Multi-Round Execution - -```javascript -const BATCH_SIZE = 4; -for (let i = 0; i < allQuestions.length; i += BATCH_SIZE) { - const batch = allQuestions.slice(i, i + BATCH_SIZE); - AskUserQuestion({ questions: batch }); - // Store responses before next round -} -``` - ---- - -## Task Tracking - -**TodoWrite Rule**: EXTEND auto-parallel's task list (NOT replace/overwrite) - -**When called from auto-parallel**: -- Find artifacts parent task → Mark "in_progress" -- APPEND sub-tasks (Phase 0-5) → Mark each as completes -- When Phase 5 completes → Mark parent "completed" -- PRESERVE all other auto-parallel tasks - -**Standalone Mode**: -```json -[ - {"content": "Initialize session", "status": "pending", "activeForm": "Initializing"}, - {"content": "Phase 0: Context collection", "status": "pending", "activeForm": "Phase 0"}, - {"content": "Phase 1: Topic analysis (2-4 questions)", "status": "pending", "activeForm": "Phase 1"}, - {"content": "Phase 2: Role selection", "status": "pending", "activeForm": "Phase 2"}, - {"content": "Phase 3: Role questions (per role)", "status": "pending", "activeForm": "Phase 3"}, - {"content": "Phase 4: Conflict resolution", "status": "pending", "activeForm": "Phase 4"}, - {"content": "Phase 4.5: Final clarification", "status": "pending", "activeForm": "Phase 4.5"}, - {"content": "Phase 5: Generate specification", "status": "pending", "activeForm": "Phase 5"} -] -``` - ---- - -## Execution Phases - -### Session Management - -- Check `.workflow/active/` for existing sessions -- Multiple → Prompt selection | Single → Use it | None → Create `WFS-[topic-slug]` -- Parse `--count N` parameter (default: 3) -- Store decisions in `workflow-session.json` - -### Phase 0: Context Collection - -**Goal**: Gather project context BEFORE user interaction - -**Steps**: -1. Check if `context-package.json` exists → Skip if valid -2. Invoke `context-search-agent` (BRAINSTORM MODE - lightweight) -3. Output: `.workflow/active/WFS-{session-id}/.process/context-package.json` - -**Graceful Degradation**: If agent fails, continue to Phase 1 without context - -```javascript -Task( - subagent_type="context-search-agent", - run_in_background=false, - description="Gather project context for brainstorm", - prompt=` -Execute context-search-agent in BRAINSTORM MODE (Phase 1-2 only). - -Session: ${session_id} -Task: ${task_description} -Output: .workflow/${session_id}/.process/context-package.json - -Required fields: metadata, project_context, assets, dependencies, conflict_detection -` -) -``` - -### Phase 1: Topic Analysis - -**Goal**: Extract keywords/challenges enriched by Phase 0 context - -**Steps**: -1. Load Phase 0 context (tech_stack, modules, conflict_risk) -2. Deep topic analysis (entities, challenges, constraints, metrics) -3. Generate 2-4 context-aware probing questions -4. 
AskUserQuestion → Store to `session.intent_context` - -**Example**: -```javascript -AskUserQuestion({ - questions: [ - { - question: "实时协作平台的主要技术挑战?", - header: "核心挑战", - multiSelect: false, - options: [ - { label: "实时数据同步", description: "100+用户同时在线,状态同步复杂度高" }, - { label: "可扩展性架构", description: "用户规模增长时的系统扩展能力" }, - { label: "冲突解决机制", description: "多用户同时编辑的冲突处理策略" } - ] - }, - { - question: "MVP阶段最关注的指标?", - header: "优先级", - multiSelect: false, - options: [ - { label: "功能完整性", description: "实现所有核心功能" }, - { label: "用户体验", description: "流畅的交互体验和响应速度" }, - { label: "系统稳定性", description: "高可用性和数据一致性" } - ] - } - ] -}) -``` - -**⚠️ CRITICAL**: Questions MUST reference topic keywords. Generic "Project type?" violates dynamic generation. - -### Phase 2: Role Selection - -**Goal**: User selects roles from intelligent recommendations - -**Available Roles**: data-architect, product-manager, product-owner, scrum-master, subject-matter-expert, system-architect, test-strategist, ui-designer, ux-expert - -**Steps**: -1. Analyze Phase 1 keywords → Recommend count+2 roles with rationale -2. AskUserQuestion (multiSelect=true) → Store to `session.selected_roles` -3. If count+2 > 4, split into multiple rounds - -**Example**: -```javascript -AskUserQuestion({ - questions: [{ - question: "请选择 3 个角色参与头脑风暴分析", - header: "角色选择", - multiSelect: true, - options: [ - { label: "system-architect", description: "实时同步架构设计和技术选型" }, - { label: "ui-designer", description: "协作界面用户体验和状态展示" }, - { label: "product-manager", description: "功能优先级和MVP范围决策" }, - { label: "data-architect", description: "数据同步模型和存储方案设计" } - ] - }] -}) -``` - -**⚠️ CRITICAL**: User MUST interact. NEVER auto-select without confirmation. - -### Phase 3: Role-Specific Questions - -**Goal**: Generate deep questions mapping role expertise to Phase 1 challenges - -**Algorithm**: -1. FOR each selected role: - - Map Phase 1 challenges to role domain - - Generate 3-4 questions (implementation depth, trade-offs, edge cases) - - AskUserQuestion per role → Store to `session.role_decisions[role]` -2. Process roles sequentially (one at a time for clarity) -3. If role needs > 4 questions, split into multiple rounds - -**Example** (system-architect): -```javascript -AskUserQuestion({ - questions: [ - { - question: "100+ 用户实时状态同步方案?", - header: "状态同步", - multiSelect: false, - options: [ - { label: "Event Sourcing", description: "完整事件历史,支持回溯,存储成本高" }, - { label: "集中式状态管理", description: "实现简单,单点瓶颈风险" }, - { label: "CRDT", description: "去中心化,自动合并,学习曲线陡" } - ] - }, - { - question: "两个用户同时编辑冲突如何解决?", - header: "冲突解决", - multiSelect: false, - options: [ - { label: "自动合并", description: "用户无感知,可能产生意外结果" }, - { label: "手动解决", description: "用户控制,增加交互复杂度" }, - { label: "版本控制", description: "保留历史,需要分支管理" } - ] - } - ] -}) -``` - -### Phase 4: Conflict Resolution - -**Goal**: Resolve ACTUAL conflicts from Phase 3 answers - -**Algorithm**: -1. Analyze Phase 3 answers for conflicts: - - Contradictory choices (e.g., "fast iteration" vs "complex Event Sourcing") - - Missing integration (e.g., "Optimistic updates" but no conflict handling) - - Implicit dependencies (e.g., "Live cursors" but no auth defined) -2. Generate clarification questions referencing SPECIFIC Phase 3 choices -3. AskUserQuestion (max 4 per call, multi-round) → Store to `session.cross_role_decisions` -4. 
If NO conflicts: Skip Phase 4 (inform user: "未检测到跨角色冲突,跳过Phase 4") - -**Example**: -```javascript -AskUserQuestion({ - questions: [{ - question: "CRDT 与 UI 回滚期望冲突,如何解决?\n背景:system-architect选择CRDT,ui-designer期望回滚UI", - header: "架构冲突", - multiSelect: false, - options: [ - { label: "采用 CRDT", description: "保持去中心化,调整UI期望" }, - { label: "显示合并界面", description: "增加用户交互,展示冲突详情" }, - { label: "切换到 OT", description: "支持回滚,增加服务器复杂度" } - ] - }] -}) -``` - -### Phase 4.5: Final Clarification - -**Purpose**: Ensure no important points missed before generating specification - -**Steps**: -1. Ask initial check: - ```javascript - AskUserQuestion({ - questions: [{ - question: "在生成最终规范之前,是否有前面未澄清的重点需要补充?", - header: "补充确认", - multiSelect: false, - options: [ - { label: "无需补充", description: "前面的讨论已经足够完整" }, - { label: "需要补充", description: "还有重要内容需要澄清" } - ] - }] - }) - ``` -2. If "需要补充": - - Analyze user's additional points - - Generate progressive questions (not role-bound, interconnected) - - AskUserQuestion (max 4 per round) → Store to `session.additional_decisions` - - Repeat until user confirms completion -3. If "无需补充": Proceed to Phase 5 - -**Progressive Pattern**: Questions interconnected, each round informs next, continue until resolved. - -### Phase 5: Generate Specification - -**Steps**: -1. Load all decisions: `intent_context` + `selected_roles` + `role_decisions` + `cross_role_decisions` + `additional_decisions` -2. Transform Q&A to declarative: Questions → Headers, Answers → CONFIRMED/SELECTED statements -3. Generate `guidance-specification.md` -4. Update `workflow-session.json` (metadata only) -5. Validate: No interrogative sentences, all decisions traceable - ---- - -## Question Guidelines - -### Core Principle - -**Target**: 开发者(理解技术但需要从用户需求出发) - -**Question Structure**: `[业务场景/需求前提] + [技术关注点]` -**Option Structure**: `标签:[技术方案] + 说明:[业务影响] + [技术权衡]` - -### Quality Rules - -**MUST Include**: -- ✅ All questions in Chinese (用中文提问) -- ✅ 业务场景作为问题前提 -- ✅ 技术选项的业务影响说明 -- ✅ 量化指标和约束条件 - -**MUST Avoid**: -- ❌ 纯技术选型无业务上下文 -- ❌ 过度抽象的用户体验问题 -- ❌ 脱离话题的通用架构问题 - -### Phase-Specific Requirements - -| Phase | Focus | Key Requirements | -|-------|-------|------------------| -| 1 | 意图理解 | Reference topic keywords, 用户场景、业务约束、优先级 | -| 2 | 角色推荐 | Intelligent analysis (NOT keyword mapping), explain relevance | -| 3 | 角色问题 | Reference Phase 1 keywords, concrete options with trade-offs | -| 4 | 冲突解决 | Reference SPECIFIC Phase 3 choices, explain impact on both roles | - ---- - -## Output & Governance - -### Output Template - -**File**: `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md` - -```markdown -# [Project] - Confirmed Guidance Specification - -**Metadata**: [timestamp, type, focus, roles] - -## 1. Project Positioning & Goals -**CONFIRMED Objectives**: [from topic + Phase 1] -**CONFIRMED Success Criteria**: [from Phase 1 answers] - -## 2-N. 
[Role] Decisions -### SELECTED Choices -**[Question topic]**: [User's answer] -- **Rationale**: [From option description] -- **Impact**: [Implications] - -### Cross-Role Considerations -**[Conflict resolved]**: [Resolution from Phase 4] -- **Affected Roles**: [Roles involved] - -## Cross-Role Integration -**CONFIRMED Integration Points**: [API/Data/Auth from multiple roles] - -## Risks & Constraints -**Identified Risks**: [From answers] → Mitigation: [Approach] - -## Next Steps -**⚠️ Automatic Continuation** (when called from auto-parallel): -- auto-parallel assigns agents for role-specific analysis -- Each selected role gets conceptual-planning-agent -- Agents read this guidance-specification.md for context - -## Appendix: Decision Tracking -| Decision ID | Category | Question | Selected | Phase | Rationale | -|-------------|----------|----------|----------|-------|-----------| -| D-001 | Intent | [Q] | [A] | 1 | [Why] | -| D-002 | Roles | [Selected] | [Roles] | 2 | [Why] | -| D-003+ | [Role] | [Q] | [A] | 3 | [Why] | -``` - -### File Structure - -``` -.workflow/active/WFS-[topic]/ -├── workflow-session.json # Metadata ONLY -├── .process/ -│ └── context-package.json # Phase 0 output -└── .brainstorming/ - └── guidance-specification.md # Full guidance content -``` - -### Session Metadata - -```json -{ - "session_id": "WFS-{topic-slug}", - "type": "brainstorming", - "topic": "{original user input}", - "selected_roles": ["system-architect", "ui-designer", "product-manager"], - "phase_completed": "artifacts", - "timestamp": "2025-10-24T10:30:00Z", - "count_parameter": 3 -} -``` - -**⚠️ Rule**: Session JSON stores ONLY metadata. All guidance content goes to guidance-specification.md. - -### Validation Checklist - -- ✅ No interrogative sentences (use CONFIRMED/SELECTED) -- ✅ Every decision traceable to user answer -- ✅ Cross-role conflicts resolved or documented -- ✅ Next steps concrete and specific -- ✅ No content duplication between .json and .md - -### Update Mechanism - -``` -IF guidance-specification.md EXISTS: - Prompt: "Regenerate completely / Update sections / Cancel" -ELSE: - Run full Phase 0-5 flow -``` - -### Governance Rules - -- All decisions MUST use CONFIRMED/SELECTED (NO "?" in decision sections) -- Every decision MUST trace to user answer -- Conflicts MUST be resolved (not marked "TBD") -- Next steps MUST be actionable -- Topic preserved as authoritative reference - -**CRITICAL**: Guidance is single source of truth for downstream phases. Ambiguity violates governance. - ---- - -## Post-Phase Update - -After Phase 1 completes: -- **Output Created**: `guidance-specification.md`, `workflow-session.json` -- **Data Parsed**: `selected_roles[]`, `session_id` -- **Next Action**: Auto-continue to Phase 2 (parallel role analysis) -- **TodoWrite**: Collapse Phase 1 sub-tasks to "Phase 1: Interactive Framework Generation: completed" diff --git a/.claude/skills/workflow-brainstorm-auto-parallel/phases/02-parallel-role-analysis.md b/.claude/skills/workflow-brainstorm-auto-parallel/phases/02-parallel-role-analysis.md deleted file mode 100644 index 04f68b97..00000000 --- a/.claude/skills/workflow-brainstorm-auto-parallel/phases/02-parallel-role-analysis.md +++ /dev/null @@ -1,704 +0,0 @@ -# Phase 2: Parallel Role Analysis - -> 来源: `commands/workflow/brainstorm/role-analysis.md` - -## Overview - -**Unified command for generating and updating role-specific analysis** with interactive context gathering, framework alignment, and incremental update support. 
Replaces 9 individual role commands with single parameterized workflow. - -### Core Function -- **Multi-Role Support**: Single command supports all 9 brainstorming roles -- **Interactive Context**: Dynamic question generation based on role and framework -- **Incremental Updates**: Merge new insights into existing analyses -- **Framework Alignment**: Address guidance-specification.md discussion points -- **Agent Delegation**: Use conceptual-planning-agent with role-specific templates - -### Supported Roles - -| Role ID | Title | Focus Area | Context Questions | -|---------|-------|------------|-------------------| -| `ux-expert` | UX专家 | User research, information architecture, user journey | 4 | -| `ui-designer` | UI设计师 | Visual design, high-fidelity mockups, design systems | 4 | -| `system-architect` | 系统架构师 | Technical architecture, scalability, integration patterns | 5 | -| `product-manager` | 产品经理 | Product strategy, roadmap, prioritization | 4 | -| `product-owner` | 产品负责人 | Backlog management, user stories, acceptance criteria | 4 | -| `scrum-master` | 敏捷教练 | Process facilitation, impediment removal, team dynamics | 3 | -| `subject-matter-expert` | 领域专家 | Domain knowledge, business rules, compliance | 4 | -| `data-architect` | 数据架构师 | Data models, storage strategies, data flow | 5 | -| `api-designer` | API设计师 | API contracts, versioning, integration patterns | 4 | - ---- - -## Usage - -```bash -# Generate new analysis with interactive context -role-analysis ux-expert - -# Generate with existing framework + context questions -role-analysis system-architect --session WFS-xxx --include-questions - -# Update existing analysis (incremental merge) -role-analysis ui-designer --session WFS-xxx --update - -# Quick generation (skip interactive context) -role-analysis product-manager --session WFS-xxx --skip-questions -``` - ---- - -## Execution Protocol - -### Phase 1: Detection & Validation - -**Step 1.1: Role Validation** -```bash -VALIDATE role_name IN [ - ux-expert, ui-designer, system-architect, product-manager, - product-owner, scrum-master, subject-matter-expert, - data-architect, api-designer -] -IF invalid: - ERROR: "Unknown role: {role_name}. Use one of: ux-expert, ui-designer, ..." - EXIT -``` - -**Step 1.2: Session Detection** -```bash -IF --session PROVIDED: - session_id = --session - brainstorm_dir = .workflow/active/{session_id}/.brainstorming/ -ELSE: - FIND .workflow/active/WFS-*/ - IF multiple: - PROMPT user to select - ELSE IF single: - USE existing - ELSE: - ERROR: "No active session. Run Phase 1 (artifacts) first" - EXIT - -VALIDATE brainstorm_dir EXISTS -``` - -**Step 1.3: Framework Detection** -```bash -framework_file = {brainstorm_dir}/guidance-specification.md -IF framework_file EXISTS: - framework_mode = true - LOAD framework_content -ELSE: - WARN: "No framework found - will create standalone analysis" - framework_mode = false -``` - -**Step 1.4: Update Mode Detection** -```bash -existing_analysis = {brainstorm_dir}/{role_name}/analysis*.md -IF --update FLAG OR existing_analysis EXISTS: - update_mode = true - IF --update NOT PROVIDED: - ASK: "Analysis exists. Update or regenerate?" 
- OPTIONS: ["Incremental update", "Full regenerate", "Cancel"] -ELSE: - update_mode = false -``` - -### Phase 2: Interactive Context Gathering - -**Trigger Conditions**: -- Default: Always ask unless `--skip-questions` provided -- `--include-questions`: Force context gathering even if analysis exists -- `--skip-questions`: Skip all interactive questions - -**Step 2.1: Load Role Configuration** -```javascript -const roleConfig = { - 'ux-expert': { - title: 'UX专家', - focus_area: 'User research, information architecture, user journey', - question_categories: ['User Intent', 'Requirements', 'UX'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/ux-expert.md' - }, - 'ui-designer': { - title: 'UI设计师', - focus_area: 'Visual design, high-fidelity mockups, design systems', - question_categories: ['Requirements', 'UX', 'Feasibility'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/ui-designer.md' - }, - 'system-architect': { - title: '系统架构师', - focus_area: 'Technical architecture, scalability, integration patterns', - question_categories: ['Scale & Performance', 'Technical Constraints', 'Architecture Complexity', 'Non-Functional Requirements'], - question_count: 5, - template: '~/.claude/workflows/cli-templates/planning-roles/system-architect.md' - }, - 'product-manager': { - title: '产品经理', - focus_area: 'Product strategy, roadmap, prioritization', - question_categories: ['User Intent', 'Requirements', 'Process'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/product-manager.md' - }, - 'product-owner': { - title: '产品负责人', - focus_area: 'Backlog management, user stories, acceptance criteria', - question_categories: ['Requirements', 'Decisions', 'Process'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/product-owner.md' - }, - 'scrum-master': { - title: '敏捷教练', - focus_area: 'Process facilitation, impediment removal, team dynamics', - question_categories: ['Process', 'Risk', 'Decisions'], - question_count: 3, - template: '~/.claude/workflows/cli-templates/planning-roles/scrum-master.md' - }, - 'subject-matter-expert': { - title: '领域专家', - focus_area: 'Domain knowledge, business rules, compliance', - question_categories: ['Requirements', 'Feasibility', 'Terminology'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/subject-matter-expert.md' - }, - 'data-architect': { - title: '数据架构师', - focus_area: 'Data models, storage strategies, data flow', - question_categories: ['Architecture', 'Scale & Performance', 'Technical Constraints', 'Feasibility'], - question_count: 5, - template: '~/.claude/workflows/cli-templates/planning-roles/data-architect.md' - }, - 'api-designer': { - title: 'API设计师', - focus_area: 'API contracts, versioning, integration patterns', - question_categories: ['Architecture', 'Requirements', 'Feasibility', 'Decisions'], - question_count: 4, - template: '~/.claude/workflows/cli-templates/planning-roles/api-designer.md' - } -}; - -config = roleConfig[role_name]; -``` - -**Step 2.2: Generate Role-Specific Questions** - -**9-Category Taxonomy** (from synthesis.md): - -| Category | Focus | Example Question Pattern | -|----------|-------|--------------------------| -| User Intent | 用户目标 | "该分析的核心目标是什么?" | -| Requirements | 需求细化 | "需求的优先级如何排序?" | -| Architecture | 架构决策 | "技术栈的选择考量?" | -| UX | 用户体验 | "交互复杂度的取舍?" | -| Feasibility | 可行性 | "资源约束下的实现范围?" | -| Risk | 风险管理 | "风险容忍度是多少?" | -| Process | 流程规范 | "开发迭代的节奏?" 
| -| Decisions | 决策确认 | "冲突的解决方案?" | -| Terminology | 术语统一 | "统一使用哪个术语?" | -| Scale & Performance | 性能扩展 | "预期的负载和性能要求?" | -| Technical Constraints | 技术约束 | "现有技术栈的限制?" | -| Architecture Complexity | 架构复杂度 | "架构的复杂度权衡?" | -| Non-Functional Requirements | 非功能需求 | "可用性和可维护性要求?" | - -**Question Generation Algorithm**: -```javascript -async function generateQuestions(role_name, framework_content) { - const config = roleConfig[role_name]; - const questions = []; - - // Parse framework for keywords - const keywords = extractKeywords(framework_content); - - // Generate category-specific questions - for (const category of config.question_categories) { - const question = generateCategoryQuestion(category, keywords, role_name); - questions.push(question); - } - - return questions.slice(0, config.question_count); -} -``` - -**Step 2.3: Multi-Round Question Execution** - -```javascript -const BATCH_SIZE = 4; -const user_context = {}; - -for (let i = 0; i < questions.length; i += BATCH_SIZE) { - const batch = questions.slice(i, i + BATCH_SIZE); - const currentRound = Math.floor(i / BATCH_SIZE) + 1; - const totalRounds = Math.ceil(questions.length / BATCH_SIZE); - - console.log(`\n[Round ${currentRound}/${totalRounds}] ${config.title} 上下文询问\n`); - - AskUserQuestion({ - questions: batch.map(q => ({ - question: q.question, - header: q.category.substring(0, 12), - multiSelect: false, - options: q.options.map(opt => ({ - label: opt.label, - description: opt.description - })) - })) - }); - - // Store responses before next round - for (const answer of responses) { - user_context[answer.question] = { - answer: answer.selected, - category: answer.category, - timestamp: new Date().toISOString() - }; - } -} - -// Save context to file -Write( - `${brainstorm_dir}/${role_name}/${role_name}-context.md`, - formatUserContext(user_context) -); -``` - -**Question Quality Rules** (from artifacts.md): - -**MUST Include**: -- ✅ All questions in Chinese (用中文提问) -- ✅ 业务场景作为问题前提 -- ✅ 技术选项的业务影响说明 -- ✅ 量化指标和约束条件 - -**MUST Avoid**: -- ❌ 纯技术选型无业务上下文 -- ❌ 过度抽象的通用问题 -- ❌ 脱离框架的重复询问 - -### Phase 3: Agent Execution - -**Step 3.1: Load Session Metadata** -```bash -session_metadata = Read(.workflow/active/{session_id}/workflow-session.json) -original_topic = session_metadata.topic -selected_roles = session_metadata.selected_roles -``` - -**Step 3.2: Prepare Agent Context** -```javascript -const agentContext = { - role_name: role_name, - role_config: roleConfig[role_name], - output_location: `${brainstorm_dir}/${role_name}/`, - framework_mode: framework_mode, - framework_path: framework_mode ? `${brainstorm_dir}/guidance-specification.md` : null, - update_mode: update_mode, - user_context: user_context, - original_topic: original_topic, - session_id: session_id -}; -``` - -**Step 3.3: Execute Conceptual Planning Agent** - -**Framework-Based Analysis** (when guidance-specification.md exists): -```javascript -Task( - subagent_type="conceptual-planning-agent", - run_in_background=false, - description=`Generate ${role_name} analysis`, - prompt=` -[FLOW_CONTROL] - -Execute ${role_name} analysis for existing topic framework - -## Context Loading -ASSIGNED_ROLE: ${role_name} -OUTPUT_LOCATION: ${agentContext.output_location} -ANALYSIS_MODE: ${framework_mode ? "framework_based" : "standalone"} -UPDATE_MODE: ${update_mode} - -## Flow Control Steps -1. **load_topic_framework** - - Action: Load structured topic discussion framework - - Command: Read(${agentContext.framework_path}) - - Output: topic_framework_content - -2. 
**load_role_template** - - Action: Load ${role_name} planning template - - Command: Read(${roleConfig[role_name].template}) - - Output: role_template_guidelines - -3. **load_session_metadata** - - Action: Load session metadata and user intent - - Command: Read(.workflow/active/${session_id}/workflow-session.json) - - Output: session_context - -4. **load_user_context** (if exists) - - Action: Load interactive context responses - - Command: Read(${brainstorm_dir}/${role_name}/${role_name}-context.md) - - Output: user_context_answers - -5. **${update_mode ? 'load_existing_analysis' : 'skip'}** - ${update_mode ? ` - - Action: Load existing analysis for incremental update - - Command: Read(${brainstorm_dir}/${role_name}/analysis.md) - - Output: existing_analysis_content - ` : ''} - -## Analysis Requirements -**Primary Reference**: Original user prompt from workflow-session.json is authoritative -**Framework Source**: Address all discussion points in guidance-specification.md from ${role_name} perspective -**User Context Integration**: Incorporate interactive Q&A responses into analysis -**Role Focus**: ${roleConfig[role_name].focus_area} -**Template Integration**: Apply role template guidelines within framework structure - -## Expected Deliverables -1. **analysis.md** (main document, optionally with analysis-{slug}.md sub-documents) -2. **Framework Reference**: @../guidance-specification.md (if framework_mode) -3. **User Context Reference**: @./${role_name}-context.md (if user context exists) -4. **User Intent Alignment**: Validate against session_context - -## Update Requirements (if UPDATE_MODE) -- **Preserve Structure**: Maintain existing analysis structure -- **Add "Clarifications" Section**: Document new user context with timestamp -- **Merge Insights**: Integrate new perspectives without removing existing content -- **Resolve Conflicts**: If new context contradicts existing analysis, document both and recommend resolution - -## Completion Criteria -- Address each discussion point from guidance-specification.md with ${role_name} expertise -- Provide actionable recommendations from ${role_name} perspective within analysis files -- All output files MUST start with "analysis" prefix (no recommendations.md or other naming) -- Reference framework document using @ notation for integration -- Update workflow-session.json with completion status -` -); -``` - -### Phase 4: Validation & Finalization - -**Step 4.1: Validate Output** -```bash -VERIFY EXISTS: ${brainstorm_dir}/${role_name}/analysis.md -VERIFY CONTAINS: "@../guidance-specification.md" (if framework_mode) -IF user_context EXISTS: - VERIFY CONTAINS: "@./${role_name}-context.md" OR "## Clarifications" section -``` - -**Step 4.2: Update Session Metadata** -```json -{ - "phases": { - "BRAINSTORM": { - "${role_name}": { - "status": "${update_mode ? 'updated' : 'completed'}", - "completed_at": "timestamp", - "framework_addressed": true, - "context_gathered": user_context ? true : false, - "output_location": "${brainstorm_dir}/${role_name}/analysis.md", - "update_history": [ - { - "timestamp": "ISO8601", - "mode": "${update_mode ? 'incremental' : 'initial'}", - "context_questions": question_count - } - ] - } - } - } -} -``` - -**Step 4.3: Completion Report** -```markdown -✅ ${roleConfig[role_name].title} Analysis Complete - -**Output**: ${brainstorm_dir}/${role_name}/analysis.md -**Mode**: ${update_mode ? 'Incremental Update' : 'New Generation'} -**Framework**: ${framework_mode ? 
'✓ Aligned' : '✗ Standalone'} -**Context Questions**: ${question_count} answered - -${update_mode ? ' -**Changes**: -- Added "Clarifications" section with new user context -- Merged new insights into existing sections -- Resolved conflicts with framework alignment -' : ''} - -**Next Steps**: -${selected_roles.length > 1 ? ` - - Continue with other roles: ${selected_roles.filter(r => r !== role_name).join(', ')} - - Run synthesis: Phase 3 (synthesis integration) -` : ` - - Clarify insights: Phase 3 (synthesis integration) - - Generate plan: workflow-plan/SKILL.md --session ${session_id} -`} -``` - ---- - -## TodoWrite Integration - -### Workflow Progress Tracking - -```javascript -TodoWrite({ - todos: [ - { - content: "Phase 1: Detect session and validate role configuration", - status: "in_progress", - activeForm: "Detecting session and role" - }, - { - content: "Phase 2: Interactive context gathering with AskUserQuestion", - status: "pending", - activeForm: "Gathering user context" - }, - { - content: "Phase 3: Execute conceptual-planning-agent for role analysis", - status: "pending", - activeForm: "Executing agent analysis" - }, - { - content: "Phase 4: Validate output and update session metadata", - status: "pending", - activeForm: "Finalizing and validating" - } - ] -}); -``` - ---- - -## Output Structure - -### Directory Layout - -``` -.workflow/active/WFS-{session}/.brainstorming/ -├── guidance-specification.md # Framework (if exists) -└── {role-name}/ - ├── {role-name}-context.md # Interactive Q&A responses - ├── analysis.md # Main analysis (REQUIRED) - └── analysis-{slug}.md # Section documents (optional, max 5) -``` - -### Analysis Document Structure (New Generation) - -```markdown -# ${roleConfig[role_name].title} Analysis: [Topic from Framework] - -## Framework Reference -**Topic Framework**: @../guidance-specification.md -**Role Focus**: ${roleConfig[role_name].focus_area} -**User Context**: @./${role_name}-context.md - -## User Context Summary -**Context Gathered**: ${question_count} questions answered -**Categories**: ${question_categories.join(', ')} - -${user_context ? 
formatContextSummary(user_context) : ''} - -## Discussion Points Analysis -[Address each point from guidance-specification.md with ${role_name} expertise] - -### Core Requirements (from framework) -[Role-specific perspective on requirements] - -### Technical Considerations (from framework) -[Role-specific technical analysis] - -### User Experience Factors (from framework) -[Role-specific UX considerations] - -### Implementation Challenges (from framework) -[Role-specific challenges and solutions] - -### Success Metrics (from framework) -[Role-specific metrics and KPIs] - -## ${roleConfig[role_name].title} Specific Recommendations -[Role-specific actionable strategies] - ---- -*Generated by ${role_name} analysis addressing structured framework* -*Context gathered: ${new Date().toISOString()}* -``` - -### Analysis Document Structure (Incremental Update) - -```markdown -# ${roleConfig[role_name].title} Analysis: [Topic] - -## Framework Reference -[Existing content preserved] - -## Clarifications -### Session ${new Date().toISOString().split('T')[0]} -${Object.entries(user_context).map(([q, a]) => ` -- **Q**: ${q} (Category: ${a.category}) - **A**: ${a.answer} -`).join('\n')} - -## User Context Summary -[Updated with new context] - -## Discussion Points Analysis -[Existing content enhanced with new insights] - -[Rest of sections updated based on clarifications] -``` - ---- - -## Integration with Other Phases - -### Called By -- Auto-parallel orchestrator (Phase 2 - parallel role execution) -- Manual invocation for single-role analysis - -### Calls To -- `conceptual-planning-agent` (agent execution) -- `AskUserQuestion` (interactive context gathering) - -### Coordinates With -- Phase 1 (artifacts) - creates framework for role analysis -- Phase 3 (synthesis) - reads role analyses for integration - ---- - -## Quality Assurance - -### Required Analysis Elements -- [ ] Framework discussion points addressed (if framework_mode) -- [ ] User context integrated (if context gathered) -- [ ] Role template guidelines applied -- [ ] Output files follow naming convention (analysis*.md only) -- [ ] Framework reference using @ notation -- [ ] Session metadata updated - -### Context Quality -- [ ] Questions in Chinese with business context -- [ ] Options include technical trade-offs -- [ ] Categories aligned with role focus -- [ ] No generic questions unrelated to framework - -### Update Quality (if update_mode) -- [ ] "Clarifications" section added with timestamp -- [ ] New insights merged without content loss -- [ ] Conflicts documented and resolved -- [ ] Framework alignment maintained - ---- - -## Command Parameters - -### Required Parameters -- `[role-name]`: Role identifier (ux-expert, ui-designer, system-architect, etc.) 
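A minimal sketch, assuming a JavaScript entry point for the command, of how the required role-name argument could be checked before Phase 1 runs; the helper and variable names are illustrative, while the role list and error wording mirror the Supported Roles table and the Error Handling section:

```javascript
// Illustrative only: validate the required [role-name] argument up front.
// SUPPORTED_ROLES mirrors the Supported Roles table; the helper name is hypothetical.
const SUPPORTED_ROLES = [
  'ux-expert', 'ui-designer', 'system-architect', 'product-manager',
  'product-owner', 'scrum-master', 'subject-matter-expert',
  'data-architect', 'api-designer'
];

function validateRoleName(roleName) {
  if (!SUPPORTED_ROLES.includes(roleName)) {
    throw new Error(
      `Unknown role: ${roleName}. Use one of: ${SUPPORTED_ROLES.join(', ')}`
    );
  }
  return roleName;
}
```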
- -### Optional Parameters -- `--session [session-id]`: Specify brainstorming session (auto-detect if omitted) -- `--update`: Force incremental update mode (auto-detect if analysis exists) -- `--include-questions`: Force context gathering even if analysis exists -- `--skip-questions`: Skip all interactive context gathering -- `--style-skill [package]`: For ui-designer only, load style SKILL package - -### Parameter Combinations - -| Scenario | Command | Behavior | -|----------|---------|----------| -| New analysis | `role-analysis ux-expert` | Generate + ask context questions | -| Quick generation | `role-analysis ux-expert --skip-questions` | Generate without context | -| Update existing | `role-analysis ux-expert --update` | Ask clarifications + merge | -| Force questions | `role-analysis ux-expert --include-questions` | Ask even if exists | -| Specific session | `role-analysis ux-expert --session WFS-xxx` | Target specific session | - ---- - -## Error Handling - -### Invalid Role Name -``` -ERROR: Unknown role: "ui-expert" -Valid roles: ux-expert, ui-designer, system-architect, product-manager, - product-owner, scrum-master, subject-matter-expert, - data-architect, api-designer -``` - -### No Active Session -``` -ERROR: No active brainstorming session found -Run: Phase 1 (artifacts) to create session -``` - -### Missing Framework (with warning) -``` -WARN: No guidance-specification.md found -Generating standalone analysis without framework alignment -Recommend: Run Phase 1 (artifacts) first for better results -``` - -### Agent Execution Failure -``` -ERROR: Conceptual planning agent failed -Check: ${brainstorm_dir}/${role_name}/error.log -Action: Retry with --skip-questions or check framework validity -``` - ---- - -## Advanced Usage - -### Batch Role Generation (via auto-parallel) -```bash -# This is handled by auto-parallel orchestrator -# → Internally calls role-analysis for each selected role in parallel -``` - -### Manual Multi-Role Workflow -```bash -# 1. Create framework (Phase 1) -# Run Phase 1 (artifacts) for topic with --count 3 - -# 2. Generate each role with context -# Execute role-analysis for system-architect --include-questions -# Execute role-analysis for ui-designer --include-questions -# Execute role-analysis for product-manager --include-questions - -# 3. 
Synthesize insights (Phase 3) -# Run Phase 3 (synthesis) --session WFS-xxx -``` - -### Iterative Refinement -```bash -# Initial generation -# Execute role-analysis for ux-expert - -# User reviews and wants more depth -# Execute role-analysis for ux-expert --update --include-questions -# → Asks clarification questions, merges new insights -``` - ---- - -## Reference Information - -### Role Template Locations -- Templates: `~/.claude/workflows/cli-templates/planning-roles/` -- Format: `{role-name}.md` (e.g., `ux-expert.md`, `system-architect.md`) - -### Context Package -- Location: `.workflow/active/WFS-{session}/.process/context-package.json` -- Used by: `context-search-agent` (Phase 0 of artifacts) -- Contains: Project context, tech stack, conflict risks - ---- - -## Post-Phase Update - -After Phase 2 completes: -- **Output Created**: `[role]/analysis*.md` for each selected role -- **Parallel Execution**: All N roles executed concurrently -- **Next Action**: Auto-continue to Phase 3 (synthesis integration) -- **TodoWrite**: Collapse Phase 2 sub-tasks to "Phase 2: Parallel Role Analysis: completed" diff --git a/.claude/skills/workflow-brainstorm-auto-parallel/phases/03-synthesis-integration.md b/.claude/skills/workflow-brainstorm-auto-parallel/phases/03-synthesis-integration.md deleted file mode 100644 index 439ad077..00000000 --- a/.claude/skills/workflow-brainstorm-auto-parallel/phases/03-synthesis-integration.md +++ /dev/null @@ -1,405 +0,0 @@ -# Phase 3: Synthesis Integration - -> 来源: `commands/workflow/brainstorm/synthesis.md` - -## Overview - -Six-phase workflow to eliminate ambiguities and enhance conceptual depth in role analyses: - -**Phase 1-2**: Session detection → File discovery → Path preparation -**Phase 3A**: Cross-role analysis agent → Generate recommendations -**Phase 4**: User selects enhancements → User answers clarifications (via AskUserQuestion) -**Phase 5**: Parallel update agents (one per role) -**Phase 6**: Context package update → Metadata update → Completion report - -All user interactions use AskUserQuestion tool (max 4 questions per call, multi-round). 
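A minimal sketch of the 4-per-call batching constraint described above, assuming pending clarification items arrive as a plain array; the `chunkIntoRounds` helper name is illustrative and not part of the skill itself.

```javascript
// Split any list of pending questions into rounds of at most 4,
// matching the AskUserQuestion per-call limit noted above.
const chunkIntoRounds = (items, size = 4) =>
  Array.from({ length: Math.ceil(items.length / size) }, (_, i) =>
    items.slice(i * size, (i + 1) * size));

// Each chunk maps to one AskUserQuestion call; answers are stored before the next round.
```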
- -**Document Flow**: -- Input: `[role]/analysis*.md`, `guidance-specification.md`, session metadata -- Output: Updated `[role]/analysis*.md` with Enhancements + Clarifications sections - ---- - -## Quick Reference - -### Phase Summary - -| Phase | Goal | Executor | Output | -|-------|------|----------|--------| -| 1 | Session detection | Main flow | session_id, brainstorm_dir | -| 2 | File discovery | Main flow | role_analysis_paths | -| 3A | Cross-role analysis | Agent | enhancement_recommendations | -| 4 | User interaction | Main flow + AskUserQuestion | update_plan | -| 5 | Document updates | Parallel agents | Updated analysis*.md | -| 6 | Finalization | Main flow | context-package.json, report | - -### AskUserQuestion Pattern - -```javascript -// Enhancement selection (multi-select) -AskUserQuestion({ - questions: [{ - question: "请选择要应用的改进建议", - header: "改进选择", - multiSelect: true, - options: [ - { label: "EP-001: API Contract", description: "添加详细的请求/响应 schema 定义" }, - { label: "EP-002: User Intent", description: "明确用户需求优先级和验收标准" } - ] - }] -}) - -// Clarification questions (single-select, multi-round) -AskUserQuestion({ - questions: [ - { - question: "MVP 阶段的核心目标是什么?", - header: "用户意图", - multiSelect: false, - options: [ - { label: "快速验证", description: "最小功能集,快速上线获取反馈" }, - { label: "技术壁垒", description: "完善架构,为长期发展打基础" }, - { label: "功能完整", description: "覆盖所有规划功能,延迟上线" } - ] - } - ] -}) -``` - ---- - -## Task Tracking - -```json -[ - {"content": "Detect session and validate analyses", "status": "pending", "activeForm": "Detecting session"}, - {"content": "Discover role analysis file paths", "status": "pending", "activeForm": "Discovering paths"}, - {"content": "Execute analysis agent (cross-role analysis)", "status": "pending", "activeForm": "Executing analysis"}, - {"content": "Present enhancements via AskUserQuestion", "status": "pending", "activeForm": "Selecting enhancements"}, - {"content": "Clarification questions via AskUserQuestion", "status": "pending", "activeForm": "Clarifying"}, - {"content": "Execute parallel update agents", "status": "pending", "activeForm": "Updating documents"}, - {"content": "Update context package and metadata", "status": "pending", "activeForm": "Finalizing"} -] -``` - ---- - -## Execution Phases - -### Phase 1: Discovery & Validation - -1. **Detect Session**: Use `--session` parameter or find `.workflow/active/WFS-*` -2. **Validate Files**: - - `guidance-specification.md` (optional, warn if missing) - - `*/analysis*.md` (required, error if empty) -3. **Load User Intent**: Extract from `workflow-session.json` - -### Phase 2: Role Discovery & Path Preparation - -**Main flow prepares file paths for Agent**: - -1. **Discover Analysis Files**: - - Glob: `.workflow/active/WFS-{session}/.brainstorming/*/analysis*.md` - - Supports: analysis.md + analysis-{slug}.md (max 5) - -2. **Extract Role Information**: - - `role_analysis_paths`: Relative paths - - `participating_roles`: Role names from directories - -3. **Pass to Agent**: session_id, brainstorm_dir, role_analysis_paths, participating_roles - -### Phase 3A: Analysis & Enhancement Agent - -**Agent executes cross-role analysis**: - -```javascript -Task(conceptual-planning-agent, ` -## Agent Mission -Analyze role documents, identify conflicts/gaps, generate enhancement recommendations - -## Input -- brainstorm_dir: ${brainstorm_dir} -- role_analysis_paths: ${role_analysis_paths} -- participating_roles: ${participating_roles} - -## Flow Control Steps -1. 
load_session_metadata → Read workflow-session.json -2. load_role_analyses → Read all analysis files -3. cross_role_analysis → Identify consensus, conflicts, gaps, ambiguities -4. generate_recommendations → Format as EP-001, EP-002, ... - -## Output Format -[ - { - "id": "EP-001", - "title": "API Contract Specification", - "affected_roles": ["system-architect", "api-designer"], - "category": "Architecture", - "current_state": "High-level API descriptions", - "enhancement": "Add detailed contract definitions", - "rationale": "Enables precise implementation", - "priority": "High" - } -] -`) -``` - -### Phase 4: User Interaction - -**All interactions via AskUserQuestion (Chinese questions)** - -#### Step 1: Enhancement Selection - -```javascript -// If enhancements > 4, split into multiple rounds -const enhancements = [...]; // from Phase 3A -const BATCH_SIZE = 4; - -for (let i = 0; i < enhancements.length; i += BATCH_SIZE) { - const batch = enhancements.slice(i, i + BATCH_SIZE); - - AskUserQuestion({ - questions: [{ - question: `请选择要应用的改进建议 (第${Math.floor(i/BATCH_SIZE)+1}轮)`, - header: "改进选择", - multiSelect: true, - options: batch.map(ep => ({ - label: `${ep.id}: ${ep.title}`, - description: `影响: ${ep.affected_roles.join(', ')} | ${ep.enhancement}` - })) - }] - }) - - // Store selections before next round -} - -// User can also skip: provide "跳过" option -``` - -#### Step 2: Clarification Questions - -```javascript -// Generate questions based on 9-category taxonomy scan -// Categories: User Intent, Requirements, Architecture, UX, Feasibility, Risk, Process, Decisions, Terminology - -const clarifications = [...]; // from analysis -const BATCH_SIZE = 4; - -for (let i = 0; i < clarifications.length; i += BATCH_SIZE) { - const batch = clarifications.slice(i, i + BATCH_SIZE); - const currentRound = Math.floor(i / BATCH_SIZE) + 1; - const totalRounds = Math.ceil(clarifications.length / BATCH_SIZE); - - AskUserQuestion({ - questions: batch.map(q => ({ - question: q.question, - header: q.category.substring(0, 12), - multiSelect: false, - options: q.options.map(opt => ({ - label: opt.label, - description: opt.description - })) - })) - }) - - // Store answers before next round -} -``` - -### Question Guidelines - -**Target**: 开发者(理解技术但需要从用户需求出发) - -**Question Structure**: `[跨角色分析发现] + [需要澄清的决策点]` -**Option Structure**: `标签:[具体方案] + 说明:[业务影响] + [技术权衡]` - -**9-Category Taxonomy**: - -| Category | Focus | Example Question Pattern | -|----------|-------|--------------------------| -| User Intent | 用户目标 | "MVP阶段核心目标?" + 验证/壁垒/完整性 | -| Requirements | 需求细化 | "功能优先级如何排序?" + 核心/增强/可选 | -| Architecture | 架构决策 | "技术栈选择考量?" + 熟悉度/先进性/成熟度 | -| UX | 用户体验 | "交互复杂度取舍?" + 简洁/丰富/渐进 | -| Feasibility | 可行性 | "资源约束下的范围?" + 最小/标准/完整 | -| Risk | 风险管理 | "风险容忍度?" + 保守/平衡/激进 | -| Process | 流程规范 | "迭代节奏?" + 快速/稳定/灵活 | -| Decisions | 决策确认 | "冲突解决方案?" + 方案A/方案B/折中 | -| Terminology | 术语统一 | "统一使用哪个术语?" + 术语A/术语B | - -**Quality Rules**: - -**MUST Include**: -- ✅ All questions in Chinese (用中文提问) -- ✅ 基于跨角色分析的具体发现 -- ✅ 选项包含业务影响说明 -- ✅ 解决实际的模糊点或冲突 - -**MUST Avoid**: -- ❌ 与角色分析无关的通用问题 -- ❌ 重复已在 artifacts 阶段确认的内容 -- ❌ 过于细节的实现级问题 - -#### Step 3: Build Update Plan - -```javascript -update_plan = { - "role1": { - "enhancements": ["EP-001", "EP-003"], - "clarifications": [ - {"question": "...", "answer": "...", "category": "..."} - ] - }, - "role2": { - "enhancements": ["EP-002"], - "clarifications": [...] 
- } -} -``` - -### Phase 5: Parallel Document Update Agents - -**Execute in parallel** (one agent per role): - -```javascript -// Single message with multiple Task calls for parallelism -Task(conceptual-planning-agent, ` -## Agent Mission -Apply enhancements and clarifications to ${role} analysis - -## Input -- role: ${role} -- analysis_path: ${brainstorm_dir}/${role}/analysis.md -- enhancements: ${role_enhancements} -- clarifications: ${role_clarifications} -- original_user_intent: ${intent} - -## Flow Control Steps -1. load_current_analysis → Read analysis file -2. add_clarifications_section → Insert Q&A section -3. apply_enhancements → Integrate into relevant sections -4. resolve_contradictions → Remove conflicts -5. enforce_terminology → Align terminology -6. validate_intent → Verify alignment with user intent -7. write_updated_file → Save changes - -## Output -Updated ${role}/analysis.md -`) -``` - -**Agent Characteristics**: -- **Isolation**: Each agent updates exactly ONE role (parallel safe) -- **Dependencies**: Zero cross-agent dependencies -- **Validation**: All updates must align with original_user_intent - -### Phase 6: Finalization - -#### Step 1: Update Context Package - -```javascript -// Sync updated analyses to context-package.json -const context_pkg = Read(".workflow/active/WFS-{session}/.process/context-package.json") - -// Update guidance-specification if exists -// Update synthesis-specification if exists -// Re-read all role analysis files -// Update metadata timestamps - -Write(context_pkg_path, JSON.stringify(context_pkg)) -``` - -#### Step 2: Update Session Metadata - -```json -{ - "phases": { - "BRAINSTORM": { - "status": "clarification_completed", - "clarification_completed": true, - "completed_at": "timestamp", - "participating_roles": [...], - "clarification_results": { - "enhancements_applied": ["EP-001", "EP-002"], - "questions_asked": 3, - "categories_clarified": ["Architecture", "UX"], - "roles_updated": ["role1", "role2"] - }, - "quality_metrics": { - "user_intent_alignment": "validated", - "ambiguity_resolution": "complete", - "terminology_consistency": "enforced" - } - } - } -} -``` - -#### Step 3: Completion Report - -```markdown -## ✅ Clarification Complete - -**Enhancements Applied**: EP-001, EP-002, EP-003 -**Questions Answered**: 3/5 -**Roles Updated**: role1, role2, role3 - -### Next Steps -✅ PROCEED: Planning workflow (`workflow-plan/SKILL.md`) --session WFS-{session-id} -``` - ---- - -## Output - -**Location**: `.workflow/active/WFS-{session}/.brainstorming/[role]/analysis*.md` - -**Updated Structure**: -```markdown -## Clarifications -### Session {date} -- **Q**: {question} (Category: {category}) - **A**: {answer} - -## {Existing Sections} -{Refined content based on clarifications} -``` - -**Changes**: -- User intent validated/corrected -- Requirements more specific/measurable -- Architecture with rationale -- Ambiguities resolved, placeholders removed -- Consistent terminology - ---- - -## Quality Checklist - -**Content**: -- ✅ All role analyses loaded/analyzed -- ✅ Cross-role analysis (consensus, conflicts, gaps) -- ✅ 9-category ambiguity scan -- ✅ Questions prioritized - -**Analysis**: -- ✅ User intent validated -- ✅ Cross-role synthesis complete -- ✅ Ambiguities resolved -- ✅ Terminology consistent - -**Documents**: -- ✅ Clarifications section formatted -- ✅ Sections reflect answers -- ✅ No placeholders (TODO/TBD) -- ✅ Valid Markdown - ---- - -## Post-Phase Update - -After Phase 3 completes: -- **Output Created**: 
`synthesis-specification.md`, updated role `analysis*.md` files -- **Cross-Role Integration**: All role insights synthesized and conflicts resolved -- **Next Action**: Workflow complete, recommend planning workflow (`workflow-plan/SKILL.md`) --session {session_id} -- **TodoWrite**: Collapse Phase 3 sub-tasks to "Phase 3: Synthesis Integration: completed" diff --git a/.claude/skills/workflow-execute/SKILL.md b/.claude/skills/workflow-execute/SKILL.md deleted file mode 100644 index d082ef52..00000000 --- a/.claude/skills/workflow-execute/SKILL.md +++ /dev/null @@ -1,639 +0,0 @@ ---- -name: workflow-execute -description: Coordinate agent execution for workflow tasks with automatic session discovery, parallel task processing, and status tracking. Triggers on "workflow execute". -allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, Skill, mcp__ace-tool__search_context ---- - -# Workflow Execute - -Orchestrates autonomous workflow execution through systematic task discovery, agent coordination, and progress tracking. **Executes entire workflow without user interruption** (except initial session selection if multiple active sessions exist), providing complete context to agents and ensuring proper flow control execution with comprehensive TodoWrite tracking. - -**Resume Mode**: When called with `--resume-session` flag, skips discovery phase and directly enters TodoWrite generation and agent execution for the specified session. - -## Architecture Overview - -``` -┌──────────────────────────────────────────────────────────────┐ -│ Workflow Execute Orchestrator (SKILL.md) │ -│ → Parse args → Session discovery → Strategy → Execute tasks │ -└──────────┬───────────────────────────────────────────────────┘ - │ - ┌──────┴──────┐ - │ Normal Mode │──→ Phase 1 → Phase 2 → Phase 3 → Phase 4 → Phase 5 - │ Resume Mode │──→ Phase 3 → Phase 4 → Phase 5 - └─────────────┘ - │ - ┌──────┴──────────────────────────────────────────────┐ - │ Phase 1: Discovery (session selection) │ - │ Phase 2: Validation (planning doc checks) │ - │ Phase 3: TodoWrite Gen (progress tracking init) │ - │ Phase 4: Strategy+Execute (lazy load + agent loop) │ - │ Phase 5: Completion (status sync + user choice)│ - └─────────────────────────────────────────────────────┘ -``` - -## Key Design Principles - -1. **Autonomous Execution**: Complete entire workflow without user interruption -2. **Lazy Loading**: Task JSONs read on-demand during execution, not upfront -3. **ONE AGENT = ONE TASK JSON**: Each agent instance executes exactly one task JSON file -4. **IMPL_PLAN-Driven Strategy**: Execution model derived from planning document -5. 
**Continuous Progress Tracking**: TodoWrite updates throughout entire workflow - -## Auto Mode - -When `--yes` or `-y`: -- **Session Selection**: Automatically selects the first (most recent) active session -- **Completion Choice**: Automatically completes session (runs `Skill(skill="workflow:session:complete", args="--yes")`) - -When `--with-commit`: -- **Auto-Commit**: After each agent task completes, commit changes based on summary document -- **Commit Principle**: Minimal commits - only commit files modified by the completed task -- **Commit Message**: Generated from task summary with format: "feat/fix/refactor: {task-title} - {summary}" - -## Usage - -``` -Skill(skill="workflow-execute", args="") -Skill(skill="workflow-execute", args="[FLAGS]") - -# Flags --y, --yes Skip all confirmations (auto mode) ---resume-session="" Skip discovery, resume specified session ---with-commit Auto-commit after each task completion - -# Examples -Skill(skill="workflow-execute") # Interactive mode -Skill(skill="workflow-execute", args="--yes") # Auto mode -Skill(skill="workflow-execute", args="--resume-session=\"WFS-auth\"") # Resume specific session -Skill(skill="workflow-execute", args="-y --resume-session=\"WFS-auth\"") # Auto + resume -Skill(skill="workflow-execute", args="--with-commit") # With auto-commit -Skill(skill="workflow-execute", args="-y --with-commit") # Auto + commit -Skill(skill="workflow-execute", args="-y --with-commit --resume-session=\"WFS-auth\"") # All flags -``` - -## Execution Flow - -``` -Normal Mode: -Phase 1: Discovery - ├─ Count active sessions - └─ Decision: - ├─ count=0 → ERROR: No active sessions - ├─ count=1 → Auto-select session → Phase 2 - └─ count>1 → AskUserQuestion (max 4 options) → Phase 2 - -Phase 2: Planning Document Validation - ├─ Check IMPL_PLAN.md exists - ├─ Check TODO_LIST.md exists - └─ Validate .task/ contains IMPL-*.json files - -Phase 3: TodoWrite Generation - ├─ Update session status to "active" (Step 0) - ├─ Parse TODO_LIST.md for task statuses - ├─ Generate TodoWrite for entire workflow - └─ Prepare session context paths - -Phase 4: Execution Strategy & Task Execution - ├─ Step 4A: Parse execution strategy from IMPL_PLAN.md - └─ Step 4B: Execute tasks with lazy loading - └─ Loop: - ├─ Get next in_progress task from TodoWrite - ├─ Lazy load task JSON - ├─ Launch agent with task context - ├─ Mark task completed (update IMPL-*.json status) - │ # Quick fix: Update task status for ccw dashboard - │ # TS=$(date -Iseconds) && jq --arg ts "$TS" '.status="completed" | .status_history=(.status_history // [])+[{"from":"in_progress","to":"completed","changed_at":$ts}]' IMPL-X.json > tmp.json && mv tmp.json IMPL-X.json - ├─ [with-commit] Commit changes based on summary (minimal principle) - │ # Read summary from .summaries/IMPL-X-summary.md - │ # Extract changed files from summary's "Files Modified" section - │ # Generate commit message: "feat/fix/refactor: {task-title} - {summary}" - │ # git add && git commit -m "" - └─ Advance to next task - -Phase 5: Completion - ├─ Update task statuses in JSON files - ├─ Generate summaries - └─ AskUserQuestion: Choose next step - ├─ "Enter Review" → Skill(skill="workflow:review") - └─ "Complete Session" → Skill(skill="workflow:session:complete") - -Resume Mode (--resume-session): - ├─ Skip Phase 1 & Phase 2 - └─ Entry Point: Phase 3 (TodoWrite Generation) - ├─ Update session status to "active" (if not already) - └─ Continue: Phase 4 → Phase 5 -``` - -## Core Rules - -**Complete entire workflow autonomously without user 
interruption, using TodoWrite for comprehensive progress tracking.** -**Execute all discovered pending tasks until workflow completion or blocking dependency.** -**User-choice completion: When all tasks finished, ask user to choose review or complete.** -**ONE AGENT = ONE TASK JSON: Each agent instance executes exactly one task JSON file - never batch multiple tasks into single agent execution.** - -## Core Responsibilities - -- **Session Discovery**: Identify and select active workflow sessions -- **Execution Strategy Parsing**: Extract execution model from IMPL_PLAN.md -- **TodoWrite Progress Tracking**: Maintain real-time execution status throughout entire workflow -- **Agent Orchestration**: Coordinate specialized agents with complete context -- **Status Synchronization**: Update task JSON files and workflow state -- **Autonomous Completion**: Continue execution until all tasks complete or reach blocking state -- **Session User-Choice Completion**: Ask user to choose review or complete when all tasks finished - -## Execution Philosophy - -- **Progress tracking**: Continuous TodoWrite updates throughout entire workflow execution -- **Autonomous completion**: Execute all tasks without user interruption until workflow complete - -## Performance Optimization Strategy - -**Lazy Loading**: Task JSONs read **on-demand** during execution, not upfront. TODO_LIST.md + IMPL_PLAN.md provide metadata for planning. - -**Loading Strategy**: -- **TODO_LIST.md**: Read in Phase 3 (task metadata, status, dependencies for TodoWrite generation) -- **IMPL_PLAN.md**: Check existence in Phase 2 (normal mode), parse execution strategy in Phase 4A -- **Task JSONs**: Lazy loading - read only when task is about to execute (Phase 4B) - -## Execution Lifecycle - -### Phase 1: Discovery -**Applies to**: Normal mode only (skipped in resume mode) - -**Purpose**: Find and select active workflow session with user confirmation when multiple sessions exist - -**Process**: - -#### Step 1.1: Count Active Sessions -```bash -bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | wc -l) -``` - -#### Step 1.2: Handle Session Selection - -**Case A: No Sessions** (count = 0) -``` -ERROR: No active workflow sessions found -Run Skill(skill="workflow:plan", args="\"task description\"") to create a session -``` - -**Case B: Single Session** (count = 1) -```bash -bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename) -``` -Auto-select and continue to Phase 2. 
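A minimal sketch combining Steps 1.1–1.2 (Cases A and B), written in the document's `bash()` orchestration style; the multi-session branch defers to Case C, described next.

```javascript
// Count active sessions, then branch: 0 → error, 1 → auto-select, >1 → Case C below
const sessionCount = parseInt(
  bash(`find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | wc -l`), 10);

let selectedSessionId = null;
if (sessionCount === 0) {
  throw new Error('No active workflow sessions found - run workflow:plan to create one');
} else if (sessionCount === 1) {
  selectedSessionId = bash(
    `find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename`).trim();
}
// sessionCount > 1 → continue to Case C (AskUserQuestion selection, or first session with --yes)
```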
- -**Case C: Multiple Sessions** (count > 1) - -List sessions with metadata and prompt user selection: -```bash -bash(for dir in .workflow/active/WFS-*/; do [ -d "$dir" ] || continue; session=$(basename "$dir"); project=$(jq -r '.project // "Unknown"' "${dir}workflow-session.json" 2>/dev/null || echo "Unknown"); total=$(grep -c '^\- \[' "${dir}TODO_LIST.md" 2>/dev/null || echo 0); completed=$(grep -c '^\- \[x\]' "${dir}TODO_LIST.md" 2>/dev/null || echo 0); if [ "$total" -gt 0 ]; then progress=$((completed * 100 / total)); else progress=0; fi; echo "$session | $project | $completed/$total tasks ($progress%)"; done) -``` - -**Parse --yes flag**: -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -``` - -**Conditional Selection**: -```javascript -if (autoYes) { - // Auto mode: Select first session (most recent) - const firstSession = sessions[0] - console.log(`[--yes] Auto-selecting session: ${firstSession.id}`) - selectedSessionId = firstSession.id - // Continue to Phase 2 -} else { - // Interactive mode: Use AskUserQuestion to present formatted options (max 4 options shown) - // If more than 4 sessions, show most recent 4 with "Other" option for manual input - const sessions = getActiveSessions() // sorted by last modified - const displaySessions = sessions.slice(0, 4) - - AskUserQuestion({ - questions: [{ - question: "Multiple active sessions detected. Select one:", - header: "Session", - multiSelect: false, - options: displaySessions.map(s => ({ - label: s.id, - description: `${s.project} | ${s.progress}` - })) - // Note: User can select "Other" to manually enter session ID - }] - }) -} -``` - -**Input Validation**: -- If user selects from options: Use selected session ID -- If user selects "Other" and provides input: Validate session exists -- If validation fails: Show error and re-prompt or suggest available sessions - -Parse user input (supports: number "1", full ID "WFS-auth-system", or partial "auth"), validate selection, and continue to Phase 2. - -#### Step 1.3: Load Session Metadata -```bash -bash(cat .workflow/active/${sessionId}/workflow-session.json) -``` - -**Output**: Store session metadata in memory -**DO NOT read task JSONs yet** - defer until execution phase (lazy loading) - -**Resume Mode**: This entire phase is skipped when `--resume-session="session-id"` flag is provided. - -### Phase 2: Planning Document Validation -**Applies to**: Normal mode only (skipped in resume mode) - -**Purpose**: Validate planning artifacts exist before execution - -**Process**: -1. **Check IMPL_PLAN.md**: Verify file exists (defer detailed parsing to Phase 4A) -2. **Check TODO_LIST.md**: Verify file exists (defer reading to Phase 3) -3. **Validate Task Directory**: Ensure `.task/` contains at least one IMPL-*.json file - -**Key Optimization**: Only existence checks here. Actual file reading happens in later phases. - -**Resume Mode**: This phase is skipped when `--resume-session` flag is provided. Resume mode entry point is Phase 3. 
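A minimal sketch of the Phase 2 existence checks, using the document's `bash()` helper; the error message wording is illustrative.

```javascript
// Phase 2: existence checks only - detailed parsing is deferred to Phases 3/4
const sessionDir = `.workflow/active/${selectedSessionId}`;

const missingDocs = ['IMPL_PLAN.md', 'TODO_LIST.md'].filter(doc =>
  bash(`test -f ${sessionDir}/${doc} && echo yes || echo no`).trim() !== 'yes');

const taskJsonCount = parseInt(
  bash(`ls ${sessionDir}/.task/IMPL-*.json 2>/dev/null | wc -l`), 10);

if (missingDocs.length > 0 || taskJsonCount === 0) {
  throw new Error(`Planning artifacts incomplete: missing ${missingDocs.join(', ') || 'none'}; ` +
                  `IMPL-*.json count = ${taskJsonCount}. Re-run planning before execution.`);
}
```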
- -### Phase 3: TodoWrite Generation -**Applies to**: Both normal and resume modes (resume mode entry point) - -**Step 0: Update Session Status to Active** -Before generating TodoWrite, update session status from "planning" to "active": -```bash -# Update session status (idempotent - safe to run if already active) -jq '.status = "active" | .execution_started_at = (.execution_started_at // now | todate)' \ - .workflow/active/${sessionId}/workflow-session.json > tmp.json && \ - mv tmp.json .workflow/active/${sessionId}/workflow-session.json -``` -This ensures the dashboard shows the session as "ACTIVE" during execution. - -**Process**: -1. **Create TodoWrite List**: Generate task list from TODO_LIST.md (not from task JSONs) - - Parse TODO_LIST.md to extract all tasks with current statuses - - Identify first pending task with met dependencies - - Generate comprehensive TodoWrite covering entire workflow -2. **Prepare Session Context**: Inject workflow paths for agent use (using provided session-id) -3. **Validate Prerequisites**: Ensure IMPL_PLAN.md and TODO_LIST.md exist and are valid - -**Resume Mode Behavior**: -- Load existing TODO_LIST.md directly from `.workflow/active/{session-id}/` -- Extract current progress from TODO_LIST.md -- Generate TodoWrite from TODO_LIST.md state -- Proceed immediately to agent execution (Phase 4) - -### Phase 4: Execution Strategy Selection & Task Execution -**Applies to**: Both normal and resume modes - -**Step 4A: Parse Execution Strategy from IMPL_PLAN.md** - -Read IMPL_PLAN.md Section 4 to extract: -- **Execution Model**: Sequential | Parallel | Phased | TDD Cycles -- **Parallelization Opportunities**: Which tasks can run in parallel -- **Serialization Requirements**: Which tasks must run sequentially -- **Critical Path**: Priority execution order - -If IMPL_PLAN.md lacks execution strategy, use intelligent fallback (analyze task structure). - -**Step 4B: Execute Tasks with Lazy Loading** - -**Key Optimization**: Read task JSON **only when needed** for execution - -**Execution Loop Pattern**: -``` -while (TODO_LIST.md has pending tasks) { - next_task_id = getTodoWriteInProgressTask() - task_json = Read(.workflow/active/{session}/.task/{next_task_id}.json) // Lazy load - executeTaskWithAgent(task_json) - updateTodoListMarkCompleted(next_task_id) - advanceTodoWriteToNextTask() -} -``` - -**Execution Process per Task**: -1. **Identify Next Task**: From TodoWrite, get the next `in_progress` task ID -2. **Load Task JSON on Demand**: Read `.task/{task-id}.json` for current task ONLY -3. **Validate Task Structure**: Ensure all 5 required fields exist (id, title, status, meta, context, flow_control) -4. **Launch Agent**: Invoke specialized agent with complete context including flow control steps -5. **Monitor Progress**: Track agent execution and handle errors without user interruption -6. **Collect Results**: Gather implementation results and outputs -7. **[with-commit] Auto-Commit**: If `--with-commit` flag enabled, commit changes based on summary - - Read summary from `.summaries/{task-id}-summary.md` - - Extract changed files from summary's "Files Modified" section - - Determine commit type from `meta.type` (feature→feat, bugfix→fix, refactor→refactor) - - Generate commit message: "{type}: {task-title} - {summary-first-line}" - - Commit only modified files (minimal principle): `git add && git commit -m ""` -8. 
**Continue Workflow**: Identify next pending task from TODO_LIST.md and repeat - -**Note**: TODO_LIST.md updates are handled by agents (e.g., code-developer.md), not by the orchestrator. - -### Phase 5: Completion -**Applies to**: Both normal and resume modes - -**Process**: -1. **Update Task Status**: Mark completed tasks in JSON files -2. **Generate Summary**: Create task summary in `.summaries/` -3. **Update TodoWrite**: Mark current task complete, advance to next -4. **Synchronize State**: Update session state and workflow status -5. **Check Workflow Complete**: Verify all tasks are completed -6. **User Choice**: When all tasks finished, ask user to choose next step: - -```javascript -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -if (autoYes) { - // Auto mode: Complete session automatically - console.log(`[--yes] Auto-selecting: Complete Session`) - Skill(skill="workflow:session:complete", args="--yes") -} else { - // Interactive mode: Ask user - AskUserQuestion({ - questions: [{ - question: "All tasks completed. What would you like to do next?", - header: "Next Step", - multiSelect: false, - options: [ - { - label: "Enter Review", - description: "Run specialized review (security/architecture/quality/action-items)" - }, - { - label: "Complete Session", - description: "Archive session and update manifest" - } - ] - }] - }) -} -``` - -**Based on user selection**: -- **"Enter Review"**: Execute `Skill(skill="workflow:review")` -- **"Complete Session"**: Execute `Skill(skill="workflow:session:complete")` - -### Post-Completion Expansion - -完成后询问用户是否扩展为issue(test/enhance/refactor/doc),选中项调用 `Skill(skill="issue:new", args="\"{summary} - {dimension}\"")` - -## Execution Strategy (IMPL_PLAN-Driven) - -### Strategy Priority - -**IMPL_PLAN-Driven Execution (Recommended)**: -1. **Read IMPL_PLAN.md execution strategy** (Section 4: Implementation Strategy) -2. **Follow explicit guidance**: - - Execution Model (Sequential/Parallel/Phased/TDD) - - Parallelization Opportunities (which tasks can run in parallel) - - Serialization Requirements (which tasks must run sequentially) - - Critical Path (priority execution order) -3. **Use TODO_LIST.md for status tracking** only -4. **IMPL_PLAN decides "HOW"**, execute implements it - -**Intelligent Fallback (When IMPL_PLAN lacks execution details)**: -1. **Analyze task structure**: - - Check `meta.execution_group` in task JSONs - - Analyze `depends_on` relationships - - Understand task complexity and risk -2. **Apply smart defaults**: - - No dependencies + same execution_group → Parallel - - Has dependencies → Sequential (wait for deps) - - Critical/high-risk tasks → Sequential -3. **Conservative approach**: When uncertain, prefer sequential execution - -### Execution Models - -#### 1. Sequential Execution -**When**: IMPL_PLAN specifies "Sequential" OR no clear parallelization guidance -**Pattern**: Execute tasks one by one in TODO_LIST order -**TodoWrite**: ONE task marked as `in_progress` at a time - -#### 2. Parallel Execution -**When**: IMPL_PLAN specifies "Parallel" with clear parallelization opportunities -**Pattern**: Execute independent task groups concurrently by launching multiple agent instances -**TodoWrite**: MULTIPLE tasks (in same batch) marked as `in_progress` simultaneously -**Agent Instantiation**: Launch one agent instance per task (respects ONE AGENT = ONE TASK JSON rule) - -#### 3. 
Phased Execution -**When**: IMPL_PLAN specifies "Phased" with phase breakdown -**Pattern**: Execute tasks in phases, respect phase boundaries -**TodoWrite**: Within each phase, follow Sequential or Parallel rules - -#### 4. Intelligent Fallback -**When**: IMPL_PLAN lacks execution strategy details -**Pattern**: Analyze task structure and apply smart defaults -**TodoWrite**: Follow Sequential or Parallel rules based on analysis - -### Task Status Logic -``` -pending + dependencies_met → executable -completed → skip -blocked → skip until dependencies clear -``` - -## TodoWrite Coordination - -### TodoWrite Rules (Unified) - -**Rule 1: Initial Creation** -- **Normal Mode**: Generate TodoWrite from discovered pending tasks for entire workflow -- **Resume Mode**: Generate from existing session state and current progress - -**Rule 2: In-Progress Task Count (Execution-Model-Dependent)** -- **Sequential execution**: Mark ONLY ONE task as `in_progress` at a time -- **Parallel batch execution**: Mark ALL tasks in current batch as `in_progress` simultaneously -- **Execution group indicator**: Show `[execution_group: group-id]` for parallel tasks - -**Rule 3: Status Updates** -- **Immediate Updates**: Update status after each task/batch completion without user interruption -- **Status Synchronization**: Sync with JSON task files after updates -- **Continuous Tracking**: Maintain TodoWrite throughout entire workflow execution until completion - -**Rule 4: Workflow Completion Check** -- When all tasks marked `completed`, prompt user to choose review or complete session - -### TodoWrite Tool Usage - -**Example 1: Sequential Execution** -```javascript -TodoWrite({ - todos: [ - { - content: "Execute IMPL-1.1: Design auth schema [code-developer] [FLOW_CONTROL]", - status: "in_progress", // ONE task in progress - activeForm: "Executing IMPL-1.1: Design auth schema" - }, - { - content: "Execute IMPL-1.2: Implement auth logic [code-developer] [FLOW_CONTROL]", - status: "pending", - activeForm: "Executing IMPL-1.2: Implement auth logic" - } - ] -}); -``` - -**Example 2: Parallel Batch Execution** -```javascript -TodoWrite({ - todos: [ - { - content: "Execute IMPL-1.1: Build Auth API [code-developer] [execution_group: parallel-auth-api]", - status: "in_progress", // Batch task 1 - activeForm: "Executing IMPL-1.1: Build Auth API" - }, - { - content: "Execute IMPL-1.2: Build User UI [code-developer] [execution_group: parallel-ui-comp]", - status: "in_progress", // Batch task 2 (running concurrently) - activeForm: "Executing IMPL-1.2: Build User UI" - }, - { - content: "Execute IMPL-1.3: Setup Database [code-developer] [execution_group: parallel-db-schema]", - status: "in_progress", // Batch task 3 (running concurrently) - activeForm: "Executing IMPL-1.3: Setup Database" - }, - { - content: "Execute IMPL-2.1: Integration Tests [test-fix-agent] [depends_on: IMPL-1.1, IMPL-1.2, IMPL-1.3]", - status: "pending", // Next batch (waits for current batch completion) - activeForm: "Executing IMPL-2.1: Integration Tests" - } - ] -}); -``` - -## Agent Execution Pattern - -### Flow Control Execution -**[FLOW_CONTROL]** marker indicates task JSON contains `flow_control.pre_analysis` steps for context preparation. - -**Note**: Orchestrator does NOT execute flow control steps - Agent interprets and executes them autonomously. - -### Agent Prompt Template -**Path-Based Invocation**: Pass paths and trigger markers, let agent parse task JSON autonomously. 
- -```bash -Task(subagent_type="{meta.agent}", - run_in_background=false, - prompt="Implement task {task.id}: {task.title} - - [FLOW_CONTROL] - - **Input**: - - Task JSON: {session.task_json_path} - - Context Package: {session.context_package_path} - - **Output Location**: - - Workflow: {session.workflow_dir} - - TODO List: {session.todo_list_path} - - Summaries: {session.summaries_dir} - - **Execution**: Read task JSON → Execute pre_analysis → Check execution_config.method → (CLI: handoff to CLI tool | Agent: direct implementation) → Update TODO_LIST.md → Generate summary", - description="Implement: {task.id}") -``` - -**Key Markers**: -- `Implement` keyword: Triggers tech stack detection and guidelines loading -- `[FLOW_CONTROL]`: Triggers flow_control.pre_analysis execution - -**Why Path-Based**: Agent (code-developer.md) autonomously: -- Reads and parses task JSON (requirements, acceptance, flow_control, execution_config) -- Executes pre_analysis steps (Phase 1: context gathering) -- Checks execution_config.method (Phase 2: determine mode) -- CLI mode: Builds handoff prompt and executes via ccw cli with resume strategy -- Agent mode: Directly implements using modification_points and logic_flow -- Generates structured summary with integration points - -Embedding task content in prompt creates duplication and conflicts with agent's parsing logic. - -### Agent Assignment Rules -``` -meta.agent specified → Use specified agent -meta.agent missing → Infer from meta.type: - - "feature" → @code-developer - - "test-gen" → @code-developer - - "test-fix" → @test-fix-agent - - "review" → @universal-executor - - "docs" → @doc-generator -``` - -## Data Flow - -``` -Phase 1 (Discovery) → selectedSessionId, sessionMetadata - ↓ -Phase 2 (Validation) → validated paths (IMPL_PLAN.md, TODO_LIST.md, .task/) - ↓ -Phase 3 (TodoWrite Gen) → todoWriteList, sessionContextPaths - ↓ -Phase 4 (Execute) → per-task: taskJson (lazy), agentResult, summaryDoc - ↓ -Phase 5 (Completion) → updatedStatuses, userChoice (review|complete) -``` - -## Workflow File Structure Reference -``` -.workflow/active/WFS-[topic-slug]/ -├── workflow-session.json # Session state and metadata -├── IMPL_PLAN.md # Planning document and requirements -├── TODO_LIST.md # Progress tracking (updated by agents) -├── .task/ # Task definitions (JSON only) -│ ├── IMPL-1.json # Main task definitions -│ └── IMPL-1.1.json # Subtask definitions -├── .summaries/ # Task completion summaries -│ ├── IMPL-1-summary.md # Task completion details -│ └── IMPL-1.1-summary.md # Subtask completion details -└── .process/ # Planning artifacts - ├── context-package.json # Smart context package - └── ANALYSIS_RESULTS.md # Planning analysis results -``` - -## Auto-Commit Mode (--with-commit) - -**Behavior**: After each agent task completes, automatically commit changes based on summary document. - -**Minimal Principle**: Only commit files modified by the completed task. - -**Commit Message Format**: `{type}: {task-title} - {summary}` - -**Type Mapping** (from `meta.type`): -- `feature` → `feat` | `bugfix` → `fix` | `refactor` → `refactor` -- `test-gen` → `test` | `docs` → `docs` | `review` → `chore` - -**Implementation**: -```bash -# 1. Read summary from .summaries/{task-id}-summary.md -# 2. Extract files from "Files Modified" section -# 3. Commit: git add && git commit -m "{type}: {title} - {summary}" -``` - -**Error Handling**: Skip commit on no changes/missing summary, log errors, continue workflow. 
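A minimal sketch of the per-task auto-commit step, assuming the document's `Read()`/`bash()` helpers; `extractFilesModified` is a hypothetical parser for the summary's "Files Modified" section.

```javascript
// Map meta.type to a commit prefix (unknown types fall back to "chore")
const COMMIT_TYPE = { feature: 'feat', bugfix: 'fix', refactor: 'refactor',
                      'test-gen': 'test', docs: 'docs', review: 'chore' };

function autoCommitTask(sessionDir, task) {
  const summaryPath = `${sessionDir}/.summaries/${task.id}-summary.md`;
  if (bash(`test -f ${summaryPath} && echo yes || echo no`).trim() !== 'yes') return; // no summary → skip commit

  const summary = Read(summaryPath);
  const files = extractFilesModified(summary);   // hypothetical parser for the "Files Modified" section
  if (files.length === 0) return;                // minimal principle: nothing to commit

  const prefix = COMMIT_TYPE[task.meta.type] || 'chore';
  const firstLine = summary.split('\n').find(line => line.trim()) || task.title;
  bash(`git add ${files.join(' ')} && git commit -m "${prefix}: ${task.title} - ${firstLine}"`);
}
```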
- -## Error Handling & Recovery - -### Common Errors & Recovery - -| Error Type | Cause | Recovery Strategy | Max Attempts | -|-----------|-------|------------------|--------------| -| **Discovery Errors** | -| No active session | No sessions in `.workflow/active/` | Create or resume session: `Skill(skill="workflow:plan", args="\"project\"")` | N/A | -| Multiple sessions | Multiple sessions in `.workflow/active/` | Prompt user selection | N/A | -| Corrupted session | Invalid JSON files | Recreate session structure or validate files | N/A | -| **Execution Errors** | -| Agent failure | Agent crash/timeout | Retry with simplified context | 2 | -| Flow control error | Command failure | Skip optional, fail critical | 1 per step | -| Context loading error | Missing dependencies | Reload from JSON, use defaults | 3 | -| JSON file corruption | File system issues | Restore from backup/recreate | 1 | - -### Error Prevention -- **Pre-flight Checks**: Validate session integrity before execution -- **Backup Strategy**: Create task snapshots before major operations -- **Atomic Updates**: Update JSON files atomically to prevent corruption -- **Dependency Validation**: Check all depends_on references exist -- **Context Verification**: Ensure all required context is available - -## Flag Parsing - -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -const withCommit = $ARGUMENTS.includes('--with-commit') -``` diff --git a/.claude/skills/workflow-lite-plan/SKILL.md b/.claude/skills/workflow-lite-plan/SKILL.md deleted file mode 100644 index 0ee27ca6..00000000 --- a/.claude/skills/workflow-lite-plan/SKILL.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -name: workflow-lite-plan -description: Unified lightweight planning skill with mode selection (Lite Plan, Multi-CLI Plan, Lite Fix). Supports exploration, diagnosis, multi-CLI collaboration, and shared execution via lite-execute. -allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, Skill, mcp__ace-tool__search_context ---- - -# Planning Workflow - -Unified lightweight planning skill that consolidates multiple planning approaches into a single entry point with mode selection. Default mode: **Lite Plan**. All planning modes share a common execution phase (lite-execute). - -## Architecture Overview - -``` -┌──────────────────────────────────────────────────────────┐ -│ Planning Workflow Orchestrator (SKILL.md) │ -│ → Parse args → Mode selection → Load phase → Execute │ -└────────────┬─────────────────────────────────────────────┘ - │ Mode Selection (default: Lite Plan) - ┌────────┼────────┬──────────┐ - ↓ ↓ ↓ ↓ (shared) -┌────────┐ ┌────────┐ ┌────────┐ ┌────────────┐ -│Phase 1 │ │Phase 2 │ │Phase 3 │ │ Phase 4 │ -│ Lite │ │Multi- │ │ Lite │ │ Lite │ -│ Plan │ │CLI Plan│ │ Fix │ │ Execute │ -└────────┘ └────────┘ └────────┘ └────────────┘ - │ │ │ ↑ - └──────────┴──────────┴───────────┘ - (all hand off to Phase 4) -``` - -## Key Design Principles - -1. **Mode Selection First**: User chooses planning approach before any work begins -2. **Shared Execution**: All planning modes produce `executionContext` consumed by Phase 4 (lite-execute) -3. **Progressive Phase Loading**: Only load the selected planning phase + execution phase -4. **Auto-Continue**: Planning phase completes → automatically loads execution phase -5. 
**Default Lite Plan**: When no mode specified, use Lite Plan (most common) - -## Auto Mode - -When `--yes` or `-y`: Skip mode selection (use default or flag-specified mode), auto-approve plan, skip clarifications. - -## Usage - -``` -Skill(skill="workflow-lite-plan", args="") -Skill(skill="workflow-lite-plan", args="[FLAGS] \"\"") - -# Flags ---mode lite-plan|multi-cli|lite-fix Planning mode selection (default: lite-plan) --y, --yes Skip all confirmations (auto mode) --e, --explore Force exploration (lite-plan only) ---hotfix Fast hotfix mode (lite-fix only) - -# Examples -Skill(skill="workflow-lite-plan", args="\"Implement JWT authentication\"") # Default: lite-plan -Skill(skill="workflow-lite-plan", args="--mode multi-cli \"Refactor payment module\"") # Multi-CLI planning -Skill(skill="workflow-lite-plan", args="--mode lite-fix \"Login fails with 500 error\"") # Bug fix mode -Skill(skill="workflow-lite-plan", args="-y \"Add user profile page\"") # Auto mode -Skill(skill="workflow-lite-plan", args="--mode lite-fix --hotfix \"Production DB timeout\"") # Hotfix mode -``` - -## Execution Flow - -``` -Input Parsing: - ├─ Extract flags: --mode, --yes, --explore, --hotfix - └─ Extract task description (string or file path) - -Mode Selection: - └─ Decision: - ├─ --mode lite-plan (or no --mode flag) → Read phases/01-lite-plan.md - ├─ --mode multi-cli → Read phases/02-multi-cli-plan.md - ├─ --mode lite-fix → Read phases/03-lite-fix.md - └─ No flag + not --yes → AskUserQuestion (default: Lite Plan) - -Planning Phase (one of): - ├─ Phase 1: Lite Plan - │ └─ Ref: phases/01-lite-plan.md - │ └─ Output: executionContext (plan.json + explorations + selections) - │ - ├─ Phase 2: Multi-CLI Plan - │ └─ Ref: phases/02-multi-cli-plan.md - │ └─ Output: executionContext (plan.json + synthesis rounds + selections) - │ - └─ Phase 3: Lite Fix - └─ Ref: phases/03-lite-fix.md - └─ Output: executionContext (fix-plan.json + diagnoses + selections) - -Execution Phase (always): - └─ Phase 4: Lite Execute - └─ Ref: phases/04-lite-execute.md - └─ Input: executionContext from planning phase - └─ Output: Executed tasks + optional code review -``` - -**Phase Reference Documents** (read on-demand when phase executes): - -| Phase | Document | Purpose | -|-------|----------|---------| -| 1 | [phases/01-lite-plan.md](phases/01-lite-plan.md) | Lightweight planning with exploration, clarification, and plan generation | -| 2 | [phases/02-multi-cli-plan.md](phases/02-multi-cli-plan.md) | Multi-CLI collaborative planning with ACE context and cross-verification | -| 3 | [phases/03-lite-fix.md](phases/03-lite-fix.md) | Bug diagnosis and fix planning with severity-based workflow | -| 4 | [phases/04-lite-execute.md](phases/04-lite-execute.md) | Shared execution engine: task grouping, batch execution, code review | - -## Mode Selection Logic - -```javascript -// Flag parsing -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -const modeFlag = extractFlag($ARGUMENTS, '--mode') // 'lite-plan' | 'multi-cli' | 'lite-fix' | null - -// Mode determination -let selectedMode - -if (modeFlag) { - // Explicit mode flag - selectedMode = modeFlag -} else if (autoYes) { - // Auto mode: default to lite-plan - selectedMode = 'lite-plan' -} else { - // Interactive: ask user - const selection = AskUserQuestion({ - questions: [{ - question: "Select planning approach:", - header: "Mode", - multiSelect: false, - options: [ - { label: "Lite Plan (Recommended)", description: "Lightweight planning with exploration and clarification" }, - 
{ label: "Multi-CLI Plan", description: "Multi-model collaborative planning (Gemini + Codex + Claude)" }, - { label: "Lite Fix", description: "Bug diagnosis and fix planning with severity assessment" } - ] - }] - }) - selectedMode = parseSelection(selection) // Map to 'lite-plan' | 'multi-cli' | 'lite-fix' -} - -// Load phase document -const phaseDoc = { - 'lite-plan': 'phases/01-lite-plan.md', - 'multi-cli': 'phases/02-multi-cli-plan.md', - 'lite-fix': 'phases/03-lite-fix.md' -}[selectedMode] - -Read(phaseDoc) // Load selected planning phase -// Execute planning phase... -// After planning completes: -Read('phases/04-lite-execute.md') // Load execution phase -``` - -## Data Flow - -``` -Planning Phase (01/02/03) - │ - ├─ Produces: executionContext = { - │ planObject: plan.json or fix-plan.json, - │ explorationsContext / diagnosisContext / synthesis rounds, - │ clarificationContext, - │ executionMethod: "Agent" | "Codex" | "Auto", - │ codeReviewTool: "Skip" | "Gemini Review" | ..., - │ originalUserInput: string, - │ session: { id, folder, artifacts } - │ } - │ - ↓ -Execution Phase (04) - │ - ├─ Consumes: executionContext - ├─ Task grouping → Batch creation → Parallel/sequential execution - ├─ Optional code review - └─ Development index update -``` - -## TodoWrite Pattern - -**Initialization** (after mode selection): -```json -[ - {"content": "Mode: {selectedMode} - Planning", "status": "in_progress", "activeForm": "Planning ({selectedMode})"}, - {"content": "Execution (Phase 4)", "status": "pending", "activeForm": "Executing tasks"} -] -``` - -**After planning completes**: -```json -[ - {"content": "Mode: {selectedMode} - Planning", "status": "completed", "activeForm": "Planning ({selectedMode})"}, - {"content": "Execution (Phase 4)", "status": "in_progress", "activeForm": "Executing tasks"} -] -``` - -Phase-internal sub-tasks are managed by each phase document (attach/collapse pattern). - -## Core Rules - -1. **Planning phases NEVER execute code** - all execution delegated to Phase 4 -2. **Only ONE planning phase runs** per invocation (Phase 1, 2, or 3) -3. **Phase 4 ALWAYS runs** after planning completes -4. **executionContext is the contract** between planning and execution phases -5. **Progressive loading**: Read phase doc ONLY when about to execute -6. **No cross-phase loading**: Don't load Phase 2 if user selected Phase 1 - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Unknown --mode value | Default to lite-plan with warning | -| Planning phase failure | Display error, offer retry or mode switch | -| executionContext missing | Error: planning phase did not produce context | -| Phase file not found | Error with file path for debugging | - -## Related Skills - -- Full planning workflow: [workflow-plan/SKILL.md](../workflow-plan/SKILL.md) -- Brainstorming: [workflow-brainstorm-auto-parallel/SKILL.md](../workflow-brainstorm-auto-parallel/SKILL.md) diff --git a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md b/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md deleted file mode 100644 index a284d1c4..00000000 --- a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md +++ /dev/null @@ -1,691 +0,0 @@ -# Phase 1: Lite Plan - -## Overview - -Intelligent lightweight planning command with dynamic workflow adaptation based on task complexity. Focuses on planning phases (exploration, clarification, planning, confirmation) and delegates execution to Phase 4: Lite Execute (phases/04-lite-execute.md). 
- -**Core capabilities:** -- Intelligent task analysis with automatic exploration detection -- Dynamic code exploration (cli-explore-agent) when codebase understanding needed -- Interactive clarification after exploration to gather missing information -- Adaptive planning: Low complexity → Direct Claude; Medium/High → cli-lite-planning-agent -- Two-step confirmation: plan display → multi-dimensional input collection -- Execution execute with complete context handoff to lite-execute - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `-y`, `--yes` | Skip all confirmations (auto mode) | -| `-e`, `--explore` | Force code exploration phase (overrides auto-detection) | -| `` | Task description or path to .md file (required) | - -## Output Artifacts - -| Artifact | Description | -|----------|-------------| -| `exploration-{angle}.json` | Per-angle exploration results (1-4 files based on complexity) | -| `explorations-manifest.json` | Index of all exploration files | -| `planning-context.md` | Evidence paths + synthesized understanding | -| `plan.json` | Structured implementation plan (plan-json-schema.json) | - -**Output Directory**: `.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/` - -**Agent Usage**: -- Low complexity → Direct Claude planning (no agent) -- Medium/High complexity → `cli-lite-planning-agent` generates `plan.json` - -**Schema Reference**: `~/.claude/workflows/cli-templates/schemas/plan-json-schema.json` - -## Auto Mode Defaults - -When `--yes` or `-y` flag is used: -- **Clarification Questions**: Skipped (no clarification phase) -- **Plan Confirmation**: Auto-selected "Allow" -- **Execution Method**: Auto-selected "Auto" -- **Code Review**: Auto-selected "Skip" - -**Flag Parsing**: -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -const forceExplore = $ARGUMENTS.includes('--explore') || $ARGUMENTS.includes('-e') -``` - -## Execution Process - -``` -Phase 1: Task Analysis & Exploration - ├─ Parse input (description or .md file) - ├─ intelligent complexity assessment (Low/Medium/High) - ├─ Exploration decision (auto-detect or --explore flag) - ├─ Context protection: If file reading ≥50k chars → force cli-explore-agent - └─ Decision: - ├─ needsExploration=true → Launch parallel cli-explore-agents (1-4 based on complexity) - └─ needsExploration=false → Skip to Phase 2/3 - -Phase 2: Clarification (optional, multi-round) - ├─ Aggregate clarification_needs from all exploration angles - ├─ Deduplicate similar questions - └─ Decision: - ├─ Has clarifications → AskUserQuestion (max 4 questions per round, multiple rounds allowed) - └─ No clarifications → Skip to Phase 3 - -Phase 3: Planning (NO CODE EXECUTION - planning only) - └─ Decision (based on Phase 1 complexity): - ├─ Low → Load schema: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json → Direct Claude planning (following schema) → plan.json - └─ Medium/High → cli-lite-planning-agent → plan.json (agent internally executes quality check) - -Phase 4: Confirmation & Selection - ├─ Display plan summary (tasks, complexity, estimated time) - └─ AskUserQuestion: - ├─ Confirm: Allow / Modify / Cancel - ├─ Execution: Agent / Codex / Auto - └─ Review: Gemini / Agent / Skip - -Phase 5: Execute - ├─ Build executionContext (plan + explorations + clarifications + selections) - └─ → Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) --in-memory -``` - -## Implementation - -### Phase 1: Intelligent Multi-Angle Exploration - -**Session Setup** (MANDATORY - 
follow exactly): -```javascript -// Helper: Get UTC+8 (China Standard Time) ISO string -const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() - -const taskSlug = task_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40) -const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29 - -const sessionId = `${taskSlug}-${dateStr}` // e.g., "implement-jwt-refresh-2025-11-29" -const sessionFolder = `.workflow/.lite-plan/${sessionId}` - -bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`) -``` - -**Exploration Decision Logic**: -```javascript -needsExploration = ( - flags.includes('--explore') || flags.includes('-e') || - task.mentions_specific_files || - task.requires_codebase_context || - task.needs_architecture_understanding || - task.modifies_existing_code -) - -if (!needsExploration) { - // Skip to Phase 2 (Clarification) or Phase 3 (Planning) - proceed_to_next_phase() -} -``` - -**⚠️ Context Protection**: File reading ≥50k chars → force `needsExploration=true` (delegate to cli-explore-agent) - -**Complexity Assessment** (Intelligent Analysis): -```javascript -// analyzes task complexity based on: -// - Scope: How many systems/modules are affected? -// - Depth: Surface change vs architectural impact? -// - Risk: Potential for breaking existing functionality? -// - Dependencies: How interconnected is the change? - -const complexity = analyzeTaskComplexity(task_description) -// Returns: 'Low' | 'Medium' | 'High' -// Low: Single file, isolated change, minimal risk -// Medium: Multiple files, some dependencies, moderate risk -// High: Cross-module, architectural, high risk - -// Angle assignment based on task type (orchestrator decides, not agent) -const ANGLE_PRESETS = { - architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'], - security: ['security', 'auth-patterns', 'dataflow', 'validation'], - performance: ['performance', 'bottlenecks', 'caching', 'data-access'], - bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], - feature: ['patterns', 'integration-points', 'testing', 'dependencies'] -} - -function selectAngles(taskDescription, count) { - const text = taskDescription.toLowerCase() - let preset = 'feature' // default - - if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture' - else if (/security|auth|permission|access/.test(text)) preset = 'security' - else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance' - else if (/fix|bug|error|issue|broken/.test(text)) preset = 'bugfix' - - return ANGLE_PRESETS[preset].slice(0, count) -} - -const selectedAngles = selectAngles(task_description, complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1)) - -// Planning strategy determination -const planningStrategy = complexity === 'Low' - ? 'Direct Claude Planning' - : 'cli-lite-planning-agent' - -console.log(` -## Exploration Plan - -Task Complexity: ${complexity} -Selected Angles: ${selectedAngles.join(', ')} -Planning Strategy: ${planningStrategy} - -Launching ${selectedAngles.length} parallel explorations... 
-`) -``` - -**Launch Parallel Explorations** - Orchestrator assigns angle to each agent: - -**⚠️ CRITICAL - NO BACKGROUND EXECUTION**: -- **MUST NOT use `run_in_background: true`** - exploration results are REQUIRED before planning - - -```javascript -// Launch agents with pre-assigned angles -const explorationTasks = selectedAngles.map((angle, index) => - Task( - subagent_type="cli-explore-agent", - run_in_background=false, // ⚠️ MANDATORY: Must wait for results - description=`Explore: ${angle}`, - prompt=` -## Task Objective -Execute **${angle}** exploration for task planning context. Analyze codebase from this specific angle to discover relevant structure, patterns, and constraints. - -## Output Location - -**Session Folder**: ${sessionFolder} -**Output File**: ${sessionFolder}/exploration-${angle}.json - -## Assigned Context -- **Exploration Angle**: ${angle} -- **Task Description**: ${task_description} -- **Exploration Index**: ${index + 1} of ${selectedAngles.length} - -## MANDATORY FIRST STEPS (Execute by Agent) -**You (cli-explore-agent) MUST execute these steps in order:** -1. Run: ccw tool exec get_modules_by_depth '{}' (project structure) -2. Run: rg -l "{keyword_from_task}" --type ts (locate relevant files) -3. Execute: cat ~/.claude/workflows/cli-templates/schemas/explore-json-schema.json (get output schema reference) -4. Read: .workflow/project-tech.json (technology stack and architecture context) -5. Read: .workflow/project-guidelines.json (user-defined constraints and conventions) - -## Exploration Strategy (${angle} focus) - -**Step 1: Structural Scan** (Bash) -- get_modules_by_depth.sh → identify modules related to ${angle} -- find/rg → locate files relevant to ${angle} aspect -- Analyze imports/dependencies from ${angle} perspective - -**Step 2: Semantic Analysis** (Gemini CLI) -- How does existing code handle ${angle} concerns? -- What patterns are used for ${angle}? -- Where would new code integrate from ${angle} viewpoint? 
- -**Step 3: Write Output** -- Consolidate ${angle} findings into JSON -- Identify ${angle}-specific clarification needs - -## Expected Output - -**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 3, follow schema exactly - -**Required Fields** (all ${angle} focused): -- project_structure: Modules/architecture relevant to ${angle} -- relevant_files: Files affected from ${angle} perspective - **IMPORTANT**: Use object format with relevance scores for synthesis: - \`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\` - Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low -- patterns: ${angle}-related patterns to follow -- dependencies: Dependencies relevant to ${angle} -- integration_points: Where to integrate from ${angle} viewpoint (include file:line locations) -- constraints: ${angle}-specific limitations/conventions -- clarification_needs: ${angle}-related ambiguities (options array + recommended index) -- _metadata.exploration_angle: "${angle}" - -## Success Criteria -- [ ] Schema obtained via cat explore-json-schema.json -- [ ] get_modules_by_depth.sh executed -- [ ] At least 3 relevant files identified with ${angle} rationale -- [ ] Patterns are actionable (code examples, not generic advice) -- [ ] Integration points include file:line locations -- [ ] Constraints are project-specific to ${angle} -- [ ] JSON output follows schema exactly -- [ ] clarification_needs includes options + recommended - -## Execution -**Write**: \`${sessionFolder}/exploration-${angle}.json\` -**Return**: 2-3 sentence summary of ${angle} findings -` - ) -) - -// Execute all exploration tasks in parallel -``` - -**Auto-discover Generated Exploration Files**: -```javascript -// After explorations complete, auto-discover all exploration-*.json files -const explorationFiles = bash(`find ${sessionFolder} -name "exploration-*.json" -type f`) - .split('\n') - .filter(f => f.trim()) - -// Read metadata to build manifest -const explorationManifest = { - session_id: sessionId, - task_description: task_description, - timestamp: getUtc8ISOString(), - complexity: complexity, - exploration_count: explorationCount, - explorations: explorationFiles.map(file => { - const data = JSON.parse(Read(file)) - const filename = path.basename(file) - return { - angle: data._metadata.exploration_angle, - file: filename, - path: file, - index: data._metadata.exploration_index - } - }) -} - -Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2)) - -console.log(` -## Exploration Complete - -Generated exploration files in ${sessionFolder}: -${explorationManifest.explorations.map(e => `- exploration-${e.angle}.json (angle: ${e.angle})`).join('\n')} - -Manifest: explorations-manifest.json -Angles explored: ${explorationManifest.explorations.map(e => e.angle).join(', ')} -`) -``` - -**Output**: -- `${sessionFolder}/exploration-{angle1}.json` -- `${sessionFolder}/exploration-{angle2}.json` -- ... (1-4 files based on complexity) -- `${sessionFolder}/explorations-manifest.json` - ---- - -### Phase 2: Clarification (Optional, Multi-Round) - -**Skip if**: No exploration or `clarification_needs` is empty across all explorations - -**⚠️ CRITICAL**: AskUserQuestion tool limits max 4 questions per call. **MUST execute multiple rounds** to exhaust all clarification needs - do NOT stop at round 1. 
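As a compact sketch of the multi-round batching this note requires (illustrative only — the helper name and shape are not part of the skill, and it assumes the deduplicated list built in the next block):

```javascript
// Minimal sketch: split deduplicated clarification needs into rounds of <= 4,
// matching the AskUserQuestion per-call limit described above.
// `clarifications` is assumed to be the deduped array produced below.
function batchClarifications(clarifications, batchSize = 4) {
  const rounds = [];
  for (let i = 0; i < clarifications.length; i += batchSize) {
    rounds.push(clarifications.slice(i, i + batchSize));
  }
  return rounds; // e.g. 9 questions -> 3 rounds (4 + 4 + 1)
}

// Usage sketch: iterate EVERY round; do not stop after round 1.
// batchClarifications(dedupedClarifications).forEach((batch, idx) => {
//   console.log(`Clarification Round ${idx + 1}`);
//   // AskUserQuestion({ questions: batch.map(toQuestion) })
// });
```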
- -**Aggregate clarification needs from all exploration angles**: -```javascript -// Load manifest and all exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -const explorations = manifest.explorations.map(exp => ({ - angle: exp.angle, - data: JSON.parse(Read(exp.path)) -})) - -// Aggregate clarification needs from all explorations -const allClarifications = [] -explorations.forEach(exp => { - if (exp.data.clarification_needs?.length > 0) { - exp.data.clarification_needs.forEach(need => { - allClarifications.push({ - ...need, - source_angle: exp.angle - }) - }) - } -}) - -// Intelligent deduplication: analyze allClarifications by intent -// - Identify questions with similar intent across different angles -// - Merge similar questions: combine options, consolidate context -// - Produce dedupedClarifications with unique intents only -const dedupedClarifications = intelligentMerge(allClarifications) - -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -if (autoYes) { - // Auto mode: Skip clarification phase - console.log(`[--yes] Skipping ${dedupedClarifications.length} clarification questions`) - console.log(`Proceeding to planning with exploration results...`) - // Continue to Phase 3 -} else if (dedupedClarifications.length > 0) { - // Interactive mode: Multi-round clarification - const BATCH_SIZE = 4 - const totalRounds = Math.ceil(dedupedClarifications.length / BATCH_SIZE) - - for (let i = 0; i < dedupedClarifications.length; i += BATCH_SIZE) { - const batch = dedupedClarifications.slice(i, i + BATCH_SIZE) - const currentRound = Math.floor(i / BATCH_SIZE) + 1 - - console.log(`### Clarification Round ${currentRound}/${totalRounds}`) - - AskUserQuestion({ - questions: batch.map(need => ({ - question: `[${need.source_angle}] ${need.question}\n\nContext: ${need.context}`, - header: need.source_angle.substring(0, 12), - multiSelect: false, - options: need.options.map((opt, index) => ({ - label: need.recommended === index ? `${opt} ★` : opt, - description: need.recommended === index ? `Recommended` : `Use ${opt}` - })) - })) - }) - - // Store batch responses in clarificationContext before next round - } -} -``` - -**Output**: `clarificationContext` (in-memory) - ---- - -### Phase 3: Planning - -**Planning Strategy Selection** (based on Phase 1 complexity): - -**IMPORTANT**: Phase 3 is **planning only** - NO code execution. All execution happens in Phase 5 via lite-execute. - -**Executor Assignment** (Claude 智能分配,plan 生成后执行): - -```javascript -// 分配规则(优先级从高到低): -// 1. 用户明确指定:"用 gemini 分析..." → gemini, "codex 实现..." → codex -// 2. 默认 → agent - -const executorAssignments = {} // { taskId: { executor: 'gemini'|'codex'|'agent', reason: string } } -plan.tasks.forEach(task => { - // Claude 根据上述规则语义分析,为每个 task 分配 executor - executorAssignments[task.id] = { executor: '...', reason: '...' 
} -}) -``` - -**Low Complexity** - Direct planning by Claude: -```javascript -// Step 1: Read schema -const schema = Bash(`cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json`) - -// Step 2: ⚠️ MANDATORY - Read and review ALL exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -manifest.explorations.forEach(exp => { - const explorationData = Read(exp.path) - console.log(`\n### Exploration: ${exp.angle}\n${explorationData}`) -}) - -// Step 3: Generate plan following schema (Claude directly, no agent) -// ⚠️ Plan MUST incorporate insights from exploration files read in Step 2 -const plan = { - summary: "...", - approach: "...", - tasks: [...], // Each task: { id, title, scope, ..., depends_on, execution_group, complexity } - estimated_time: "...", - recommended_execution: "Agent", - complexity: "Low", - _metadata: { timestamp: getUtc8ISOString(), source: "direct-planning", planning_mode: "direct" } -} - -// Step 4: Write plan to session folder -Write(`${sessionFolder}/plan.json`, JSON.stringify(plan, null, 2)) - -// Step 5: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here -``` - -**Medium/High Complexity** - Invoke cli-lite-planning-agent: - -```javascript -Task( - subagent_type="cli-lite-planning-agent", - run_in_background=false, - description="Generate detailed implementation plan", - prompt=` -Generate implementation plan and write plan.json. - -## Output Location - -**Session Folder**: ${sessionFolder} -**Output Files**: -- ${sessionFolder}/planning-context.md (evidence + understanding) -- ${sessionFolder}/plan.json (implementation plan) - -## Output Schema Reference -Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json (get schema reference before generating plan) - -## Project Context (MANDATORY - Read Both Files) -1. Read: .workflow/project-tech.json (technology stack, architecture, key components) -2. Read: .workflow/project-guidelines.json (user-defined constraints and conventions) - -**CRITICAL**: All generated tasks MUST comply with constraints in project-guidelines.json - -## Task Description -${task_description} - -## Multi-Angle Exploration Context - -${manifest.explorations.map(exp => `### Exploration: ${exp.angle} (${exp.file}) -Path: ${exp.path} - -Read this file for detailed ${exp.angle} analysis.`).join('\n\n')} - -Total explorations: ${manifest.exploration_count} -Angles covered: ${manifest.explorations.map(e => e.angle).join(', ')} - -Manifest: ${sessionFolder}/explorations-manifest.json - -## User Clarifications -${JSON.stringify(clarificationContext) || "None"} - -## Complexity Level -${complexity} - -## Requirements -Generate plan.json following the schema obtained above. Key constraints: -- tasks: 2-7 structured tasks (**group by feature/module, NOT by file**) -- _metadata.exploration_angles: ${JSON.stringify(manifest.explorations.map(e => e.angle))} - -## Task Grouping Rules -1. **Group by feature**: All changes for one feature = one task (even if 3-5 files) -2. **Group by context**: Tasks with similar context or related functional changes can be grouped together -3. **Minimize agent count**: Simple, unrelated tasks can also be grouped to reduce agent execution overhead -4. **Avoid file-per-task**: Do NOT create separate tasks for each file -5. **Substantial tasks**: Each task should represent 15-60 minutes of work -6. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output -7. 
**Prefer parallel**: Most tasks should be independent (no depends_on) - -## Execution -1. Read schema file (cat command above) -2. Execute CLI planning using Gemini (Qwen fallback) -3. Read ALL exploration files for comprehensive context -4. Synthesize findings and generate plan following schema -5. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding) -6. **Write**: \`${sessionFolder}/plan.json\` -7. Return brief completion summary -` -) -``` - -**Output**: `${sessionFolder}/plan.json` - ---- - -### Phase 4: Task Confirmation & Execution Selection - -**Step 4.1: Display Plan** -```javascript -const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) - -console.log(` -## Implementation Plan - -**Summary**: ${plan.summary} -**Approach**: ${plan.approach} - -**Tasks** (${plan.tasks.length}): -${plan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.file})`).join('\n')} - -**Complexity**: ${plan.complexity} -**Estimated Time**: ${plan.estimated_time} -**Recommended**: ${plan.recommended_execution} -`) -``` - -**Step 4.2: Collect Confirmation** -```javascript -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -let userSelection - -if (autoYes) { - // Auto mode: Use defaults - console.log(`[--yes] Auto-confirming plan:`) - console.log(` - Confirmation: Allow`) - console.log(` - Execution: Auto`) - console.log(` - Review: Skip`) - - userSelection = { - confirmation: "Allow", - execution_method: "Auto", - code_review_tool: "Skip" - } -} else { - // Interactive mode: Ask user - // Note: Execution "Other" option allows specifying CLI tools from ~/.claude/cli-tools.json - userSelection = AskUserQuestion({ - questions: [ - { - question: `Confirm plan? (${plan.tasks.length} tasks, ${plan.complexity})`, - header: "Confirm", - multiSelect: false, - options: [ - { label: "Allow", description: "Proceed as-is" }, - { label: "Modify", description: "Adjust before execution" }, - { label: "Cancel", description: "Abort workflow" } - ] - }, - { - question: "Execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: `Auto: ${plan.complexity === 'Low' ? 'Agent' : 'Codex'}` } - ] - }, - { - question: "Code review after execution?", - header: "Review", - multiSelect: false, - options: [ - { label: "Gemini Review", description: "Gemini CLI review" }, - { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, - { label: "Agent Review", description: "@code-reviewer agent" }, - { label: "Skip", description: "No review" } - ] - } - ] - }) -} -``` - ---- - -### Phase 5: Execute to Execution - -**CRITICAL**: lite-plan NEVER executes code directly. ALL execution MUST go through lite-execute. 
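Before Step 5.1, the "Auto" choice from Phase 4 eventually has to resolve to a concrete executor. The resolution itself may happen later inside lite-execute; the sketch below only illustrates the mapping advertised by the Phase 4 "Auto" option (Low complexity → Agent, otherwise → Codex). The function name is an assumption, not part of the skill contract.

```javascript
// Illustrative only: resolve "Auto" to a concrete executor using the rule
// shown in the Phase 4 option description above.
function resolveExecutionMethod(selection, complexity) {
  if (selection !== 'Auto') return selection; // user picked Agent or Codex explicitly
  return complexity === 'Low' ? 'Agent' : 'Codex';
}

// Example: resolveExecutionMethod('Auto', 'Medium') === 'Codex'
```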
- -**Step 5.1: Build executionContext** - -```javascript -// Load manifest and all exploration files -const manifest = JSON.parse(Read(`${sessionFolder}/explorations-manifest.json`)) -const explorations = {} - -manifest.explorations.forEach(exp => { - if (file_exists(exp.path)) { - explorations[exp.angle] = JSON.parse(Read(exp.path)) - } -}) - -const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) - -executionContext = { - planObject: plan, - explorationsContext: explorations, - explorationAngles: manifest.explorations.map(e => e.angle), - explorationManifest: manifest, - clarificationContext: clarificationContext || null, - executionMethod: userSelection.execution_method, // 全局默认,可被 executorAssignments 覆盖 - codeReviewTool: userSelection.code_review_tool, - originalUserInput: task_description, - - // 任务级 executor 分配(优先于全局 executionMethod) - executorAssignments: executorAssignments, // { taskId: { executor, reason } } - - session: { - id: sessionId, - folder: sessionFolder, - artifacts: { - explorations: manifest.explorations.map(exp => ({ - angle: exp.angle, - path: exp.path - })), - explorations_manifest: `${sessionFolder}/explorations-manifest.json`, - plan: `${sessionFolder}/plan.json` - } - } -} -``` - -**Step 5.2: Execute** - -```javascript -// → Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) --in-memory -``` - -## Session Folder Structure - -``` -.workflow/.lite-plan/{task-slug}-{YYYY-MM-DD}/ -├── exploration-{angle1}.json # Exploration angle 1 -├── exploration-{angle2}.json # Exploration angle 2 -├── exploration-{angle3}.json # Exploration angle 3 (if applicable) -├── exploration-{angle4}.json # Exploration angle 4 (if applicable) -├── explorations-manifest.json # Exploration index -└── plan.json # Implementation plan -``` - -**Example**: -``` -.workflow/.lite-plan/implement-jwt-refresh-2025-11-25-14-30-25/ -├── exploration-architecture.json -├── exploration-auth-patterns.json -├── exploration-security.json -├── explorations-manifest.json -└── plan.json -``` - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Exploration agent failure | Skip exploration, continue with task description only | -| Planning agent failure | Fallback to direct planning by Claude | -| Clarification timeout | Use exploration findings as-is | -| Confirmation timeout | Save context, display resume instructions | -| Modify loop > 3 times | Suggest breaking task or using full planning workflow (workflow-plan/SKILL.md) | - ---- - -## Post-Phase Update - -After Phase 1 (Lite Plan) completes: -- **Output Created**: `executionContext` with plan.json, explorations, clarifications, user selections -- **Session Artifacts**: All files in `.workflow/.lite-plan/{session-id}/` -- **Next Action**: Auto-continue to [Phase 4: Lite Execute](04-lite-execute.md) with --in-memory -- **TodoWrite**: Mark "Lite Plan - Planning" as completed, start "Execution (Phase 4)" diff --git a/.claude/skills/workflow-lite-plan/phases/02-multi-cli-plan.md b/.claude/skills/workflow-lite-plan/phases/02-multi-cli-plan.md deleted file mode 100644 index 25ed57af..00000000 --- a/.claude/skills/workflow-lite-plan/phases/02-multi-cli-plan.md +++ /dev/null @@ -1,570 +0,0 @@ -# Phase 2: Multi-CLI Plan - -## Auto Mode - -When `--yes` or `-y`: Auto-approve plan, use recommended solution and execution method (Agent, Skip review). - -## Overview - -Multi-CLI collaborative planning with ACE context gathering and iterative cross-verification. 
Uses cli-discuss-agent for Gemini+Codex+Claude analysis to converge on optimal execution plan. - -## Quick Start - -**Parameters**: -- `` (required): Task description -- `--max-rounds` (optional): Maximum discussion rounds (default: 3) -- `--tools` (optional): CLI tools for analysis (default: gemini,codex) -- `--mode` (optional): Execution mode: parallel or serial - -**Context Source**: ACE semantic search + Multi-CLI analysis -**Output Directory**: `.workflow/.multi-cli-plan/{session-id}/` -**Default Max Rounds**: 3 (convergence may complete earlier) -**CLI Tools**: @cli-discuss-agent (analysis), @cli-lite-planning-agent (plan generation) -**Execution**: Auto-hands off to Phase 4: Lite Execute (phases/04-lite-execute.md) after plan approval - -## What & Why - -### Core Concept - -Multi-CLI collaborative planning with **three-phase architecture**: ACE context gathering → Iterative multi-CLI discussion → Plan generation. Orchestrator delegates analysis to agents, only handles user decisions and session management. - -**Process**: -- **Phase 1**: ACE semantic search gathers codebase context -- **Phase 2**: cli-discuss-agent orchestrates Gemini/Codex/Claude for cross-verified analysis -- **Phase 3-5**: User decision → Plan generation → Execution handoff - -**vs Single-CLI Planning**: -- **Single**: One model perspective, potential blind spots -- **Multi-CLI**: Cross-verification catches inconsistencies, builds consensus on solutions - -### Value Proposition - -1. **Multi-Perspective Analysis**: Gemini + Codex + Claude analyze from different angles -2. **Cross-Verification**: Identify agreements/disagreements, build confidence -3. **User-Driven Decisions**: Every round ends with user decision point -4. **Iterative Convergence**: Progressive refinement until consensus reached - -### Orchestrator Boundary (CRITICAL) - -- **ONLY command** for multi-CLI collaborative planning -- Manages: Session state, user decisions, agent delegation, phase transitions -- Delegates: CLI execution to @cli-discuss-agent, plan generation to @cli-lite-planning-agent - -### Execution Flow - -``` -Phase 1: Context Gathering - └─ ACE semantic search, extract keywords, build context package - -Phase 2: Multi-CLI Discussion (Iterative, via @cli-discuss-agent) - ├─ Round N: Agent executes Gemini + Codex + Claude - ├─ Cross-verify findings, synthesize solutions - ├─ Write synthesis.json to rounds/{N}/ - └─ Loop until convergence or max rounds - -Phase 3: Present Options - └─ Display solutions with trade-offs from agent output - -Phase 4: User Decision - ├─ Select solution approach - ├─ Select execution method (Agent/Codex/Auto) - ├─ Select code review tool (Skip/Gemini/Codex/Agent) - └─ Route: - ├─ Approve → Phase 5 - ├─ Need More Analysis → Return to Phase 2 - └─ Cancel → Save session - -Phase 5: Plan Generation & Execution Handoff - ├─ Generate plan.json (via @cli-lite-planning-agent) - ├─ Build executionContext with user selections - └─ Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) --in-memory -``` - -### Agent Roles - -| Agent | Responsibility | -|-------|---------------| -| **Orchestrator** | Session management, ACE context, user decisions, phase transitions, executionContext assembly | -| **@cli-discuss-agent** | Multi-CLI execution (Gemini/Codex/Claude), cross-verification, solution synthesis, synthesis.json output | -| **@cli-lite-planning-agent** | Task decomposition, plan.json generation following schema | - -## Core Responsibilities - -### Phase 1: Context Gathering - -**Session Initialization**: 
-```javascript -const sessionId = `MCP-${taskSlug}-${date}` -const sessionFolder = `.workflow/.multi-cli-plan/${sessionId}` -Bash(`mkdir -p ${sessionFolder}/rounds`) -``` - -**ACE Context Queries**: -```javascript -const aceQueries = [ - `Project architecture related to ${keywords}`, - `Existing implementations of ${keywords[0]}`, - `Code patterns for ${keywords} features`, - `Integration points for ${keywords[0]}` -] -// Execute via mcp__ace-tool__search_context -``` - -**Context Package** (passed to agent): -- `relevant_files[]` - Files identified by ACE -- `detected_patterns[]` - Code patterns found -- `architecture_insights` - Structure understanding - -### Phase 2: Agent Delegation - -**Core Principle**: Orchestrator only delegates and reads output - NO direct CLI execution. - -**⚠️ CRITICAL - CLI EXECUTION REQUIREMENT**: -- **MUST** execute CLI calls via `Bash` with `run_in_background: true` -- **MUST** wait for hook callback to receive complete results -- **MUST NOT** proceed with next phase until CLI execution fully completes -- Do NOT use `TaskOutput` polling during CLI execution - wait passively for results -- Minimize scope: Proceed only when 100% result available - -**Agent Invocation**: -```javascript -Task({ - subagent_type: "cli-discuss-agent", - run_in_background: false, - description: `Discussion round ${currentRound}`, - prompt: ` -## Input Context -- task_description: ${taskDescription} -- round_number: ${currentRound} -- session: { id: "${sessionId}", folder: "${sessionFolder}" } -- ace_context: ${JSON.stringify(contextPackageage)} -- previous_rounds: ${JSON.stringify(analysisResults)} -- user_feedback: ${userFeedback || 'None'} -- cli_config: { tools: ["gemini", "codex"], mode: "parallel", fallback_chain: ["gemini", "codex", "claude"] } - -## Execution Process -1. Parse input context (handle JSON strings) -2. Check if ACE supplementary search needed -3. Build CLI prompts with context -4. Execute CLIs (parallel or serial per cli_config.mode) -5. Parse CLI outputs, handle failures with fallback -6. Perform cross-verification between CLI results -7. Synthesize solutions, calculate scores -8. Calculate convergence, generate clarification questions -9. 
Write synthesis.json - -## Output -Write: ${sessionFolder}/rounds/${currentRound}/synthesis.json - -## Completion Checklist -- [ ] All configured CLI tools executed (or fallback triggered) -- [ ] Cross-verification completed with agreements/disagreements -- [ ] 2-3 solutions generated with file:line references -- [ ] Convergence score calculated (0.0-1.0) -- [ ] synthesis.json written with all Primary Fields -` -}) -``` - -**Read Agent Output**: -```javascript -const synthesis = JSON.parse(Read(`${sessionFolder}/rounds/${round}/synthesis.json`)) -// Access top-level fields: solutions, convergence, cross_verification, clarification_questions -``` - -**Convergence Decision**: -```javascript -if (synthesis.convergence.recommendation === 'converged') { - // Proceed to Phase 3 -} else if (synthesis.convergence.recommendation === 'user_input_needed') { - // Collect user feedback, return to Phase 2 -} else { - // Continue to next round if new_insights && round < maxRounds -} -``` - -### Phase 3: Present Options - -**Display from Agent Output** (no processing): -```javascript -console.log(` -## Solution Options - -${synthesis.solutions.map((s, i) => ` -**Option ${i+1}: ${s.name}** -Source: ${s.source_cli.join(' + ')} -Effort: ${s.effort} | Risk: ${s.risk} - -Pros: ${s.pros.join(', ')} -Cons: ${s.cons.join(', ')} - -Files: ${s.affected_files.slice(0,3).map(f => `${f.file}:${f.line}`).join(', ')} -`).join('\n')} - -## Cross-Verification -Agreements: ${synthesis.cross_verification.agreements.length} -Disagreements: ${synthesis.cross_verification.disagreements.length} -`) -``` - -### Phase 4: User Decision - -**Decision Options**: -```javascript -AskUserQuestion({ - questions: [ - { - question: "Which solution approach?", - header: "Solution", - multiSelect: false, - options: solutions.map((s, i) => ({ - label: `Option ${i+1}: ${s.name}`, - description: `${s.effort} effort, ${s.risk} risk` - })).concat([ - { label: "Need More Analysis", description: "Return to Phase 2" } - ]) - }, - { - question: "Execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: "Auto-select based on complexity" } - ] - }, - { - question: "Code review after execution?", - header: "Review", - multiSelect: false, - options: [ - { label: "Skip", description: "No review" }, - { label: "Gemini Review", description: "Gemini CLI tool" }, - { label: "Codex Review", description: "codex review --uncommitted" }, - { label: "Agent Review", description: "Current agent review" } - ] - } - ] -}) -``` - -**Routing**: -- Approve + execution method → Phase 5 -- Need More Analysis → Phase 2 with feedback -- Cancel → Save session for resumption - -### Phase 5: Plan Generation & Execution Handoff - -**Step 1: Build Context-Package** (Orchestrator responsibility): -```javascript -// Extract key information from user decision and synthesis -const contextPackage = { - // Core solution details - solution: { - name: selectedSolution.name, - source_cli: selectedSolution.source_cli, - feasibility: selectedSolution.feasibility, - effort: selectedSolution.effort, - risk: selectedSolution.risk, - summary: selectedSolution.summary - }, - // Implementation plan (tasks, flow, milestones) - implementation_plan: selectedSolution.implementation_plan, - // Dependencies - dependencies: selectedSolution.dependencies || { internal: [], external: [] }, - // Technical concerns - technical_concerns: 
selectedSolution.technical_concerns || [], - // Consensus from cross-verification - consensus: { - agreements: synthesis.cross_verification.agreements, - resolved_conflicts: synthesis.cross_verification.resolution - }, - // User constraints (from Phase 4 feedback) - constraints: userConstraints || [], - // Task context - task_description: taskDescription, - session_id: sessionId -} - -// Write context-package for traceability -Write(`${sessionFolder}/context-package.json`, JSON.stringify(contextPackage, null, 2)) -``` - -**Context-Package Schema**: - -| Field | Type | Description | -|-------|------|-------------| -| `solution` | object | User-selected solution from synthesis | -| `solution.name` | string | Solution identifier | -| `solution.feasibility` | number | Viability score (0-1) | -| `solution.summary` | string | Brief analysis summary | -| `implementation_plan` | object | Task breakdown with flow and dependencies | -| `implementation_plan.approach` | string | High-level technical strategy | -| `implementation_plan.tasks[]` | array | Discrete tasks with id, name, depends_on, files | -| `implementation_plan.execution_flow` | string | Task sequence (e.g., "T1 → T2 → T3") | -| `implementation_plan.milestones` | string[] | Key checkpoints | -| `dependencies` | object | Module and package dependencies | -| `technical_concerns` | string[] | Risks and blockers | -| `consensus` | object | Cross-verified agreements from multi-CLI | -| `constraints` | string[] | User-specified constraints from Phase 4 | - -```json -{ - "solution": { - "name": "Strategy Pattern Refactoring", - "source_cli": ["gemini", "codex"], - "feasibility": 0.88, - "effort": "medium", - "risk": "low", - "summary": "Extract payment gateway interface, implement strategy pattern for multi-gateway support" - }, - "implementation_plan": { - "approach": "Define interface → Create concrete strategies → Implement factory → Migrate existing code", - "tasks": [ - {"id": "T1", "name": "Define PaymentGateway interface", "depends_on": [], "files": [{"file": "src/types/payment.ts", "line": 1, "action": "create"}], "key_point": "Include all existing Stripe methods"}, - {"id": "T2", "name": "Implement StripeGateway", "depends_on": ["T1"], "files": [{"file": "src/payment/stripe.ts", "line": 1, "action": "create"}], "key_point": "Wrap existing logic"}, - {"id": "T3", "name": "Create GatewayFactory", "depends_on": ["T1"], "files": [{"file": "src/payment/factory.ts", "line": 1, "action": "create"}], "key_point": null}, - {"id": "T4", "name": "Migrate processor to use factory", "depends_on": ["T2", "T3"], "files": [{"file": "src/payment/processor.ts", "line": 45, "action": "modify"}], "key_point": "Backward compatible"} - ], - "execution_flow": "T1 → (T2 | T3) → T4", - "milestones": ["Interface defined", "Gateway implementations complete", "Migration done"] - }, - "dependencies": { - "internal": ["@/lib/payment-gateway", "@/types/payment"], - "external": ["stripe@^14.0.0"] - }, - "technical_concerns": ["Existing tests must pass", "No breaking API changes"], - "consensus": { - "agreements": ["Use strategy pattern", "Keep existing API"], - "resolved_conflicts": "Factory over DI for simpler integration" - }, - "constraints": ["backward compatible", "no breaking changes to PaymentResult type"], - "task_description": "Refactor payment processing for multi-gateway support", - "session_id": "MCP-payment-refactor-2026-01-14" -} -``` - -**Step 2: Invoke Planning Agent**: -```javascript -Task({ - subagent_type: "cli-lite-planning-agent", - 
run_in_background: false, - description: "Generate implementation plan", - prompt: ` -## Schema Reference -Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json - -## Context-Package (from orchestrator) -${JSON.stringify(contextPackage, null, 2)} - -## Execution Process -1. Read plan-json-schema.json for output structure -2. Read project-tech.json and project-guidelines.json -3. Parse context-package fields: - - solution: name, feasibility, summary - - implementation_plan: tasks[], execution_flow, milestones - - dependencies: internal[], external[] - - technical_concerns: risks/blockers - - consensus: agreements, resolved_conflicts - - constraints: user requirements -4. Use implementation_plan.tasks[] as task foundation -5. Preserve task dependencies (depends_on) and execution_flow -6. Expand tasks with detailed acceptance criteria -7. Generate plan.json following schema exactly - -## Output -- ${sessionFolder}/plan.json - -## Completion Checklist -- [ ] plan.json preserves task dependencies from implementation_plan -- [ ] Task execution order follows execution_flow -- [ ] Key_points reflected in task descriptions -- [ ] User constraints applied to implementation -- [ ] Acceptance criteria are testable -- [ ] Schema fields match plan-json-schema.json exactly -` -}) -``` - -**Step 3: Build executionContext**: -```javascript -// After plan.json is generated by cli-lite-planning-agent -const plan = JSON.parse(Read(`${sessionFolder}/plan.json`)) - -// Build executionContext (same structure as lite-plan) -executionContext = { - planObject: plan, - explorationsContext: null, // Multi-CLI doesn't use exploration files - explorationAngles: [], // No exploration angles - explorationManifest: null, // No manifest - clarificationContext: null, // Store user feedback from Phase 2 if exists - executionMethod: userSelection.execution_method, // From Phase 4 - codeReviewTool: userSelection.code_review_tool, // From Phase 4 - originalUserInput: taskDescription, - - // Optional: Task-level executor assignments - executorAssignments: null, // Could be enhanced in future - - session: { - id: sessionId, - folder: sessionFolder, - artifacts: { - explorations: [], // No explorations in multi-CLI workflow - explorations_manifest: null, - plan: `${sessionFolder}/plan.json`, - synthesis_rounds: Array.from({length: currentRound}, (_, i) => - `${sessionFolder}/rounds/${i+1}/synthesis.json` - ), - context_package: `${sessionFolder}/context-package.json` - } - } -} -``` - -**Step 4: Hand off to Execution**: -```javascript -// Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) with in-memory context -// executionContext is passed in-memory to the execution phase -``` - -## Output File Structure - -``` -.workflow/.multi-cli-plan/{MCP-task-slug-YYYY-MM-DD}/ -├── session-state.json # Session tracking (orchestrator) -├── rounds/ -│ ├── 1/synthesis.json # Round 1 analysis (cli-discuss-agent) -│ ├── 2/synthesis.json # Round 2 analysis (cli-discuss-agent) -│ └── .../ -├── context-package.json # Extracted context for planning (orchestrator) -└── plan.json # Structured plan (cli-lite-planning-agent) -``` - -**File Producers**: - -| File | Producer | Content | -|------|----------|---------| -| `session-state.json` | Orchestrator | Session metadata, rounds, decisions | -| `rounds/*/synthesis.json` | cli-discuss-agent | Solutions, convergence, cross-verification | -| `context-package.json` | Orchestrator | Extracted solution, dependencies, consensus for planning | -| `plan.json` | 
cli-lite-planning-agent | Structured tasks for lite-execute | - -## synthesis.json Schema - -```json -{ - "round": 1, - "solutions": [{ - "name": "Solution Name", - "source_cli": ["gemini", "codex"], - "feasibility": 0.85, - "effort": "low|medium|high", - "risk": "low|medium|high", - "summary": "Brief analysis summary", - "implementation_plan": { - "approach": "High-level technical approach", - "tasks": [ - {"id": "T1", "name": "Task", "depends_on": [], "files": [], "key_point": "..."} - ], - "execution_flow": "T1 → T2 → T3", - "milestones": ["Checkpoint 1", "Checkpoint 2"] - }, - "dependencies": {"internal": [], "external": []}, - "technical_concerns": ["Risk 1", "Blocker 2"] - }], - "convergence": { - "score": 0.85, - "new_insights": false, - "recommendation": "converged|continue|user_input_needed" - }, - "cross_verification": { - "agreements": [], - "disagreements": [], - "resolution": "..." - }, - "clarification_questions": [] -} -``` - -**Key Planning Fields**: - -| Field | Purpose | -|-------|---------| -| `feasibility` | Viability score (0-1) | -| `implementation_plan.tasks[]` | Discrete tasks with dependencies | -| `implementation_plan.execution_flow` | Task sequence visualization | -| `implementation_plan.milestones` | Key checkpoints | -| `technical_concerns` | Risks and blockers | - -**Note**: Solutions ranked by internal scoring (array order = priority) - -## TodoWrite Structure - -**Initialization**: -```javascript -TodoWrite({ todos: [ - { content: "Phase 1: Context Gathering", status: "in_progress", activeForm: "Gathering context" }, - { content: "Phase 2: Multi-CLI Discussion", status: "pending", activeForm: "Running discussion" }, - { content: "Phase 3: Present Options", status: "pending", activeForm: "Presenting options" }, - { content: "Phase 4: User Decision", status: "pending", activeForm: "Awaiting decision" }, - { content: "Phase 5: Plan Generation", status: "pending", activeForm: "Generating plan" } -]}) -``` - -**During Discussion Rounds**: -```javascript -TodoWrite({ todos: [ - { content: "Phase 1: Context Gathering", status: "completed", activeForm: "Gathering context" }, - { content: "Phase 2: Multi-CLI Discussion", status: "in_progress", activeForm: "Running discussion" }, - { content: " → Round 1: Initial analysis", status: "completed", activeForm: "Analyzing" }, - { content: " → Round 2: Deep verification", status: "in_progress", activeForm: "Verifying" }, - { content: "Phase 3: Present Options", status: "pending", activeForm: "Presenting options" }, - // ... -]}) -``` - -## Error Handling - -| Error | Resolution | -|-------|------------| -| ACE search fails | Fall back to Glob/Grep for file discovery | -| Agent fails | Retry once, then present partial results | -| CLI timeout (in agent) | Agent uses fallback: gemini → codex → claude | -| No convergence | Present best options, flag uncertainty | -| synthesis.json parse error | Request agent retry | -| User cancels | Save session for later resumption | - -## Configuration - -| Flag | Default | Description | -|------|---------|-------------| -| `--max-rounds` | 3 | Maximum discussion rounds | -| `--tools` | gemini,codex | CLI tools for analysis | -| `--mode` | parallel | Execution mode: parallel or serial | -| `--auto-execute` | false | Auto-execute after approval | - -## Best Practices - -1. **Be Specific**: Detailed task descriptions improve ACE context quality -2. **Provide Feedback**: Use clarification rounds to refine requirements -3. 
**Trust Cross-Verification**: Multi-CLI consensus indicates high confidence -4. **Review Trade-offs**: Consider pros/cons before selecting solution -5. **Check synthesis.json**: Review agent output for detailed analysis -6. **Iterate When Needed**: Don't hesitate to request more analysis - -## Related Phases - -- Simpler single-round planning: [Phase 1: Lite Plan](01-lite-plan.md) -- Shared execution engine: [Phase 4: Lite Execute](04-lite-execute.md) -- Full planning workflow: [workflow-plan/SKILL.md](../../workflow-plan/SKILL.md) - ---- - -## Post-Phase Update - -After Phase 2 (Multi-CLI Plan) completes: -- **Output Created**: `executionContext` with plan.json, synthesis rounds, context-package, user selections -- **Session Artifacts**: All files in `.workflow/.multi-cli-plan/{session-id}/` -- **Next Action**: Auto-continue to [Phase 4: Lite Execute](04-lite-execute.md) with --in-memory -- **TodoWrite**: Mark "Multi-CLI Plan - Planning" as completed, start "Execution (Phase 4)" diff --git a/.claude/skills/workflow-lite-plan/phases/03-lite-fix.md b/.claude/skills/workflow-lite-plan/phases/03-lite-fix.md deleted file mode 100644 index cb57885b..00000000 --- a/.claude/skills/workflow-lite-plan/phases/03-lite-fix.md +++ /dev/null @@ -1,799 +0,0 @@ -# Phase 3: Lite Fix - -## Overview - -Intelligent lightweight bug fixing command with dynamic workflow adaptation based on severity assessment. Focuses on diagnosis phases (root cause analysis, impact assessment, fix planning, confirmation) and delegates execution to Phase 4: Lite Execute (phases/04-lite-execute.md). - -**Core capabilities:** -- Intelligent bug analysis with automatic severity detection -- Dynamic code diagnosis (cli-explore-agent) for root cause identification -- Interactive clarification after diagnosis to gather missing information -- Adaptive fix planning strategy (direct Claude vs cli-lite-planning-agent) based on complexity -- Two-step confirmation: fix-plan display -> multi-dimensional input collection -- Execution handoff with complete context to Phase 4: Lite Execute (phases/04-lite-execute.md) - -## Parameters - -- `-y, --yes`: Skip all confirmations (auto mode) -- `--hotfix, -h`: Production hotfix mode (minimal diagnosis, fast fix) -- `` (required): Bug description, error message, or path to .md file - -## Output Artifacts - -| Artifact | Description | -|----------|-------------| -| `diagnosis-{angle}.json` | Per-angle diagnosis results (1-4 files based on severity) | -| `diagnoses-manifest.json` | Index of all diagnosis files | -| `planning-context.md` | Evidence paths + synthesized understanding | -| `fix-plan.json` | Structured fix plan (fix-plan-json-schema.json) | - -**Output Directory**: `.workflow/.lite-fix/{bug-slug}-{YYYY-MM-DD}/` - -**Agent Usage**: -- Low/Medium severity → Direct Claude planning (no agent) -- High/Critical severity → `cli-lite-planning-agent` generates `fix-plan.json` - -**Schema Reference**: `~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json` - -## Auto Mode Defaults - -When `--yes` or `-y` flag is used: -- **Clarification Questions**: Skipped (no clarification phase) -- **Fix Plan Confirmation**: Auto-selected "Allow" -- **Execution Method**: Auto-selected "Auto" -- **Code Review**: Auto-selected "Skip" -- **Severity**: Uses auto-detected severity (no manual override) -- **Hotfix Mode**: Respects --hotfix flag if present, otherwise normal mode - -**Flag Parsing**: -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -const hotfixMode = 
$ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h') -``` - -## Execution Process - -``` -Phase 1: Bug Analysis & Diagnosis - |- Parse input (description, error message, or .md file) - |- Intelligent severity pre-assessment (Low/Medium/High/Critical) - |- Diagnosis decision (auto-detect or --hotfix flag) - |- Context protection: If file reading >=50k chars -> force cli-explore-agent - +- Decision: - |- needsDiagnosis=true -> Launch parallel cli-explore-agents (1-4 based on severity) - +- needsDiagnosis=false (hotfix) -> Skip directly to Phase 3 (Fix Planning) - -Phase 2: Clarification (optional, multi-round) - |- Aggregate clarification_needs from all diagnosis angles - |- Deduplicate similar questions - +- Decision: - |- Has clarifications -> AskUserQuestion (max 4 questions per round, multiple rounds allowed) - +- No clarifications -> Skip to Phase 3 - -Phase 3: Fix Planning (NO CODE EXECUTION - planning only) - +- Decision (based on Phase 1 severity): - |- Low/Medium -> Load schema: cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json -> Direct Claude planning (following schema) -> fix-plan.json -> MUST proceed to Phase 4 - +- High/Critical -> cli-lite-planning-agent -> fix-plan.json -> MUST proceed to Phase 4 - -Phase 4: Confirmation & Selection - |- Display fix-plan summary (tasks, severity, estimated time) - +- AskUserQuestion: - |- Confirm: Allow / Modify / Cancel - |- Execution: Agent / Codex / Auto - +- Review: Gemini / Agent / Skip - -Phase 5: Execute - |- Build executionContext (fix-plan + diagnoses + clarifications + selections) - +- Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) --in-memory --mode bugfix -``` - -## Implementation - -### Phase 1: Intelligent Multi-Angle Diagnosis - -**Session Setup** (MANDATORY - follow exactly): -```javascript -// Helper: Get UTC+8 (China Standard Time) ISO string -const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() - -const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40) -const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29 - -const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-11-29" -const sessionFolder = `.workflow/.lite-fix/${sessionId}` - -bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`) -``` - -**Diagnosis Decision Logic**: -```javascript -const hotfixMode = $ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h') - -needsDiagnosis = ( - !hotfixMode && - ( - bug.lacks_specific_error_message || - bug.requires_codebase_context || - bug.needs_execution_tracing || - bug.root_cause_unclear - ) -) - -if (!needsDiagnosis) { - // Skip to Phase 2 (Clarification) or Phase 3 (Fix Planning) - proceed_to_next_phase() -} -``` - -**Context Protection**: File reading >=50k chars -> force `needsDiagnosis=true` (delegate to cli-explore-agent) - -**Severity Pre-Assessment** (Intelligent Analysis): -```javascript -// Analyzes bug severity based on: -// - Symptoms: Error messages, crash reports, user complaints -// - Scope: How many users/features are affected? 
-// - Urgency: Production down vs minor inconvenience -// - Impact: Data loss, security, business impact - -const severity = analyzeBugSeverity(bug_description) -// Returns: 'Low' | 'Medium' | 'High' | 'Critical' -// Low: Minor UI issue, localized, no data impact -// Medium: Multiple users affected, degraded functionality -// High: Significant functionality broken, many users affected -// Critical: Production down, data loss risk, security issue - -// Angle assignment based on bug type (orchestrator decides, not agent) -const DIAGNOSIS_ANGLE_PRESETS = { - runtime_error: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], - performance: ['performance', 'bottlenecks', 'caching', 'data-access'], - security: ['security', 'auth-patterns', 'dataflow', 'validation'], - data_corruption: ['data-integrity', 'state-management', 'transactions', 'validation'], - ui_bug: ['state-management', 'event-handling', 'rendering', 'data-binding'], - integration: ['api-contracts', 'error-handling', 'timeouts', 'fallbacks'] -} - -function selectDiagnosisAngles(bugDescription, count) { - const text = bugDescription.toLowerCase() - let preset = 'runtime_error' // default - - if (/slow|timeout|performance|lag|hang/.test(text)) preset = 'performance' - else if (/security|auth|permission|access|token/.test(text)) preset = 'security' - else if (/corrupt|data|lost|missing|inconsistent/.test(text)) preset = 'data_corruption' - else if (/ui|display|render|style|click|button/.test(text)) preset = 'ui_bug' - else if (/api|integration|connect|request|response/.test(text)) preset = 'integration' - - return DIAGNOSIS_ANGLE_PRESETS[preset].slice(0, count) -} - -const selectedAngles = selectDiagnosisAngles(bug_description, severity === 'Critical' ? 4 : (severity === 'High' ? 3 : (severity === 'Medium' ? 2 : 1))) - -console.log(` -## Diagnosis Plan - -Bug Severity: ${severity} -Selected Angles: ${selectedAngles.join(', ')} - -Launching ${selectedAngles.length} parallel diagnoses... -`) -``` - -**Launch Parallel Diagnoses** - Orchestrator assigns angle to each agent: - -```javascript -// Launch agents with pre-assigned diagnosis angles -const diagnosisTasks = selectedAngles.map((angle, index) => - Task( - subagent_type="cli-explore-agent", - run_in_background=false, - description=`Diagnose: ${angle}`, - prompt=` -## Task Objective -Execute **${angle}** diagnosis for bug root cause analysis. Analyze codebase from this specific angle to discover root cause, affected paths, and fix hints. - -## Output Location - -**Session Folder**: ${sessionFolder} -**Output File**: ${sessionFolder}/diagnosis-${angle}.json - -## Assigned Context -- **Diagnosis Angle**: ${angle} -- **Bug Description**: ${bug_description} -- **Diagnosis Index**: ${index + 1} of ${selectedAngles.length} - -## MANDATORY FIRST STEPS (Execute by Agent) -**You (cli-explore-agent) MUST execute these steps in order:** -1. Run: ccw tool exec get_modules_by_depth '{}' (project structure) -2. Run: rg -l "{error_keyword_from_bug}" --type ts (locate relevant files) -3. Execute: cat ~/.claude/workflows/cli-templates/schemas/diagnosis-json-schema.json (get output schema reference) -4. Read: .workflow/project-tech.json (technology stack and architecture context) -5. 
Read: .workflow/project-guidelines.json (user-defined constraints and conventions) - -## Diagnosis Strategy (${angle} focus) - -**Step 1: Error Tracing** (Bash) -- rg for error messages, stack traces, log patterns -- git log --since='2 weeks ago' for recent changes -- Trace execution path in affected modules - -**Step 2: Root Cause Analysis** (Gemini CLI) -- What code paths lead to this ${angle} issue? -- What edge cases are not handled from ${angle} perspective? -- What recent changes might have introduced this bug? - -**Step 3: Write Output** -- Consolidate ${angle} findings into JSON -- Identify ${angle}-specific clarification needs -- Provide fix hints based on ${angle} analysis - -## Expected Output - -**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 3, follow schema exactly - -**Required Fields** (all ${angle} focused): -- symptom: Bug symptoms and error messages -- root_cause: Root cause hypothesis from ${angle} perspective - **IMPORTANT**: Use structured format: - \`{file: "src/module/file.ts", line_range: "45-60", issue: "Description", confidence: 0.85}\` -- affected_files: Files involved from ${angle} perspective - **IMPORTANT**: Use object format with relevance scores: - \`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains ${angle} logic"}]\` -- reproduction_steps: Steps to reproduce the bug -- fix_hints: Suggested fix approaches from ${angle} viewpoint -- dependencies: Dependencies relevant to ${angle} diagnosis -- constraints: ${angle}-specific limitations affecting fix -- clarification_needs: ${angle}-related ambiguities (options array + recommended index) -- _metadata.diagnosis_angle: "${angle}" -- _metadata.diagnosis_index: ${index + 1} - -## Success Criteria -- [ ] Schema obtained via cat diagnosis-json-schema.json -- [ ] get_modules_by_depth.sh executed -- [ ] Root cause identified with confidence score -- [ ] At least 3 affected files identified with ${angle} rationale -- [ ] Fix hints are actionable (specific code changes, not generic advice) -- [ ] Reproduction steps are verifiable -- [ ] JSON output follows schema exactly -- [ ] clarification_needs includes options + recommended - -## Execution -**Write**: \`${sessionFolder}/diagnosis-${angle}.json\` -**Return**: 2-3 sentence summary of ${angle} diagnosis findings -` - ) -) - -// Execute all diagnosis tasks in parallel -``` - -**Auto-discover Generated Diagnosis Files**: -```javascript -// After diagnoses complete, auto-discover all diagnosis-*.json files -const diagnosisFiles = bash(`find ${sessionFolder} -name "diagnosis-*.json" -type f`) - .split('\n') - .filter(f => f.trim()) - -// Read metadata to build manifest -const diagnosisManifest = { - session_id: sessionId, - bug_description: bug_description, - timestamp: getUtc8ISOString(), - severity: severity, - diagnosis_count: diagnosisFiles.length, - diagnoses: diagnosisFiles.map(file => { - const data = JSON.parse(Read(file)) - const filename = path.basename(file) - return { - angle: data._metadata.diagnosis_angle, - file: filename, - path: file, - index: data._metadata.diagnosis_index - } - }) -} - -Write(`${sessionFolder}/diagnoses-manifest.json`, JSON.stringify(diagnosisManifest, null, 2)) - -console.log(` -## Diagnosis Complete - -Generated diagnosis files in ${sessionFolder}: -${diagnosisManifest.diagnoses.map(d => `- diagnosis-${d.angle}.json (angle: ${d.angle})`).join('\n')} - -Manifest: diagnoses-manifest.json -Angles diagnosed: ${diagnosisManifest.diagnoses.map(d => d.angle).join(', ')} -`) -``` - -**Output**: -- 
`${sessionFolder}/diagnosis-{angle1}.json` -- `${sessionFolder}/diagnosis-{angle2}.json` -- ... (1-4 files based on severity) -- `${sessionFolder}/diagnoses-manifest.json` - ---- - -### Phase 2: Clarification (Optional, Multi-Round) - -**Skip if**: No diagnosis or `clarification_needs` is empty across all diagnoses - -**⚠️ CRITICAL**: AskUserQuestion tool limits max 4 questions per call. **MUST execute multiple rounds** to exhaust all clarification needs - do NOT stop at round 1. - -**Aggregate clarification needs from all diagnosis angles**: -```javascript -// Load manifest and all diagnosis files -const manifest = JSON.parse(Read(`${sessionFolder}/diagnoses-manifest.json`)) -const diagnoses = manifest.diagnoses.map(diag => ({ - angle: diag.angle, - data: JSON.parse(Read(diag.path)) -})) - -// Aggregate clarification needs from all diagnoses -const allClarifications = [] -diagnoses.forEach(diag => { - if (diag.data.clarification_needs?.length > 0) { - diag.data.clarification_needs.forEach(need => { - allClarifications.push({ - ...need, - source_angle: diag.angle - }) - }) - } -}) - -// Deduplicate by question similarity -function deduplicateClarifications(clarifications) { - const unique = [] - clarifications.forEach(c => { - const isDuplicate = unique.some(u => - u.question.toLowerCase() === c.question.toLowerCase() - ) - if (!isDuplicate) unique.push(c) - }) - return unique -} - -const uniqueClarifications = deduplicateClarifications(allClarifications) - -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -if (autoYes) { - // Auto mode: Skip clarification phase - console.log(`[--yes] Skipping ${uniqueClarifications.length} clarification questions`) - console.log(`Proceeding to fix planning with diagnosis results...`) - // Continue to Phase 3 -} else if (uniqueClarifications.length > 0) { - // Interactive mode: Multi-round clarification - // ⚠️ MUST execute ALL rounds until uniqueClarifications exhausted - const BATCH_SIZE = 4 - const totalRounds = Math.ceil(uniqueClarifications.length / BATCH_SIZE) - - for (let i = 0; i < uniqueClarifications.length; i += BATCH_SIZE) { - const batch = uniqueClarifications.slice(i, i + BATCH_SIZE) - const currentRound = Math.floor(i / BATCH_SIZE) + 1 - - console.log(`### Clarification Round ${currentRound}/${totalRounds}`) - - AskUserQuestion({ - questions: batch.map(need => ({ - question: `[${need.source_angle}] ${need.question}\n\nContext: ${need.context}`, - header: need.source_angle, - multiSelect: false, - options: need.options.map((opt, index) => { - const isRecommended = need.recommended === index - return { - label: isRecommended ? `${opt} ★` : opt, - description: isRecommended ? `Use ${opt} approach (Recommended)` : `Use ${opt} approach` - } - }) - })) - }) - - // Store batch responses in clarificationContext before next round - } -} -``` - -**Output**: `clarificationContext` (in-memory) - ---- - -### Phase 3: Fix Planning - -**Planning Strategy Selection** (based on Phase 1 severity): - -**IMPORTANT**: Phase 3 is **planning only** - NO code execution. All execution happens in Phase 5 via lite-execute. 
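The routing below restates, as a standalone sketch, how severity drives both the diagnosis angle count (Phase 1) and the planner choice in this phase. The helper itself is illustrative and not part of the skill; the numbers mirror the prose above.

```javascript
// Minimal sketch of the severity routing described in Phase 1 and Phase 3.
function planningStrategyFor(severity) {
  const angleCount = { Low: 1, Medium: 2, High: 3, Critical: 4 }[severity] ?? 1;
  const planner = (severity === 'High' || severity === 'Critical')
    ? 'cli-lite-planning-agent'
    : 'direct-claude';
  return { angleCount, planner };
}

// Example: planningStrategyFor('Critical') -> { angleCount: 4, planner: 'cli-lite-planning-agent' }
```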
- -**Low/Medium Severity** - Direct planning by Claude: -```javascript -// Step 1: Read schema -const schema = Bash(`cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json`) - -// Step 2: Generate fix-plan following schema (Claude directly, no agent) -// For Medium complexity: include rationale + verification (optional, but recommended) -const fixPlan = { - summary: "...", - root_cause: "...", - strategy: "immediate_patch|comprehensive_fix|refactor", - tasks: [...], // Each task: { id, title, scope, ..., depends_on, complexity } - estimated_time: "...", - recommended_execution: "Agent", - severity: severity, - risk_level: "...", - - // Medium complexity fields (optional for direct planning, auto-filled for Low) - ...(severity === "Medium" ? { - design_decisions: [ - { - decision: "Use immediate_patch strategy for minimal risk", - rationale: "Keeps changes localized and quick to review", - tradeoff: "Defers comprehensive refactoring" - } - ], - tasks_with_rationale: { - // Each task gets rationale if Medium - task_rationale_example: { - rationale: { - chosen_approach: "Direct fix approach", - alternatives_considered: ["Workaround", "Refactor"], - decision_factors: ["Minimal impact", "Quick turnaround"], - tradeoffs: "Doesn't address underlying issue" - }, - verification: { - unit_tests: ["test_bug_fix_basic"], - integration_tests: [], - manual_checks: ["Reproduce issue", "Verify fix"], - success_metrics: ["Issue resolved", "No regressions"] - } - } - } - } : {}), - - _metadata: { - timestamp: getUtc8ISOString(), - source: "direct-planning", - planning_mode: "direct", - complexity: severity === "Medium" ? "Medium" : "Low" - } -} - -// Step 3: Merge task rationale into tasks array -if (severity === "Medium") { - fixPlan.tasks = fixPlan.tasks.map(task => ({ - ...task, - rationale: fixPlan.tasks_with_rationale[task.id]?.rationale || { - chosen_approach: "Standard fix", - alternatives_considered: [], - decision_factors: ["Correctness", "Simplicity"], - tradeoffs: "None" - }, - verification: fixPlan.tasks_with_rationale[task.id]?.verification || { - unit_tests: [`test_${task.id}_basic`], - integration_tests: [], - manual_checks: ["Verify fix works"], - success_metrics: ["Test pass"] - } - })) - delete fixPlan.tasks_with_rationale // Clean up temp field -} - -// Step 4: Write fix-plan to session folder -Write(`${sessionFolder}/fix-plan.json`, JSON.stringify(fixPlan, null, 2)) - -// Step 5: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here -``` - -**High/Critical Severity** - Invoke cli-lite-planning-agent: - -```javascript -Task( - subagent_type="cli-lite-planning-agent", - run_in_background=false, - description="Generate detailed fix plan", - prompt=` -Generate fix plan and write fix-plan.json. - -## Output Location - -**Session Folder**: ${sessionFolder} -**Output Files**: -- ${sessionFolder}/planning-context.md (evidence + understanding) -- ${sessionFolder}/fix-plan.json (fix plan) - -## Output Schema Reference -Execute: cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json (get schema reference before generating plan) - -## Project Context (MANDATORY - Read Both Files) -1. Read: .workflow/project-tech.json (technology stack, architecture, key components) -2. 
Read: .workflow/project-guidelines.json (user-defined constraints and conventions) - -**CRITICAL**: All fix tasks MUST comply with constraints in project-guidelines.json - -## Bug Description -${bug_description} - -## Multi-Angle Diagnosis Context - -${manifest.diagnoses.map(diag => `### Diagnosis: ${diag.angle} (${diag.file}) -Path: ${diag.path} - -Read this file for detailed ${diag.angle} analysis.`).join('\n\n')} - -Total diagnoses: ${manifest.diagnosis_count} -Angles covered: ${manifest.diagnoses.map(d => d.angle).join(', ')} - -Manifest: ${sessionFolder}/diagnoses-manifest.json - -## User Clarifications -${JSON.stringify(clarificationContext) || "None"} - -## Severity Level -${severity} - -## Requirements -Generate fix-plan.json with: -- summary: 2-3 sentence overview of the fix -- root_cause: Consolidated root cause from all diagnoses -- strategy: "immediate_patch" | "comprehensive_fix" | "refactor" -- tasks: 1-5 structured fix tasks (**IMPORTANT: group by fix area, NOT by file**) - - **Task Granularity Principle**: Each task = one complete fix unit - - title: action verb + target (e.g., "Fix token validation edge case") - - scope: module path (src/auth/) or feature name - - action: "Fix" | "Update" | "Refactor" | "Add" | "Delete" - - description - - modification_points: ALL files to modify for this fix (group related changes) - - implementation (2-5 steps covering all modification_points) - - acceptance: Quantified acceptance criteria - - depends_on: task IDs this task depends on (use sparingly) - - **High/Critical complexity fields per task** (REQUIRED): - - rationale: - - chosen_approach: Why this fix approach (not alternatives) - - alternatives_considered: Other approaches evaluated - - decision_factors: Key factors influencing choice - - tradeoffs: Known tradeoffs of this approach - - verification: - - unit_tests: Test names to add/verify - - integration_tests: Integration test names - - manual_checks: Manual verification steps - - success_metrics: Quantified success criteria - - risks: - - description: Risk description - - probability: Low|Medium|High - - impact: Low|Medium|High - - mitigation: How to mitigate - - fallback: Fallback if fix fails - - code_skeleton (optional): Key interfaces/functions to implement - - interfaces: [{name, definition, purpose}] - - key_functions: [{signature, purpose, returns}] - -**Top-level High/Critical fields** (REQUIRED): -- data_flow: How data flows through affected code - - diagram: "A → B → C" style flow - - stages: [{stage, input, output, component}] -- design_decisions: Global fix decisions - - [{decision, rationale, tradeoff}] - -- estimated_time, recommended_execution, severity, risk_level -- _metadata: - - timestamp, source, planning_mode - - complexity: "High" | "Critical" - - diagnosis_angles: ${JSON.stringify(manifest.diagnoses.map(d => d.angle))} - -## Task Grouping Rules -1. **Group by fix area**: All changes for one fix = one task (even if 2-3 files) -2. **Avoid file-per-task**: Do NOT create separate tasks for each file -3. **Substantial tasks**: Each task should represent 10-45 minutes of work -4. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output -5. **Prefer parallel**: Most tasks should be independent (no depends_on) - -## Execution -1. Read ALL diagnosis files for comprehensive context -2. Execute CLI planning using Gemini (Qwen fallback) with --rule planning-fix-strategy template -3. Synthesize findings from multiple diagnosis angles -4. 
Generate fix-plan with: - - For High/Critical: REQUIRED new fields (rationale, verification, risks, code_skeleton, data_flow, design_decisions) - - Each task MUST have rationale (why this fix), verification (how to verify success), and risks (potential issues) -5. Parse output and structure fix-plan -6. **Write**: \`${sessionFolder}/planning-context.md\` (evidence paths + understanding) -7. **Write**: \`${sessionFolder}/fix-plan.json\` -8. Return brief completion summary - -## Output Format for CLI -Include these sections in your fix-plan output: -- Summary, Root Cause, Strategy (existing) -- Data Flow: Diagram showing affected code paths -- Design Decisions: Key architectural choices in the fix -- Tasks: Each with rationale (Medium/High), verification (Medium/High), risks (High), code_skeleton (High) -` -) -``` - -**Output**: `${sessionFolder}/fix-plan.json` - ---- - -### Phase 4: Task Confirmation & Execution Selection - -**Step 4.1: Display Fix Plan** -```javascript -const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`)) - -console.log(` -## Fix Plan - -**Summary**: ${fixPlan.summary} -**Root Cause**: ${fixPlan.root_cause} -**Strategy**: ${fixPlan.strategy} - -**Tasks** (${fixPlan.tasks.length}): -${fixPlan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope})`).join('\n')} - -**Severity**: ${fixPlan.severity} -**Risk Level**: ${fixPlan.risk_level} -**Estimated Time**: ${fixPlan.estimated_time} -**Recommended**: ${fixPlan.recommended_execution} -`) -``` - -**Step 4.2: Collect Confirmation** -```javascript -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -let userSelection - -if (autoYes) { - // Auto mode: Use defaults - console.log(`[--yes] Auto-confirming fix plan:`) - console.log(` - Confirmation: Allow`) - console.log(` - Execution: Auto`) - console.log(` - Review: Skip`) - - userSelection = { - confirmation: "Allow", - execution_method: "Auto", - code_review_tool: "Skip" - } -} else { - // Interactive mode: Ask user - userSelection = AskUserQuestion({ - questions: [ - { - question: `Confirm fix plan? (${fixPlan.tasks.length} tasks, ${fixPlan.severity} severity)`, - header: "Confirm", - multiSelect: false, - options: [ - { label: "Allow", description: "Proceed as-is" }, - { label: "Modify", description: "Adjust before execution" }, - { label: "Cancel", description: "Abort workflow" } - ] - }, - { - question: "Execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: `Auto: ${fixPlan.severity === 'Low' ? 'Agent' : 'Codex'}` } - ] - }, - { - question: "Code review after fix?", - header: "Review", - multiSelect: false, - options: [ - { label: "Gemini Review", description: "Gemini CLI" }, - { label: "Agent Review", description: "@code-reviewer" }, - { label: "Skip", description: "No review" } - ] - } - ] - }) -} -``` - ---- - -### Phase 5: Execute to Execution - -**CRITICAL**: lite-fix NEVER executes code directly. ALL execution MUST go through lite-execute. 
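A minimal sketch of that handoff contract, assuming a hypothetical `handOffToLiteExecute` helper (the name and return shape are illustrative only; the real mechanism is the in-memory pass to `phases/04-lite-execute.md` described in Step 5.2 below):

```javascript
// Illustrative only: lite-fix plans and packages context, lite-execute runs it.
function handOffToLiteExecute(executionContext) {
  if (!executionContext || !executionContext.planObject) {
    throw new Error("Build executionContext (Step 5.1) before handing off")
  }
  // No Write/Edit of source files happens here - only the packaged context
  // is forwarded to the execution phase.
  return { phase: "04-lite-execute", mode: "bugfix", context: executionContext }
}
```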
- -**Step 5.1: Build executionContext** - -```javascript -// Load manifest and all diagnosis files -const manifest = JSON.parse(Read(`${sessionFolder}/diagnoses-manifest.json`)) -const diagnoses = {} - -manifest.diagnoses.forEach(diag => { - if (file_exists(diag.path)) { - diagnoses[diag.angle] = JSON.parse(Read(diag.path)) - } -}) - -const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`)) - -executionContext = { - mode: "bugfix", - severity: fixPlan.severity, - planObject: { - ...fixPlan, - // Ensure complexity is set based on severity for new field consumption - complexity: fixPlan.complexity || (fixPlan.severity === 'Critical' ? 'High' : (fixPlan.severity === 'High' ? 'High' : 'Medium')) - }, - diagnosisContext: diagnoses, - diagnosisAngles: manifest.diagnoses.map(d => d.angle), - diagnosisManifest: manifest, - clarificationContext: clarificationContext || null, - executionMethod: userSelection.execution_method, - codeReviewTool: userSelection.code_review_tool, - originalUserInput: bug_description, - session: { - id: sessionId, - folder: sessionFolder, - artifacts: { - diagnoses: manifest.diagnoses.map(diag => ({ - angle: diag.angle, - path: diag.path - })), - diagnoses_manifest: `${sessionFolder}/diagnoses-manifest.json`, - fix_plan: `${sessionFolder}/fix-plan.json` - } - } -} -``` - -**Step 5.2: Execute** - -```javascript -// Hand off to Phase 4: Lite Execute (phases/04-lite-execute.md) --in-memory --mode bugfix -// executionContext is passed in-memory to the execution phase -``` - -## Session Folder Structure - -``` -.workflow/.lite-fix/{bug-slug}-{YYYY-MM-DD}/ -├── diagnosis-{angle1}.json # Diagnosis angle 1 -├── diagnosis-{angle2}.json # Diagnosis angle 2 -├── diagnosis-{angle3}.json # Diagnosis angle 3 (if applicable) -├── diagnosis-{angle4}.json # Diagnosis angle 4 (if applicable) -├── diagnoses-manifest.json # Diagnosis index -├── planning-context.md # Evidence + understanding -└── fix-plan.json # Fix plan -``` - -**Example**: -``` -.workflow/.lite-fix/user-avatar-upload-fails-413-2025-11-25/ -├── diagnosis-error-handling.json -├── diagnosis-dataflow.json -├── diagnosis-validation.json -├── diagnoses-manifest.json -├── planning-context.md -└── fix-plan.json -``` - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Diagnosis agent failure | Skip diagnosis, continue with bug description only | -| Planning agent failure | Fallback to direct planning by Claude | -| Clarification timeout | Use diagnosis findings as-is | -| Confirmation timeout | Save context, display resume instructions | -| Modify loop > 3 times | Suggest breaking task or using full planning workflow (workflow-plan/SKILL.md) | -| Root cause unclear | Extend diagnosis time or use broader angles | -| Too complex for lite-fix | Escalate to full planning workflow (workflow-plan/SKILL.md) | - ---- - -## Post-Phase Update - -After Phase 3 (Lite Fix) completes: -- **Output Created**: `executionContext` with fix-plan.json, diagnoses, clarifications, user selections -- **Session Artifacts**: All files in `.workflow/.lite-fix/{session-id}/` -- **Next Action**: Auto-continue to [Phase 4: Lite Execute](04-lite-execute.md) with --in-memory --mode bugfix -- **TodoWrite**: Mark "Lite Fix - Planning" as completed, start "Execution (Phase 4)" diff --git a/.claude/skills/workflow-lite-plan/phases/04-lite-execute.md b/.claude/skills/workflow-lite-plan/phases/04-lite-execute.md deleted file mode 100644 index 97d91948..00000000 --- a/.claude/skills/workflow-lite-plan/phases/04-lite-execute.md +++ 
/dev/null @@ -1,738 +0,0 @@ -# Phase 4: Lite Execute - -## Overview - -Flexible task execution phase supporting three input modes: in-memory plan (from planning phases), direct prompt description, or file content. Handles execution orchestration, progress tracking, and optional code review. - -**Core capabilities:** -- Multi-mode input (in-memory plan, prompt description, or file path) -- Execution orchestration (Agent or Codex) with full context -- Live progress tracking via TodoWrite at execution call level -- Optional code review with selected tool (Gemini, Agent, or custom) -- Context continuity across multiple executions -- Intelligent format detection (Enhanced Task JSON vs plain text) - -## Parameters - -- `--in-memory`: Use plan from memory (called by planning phases) -- ``: Task description string, or path to file (required) - -## Input Modes - -### Mode 1: In-Memory Plan - -**Trigger**: Called by planning phase after confirmation with `--in-memory` flag - -**Input Source**: `executionContext` global variable set by planning phase - -**Content**: Complete execution context (see Data Structures section) - -**Behavior**: -- Skip execution method selection (already set by planning phase) -- Directly proceed to execution with full context -- All planning artifacts available (exploration, clarifications, plan) - -### Mode 2: Prompt Description - -**Trigger**: User calls with task description string - -**Input**: Simple task description (e.g., "Add unit tests for auth module") - -**Behavior**: -- Store prompt as `originalUserInput` -- Create simple execution plan from prompt -- AskUserQuestion: Select execution method (Agent/Codex/Auto) -- AskUserQuestion: Select code review tool (Skip/Gemini/Agent/Other) -- Proceed to execution with `originalUserInput` included - -**User Interaction**: -```javascript -// Parse --yes flag -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -let userSelection - -if (autoYes) { - // Auto mode: Use defaults - console.log(`[--yes] Auto-confirming execution:`) - console.log(` - Execution method: Auto`) - console.log(` - Code review: Skip`) - - userSelection = { - execution_method: "Auto", - code_review_tool: "Skip" - } -} else { - // Interactive mode: Ask user - userSelection = AskUserQuestion({ - questions: [ - { - question: "Select execution method:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent", description: "@code-developer agent" }, - { label: "Codex", description: "codex CLI tool" }, - { label: "Auto", description: "Auto-select based on complexity" } - ] - }, - { - question: "Enable code review after execution?", - header: "Code Review", - multiSelect: false, - options: [ - { label: "Skip", description: "No review" }, - { label: "Gemini Review", description: "Gemini CLI tool" }, - { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" }, - { label: "Agent Review", description: "Current agent review" } - ] - } - ] - }) -} -``` - -### Mode 3: File Content - -**Trigger**: User calls with file path - -**Input**: Path to file containing task description or plan.json - -**Step 1: Read and Detect Format** - -```javascript -fileContent = Read(filePath) - -// Attempt JSON parsing -try { - jsonData = JSON.parse(fileContent) - - // Check if plan.json from lite-plan session - if (jsonData.summary && jsonData.approach && jsonData.tasks) { - planObject = jsonData - originalUserInput = jsonData.summary - isPlanJson = true - } else { - // Valid JSON but not plan.json - treat as plain 
text - originalUserInput = fileContent - isPlanJson = false - } -} catch { - // Not valid JSON - treat as plain text prompt - originalUserInput = fileContent - isPlanJson = false -} -``` - -**Step 2: Create Execution Plan** - -If `isPlanJson === true`: -- Use `planObject` directly -- User selects execution method and code review - -If `isPlanJson === false`: -- Treat file content as prompt (same behavior as Mode 2) -- Create simple execution plan from content - -**Step 3: User Interaction** - -- AskUserQuestion: Select execution method (Agent/Codex/Auto) -- AskUserQuestion: Select code review tool -- Proceed to execution with full context - -## Execution Process - -``` -Input Parsing: - └─ Decision (mode detection): - ├─ --in-memory flag → Mode 1: Load executionContext → Skip user selection - ├─ Ends with .md/.json/.txt → Mode 3: Read file → Detect format - │ ├─ Valid plan.json → Use planObject → User selects method + review - │ └─ Not plan.json → Treat as prompt → User selects method + review - └─ Other → Mode 2: Prompt description → User selects method + review - -Execution: - ├─ Step 1: Initialize result tracking (previousExecutionResults = []) - ├─ Step 2: Task grouping & batch creation - │ ├─ Extract explicit depends_on (no file/keyword inference) - │ ├─ Group: independent tasks → single parallel batch (maximize utilization) - │ ├─ Group: dependent tasks → sequential phases (respect dependencies) - │ └─ Create TodoWrite list for batches - ├─ Step 3: Launch execution - │ ├─ Phase 1: All independent tasks (single batch, concurrent) - │ └─ Phase 2+: Dependent tasks by dependency order - ├─ Step 4: Track progress (TodoWrite updates per batch) - └─ Step 5: Code review (if codeReviewTool ≠ "Skip") - -Output: - └─ Execution complete with results in previousExecutionResults[] -``` - -## Detailed Execution Steps - -### Step 1: Initialize Execution Tracking - -**Operations**: -- Initialize result tracking for multi-execution scenarios -- Set up `previousExecutionResults` array for context continuity -- **In-Memory Mode**: Echo execution strategy from planning phase for transparency - -```javascript -// Initialize result tracking -previousExecutionResults = [] - -// In-Memory Mode: Echo execution strategy (transparency before execution) -if (executionContext) { - console.log(` -Execution Strategy (from planning phase): - Method: ${executionContext.executionMethod} - Review: ${executionContext.codeReviewTool} - Tasks: ${executionContext.planObject.tasks.length} - Complexity: ${executionContext.planObject.complexity} -${executionContext.executorAssignments ? 
` Assignments: ${JSON.stringify(executionContext.executorAssignments)}` : ''} - `) -} -``` - -### Step 2: Task Grouping & Batch Creation - -**Dependency Analysis & Grouping Algorithm**: -```javascript -// Use explicit depends_on from plan.json (no inference from file/keywords) -function extractDependencies(tasks) { - const taskIdToIndex = {} - tasks.forEach((t, i) => { taskIdToIndex[t.id] = i }) - - return tasks.map((task, i) => { - // Only use explicit depends_on from plan.json - const deps = (task.depends_on || []) - .map(depId => taskIdToIndex[depId]) - .filter(idx => idx !== undefined && idx < i) - return { ...task, taskIndex: i, dependencies: deps } - }) -} - -// Group into batches: maximize parallel execution -function createExecutionCalls(tasks, executionMethod) { - const tasksWithDeps = extractDependencies(tasks) - const processed = new Set() - const calls = [] - - // Phase 1: All independent tasks → single parallel batch (maximize utilization) - const independentTasks = tasksWithDeps.filter(t => t.dependencies.length === 0) - if (independentTasks.length > 0) { - independentTasks.forEach(t => processed.add(t.taskIndex)) - calls.push({ - method: executionMethod, - executionType: "parallel", - groupId: "P1", - taskSummary: independentTasks.map(t => t.title).join(' | '), - tasks: independentTasks - }) - } - - // Phase 2: Dependent tasks → sequential batches (respect dependencies) - let sequentialIndex = 1 - let remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex)) - - while (remaining.length > 0) { - // Find tasks whose dependencies are all satisfied - const ready = remaining.filter(t => - t.dependencies.every(d => processed.has(d)) - ) - - if (ready.length === 0) { - console.warn('Circular dependency detected, forcing remaining tasks') - ready.push(...remaining) - } - - // Group ready tasks (can run in parallel within this phase) - ready.forEach(t => processed.add(t.taskIndex)) - calls.push({ - method: executionMethod, - executionType: ready.length > 1 ? "parallel" : "sequential", - groupId: ready.length > 1 ? `P${calls.length + 1}` : `S${sequentialIndex++}`, - taskSummary: ready.map(t => t.title).join(ready.length > 1 ? ' | ' : ' → '), - tasks: ready - }) - - remaining = remaining.filter(t => !processed.has(t.taskIndex)) - } - - return calls -} - -executionCalls = createExecutionCalls(planObject.tasks, executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` })) - -TodoWrite({ - todos: executionCalls.map(c => ({ - content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} (${c.tasks.length} tasks)`, - status: "pending", - activeForm: `Executing ${c.id}` - })) -}) -``` - -### Step 3: Launch Execution - -**Executor Resolution** (task-level executor takes priority over the global setting): -```javascript -// Resolve the task's executor (prefer executorAssignments, fall back to the global executionMethod) -function getTaskExecutor(task) { - const assignments = executionContext?.executorAssignments || {} - if (assignments[task.id]) { - return assignments[task.id].executor // 'gemini' | 'codex' | 'agent' - } - // Fallback: map from the global executionMethod - const method = executionContext?.executionMethod || 'Auto' - if (method === 'Agent') return 'agent' - if (method === 'Codex') return 'codex' - // Auto: choose by complexity - return planObject.complexity === 'Low' ?
'agent' : 'codex' -} - -// 按 executor 分组任务 -function groupTasksByExecutor(tasks) { - const groups = { gemini: [], codex: [], agent: [] } - tasks.forEach(task => { - const executor = getTaskExecutor(task) - groups[executor].push(task) - }) - return groups -} -``` - -**Execution Flow**: Parallel batches concurrently → Sequential batches in order -```javascript -const parallel = executionCalls.filter(c => c.executionType === "parallel") -const sequential = executionCalls.filter(c => c.executionType === "sequential") - -// Phase 1: Launch all parallel batches (single message with multiple tool calls) -if (parallel.length > 0) { - TodoWrite({ todos: executionCalls.map(c => ({ status: c.executionType === "parallel" ? "in_progress" : "pending" })) }) - parallelResults = await Promise.all(parallel.map(c => executeBatch(c))) - previousExecutionResults.push(...parallelResults) - TodoWrite({ todos: executionCalls.map(c => ({ status: parallel.includes(c) ? "completed" : "pending" })) }) -} - -// Phase 2: Execute sequential batches one by one -for (const call of sequential) { - TodoWrite({ todos: executionCalls.map(c => ({ status: c === call ? "in_progress" : "..." })) }) - result = await executeBatch(call) - previousExecutionResults.push(result) - TodoWrite({ todos: executionCalls.map(c => ({ status: "completed" or "pending" })) }) -} -``` - -### Unified Task Prompt Builder - -**Task Formatting Principle**: Each task is a self-contained checklist. The executor only needs to know what THIS task requires. Same template for Agent and CLI. - -```javascript -function buildExecutionPrompt(batch) { - // Task template (6 parts: Modification Points → Why → How → Reference → Risks → Done) - const formatTask = (t) => ` -## ${t.title} - -**Scope**: \`${t.scope}\` | **Action**: ${t.action} - -### Modification Points -${t.modification_points.map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')} - -${t.rationale ? ` -### Why this approach (Medium/High) -${t.rationale.chosen_approach} -${t.rationale.decision_factors?.length > 0 ? `\nKey factors: ${t.rationale.decision_factors.join(', ')}` : ''} -${t.rationale.tradeoffs ? `\nTradeoffs: ${t.rationale.tradeoffs}` : ''} -` : ''} - -### How to do it -${t.description} - -${t.implementation.map(step => `- ${step}`).join('\n')} - -${t.code_skeleton ? ` -### Code skeleton (High) -${t.code_skeleton.interfaces?.length > 0 ? `**Interfaces**: ${t.code_skeleton.interfaces.map(i => `\`${i.name}\` - ${i.purpose}`).join(', ')}` : ''} -${t.code_skeleton.key_functions?.length > 0 ? `\n**Functions**: ${t.code_skeleton.key_functions.map(f => `\`${f.signature}\` - ${f.purpose}`).join(', ')}` : ''} -${t.code_skeleton.classes?.length > 0 ? `\n**Classes**: ${t.code_skeleton.classes.map(c => `\`${c.name}\` - ${c.purpose}`).join(', ')}` : ''} -` : ''} - -### Reference -- Pattern: ${t.reference?.pattern || 'N/A'} -- Files: ${t.reference?.files?.join(', ') || 'N/A'} -${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''} - -${t.risks?.length > 0 ? ` -### Risk mitigations (High) -${t.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')} -` : ''} - -### Done when -${t.acceptance.map(c => `- [ ] ${c}`).join('\n')} -${t.verification?.success_metrics?.length > 0 ? 
`\n**Success metrics**: ${t.verification.success_metrics.join(', ')}` : ''}` - - // Build prompt - const sections = [] - - if (originalUserInput) sections.push(`## Goal\n${originalUserInput}`) - - sections.push(`## Tasks\n${batch.tasks.map(formatTask).join('\n\n---\n')}`) - - // Context (reference only) - const context = [] - if (previousExecutionResults.length > 0) { - context.push(`### Previous Work\n${previousExecutionResults.map(r => `- ${r.tasksSummary}: ${r.status}`).join('\n')}`) - } - if (clarificationContext) { - context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`) - } - if (executionContext?.planObject?.data_flow?.diagram) { - context.push(`### Data Flow\n${executionContext.planObject.data_flow.diagram}`) - } - if (executionContext?.session?.artifacts?.plan) { - context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`) - } - // Project guidelines (user-defined constraints) - context.push(`### Project Guidelines\n@.workflow/project-guidelines.json`) - if (context.length > 0) sections.push(`## Context\n${context.join('\n\n')}`) - - sections.push(`Complete each task according to its "Done when" checklist.`) - - return sections.join('\n\n') -} -``` - -**Option A: Agent Execution** - -When to use: -- `getTaskExecutor(task) === "agent"` -- or `executionMethod = "Agent"` (global fallback) -- or `executionMethod = "Auto" AND complexity = "Low"` (global fallback) - -```javascript -Task( - subagent_type="code-developer", - run_in_background=false, - description=batch.taskSummary, - prompt=buildExecutionPrompt(batch) -) -``` - -**Result Collection**: After completion, collect result following `executionResult` structure (see Data Structures section) - -**Option B: CLI Execution (Codex)** - -When to use: -- `getTaskExecutor(task) === "codex"` -- or `executionMethod = "Codex"` (global fallback) -- or `executionMethod = "Auto" AND complexity = "Medium/High"` (global fallback) - -```bash -ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write -``` - -**Execution with fixed IDs** (predictable ID pattern): -```javascript -// Launch CLI in background, wait for task hook callback -// Generate fixed execution ID: ${sessionId}-${groupId} -const sessionId = executionContext?.session?.id || 'standalone' -const fixedExecutionId = `${sessionId}-${batch.groupId}` // e.g., "implement-auth-2025-12-13-P1" - -// Check if resuming from previous failed execution -const previousCliId = batch.resumeFromCliId || null - -// Build command with fixed ID (and optional resume for continuation) -const cli_command = previousCliId - ? `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId} --resume ${previousCliId}` - : `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId}` - -// Execute in background, stop output and wait for task hook callback -Bash( - command=cli_command, - run_in_background=true -) -// STOP HERE - CLI executes in background, task hook will notify on completion -``` - -**Resume on Failure** (with fixed ID): -```javascript -// If execution failed or timed out, offer resume option -if (bash_result.status === 'failed' || bash_result.status === 'timeout') { - console.log(` -Execution incomplete. 
Resume available: - Fixed ID: ${fixedExecutionId} - Lookup: ccw cli detail ${fixedExecutionId} - Resume: ccw cli -p "Continue tasks" --resume ${fixedExecutionId} --tool codex --mode write --id ${fixedExecutionId}-retry -`) - - // Store for potential retry in same session - batch.resumeFromCliId = fixedExecutionId -} -``` - -**Result Collection**: After completion, analyze output and collect result following `executionResult` structure (include `cliExecutionId` for resume capability) - -**Option C: CLI Execution (Gemini)** - -When to use: `getTaskExecutor(task) === "gemini"` (analysis tasks) - -```bash -# Use unified buildExecutionPrompt, switch tool and mode -ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode analysis --id ${sessionId}-${batch.groupId} -``` - -### Step 4: Progress Tracking - -Progress tracked at batch level (not individual task level). Icons: ⚡ (parallel, concurrent), → (sequential, one-by-one) - -### Step 5: Code Review (Optional) - -**Skip Condition**: Only run if `codeReviewTool ≠ "Skip"` - -**Review Focus**: Verify implementation against plan acceptance criteria and verification requirements -- Read plan.json for task acceptance criteria and verification checklist -- Check each acceptance criterion is fulfilled -- Verify success metrics from verification field (Medium/High complexity) -- Run unit/integration tests specified in verification field -- Validate code quality and identify issues -- Ensure alignment with planned approach and risk mitigations - -**Operations**: -- Agent Review: Current agent performs direct review -- Gemini Review: Execute gemini CLI with review prompt -- Codex Review: Two options - (A) with prompt for complex reviews, (B) `--uncommitted` flag only for quick reviews -- Custom tool: Execute specified CLI tool (qwen, etc.) - -**Unified Review Template** (All tools use same standard): - -**Review Criteria**: -- **Acceptance Criteria**: Verify each criterion from plan.tasks[].acceptance -- **Verification Checklist** (Medium/High): Check unit_tests, integration_tests, success_metrics from plan.tasks[].verification -- **Code Quality**: Analyze quality, identify issues, suggest improvements -- **Plan Alignment**: Validate implementation matches planned approach and risk mitigations - -**Shared Prompt Template** (used by all CLI tools): -``` -PURPOSE: Code review for implemented changes against plan acceptance criteria and verification requirements -TASK: • Verify plan acceptance criteria fulfillment • Check verification requirements (unit tests, success metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations -MODE: analysis -CONTEXT: @**/* @{plan.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including verification checklist -EXPECTED: Quality assessment with: - - Acceptance criteria verification (all tasks) - - Verification checklist validation (Medium/High: unit_tests, integration_tests, success_metrics) - - Issue identification - - Recommendations - Explicitly check each acceptance criterion and verification item from plan.json tasks. 
-CONSTRAINTS: Focus on plan acceptance criteria, verification requirements, and plan adherence | analysis=READ-ONLY -``` - -**Tool-Specific Execution** (Apply shared prompt template above): - -```bash -# Method 1: Agent Review (current agent) -# - Read plan.json: ${executionContext.session.artifacts.plan} -# - Apply unified review criteria (see Shared Prompt Template) -# - Report findings directly - -# Method 2: Gemini Review (recommended) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool gemini --mode analysis -# CONTEXT includes: @**/* @${plan.json} [@${exploration.json}] - -# Method 3: Qwen Review (alternative) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool qwen --mode analysis -# Same prompt as Gemini, different execution engine - -# Method 4: Codex Review (git-aware) - Two mutually exclusive options: - -# Option A: With custom prompt (reviews uncommitted by default) -ccw cli -p "[Shared Prompt Template with artifacts]" --tool codex --mode review -# Use for complex reviews with specific focus areas - -# Option B: Target flag only (no prompt allowed) -ccw cli --tool codex --mode review --uncommitted -# Quick review of uncommitted changes without custom instructions - -# IMPORTANT: -p prompt and target flags (--uncommitted/--base/--commit) are MUTUALLY EXCLUSIVE -``` - -**Multi-Round Review with Fixed IDs**: -```javascript -// Generate fixed review ID -const reviewId = `${sessionId}-review` - -// First review pass with fixed ID -const reviewResult = Bash(`ccw cli -p "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`) - -// If issues found, continue review dialog with fixed ID chain -if (hasUnresolvedIssues(reviewResult)) { - // Resume with follow-up questions - Bash(`ccw cli -p "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`) -} -``` - -**Implementation Note**: Replace `[Shared Prompt Template with artifacts]` placeholder with actual template content, substituting: -- `@{plan.json}` → `@${executionContext.session.artifacts.plan}` -- `[@{exploration.json}]` → exploration files from artifacts (if exists) - -### Step 6: Update Development Index - -**Trigger**: After all executions complete (regardless of code review) - -**Skip Condition**: Skip if `.workflow/project-tech.json` does not exist - -**Operations**: -```javascript -const projectJsonPath = '.workflow/project-tech.json' -if (!fileExists(projectJsonPath)) return // Silent skip - -const projectJson = JSON.parse(Read(projectJsonPath)) - -// Initialize if needed -if (!projectJson.development_index) { - projectJson.development_index = { feature: [], enhancement: [], bugfix: [], refactor: [], docs: [] } -} - -// Detect category from keywords -function detectCategory(text) { - text = text.toLowerCase() - if (/\b(fix|bug|error|issue|crash)\b/.test(text)) return 'bugfix' - if (/\b(refactor|cleanup|reorganize)\b/.test(text)) return 'refactor' - if (/\b(doc|readme|comment)\b/.test(text)) return 'docs' - if (/\b(add|new|create|implement)\b/.test(text)) return 'feature' - return 'enhancement' -} - -// Detect sub_feature from task file paths -function detectSubFeature(tasks) { - const dirs = tasks.map(t => t.file?.split('/').slice(-2, -1)[0]).filter(Boolean) - const counts = dirs.reduce((a, d) => { a[d] = (a[d] || 0) + 1; return a }, {}) - return Object.entries(counts).sort((a, b) => b[1] - a[1])[0]?.[0] || 'general' -} - -const category = detectCategory(`${planObject.summary} ${planObject.approach}`) -const entry = { - title: 
planObject.summary.slice(0, 60), - sub_feature: detectSubFeature(planObject.tasks), - date: new Date().toISOString().split('T')[0], - description: planObject.approach.slice(0, 100), - status: previousExecutionResults.every(r => r.status === 'completed') ? 'completed' : 'partial', - session_id: executionContext?.session?.id || null -} - -projectJson.development_index[category].push(entry) -projectJson.statistics.last_updated = new Date().toISOString() -Write(projectJsonPath, JSON.stringify(projectJson, null, 2)) - -console.log(`Development index: [${category}] ${entry.title}`) -``` - -## Best Practices - -**Input Modes**: In-memory (planning phase), prompt (standalone), file (JSON/text) -**Task Grouping**: Based on explicit depends_on only; independent tasks run in single parallel batch -**Execution**: All independent tasks launch concurrently via single Claude message with multiple tool calls - -## Error Handling - -| Error | Cause | Resolution | -|-------|-------|------------| -| Missing executionContext | --in-memory without context | Error: "No execution context found. Only available when called by planning phase." | -| File not found | File path doesn't exist | Error: "File not found: {path}. Check file path." | -| Empty file | File exists but no content | Error: "File is empty: {path}. Provide task description." | -| Invalid Enhanced Task JSON | JSON missing required fields | Warning: "Missing required fields. Treating as plain text." | -| Malformed JSON | JSON parsing fails | Treat as plain text (expected for non-JSON files) | -| Execution failure | Agent/Codex crashes | Display error, use fixed ID `${sessionId}-${groupId}` for resume: `ccw cli -p "Continue" --resume --id -retry` | -| Execution timeout | CLI exceeded timeout | Use fixed ID for resume with extended timeout | -| Codex unavailable | Codex not installed | Show installation instructions, offer Agent execution | -| Fixed ID not found | Custom ID lookup failed | Check `ccw cli history`, verify date directories | - -## Data Structures - -### executionContext (Input - Mode 1) - -Passed from planning phase via global variable: - -```javascript -{ - planObject: { - summary: string, - approach: string, - tasks: [...], - estimated_time: string, - recommended_execution: string, - complexity: string - }, - explorationsContext: {...} | null, // Multi-angle explorations - explorationAngles: string[], // List of exploration angles - explorationManifest: {...} | null, // Exploration manifest - clarificationContext: {...} | null, - executionMethod: "Agent" | "Codex" | "Auto", // Global default - codeReviewTool: "Skip" | "Gemini Review" | "Agent Review" | string, - originalUserInput: string, - - // Task-level executor assignments (priority over executionMethod) - executorAssignments: { - [taskId]: { executor: "gemini" | "codex" | "agent", reason: string } - }, - - // Session artifacts location (saved by planning phase) - session: { - id: string, // Session identifier: {taskSlug}-{shortTimestamp} - folder: string, // Session folder path: .workflow/.lite-plan/{session-id} - artifacts: { - explorations: [{angle, path}], // exploration-{angle}.json paths - explorations_manifest: string, // explorations-manifest.json path - plan: string // plan.json path (always present) - } - } -} -``` - -**Artifact Usage**: -- Artifact files contain detailed planning context -- Pass artifact paths to CLI tools and agents for enhanced context -- See execution options above for usage examples - -### executionResult (Output) - -Collected after each execution 
call completes: - -```javascript -{ - executionId: string, // e.g., "[Agent-1]", "[Codex-1]" - status: "completed" | "partial" | "failed", - tasksSummary: string, // Brief description of tasks handled - completionSummary: string, // What was completed - keyOutputs: string, // Files created/modified, key changes - notes: string, // Important context for next execution - fixedCliId: string | null // Fixed CLI execution ID (e.g., "implement-auth-2025-12-13-P1") -} -``` - -Appended to `previousExecutionResults` array for context continuity in multi-execution scenarios. - -## Post-Completion Expansion - -After completion, ask user whether to expand as issue (test/enhance/refactor/doc). Selected items create new issues accordingly. - -**Fixed ID Pattern**: `${sessionId}-${groupId}` enables predictable lookup without auto-generated timestamps. - -**Resume Usage**: If `status` is "partial" or "failed", use `fixedCliId` to resume: -```bash -# Lookup previous execution -ccw cli detail ${fixedCliId} - -# Resume with new fixed ID for retry -ccw cli -p "Continue from where we left off" --resume ${fixedCliId} --tool codex --mode write --id ${fixedCliId}-retry -``` - ---- - -## Post-Phase Update - -After Phase 4 (Lite Execute) completes: -- **Output Created**: Executed tasks, optional code review results, updated development index -- **Execution Results**: `previousExecutionResults[]` with status per batch -- **Next Action**: Workflow complete. Optionally expand to issue (test/enhance/refactor/doc) -- **TodoWrite**: Mark all execution batches as completed diff --git a/.claude/skills/workflow-plan/SKILL.md b/.claude/skills/workflow-plan/SKILL.md deleted file mode 100644 index f8575a1d..00000000 --- a/.claude/skills/workflow-plan/SKILL.md +++ /dev/null @@ -1,367 +0,0 @@ ---- -name: workflow-plan -description: 5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs. Triggers on "workflow:plan". -allowed-tools: Task, AskUserQuestion, TodoWrite, Read, Write, Edit, Bash, Glob, Grep, Skill ---- - -# Workflow Plan - -5-phase planning workflow that orchestrates session discovery, context gathering, conflict resolution, and task generation to produce implementation plans (IMPL_PLAN.md, task JSONs, TODO_LIST.md). - -## Architecture Overview - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ Workflow Plan Orchestrator (SKILL.md) │ -│ → Pure coordinator: Execute phases, parse outputs, pass context │ -└───────────────┬─────────────────────────────────────────────────┘ - │ - ┌───────────┼───────────┬───────────┬───────────┐ - ↓ ↓ ↓ ↓ ↓ -┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ -│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │Phase 3.5│ │ Phase 4 │ -│ Session │ │ Context │ │Conflict │ │ Gate │ │ Task │ -│Discovery│ │ Gather │ │Resolve │ │(Optional)│ │Generate │ -└─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘ - ↓ ↓ ↓ ↓ - sessionId contextPath resolved IMPL_PLAN.md - conflict_risk artifacts task JSONs - TODO_LIST.md -``` - -## Key Design Principles - -1. **Pure Orchestrator**: Execute phases in sequence, parse outputs, pass context between them -2. **Auto-Continue**: All phases run autonomously without user intervention between phases -3. **Task Attachment Model**: Sub-tasks are attached/collapsed dynamically in TodoWrite -4. **Progressive Phase Loading**: Phase docs are read on-demand, not all at once -5. 
**Conditional Execution**: Phase 3 only executes when conflict_risk >= medium - -## Auto Mode - -When `--yes` or `-y`: Auto-continue all phases (skip confirmations), use recommended conflict resolutions. - -## Usage - -``` -Skill(skill="workflow-plan", args="") -Skill(skill="workflow-plan", args="[-y|--yes] \"\"") - -# Flags --y, --yes Skip all confirmations (auto mode) - -# Arguments - Task description text, structured GOAL/SCOPE/CONTEXT, or path to .md file - -# Examples -Skill(skill="workflow-plan", args="\"Build authentication system\"") # Simple task -Skill(skill="workflow-plan", args="\"Add JWT auth with email/password and refresh\"") # Detailed task -Skill(skill="workflow-plan", args="-y \"Implement user profile page\"") # Auto mode -Skill(skill="workflow-plan", args="\"requirements.md\"") # From file -``` - -## Execution Flow - -``` -Input Parsing: - └─ Convert user input to structured format (GOAL/SCOPE/CONTEXT) - -Phase 1: Session Discovery - └─ Ref: phases/01-session-discovery.md - └─ Output: sessionId (WFS-xxx) - -Phase 2: Context Gathering - └─ Ref: phases/02-context-gathering.md - ├─ Tasks attached: Analyze structure → Identify integration → Generate package - └─ Output: contextPath + conflict_risk - -Phase 3: Conflict Resolution - └─ Decision (conflict_risk check): - ├─ conflict_risk ≥ medium → Ref: phases/03-conflict-resolution.md - │ ├─ Tasks attached: Detect conflicts → Present to user → Apply strategies - │ └─ Output: Modified brainstorm artifacts - └─ conflict_risk < medium → Skip to Phase 4 - -Phase 4: Task Generation - └─ Ref: phases/04-task-generation.md - └─ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md - -Return: - └─ Summary with recommended next steps -``` - -**Phase Reference Documents** (read on-demand when phase executes): - -| Phase | Document | Purpose | -|-------|----------|---------| -| 1 | [phases/01-session-discovery.md](phases/01-session-discovery.md) | Session creation/discovery with intelligent session management | -| 2 | [phases/02-context-gathering.md](phases/02-context-gathering.md) | Project context collection via context-search-agent | -| 3 | [phases/03-conflict-resolution.md](phases/03-conflict-resolution.md) | Conflict detection and resolution with CLI analysis | -| 4 | [phases/04-task-generation.md](phases/04-task-generation.md) | Implementation plan and task JSON generation | - -## Core Rules - -1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution -2. **No Preliminary Analysis**: Do not read files, analyze structure, or gather context before Phase 1 -3. **Parse Every Output**: Extract required data from each phase output for next phase -4. **Auto-Continue via TodoList**: Check TodoList status to execute next pending phase automatically -5. **Track Progress**: Update TodoWrite dynamically with task attachment/collapse pattern -6. **Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute -7. **DO NOT STOP**: Continuous multi-phase workflow. After completing each phase, immediately proceed to next - -## Input Processing - -**Convert User Input to Structured Format**: - -1. **Simple Text** → Structure it: - ``` - User: "Build authentication system" - - Structured: - GOAL: Build authentication system - SCOPE: Core authentication features - CONTEXT: New implementation - ``` - -2. 
**Detailed Text** → Extract components: - ``` - User: "Add JWT authentication with email/password login and token refresh" - - Structured: - GOAL: Implement JWT-based authentication - SCOPE: Email/password login, token generation, token refresh endpoints - CONTEXT: JWT token-based security, refresh token rotation - ``` - -3. **File Reference** (e.g., `requirements.md`) → Read and structure: - - Read file content - - Extract goal, scope, requirements - - Format into structured description - -## Data Flow - -``` -User Input (task description) - ↓ -[Convert to Structured Format] - ↓ Structured Description: - ↓ GOAL: [objective] - ↓ SCOPE: [boundaries] - ↓ CONTEXT: [background] - ↓ -Phase 1: session:start --auto "structured-description" - ↓ Output: sessionId - ↓ Write: planning-notes.md (User Intent section) - ↓ -Phase 2: context-gather --session sessionId "structured-description" - ↓ Input: sessionId + structured description - ↓ Output: contextPath (context-package.json with prioritized_context) + conflict_risk - ↓ Update: planning-notes.md (Context Findings + Consolidated Constraints) - ↓ -Phase 3: conflict-resolution [AUTO-TRIGGERED if conflict_risk ≥ medium] - ↓ Input: sessionId + contextPath + conflict_risk - ↓ Output: Modified brainstorm artifacts - ↓ Update: planning-notes.md (Conflict Decisions + Consolidated Constraints) - ↓ Skip if conflict_risk is none/low → proceed directly to Phase 4 - ↓ -Phase 4: task-generate-agent --session sessionId - ↓ Input: sessionId + planning-notes.md + context-package.json + brainstorm artifacts - ↓ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md - ↓ -Return summary to user -``` - -**Session Memory Flow**: Each phase receives session ID, which provides access to: -- Previous task summaries -- Existing context and analysis -- Brainstorming artifacts (potentially modified by Phase 3) -- Session-specific configuration - -## TodoWrite Pattern - -**Core Concept**: Dynamic task attachment and collapse for real-time visibility into workflow execution. - -### Key Principles - -1. **Task Attachment** (when phase executed): - - Sub-command's internal tasks are **attached** to orchestrator's TodoWrite - - **Phase 2, 3**: Multiple sub-tasks attached (e.g., Phase 2.1, 2.2, 2.3) - - **Phase 4**: Single agent task attached - - First attached task marked as `in_progress`, others as `pending` - - Orchestrator **executes** these attached tasks sequentially - -2. **Task Collapse** (after sub-tasks complete): - - **Applies to Phase 2, 3**: Remove detailed sub-tasks from TodoWrite - - **Collapse** to high-level phase summary - - **Phase 4**: No collapse needed (single task, just mark completed) - - Maintains clean orchestrator-level view - -3. **Continuous Execution**: - - After completion, automatically proceed to next pending phase - - No user intervention required between phases - - TodoWrite dynamically reflects current execution state - -**Lifecycle**: Initial pending tasks → Phase executed (tasks ATTACHED) → Sub-tasks executed sequentially → Phase completed (tasks COLLAPSED) → Next phase begins → Repeat until all phases complete. 
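As a rough illustration of the attach/collapse mechanics in the document's pseudo-JavaScript, the helpers below sketch how an orchestrator might rewrite its todo list (helper names and todo shapes are assumptions; the skill only prescribes the resulting TodoWrite states shown in the next section):

```javascript
// Illustrative helpers, not part of the skill API.
// Attach: expand a phase into its sub-tasks, first sub-task in_progress.
function attachSubTasks(todos, phaseLabel, subTaskLabels) {
  return todos.flatMap(t => {
    if (t.content !== phaseLabel) return [t]
    const subs = subTaskLabels.map((label, i) => ({
      content: ` → ${label}`,
      status: i === 0 ? "in_progress" : "pending",
      activeForm: label
    }))
    return [{ ...t, status: "in_progress" }, ...subs]
  })
}

// Collapse: drop the attached sub-tasks and mark the phase completed.
function collapseSubTasks(todos, phaseLabel) {
  return todos
    .filter(t => !t.content.startsWith(" → "))
    .map(t => (t.content === phaseLabel ? { ...t, status: "completed" } : t))
}
```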
- -## Phase-Specific TodoWrite Updates - -### Phase 2 (Tasks Attached): -```json -[ - {"content": "Phase 1: Session Discovery", "status": "completed"}, - {"content": "Phase 2: Context Gathering", "status": "in_progress"}, - {"content": " → Analyze codebase structure", "status": "in_progress"}, - {"content": " → Identify integration points", "status": "pending"}, - {"content": " → Generate context package", "status": "pending"}, - {"content": "Phase 4: Task Generation", "status": "pending"} -] -``` - -### Phase 2 (Collapsed): -```json -[ - {"content": "Phase 1: Session Discovery", "status": "completed"}, - {"content": "Phase 2: Context Gathering", "status": "completed"}, - {"content": "Phase 4: Task Generation", "status": "pending"} -] -``` - -### Phase 3 (Conditional, Tasks Attached): -```json -[ - {"content": "Phase 1: Session Discovery", "status": "completed"}, - {"content": "Phase 2: Context Gathering", "status": "completed"}, - {"content": "Phase 3: Conflict Resolution", "status": "in_progress"}, - {"content": " → Detect conflicts with CLI analysis", "status": "in_progress"}, - {"content": " → Present conflicts to user", "status": "pending"}, - {"content": " → Apply resolution strategies", "status": "pending"}, - {"content": "Phase 4: Task Generation", "status": "pending"} -] -``` - -## Planning Notes Template - -After Phase 1, create `planning-notes.md` with this structure: - -```markdown -# Planning Notes - -**Session**: ${sessionId} -**Created**: ${timestamp} - -## User Intent (Phase 1) - -- **GOAL**: ${userGoal} -- **KEY_CONSTRAINTS**: ${userConstraints} - ---- - -## Context Findings (Phase 2) -(To be filled by context-gather) - -## Conflict Decisions (Phase 3) -(To be filled if conflicts detected) - -## Consolidated Constraints (Phase 4 Input) -1. ${userConstraints} - ---- - -## Task Generation (Phase 4) -(To be filled by action-planning-agent) - -## N+1 Context -### Decisions -| Decision | Rationale | Revisit? | -|----------|-----------|----------| - -### Deferred -- [ ] (For N+1) -``` - -## Post-Phase Updates - -### After Phase 2 - -Read context-package to extract key findings, update planning-notes.md: -- `Context Findings (Phase 2)`: CRITICAL_FILES, ARCHITECTURE, CONFLICT_RISK, CONSTRAINTS -- `Consolidated Constraints`: Append Phase 2 constraints - -### After Phase 3 - -If executed, read conflict-resolution.json, update planning-notes.md: -- `Conflict Decisions (Phase 3)`: RESOLVED, MODIFIED_ARTIFACTS, CONSTRAINTS -- `Consolidated Constraints`: Append Phase 3 planning constraints - -### Memory State Check - -After Phase 3, evaluate context window usage. If memory usage is high (>120K tokens): -```javascript -Skill(skill="compact") -``` - -## Phase 4 User Decision - -After Phase 4 completes, present user with action choices: - -```javascript -AskUserQuestion({ - questions: [{ - question: "Planning complete. What would you like to do next?", - header: "Next Action", - multiSelect: false, - options: [ - { - label: "Verify Plan Quality (Recommended)", - description: "Run quality verification to catch issues before execution." - }, - { - label: "Start Execution", - description: "Begin implementing tasks immediately." - }, - { - label: "Review Status Only", - description: "View task breakdown and session status without taking further action." 
- } - ] - }] -}); - -// Execute based on user choice -// "Verify Plan Quality" → Skill(skill="workflow:plan-verify", args="--session " + sessionId) -// "Start Execution" → Skill(skill="workflow:execute", args="--session " + sessionId) -// "Review Status Only" → Skill(skill="workflow:status", args="--session " + sessionId) -``` - -## Error Handling - -- **Parsing Failure**: If output parsing fails, retry command once, then report error -- **Validation Failure**: If validation fails, report which file/data is missing -- **Command Failure**: Keep phase `in_progress`, report error to user, do not proceed to next phase - -## Coordinator Checklist - -- **Pre-Phase**: Convert user input to structured format (GOAL/SCOPE/CONTEXT) -- Initialize TodoWrite before any command (Phase 3 added dynamically after Phase 2) -- Execute Phase 1 immediately with structured description -- Parse session ID from Phase 1 output, store in memory -- Pass session ID and structured description to Phase 2 command -- Parse context path from Phase 2 output, store in memory -- **Extract conflict_risk from context-package.json**: Determine Phase 3 execution -- **If conflict_risk >= medium**: Launch Phase 3 with sessionId and contextPath -- **If conflict_risk is none/low**: Skip Phase 3, proceed directly to Phase 4 -- **Build Phase 4 command**: `/workflow:tools:task-generate-agent --session [sessionId]` -- Verify all Phase 4 outputs -- Update TodoWrite after each phase -- After each phase, automatically continue to next phase based on TodoList status - -## Related Commands - -**Prerequisite Commands**: -- `/workflow:brainstorm:artifacts` - Optional: Generate role-based analyses before planning -- `/workflow:brainstorm:synthesis` - Optional: Refine brainstorm analyses with clarifications - -**Follow-up Commands**: -- `/workflow:plan-verify` - Recommended: Verify plan quality before execution -- `/workflow:status` - Review task breakdown and current progress -- `/workflow:execute` - Begin implementation of generated tasks diff --git a/.claude/skills/workflow-plan/phases/01-session-discovery.md b/.claude/skills/workflow-plan/phases/01-session-discovery.md deleted file mode 100644 index 3eda914a..00000000 --- a/.claude/skills/workflow-plan/phases/01-session-discovery.md +++ /dev/null @@ -1,281 +0,0 @@ -# Phase 1: Session Discovery - -Discover existing sessions or start new workflow session with intelligent session management and conflict detection. - -## Objective - -- Ensure project-level state exists (first-time initialization) -- Create or discover workflow session for the planning workflow -- Generate unique session ID (WFS-xxx format) -- Initialize session directory structure - -## Step 0: Initialize Project State (First-time Only) - -**Executed before all modes** - Ensures project-level state files exist by calling `/workflow:init`. 
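In the same pseudo-JavaScript style, this first-time check-and-delegate step could be summarized as below (the `ensureProjectState` wrapper is a hypothetical name; the canonical commands are the bash checks and `Skill(skill="workflow:init")` call that follow):

```javascript
// Illustrative wrapper around the checks below; runs once per project.
function ensureProjectState() {
  const techOk = Bash(`test -f .workflow/project-tech.json && echo OK || echo MISSING`).includes("OK")
  const guidelinesOk = Bash(`test -f .workflow/project-guidelines.json && echo OK || echo MISSING`).includes("OK")

  if (techOk && guidelinesOk) return "PROJECT_STATE: initialized"

  // Delegate full technical analysis to workflow:init (creates both state files).
  Skill(skill="workflow:init")
  return "PROJECT_STATE: initialized via workflow:init"
}
```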
- -### Check and Initialize -```bash -# Check if project state exists (both files required) -bash(test -f .workflow/project-tech.json && echo "TECH_EXISTS" || echo "TECH_NOT_FOUND") -bash(test -f .workflow/project-guidelines.json && echo "GUIDELINES_EXISTS" || echo "GUIDELINES_NOT_FOUND") -``` - -**If either NOT_FOUND**, delegate to `/workflow:init`: -```javascript -// Call workflow:init for intelligent project analysis -Skill(skill="workflow:init"); - -// Wait for init completion -// project-tech.json and project-guidelines.json will be created -``` - -**Output**: -- If BOTH_EXIST: `PROJECT_STATE: initialized` -- If NOT_FOUND: Calls `/workflow:init` → creates: - - `.workflow/project-tech.json` with full technical analysis - - `.workflow/project-guidelines.json` with empty scaffold - -**Note**: `/workflow:init` uses cli-explore-agent to build comprehensive project understanding (technology stack, architecture, key components). This step runs once per project. Subsequent executions skip initialization. - -## Execution - -### Step 1.1: Execute Session Start - -```javascript -Skill(skill="workflow:session:start", args="--auto \"[structured-task-description]\"") -``` - -**Task Description Structure**: -``` -GOAL: [Clear, concise objective] -SCOPE: [What's included/excluded] -CONTEXT: [Relevant background or constraints] -``` - -**Example**: -``` -GOAL: Build JWT-based authentication system -SCOPE: User registration, login, token validation -CONTEXT: Existing user database schema, REST API endpoints -``` - -### Step 1.2: Parse Output - -- Extract: `SESSION_ID: WFS-[id]` (store as `sessionId`) - -### Step 1.3: Validate - -- Session ID successfully extracted -- Session directory `.workflow/active/[sessionId]/` exists - -**Note**: Session directory contains `workflow-session.json` (metadata). Do NOT look for `manifest.json` here - it only exists in `.workflow/archives/` for archived sessions. - -### Step 1.4: Initialize Planning Notes - -Create `planning-notes.md` with N+1 context support: - -```javascript -const planningNotesPath = `.workflow/active/${sessionId}/planning-notes.md` -const userGoal = structuredDescription.goal -const userConstraints = structuredDescription.context || "None specified" - -Write(planningNotesPath, `# Planning Notes - -**Session**: ${sessionId} -**Created**: ${new Date().toISOString()} - -## User Intent (Phase 1) - -- **GOAL**: ${userGoal} -- **KEY_CONSTRAINTS**: ${userConstraints} - ---- - -## Context Findings (Phase 2) -(To be filled by context-gather) - -## Conflict Decisions (Phase 3) -(To be filled if conflicts detected) - -## Consolidated Constraints (Phase 4 Input) -1. ${userConstraints} - ---- - -## Task Generation (Phase 4) -(To be filled by action-planning-agent) - -## N+1 Context -### Decisions -| Decision | Rationale | Revisit? | -|----------|-----------|----------| - -### Deferred -- [ ] (For N+1) -`) -``` - -## Session Types - -The `--type` parameter classifies sessions for CCW dashboard organization: - -| Type | Description | Default For | -|------|-------------|-------------| -| `workflow` | Standard implementation (default) | `/workflow:plan` | -| `review` | Code review sessions | `/workflow:review-module-cycle` | -| `tdd` | TDD-based development | `/workflow:tdd-plan` | -| `test` | Test generation/fix sessions | `/workflow:test-fix-gen` | -| `docs` | Documentation sessions | `/memory:docs` | - -**Validation**: If `--type` is provided with invalid value, return error: -``` -ERROR: Invalid session type. 
Valid types: workflow, review, tdd, test, docs -``` - -## Mode 1: Discovery Mode (Default) - -### Usage -```bash -/workflow:session:start -``` - -### Step 1: List Active Sessions -```bash -bash(ls -1 .workflow/active/ 2>/dev/null | head -5) -``` - -### Step 2: Display Session Metadata -```bash -bash(cat .workflow/active/WFS-promptmaster-platform/workflow-session.json) -``` - -### Step 4: User Decision -Present session information and wait for user to select or create session. - -**Output**: `SESSION_ID: WFS-[user-selected-id]` - -## Mode 2: Auto Mode (Intelligent) - -### Usage -```bash -/workflow:session:start --auto "task description" -``` - -### Step 1: Check Active Sessions Count -```bash -bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | wc -l) -``` - -### Step 2a: No Active Sessions → Create New -```bash -# Generate session slug -bash(echo "implement OAuth2 auth" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-50) - -# Create directory structure -bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.process) -bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.task) -bash(mkdir -p .workflow/active/WFS-implement-oauth2-auth/.summaries) - -# Create metadata (include type field, default to "workflow" if not specified) -bash(echo '{"session_id":"WFS-implement-oauth2-auth","project":"implement OAuth2 auth","status":"planning","type":"workflow","created_at":"2024-12-04T08:00:00Z"}' > .workflow/active/WFS-implement-oauth2-auth/workflow-session.json) -``` - -**Output**: `SESSION_ID: WFS-implement-oauth2-auth` - -### Step 2b: Single Active Session → Check Relevance -```bash -# Extract session ID -bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename) - -# Read project name from metadata -bash(cat .workflow/active/WFS-promptmaster-platform/workflow-session.json | grep -o '"project":"[^"]*"' | cut -d'"' -f4) - -# Check keyword match (manual comparison) -# If task contains project keywords → Reuse session -# If task unrelated → Create new session (use Step 2a) -``` - -**Output (reuse)**: `SESSION_ID: WFS-promptmaster-platform` -**Output (new)**: `SESSION_ID: WFS-[new-slug]` - -### Step 2c: Multiple Active Sessions → Use First -```bash -# Get first active session -bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1 | xargs basename) - -# Output warning and session ID -# WARNING: Multiple active sessions detected -# SESSION_ID: WFS-first-session -``` - -## Mode 3: Force New Mode - -### Usage -```bash -/workflow:session:start --new "task description" -``` - -### Step 1: Generate Unique Session Slug -```bash -# Convert to slug -bash(echo "fix login bug" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-50) - -# Check if exists, add counter if needed -bash(ls .workflow/active/WFS-fix-login-bug 2>/dev/null && echo "WFS-fix-login-bug-2" || echo "WFS-fix-login-bug") -``` - -### Step 2: Create Session Structure -```bash -bash(mkdir -p .workflow/active/WFS-fix-login-bug/.process) -bash(mkdir -p .workflow/active/WFS-fix-login-bug/.task) -bash(mkdir -p .workflow/active/WFS-fix-login-bug/.summaries) -``` - -### Step 3: Create Metadata -```bash -# Include type field from --type parameter (default: "workflow") -bash(echo '{"session_id":"WFS-fix-login-bug","project":"fix login bug","status":"planning","type":"workflow","created_at":"2024-12-04T08:00:00Z"}' > .workflow/active/WFS-fix-login-bug/workflow-session.json) -``` - -**Output**: `SESSION_ID: WFS-fix-login-bug` - -## Execution Guideline - -- 
**Non-interrupting**: When called from other commands, this command completes and returns control to the caller without interrupting subsequent tasks. - -## Session ID Format - -- Pattern: `WFS-[lowercase-slug]` -- Characters: `a-z`, `0-9`, `-` only -- Max length: 50 characters -- Uniqueness: Add numeric suffix if collision (`WFS-auth-2`, `WFS-auth-3`) - -## Output Format Specification - -### Success -``` -SESSION_ID: WFS-session-slug -``` - -### Error -``` -ERROR: --auto mode requires task description -ERROR: Failed to create session directory -``` - -### Analysis (Auto Mode) -``` -ANALYSIS: Task relevance = high -DECISION: Reusing existing session -SESSION_ID: WFS-promptmaster-platform -``` - -## Output - -- **Variable**: `sessionId` (e.g., `WFS-implement-oauth2-auth`) -- **File**: `.workflow/active/{sessionId}/planning-notes.md` -- **TodoWrite**: Mark Phase 1 completed, Phase 2 in_progress - -## Next Phase - -Return to orchestrator showing Phase 1 results, then auto-continue to [Phase 2: Context Gathering](02-context-gathering.md). diff --git a/.claude/skills/workflow-plan/phases/02-context-gathering.md b/.claude/skills/workflow-plan/phases/02-context-gathering.md deleted file mode 100644 index ec73c3ba..00000000 --- a/.claude/skills/workflow-plan/phases/02-context-gathering.md +++ /dev/null @@ -1,427 +0,0 @@ -# Phase 2: Context Gathering - -Intelligently collect project context using context-search-agent based on task description, packages into standardized JSON. - -## Objective - -- Check for existing valid context-package before executing -- Assess task complexity and launch parallel exploration agents -- Invoke context-search-agent to analyze codebase -- Generate standardized `context-package.json` with prioritized context -- Detect conflict risk level for Phase 3 decision - -## Core Philosophy - -- **Agent Delegation**: Delegate all discovery to `context-search-agent` for autonomous execution -- **Detection-First**: Check for existing context-package before executing -- **Plan Mode**: Full comprehensive analysis (vs lightweight brainstorm mode) -- **Standardized Output**: Generate `.workflow/active/{session}/.process/context-package.json` - -## Execution Process - -``` -Input Parsing: - ├─ Parse flags: --session - └─ Parse: task_description (required) - -Step 1: Context-Package Detection - └─ Decision (existing package): - ├─ Valid package exists → Return existing (skip execution) - └─ No valid package → Continue to Step 2 - -Step 2: Complexity Assessment & Parallel Explore - ├─ Analyze task_description → classify Low/Medium/High - ├─ Select exploration angles (1-4 based on complexity) - ├─ Launch N cli-explore-agents in parallel - │ └─ Each outputs: exploration-{angle}.json - └─ Generate explorations-manifest.json - -Step 3: Invoke Context-Search Agent (with exploration input) - ├─ Phase 1: Initialization & Pre-Analysis - ├─ Phase 2: Multi-Source Discovery - │ ├─ Track 0: Exploration Synthesis (prioritize & deduplicate) - │ ├─ Track 1-4: Existing tracks - └─ Phase 3: Synthesis & Packaging - └─ Generate context-package.json with exploration_results - -Step 4: Output Verification - └─ Verify context-package.json contains exploration_results -``` - -## Execution Flow - -### Step 1: Context-Package Detection - -**Execute First** - Check if valid package already exists: - -```javascript -const contextPackagePath = `.workflow/${session_id}/.process/context-package.json`; - -if (file_exists(contextPackagePath)) { - const existing = Read(contextPackagePath); - - // Validate package 
belongs to current session - if (existing?.metadata?.session_id === session_id) { - console.log("Valid context-package found for session:", session_id); - console.log("Stats:", existing.statistics); - console.log("Conflict Risk:", existing.conflict_detection.risk_level); - return existing; // Skip execution, return existing - } else { - console.warn("Invalid session_id in existing package, re-generating..."); - } -} -``` - -### Step 2: Complexity Assessment & Parallel Explore - -**Only execute if Step 1 finds no valid package** - -```javascript -// 2.1 Complexity Assessment -function analyzeTaskComplexity(taskDescription) { - const text = taskDescription.toLowerCase(); - if (/architect|refactor|restructure|modular|cross-module/.test(text)) return 'High'; - if (/multiple|several|integrate|migrate|extend/.test(text)) return 'Medium'; - return 'Low'; -} - -const ANGLE_PRESETS = { - architecture: ['architecture', 'dependencies', 'modularity', 'integration-points'], - security: ['security', 'auth-patterns', 'dataflow', 'validation'], - performance: ['performance', 'bottlenecks', 'caching', 'data-access'], - bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], - feature: ['patterns', 'integration-points', 'testing', 'dependencies'], - refactor: ['architecture', 'patterns', 'dependencies', 'testing'] -}; - -function selectAngles(taskDescription, complexity) { - const text = taskDescription.toLowerCase(); - let preset = 'feature'; - if (/refactor|architect|restructure/.test(text)) preset = 'architecture'; - else if (/security|auth|permission/.test(text)) preset = 'security'; - else if (/performance|slow|optimi/.test(text)) preset = 'performance'; - else if (/fix|bug|error|issue/.test(text)) preset = 'bugfix'; - - const count = complexity === 'High' ? 4 : (complexity === 'Medium' ? 3 : 1); - return ANGLE_PRESETS[preset].slice(0, count); -} - -const complexity = analyzeTaskComplexity(task_description); -const selectedAngles = selectAngles(task_description, complexity); -const sessionFolder = `.workflow/active/${session_id}/.process`; - -// 2.2 Launch Parallel Explore Agents -const explorationTasks = selectedAngles.map((angle, index) => - Task( - subagent_type="cli-explore-agent", - run_in_background=false, - description=`Explore: ${angle}`, - prompt=` -## Task Objective -Execute **${angle}** exploration for task planning context. Analyze codebase from this specific angle to discover relevant structure, patterns, and constraints. - -## Assigned Context -- **Exploration Angle**: ${angle} -- **Task Description**: ${task_description} -- **Session ID**: ${session_id} -- **Exploration Index**: ${index + 1} of ${selectedAngles.length} -- **Output File**: ${sessionFolder}/exploration-${angle}.json - -## MANDATORY FIRST STEPS (Execute by Agent) -**You (cli-explore-agent) MUST execute these steps in order:** -1. Run: ccw tool exec get_modules_by_depth '{}' (project structure) -2. Run: rg -l "{keyword_from_task}" --type ts (locate relevant files) -3. Execute: cat ~/.claude/workflows/cli-templates/schemas/explore-json-schema.json (get output schema reference) - -## Exploration Strategy (${angle} focus) - -**Step 1: Structural Scan** (Bash) -- get_modules_by_depth.sh → identify modules related to ${angle} -- find/rg → locate files relevant to ${angle} aspect -- Analyze imports/dependencies from ${angle} perspective - -**Step 2: Semantic Analysis** (Gemini CLI) -- How does existing code handle ${angle} concerns? -- What patterns are used for ${angle}? 
-- Where would new code integrate from ${angle} viewpoint? - -**Step 3: Write Output** -- Consolidate ${angle} findings into JSON -- Identify ${angle}-specific clarification needs - -## Expected Output - -**File**: ${sessionFolder}/exploration-${angle}.json - -**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 3, follow schema exactly - -**Required Fields** (all ${angle} focused): -- project_structure: Modules/architecture relevant to ${angle} -- relevant_files: Files affected from ${angle} perspective - **IMPORTANT**: Use object format with relevance scores for synthesis: - \`[{path: "src/file.ts", relevance: 0.85, rationale: "Core ${angle} logic"}]\` - Scores: 0.7+ high priority, 0.5-0.7 medium, <0.5 low -- patterns: ${angle}-related patterns to follow -- dependencies: Dependencies relevant to ${angle} -- integration_points: Where to integrate from ${angle} viewpoint (include file:line locations) -- constraints: ${angle}-specific limitations/conventions -- clarification_needs: ${angle}-related ambiguities (options array + recommended index) -- _metadata.exploration_angle: "${angle}" - -## Success Criteria -- [ ] Schema obtained via cat explore-json-schema.json -- [ ] get_modules_by_depth.sh executed -- [ ] At least 3 relevant files identified with ${angle} rationale -- [ ] Patterns are actionable (code examples, not generic advice) -- [ ] Integration points include file:line locations -- [ ] Constraints are project-specific to ${angle} -- [ ] JSON output follows schema exactly -- [ ] clarification_needs includes options + recommended - -## Output -Write: ${sessionFolder}/exploration-${angle}.json -Return: 2-3 sentence summary of ${angle} findings -` - ) -); - -// 2.3 Generate Manifest after all complete -const explorationFiles = bash(`find ${sessionFolder} -name "exploration-*.json" -type f`).split('\n').filter(f => f.trim()); -const explorationManifest = { - session_id, - task_description, - timestamp: new Date().toISOString(), - complexity, - exploration_count: selectedAngles.length, - angles_explored: selectedAngles, - explorations: explorationFiles.map(file => { - const data = JSON.parse(Read(file)); - return { angle: data._metadata.exploration_angle, file: file.split('/').pop(), path: file, index: data._metadata.exploration_index }; - }) -}; -Write(`${sessionFolder}/explorations-manifest.json`, JSON.stringify(explorationManifest, null, 2)); -``` - -### Step 3: Invoke Context-Search Agent - -**Only execute after Step 2 completes** - -```javascript -// Load user intent from planning-notes.md (from Phase 1) -const planningNotesPath = `.workflow/active/${session_id}/planning-notes.md`; -let userIntent = { goal: task_description, key_constraints: "None specified" }; - -if (file_exists(planningNotesPath)) { - const notesContent = Read(planningNotesPath); - const goalMatch = notesContent.match(/\*\*GOAL\*\*:\s*(.+)/); - const constraintsMatch = notesContent.match(/\*\*KEY_CONSTRAINTS\*\*:\s*(.+)/); - if (goalMatch) userIntent.goal = goalMatch[1].trim(); - if (constraintsMatch) userIntent.key_constraints = constraintsMatch[1].trim(); -} - -Task( - subagent_type="context-search-agent", - run_in_background=false, - description="Gather comprehensive context for plan", - prompt=` -## Execution Mode -**PLAN MODE** (Comprehensive) - Full Phase 1-3 execution with priority sorting - -## Session Information -- **Session ID**: ${session_id} -- **Task Description**: ${task_description} -- **Output Path**: .workflow/${session_id}/.process/context-package.json - -## User Intent (from 
Phase 1 - Planning Notes) -**GOAL**: ${userIntent.goal} -**KEY_CONSTRAINTS**: ${userIntent.key_constraints} - -This is the PRIMARY context source - all subsequent analysis must align with user intent. - -## Exploration Input (from Step 2) -- **Manifest**: ${sessionFolder}/explorations-manifest.json -- **Exploration Count**: ${explorationManifest.exploration_count} -- **Angles**: ${explorationManifest.angles_explored.join(', ')} -- **Complexity**: ${complexity} - -## Mission -Execute complete context-search-agent workflow for implementation planning: - -### Phase 1: Initialization & Pre-Analysis -1. **Project State Loading**: - - Read and parse \`.workflow/project-tech.json\`. Use its \`overview\` section as the foundational \`project_context\`. This is your primary source for architecture, tech stack, and key components. - - Read and parse \`.workflow/project-guidelines.json\`. Load \`conventions\`, \`constraints\`, and \`learnings\` into a \`project_guidelines\` section. - - If files don't exist, proceed with fresh analysis. -2. **Detection**: Check for existing context-package (early exit if valid) -3. **Foundation**: Initialize CodexLens, get project structure, load docs -4. **Analysis**: Extract keywords, determine scope, classify complexity based on task description and project state - -### Phase 2: Multi-Source Context Discovery -Execute all discovery tracks (WITH USER INTENT INTEGRATION): -- **Track -1**: User Intent & Priority Foundation (EXECUTE FIRST) - - Load user intent (GOAL, KEY_CONSTRAINTS) from session input - - Map user requirements to codebase entities (files, modules, patterns) - - Establish baseline priority scores based on user goal alignment - - Output: user_intent_mapping.json with preliminary priority scores - -- **Track 0**: Exploration Synthesis (load ${sessionFolder}/explorations-manifest.json, prioritize critical_files, deduplicate patterns/integration_points) -- **Track 1**: Historical archive analysis (query manifest.json for lessons learned) -- **Track 2**: Reference documentation (CLAUDE.md, architecture docs) -- **Track 3**: Web examples (use Exa MCP for unfamiliar tech/APIs) -- **Track 4**: Codebase analysis (5-layer discovery: files, content, patterns, deps, config/tests) - -### Phase 3: Synthesis, Assessment & Packaging -1. Apply relevance scoring and build dependency graph -2. **Synthesize 5-source data** (including Track -1): Merge findings from all sources - - Priority order: User Intent > Archive > Docs > Exploration > Code > Web - - **Prioritize the context from \`project-tech.json\`** for architecture and tech stack unless code analysis reveals it's outdated -3. **Context Priority Sorting**: - a. Combine scores from Track -1 (user intent alignment) + relevance scores + exploration critical_files - b. Classify files into priority tiers: - - **Critical** (score >= 0.85): Directly mentioned in user goal OR exploration critical_files - - **High** (0.70-0.84): Key dependencies, patterns required for goal - - **Medium** (0.50-0.69): Supporting files, indirect dependencies - - **Low** (< 0.50): Contextual awareness only - c. Generate dependency_order: Based on dependency graph + user goal sequence - d. Document sorting_rationale: Explain prioritization logic - -4. **Populate \`project_context\`**: Directly use the \`overview\` from \`project-tech.json\` to fill the \`project_context\` section. Include description, technology_stack, architecture, and key_components. -5. 
**Populate \`project_guidelines\`**: Load conventions, constraints, and learnings from \`project-guidelines.json\` into a dedicated section. -6. Integrate brainstorm artifacts (if .brainstorming/ exists, read content) -7. Perform conflict detection with risk assessment -8. **Inject historical conflicts** from archive analysis into conflict_detection -9. **Generate prioritized_context section**: - \`\`\`json - { - "prioritized_context": { - "user_intent": { - "goal": "...", - "scope": "...", - "key_constraints": ["..."] - }, - "priority_tiers": { - "critical": [{ "path": "...", "relevance": 0.95, "rationale": "..." }], - "high": [...], - "medium": [...], - "low": [...] - }, - "dependency_order": ["module1", "module2", "module3"], - "sorting_rationale": "Based on user goal alignment (Track -1), exploration critical files, and dependency graph analysis" - } - } - \`\`\` -10. Generate and validate context-package.json with prioritized_context field - -## Output Requirements -Complete context-package.json with: -- **metadata**: task_description, keywords, complexity, tech_stack, session_id -- **project_context**: description, technology_stack, architecture, key_components (sourced from \`project-tech.json\`) -- **project_guidelines**: {conventions, constraints, quality_rules, learnings} (sourced from \`project-guidelines.json\`) -- **assets**: {documentation[], source_code[], config[], tests[]} with relevance scores -- **dependencies**: {internal[], external[]} with dependency graph -- **brainstorm_artifacts**: {guidance_specification, role_analyses[], synthesis_output} with content -- **conflict_detection**: {risk_level, risk_factors, affected_modules[], mitigation_strategy, historical_conflicts[]} -- **exploration_results**: {manifest_path, exploration_count, angles, explorations[], aggregated_insights} (from Track 0) -- **prioritized_context**: {user_intent, priority_tiers{critical, high, medium, low}, dependency_order[], sorting_rationale} - -## Quality Validation -Before completion verify: -- [ ] Valid JSON format with all required fields -- [ ] File relevance accuracy >80% -- [ ] Dependency graph complete (max 2 transitive levels) -- [ ] Conflict risk level calculated correctly -- [ ] No sensitive data exposed -- [ ] Total files <=50 (prioritize high-relevance) - -## Planning Notes Record (REQUIRED) -After completing context-package.json, append a brief execution record to planning-notes.md: - -**File**: .workflow/active/${session_id}/planning-notes.md -**Location**: Under "## Context Findings (Phase 2)" section -**Format**: -\`\`\` -### [Context-Search Agent] YYYY-MM-DD -- **Note**: [brief summary of key findings] -\`\`\` - -Execute autonomously following agent documentation. -Report completion with statistics. 
-` -) -``` - -### Step 4: Output Verification - -After agent completes, verify output: - -```javascript -// Verify file was created -const outputPath = `.workflow/${session_id}/.process/context-package.json`; -if (!file_exists(outputPath)) { - throw new Error("Agent failed to generate context-package.json"); -} - -// Verify exploration_results included -const pkg = JSON.parse(Read(outputPath)); -if (pkg.exploration_results?.exploration_count > 0) { - console.log(`Exploration results aggregated: ${pkg.exploration_results.exploration_count} angles`); -} -``` - -## Parameter Reference - -| Parameter | Type | Required | Description | -|-----------|------|----------|-------------| -| `--session` | string | Yes | Workflow session ID (e.g., WFS-user-auth) | -| `task_description` | string | Yes | Detailed task description for context extraction | - -## Post-Phase Update - -After context-gather completes, update planning-notes.md: - -```javascript -const contextPackage = JSON.parse(Read(contextPath)) -const conflictRisk = contextPackage.conflict_detection?.risk_level || 'low' -const criticalFiles = (contextPackage.exploration_results?.aggregated_insights?.critical_files || []) - .slice(0, 5).map(f => f.path) -const archPatterns = contextPackage.project_context?.architecture_patterns || [] -const constraints = contextPackage.exploration_results?.aggregated_insights?.constraints || [] - -// Update Phase 2 section -Edit(planningNotesPath, { - old: '## Context Findings (Phase 2)\n(To be filled by context-gather)', - new: `## Context Findings (Phase 2) - -- **CRITICAL_FILES**: ${criticalFiles.join(', ') || 'None identified'} -- **ARCHITECTURE**: ${archPatterns.join(', ') || 'Not detected'} -- **CONFLICT_RISK**: ${conflictRisk} -- **CONSTRAINTS**: ${constraints.length > 0 ? constraints.join('; ') : 'None'}` -}) - -// Append Phase 2 constraints to consolidated list -Edit(planningNotesPath, { - old: '## Consolidated Constraints (Phase 4 Input)', - new: `## Consolidated Constraints (Phase 4 Input) -${constraints.map((c, i) => `${i + 2}. [Context] ${c}`).join('\n')}` -}) -``` - -## Notes - -- **Detection-first**: Always check for existing package before invoking agent -- **User intent integration**: Load user intent from planning-notes.md (Phase 1 output) -- **Output**: Generates `context-package.json` with `prioritized_context` field -- **Plan-specific**: Use this for implementation planning; brainstorm mode uses direct agent call - -## Output - -- **Variable**: `contextPath` (e.g., `.workflow/active/WFS-xxx/.process/context-package.json`) -- **Variable**: `conflictRisk` (none/low/medium/high) -- **File**: Updated `planning-notes.md` with context findings -- **Decision**: If `conflictRisk >= medium` → Phase 3, else → Phase 4 - -## Next Phase - -Return to orchestrator showing Phase 2 results, then auto-continue: -- If `conflict_risk >= medium` → [Phase 3: Conflict Resolution](03-conflict-resolution.md) -- If `conflict_risk < medium` → [Phase 4: Task Generation](04-task-generation.md) diff --git a/.claude/skills/workflow-plan/phases/03-conflict-resolution.md b/.claude/skills/workflow-plan/phases/03-conflict-resolution.md deleted file mode 100644 index a901eb5c..00000000 --- a/.claude/skills/workflow-plan/phases/03-conflict-resolution.md +++ /dev/null @@ -1,645 +0,0 @@ -# Phase 3: Conflict Resolution - -Detect and resolve conflicts between plan and existing codebase using CLI-powered analysis with Gemini/Qwen. 
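To make the tool chain concrete, the sketch below shows the primary/fallback invocation pattern this phase relies on. It is a minimal sketch only: the prompt is abbreviated, and `projectRoot` plus an error-raising `bash()` helper are assumptions for illustration; the authoritative PURPOSE/TASK prompt and flags are defined in the Phase 2 agent delegation section.

```javascript
// Minimal sketch of the Gemini-primary / Qwen-fallback analysis chain described in this phase.
// Assumes the orchestrator's bash() helper throws on a non-zero exit and that projectRoot is defined;
// the full PURPOSE/TASK prompt and flags are specified in "Phase 2: CLI-Powered Analysis" below.
let conflictReport = null;
for (const tool of ['gemini', 'qwen']) {
  try {
    conflictReport = bash(
      `ccw cli -p "[conflict analysis prompt]" --tool ${tool} --mode analysis --rule analysis-code-patterns --cd ${projectRoot}`
    );
    break; // first tool that succeeds wins
  } catch (error) {
    // Fall through to the next tool; if both CLIs fail, Claude performs the analysis manually.
  }
}
```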
- -## Objective - -- Analyze conflicts between plan and existing code, **including module scenario uniqueness detection** -- Generate multiple resolution strategies with **iterative clarification until boundaries are clear** -- Apply selected modifications to brainstorm artifacts - -**Scope**: Detection and strategy generation only - NO code modification or task creation. - -**Trigger**: Auto-executes when `conflict_risk >= medium`. - -## Auto Mode - -When `--yes` or `-y`: Auto-select recommended strategy for each conflict, skip clarification questions. - -## Core Responsibilities - -| Responsibility | Description | -|---------------|-------------| -| **Detect Conflicts** | Analyze plan vs existing code inconsistencies | -| **Scenario Uniqueness** | Search and compare new modules with existing modules for functional overlaps | -| **Generate Strategies** | Provide 2-4 resolution options per conflict | -| **Iterative Clarification** | Ask unlimited questions until scenario boundaries are clear and unique | -| **Agent Re-analysis** | Dynamically update strategies based on user clarifications | -| **CLI Analysis** | Use Gemini/Qwen (Claude fallback) | -| **User Decision** | Present options ONE BY ONE, never auto-apply | -| **Direct Text Output** | Output questions via text directly, NEVER use bash echo/printf | -| **Structured Data** | JSON output for programmatic processing, NO file generation | - -## Conflict Categories - -### 1. Architecture Conflicts -- Incompatible design patterns -- Module structure changes -- Pattern migration requirements - -### 2. API Conflicts -- Breaking contract changes -- Signature modifications -- Public interface impacts - -### 3. Data Model Conflicts -- Schema modifications -- Type breaking changes -- Data migration needs - -### 4. Dependency Conflicts -- Version incompatibilities -- Setup conflicts -- Breaking updates - -### 5. Module Scenario Overlap -- Functional overlap between new and existing modules -- Scenario boundary ambiguity -- Duplicate responsibility detection -- Module merge/split decisions -- **Requires iterative clarification until uniqueness confirmed** - -## Execution Process - -``` -Input Parsing: - ├─ Parse flags: --session, --context - └─ Validation: Both REQUIRED, conflict_risk >= medium - -Phase 1: Validation - ├─ Step 1: Verify session directory exists - ├─ Step 2: Load context-package.json - ├─ Step 3: Check conflict_risk (skip if none/low) - └─ Step 4: Prepare agent task prompt - -Phase 2: CLI-Powered Analysis (Agent) - ├─ Execute Gemini analysis (Qwen fallback) - ├─ Detect conflicts including ModuleOverlap category - └─ Generate 2-4 strategies per conflict with modifications - -Phase 3: Iterative User Interaction - └─ FOR each conflict (one by one): - ├─ Display conflict with overlap_analysis (if ModuleOverlap) - ├─ Display strategies (2-4 + custom option) - ├─ User selects strategy - └─ IF clarification_needed: - ├─ Collect answers - ├─ Agent re-analysis - └─ Loop until uniqueness_confirmed (max 10 rounds) - -Phase 4: Apply Modifications - ├─ Step 1: Extract modifications from resolved strategies - ├─ Step 2: Apply using Edit tool - ├─ Step 3: Update context-package.json (mark resolved) - └─ Step 4: Output custom conflict summary (if any) -``` - -## Execution Flow - -### Phase 1: Validation -``` -1. Verify session directory exists -2. Load context-package.json -3. Check conflict_risk (skip if none/low) -4. 
Prepare agent task prompt -``` - -### Phase 2: CLI-Powered Analysis - -**Agent Delegation**: -```javascript -Task(subagent_type="cli-execution-agent", run_in_background=false, prompt=` - ## Context - - Session: {session_id} - - Risk: {conflict_risk} - - Files: {existing_files_list} - - ## Exploration Context (from context-package.exploration_results) - - Exploration Count: ${contextPackage.exploration_results?.exploration_count || 0} - - Angles Analyzed: ${JSON.stringify(contextPackage.exploration_results?.angles || [])} - - Pre-identified Conflict Indicators: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.conflict_indicators || [])} - - Critical Files: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.critical_files?.map(f => f.path) || [])} - - All Patterns: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.all_patterns || [])} - - All Integration Points: ${JSON.stringify(contextPackage.exploration_results?.aggregated_insights?.all_integration_points || [])} - - ## Analysis Steps - - ### 0. Load Output Schema (MANDATORY) - Execute: cat ~/.claude/workflows/cli-templates/schemas/conflict-resolution-schema.json - - ### 1. Load Context - - Read existing files from conflict_detection.existing_files - - Load plan from .workflow/active/{session_id}/.process/context-package.json - - Load exploration_results and use aggregated_insights for enhanced analysis - - Extract role analyses and requirements - - ### 2. Execute CLI Analysis (Enhanced with Exploration + Scenario Uniqueness) - - Primary (Gemini): - ccw cli -p " - PURPOSE: Detect conflicts between plan and codebase, using exploration insights - TASK: - • **Review pre-identified conflict_indicators from exploration results** - • Compare architectures (use exploration key_patterns) - • Identify breaking API changes - • Detect data model incompatibilities - • Assess dependency conflicts - • **Analyze module scenario uniqueness** - - Use exploration integration_points for precise locations - - Cross-validate with exploration critical_files - - Generate clarification questions for boundary definition - MODE: analysis - CONTEXT: @**/*.ts @**/*.js @**/*.tsx @**/*.jsx @.workflow/active/{session_id}/**/* - EXPECTED: Conflict list with severity ratings, including: - - Validation of exploration conflict_indicators - - ModuleOverlap conflicts with overlap_analysis - - Targeted clarification questions - CONSTRAINTS: Focus on breaking changes, migration needs, and functional overlaps | Prioritize exploration-identified conflicts | analysis=READ-ONLY - " --tool gemini --mode analysis --rule analysis-code-patterns --cd {project_root} - - Fallback: Qwen (same prompt) → Claude (manual analysis) - - ### 3. Generate Strategies (2-4 per conflict) - - Template per conflict: - - Severity: Critical/High/Medium - - Category: Architecture/API/Data/Dependency/ModuleOverlap - - Affected files + impact - - **For ModuleOverlap**: Include overlap_analysis with existing modules and scenarios - - Options with pros/cons, effort, risk - - **For ModuleOverlap strategies**: Add clarification_needed questions for boundary definition - - Recommended strategy + rationale - - ### 4. Return Structured Conflict Data - - ⚠️ Output to conflict-resolution.json (generated in Phase 4) - - **Schema Reference**: Execute \`cat ~/.claude/workflows/cli-templates/schemas/conflict-resolution-schema.json\` to get full schema - - Return JSON following the schema above. 
Key requirements: - - Minimum 2 strategies per conflict, max 4 - - All text in Chinese for user-facing fields (brief, name, pros, cons, modification_suggestions) - - modifications.old_content: 20-100 chars for unique Edit tool matching - - modifications.new_content: preserves markdown formatting - - modification_suggestions: 2-5 actionable suggestions for custom handling - - ### 5. Planning Notes Record (REQUIRED) - After analysis complete, append a brief execution record to planning-notes.md: - - **File**: .workflow/active/{session_id}/planning-notes.md - **Location**: Under "## Conflict Decisions (Phase 3)" section - **Format**: - \`\`\` - ### [Conflict-Resolution Agent] YYYY-MM-DD - - **Note**: [brief summary of conflict types, resolution strategies, key decisions] - \`\`\` -`) -``` - -### Phase 3: User Interaction Loop - -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -FOR each conflict: - round = 0, clarified = false, userClarifications = [] - - WHILE (!clarified && round++ < 10): - // 1. Display conflict info (text output for context) - displayConflictSummary(conflict) // id, brief, severity, overlap_analysis if ModuleOverlap - - // 2. Strategy selection - if (autoYes) { - console.log(`[--yes] Auto-selecting recommended strategy`) - selectedStrategy = conflict.strategies[conflict.recommended || 0] - clarified = true // Skip clarification loop - } else { - AskUserQuestion({ - questions: [{ - question: formatStrategiesForDisplay(conflict.strategies), - header: "策略选择", - multiSelect: false, - options: [ - ...conflict.strategies.map((s, i) => ({ - label: `${s.name}${i === conflict.recommended ? ' (推荐)' : ''}`, - description: `${s.complexity}复杂度 | ${s.risk}风险${s.clarification_needed?.length ? ' | ⚠️需澄清' : ''}` - })), - { label: "自定义修改", description: `建议: ${conflict.modification_suggestions?.slice(0,2).join('; ')}` } - ] - }] - }) - - // 3. Handle selection - if (userChoice === "自定义修改") { - customConflicts.push({ id, brief, category, suggestions, overlap_analysis }) - break - } - - selectedStrategy = findStrategyByName(userChoice) - } - - // 4. Clarification (if needed) - batched max 4 per call - if (!autoYes && selectedStrategy.clarification_needed?.length > 0) { - for (batch of chunk(selectedStrategy.clarification_needed, 4)) { - AskUserQuestion({ - questions: batch.map((q, i) => ({ - question: q, header: `澄清${i+1}`, multiSelect: false, - options: [{ label: "详细说明", description: "提供答案" }] - })) - }) - userClarifications.push(...collectAnswers(batch)) - } - - // 5. 
Agent re-analysis - reanalysisResult = Task({ - subagent_type: "cli-execution-agent", - run_in_background: false, - prompt: `Conflict: ${conflict.id}, Strategy: ${selectedStrategy.name} -User Clarifications: ${JSON.stringify(userClarifications)} -Output: { uniqueness_confirmed, rationale, updated_strategy, remaining_questions }` - }) - - if (reanalysisResult.uniqueness_confirmed) { - selectedStrategy = { ...reanalysisResult.updated_strategy, clarifications: userClarifications } - clarified = true - } else { - selectedStrategy.clarification_needed = reanalysisResult.remaining_questions - } - } else { - clarified = true - } - - if (clarified) resolvedConflicts.push({ conflict, strategy: selectedStrategy }) - END WHILE -END FOR - -selectedStrategies = resolvedConflicts.map(r => ({ - conflict_id: r.conflict.id, strategy: r.strategy, clarifications: r.strategy.clarifications || [] -})) -``` - -**Key Points**: -- AskUserQuestion: max 4 questions/call, batch if more -- Strategy options: 2-4 strategies + "自定义修改" -- Clarification loop: max 10 rounds, agent判断 uniqueness_confirmed -- Custom conflicts: 记录 overlap_analysis 供后续手动处理 - -### Phase 4: Apply Modifications - -```javascript -// 1. Extract modifications from resolved strategies -const modifications = []; -selectedStrategies.forEach(item => { - if (item.strategy && item.strategy.modifications) { - modifications.push(...item.strategy.modifications.map(mod => ({ - ...mod, - conflict_id: item.conflict_id, - clarifications: item.clarifications - }))); - } -}); - -console.log(`\n正在应用 ${modifications.length} 个修改...`); - -// 2. Apply each modification using Edit tool (with fallback to context-package.json) -const appliedModifications = []; -const failedModifications = []; -const fallbackConstraints = []; // For files that don't exist - -modifications.forEach((mod, idx) => { - try { - console.log(`[${idx + 1}/${modifications.length}] 修改 ${mod.file}...`); - - // Check if target file exists (brainstorm files may not exist in lite workflow) - if (!file_exists(mod.file)) { - console.log(` ⚠️ 文件不存在,写入 context-package.json 作为约束`); - fallbackConstraints.push({ - source: "conflict-resolution", - conflict_id: mod.conflict_id, - target_file: mod.file, - section: mod.section, - change_type: mod.change_type, - content: mod.new_content, - rationale: mod.rationale - }); - return; // Skip to next modification - } - - if (mod.change_type === "update") { - Edit({ - file_path: mod.file, - old_string: mod.old_content, - new_string: mod.new_content - }); - } else if (mod.change_type === "add") { - // Handle addition - append or insert based on section - const fileContent = Read(mod.file); - const updated = insertContentAfterSection(fileContent, mod.section, mod.new_content); - Write(mod.file, updated); - } else if (mod.change_type === "remove") { - Edit({ - file_path: mod.file, - old_string: mod.old_content, - new_string: "" - }); - } - - appliedModifications.push(mod); - console.log(` ✓ 成功`); - } catch (error) { - console.log(` ✗ 失败: ${error.message}`); - failedModifications.push({ ...mod, error: error.message }); - } -}); - -// 2b. 
Generate conflict-resolution.json output file -const resolutionOutput = { - session_id: sessionId, - resolved_at: new Date().toISOString(), - summary: { - total_conflicts: conflicts.length, - resolved_with_strategy: selectedStrategies.length, - custom_handling: customConflicts.length, - fallback_constraints: fallbackConstraints.length - }, - resolved_conflicts: selectedStrategies.map(s => ({ - conflict_id: s.conflict_id, - strategy_name: s.strategy.name, - strategy_approach: s.strategy.approach, - clarifications: s.clarifications || [], - modifications_applied: s.strategy.modifications?.filter(m => - appliedModifications.some(am => am.conflict_id === s.conflict_id) - ) || [] - })), - custom_conflicts: customConflicts.map(c => ({ - id: c.id, - brief: c.brief, - category: c.category, - suggestions: c.suggestions, - overlap_analysis: c.overlap_analysis || null - })), - planning_constraints: fallbackConstraints, // Constraints for files that don't exist - failed_modifications: failedModifications -}; - -const resolutionPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json`; -Write(resolutionPath, JSON.stringify(resolutionOutput, null, 2)); - -// 3. Update context-package.json with resolution details (reference to JSON file) -const contextPackage = JSON.parse(Read(contextPath)); -contextPackage.conflict_detection.conflict_risk = "resolved"; -contextPackage.conflict_detection.resolution_file = resolutionPath; // Reference to detailed JSON -contextPackage.conflict_detection.resolved_conflicts = selectedStrategies.map(s => s.conflict_id); -contextPackage.conflict_detection.custom_conflicts = customConflicts.map(c => c.id); -contextPackage.conflict_detection.resolved_at = new Date().toISOString(); -Write(contextPath, JSON.stringify(contextPackage, null, 2)); - -// 4. Output custom conflict summary with overlap analysis (if any) -if (customConflicts.length > 0) { - console.log(`\n${'='.repeat(60)}`); - console.log(`需要自定义处理的冲突 (${customConflicts.length})`); - console.log(`${'='.repeat(60)}\n`); - - customConflicts.forEach(conflict => { - console.log(`【${conflict.category}】${conflict.id}: ${conflict.brief}`); - - // Show overlap analysis for ModuleOverlap conflicts - if (conflict.category === 'ModuleOverlap' && conflict.overlap_analysis) { - console.log(`\n场景重叠信息:`); - console.log(` 新模块: ${conflict.overlap_analysis.new_module.name}`); - console.log(` 场景: ${conflict.overlap_analysis.new_module.scenarios.join(', ')}`); - console.log(`\n 与以下模块重叠:`); - conflict.overlap_analysis.existing_modules.forEach(mod => { - console.log(` - ${mod.name} (${mod.file})`); - console.log(` 重叠场景: ${mod.overlap_scenarios.join(', ')}`); - }); - } - - console.log(`\n修改建议:`); - conflict.suggestions.forEach(suggestion => { - console.log(` - ${suggestion}`); - }); - console.log(); - }); -} - -// 5. Output failure summary (if any) -if (failedModifications.length > 0) { - console.log(`\n⚠️ 部分修改失败 (${failedModifications.length}):`); - failedModifications.forEach(mod => { - console.log(` - ${mod.file}: ${mod.error}`); - }); -} - -// 6. 
Return summary -return { - total_conflicts: conflicts.length, - resolved_with_strategy: selectedStrategies.length, - custom_handling: customConflicts.length, - modifications_applied: appliedModifications.length, - modifications_failed: failedModifications.length, - modified_files: [...new Set(appliedModifications.map(m => m.file))], - custom_conflicts: customConflicts, - clarification_records: selectedStrategies.filter(s => s.clarifications.length > 0) -}; -``` - -**Validation**: -``` -✓ Agent returns valid JSON structure with ModuleOverlap conflicts -✓ Conflicts processed ONE BY ONE (not in batches) -✓ ModuleOverlap conflicts include overlap_analysis field -✓ Strategies with clarification_needed display questions -✓ User selections captured correctly per conflict -✓ Clarification loop continues until uniqueness confirmed -✓ Agent re-analysis returns uniqueness_confirmed and updated_strategy -✓ Maximum 10 rounds per conflict safety limit enforced -✓ Edit tool successfully applies modifications -✓ guidance-specification.md updated -✓ Role analyses (*.md) updated -✓ context-package.json marked as resolved with clarification records -✓ Custom conflicts display overlap_analysis for manual handling -✓ Agent log saved to .workflow/active/{session_id}/.chat/ -``` - -## Output Format - -### Primary Output: conflict-resolution.json - -**Path**: `.workflow/active/{session_id}/.process/conflict-resolution.json` - -**Schema**: -```json -{ - "session_id": "WFS-xxx", - "resolved_at": "ISO timestamp", - "summary": { - "total_conflicts": 3, - "resolved_with_strategy": 2, - "custom_handling": 1, - "fallback_constraints": 0 - }, - "resolved_conflicts": [ - { - "conflict_id": "CON-001", - "strategy_name": "策略名称", - "strategy_approach": "实现方法", - "clarifications": [], - "modifications_applied": [] - } - ], - "custom_conflicts": [ - { - "id": "CON-002", - "brief": "冲突摘要", - "category": "ModuleOverlap", - "suggestions": ["建议1", "建议2"], - "overlap_analysis": null - } - ], - "planning_constraints": [], - "failed_modifications": [] -} -``` - -### Key Requirements - -| Requirement | Details | -|------------|---------| -| **Conflict batching** | Max 10 conflicts per round (no total limit) | -| **Strategy count** | 2-4 strategies per conflict | -| **Modifications** | Each strategy includes file paths, old_content, new_content | -| **User-facing text** | Chinese (brief, strategy names, pros/cons) | -| **Technical fields** | English (severity, category, complexity, risk) | -| **old_content precision** | 20-100 chars for unique Edit tool matching | -| **File targets** | guidance-specification.md, role analyses (*.md) | - -## Error Handling - -### Recovery Strategy -``` -1. Pre-check: Verify conflict_risk ≥ medium -2. Monitor: Track agent via Task tool -3. Validate: Parse agent JSON output -4. Recover: - - Agent failure → check logs + report error - - Invalid JSON → retry once with Claude fallback - - CLI failure → fallback to Claude analysis - - Edit tool failure → report affected files + rollback option - - User cancels → mark as "unresolved", continue to task-generate -5. Degrade: If all fail, generate minimal conflict report and skip modifications -``` - -### Rollback Handling -``` -If Edit tool fails mid-application: -1. Log all successfully applied modifications -2. Output rollback option via text interaction -3. If rollback selected: restore files from git or backups -4. 
If continue: mark partial resolution in context-package.json -``` - -## Integration - -### Interface -**Input**: -- `--session` (required): WFS-{session-id} -- `--context` (required): context-package.json path -- Requires: `conflict_risk >= medium` - -**Output**: -- Generated file: - - `.workflow/active/{session_id}/.process/conflict-resolution.json` (primary output) -- Modified files (if exist): - - `.workflow/active/{session_id}/.brainstorm/guidance-specification.md` - - `.workflow/active/{session_id}/.brainstorm/{role}/analysis.md` - - `.workflow/active/{session_id}/.process/context-package.json` (conflict_risk → resolved, resolution_file reference) - -**User Interaction**: -- **Iterative conflict processing**: One conflict at a time, not in batches -- Each conflict: 2-4 strategy options + "自定义修改" option (with suggestions) -- **Clarification loop**: Unlimited questions per conflict until uniqueness confirmed (max 10 rounds) -- **ModuleOverlap conflicts**: Display overlap_analysis with existing modules -- **Agent re-analysis**: Dynamic strategy updates based on user clarifications - -### Success Criteria -``` -✓ CLI analysis returns valid JSON structure with ModuleOverlap category -✓ Agent performs scenario uniqueness detection (searches existing modules) -✓ Conflicts processed ONE BY ONE with iterative clarification -✓ Min 2 strategies per conflict with modifications -✓ ModuleOverlap conflicts include overlap_analysis with existing modules -✓ Strategies requiring clarification include clarification_needed questions -✓ Each conflict includes 2-5 modification_suggestions -✓ Text output displays conflict with overlap analysis (if ModuleOverlap) -✓ User selections captured per conflict -✓ Clarification loop continues until uniqueness confirmed (unlimited rounds, max 10) -✓ Agent re-analysis with user clarifications updates strategy -✓ Uniqueness confirmation based on clear scenario boundaries -✓ Edit tool applies modifications successfully -✓ Custom conflicts displayed with overlap_analysis for manual handling -✓ guidance-specification.md updated with resolved conflicts -✓ Role analyses (*.md) updated with resolved conflicts -✓ context-package.json marked as "resolved" with clarification records -✓ conflict-resolution.json generated with full resolution details -✓ Modification summary includes: - - Total conflicts - - Resolved with strategy (count) - - Custom handling (count) - - Clarification records - - Overlap analysis for custom ModuleOverlap conflicts -✓ Agent log saved to .workflow/active/{session_id}/.chat/ -✓ Error handling robust (validate/retry/degrade) -``` - -## Post-Phase Update - -If Phase 3 was executed, update planning-notes.md: - -```javascript -const conflictResPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json` - -if (file_exists(conflictResPath)) { - const conflictRes = JSON.parse(Read(conflictResPath)) - const resolved = conflictRes.resolved_conflicts || [] - const planningConstraints = conflictRes.planning_constraints || [] - - // Update Phase 3 section - Edit(planningNotesPath, { - old: '## Conflict Decisions (Phase 3)\n(To be filled if conflicts detected)', - new: `## Conflict Decisions (Phase 3) - -- **RESOLVED**: ${resolved.map(r => `${r.conflict_id} → ${r.strategy_name}`).join('; ') || 'None'} -- **CUSTOM_HANDLING**: ${conflictRes.custom_conflicts?.map(c => c.id).join(', ') || 'None'} -- **CONSTRAINTS**: ${planningConstraints.map(c => c.content).join('; ') || 'None'}` - }) - - // Append Phase 3 constraints to consolidated list - if 
(planningConstraints.length > 0) { - Edit(planningNotesPath, { - old: '## Consolidated Constraints (Phase 4 Input)', - new: `## Consolidated Constraints (Phase 4 Input) -${planningConstraints.map((c, i) => `${constraintCount + i + 1}. [Conflict] ${c.content}`).join('\n')}` - }) - } -} -``` - -## Memory State Check - -After Phase 3 completion, evaluate context window usage. -If memory usage is high (>120K tokens): - -```javascript -Skill(skill="compact") -``` - -## Output - -- **File**: `.workflow/active/{sessionId}/.process/conflict-resolution.json` -- **Modified files**: brainstorm artifacts (guidance-specification.md, role analyses) -- **Updated**: `context-package.json` with resolved conflict status - -## Next Phase - -Return to orchestrator, then auto-continue to [Phase 4: Task Generation](04-task-generation.md). diff --git a/.claude/skills/workflow-plan/phases/04-task-generation.md b/.claude/skills/workflow-plan/phases/04-task-generation.md deleted file mode 100644 index 3b6d1634..00000000 --- a/.claude/skills/workflow-plan/phases/04-task-generation.md +++ /dev/null @@ -1,701 +0,0 @@ -# Phase 4: Task Generation - -Generate implementation plan documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) using action-planning-agent - produces planning artifacts, does NOT execute code implementation. - -## Auto Mode - -When `--yes` or `-y`: Skip user questions, use defaults (no materials, Agent executor, Codex CLI tool). - -## Core Philosophy - -- **Planning Only**: Generate planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) - does NOT implement code -- **Agent-Driven Document Generation**: Delegate plan generation to action-planning-agent -- **NO Redundant Context Sorting**: Context priority sorting is ALREADY completed in context-gather Phase 2/3 - - Use `context-package.json.prioritized_context` directly - - DO NOT re-sort files or re-compute priorities - - `priority_tiers` and `dependency_order` are pre-computed and ready-to-use -- **N+1 Parallel Planning**: Auto-detect multi-module projects, enable parallel planning (2+1 or 3+1 mode) -- **Progressive Loading**: Load context incrementally (Core → Selective → On-Demand) due to analysis.md file size -- **Memory-First**: Reuse loaded documents from conversation memory -- **Smart Selection**: Load synthesis_output OR guidance + relevant role analyses, NOT all role analyses -- **MCP-Enhanced**: Use MCP tools for advanced code analysis and research -- **Path Clarity**: All `focus_paths` prefer absolute paths (e.g., `D:\\project\\src\\module`), or clear relative paths from project root (e.g., `./src/module`) - -## Execution Process - -``` -Input Parsing: - ├─ Parse flags: --session - └─ Validation: session_id REQUIRED - -Phase 0: User Configuration (Interactive) - ├─ Question 1: Supplementary materials/guidelines? 
- ├─ Question 2: Execution method preference (Agent/CLI/Hybrid) - ├─ Question 3: CLI tool preference (if CLI selected) - └─ Store: userConfig for agent prompt - -Phase 1: Context Preparation & Module Detection (Command) - ├─ Assemble session paths (metadata, context package, output dirs) - ├─ Provide metadata (session_id, execution_mode, mcp_capabilities) - ├─ Auto-detect modules from context-package + directory structure - └─ Decision: - ├─ modules.length == 1 → Single Agent Mode (Phase 2A) - └─ modules.length >= 2 → Parallel Mode (Phase 2B + Phase 3) - -Phase 2A: Single Agent Planning (Original Flow) - ├─ Load context package (progressive loading strategy) - ├─ Generate Task JSON Files (.task/IMPL-*.json) - ├─ Create IMPL_PLAN.md - └─ Generate TODO_LIST.md - -Phase 2B: N Parallel Planning (Multi-Module) - ├─ Launch N action-planning-agents simultaneously (one per module) - ├─ Each agent generates module-scoped tasks (IMPL-{prefix}{seq}.json) - ├─ Task ID format: IMPL-A1, IMPL-A2... / IMPL-B1, IMPL-B2... - └─ Each module limited to ≤9 tasks - -Phase 3: Integration (+1 Coordinator, Multi-Module Only) - ├─ Collect all module task JSONs - ├─ Resolve cross-module dependencies (CROSS::{module}::{pattern} → actual ID) - ├─ Generate unified IMPL_PLAN.md (grouped by module) - └─ Generate TODO_LIST.md (hierarchical: module → tasks) -``` - -## Document Generation Lifecycle - -### Phase 0: User Configuration (Interactive) - -**Purpose**: Collect user preferences before task generation to ensure generated tasks match execution expectations. - -**Auto Mode Check**: -```javascript -const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') - -if (autoYes) { - console.log(`[--yes] Using defaults: No materials, Agent executor, Codex CLI`) - userConfig = { - supplementaryMaterials: { type: "none", content: [] }, - executionMethod: "agent", - preferredCliTool: "codex", - enableResume: true - } - // Skip to Phase 1 -} -``` - -**User Questions** (skipped if autoYes): -```javascript -if (!autoYes) AskUserQuestion({ - questions: [ - { - question: "Do you have supplementary materials or guidelines to include?", - header: "Materials", - multiSelect: false, - options: [ - { label: "No additional materials", description: "Use existing context only" }, - { label: "Provide file paths", description: "I'll specify paths to include" }, - { label: "Provide inline content", description: "I'll paste content directly" } - ] - }, - { - question: "Select execution method for generated tasks:", - header: "Execution", - multiSelect: false, - options: [ - { label: "Agent (Recommended)", description: "Claude agent executes tasks directly" }, - { label: "Hybrid", description: "Agent orchestrates, calls CLI for complex steps" }, - { label: "CLI Only", description: "All execution via CLI tools (codex/gemini/qwen)" } - ] - }, - { - question: "If using CLI, which tool do you prefer?", - header: "CLI Tool", - multiSelect: false, - options: [ - { label: "Codex (Recommended)", description: "Best for implementation tasks" }, - { label: "Gemini", description: "Best for analysis and large context" }, - { label: "Qwen", description: "Alternative analysis tool" }, - { label: "Auto", description: "Let agent decide per-task" } - ] - } - ] -}) -``` - -**Handle Materials Response** (skipped if autoYes): -```javascript -if (!autoYes && userConfig.materials === "Provide file paths") { - // Follow-up question for file paths - const pathsResponse = AskUserQuestion({ - questions: [{ - question: "Enter file paths to include 
(comma-separated or one per line):", - header: "Paths", - multiSelect: false, - options: [ - { label: "Enter paths", description: "Provide paths in text input" } - ] - }] - }) - userConfig.supplementaryPaths = parseUserPaths(pathsResponse) -} -``` - -**Build userConfig**: -```javascript -const userConfig = { - supplementaryMaterials: { - type: "none|paths|inline", - content: [...], // Parsed paths or inline content - }, - executionMethod: "agent|hybrid|cli", - preferredCliTool: "codex|gemini|qwen|auto", - enableResume: true // Always enable resume for CLI executions -} -``` - -**Pass to Agent**: Include `userConfig` in agent prompt for Phase 2A/2B. - -### Phase 1: Context Preparation & Module Detection (Command Responsibility) - -**Command prepares session paths, metadata, detects module structure. Context priority sorting is NOT performed here - it's already completed in context-gather Phase 2/3.** - -**Session Path Structure**: -``` -.workflow/active/WFS-{session-id}/ -├── workflow-session.json # Session metadata -├── planning-notes.md # Consolidated planning notes -├── .process/ -│ └── context-package.json # Context package with artifact catalog -├── .task/ # Output: Task JSON files -│ ├── IMPL-A1.json # Multi-module: prefixed by module -│ ├── IMPL-A2.json -│ ├── IMPL-B1.json -│ └── ... -├── IMPL_PLAN.md # Output: Implementation plan (grouped by module) -└── TODO_LIST.md # Output: TODO list (hierarchical) -``` - -**Command Preparation**: -1. **Assemble Session Paths** for agent prompt: - - `session_metadata_path` - - `context_package_path` - - Output directory paths - -2. **Provide Metadata** (simple values): - - `session_id` - - `mcp_capabilities` (available MCP tools) - -3. **Auto Module Detection** (determines single vs parallel mode): - ```javascript - function autoDetectModules(contextPackage, projectRoot) { - // === Complexity Gate: Only parallelize for High complexity === - const complexity = contextPackage.metadata?.complexity || 'Medium'; - if (complexity !== 'High') { - // Force single agent mode for Low/Medium complexity - // This maximizes agent context reuse for related tasks - return [{ name: 'main', prefix: '', paths: ['.'] }]; - } - - // Priority 1: Explicit frontend/backend separation - if (exists('src/frontend') && exists('src/backend')) { - return [ - { name: 'frontend', prefix: 'A', paths: ['src/frontend'] }, - { name: 'backend', prefix: 'B', paths: ['src/backend'] } - ]; - } - - // Priority 2: Monorepo structure - if (exists('packages/*') || exists('apps/*')) { - return detectMonorepoModules(); // Returns 2-3 main packages - } - - // Priority 3: Context-package dependency clustering - const modules = clusterByDependencies(contextPackage.dependencies?.internal); - if (modules.length >= 2) return modules.slice(0, 3); - - // Default: Single module (original flow) - return [{ name: 'main', prefix: '', paths: ['.'] }]; - } - ``` - -**Decision Logic**: -- `complexity !== 'High'` → Force Phase 2A (Single Agent, maximize context reuse) -- `modules.length == 1` → Phase 2A (Single Agent, original flow) -- `modules.length >= 2 && complexity == 'High'` → Phase 2B + Phase 3 (N+1 Parallel) - -**Note**: CLI tool usage is now determined semantically by action-planning-agent based on user's task description, not by flags. - -### Phase 2A: Single Agent Planning (Original Flow) - -**Condition**: `modules.length == 1` (no multi-module detected) - -**Purpose**: Generate IMPL_PLAN.md, task JSONs, and TODO_LIST.md - planning documents only, NOT code implementation. 
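As a preview of the deliverable, here is a minimal sketch of one generated task JSON. All concrete values (title, paths, requirements, status) are hypothetical, and the nesting of `cli_execution_id`/`cli_execution` under `meta` and the internals of `context`/`flow_control` are illustrative assumptions; the normative rules are in the EXPECTED DELIVERABLES and CLI EXECUTION ID REQUIREMENTS sections of the agent prompt below.

```javascript
// Illustrative sketch of .task/IMPL-1.json for a single-module session (hypothetical values).
// Field nesting beyond the documented schema fields (id, title, status, context_package_path, meta,
// context, flow_control) is an assumption, not a spec.
const exampleTask = {
  id: "IMPL-1",
  title: "Add OAuth2 login endpoint",                                   // hypothetical task
  status: "pending",
  context_package_path: ".workflow/active/WFS-implement-oauth2-auth/.process/context-package.json",
  meta: {
    execution_config: { method: "agent", cli_tool: null, enable_resume: false }, // userConfig = "agent"
    cli_execution_id: "WFS-implement-oauth2-auth-IMPL-1",               // {session_id}-{task_id}
    cli_execution: { strategy: "new" }                                  // no depends_on → fresh CLI conversation
  },
  context: {
    depends_on: [],
    focus_paths: ["src/auth"],                                          // from prioritized_context critical/high tiers
    requirements: ["Implement 1 login endpoint", "Add 3 unit tests"]    // quantified, per quality standards
  },
  flow_control: { pre_analysis: ["Read context-package.json prioritized_context"] }
};
```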
- -**Agent Invocation**: -```javascript -Task( - subagent_type="action-planning-agent", - run_in_background=false, - description="Generate planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md)", - prompt=` -## TASK OBJECTIVE -Generate implementation planning documents (IMPL_PLAN.md, task JSONs, TODO_LIST.md) for workflow session - -IMPORTANT: This is PLANNING ONLY - you are generating planning documents, NOT implementing code. - -CRITICAL: Follow the progressive loading strategy defined in agent specification (load analysis.md files incrementally due to file size) - -## PLANNING NOTES (PHASE 1-3 CONTEXT) -Load: .workflow/active/{session-id}/planning-notes.md - -This document contains: -- User Intent: Original GOAL and KEY_CONSTRAINTS from Phase 1 -- Context Findings: Critical files, architecture, and constraints from Phase 2 -- Conflict Decisions: Resolved conflicts and planning constraints from Phase 3 -- Consolidated Constraints: All constraints from all phases - -**USAGE**: Read planning-notes.md FIRST. Use Consolidated Constraints list to guide task sequencing and dependencies. - -## SESSION PATHS -Input: - - Session Metadata: .workflow/active/{session-id}/workflow-session.json - - Planning Notes: .workflow/active/{session-id}/planning-notes.md - - Context Package: .workflow/active/{session-id}/.process/context-package.json - -Output: - - Task Dir: .workflow/active/{session-id}/.task/ - - IMPL_PLAN: .workflow/active/{session-id}/IMPL_PLAN.md - - TODO_LIST: .workflow/active/{session-id}/TODO_LIST.md - -## CONTEXT METADATA -Session ID: {session-id} -MCP Capabilities: {exa_code, exa_web, code_index} - -## USER CONFIGURATION (from Phase 0) -Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli -Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto -Supplementary Materials: ${userConfig.supplementaryMaterials} - -## EXECUTION METHOD MAPPING -Based on userConfig.executionMethod, set task-level meta.execution_config: - -"agent" → - meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false } - Agent executes implementation_approach steps directly - -"cli" → - meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true } - Agent executes pre_analysis, then hands off full context to CLI via buildCliHandoffPrompt() - -"hybrid" → - Per-task decision: Analyze task complexity, set method to "agent" OR "cli" per task - - Simple tasks (≤3 files, straightforward logic) → method: "agent" - - Complex tasks (>3 files, complex logic, refactoring) → method: "cli" - CLI tool: userConfig.preferredCliTool, enable_resume: true - -IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only. - -## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED -Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort. 
-Direct usage: -- **user_intent**: Use goal/scope/key_constraints for task alignment -- **priority_tiers.critical**: These files are PRIMARY focus for task generation -- **priority_tiers.high**: These files are SECONDARY focus -- **dependency_order**: Use this for task sequencing - already computed -- **sorting_rationale**: Reference for understanding priority decisions - -## EXPLORATION CONTEXT (from context-package.exploration_results) - SUPPLEMENT ONLY -If prioritized_context is incomplete, fall back to exploration_results: -- Load exploration_results from context-package.json -- Use aggregated_insights.critical_files for focus_paths generation -- Apply aggregated_insights.constraints to acceptance criteria -- Reference aggregated_insights.all_patterns for implementation approach -- Use aggregated_insights.all_integration_points for precise modification locations -- Use conflict_indicators for risk-aware task sequencing - -## CONFLICT RESOLUTION CONTEXT (if exists) -- Check context-package.conflict_detection.resolution_file for conflict-resolution.json path -- If exists, load .process/conflict-resolution.json: - - Apply planning_constraints as task constraints (for brainstorm-less workflows) - - Reference resolved_conflicts for implementation approach alignment - - Handle custom_conflicts with explicit task notes - -## EXPECTED DELIVERABLES -1. Task JSON Files (.task/IMPL-*.json) - - 6-field schema (id, title, status, context_package_path, meta, context, flow_control) - - Quantified requirements with explicit counts - - Artifacts integration from context package - - **focus_paths generated directly from prioritized_context.priority_tiers (critical + high)** - - NO re-sorting or re-prioritization - use pre-computed tiers as-is - - Critical files are PRIMARY focus, High files are SECONDARY - - Flow control with pre_analysis steps (use prioritized_context.dependency_order for task sequencing) - - **CLI Execution IDs and strategies (MANDATORY)** - -2. Implementation Plan (IMPL_PLAN.md) - - Context analysis and artifact references - - Task breakdown and execution strategy - - Complete structure per agent definition - -3. TODO List (TODO_LIST.md) - - Hierarchical structure (containers, pending, completed markers) - - Links to task JSONs and summaries - - Matches task JSON hierarchy - -## CLI EXECUTION ID REQUIREMENTS (MANDATORY) -Each task JSON MUST include: -- **cli_execution_id**: Unique ID for CLI execution (format: \`{session_id}-{task_id}\`) -- **cli_execution**: Strategy object based on depends_on: - - No deps → \`{ "strategy": "new" }\` - - 1 dep (single child) → \`{ "strategy": "resume", "resume_from": "parent-cli-id" }\` - - 1 dep (multiple children) → \`{ "strategy": "fork", "resume_from": "parent-cli-id" }\` - - N deps → \`{ "strategy": "merge_fork", "merge_from": ["id1", "id2", ...] }\` - -**CLI Execution Strategy Rules**: -1. **new**: Task has no dependencies - starts fresh CLI conversation -2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation -3. **fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context -4. 
**merge_fork**: Task has multiple parents - merges all parent contexts into new conversation - -**Execution Command Patterns**: -- new: \`ccw cli -p "[prompt]" --tool [tool] --mode write --id [cli_execution_id]\` -- resume: \`ccw cli -p "[prompt]" --resume [resume_from] --tool [tool] --mode write\` -- fork: \`ccw cli -p "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write\` -- merge_fork: \`ccw cli -p "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write\` - -## QUALITY STANDARDS -Hard Constraints: - - Task count <= 18 (hard limit - request re-scope if exceeded) - - All requirements quantified (explicit counts and enumerated lists) - - Acceptance criteria measurable (include verification commands) - - Artifact references mapped from context package - - All documents follow agent-defined structure - -## SUCCESS CRITERIA -- All planning documents generated successfully: - - Task JSONs valid and saved to .task/ directory - - IMPL_PLAN.md created with complete structure - - TODO_LIST.md generated matching task JSONs -- Return completion status with document count and task breakdown summary - -## PLANNING NOTES RECORD (REQUIRED) -After completing, update planning-notes.md: - -**File**: .workflow/active/{session_id}/planning-notes.md - -1. **Task Generation (Phase 4)**: Task count and key tasks -2. **N+1 Context**: Key decisions (with rationale) + deferred items - -\`\`\`markdown -## Task Generation (Phase 4) -### [Action-Planning Agent] YYYY-MM-DD -- **Tasks**: [count] ([IDs]) - -## N+1 Context -### Decisions -| Decision | Rationale | Revisit? | -|----------|-----------|----------| -| [choice] | [why] | [Yes/No] | - -### Deferred -- [ ] [item] - [reason] -\`\`\` -` -) -``` - -### Phase 2B: N Parallel Planning (Multi-Module) - -**Condition**: `modules.length >= 2` (multi-module detected) - -**Purpose**: Launch N action-planning-agents simultaneously, one per module, for parallel task JSON generation. - -**Note**: Phase 2B agents generate Task JSONs ONLY. IMPL_PLAN.md and TODO_LIST.md are generated by Phase 3 Coordinator. - -**Parallel Agent Invocation**: -```javascript -// Launch N agents in parallel (one per module) -const planningTasks = modules.map(module => - Task( - subagent_type="action-planning-agent", - run_in_background=false, - description=`Generate ${module.name} module task JSONs`, - prompt=` -## TASK OBJECTIVE -Generate task JSON files for ${module.name} module within workflow session - -IMPORTANT: This is PLANNING ONLY - generate task JSONs, NOT implementing code. -IMPORTANT: Generate Task JSONs ONLY. IMPL_PLAN.md and TODO_LIST.md by Phase 3 Coordinator. - -CRITICAL: Follow the progressive loading strategy defined in agent specification (load analysis.md files incrementally due to file size) - -## PLANNING NOTES (PHASE 1-3 CONTEXT) -Load: .workflow/active/{session-id}/planning-notes.md - -This document contains consolidated constraints and user intent to guide module-scoped task generation. 
- -## MODULE SCOPE -- Module: ${module.name} (${module.type}) -- Focus Paths: ${module.paths.join(', ')} -- Task ID Prefix: IMPL-${module.prefix} -- Task Limit: ≤6 tasks (hard limit for this module) -- Other Modules: ${otherModules.join(', ')} (reference only, do NOT generate tasks for them) - -## SESSION PATHS -Input: - - Session Metadata: .workflow/active/{session-id}/workflow-session.json - - Planning Notes: .workflow/active/{session-id}/planning-notes.md - - Context Package: .workflow/active/{session-id}/.process/context-package.json - -Output: - - Task Dir: .workflow/active/{session-id}/.task/ - -## CONTEXT METADATA -Session ID: {session-id} -MCP Capabilities: {exa_code, exa_web, code_index} - -## USER CONFIGURATION (from Phase 0) -Execution Method: ${userConfig.executionMethod} // agent|hybrid|cli -Preferred CLI Tool: ${userConfig.preferredCliTool} // codex|gemini|qwen|auto -Supplementary Materials: ${userConfig.supplementaryMaterials} - -## EXECUTION METHOD MAPPING -Based on userConfig.executionMethod, set task-level meta.execution_config: - -"agent" → - meta.execution_config = { method: "agent", cli_tool: null, enable_resume: false } - Agent executes implementation_approach steps directly - -"cli" → - meta.execution_config = { method: "cli", cli_tool: userConfig.preferredCliTool, enable_resume: true } - Agent executes pre_analysis, then hands off full context to CLI via buildCliHandoffPrompt() - -"hybrid" → - Per-task decision: Analyze task complexity, set method to "agent" OR "cli" per task - - Simple tasks (≤3 files, straightforward logic) → method: "agent" - - Complex tasks (>3 files, complex logic, refactoring) → method: "cli" - CLI tool: userConfig.preferredCliTool, enable_resume: true - -IMPORTANT: Do NOT add command field to implementation_approach steps. Execution routing is controlled by task-level meta.execution_config.method only. - -## PRIORITIZED CONTEXT (from context-package.prioritized_context) - ALREADY SORTED -Context sorting is ALREADY COMPLETED in context-gather Phase 2/3. DO NOT re-sort. 
-Filter by module scope (${module.paths.join(', ')}): -- **user_intent**: Use for task alignment within module -- **priority_tiers.critical**: Filter for files in ${module.paths.join(', ')} → PRIMARY focus -- **priority_tiers.high**: Filter for files in ${module.paths.join(', ')} → SECONDARY focus -- **dependency_order**: Use module-relevant entries for task sequencing - -## EXPLORATION CONTEXT (from context-package.exploration_results) - SUPPLEMENT ONLY -If prioritized_context is incomplete for this module, fall back to exploration_results: -- Load exploration_results from context-package.json -- Filter for ${module.name} module: Use aggregated_insights.critical_files matching ${module.paths.join(', ')} -- Apply module-relevant constraints from aggregated_insights.constraints -- Reference aggregated_insights.all_patterns applicable to ${module.name} -- Use aggregated_insights.all_integration_points for precise modification locations within module scope -- Use conflict_indicators for risk-aware task sequencing - -## CONFLICT RESOLUTION CONTEXT (if exists) -- Check context-package.conflict_detection.resolution_file for conflict-resolution.json path -- If exists, load .process/conflict-resolution.json: - - Apply planning_constraints relevant to ${module.name} as task constraints - - Reference resolved_conflicts affecting ${module.name} for implementation approach alignment - - Handle custom_conflicts with explicit task notes - -## CROSS-MODULE DEPENDENCIES -- For dependencies ON other modules: Use placeholder depends_on: ["CROSS::{module}::{pattern}"] -- Example: depends_on: ["CROSS::B::api-endpoint"] (this module depends on B's api-endpoint task) -- Phase 3 Coordinator resolves to actual task IDs -- For dependencies FROM other modules: Document in task context as "provides_for" annotation - -## EXPECTED DELIVERABLES -Task JSON Files (.task/IMPL-${module.prefix}*.json): - - 6-field schema (id, title, status, context_package_path, meta, context, flow_control) - - Task ID format: IMPL-${module.prefix}1, IMPL-${module.prefix}2, ... - - Quantified requirements with explicit counts - - Artifacts integration from context package (filtered for ${module.name}) - - **focus_paths generated directly from prioritized_context.priority_tiers filtered by ${module.paths.join(', ')}** - - NO re-sorting - use pre-computed tiers filtered for this module - - Critical files are PRIMARY focus, High files are SECONDARY - - Flow control with pre_analysis steps (use prioritized_context.dependency_order for module task sequencing) - - **CLI Execution IDs and strategies (MANDATORY)** - - Focus ONLY on ${module.name} module scope - -## CLI EXECUTION ID REQUIREMENTS (MANDATORY) -Each task JSON MUST include: -- **cli_execution_id**: Unique ID for CLI execution (format: \`{session_id}-IMPL-${module.prefix}{seq}\`) -- **cli_execution**: Strategy object based on depends_on: - - No deps → \`{ "strategy": "new" }\` - - 1 dep (single child) → \`{ "strategy": "resume", "resume_from": "parent-cli-id" }\` - - 1 dep (multiple children) → \`{ "strategy": "fork", "resume_from": "parent-cli-id" }\` - - N deps → \`{ "strategy": "merge_fork", "merge_from": ["id1", "id2", ...] }\` - - Cross-module dep → \`{ "strategy": "cross_module_fork", "resume_from": "CROSS::{module}::{pattern}" }\` - -**CLI Execution Strategy Rules**: -1. **new**: Task has no dependencies - starts fresh CLI conversation -2. **resume**: Task has 1 parent AND that parent has only this child - continues same conversation -3. 
**fork**: Task has 1 parent BUT parent has multiple children - creates new branch with parent context -4. **merge_fork**: Task has multiple parents - merges all parent contexts into new conversation -5. **cross_module_fork**: Task depends on task from another module - Phase 3 resolves placeholder - -**Execution Command Patterns**: -- new: \`ccw cli -p "[prompt]" --tool [tool] --mode write --id [cli_execution_id]\` -- resume: \`ccw cli -p "[prompt]" --resume [resume_from] --tool [tool] --mode write\` -- fork: \`ccw cli -p "[prompt]" --resume [resume_from] --id [cli_execution_id] --tool [tool] --mode write\` -- merge_fork: \`ccw cli -p "[prompt]" --resume [merge_from.join(',')] --id [cli_execution_id] --tool [tool] --mode write\` -- cross_module_fork: (Phase 3 resolves placeholder, then uses fork pattern) - -## QUALITY STANDARDS -Hard Constraints: - - Task count <= 9 for this module (hard limit - coordinate with Phase 3 if exceeded) - - All requirements quantified (explicit counts and enumerated lists) - - Acceptance criteria measurable (include verification commands) - - Artifact references mapped from context package (module-scoped filter) - - Focus paths use absolute paths or clear relative paths from project root - - Cross-module dependencies use CROSS:: placeholder format - -## SUCCESS CRITERIA -- Task JSONs saved to .task/ with IMPL-${module.prefix}* naming -- All task JSONs include cli_execution_id and cli_execution strategy -- Cross-module dependencies use CROSS:: placeholder format consistently -- Focus paths scoped to ${module.paths.join(', ')} only -- Return: task count, task IDs, dependency summary (internal + cross-module) - -## PLANNING NOTES RECORD (REQUIRED) -After completing, append to planning-notes.md: - -\`\`\`markdown -### [${module.name}] YYYY-MM-DD -- **Tasks**: [count] ([IDs]) -- **CROSS deps**: [placeholders used] -\`\`\` - ` - ) -); - -// Execute all in parallel -await Promise.all(planningTasks); -``` - -**Output Structure** (direct to .task/): -``` -.task/ -├── IMPL-A1.json # Module A (e.g., frontend) -├── IMPL-A2.json -├── IMPL-B1.json # Module B (e.g., backend) -├── IMPL-B2.json -└── IMPL-C1.json # Module C (e.g., shared) -``` - -**Task ID Naming**: -- Format: `IMPL-{prefix}{seq}.json` -- Prefix: A, B, C... (assigned by detection order) -- Sequence: 1, 2, 3... (per-module increment) - -### Phase 3: Integration (+1 Coordinator Agent, Multi-Module Only) - -**Condition**: Only executed when `modules.length >= 2` - -**Purpose**: Collect all module tasks, resolve cross-module dependencies, generate unified IMPL_PLAN.md and TODO_LIST.md documents. - -**Coordinator Agent Invocation**: -```javascript -// Wait for all Phase 2B agents to complete -const moduleResults = await Promise.all(planningTasks); - -// Launch +1 Coordinator Agent -Task( - subagent_type="action-planning-agent", - run_in_background=false, - description="Integrate module tasks and generate unified documents", - prompt=` -## TASK OBJECTIVE -Integrate all module task JSONs, resolve cross-module dependencies, and generate unified IMPL_PLAN.md and TODO_LIST.md - -IMPORTANT: This is INTEGRATION ONLY - consolidate existing task JSONs, NOT creating new tasks. 
- -## SESSION PATHS -Input: - - Session Metadata: .workflow/active/{session-id}/workflow-session.json - - Context Package: .workflow/active/{session-id}/.process/context-package.json - - Task JSONs: .workflow/active/{session-id}/.task/IMPL-*.json (from Phase 2B) -Output: - - Updated Task JSONs: .workflow/active/{session-id}/.task/IMPL-*.json (resolved dependencies) - - IMPL_PLAN: .workflow/active/{session-id}/IMPL_PLAN.md - - TODO_LIST: .workflow/active/{session-id}/TODO_LIST.md - -## CONTEXT METADATA -Session ID: {session-id} -Modules: ${modules.map(m => m.name + '(' + m.prefix + ')').join(', ')} -Module Count: ${modules.length} - -## INTEGRATION STEPS -1. Collect all .task/IMPL-*.json, group by module prefix -2. Resolve CROSS:: dependencies → actual task IDs, update task JSONs -3. Generate IMPL_PLAN.md (multi-module format per agent specification) -4. Generate TODO_LIST.md (hierarchical format per agent specification) - -## CROSS-MODULE DEPENDENCY RESOLUTION -- Pattern: CROSS::{module}::{pattern} → IMPL-{module}* matching title/context -- Example: CROSS::B::api-endpoint → IMPL-B1 (if B1 title contains "api-endpoint") -- Log unresolved as warnings - -## EXPECTED DELIVERABLES -1. Updated Task JSONs with resolved dependency IDs -2. IMPL_PLAN.md - multi-module format with cross-dependency section -3. TODO_LIST.md - hierarchical by module with cross-dependency section - -## SUCCESS CRITERIA -- No CROSS:: placeholders remaining in task JSONs -- IMPL_PLAN.md and TODO_LIST.md generated with multi-module structure -- Return: task count, per-module breakdown, resolved dependency count - -## PLANNING NOTES RECORD (REQUIRED) -After integration, update planning-notes.md: - -\`\`\`markdown -### [Coordinator] YYYY-MM-DD -- **Total**: [count] tasks -- **Resolved**: [CROSS:: resolutions] - -## N+1 Context -### Decisions -| Decision | Rationale | Revisit? | -|----------|-----------|----------| -| CROSS::X → IMPL-Y | [why this resolution] | [Yes/No] | - -### Deferred -- [ ] [unresolved CROSS or conflict] - [reason] -\`\`\` - ` -) -``` - -**Dependency Resolution Algorithm**: -```javascript -function resolveCrossModuleDependency(placeholder, allTasks) { - const [, targetModule, pattern] = placeholder.match(/CROSS::(\w+)::(.+)/); - const candidates = allTasks.filter(t => - t.id.startsWith(`IMPL-${targetModule}`) && - (t.title.toLowerCase().includes(pattern.toLowerCase()) || - t.context?.description?.toLowerCase().includes(pattern.toLowerCase())) - ); - return candidates.length > 0 - ? candidates.sort((a, b) => a.id.localeCompare(b.id))[0].id - : placeholder; // Keep for manual resolution -} -``` - -## Output - -- **Files**: - - `.workflow/active/{sessionId}/IMPL_PLAN.md` - - `.workflow/active/{sessionId}/.task/IMPL-*.json` - - `.workflow/active/{sessionId}/TODO_LIST.md` -- **Updated**: `planning-notes.md` with task generation record and N+1 context - -## Next Step - -Return to orchestrator. Present user with action choices: -1. Verify Plan Quality (Recommended) → `/workflow:plan-verify` -2. Start Execution → `/workflow:execute` -3. 
Review Status Only → `/workflow:status` diff --git a/.codex/skills/issue-discover-by-prompt/SKILL.md b/.codex/skills/issue-discover-by-prompt/SKILL.md deleted file mode 100644 index da2d1bfa..00000000 --- a/.codex/skills/issue-discover-by-prompt/SKILL.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -name: issue-discover-by-prompt -description: Discover issues from user prompt with iterative multi-agent exploration and cross-module comparison -argument-hint: " [--scope=src/**] [--depth=standard|deep] [--max-iterations=5]" ---- - -# Issue Discovery by Prompt (Codex Version) - -## Goal - -Prompt-driven issue discovery with intelligent planning. Instead of fixed perspectives, this command: - -1. **Analyzes user intent** to understand what to find -2. **Plans exploration strategy** dynamically based on codebase structure -3. **Executes iterative exploration** with feedback loops -4. **Performs cross-module comparison** when detecting comparison intent - -**Core Difference from `issue-discover.md`**: -- `issue-discover`: Pre-defined perspectives (bug, security, etc.), parallel execution -- `issue-discover-by-prompt`: User-driven prompt, planned strategy, iterative exploration - -## Inputs - -- **Prompt**: Natural language description of what to find -- **Scope**: `--scope=src/**` - File pattern to explore (default: `**/*`) -- **Depth**: `--depth=standard|deep` - standard (3 iterations) or deep (5+ iterations) -- **Max Iterations**: `--max-iterations=N` (default: 5) - -## Output Requirements - -**Generate Files:** -1. `.workflow/issues/discoveries/{discovery-id}/discovery-state.json` - Session state with iteration tracking -2. `.workflow/issues/discoveries/{discovery-id}/iterations/{N}/{dimension}.json` - Per-iteration findings -3. `.workflow/issues/discoveries/{discovery-id}/comparison-analysis.json` - Cross-dimension comparison (if applicable) -4. `.workflow/issues/discoveries/{discovery-id}/discovery-issues.jsonl` - Generated issue candidates - -**Return Summary:** -```json -{ - "discovery_id": "DBP-YYYYMMDD-HHmmss", - "prompt": "Check if frontend API calls match backend implementations", - "intent_type": "comparison", - "dimensions": ["frontend-calls", "backend-handlers"], - "total_iterations": 3, - "total_findings": 24, - "issues_generated": 12, - "comparison_match_rate": 0.75 -} -``` - -## Workflow - -### Step 1: Initialize Discovery Session - -```bash -# Generate discovery ID -DISCOVERY_ID="DBP-$(date -u +%Y%m%d-%H%M%S)" -OUTPUT_DIR=".workflow/issues/discoveries/${DISCOVERY_ID}" - -# Create directory structure -mkdir -p "${OUTPUT_DIR}/iterations" -``` - -Detect intent type from prompt: -- `comparison`: Contains "match", "compare", "versus", "vs", "between" -- `search`: Contains "find", "locate", "where" -- `verification`: Contains "verify", "check", "ensure" -- `audit`: Contains "audit", "review", "analyze" - -### Step 2: Gather Context - -Use `rg` and file exploration to understand codebase structure: - -```bash -# Find relevant modules based on prompt keywords -rg -l "" --type ts | head -10 -rg -l "" --type ts | head -10 - -# Understand project structure -ls -la src/ -cat .workflow/project-tech.json 2>/dev/null || echo "No project-tech.json" -``` - -Build context package: -```json -{ - "prompt_keywords": ["frontend", "API", "backend"], - "codebase_structure": { "modules": [...], "patterns": [...] }, - "relevant_modules": ["src/api/", "src/services/"] -} -``` - -### Step 3: Plan Exploration Strategy - -Analyze the prompt and context to design exploration strategy. 
- -**Output exploration plan:** -```json -{ - "intent_analysis": { - "type": "comparison", - "primary_question": "Do frontend API calls match backend implementations?", - "sub_questions": ["Are endpoints aligned?", "Are payloads compatible?"] - }, - "dimensions": [ - { - "name": "frontend-calls", - "description": "Client-side API calls and error handling", - "search_targets": ["src/api/**", "src/hooks/**"], - "focus_areas": ["fetch calls", "error boundaries", "response parsing"] - }, - { - "name": "backend-handlers", - "description": "Server-side API implementations", - "search_targets": ["src/server/**", "src/routes/**"], - "focus_areas": ["endpoint handlers", "response schemas", "error responses"] - } - ], - "comparison_matrix": { - "dimension_a": "frontend-calls", - "dimension_b": "backend-handlers", - "comparison_points": [ - {"aspect": "endpoints", "frontend_check": "fetch URLs", "backend_check": "route paths"}, - {"aspect": "methods", "frontend_check": "HTTP methods used", "backend_check": "methods accepted"}, - {"aspect": "payloads", "frontend_check": "request body structure", "backend_check": "expected schema"}, - {"aspect": "responses", "frontend_check": "response parsing", "backend_check": "response format"} - ] - }, - "estimated_iterations": 3, - "termination_conditions": ["All comparison points verified", "No new findings in last iteration"] -} -``` - -### Step 4: Iterative Exploration - -Execute iterations until termination conditions are met: - -``` -WHILE iteration < max_iterations AND shouldContinue: - 1. Plan iteration focus based on previous findings - 2. Explore each dimension - 3. Collect and analyze findings - 4. Cross-reference between dimensions - 5. Check convergence -``` - -**For each iteration:** - -1. **Search for relevant code** using `rg`: -```bash -# Based on dimension focus areas -rg "fetch\s*\(" --type ts -C 3 | head -50 -rg "app\.(get|post|put|delete)" --type ts -C 3 | head -50 -``` - -2. **Analyze and record findings**: -```json -{ - "dimension": "frontend-calls", - "iteration": 1, - "findings": [ - { - "id": "F-001", - "title": "Undefined endpoint in UserService", - "category": "endpoint-mismatch", - "file": "src/api/userService.ts", - "line": 42, - "snippet": "fetch('/api/users/profile')", - "related_dimension": "backend-handlers", - "confidence": 0.85 - } - ], - "coverage": { - "files_explored": 15, - "areas_covered": ["fetch calls", "axios instances"], - "areas_remaining": ["graphql queries"] - }, - "leads": [ - {"description": "Check GraphQL mutations", "suggested_search": "mutation.*User"} - ] -} -``` - -3. **Cross-reference findings** between dimensions: -```javascript -// For each finding in dimension A, look for related code in dimension B -if (finding.related_dimension) { - searchForRelatedCode(finding, otherDimension); -} -``` - -4. 
**Check convergence**: -```javascript -const convergence = { - newDiscoveries: newFindings.length, - confidence: calculateConfidence(cumulativeFindings), - converged: newFindings.length === 0 || confidence > 0.9 -}; -``` - -### Step 5: Cross-Analysis (for comparison intent) - -If intent is comparison, analyze findings across dimensions: - -```javascript -for (const point of comparisonMatrix.comparison_points) { - const aFindings = findings.filter(f => - f.related_dimension === dimension_a && f.category.includes(point.aspect) - ); - const bFindings = findings.filter(f => - f.related_dimension === dimension_b && f.category.includes(point.aspect) - ); - - // Find discrepancies - const discrepancies = compareFindings(aFindings, bFindings, point); - - // Calculate match rate - const matchRate = calculateMatchRate(aFindings, bFindings); -} -``` - -Write to `comparison-analysis.json`: -```json -{ - "matrix": { "dimension_a": "...", "dimension_b": "...", "comparison_points": [...] }, - "results": [ - { - "aspect": "endpoints", - "dimension_a_count": 15, - "dimension_b_count": 12, - "discrepancies": [ - {"frontend": "/api/users/profile", "backend": "NOT_FOUND", "type": "missing_endpoint"} - ], - "match_rate": 0.80 - } - ], - "summary": { - "total_discrepancies": 5, - "overall_match_rate": 0.75, - "critical_mismatches": ["endpoints", "payloads"] - } -} -``` - -### Step 6: Generate Issues - -Convert high-confidence findings to issues: - -```bash -# For each finding with confidence >= 0.7 or priority critical/high -echo '{"id":"ISS-DBP-001","title":"Missing backend endpoint for /api/users/profile",...}' >> ${OUTPUT_DIR}/discovery-issues.jsonl -``` - -### Step 7: Update Final State - -```json -{ - "discovery_id": "DBP-...", - "type": "prompt-driven", - "prompt": "...", - "intent_type": "comparison", - "phase": "complete", - "created_at": "...", - "updated_at": "...", - "iterations": [ - {"number": 1, "findings_count": 10, "new_discoveries": 10, "confidence": 0.6}, - {"number": 2, "findings_count": 18, "new_discoveries": 8, "confidence": 0.75}, - {"number": 3, "findings_count": 24, "new_discoveries": 6, "confidence": 0.85} - ], - "results": { - "total_iterations": 3, - "total_findings": 24, - "issues_generated": 12, - "comparison_match_rate": 0.75 - } -} -``` - -### Step 8: Output Summary - -```markdown -## Discovery Complete: DBP-... 
- -**Prompt**: Check if frontend API calls match backend implementations -**Intent**: comparison -**Dimensions**: frontend-calls, backend-handlers - -### Iteration Summary -| # | Findings | New | Confidence | -|---|----------|-----|------------| -| 1 | 10 | 10 | 60% | -| 2 | 18 | 8 | 75% | -| 3 | 24 | 6 | 85% | - -### Comparison Results -- **Overall Match Rate**: 75% -- **Total Discrepancies**: 5 -- **Critical Mismatches**: endpoints, payloads - -### Issues Generated: 12 -- 2 Critical -- 4 High -- 6 Medium - -### Next Steps -- `/issue:plan DBP-001,DBP-002,...` to plan solutions -- `ccw view` to review findings in dashboard -``` - -## Quality Checklist - -Before completing, verify: - -- [ ] Intent type correctly detected from prompt -- [ ] Dimensions dynamically generated based on prompt -- [ ] Iterations executed until convergence or max limit -- [ ] Cross-reference analysis performed (for comparison intent) -- [ ] High-confidence findings converted to issues -- [ ] Discovery state shows `phase: complete` - -## Error Handling - -| Situation | Action | -|-----------|--------| -| No relevant code found | Report empty result, suggest broader scope | -| Max iterations without convergence | Complete with current findings, note in summary | -| Comparison dimension mismatch | Report which dimension has fewer findings | -| No comparison points matched | Report as "No direct matches found" | - -## Use Cases - -| Scenario | Example Prompt | -|----------|----------------| -| API Contract | "Check if frontend calls match backend endpoints" | -| Error Handling | "Find inconsistent error handling patterns" | -| Migration Gap | "Compare old auth with new auth implementation" | -| Feature Parity | "Verify mobile has all web features" | -| Schema Drift | "Check if TypeScript types match API responses" | -| Integration | "Find mismatches between service A and service B" | - -## Start Discovery - -Parse prompt and detect intent: - -```bash -PROMPT="${1}" -SCOPE="${2:-**/*}" -DEPTH="${3:-standard}" - -# Detect intent keywords -if echo "${PROMPT}" | grep -qiE '(match|compare|versus|vs|between)'; then - INTENT="comparison" -elif echo "${PROMPT}" | grep -qiE '(find|locate|where)'; then - INTENT="search" -elif echo "${PROMPT}" | grep -qiE '(verify|check|ensure)'; then - INTENT="verification" -else - INTENT="audit" -fi - -echo "Intent detected: ${INTENT}" -echo "Starting discovery with scope: ${SCOPE}" -``` - -Then follow the workflow to explore and discover issues. diff --git a/.codex/skills/issue-discover/SKILL.md b/.codex/skills/issue-discover/SKILL.md index 706a1812..e8cc1480 100644 --- a/.codex/skills/issue-discover/SKILL.md +++ b/.codex/skills/issue-discover/SKILL.md @@ -1,262 +1,345 @@ --- name: issue-discover -description: Discover potential issues from multiple perspectives (bug, UX, test, quality, security, performance, maintainability, best-practices) -argument-hint: " [--perspectives=bug,ux,...] [--external]" +description: Unified issue discovery and creation. Create issues from GitHub/text, discover issues via multi-perspective analysis, or prompt-driven iterative exploration. Triggers on "issue:new", "issue:discover", "issue:discover-by-prompt", "create issue", "discover issues", "find issues". 
+allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep, mcp__ace-tool__search_context, mcp__exa__search --- -# Issue Discovery (Codex Version) +# Issue Discover -## Goal +Unified issue discovery and creation skill covering three entry points: manual issue creation, perspective-based discovery, and prompt-driven exploration. -Multi-perspective issue discovery that explores code from different angles to identify potential bugs, UX improvements, test gaps, and other actionable items. Unlike code review (which assesses existing code quality), discovery focuses on **finding opportunities for improvement and potential problems**. +## Architecture Overview -**Discovery Scope**: Specified modules/files only -**Output Directory**: `.workflow/issues/discoveries/{discovery-id}/` -**Available Perspectives**: bug, ux, test, quality, security, performance, maintainability, best-practices - -## Inputs - -- **Target Pattern**: File glob pattern (e.g., `src/auth/**`) -- **Perspectives**: Comma-separated list via `--perspectives` (or interactive selection) -- **External Research**: `--external` flag enables Exa research for security and best-practices - -## Output Requirements - -**Generate Files:** -1. `.workflow/issues/discoveries/{discovery-id}/discovery-state.json` - Session state -2. `.workflow/issues/discoveries/{discovery-id}/perspectives/{perspective}.json` - Per-perspective findings -3. `.workflow/issues/discoveries/{discovery-id}/discovery-issues.jsonl` - Generated issue candidates -4. `.workflow/issues/discoveries/{discovery-id}/summary.md` - Summary report - -**Return Summary:** -```json -{ - "discovery_id": "DSC-YYYYMMDD-HHmmss", - "target_pattern": "src/auth/**", - "perspectives_analyzed": ["bug", "security", "test"], - "total_findings": 15, - "issues_generated": 8, - "priority_distribution": { "critical": 1, "high": 3, "medium": 4 } -} +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Issue Discover Orchestrator (SKILL.md) │ +│ → Action selection → Route to phase → Execute → Summary │ +└───────────────┬─────────────────────────────────────────────────┘ + │ + ├─ AskUserQuestion: Select action + │ + ┌───────────┼───────────┬───────────┐ + ↓ ↓ ↓ │ +┌─────────┐ ┌─────────┐ ┌─────────┐ │ +│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │ +│ Create │ │Discover │ │Discover │ │ +│ New │ │ Multi │ │by Prompt│ │ +└─────────┘ └─────────┘ └─────────┘ │ + ↓ ↓ ↓ │ + Issue Discoveries Discoveries │ +(registered) (export) (export) │ + │ │ │ │ + └───────────┴───────────┘ │ + ↓ │ + issue-resolve (plan/queue) │ + ↓ │ + /issue:execute │ ``` -## Workflow +## Key Design Principles -### Step 1: Initialize Discovery Session +1. **Action-Driven Routing**: AskUserQuestion selects action, then load single phase +2. **Progressive Phase Loading**: Only read the selected phase document +3. **CLI-First Data Access**: All issue CRUD via `ccw issue` CLI commands +4. **Auto Mode Support**: `-y` flag skips action selection with auto-detection +5. **Subagent Lifecycle**: Explicit lifecycle management with spawn_agent → wait → close_agent +6. **Role Path Loading**: Subagent roles loaded via path reference in MANDATORY FIRST STEPS -```bash -# Generate discovery ID -DISCOVERY_ID="DSC-$(date -u +%Y%m%d-%H%M%S)" -OUTPUT_DIR=".workflow/issues/discoveries/${DISCOVERY_ID}" +## Auto Mode -# Create directory structure -mkdir -p "${OUTPUT_DIR}/perspectives" +When `--yes` or `-y`: Skip action selection, auto-detect action from input type. 
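In auto mode the skill never pauses on AskUserQuestion; the action is resolved purely by the detection rules under Input Processing below. A minimal sketch of how `-y` invocations resolve (the inputs are illustrative; the thresholds come from `detectAction()`):

```bash
# -y resolves the action from the input shape and proceeds without prompts
issue-discover -y https://github.com/org/repo/issues/42       # GitHub URL      → Phase 1 (Create New)
issue-discover -y "Login button does nothing on Safari"       # short text <80  → Phase 1 (Create New)
issue-discover -y src/auth/** --perspectives=security,bug     # path / flags    → Phase 2 (Discover)
issue-discover -y "Audit whether frontend API calls, retry handling, and error payloads match the backend handlers"   # long prompt ≥80 → Phase 3 (Discover by Prompt)
```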
+ +## Usage + +``` +issue-discover +issue-discover [FLAGS] "" + +# Flags +-y, --yes Skip all confirmations (auto mode) +--action Pre-select action: new|discover|discover-by-prompt + +# Phase-specific flags +--priority <1-5> Issue priority (new mode) +--perspectives Comma-separated perspectives (discover mode) +--external Enable Exa research (discover mode) +--scope File scope (discover/discover-by-prompt mode) +--depth standard|deep (discover-by-prompt mode) +--max-iterations Max exploration iterations (discover-by-prompt mode) + +# Examples +issue-discover https://github.com/org/repo/issues/42 # Create from GitHub +issue-discover "Login fails with special chars" # Create from text +issue-discover --action discover src/auth/** # Multi-perspective discovery +issue-discover --action discover src/api/** --perspectives=security,bug # Focused discovery +issue-discover --action discover-by-prompt "Check API contracts" # Prompt-driven discovery +issue-discover -y "auth broken" # Auto mode create ``` -Resolve target files: -```bash -# List files matching pattern -find -type f -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" +## Execution Flow + +``` +Input Parsing: + └─ Parse flags (--action, -y, --perspectives, etc.) and positional args + +Action Selection: + ├─ --action flag provided → Route directly + ├─ Auto-detect from input: + │ ├─ GitHub URL or #number → Create New (Phase 1) + │ ├─ Path pattern (src/**, *.ts) → Discover (Phase 2) + │ ├─ Short text (< 80 chars) → Create New (Phase 1) + │ └─ Long descriptive text (≥ 80 chars) → Discover by Prompt (Phase 3) + └─ Otherwise → AskUserQuestion to select action + +Phase Execution (load one phase): + ├─ Phase 1: Create New → phases/01-issue-new.md + ├─ Phase 2: Discover → phases/02-discover.md + └─ Phase 3: Discover by Prompt → phases/03-discover-by-prompt.md + +Post-Phase: + └─ Summary + Next steps recommendation ``` -If no files found, abort with error message. +### Phase Reference Documents -### Step 2: Select Perspectives +| Phase | Document | Load When | Purpose | +|-------|----------|-----------|---------| +| Phase 1 | [phases/01-issue-new.md](phases/01-issue-new.md) | Action = Create New | Create issue from GitHub URL or text description | +| Phase 2 | [phases/02-discover.md](phases/02-discover.md) | Action = Discover | Multi-perspective issue discovery (bug, security, test, etc.) | +| Phase 3 | [phases/03-discover-by-prompt.md](phases/03-discover-by-prompt.md) | Action = Discover by Prompt | Prompt-driven iterative exploration with Gemini planning | -**If `--perspectives` provided:** -- Parse comma-separated list -- Validate against available perspectives +## Core Rules -**If not provided (interactive):** -- Present perspective groups: - - Quick scan: bug, test, quality - - Security audit: security, bug, quality - - Full analysis: all perspectives -- Use first group as default or wait for user input +1. **Action Selection First**: Always determine action before loading any phase +2. **Single Phase Load**: Only read the selected phase document, never load all phases +3. **CLI Data Access**: Use `ccw issue` CLI for all issue operations, NEVER read files directly +4. **Content Preservation**: Each phase contains complete execution logic from original commands +5. **Auto-Detect Input**: Smart input parsing reduces need for explicit --action flag +6. **⚠️ CRITICAL: DO NOT STOP**: Continuous multi-phase workflow. After completing each phase, immediately proceed to next +7. 
**Progressive Phase Loading**: Read phase docs ONLY when that phase is about to execute +8. **Explicit Lifecycle**: Always close_agent after wait completes to free resources -### Step 3: Analyze Each Perspective +## Input Processing -For each selected perspective, explore target files and identify issues. +### Auto-Detection Logic -**Perspective-Specific Focus:** +```javascript +function detectAction(input, flags) { + // 1. Explicit --action flag + if (flags.action) return flags.action; -| Perspective | Focus Areas | Priority Guide | -|-------------|-------------|----------------| -| **bug** | Null checks, edge cases, resource leaks, race conditions, boundary conditions, exception handling | Critical=data corruption/crash, High=malfunction, Medium=edge case | -| **ux** | Error messages, loading states, feedback, accessibility, interaction patterns | Critical=inaccessible, High=confusing, Medium=inconsistent | -| **test** | Missing unit tests, edge case coverage, integration gaps, assertion quality | Critical=no security tests, High=no core logic tests | -| **quality** | Complexity, duplication, naming, documentation, code smells | Critical=unmaintainable, High=significant issues | -| **security** | Input validation, auth/authz, injection, XSS/CSRF, data exposure | Critical=auth bypass/injection, High=missing authz | -| **performance** | N+1 queries, memory leaks, caching, algorithm efficiency | Critical=memory leaks, High=N+1 queries | -| **maintainability** | Coupling, interface design, tech debt, extensibility | Critical=forced changes, High=unclear boundaries | -| **best-practices** | Framework conventions, language patterns, anti-patterns | Critical=bug-causing anti-patterns, High=convention violations | + const trimmed = input.trim(); -**For each perspective:** - -1. Read target files and analyze for perspective-specific concerns -2. Use `rg` to search for patterns indicating issues -3. Record findings with: - - `id`: Finding ID (e.g., `F-001`) - - `title`: Brief description - - `priority`: critical/high/medium/low - - `category`: Specific category within perspective - - `description`: Detailed explanation - - `file`: File path - - `line`: Line number - - `snippet`: Code snippet - - `suggested_issue`: Proposed issue text - - `confidence`: 0.0-1.0 - -4. Write to `{OUTPUT_DIR}/perspectives/{perspective}.json`: -```json -{ - "perspective": "security", - "analyzed_at": "2025-01-22T...", - "files_analyzed": 15, - "findings": [ - { - "id": "F-001", - "title": "Missing input validation", - "priority": "high", - "category": "input-validation", - "description": "User input is passed directly to database query", - "file": "src/auth/login.ts", - "line": 42, - "snippet": "db.query(`SELECT * FROM users WHERE name = '${input}'`)", - "suggested_issue": "Add input sanitization to prevent SQL injection", - "confidence": 0.95 - } - ] -} -``` - -### Step 4: External Research (if --external) - -For security and best-practices perspectives, use Exa to search for: -- Industry best practices for the tech stack -- Known vulnerability patterns -- Framework-specific security guidelines - -Write results to `{OUTPUT_DIR}/external-research.json`. - -### Step 5: Aggregate and Prioritize - -1. Load all perspective JSON files -2. Deduplicate findings by file+line -3. Calculate priority scores: - - critical: 1.0 - - high: 0.8 - - medium: 0.5 - - low: 0.2 - - Adjust by confidence - -4. 
Sort by priority score descending - -### Step 6: Generate Issues - -Convert high-priority findings to issue format: - -```bash -# Append to discovery-issues.jsonl -echo '{"id":"ISS-DSC-001","title":"...","priority":"high",...}' >> ${OUTPUT_DIR}/discovery-issues.jsonl -``` - -Issue criteria: -- `priority` is critical or high -- OR `priority_score >= 0.7` -- OR `confidence >= 0.9` with medium priority - -### Step 7: Update Discovery State - -Write final state to `{OUTPUT_DIR}/discovery-state.json`: -```json -{ - "discovery_id": "DSC-...", - "target_pattern": "src/auth/**", - "phase": "complete", - "created_at": "...", - "updated_at": "...", - "perspectives": ["bug", "security", "test"], - "results": { - "total_findings": 15, - "issues_generated": 8, - "priority_distribution": { - "critical": 1, - "high": 3, - "medium": 4 - } + // 2. GitHub URL → new + if (trimmed.match(/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/) || trimmed.match(/^#\d+$/)) { + return 'new'; } + + // 3. Path pattern (contains **, /, or --perspectives) → discover + if (trimmed.match(/\*\*/) || trimmed.match(/^src\//) || flags.perspectives) { + return 'discover'; + } + + // 4. Short text (< 80 chars, no special patterns) → new + if (trimmed.length > 0 && trimmed.length < 80 && !trimmed.includes('--')) { + return 'new'; + } + + // 5. Long descriptive text → discover-by-prompt + if (trimmed.length >= 80) { + return 'discover-by-prompt'; + } + + // Cannot auto-detect → ask user + return null; } ``` -### Step 8: Generate Summary +### Action Selection (AskUserQuestion) -Write summary to `{OUTPUT_DIR}/summary.md`: -```markdown -# Discovery Summary: DSC-... +```javascript +// When action cannot be auto-detected +const answer = AskUserQuestion({ + questions: [{ + question: "What would you like to do?", + header: "Action", + multiSelect: false, + options: [ + { + label: "Create New Issue (Recommended)", + description: "Create issue from GitHub URL, text description, or structured input" + }, + { + label: "Discover Issues", + description: "Multi-perspective discovery: bug, security, test, quality, performance, etc." + }, + { + label: "Discover by Prompt", + description: "Describe what to find — Gemini plans the exploration strategy iteratively" + } + ] + }] +}); -**Target**: src/auth/** -**Perspectives**: bug, security, test -**Total Findings**: 15 -**Issues Generated**: 8 - -## Priority Breakdown -- Critical: 1 -- High: 3 -- Medium: 4 - -## Top Findings - -1. **[Critical] SQL Injection in login.ts:42** - Category: security/input-validation - ... - -2. **[High] Missing null check in auth.ts:128** - Category: bug/null-check - ... 
- -## Next Steps -- Run `/issue:plan` to plan solutions for generated issues -- Use `ccw view` to review findings in dashboard +// Route based on selection +const actionMap = { + "Create New Issue": "new", + "Discover Issues": "discover", + "Discover by Prompt": "discover-by-prompt" +}; ``` -## Quality Checklist +## Data Flow -Before completing, verify: +``` +User Input (URL / text / path pattern / descriptive prompt) + ↓ +[Parse Flags + Auto-Detect Action] + ↓ +[Action Selection] ← AskUserQuestion (if needed) + ↓ +[Read Selected Phase Document] + ↓ +[Execute Phase Logic] + ↓ +[Summary + Next Steps] + ├─ After Create → Suggest issue-resolve (plan solution) + └─ After Discover → Suggest export to issues, then issue-resolve +``` -- [ ] All target files analyzed for selected perspectives -- [ ] Findings include file:line references -- [ ] Priority assigned to all findings -- [ ] Issues generated from high-priority findings -- [ ] Discovery state shows `phase: complete` -- [ ] Summary includes actionable next steps +## Subagent API Reference + +### spawn_agent + +Create a new subagent with task assignment. + +```javascript +const agentId = spawn_agent({ + message: ` +## TASK ASSIGNMENT + +### MANDATORY FIRST STEPS (Agent Execute) +1. **Read role definition**: ~/.codex/agents/{agent-type}.md (MUST read first) +2. Read: .workflow/project-tech.json +3. Read: .workflow/project-guidelines.json + +## TASK CONTEXT +${taskContext} + +## DELIVERABLES +${deliverables} +` +}) +``` + +### wait + +Get results from subagent (only way to retrieve results). + +```javascript +const result = wait({ + ids: [agentId], + timeout_ms: 600000 // 10 minutes +}) + +if (result.timed_out) { + // Handle timeout - can continue waiting or send_input to prompt completion +} + +// Check completion status +if (result.status[agentId].completed) { + const output = result.status[agentId].completed; +} +``` + +### send_input + +Continue interaction with active subagent (for clarification or follow-up). + +```javascript +send_input({ + id: agentId, + message: ` +## CLARIFICATION ANSWERS +${answers} + +## NEXT STEP +Continue with plan generation. +` +}) +``` + +### close_agent + +Clean up subagent resources (irreversible). + +```javascript +close_agent({ id: agentId }) +``` + +## Core Guidelines + +**Data Access Principle**: Issues files can grow very large. To avoid context overflow: + +| Operation | Correct | Incorrect | +|-----------|---------|-----------| +| List issues (brief) | `ccw issue list --status pending --brief` | `Read('issues.jsonl')` | +| Read issue details | `ccw issue status --json` | `Read('issues.jsonl')` | +| Create issue | `echo '...' \| ccw issue create` | Direct file write | +| Update status | `ccw issue update --status ...` | Direct file edit | + +**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` directly. 
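As a concrete illustration of the table above, a typical orchestration pass touches issue data only through the CLI; the issue ID below is a placeholder:

```bash
# Brief listing for routing decisions (never Read('issues.jsonl'))
ccw issue list --status pending --brief

# Full detail for a single issue, only when a phase actually needs it
ccw issue status ISS-20250207-143000 --json

# Create via stdin pipe; update with targeted flags
echo '{"title":"Retry loop never backs off","context":"...","priority":2}' | ccw issue create
ccw issue update ISS-20250207-143000 --status planned
```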
## Error Handling -| Situation | Action | -|-----------|--------| -| No files match pattern | Abort with clear error message | -| Perspective analysis fails | Log error, continue with other perspectives | -| No findings | Report "No issues found" (not an error) | -| External research fails | Continue without external context | +| Error | Resolution | +|-------|------------| +| No action detected | Show AskUserQuestion with all 3 options | +| Invalid action type | Show available actions, re-prompt | +| Phase execution fails | Report error, suggest manual intervention | +| No files matched (discover) | Check target pattern, verify path exists | +| Gemini planning failed (discover-by-prompt) | Retry with qwen fallback | +| Agent lifecycle errors | Ensure close_agent in error paths to prevent resource leaks | -## Schema References +## Post-Phase Next Steps -| Schema | Path | Purpose | -|--------|------|---------| -| Discovery State | `~/.claude/workflows/cli-templates/schemas/discovery-state-schema.json` | Session state | -| Discovery Finding | `~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json` | Finding format | +After successful phase execution, recommend next action: -## Start Discovery +```javascript +// After Create New (issue created) +AskUserQuestion({ + questions: [{ + question: "Issue created. What next?", + header: "Next", + multiSelect: false, + options: [ + { label: "Plan Solution", description: "Generate solution via issue-resolve" }, + { label: "Create Another", description: "Create more issues" }, + { label: "View Issues", description: "Review all issues" }, + { label: "Done", description: "Exit workflow" } + ] + }] +}); -Begin by resolving target files: - -```bash -# Parse target pattern from arguments -TARGET_PATTERN="${1:-src/**}" - -# Count matching files -find ${TARGET_PATTERN} -type f \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" \) | wc -l +// After Discover / Discover by Prompt (discoveries generated) +AskUserQuestion({ + questions: [{ + question: "Discovery complete. What next?", + header: "Next", + multiSelect: false, + options: [ + { label: "Export to Issues", description: "Convert discoveries to issues" }, + { label: "Plan Solutions", description: "Plan solutions for exported issues via issue-resolve" }, + { label: "Done", description: "Exit workflow" } + ] + }] +}); ``` -Then proceed with perspective selection and analysis. +## Related Skills & Commands + +- `issue-resolve` - Plan solutions, convert artifacts, form queues, from brainstorm +- `issue-manage` - Interactive issue CRUD operations +- `/issue:execute` - Execute queue with DAG-based parallel orchestration +- `ccw issue list` - List all issues +- `ccw issue status ` - View issue details diff --git a/.codex/skills/issue-discover/phases/01-issue-new.md b/.codex/skills/issue-discover/phases/01-issue-new.md new file mode 100644 index 00000000..34e988c6 --- /dev/null +++ b/.codex/skills/issue-discover/phases/01-issue-new.md @@ -0,0 +1,348 @@ +# Phase 1: Create New Issue + +> 来源: `commands/issue/new.md` + +## Overview + +Create structured issue from GitHub URL or text description with clarity-based flow control. 
+ +**Core workflow**: Input Analysis → Clarity Detection → Data Extraction → Optional Clarification → GitHub Publishing → Create Issue + +**Input sources**: +- **GitHub URL** - `https://github.com/owner/repo/issues/123` or `#123` +- **Structured text** - Text with expected/actual/affects keywords +- **Vague text** - Short description that needs clarification + +**Output**: +- **Issue** (GH-xxx or ISS-YYYYMMDD-HHMMSS) - Registered issue ready for planning + +## Prerequisites + +- `gh` CLI available (for GitHub URLs) +- `ccw issue` CLI available + +## Auto Mode + +When `--yes` or `-y`: Skip clarification questions, create issue with inferred details. + +## Arguments + +| Argument | Required | Type | Default | Description | +|----------|----------|------|---------|-------------| +| input | Yes | String | - | GitHub URL, `#number`, or text description | +| --priority | No | Integer | auto | Priority 1-5 (auto-inferred if omitted) | +| -y, --yes | No | Flag | false | Skip all confirmations | + +## Issue Structure + +```typescript +interface Issue { + id: string; // GH-123 or ISS-YYYYMMDD-HHMMSS + title: string; + status: 'registered' | 'planned' | 'queued' | 'in_progress' | 'completed' | 'failed'; + priority: number; // 1 (critical) to 5 (low) + context: string; // Problem description (single source of truth) + source: 'github' | 'text' | 'discovery'; + source_url?: string; + labels?: string[]; + + // GitHub binding (for non-GitHub sources that publish to GitHub) + github_url?: string; + github_number?: number; + + // Optional structured fields + expected_behavior?: string; + actual_behavior?: string; + affected_components?: string[]; + + // Feedback history + feedback?: { + type: 'failure' | 'clarification' | 'rejection'; + stage: string; + content: string; + created_at: string; + }[]; + + bound_solution_id: string | null; + created_at: string; + updated_at: string; +} +``` + +## Execution Steps + +### Step 1.1: Input Analysis & Clarity Detection + +```javascript +const input = userInput.trim(); +const flags = parseFlags(userInput); + +// Detect input type and clarity +const isGitHubUrl = input.match(/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/); +const isGitHubShort = input.match(/^#(\d+)$/); +const hasStructure = input.match(/(expected|actual|affects|steps):/i); + +// Clarity score: 0-3 +let clarityScore = 0; +if (isGitHubUrl || isGitHubShort) clarityScore = 3; // GitHub = fully clear +else if (hasStructure) clarityScore = 2; // Structured text = clear +else if (input.length > 50) clarityScore = 1; // Long text = somewhat clear +else clarityScore = 0; // Vague + +let issueData = {}; +``` + +### Step 1.2: Data Extraction (GitHub or Text) + +```javascript +if (isGitHubUrl || isGitHubShort) { + // GitHub - fetch via gh CLI + const result = Bash(`gh issue view ${extractIssueRef(input)} --json number,title,body,labels,url`); + const gh = JSON.parse(result); + issueData = { + id: `GH-${gh.number}`, + title: gh.title, + source: 'github', + source_url: gh.url, + labels: gh.labels.map(l => l.name), + context: gh.body?.substring(0, 500) || gh.title, + ...parseMarkdownBody(gh.body) + }; +} else { + // Text description + issueData = { + id: `ISS-${new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14)}`, + source: 'text', + ...parseTextDescription(input) + }; +} +``` + +### Step 1.3: Lightweight Context Hint (Conditional) + +```javascript +// ACE search ONLY for medium clarity (1-2) AND missing components +// Skip for: GitHub (has context), vague (needs clarification first) +if (clarityScore >= 1 && 
clarityScore <= 2 && !issueData.affected_components?.length) { + const keywords = extractKeywords(issueData.context); + + if (keywords.length >= 2) { + try { + const aceResult = mcp__ace-tool__search_context({ + project_root_path: process.cwd(), + query: keywords.slice(0, 3).join(' ') + }); + issueData.affected_components = aceResult.files?.slice(0, 3) || []; + } catch { + // ACE failure is non-blocking + } + } +} +``` + +### Step 1.4: Conditional Clarification (Only if Unclear) + +```javascript +// ONLY ask questions if clarity is low +if (clarityScore < 2 && (!issueData.context || issueData.context.length < 20)) { + const answer = AskUserQuestion({ + questions: [{ + question: 'Please describe the issue in more detail:', + header: 'Clarify', + multiSelect: false, + options: [ + { label: 'Provide details', description: 'Describe what, where, and expected behavior' } + ] + }] + }); + + if (answer.customText) { + issueData.context = answer.customText; + issueData.title = answer.customText.split(/[.\n]/)[0].substring(0, 60); + issueData.feedback = [{ + type: 'clarification', + stage: 'new', + content: answer.customText, + created_at: new Date().toISOString() + }]; + } +} +``` + +### Step 1.5: GitHub Publishing Decision (Non-GitHub Sources) + +```javascript +// For non-GitHub sources, ask if user wants to publish to GitHub +let publishToGitHub = false; + +if (issueData.source !== 'github') { + const publishAnswer = AskUserQuestion({ + questions: [{ + question: 'Would you like to publish this issue to GitHub?', + header: 'Publish', + multiSelect: false, + options: [ + { label: 'Yes, publish to GitHub', description: 'Create issue on GitHub and link it' }, + { label: 'No, keep local only', description: 'Store as local issue without GitHub sync' } + ] + }] + }); + + publishToGitHub = publishAnswer.answers?.['Publish']?.includes('Yes'); +} +``` + +### Step 1.6: Create Issue + +**Issue Creation** (via CLI endpoint): +```bash +# Option 1: Pipe input (recommended for complex JSON) +echo '{"title":"...", "context":"...", "priority":3}' | ccw issue create + +# Option 2: Heredoc (for multi-line JSON) +ccw issue create << 'EOF' +{"title":"...", "context":"含\"引号\"的内容", "priority":3} +EOF +``` + +**GitHub Publishing** (if user opted in): +```javascript +// Step 1: Create local issue FIRST +const localIssue = createLocalIssue(issueData); // ccw issue create + +// Step 2: Publish to GitHub if requested +if (publishToGitHub) { + const ghResult = Bash(`gh issue create --title "${issueData.title}" --body "${issueData.context}"`); + const ghUrl = ghResult.match(/https:\/\/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/)?.[0]; + const ghNumber = parseInt(ghUrl?.match(/\/issues\/(\d+)/)?.[1]); + + if (ghNumber) { + Bash(`ccw issue update ${localIssue.id} --github-url "${ghUrl}" --github-number ${ghNumber}`); + } +} +``` + +**Workflow:** +``` +1. Create local issue (ISS-YYYYMMDD-NNN) → stored in .workflow/issues.jsonl +2. If publishToGitHub: + a. gh issue create → returns GitHub URL + b. Update local issue with github_url + github_number binding +3. Both local and GitHub issues exist, linked together +``` + +## Execution Flow + +``` +Phase 1: Input Analysis + └─ Detect clarity score (GitHub URL? Structured text? Keywords?) 
+ +Phase 2: Data Extraction (branched by clarity) + ┌────────────┬─────────────────┬──────────────┐ + │ Score 3 │ Score 1-2 │ Score 0 │ + │ GitHub │ Text + ACE │ Vague │ + ├────────────┼─────────────────┼──────────────┤ + │ gh CLI │ Parse struct │ AskQuestion │ + │ → parse │ + quick hint │ (1 question) │ + │ │ (3 files max) │ → feedback │ + └────────────┴─────────────────┴──────────────┘ + +Phase 3: GitHub Publishing Decision (non-GitHub only) + ├─ Source = github: Skip (already from GitHub) + └─ Source ≠ github: AskUserQuestion + ├─ Yes → publishToGitHub = true + └─ No → publishToGitHub = false + +Phase 4: Create Issue + ├─ Score ≥ 2: Direct creation + └─ Score < 2: Confirm first → Create + └─ If publishToGitHub: gh issue create → link URL + +Note: Deep exploration & lifecycle deferred to /issue:plan +``` + +## Helper Functions + +```javascript +function extractKeywords(text) { + const stopWords = new Set(['the', 'a', 'an', 'is', 'are', 'was', 'were', 'not', 'with']); + return text + .toLowerCase() + .split(/\W+/) + .filter(w => w.length > 3 && !stopWords.has(w)) + .slice(0, 5); +} + +function parseTextDescription(text) { + const result = { title: '', context: '' }; + const sentences = text.split(/\.(?=\s|$)/); + + result.title = sentences[0]?.trim().substring(0, 60) || 'Untitled'; + result.context = text.substring(0, 500); + + const expected = text.match(/expected:?\s*([^.]+)/i); + const actual = text.match(/actual:?\s*([^.]+)/i); + const affects = text.match(/affects?:?\s*([^.]+)/i); + + if (expected) result.expected_behavior = expected[1].trim(); + if (actual) result.actual_behavior = actual[1].trim(); + if (affects) { + result.affected_components = affects[1].split(/[,\s]+/).filter(c => c.includes('/') || c.includes('.')); + } + + return result; +} + +function parseMarkdownBody(body) { + if (!body) return {}; + const result = {}; + + const problem = body.match(/##?\s*(problem|description)[:\s]*([\s\S]*?)(?=##|$)/i); + const expected = body.match(/##?\s*expected[:\s]*([\s\S]*?)(?=##|$)/i); + const actual = body.match(/##?\s*actual[:\s]*([\s\S]*?)(?=##|$)/i); + + if (problem) result.context = problem[2].trim().substring(0, 500); + if (expected) result.expected_behavior = expected[2].trim(); + if (actual) result.actual_behavior = actual[2].trim(); + + return result; +} +``` + +## Error Handling + +| Error | Message | Resolution | +|-------|---------|------------| +| GitHub fetch failed | gh CLI error | Check gh auth, verify URL | +| Clarity too low | Input unclear | Ask clarification question | +| Issue creation failed | CLI error | Verify ccw issue endpoint | +| GitHub publish failed | gh issue create error | Create local-only, skip GitHub | + +## Examples + +### Clear Input (No Questions) + +```bash +issue-discover https://github.com/org/repo/issues/42 +# → Fetches, parses, creates immediately + +issue-discover "Login fails with special chars. Expected: success. 
Actual: 500" +# → Parses structure, creates immediately +``` + +### Vague Input (1 Question) + +```bash +issue-discover "auth broken" +# → Asks: "Please describe the issue in more detail" +# → User provides details → saved to feedback[] +# → Creates issue +``` + +## Post-Phase Update + +After issue creation: +- Issue created with `status: registered` +- Report: issue ID, title, source, affected components +- Show GitHub URL (if published) +- Recommend next step: `/issue:plan ` or `issue-resolve ` diff --git a/.codex/skills/issue-new/SKILL.md b/.codex/skills/issue-new/SKILL.md deleted file mode 100644 index 4d187c56..00000000 --- a/.codex/skills/issue-new/SKILL.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -name: issue-new -description: Create structured issue from GitHub URL or text description. Auto mode with --yes flag. -argument-hint: "[--yes|-y] [--priority PRIORITY] [--labels LABELS]" ---- - -# Issue New Command - -## Core Principles - -**Requirement Clarity Detection** → Ask only when needed -**Flexible Parameter Input** → Support multiple formats and flags -**Auto Mode Support** → `--yes`/`-y` skips confirmation questions - -``` -Clear Input (GitHub URL, structured text) → Direct creation (no questions) -Unclear Input (vague description) → Clarifying questions (unless --yes) -Auto Mode (--yes or -y flag) → Skip all questions, use inference -``` - -## Parameter Formats - -```bash -# GitHub URL (auto-detected) -/prompts:issue-new https://github.com/owner/repo/issues/123 -/prompts:issue-new GH-123 - -# Text description with priority -/prompts:issue-new "Login fails with special chars" --priority 1 - -# Auto mode - skip all questions -/prompts:issue-new --yes "something broken" -/prompts:issue-new -y https://github.com/owner/repo/issues/456 - -# With labels -/prompts:issue-new "Database migration needed" --priority 2 --labels "enhancement,database" -``` - -## Issue Structure - -```typescript -interface Issue { - id: string; // GH-123 or ISS-YYYYMMDD-HHMMSS - title: string; - status: 'registered' | 'planned' | 'queued' | 'in_progress' | 'completed' | 'failed'; - priority: number; // 1 (critical) to 5 (low) - context: string; // Problem description - source: 'github' | 'text' | 'discovery'; - source_url?: string; - labels?: string[]; - - // GitHub binding (for non-GitHub sources that publish to GitHub) - github_url?: string; - github_number?: number; - - // Optional structured fields - expected_behavior?: string; - actual_behavior?: string; - affected_components?: string[]; - - // Solution binding - bound_solution_id: string | null; - - // Timestamps - created_at: string; - updated_at: string; -} -``` - -## Inputs - -- **GitHub URL**: `https://github.com/owner/repo/issues/123` or `#123` -- **Text description**: Natural language description -- **Priority flag**: `--priority 1-5` (optional, default: 3) - -## Output Requirements - -**Create Issue via CLI** (preferred method): -```bash -# Pipe input (recommended for complex JSON) -echo '{"title":"...", "context":"...", "priority":3}' | ccw issue create - -# Returns created issue JSON -{"id":"ISS-20251229-001","title":"...","status":"registered",...} -``` - -**Return Summary:** -```json -{ - "created": true, - "id": "ISS-20251229-001", - "title": "Login fails with special chars", - "source": "text", - "github_published": false, - "next_step": "/issue:plan ISS-20251229-001" -} -``` - -## Workflow - -### Phase 0: Parse Arguments & Flags - -Extract parameters from user input: - -```bash -# Input examples (Codex placeholders) -INPUT="$1" # GitHub URL or 
text description -AUTO_MODE="$2" # Check for --yes or -y flag - -# Parse flags (comma-separated in single argument) -PRIORITY=$(echo "$INPUT" | grep -oP '(?<=--priority\s)\d+' || echo "3") -LABELS=$(echo "$INPUT" | grep -oP '(?<=--labels\s)\K[^-]*' | xargs) -AUTO_YES=$(echo "$INPUT" | grep -qE '--yes|-y' && echo "true" || echo "false") - -# Extract main input (URL or text) - remove all flags -MAIN_INPUT=$(echo "$INPUT" | sed 's/\s*--priority\s*\d*//; s/\s*--labels\s*[^-]*//; s/\s*--yes\s*//; s/\s*-y\s*//' | xargs) -``` - -### Phase 1: Analyze Input & Clarity Detection - -```javascript -const mainInput = userInput.trim(); - -// Detect input type and clarity -const isGitHubUrl = mainInput.match(/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/); -const isGitHubShort = mainInput.match(/^GH-?\d+$/); -const hasStructure = mainInput.match(/(expected|actual|affects|steps):/i); - -// Clarity score: 0-3 -let clarityScore = 0; -if (isGitHubUrl || isGitHubShort) clarityScore = 3; // GitHub = fully clear -else if (hasStructure) clarityScore = 2; // Structured text = clear -else if (mainInput.length > 50) clarityScore = 1; // Long text = somewhat clear -else clarityScore = 0; // Vague - -// Auto mode override: if --yes/-y flag, skip all questions -const skipQuestions = process.env.AUTO_YES === 'true'; -``` - -### Phase 2: Extract Issue Data & Priority - -**For GitHub URL/Short:** - -```bash -# Fetch issue details via gh CLI -gh issue view --json number,title,body,labels,url - -# Parse response with priority override -{ - "id": "GH-123", - "title": "...", - "priority": $PRIORITY || 3, # Use --priority flag if provided - "source": "github", - "source_url": "https://github.com/...", - "labels": $LABELS || [...existing labels], - "context": "..." -} -``` - -**For Text Description:** - -```javascript -// Generate issue ID -const id = `ISS-${new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14)}`; - -// Parse structured fields if present -const expected = text.match(/expected:?\s*([^.]+)/i); -const actual = text.match(/actual:?\s*([^.]+)/i); -const affects = text.match(/affects?:?\s*([^.]+)/i); - -// Build issue data with flags -{ - "id": id, - "title": text.split(/[.\n]/)[0].substring(0, 60), - "priority": $PRIORITY || 3, # From --priority flag - "labels": $LABELS?.split(',') || [], # From --labels flag - "source": "text", - "context": text.substring(0, 500), - "expected_behavior": expected?.[1]?.trim(), - "actual_behavior": actual?.[1]?.trim() -} -``` - -### Phase 3: Context Hint (Conditional) - -For medium clarity (score 1-2) without affected components: - -```bash -# Use rg to find potentially related files -rg -l "" --type ts | head -5 -``` - -Add discovered files to `affected_components` (max 3 files). - -**Note**: Skip this for GitHub issues (already have context) and vague inputs (needs clarification first). - -### Phase 4: Conditional Clarification (Skip if Auto Mode) - -**Only ask if**: clarity < 2 AND NOT in auto mode (skipQuestions = false) - -If auto mode (`--yes`/`-y`), proceed directly to creation with inferred details. - -Otherwise, present minimal clarification: - -``` -Input unclear. Please describe: -- What is the issue about? -- Where does it occur? -- What is the expected behavior? -``` - -Wait for user response, then update issue data. - -### Phase 5: GitHub Publishing Decision (Skip if Already GitHub) - -For non-GitHub sources, determine if user wants to publish to GitHub: - -``` - -For non-GitHub sources AND NOT auto mode, ask: - -``` -Would you like to publish this issue to GitHub? 
-1. Yes, publish to GitHub (create issue and link it) -2. No, keep local only (store without GitHub sync) -``` - -In auto mode: Default to NO (keep local only, unless explicitly requested with --publish flag). - -### Phase 6: Create Issue - -**Create via CLI:** - -```bash -# Build issue JSON -ISSUE_JSON='{"title":"...","context":"...","priority":3,"source":"text"}' - -# Create issue (auto-generates ID) -echo "${ISSUE_JSON}" | ccw issue create -``` - -**If publishing to GitHub:** - -```bash -# Create on GitHub first -GH_URL=$(gh issue create --title "..." --body "..." | grep -oE 'https://github.com/[^ ]+') -GH_NUMBER=$(echo $GH_URL | grep -oE '/issues/([0-9]+)$' | grep -oE '[0-9]+') - -# Update local issue with binding -ccw issue update ${ISSUE_ID} --github-url "${GH_URL}" --github-number ${GH_NUMBER} -``` - -### Phase 7: Output Result - -```markdown -## Issue Created - -**ID**: ISS-20251229-001 -**Title**: Login fails with special chars -**Source**: text -**Priority**: 2 (High) - -**Context**: -500 error when password contains quotes - -**Affected Components**: -- src/auth/login.ts -- src/utils/validation.ts - -**GitHub**: Not published (local only) - -**Next Step**: `/issue:plan ISS-20251229-001` -``` - -## Quality Checklist - -Before completing, verify: - -- [ ] Issue ID generated correctly (GH-xxx or ISS-YYYYMMDD-HHMMSS) -- [ ] Title extracted (max 60 chars) -- [ ] Context captured (problem description) -- [ ] Priority assigned (1-5) -- [ ] Status set to `registered` -- [ ] Created via `ccw issue create` CLI command - -## Error Handling - -| Situation | Action | -|-----------|--------| -| GitHub URL not accessible | Report error, suggest text input | -| gh CLI not available | Fall back to text-based creation | -| Empty input | Prompt for description | -| Very vague input | Ask clarifying questions | -| Issue already exists | Report duplicate, show existing | - - -## Start Execution - -### Parameter Parsing (Phase 0) - -```bash -# Codex passes full input as $1 -INPUT="$1" - -# Extract flags -AUTO_YES=false -PRIORITY=3 -LABELS="" - -# Parse using parameter expansion -while [[ $INPUT == -* ]]; do - case $INPUT in - -y|--yes) - AUTO_YES=true - INPUT="${INPUT#* }" # Remove flag and space - ;; - --priority) - PRIORITY="${INPUT#* }" - PRIORITY="${PRIORITY%% *}" # Extract next word - INPUT="${INPUT#*--priority $PRIORITY }" - ;; - --labels) - LABELS="${INPUT#* }" - LABELS="${LABELS%% --*}" # Extract until next flag - INPUT="${INPUT#*--labels $LABELS }" - ;; - *) - INPUT="${INPUT#* }" - ;; - esac -done - -# Remaining text is the main input (GitHub URL or description) -MAIN_INPUT="$INPUT" -``` - -### Execution Flow (All Phases) - -``` -1. Parse Arguments (Phase 0) - └─ Extract: AUTO_YES, PRIORITY, LABELS, MAIN_INPUT - -2. Detect Input Type & Clarity (Phase 1) - ├─ GitHub URL/Short? → Score 3 (clear) - ├─ Structured text? → Score 2 (somewhat clear) - ├─ Long text? → Score 1 (vague) - └─ Short text? → Score 0 (very vague) - -3. Extract Issue Data (Phase 2) - ├─ If GitHub: gh CLI fetch + parse - └─ If text: Parse structure + apply PRIORITY/LABELS flags - -4. Context Hint (Phase 3, conditional) - └─ Only for clarity 1-2 AND no components → ACE search (max 3 files) - -5. Clarification (Phase 4, conditional) - └─ If clarity < 2 AND NOT auto mode → Ask for details - └─ If auto mode (AUTO_YES=true) → Skip, use inferred data - -6. 
GitHub Publishing (Phase 5, conditional) - ├─ If source = github → Skip (already from GitHub) - └─ If source != github: - ├─ If auto mode → Default NO (keep local) - └─ If manual → Ask user preference - -7. Create Issue (Phase 6) - ├─ Create local issue via ccw CLI - └─ If publishToGitHub → gh issue create → link - -8. Output Result (Phase 7) - └─ Display: ID, title, source, GitHub status, next step -``` - -## Quick Examples - -```bash -# Auto mode - GitHub issue (no questions) -/prompts:issue-new -y https://github.com/org/repo/issues/42 - -# Standard mode - text with priority -/prompts:issue-new "Database connection timeout" --priority 1 - -# Auto mode - text with priority and labels -/prompts:issue-new --yes "Add caching layer" --priority 2 --labels "enhancement,performance" - -# GitHub short format -/prompts:issue-new GH-123 - -# Complex text description -/prompts:issue-new "User login fails. Expected: redirect to dashboard. Actual: 500 error" -``` \ No newline at end of file diff --git a/.codex/skills/issue-plan/SKILL.md b/.codex/skills/issue-plan/SKILL.md deleted file mode 100644 index 42b1ec24..00000000 --- a/.codex/skills/issue-plan/SKILL.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -name: issue-plan -description: Plan issue(s) into bound solutions using subagent pattern (explore + plan closed-loop) -argument-hint: "[,,...] [--all-pending] [--batch-size 4]" ---- - -# Issue Plan (Codex Version) - -## Goal - -Create executable solution(s) for issue(s) and bind the selected solution to each issue using `ccw issue bind`. - -This workflow uses **subagent pattern** for parallel batch processing: spawn planning agents per batch, wait for results, handle multi-solution selection. - -## Core Guidelines - -**⚠️ Data Access Principle**: Issues and solutions files can grow very large. To avoid context overflow: - -| Operation | Correct | Incorrect | -|-----------|---------|-----------| -| List issues (brief) | `ccw issue list --status pending --brief` | Read issues.jsonl | -| Read issue details | `ccw issue status --json` | Read issues.jsonl | -| Update status | `ccw issue update --status ...` | Direct file edit | -| Bind solution | `ccw issue bind ` | Direct file edit | - -**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `solutions/*.jsonl` directly. - -## Inputs - -- **Explicit issues**: comma-separated IDs, e.g. `ISS-123,ISS-124` -- **All pending**: `--all-pending` → plan all issues in `registered` status -- **Batch size**: `--batch-size N` (default `4`) → max issues per subagent batch - -## Output Requirements - -For each issue: -- Register at least one solution and bind one solution to the issue -- Ensure tasks conform to `~/.claude/workflows/cli-templates/schemas/solution-schema.json` -- Each task includes quantified `acceptance.criteria` and concrete `acceptance.verification` - -Return a final summary JSON: -```json -{ - "bound": [{ "issue_id": "...", "solution_id": "...", "task_count": 0 }], - "pending_selection": [{ "issue_id": "...", "solutions": [{ "id": "...", "task_count": 0, "description": "..." 
}] }], - "conflicts": [{ "file": "...", "issues": ["..."] }] -} -``` - -## Workflow - -### Step 1: Resolve Issue List - -**If `--all-pending`:** -```bash -ccw issue list --status registered --json -``` - -**Else (explicit IDs):** -```bash -# For each ID, ensure exists -ccw issue init --title "Issue " 2>/dev/null || true -ccw issue status --json -``` - -### Step 2: Group Issues by Similarity - -Group issues for batch processing (max 4 per batch): - -```bash -# Extract issue metadata for grouping -ccw issue list --status registered --brief --json -``` - -Group by: -- Shared tags -- Similar keywords in title -- Related components - -### Step 3: Spawn Planning Subagents (Parallel) - -For each batch, spawn a planning subagent: - -```javascript -// Subagent message structure -spawn_agent({ - message: ` -## TASK ASSIGNMENT - -### MANDATORY FIRST STEPS (Agent Execute) -1. **Read role definition**: ~/.codex/agents/issue-plan-agent.md (MUST read first) -2. Read: .workflow/project-tech.json -3. Read: .workflow/project-guidelines.json -4. Read schema: ~/.claude/workflows/cli-templates/schemas/solution-schema.json - ---- - -Goal: Plan solutions for ${batch.length} issues with executable task breakdown - -Scope: -- CAN DO: Explore codebase, design solutions, create tasks -- CANNOT DO: Execute solutions, modify production code -- Directory: ${process.cwd()} - -Context: -- Issues: ${batch.map(i => `${i.id}: ${i.title}`).join('\n')} -- Fetch full details: ccw issue status --json - -Deliverables: -- For each issue: Write solution to .workflow/issues/solutions/{issue-id}.jsonl -- Single solution → auto-bind via ccw issue bind -- Multiple solutions → return in pending_selection - -Quality bar: -- Tasks have quantified acceptance.criteria -- Each task includes test.commands -- Solution follows schema exactly -` -}) -``` - -**Batch execution (parallel):** -```javascript -// Launch all batches in parallel -const agentIds = batches.map(batch => spawn_agent({ message: buildPrompt(batch) })) - -// Wait for all agents to complete -const results = wait({ ids: agentIds, timeout_ms: 900000 }) // 15 min - -// Collect results -const allBound = [] -const allPendingSelection = [] -const allConflicts = [] - -for (const id of agentIds) { - if (results.status[id].completed) { - const result = JSON.parse(results.status[id].completed) - allBound.push(...(result.bound || [])) - allPendingSelection.push(...(result.pending_selection || [])) - allConflicts.push(...(result.conflicts || [])) - } -} - -// Close all agents -agentIds.forEach(id => close_agent({ id })) -``` - -### Step 4: Handle Multi-Solution Selection - -If `pending_selection` is non-empty, present options: - -``` -Issue ISS-001 has multiple solutions: -1. SOL-ISS-001-1: Refactor with adapter pattern (3 tasks) -2. 
SOL-ISS-001-2: Direct implementation (2 tasks) - -Select solution (1-2): -``` - -Bind selected solution: -```bash -ccw issue bind ISS-001 SOL-ISS-001-1 -``` - -### Step 5: Handle Conflicts - -If conflicts detected: -- Low/Medium severity: Auto-resolve with recommended order -- High severity: Present to user for decision - -### Step 6: Update Issue Status - -After binding, update status: -```bash -ccw issue update --status planned -``` - -### Step 7: Output Summary - -```markdown -## Planning Complete - -**Planned**: 5 issues -**Bound Solutions**: 4 -**Pending Selection**: 1 - -### Bound Solutions -| Issue | Solution | Tasks | -|-------|----------|-------| -| ISS-001 | SOL-ISS-001-1 | 3 | -| ISS-002 | SOL-ISS-002-1 | 2 | - -### Pending Selection -- ISS-003: 2 solutions available (user selection required) - -### Conflicts Detected -- src/auth.ts touched by ISS-001, ISS-002 (resolved: sequential) - -**Next Step**: `/issue:queue` -``` - -## Subagent Role Reference - -Planning subagent uses role file at: `~/.codex/agents/issue-plan-agent.md` - -Role capabilities: -- Codebase exploration (rg, file reading) -- Solution design with task breakdown -- Schema validation -- Solution registration via CLI - -## Quality Checklist - -Before completing, verify: - -- [ ] All input issues have solutions in `solutions/{issue-id}.jsonl` -- [ ] Single solution issues are auto-bound (`bound_solution_id` set) -- [ ] Multi-solution issues returned in `pending_selection` for user choice -- [ ] Each solution has executable tasks with `modification_points` -- [ ] Task acceptance criteria are quantified (not vague) -- [ ] Conflicts detected and reported (if multiple issues touch same files) -- [ ] Issue status updated to `planned` after binding -- [ ] All subagents closed after completion - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Issue not found | Auto-create via `ccw issue init` | -| Subagent timeout | Retry with increased timeout or smaller batch | -| No solutions generated | Display error, suggest manual planning | -| User cancels selection | Skip issue, continue with others | -| File conflicts | Detect and suggest resolution order | - -## Start Execution - -Begin by resolving issue list: - -```bash -# Default to all pending -ccw issue list --status registered --brief --json - -# Or with explicit IDs -ccw issue status ISS-001 --json -``` - -Then group issues and spawn planning subagents. diff --git a/.codex/skills/issue-queue/SKILL.md b/.codex/skills/issue-queue/SKILL.md deleted file mode 100644 index 680bdad2..00000000 --- a/.codex/skills/issue-queue/SKILL.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -name: issue-queue -description: Form execution queue from bound solutions using subagent for conflict analysis and ordering -argument-hint: "[--queues ] [--issue ] [--append ]" ---- - -# Issue Queue (Codex Version) - -## Goal - -Create an ordered execution queue from all bound solutions. Uses **subagent pattern** to analyze inter-solution file conflicts, calculate semantic priorities, and assign parallel/sequential execution groups. - -**Design Principle**: Queue items are **solutions**, not individual tasks. Each executor receives a complete solution with all its tasks. - -## Core Guidelines - -**⚠️ Data Access Principle**: Issues and queue files can grow very large. 
To avoid context overflow: - -| Operation | Correct | Incorrect | -|-----------|---------|-----------| -| List issues (brief) | `ccw issue list --status planned --brief` | Read issues.jsonl | -| List queue (brief) | `ccw issue queue --brief` | Read queues/*.json | -| Read issue details | `ccw issue status --json` | Read issues.jsonl | -| Get next item | `ccw issue next --json` | Read queues/*.json | -| Sync from queue | `ccw issue update --from-queue` | Direct file edit | - -**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `queues/*.json` directly. - -## Inputs - -- **All planned**: Default behavior → queue all issues with `planned` status and bound solutions -- **Multiple queues**: `--queues ` → create N parallel queues -- **Specific issue**: `--issue ` → queue only that issue's solution -- **Append mode**: `--append ` → append issue to active queue (don't create new) - -## Output Requirements - -**Generate Files (EXACTLY 2):** -1. `.workflow/issues/queues/{queue-id}.json` - Full queue with solutions, conflicts, groups -2. `.workflow/issues/queues/index.json` - Update with new queue entry - -**Return Summary:** -```json -{ - "queue_id": "QUE-YYYYMMDD-HHMMSS", - "total_solutions": 3, - "total_tasks": 12, - "execution_groups": [{ "id": "P1", "type": "parallel", "count": 2 }], - "conflicts_resolved": 1, - "issues_queued": ["ISS-xxx", "ISS-yyy"] -} -``` - -## Workflow - -### Step 1: Generate Queue ID and Load Solutions - -```bash -# Generate queue ID -QUEUE_ID="QUE-$(date -u +%Y%m%d-%H%M%S)" - -# Load planned issues with bound solutions -ccw issue list --status planned --json -``` - -For each issue, extract: -- `id`, `bound_solution_id`, `priority` -- Read solution from `.workflow/issues/solutions/{issue-id}.jsonl` -- Collect `files_touched` from all tasks' `modification_points.file` - -Build solution list: -```json -[ - { - "issue_id": "ISS-xxx", - "solution_id": "SOL-xxx", - "task_count": 3, - "files_touched": ["src/auth.ts", "src/utils.ts"], - "priority": "medium" - } -] -``` - -### Step 2: Spawn Queue Agent for Conflict Analysis - -Spawn subagent to analyze conflicts and order solutions: - -```javascript -const agentId = spawn_agent({ - message: ` -## TASK ASSIGNMENT - -### MANDATORY FIRST STEPS (Agent Execute) -1. **Read role definition**: ~/.codex/agents/issue-queue-agent.md (MUST read first) -2. Read: .workflow/project-tech.json -3. Read: .workflow/project-guidelines.json - ---- - -Goal: Order ${solutions.length} solutions into execution queue with conflict resolution - -Scope: -- CAN DO: Analyze file conflicts, calculate priorities, assign groups -- CANNOT DO: Execute solutions, modify code -- Queue ID: ${QUEUE_ID} - -Context: -- Solutions: ${JSON.stringify(solutions, null, 2)} -- Project Root: ${process.cwd()} - -Deliverables: -1. Write queue JSON to: .workflow/issues/queues/${QUEUE_ID}.json -2. Update index: .workflow/issues/queues/index.json -3. 
Return summary JSON - -Quality bar: -- No circular dependencies in DAG -- Parallel groups have NO file overlaps -- Semantic priority calculated (0.0-1.0) -- All conflicts resolved with rationale -` -}) - -// Wait for agent completion -const result = wait({ ids: [agentId], timeout_ms: 600000 }) - -// Parse result -const summary = JSON.parse(result.status[agentId].completed) - -// Check for clarifications -if (summary.clarifications?.length > 0) { - // Handle high-severity conflicts requiring user input - for (const clarification of summary.clarifications) { - console.log(`Conflict: ${clarification.question}`) - console.log(`Options: ${clarification.options.join(', ')}`) - // Get user input and send back - send_input({ - id: agentId, - message: `Conflict ${clarification.conflict_id} resolved: ${userChoice}` - }) - wait({ ids: [agentId], timeout_ms: 300000 }) - } -} - -// Close agent -close_agent({ id: agentId }) -``` - -### Step 3: Multi-Queue Support (if --queues > 1) - -When creating multiple parallel queues: - -1. **Partition solutions** to minimize cross-queue file conflicts -2. **Spawn N agents in parallel** (one per queue) -3. **Wait for all agents** with batch wait - -```javascript -// Partition solutions by file overlap -const partitions = partitionSolutions(solutions, numQueues) - -// Spawn agents in parallel -const agentIds = partitions.map((partition, i) => - spawn_agent({ - message: buildQueuePrompt(partition, `${QUEUE_ID}-${i+1}`, i+1, numQueues) - }) -) - -// Batch wait for all agents -const results = wait({ ids: agentIds, timeout_ms: 600000 }) - -// Collect clarifications from all agents -const allClarifications = agentIds.flatMap((id, i) => - (results.status[id].clarifications || []).map(c => ({ ...c, queue_id: `${QUEUE_ID}-${i+1}`, agent_id: id })) -) - -// Handle clarifications, then close all agents -agentIds.forEach(id => close_agent({ id })) -``` - -### Step 4: Update Issue Statuses - -**MUST use CLI command:** - -```bash -# Batch update from queue (recommended) -ccw issue update --from-queue ${QUEUE_ID} - -# Or individual update -ccw issue update --status queued -``` - -### Step 5: Active Queue Check - -```bash -ccw issue queue list --brief -``` - -**Decision:** -- If no active queue: `ccw issue queue switch ${QUEUE_ID}` -- If active queue exists: Present options to user - -``` -Active queue exists. Choose action: -1. Merge into existing queue -2. Use new queue (keep existing in history) -3. 
Cancel (delete new queue) - -Select (1-3): -``` - -### Step 6: Output Summary - -```markdown -## Queue Formed: ${QUEUE_ID} - -**Solutions**: 5 -**Tasks**: 18 -**Execution Groups**: 3 - -### Execution Order -| # | Item | Issue | Tasks | Group | Files | -|---|------|-------|-------|-------|-------| -| 1 | S-1 | ISS-001 | 3 | P1 | src/auth.ts | -| 2 | S-2 | ISS-002 | 2 | P1 | src/api.ts | -| 3 | S-3 | ISS-003 | 4 | S2 | src/auth.ts | - -### Conflicts Resolved -- src/auth.ts: S-1 → S-3 (sequential, S-1 creates module) - -**Next Step**: `/issue:execute --queue ${QUEUE_ID}` -``` - -## Subagent Role Reference - -Queue agent uses role file at: `~/.codex/agents/issue-queue-agent.md` - -Role capabilities: -- File conflict detection (5 types) -- Dependency DAG construction -- Semantic priority calculation -- Execution group assignment - -## Queue File Schema - -```json -{ - "id": "QUE-20251228-120000", - "status": "active", - "issue_ids": ["ISS-001", "ISS-002"], - "solutions": [ - { - "item_id": "S-1", - "issue_id": "ISS-001", - "solution_id": "SOL-ISS-001-1", - "status": "pending", - "execution_order": 1, - "execution_group": "P1", - "depends_on": [], - "semantic_priority": 0.8, - "files_touched": ["src/auth.ts"], - "task_count": 3 - } - ], - "conflicts": [...], - "execution_groups": [...] -} -``` - -## Quality Checklist - -Before completing, verify: - -- [ ] Exactly 2 files generated: queue JSON + index update -- [ ] Queue has valid DAG (no circular dependencies) -- [ ] All file conflicts resolved with rationale -- [ ] Semantic priority calculated for each solution (0.0-1.0) -- [ ] Execution groups assigned (P* for parallel, S* for sequential) -- [ ] Issue statuses updated to `queued` -- [ ] All subagents closed after completion - -## Error Handling - -| Situation | Action | -|-----------|--------| -| No planned issues | Return empty queue summary | -| Circular dependency detected | Abort, report cycle details | -| Missing solution file | Skip issue, log warning | -| Agent timeout | Retry with increased timeout | -| Clarification rejected | Abort queue formation | - -## Start Execution - -Begin by listing planned issues: - -```bash -ccw issue list --status planned --json -``` - -Then extract solution data and spawn queue agent. diff --git a/.codex/skills/issue-resolve/SKILL.md b/.codex/skills/issue-resolve/SKILL.md new file mode 100644 index 00000000..6ccd936b --- /dev/null +++ b/.codex/skills/issue-resolve/SKILL.md @@ -0,0 +1,343 @@ +--- +name: issue-resolve +description: Unified issue resolution pipeline with source selection. Plan issues via AI exploration, convert from artifacts, import from brainstorm sessions, or form execution queues. Triggers on "issue:plan", "issue:queue", "issue:convert-to-plan", "issue:from-brainstorm", "resolve issue", "plan issue", "queue issues", "convert plan to issue". +allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep +--- + +# Issue Resolve (Codex Version) + +Unified issue resolution pipeline that orchestrates solution creation from multiple sources and queue formation for execution. 
+ +## Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Issue Resolve Orchestrator (SKILL.md) │ +│ → Source selection → Route to phase → Execute → Summary │ +└───────────────┬─────────────────────────────────────────────────┘ + │ + ├─ AskUserQuestion: Select issue source + │ + ┌───────────┼───────────┬───────────┬───────────┐ + ↓ ↓ ↓ ↓ │ +┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ +│ Phase 1 │ │ Phase 2 │ │ Phase 3 │ │ Phase 4 │ │ +│ Explore │ │ Convert │ │ From │ │ Form │ │ +│ & Plan │ │Artifact │ │Brainstorm│ │ Queue │ │ +└─────────┘ └─────────┘ └─────────┘ └─────────┘ │ + ↓ ↓ ↓ ↓ │ + Solutions Solutions Issue+Sol Exec Queue │ + (bound) (bound) (bound) (ordered) │ + │ + ┌────────────────────────────────┘ + ↓ + /issue:execute +``` + +## Key Design Principles + +1. **Source-Driven Routing**: AskUserQuestion selects workflow, then load single phase +2. **Progressive Phase Loading**: Only read the selected phase document +3. **CLI-First Data Access**: All issue/solution CRUD via `ccw issue` CLI commands +4. **Auto Mode Support**: `-y` flag skips source selection (defaults to Explore & Plan) + +## Subagent API Reference + +### spawn_agent +Create a new subagent with task assignment. + +```javascript +const agentId = spawn_agent({ + message: ` +## TASK ASSIGNMENT + +### MANDATORY FIRST STEPS (Agent Execute) +1. **Read role definition**: ~/.codex/agents/{agent-type}.md (MUST read first) +2. Read: .workflow/project-tech.json +3. Read: .workflow/project-guidelines.json + +## TASK CONTEXT +${taskContext} + +## DELIVERABLES +${deliverables} +` +}) +``` + +### wait +Get results from subagent (only way to retrieve results). + +```javascript +const result = wait({ + ids: [agentId], + timeout_ms: 600000 // 10 minutes +}) + +if (result.timed_out) { + // Handle timeout - can continue waiting or send_input to prompt completion +} +``` + +### send_input +Continue interaction with active subagent (for clarification or follow-up). + +```javascript +send_input({ + id: agentId, + message: ` +## CLARIFICATION ANSWERS +${answers} + +## NEXT STEP +Continue with updated analysis. +` +}) +``` + +### close_agent +Clean up subagent resources (irreversible). + +```javascript +close_agent({ id: agentId }) +``` + +## Auto Mode + +When `--yes` or `-y`: Skip source selection, use Explore & Plan for issue IDs, or auto-detect source type for paths. 
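+The four subagent calls documented above are always used together as one lifecycle: spawn the agent, wait for its result, optionally clarify via send_input, and always close it afterwards. The sketch below ties them together; the role path, timeout value, and result handling are illustrative assumptions rather than a prescribed implementation.
+
+```javascript
+// Minimal subagent lifecycle sketch (role path and timeout are assumptions)
+const agentId = spawn_agent({
+  message: `
+## TASK ASSIGNMENT
+
+### MANDATORY FIRST STEPS (Agent Execute)
+1. **Read role definition**: ~/.codex/agents/issue-plan-agent.md (MUST read first)
+2. Read: .workflow/project-tech.json
+3. Read: .workflow/project-guidelines.json
+
+## TASK CONTEXT
+Plan solutions for the selected issues and report a JSON summary.
+`
+})
+
+const result = wait({ ids: [agentId], timeout_ms: 600000 })
+
+if (!result.timed_out && result.status[agentId]?.completed) {
+  // Result is the agent's final message; assumed here to be plain JSON
+  const summary = JSON.parse(result.status[agentId].completed)
+  // ...route summary.bound / summary.pending_selection to the active phase...
+}
+
+// Always release the subagent, even after a timeout or parse failure
+close_agent({ id: agentId })
+```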
+ +## Usage + +``` +codex -p "@.codex/prompts/issue-resolve.md " +codex -p "@.codex/prompts/issue-resolve.md [FLAGS] \"\"" + +# Flags +-y, --yes Skip all confirmations (auto mode) +--source Pre-select source: plan|convert|brainstorm|queue +--batch-size Max issues per agent batch (plan mode, default: 3) +--issue Bind to existing issue (convert mode) +--supplement Add tasks to existing solution (convert mode) +--queues Number of parallel queues (queue mode, default: 1) + +# Examples +codex -p "@.codex/prompts/issue-resolve.md GH-123,GH-124" # Explore & plan issues +codex -p "@.codex/prompts/issue-resolve.md --source plan --all-pending" # Plan all pending issues +codex -p "@.codex/prompts/issue-resolve.md --source convert \".workflow/.lite-plan/my-plan\"" # Convert artifact +codex -p "@.codex/prompts/issue-resolve.md --source brainstorm SESSION=\"BS-rate-limiting\"" # From brainstorm +codex -p "@.codex/prompts/issue-resolve.md --source queue" # Form execution queue +codex -p "@.codex/prompts/issue-resolve.md -y GH-123" # Auto mode, plan single issue +``` + +## Execution Flow + +``` +Input Parsing: + └─ Parse flags (--source, -y, --issue, etc.) and positional args + +Source Selection: + ├─ --source flag provided → Route directly + ├─ Auto-detect from input: + │ ├─ Issue IDs (GH-xxx, ISS-xxx) → Explore & Plan + │ ├─ SESSION="..." → From Brainstorm + │ ├─ File/folder path → Convert from Artifact + │ └─ No input or --all-pending → Explore & Plan (all pending) + └─ Otherwise → AskUserQuestion to select source + +Phase Execution (load one phase): + ├─ Phase 1: Explore & Plan → phases/01-issue-plan.md + ├─ Phase 2: Convert Artifact → phases/02-convert-to-plan.md + ├─ Phase 3: From Brainstorm → phases/03-from-brainstorm.md + └─ Phase 4: Form Queue → phases/04-issue-queue.md + +Post-Phase: + └─ Summary + Next steps recommendation +``` + +### Phase Reference Documents + +| Phase | Document | Load When | Purpose | +|-------|----------|-----------|---------| +| Phase 1 | [phases/01-issue-plan.md](phases/01-issue-plan.md) | Source = Explore & Plan | Batch plan issues via issue-plan-agent | +| Phase 2 | [phases/02-convert-to-plan.md](phases/02-convert-to-plan.md) | Source = Convert Artifact | Convert lite-plan/session/markdown to solutions | +| Phase 3 | [phases/03-from-brainstorm.md](phases/03-from-brainstorm.md) | Source = From Brainstorm | Convert brainstorm ideas to issue + solution | +| Phase 4 | [phases/04-issue-queue.md](phases/04-issue-queue.md) | Source = Form Queue | Order bound solutions into execution queue | + +## Core Rules + +1. **Source Selection First**: Always determine source before loading any phase +2. **Single Phase Load**: Only read the selected phase document, never load all phases +3. **CLI Data Access**: Use `ccw issue` CLI for all issue/solution operations, NEVER read files directly +4. **Content Preservation**: Each phase contains complete execution logic from original commands +5. **Auto-Detect Input**: Smart input parsing reduces need for explicit --source flag +6. **DO NOT STOP**: Continuous multi-phase workflow. After completing each phase, immediately proceed to next +7. **Explicit Lifecycle**: Always close_agent after wait completes to free resources + +## Input Processing + +### Auto-Detection Logic + +```javascript +function detectSource(input, flags) { + // 1. Explicit --source flag + if (flags.source) return flags.source; + + // 2. 
Auto-detect from input content + const trimmed = input.trim(); + + // Issue IDs pattern (GH-xxx, ISS-xxx, comma-separated) + if (trimmed.match(/^[A-Z]+-\d+/i) || trimmed.includes(',')) { + return 'plan'; + } + + // --all-pending or empty input → plan all pending + if (flags.allPending || trimmed === '') { + return 'plan'; + } + + // SESSION="..." pattern → brainstorm + if (trimmed.includes('SESSION=')) { + return 'brainstorm'; + } + + // File/folder path → convert + if (trimmed.match(/\.(md|json)$/) || trimmed.includes('.workflow/')) { + return 'convert'; + } + + // Cannot auto-detect → ask user + return null; +} +``` + +### Source Selection (AskUserQuestion) + +```javascript +// When source cannot be auto-detected +const answer = AskUserQuestion({ + questions: [{ + question: "How would you like to create/manage issue solutions?", + header: "Source", + multiSelect: false, + options: [ + { + label: "Explore & Plan (Recommended)", + description: "AI explores codebase and generates solutions for issues" + }, + { + label: "Convert from Artifact", + description: "Convert existing lite-plan, workflow session, or markdown to solution" + }, + { + label: "From Brainstorm", + description: "Convert brainstorm session ideas into issue with solution" + }, + { + label: "Form Execution Queue", + description: "Order bound solutions into execution queue for /issue:execute" + } + ] + }] +}); + +// Route based on selection +const sourceMap = { + "Explore & Plan": "plan", + "Convert from Artifact": "convert", + "From Brainstorm": "brainstorm", + "Form Execution Queue": "queue" +}; +``` + +## Data Flow + +``` +User Input (issue IDs / artifact path / session ID / flags) + ↓ +[Parse Flags + Auto-Detect Source] + ↓ +[Source Selection] ← AskUserQuestion (if needed) + ↓ +[Read Selected Phase Document] + ↓ +[Execute Phase Logic] + ↓ +[Summary + Next Steps] + ├─ After Plan/Convert/Brainstorm → Suggest /issue:queue or /issue:execute + └─ After Queue → Suggest /issue:execute +``` + +## Task Tracking Pattern + +```javascript +// Initialize plan with phase steps +update_plan({ + explanation: "Issue resolve workflow started", + plan: [ + { step: "Select issue source", status: "completed" }, + { step: "Execute: [selected phase name]", status: "in_progress" }, + { step: "Summary & next steps", status: "pending" } + ] +}) +``` + +Phase-specific sub-tasks are attached when the phase executes (see individual phase docs for details). + +## Core Guidelines + +**Data Access Principle**: Issues and solutions files can grow very large. To avoid context overflow: + +| Operation | Correct | Incorrect | +|-----------|---------|-----------| +| List issues (brief) | `ccw issue list --status pending --brief` | `Read('issues.jsonl')` | +| Read issue details | `ccw issue status --json` | `Read('issues.jsonl')` | +| Update status | `ccw issue update --status ...` | Direct file edit | +| Bind solution | `ccw issue bind ` | Direct file edit | +| Batch solutions | `ccw issue solutions --status planned --brief` | Loop individual queries | + +**Output Options**: +- `--brief`: JSON with minimal fields (orchestrator use) +- `--json`: Full JSON (agent use only) + +**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `solutions/*.jsonl` directly. 
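+In practice the CLI-first rule reduces to a small orchestrator-side pattern: run the `ccw issue` command with `--brief`, parse its JSON output, and route all writes back through the CLI. A minimal sketch, assuming the `Bash` helper returns the command's stdout as a string:
+
+```javascript
+// CLI-first data access sketch (assumes Bash() returns command stdout)
+const raw = Bash(`ccw issue list --status planned --brief`).trim()
+const planned = raw ? JSON.parse(raw) : []
+
+for (const issue of planned) {
+  // Brief output carries only minimal fields (id, title, status, priority, tags)
+  console.log(`${issue.id}: ${issue.title} [${issue.status}]`)
+}
+
+// Writes also go through the CLI - never edit issues.jsonl or solutions/*.jsonl directly
+// e.g. Bash(`ccw issue update ${someId} --status queued`)
+```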
+ +## Error Handling + +| Error | Resolution | +|-------|------------| +| No source detected | Show AskUserQuestion with all 4 options | +| Invalid source type | Show available sources, re-prompt | +| Phase execution fails | Report error, suggest manual intervention | +| No pending issues (plan) | Suggest creating issues first | +| No bound solutions (queue) | Suggest running plan/convert/brainstorm first | + +## Post-Phase Next Steps + +After successful phase execution, recommend next action: + +```javascript +// After Plan/Convert/Brainstorm (solutions created) +AskUserQuestion({ + questions: [{ + question: "Solutions created. What next?", + header: "Next", + multiSelect: false, + options: [ + { label: "Form Queue", description: "Order solutions for execution (/issue:queue)" }, + { label: "Plan More Issues", description: "Continue creating solutions" }, + { label: "View Issues", description: "Review issue details" }, + { label: "Done", description: "Exit workflow" } + ] + }] +}); + +// After Queue (queue formed) +// → Suggest /issue:execute directly +``` + +## Related Commands + +- `issue-manage` - Interactive issue CRUD operations +- `/issue:execute` - Execute queue with DAG-based parallel orchestration +- `ccw issue list` - List all issues +- `ccw issue status ` - View issue details diff --git a/.codex/skills/issue-resolve/phases/01-issue-plan.md b/.codex/skills/issue-resolve/phases/01-issue-plan.md new file mode 100644 index 00000000..732a2e86 --- /dev/null +++ b/.codex/skills/issue-resolve/phases/01-issue-plan.md @@ -0,0 +1,318 @@ +# Phase 1: Explore & Plan + +## Overview + +Batch plan issue resolution using **issue-plan-agent** that combines exploration and planning into a single closed-loop workflow. + +**Behavior:** +- Single solution per issue → auto-bind +- Multiple solutions → return for user selection +- Agent handles file generation + +## Prerequisites + +- Issue IDs provided (comma-separated) or `--all-pending` flag +- `ccw issue` CLI available +- `.workflow/issues/` directory exists or will be created + +## Auto Mode + +When `--yes` or `-y`: Auto-bind solutions without confirmation, use recommended settings. + +## Core Guidelines + +**Data Access Principle**: Issues and solutions files can grow very large. To avoid context overflow: + +| Operation | Correct | Incorrect | +|-----------|---------|-----------| +| List issues (brief) | `ccw issue list --status pending --brief` | `Read('issues.jsonl')` | +| Read issue details | `ccw issue status --json` | `Read('issues.jsonl')` | +| Update status | `ccw issue update --status ...` | Direct file edit | +| Bind solution | `ccw issue bind ` | Direct file edit | + +**Output Options**: +- `--brief`: JSON with minimal fields (id, title, status, priority, tags) +- `--json`: Full JSON (agent use only) + +**Orchestration vs Execution**: +- **Command (orchestrator)**: Use `--brief` for minimal context +- **Agent (executor)**: Fetch full details → `ccw issue status --json` + +**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `solutions/*.jsonl` directly. 
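+Step 1.2 below relies on two helpers that are assumed rather than defined in this phase: `updatePlanStep` (a thin wrapper that flips one step's status via `update_plan`) and `extractJsonFromMarkdown` (strips an optional fenced wrapper from an agent's summary). A minimal sketch of the latter, under the assumption that agents may wrap their JSON in a fenced code block:
+
+```javascript
+// Assumed helper: return the JSON payload, with or without a fenced json wrapper
+function extractJsonFromMarkdown(text) {
+  const fenced = text.match(/```(?:json)?\s*([\s\S]*?)```/)
+  return (fenced ? fenced[1] : text).trim()
+}
+```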
+ +## Execution Steps + +### Step 1.1: Issue Loading (Brief Info Only) + +```javascript +const batchSize = flags.batchSize || 3; +let issues = []; // {id, title, tags} - brief info for grouping only + +// Default to --all-pending if no input provided +const useAllPending = flags.allPending || !userInput || userInput.trim() === ''; + +if (useAllPending) { + // Get pending issues with brief metadata via CLI + const result = Bash(`ccw issue list --status pending,registered --json`).trim(); + const parsed = result ? JSON.parse(result) : []; + issues = parsed.map(i => ({ id: i.id, title: i.title || '', tags: i.tags || [] })); + + if (issues.length === 0) { + console.log('No pending issues found.'); + return; + } + console.log(`Found ${issues.length} pending issues`); +} else { + // Parse comma-separated issue IDs, fetch brief metadata + const ids = userInput.includes(',') + ? userInput.split(',').map(s => s.trim()) + : [userInput.trim()]; + + for (const id of ids) { + Bash(`ccw issue init ${id} --title "Issue ${id}" 2>/dev/null || true`); + const info = Bash(`ccw issue status ${id} --json`).trim(); + const parsed = info ? JSON.parse(info) : {}; + issues.push({ id, title: parsed.title || '', tags: parsed.tags || [] }); + } +} +// Note: Agent fetches full issue content via `ccw issue status --json` + +// Intelligent grouping: Analyze issues by title/tags, group semantically similar ones +// Strategy: Same module/component, related bugs, feature clusters +// Constraint: Max ${batchSize} issues per batch + +console.log(`Processing ${issues.length} issues in ${batches.length} batch(es)`); + +update_plan({ + explanation: "Issue loading complete, starting batch planning", + plan: batches.map((_, i) => ({ + step: `Plan batch ${i+1}`, + status: 'pending' + })) +}); +``` + +### Step 1.2: Unified Explore + Plan (issue-plan-agent) - PARALLEL + +```javascript +Bash(`mkdir -p .workflow/issues/solutions`); +const pendingSelections = []; // Collect multi-solution issues for user selection +const agentResults = []; // Collect all agent results for conflict aggregation + +// Build prompts for all batches +const agentTasks = batches.map((batch, batchIndex) => { + const issueList = batch.map(i => `- ${i.id}: ${i.title}${i.tags.length ? ` [${i.tags.join(', ')}]` : ''}`).join('\n'); + const batchIds = batch.map(i => i.id); + + const issuePrompt = ` +## TASK ASSIGNMENT + +### MANDATORY FIRST STEPS (Agent Execute) +1. **Read role definition**: ~/.codex/agents/issue-plan-agent.md (MUST read first) +2. Read: .workflow/project-tech.json +3. Read: .workflow/project-guidelines.json + +--- + +## Plan Issues + +**Issues** (grouped by similarity): +${issueList} + +**Project Root**: ${process.cwd()} + +### Project Context (MANDATORY) +1. Read: .workflow/project-tech.json (technology stack, architecture) +2. Read: .workflow/project-guidelines.json (constraints and conventions) + +### Workflow +1. Fetch issue details: ccw issue status --json +2. **Analyze failure history** (if issue.feedback exists): + - Extract failure details from issue.feedback (type='failure', stage='execute') + - Parse error_type, message, task_id, solution_id from content JSON + - Identify failure patterns: repeated errors, root causes, blockers + - **Constraint**: Avoid repeating failed approaches +3. Load project context files +4. Explore codebase (ACE semantic search) +5. 
Plan solution with tasks (schema: solution-schema.json) + - **If previous solution failed**: Reference failure analysis in solution.approach + - Add explicit verification steps to prevent same failure mode +6. **If github_url exists**: Add final task to comment on GitHub issue +7. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl +8. **CRITICAL - Binding Decision**: + - Single solution → **MUST execute**: ccw issue bind + - Multiple solutions → Return pending_selection only (no bind) + +### Failure-Aware Planning Rules +- **Extract failure patterns**: Parse issue.feedback where type='failure' and stage='execute' +- **Identify root causes**: Analyze error_type (test_failure, compilation, timeout, etc.) +- **Design alternative approach**: Create solution that addresses root cause +- **Add prevention steps**: Include explicit verification to catch same error earlier +- **Document lessons**: Reference previous failures in solution.approach + +### Rules +- Solution ID format: SOL-{issue-id}-{uid} (uid: 4 random alphanumeric chars, e.g., a7x9) +- Single solution per issue → auto-bind via ccw issue bind +- Multiple solutions → register only, return pending_selection +- Tasks must have quantified acceptance.criteria + +### Return Summary +{"bound":[{"issue_id":"...","solution_id":"...","task_count":N}],"pending_selection":[{"issue_id":"...","solutions":[{"id":"...","description":"...","task_count":N}]}]} +`; + + return { batchIndex, batchIds, issuePrompt, batch }; +}); + +// Launch agents in parallel (max 10 concurrent) +const MAX_PARALLEL = 10; +for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) { + const chunk = agentTasks.slice(i, i + MAX_PARALLEL); + const agentIds = []; + + // Step 1: Spawn agents in parallel + for (const { batchIndex, batchIds, issuePrompt, batch } of chunk) { + updatePlanStep(`Plan batch ${batchIndex + 1}`, 'in_progress'); + const agentId = spawn_agent({ + message: issuePrompt + }); + agentIds.push({ agentId, batchIndex }); + } + + console.log(`Launched ${agentIds.length} agents (chunk ${Math.floor(i/MAX_PARALLEL) + 1}/${Math.ceil(agentTasks.length/MAX_PARALLEL)})...`); + + // Step 2: Batch wait for all agents in this chunk + const allIds = agentIds.map(a => a.agentId); + const waitResult = wait({ + ids: allIds, + timeout_ms: 600000 // 10 minutes + }); + + if (waitResult.timed_out) { + console.log('Some agents timed out, continuing with completed results'); + } + + // Step 3: Collect results from completed agents + for (const { agentId, batchIndex } of agentIds) { + const agentStatus = waitResult.status[agentId]; + if (!agentStatus || !agentStatus.completed) { + console.log(`Batch ${batchIndex + 1}: Agent did not complete, skipping`); + updatePlanStep(`Plan batch ${batchIndex + 1}`, 'completed'); + continue; + } + + const result = agentStatus.completed; + + // Extract JSON from potential markdown code blocks (agent may wrap in ```json...```) + const jsonText = extractJsonFromMarkdown(result); + let summary; + try { + summary = JSON.parse(jsonText); + } catch (e) { + console.log(`Batch ${batchIndex + 1}: Failed to parse agent result, skipping`); + updatePlanStep(`Plan batch ${batchIndex + 1}`, 'completed'); + continue; + } + agentResults.push(summary); // Store for conflict aggregation + + // Verify binding for bound issues (agent should have executed bind) + for (const item of summary.bound || []) { + const status = JSON.parse(Bash(`ccw issue status ${item.issue_id} --json`).trim()); + if (status.bound_solution_id === item.solution_id) { + 
console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`); + } else { + // Fallback: agent failed to bind, execute here + Bash(`ccw issue bind ${item.issue_id} ${item.solution_id}`); + console.log(`${item.issue_id}: ${item.solution_id} (${item.task_count} tasks) [recovered]`); + } + } + // Collect pending selections + for (const pending of summary.pending_selection || []) { + pendingSelections.push(pending); + } + updatePlanStep(`Plan batch ${batchIndex + 1}`, 'completed'); + } + + // Step 4: Batch cleanup - close all agents in this chunk + allIds.forEach(id => close_agent({ id })); +} +``` + +### Step 1.3: Solution Selection (if pending) + +```javascript +// Handle multi-solution issues +for (const pending of pendingSelections) { + if (pending.solutions.length === 0) continue; + + const options = pending.solutions.slice(0, 4).map(sol => ({ + label: `${sol.id} (${sol.task_count} tasks)`, + description: sol.description || sol.approach || 'No description' + })); + + const answer = AskUserQuestion({ + questions: [{ + question: `Issue ${pending.issue_id}: which solution to bind?`, + header: pending.issue_id, + options: options, + multiSelect: false + }] + }); + + const selected = answer[Object.keys(answer)[0]]; + if (!selected || selected === 'Other') continue; + + const solId = selected.split(' ')[0]; + Bash(`ccw issue bind ${pending.issue_id} ${solId}`); + console.log(`${pending.issue_id}: ${solId} bound`); +} +``` + +### Step 1.4: Summary + +```javascript +// Count planned issues via CLI +const planned = JSON.parse(Bash(`ccw issue list --status planned --brief`) || '[]'); +const plannedCount = planned.length; + +console.log(` +## Done: ${issues.length} issues → ${plannedCount} planned + +Next: \`/issue:queue\` → \`/issue:execute\` +`); +``` + +## Error Handling + +| Error | Resolution | +|-------|------------| +| Issue not found | Auto-create in issues.jsonl | +| ACE search fails | Agent falls back to ripgrep | +| No solutions generated | Display error, suggest manual planning | +| User cancels selection | Skip issue, continue with others | +| File conflicts | Agent detects and suggests resolution order | + +## Bash Compatibility + +**Avoid**: `$(cmd)`, `$var`, `for` loops — will be escaped incorrectly + +**Use**: Simple commands + `&&` chains, quote comma params `"pending,registered"` + +## Quality Checklist + +Before completing, verify: + +- [ ] All input issues have solutions in `solutions/{issue-id}.jsonl` +- [ ] Single solution issues are auto-bound (`bound_solution_id` set) +- [ ] Multi-solution issues returned in `pending_selection` for user choice +- [ ] Each solution has executable tasks with `modification_points` +- [ ] Task acceptance criteria are quantified (not vague) +- [ ] Conflicts detected and reported (if multiple issues touch same files) +- [ ] Issue status updated to `planned` after binding +- [ ] All spawned agents are properly closed via close_agent + +## Post-Phase Update + +After plan completion: +- All processed issues should have `status: planned` and `bound_solution_id` set +- Report: total issues processed, solutions bound, pending selections resolved +- Recommend next step: Form execution queue via Phase 4 diff --git a/.codex/skills/workflow-tdd-plan/SKILL.md b/.codex/skills/workflow-tdd-plan/SKILL.md new file mode 100644 index 00000000..c463c1de --- /dev/null +++ b/.codex/skills/workflow-tdd-plan/SKILL.md @@ -0,0 +1,811 @@ +--- +name: workflow-tdd-plan +description: TDD workflow planning with Red-Green-Refactor task chain generation, 
test-first development structure, cycle tracking, and post-execution compliance verification. Triggers on "workflow:tdd-plan", "workflow:tdd-verify". +allowed-tools: spawn_agent, wait, send_input, close_agent, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep +--- + +# Workflow TDD Plan + +6-phase TDD planning workflow that orchestrates session discovery, context gathering, test coverage analysis, conflict resolution, and TDD task generation to produce implementation plans with Red-Green-Refactor cycles. Includes post-execution TDD compliance verification. + +## Architecture Overview + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ Workflow TDD Plan Orchestrator (SKILL.md) │ +│ → Pure coordinator: Execute phases, parse outputs, pass context │ +└───────────────┬──────────────────────────────────────────────────┘ + │ + ┌────────────┼────────────┬────────────┬────────────┐ + ↓ ↓ ↓ ↓ ↓ +┌────────┐ ┌────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ +│ Phase 1│ │ Phase 2│ │ Phase 3 │ │ Phase 4 │ │ Phase 5 │ +│Session │ │Context │ │Test Covg │ │Conflict │ │TDD Task │ +│Discover│ │Gather │ │Analysis │ │Resolve │ │Generate │ +│ (ext) │ │ (ext) │ │ (local) │ │(ext,cond)│ │ (local) │ +└────────┘ └────────┘ └──────────┘ └──────────┘ └──────────┘ + ↓ ↓ ↓ ↓ ↓ + sessionId contextPath testContext resolved IMPL_PLAN.md + conflict_risk artifacts task JSONs + +Phase 6: TDD Structure Validation (inline in SKILL.md) + +Post-execution verification: +┌──────────────┐ ┌───────────────────┐ +│ TDD Verify │────→│ Coverage Analysis │ +│ (local) │ │ (local) │ +└──────────────┘ └───────────────────┘ +phases/03-tdd- phases/04-tdd- +verify.md coverage-analysis.md +``` + +## Key Design Principles + +1. **Pure Orchestrator**: Execute phases in sequence, parse outputs, pass context between them +2. **Auto-Continue**: All phases run autonomously without user intervention between phases +3. **Subagent Lifecycle**: Explicit lifecycle management with spawn_agent → wait → close_agent +4. **Progressive Phase Loading**: Phase docs are read on-demand, not all at once +5. **Conditional Execution**: Phase 4 only executes when conflict_risk >= medium +6. **TDD-First**: Every feature starts with a failing test (Red phase) +7. **Role Path Loading**: Subagent roles loaded via path reference in MANDATORY FIRST STEPS + +**CLI Tool Selection**: CLI tool usage is determined semantically from user's task description. Include "use Codex/Gemini/Qwen" in your request for CLI execution. + +**Task Attachment Model**: +- Skill execute **expands workflow** by attaching sub-tasks to current TodoWrite +- When executing a sub-command, its internal tasks are attached to the orchestrator's TodoWrite +- Orchestrator **executes these attached tasks** sequentially +- After completion, attached tasks are **collapsed** back to high-level phase summary +- This is **task expansion**, not external delegation + +**Auto-Continue Mechanism**: +- TodoList tracks current phase status and dynamically manages task attachment/collapse +- When each phase finishes executing, automatically execute next pending phase +- All phases run autonomously without user interaction +- **CONTINUOUS EXECUTION** - Do not stop until all phases complete + +## Auto Mode + +When `--yes` or `-y`: Auto-continue all phases (skip confirmations), use recommended conflict resolutions, skip TDD clarifications. + +## Subagent API Reference + +### spawn_agent +Create a new subagent with task assignment. 
+ +```javascript +const agentId = spawn_agent({ + message: ` +## TASK ASSIGNMENT + +### MANDATORY FIRST STEPS (Agent Execute) +1. **Read role definition**: ~/.codex/agents/{agent-type}.md (MUST read first) +2. Read: .workflow/project-tech.json +3. Read: .workflow/project-guidelines.json + +## TASK CONTEXT +${taskContext} + +## DELIVERABLES +${deliverables} +` +}) +``` + +### wait +Get results from subagent (only way to retrieve results). + +```javascript +const result = wait({ + ids: [agentId], + timeout_ms: 600000 // 10 minutes +}) + +if (result.timed_out) { + // Handle timeout - can continue waiting or send_input to prompt completion +} +``` + +### send_input +Continue interaction with active subagent (for clarification or follow-up). + +```javascript +send_input({ + id: agentId, + message: ` +## CLARIFICATION ANSWERS +${answers} + +## NEXT STEP +Continue with plan generation. +` +}) +``` + +### close_agent +Clean up subagent resources (irreversible). + +```javascript +close_agent({ id: agentId }) +``` + +## Usage + +``` +workflow-tdd-plan +workflow-tdd-plan [-y|--yes] "" + +# Flags +-y, --yes Skip all confirmations (auto mode) + +# Arguments + Task description text, TDD-structured format, or path to .md file + +# Examples +workflow-tdd-plan "Build user authentication with tests" # Simple TDD task +workflow-tdd-plan "Add JWT auth with email/password and token refresh" # Detailed task +workflow-tdd-plan -y "Implement payment processing" # Auto mode +workflow-tdd-plan "tdd-requirements.md" # From file +``` + +## TDD Compliance Requirements + +### The Iron Law + +``` +NO PRODUCTION CODE WITHOUT A FAILING TEST FIRST +``` + +**Enforcement Method**: +- Phase 5: `implementation_approach` includes test-first steps (Red → Green → Refactor) +- Green phase: Includes test-fix-cycle configuration (max 3 iterations) +- Auto-revert: Triggered when max iterations reached without passing tests + +**Verification**: Phase 6 validates Red-Green-Refactor structure in all generated tasks + +### TDD Compliance Checkpoint + +| Checkpoint | Validation Phase | Evidence Required | +|------------|------------------|-------------------| +| Test-first structure | Phase 5 | `implementation_approach` has 3 steps | +| Red phase exists | Phase 6 | Step 1: `tdd_phase: "red"` | +| Green phase with test-fix | Phase 6 | Step 2: `tdd_phase: "green"` + test-fix-cycle | +| Refactor phase exists | Phase 6 | Step 3: `tdd_phase: "refactor"` | + +### Core TDD Principles + +**Red Flags - STOP and Reassess**: +- Code written before test +- Test passes immediately (no Red phase witnessed) +- Cannot explain why test should fail +- "Just this once" rationalization +- "Tests after achieve same goals" thinking + +**Why Order Matters**: +- Tests written after code pass immediately → proves nothing +- Test-first forces edge case discovery before implementation +- Tests-after verify what was built, not what's required + +## Core Rules + +1. **Start Immediately**: First action is TodoWrite initialization, second action is execute Phase 1 +2. **No Preliminary Analysis**: Do not read files before Phase 1 +3. **Parse Every Output**: Extract required data for next phase +4. **Auto-Continue via TodoList**: Check TodoList status to execute next pending phase automatically +5. **Track Progress**: Update TodoWrite dynamically with task attachment/collapse pattern +6. **TDD Context**: All descriptions include "TDD:" prefix +7. **Task Attachment Model**: Skill execute **attaches** sub-tasks to current workflow. 
Orchestrator **executes** these attached tasks itself, then **collapses** them after completion +8. **CRITICAL: DO NOT STOP**: Continuous multi-phase workflow. After executing all attached tasks, immediately collapse them and execute next phase +9. **Explicit Lifecycle**: Always close_agent after wait completes to free resources + +## Execution Flow + +``` +Input Parsing: + └─ Convert user input to TDD-structured format (TDD:/GOAL/SCOPE/CONTEXT/TEST_FOCUS) + +Phase 1: Session Discovery + └─ Ref: workflow-plan-execute/phases/01-session-discovery.md (external) + └─ Output: sessionId (WFS-xxx) + +Phase 2: Context Gathering + └─ Ref: workflow-plan-execute/phases/02-context-gathering.md (external) + ├─ Tasks attached: Analyze structure → Identify integration → Generate package + └─ Output: contextPath + conflict_risk + +Phase 3: Test Coverage Analysis ← ATTACHED (3 tasks) + └─ Ref: phases/01-test-context-gather.md + ├─ Phase 3.1: Detect test framework + ├─ Phase 3.2: Analyze existing test coverage + └─ Phase 3.3: Identify coverage gaps + └─ Output: test-context-package.json ← COLLAPSED + +Phase 4: Conflict Resolution (conditional) + └─ Decision (conflict_risk check): + ├─ conflict_risk ≥ medium → Inline conflict resolution (within Phase 2) + │ ├─ Tasks attached: Detect conflicts → Present to user → Apply strategies + │ └─ Output: Modified brainstorm artifacts ← COLLAPSED + └─ conflict_risk < medium → Skip to Phase 5 + +Phase 5: TDD Task Generation ← ATTACHED (3 tasks) + └─ Ref: phases/02-task-generate-tdd.md + ├─ Phase 5.1: Discovery - analyze TDD requirements + ├─ Phase 5.2: Planning - design Red-Green-Refactor cycles + └─ Phase 5.3: Output - generate IMPL tasks with internal TDD phases + └─ Output: IMPL-*.json, IMPL_PLAN.md ← COLLAPSED + +Phase 6: TDD Structure Validation (inline) + └─ Internal validation + summary returned + └─ Recommend: plan-verify (external) + +Return: + └─ Summary with recommended next steps +``` + +### Phase Reference Documents + +**Local phases** (read on-demand when phase executes): + +| Phase | Document | Purpose | +|-------|----------|---------| +| Phase 3 | [phases/01-test-context-gather.md](phases/01-test-context-gather.md) | Test coverage context gathering via test-context-search-agent | +| Phase 5 | [phases/02-task-generate-tdd.md](phases/02-task-generate-tdd.md) | TDD task JSON generation via action-planning-agent | + +**External phases** (from workflow-plan-execute skill): + +| Phase | Document | Purpose | +|-------|----------|---------| +| Phase 1 | workflow-plan-execute/phases/01-session-discovery.md | Session creation/discovery | +| Phase 2 | workflow-plan-execute/phases/02-context-gathering.md | Project context collection + inline conflict resolution | + +**Post-execution verification**: + +| Phase | Document | Purpose | +|-------|----------|---------| +| TDD Verify | [phases/03-tdd-verify.md](phases/03-tdd-verify.md) | TDD compliance verification with quality gate | +| Coverage Analysis | [phases/04-tdd-coverage-analysis.md](phases/04-tdd-coverage-analysis.md) | Test coverage and cycle analysis (called by TDD Verify) | + +## 6-Phase Execution + +### Phase 1: Session Discovery + +**Step 1.1: Execute** - Session discovery and initialization + +Read and execute: `workflow-plan-execute/phases/01-session-discovery.md` with `--type tdd --auto "TDD: [structured-description]"` + +**TDD Structured Format**: +``` +TDD: [Feature Name] +GOAL: [Objective] +SCOPE: [Included/excluded] +CONTEXT: [Background] +TEST_FOCUS: [Test scenarios] +``` + +**Parse**: Extract 
sessionId + +**TodoWrite**: Mark phase 1 completed, phase 2 in_progress + +**After Phase 1**: Return to user showing Phase 1 results, then auto-continue to Phase 2 + +--- + +### Phase 2: Context Gathering + +**Step 2.1: Execute** - Context gathering and analysis + +Read and execute: `workflow-plan-execute/phases/02-context-gathering.md` with `--session [sessionId] "TDD: [structured-description]"` + +**Use Same Structured Description**: Pass the same structured format from Phase 1 + +**Input**: `sessionId` from Phase 1 + +**Parse Output**: +- Extract: context-package.json path (store as `contextPath`) +- Typical pattern: `.workflow/active/[sessionId]/.process/context-package.json` + +**Validation**: +- Context package path extracted +- File exists and is valid JSON + +**TodoWrite**: Mark phase 2 completed, phase 3 in_progress + +**After Phase 2**: Return to user showing Phase 2 results, then auto-continue to Phase 3 + +--- + +### Phase 3: Test Coverage Analysis + +**Step 3.1: Execute** - Test coverage analysis and framework detection + +Read and execute: `phases/01-test-context-gather.md` with `--session [sessionId]` + +**Purpose**: Analyze existing codebase for: +- Existing test patterns and conventions +- Current test coverage +- Related components and integration points +- Test framework detection + +**Parse**: Extract testContextPath (`.workflow/active/[sessionId]/.process/test-context-package.json`) + +**TodoWrite Update (Phase 3 - tasks attached)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "in_progress", "activeForm": "Executing test coverage analysis"}, + {"content": " → Detect test framework and conventions", "status": "in_progress", "activeForm": "Detecting test framework"}, + {"content": " → Analyze existing test coverage", "status": "pending", "activeForm": "Analyzing test coverage"}, + {"content": " → Identify coverage gaps", "status": "pending", "activeForm": "Identifying coverage gaps"}, + {"content": "Phase 5: TDD Task Generation", "status": "pending", "activeForm": "Executing TDD task generation"}, + {"content": "Phase 6: TDD Structure Validation", "status": "pending", "activeForm": "Validating TDD structure"} +] +``` + +**Note**: Skill execute **attaches** test-context-gather's 3 tasks. Orchestrator **executes** these tasks. 
+ +**Next Action**: Tasks attached → **Execute Phase 3.1-3.3** sequentially + +**TodoWrite Update (Phase 3 completed - tasks collapsed)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "completed", "activeForm": "Executing test coverage analysis"}, + {"content": "Phase 5: TDD Task Generation", "status": "pending", "activeForm": "Executing TDD task generation"}, + {"content": "Phase 6: TDD Structure Validation", "status": "pending", "activeForm": "Validating TDD structure"} +] +``` + +**After Phase 3**: Return to user showing test coverage results, then auto-continue to Phase 4/5 + +--- + +### Phase 4: Conflict Resolution (Optional) + +**Trigger**: Only execute when context-package.json indicates conflict_risk is "medium" or "high" + +**Step 4.1: Execute** - Conflict detection and resolution + +Conflict resolution is handled inline within Phase 2 (context-gathering). When conflict_risk >= medium, Phase 2 automatically performs detection and resolution. + +**Input**: +- sessionId from Phase 1 +- contextPath from Phase 2 +- conflict_risk from context-package.json + +**Parse Output**: +- Extract: Execution status (success/skipped/failed) +- Verify: conflict-resolution.json file path (if executed) + +**Skip Behavior**: +- If conflict_risk is "none" or "low", skip directly to Phase 5 +- Display: "No significant conflicts detected, proceeding to TDD task generation" + +**TodoWrite Update (Phase 4 - tasks attached, if conflict_risk >= medium)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "completed", "activeForm": "Executing test coverage analysis"}, + {"content": "Phase 4: Conflict Resolution", "status": "in_progress", "activeForm": "Executing conflict resolution"}, + {"content": " → Detect conflicts with CLI analysis", "status": "in_progress", "activeForm": "Detecting conflicts"}, + {"content": " → Log and analyze detected conflicts", "status": "pending", "activeForm": "Analyzing conflicts"}, + {"content": " → Apply resolution strategies", "status": "pending", "activeForm": "Applying resolution strategies"}, + {"content": "Phase 5: TDD Task Generation", "status": "pending", "activeForm": "Executing TDD task generation"}, + {"content": "Phase 6: TDD Structure Validation", "status": "pending", "activeForm": "Validating TDD structure"} +] +``` + +**TodoWrite Update (Phase 4 completed - tasks collapsed)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "completed", "activeForm": "Executing test coverage analysis"}, + {"content": "Phase 4: Conflict Resolution", "status": "completed", "activeForm": "Executing conflict resolution"}, + {"content": "Phase 5: TDD Task Generation", "status": "pending", "activeForm": "Executing TDD task generation"}, + {"content": "Phase 6: TDD Structure Validation", "status": "pending", "activeForm": "Validating TDD structure"} +] 
+``` + +**After Phase 4**: Return to user showing conflict resolution results, then auto-continue to Phase 5 + +**Memory State Check**: +- Evaluate current context window usage and memory state +- If memory usage is high (>110K tokens or approaching context limits): + + **Step 4.5: Execute** - Memory compaction (external skill: compact) + + - This optimizes memory before proceeding to Phase 5 +- Memory compaction is particularly important after analysis phase which may generate extensive documentation + +--- + +### Phase 5: TDD Task Generation + +**Step 5.1: Execute** - TDD task generation via action-planning-agent with Phase 0 user configuration + +Read and execute: `phases/02-task-generate-tdd.md` with `--session [sessionId]` + +**Note**: Phase 0 now includes: +- Supplementary materials collection (file paths or inline content) +- Execution method preference (Agent/Hybrid/CLI) +- CLI tool preference (Codex/Gemini/Qwen/Auto) +- These preferences are passed to agent for task generation + +**Parse**: Extract feature count, task count, CLI execution IDs assigned + +**Validate**: +- IMPL_PLAN.md exists (unified plan with TDD Implementation Tasks section) +- IMPL-*.json files exist (one per feature, or container + subtasks for complex features) +- TODO_LIST.md exists with internal TDD phase indicators +- Each IMPL task includes: + - `meta.tdd_workflow: true` + - `meta.cli_execution_id: {session_id}-{task_id}` + - `meta.cli_execution: { "strategy": "new|resume|fork|merge_fork", ... }` + - `flow_control.implementation_approach` with exactly 3 steps (red/green/refactor) + - Green phase includes test-fix-cycle configuration + - `context.focus_paths`: absolute or clear relative paths + - `flow_control.pre_analysis`: includes exploration integration_points analysis +- IMPL_PLAN.md contains workflow_type: "tdd" in frontmatter +- Task count <=18 (compliance with hard limit) + +**Red Flag Detection** (Non-Blocking Warnings): +- Task count >18: `Warning: Task count exceeds hard limit - request re-scope` +- Missing cli_execution_id: `Warning: Task lacks CLI execution ID for resume support` +- Missing test-fix-cycle: `Warning: Green phase lacks auto-revert configuration` +- Generic task names: `Warning: Vague task names suggest unclear TDD cycles` +- Missing focus_paths: `Warning: Task lacks clear file scope for implementation` + +**Action**: Log warnings to `.workflow/active/[sessionId]/.process/tdd-warnings.log` (non-blocking) + +**TodoWrite Update (Phase 5 - tasks attached)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "completed", "activeForm": "Executing test coverage analysis"}, + {"content": "Phase 5: TDD Task Generation", "status": "in_progress", "activeForm": "Executing TDD task generation"}, + {"content": " → Discovery - analyze TDD requirements", "status": "in_progress", "activeForm": "Analyzing TDD requirements"}, + {"content": " → Planning - design Red-Green-Refactor cycles", "status": "pending", "activeForm": "Designing TDD cycles"}, + {"content": " → Output - generate IMPL tasks with internal TDD phases", "status": "pending", "activeForm": "Generating TDD tasks"}, + {"content": "Phase 6: TDD Structure Validation", "status": "pending", "activeForm": "Validating TDD structure"} +] +``` + +**TodoWrite Update (Phase 5 completed - tasks 
collapsed)**: +```json +[ + {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, + {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, + {"content": "Phase 3: Test Coverage Analysis", "status": "completed", "activeForm": "Executing test coverage analysis"}, + {"content": "Phase 5: TDD Task Generation", "status": "completed", "activeForm": "Executing TDD task generation"}, + {"content": "Phase 6: TDD Structure Validation", "status": "in_progress", "activeForm": "Validating TDD structure"} +] +``` + +### Phase 6: TDD Structure Validation & Action Plan Verification (RECOMMENDED) + +**Internal validation first, then recommend external verification** + +**Internal Validation**: +1. Each task contains complete TDD workflow (Red-Green-Refactor internally) +2. Task structure validation: + - `meta.tdd_workflow: true` in all IMPL tasks + - `meta.cli_execution_id` present (format: {session_id}-{task_id}) + - `meta.cli_execution` strategy assigned (new/resume/fork/merge_fork) + - `flow_control.implementation_approach` has exactly 3 steps + - Each step has correct `tdd_phase`: "red", "green", "refactor" + - `context.focus_paths` are absolute or clear relative paths + - `flow_control.pre_analysis` includes exploration integration analysis +3. Dependency validation: + - Sequential features: IMPL-N depends_on ["IMPL-(N-1)"] if needed + - Complex features: IMPL-N.M depends_on ["IMPL-N.(M-1)"] for subtasks + - CLI execution strategies correctly assigned based on dependency graph +4. Agent assignment: All IMPL tasks use @code-developer +5. Test-fix cycle: Green phase step includes test-fix-cycle logic with max_iterations +6. Task count: Total tasks <=18 (simple + subtasks hard limit) +7. 
User configuration: + - Execution method choice reflected in task structure + - CLI tool preference documented in implementation guidance (if CLI selected) + +**Red Flag Checklist** (from TDD best practices): +- [ ] No tasks skip Red phase (`tdd_phase: "red"` exists in step 1) +- [ ] Test files referenced in Red phase (explicit paths, not placeholders) +- [ ] Green phase has test-fix-cycle with `max_iterations` configured +- [ ] Refactor phase has clear completion criteria + +**Non-Compliance Warning Format**: +``` +Warning TDD Red Flag: [issue description] + Task: [IMPL-N] + Recommendation: [action to fix] +``` + +**Evidence Gathering** (Before Completion Claims): + +```bash +# Verify session artifacts exist +ls -la .workflow/active/[sessionId]/{IMPL_PLAN.md,TODO_LIST.md} +ls -la .workflow/active/[sessionId]/.task/IMPL-*.json + +# Count generated artifacts +echo "IMPL tasks: $(ls .workflow/active/[sessionId]/.task/IMPL-*.json 2>/dev/null | wc -l)" + +# Sample task structure verification (first task) +jq '{id, tdd: .meta.tdd_workflow, cli_id: .meta.cli_execution_id, phases: [.flow_control.implementation_approach[].tdd_phase]}' \ + "$(ls .workflow/active/[sessionId]/.task/IMPL-*.json | head -1)" +``` + +**Evidence Required Before Summary**: +| Evidence Type | Verification Method | Pass Criteria | +|---------------|---------------------|---------------| +| File existence | `ls -la` artifacts | All files present | +| Task count | Count IMPL-*.json | Count matches claims (<=18) | +| TDD structure | jq sample extraction | Shows red/green/refactor + cli_execution_id | +| CLI execution IDs | jq extraction | All tasks have cli_execution_id assigned | +| Warning log | Check tdd-warnings.log | Logged (may be empty) | + +**Return Summary**: +``` +TDD Planning complete for session: [sessionId] + +Features analyzed: [N] +Total tasks: [M] (1 task per simple feature + subtasks for complex features) + +Task breakdown: +- Simple features: [K] tasks (IMPL-1 to IMPL-K) +- Complex features: [L] features with [P] subtasks +- Total task count: [M] (within 18-task hard limit) + +Structure: +- IMPL-1: {Feature 1 Name} (Internal: Red → Green → Refactor) +- IMPL-2: {Feature 2 Name} (Internal: Red → Green → Refactor) +- IMPL-3: {Complex Feature} (Container) + - IMPL-3.1: {Sub-feature A} (Internal: Red → Green → Refactor) + - IMPL-3.2: {Sub-feature B} (Internal: Red → Green → Refactor) +[...] + +Plans generated: +- Unified Implementation Plan: .workflow/active/[sessionId]/IMPL_PLAN.md + (includes TDD Implementation Tasks section with workflow_type: "tdd") +- Task List: .workflow/active/[sessionId]/TODO_LIST.md + (with internal TDD phase indicators and CLI execution strategies) +- Task JSONs: .workflow/active/[sessionId]/.task/IMPL-*.json + (with cli_execution_id and execution strategies for resume support) + +TDD Configuration: +- Each task contains complete Red-Green-Refactor cycle +- Green phase includes test-fix cycle (max 3 iterations) +- Auto-revert on max iterations reached +- CLI execution strategies: new/resume/fork/merge_fork based on dependency graph + +User Configuration Applied: +- Execution Method: [agent|hybrid|cli] +- CLI Tool Preference: [codex|gemini|qwen|auto] +- Supplementary Materials: [included|none] +- Task generation follows cli-tools-usage.md guidelines + +ACTION REQUIRED: Before execution, ensure you understand WHY each Red phase test is expected to fail. + This is crucial for valid TDD - if you don't know why the test fails, you can't verify it tests the right thing. 
+ +Recommended Next Steps: +1. plan-verify (external) --session [sessionId] # Verify TDD plan quality and dependencies +2. workflow:execute (external) --session [sessionId] # Start TDD execution with CLI strategies +3. phases/03-tdd-verify.md [sessionId] # Post-execution TDD compliance check + +Quality Gate: Consider running plan-verify to validate TDD task structure, dependencies, and CLI execution strategies +``` + +## Input Processing + +Convert user input to TDD-structured format: + +**Simple text** → Add TDD context +**Detailed text** → Extract components with TEST_FOCUS +**File/Issue** → Read and structure with TDD + +## Data Flow + +``` +User Input (task description) + ↓ +[Convert to TDD Structured Format] + ↓ TDD Structured Description: + ↓ TDD: [Feature Name] + ↓ GOAL: [objective] + ↓ SCOPE: [boundaries] + ↓ CONTEXT: [background] + ↓ TEST_FOCUS: [test scenarios] + ↓ +Phase 1: session:start --type tdd --auto "TDD: structured-description" + ↓ Output: sessionId + ↓ +Phase 2: context-gather --session sessionId "TDD: structured-description" + ↓ Output: contextPath + conflict_risk + ↓ +Phase 3: test-context-gather --session sessionId + ↓ Output: testContextPath (test-context-package.json) + ↓ +Phase 4: conflict-resolution [AUTO-TRIGGERED if conflict_risk >= medium] + ↓ Output: Modified brainstorm artifacts + ↓ Skip if conflict_risk is none/low → proceed directly to Phase 5 + ↓ +Phase 5: task-generate-tdd --session sessionId + ↓ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md + ↓ +Phase 6: Internal validation + summary + ↓ +Return summary to user +``` + +## TodoWrite Pattern + +**Core Concept**: Dynamic task attachment and collapse for TDD workflow with test coverage analysis and Red-Green-Refactor cycle generation. + +### Key Principles + +1. **Task Attachment** (when Skill executed): + - Sub-command's internal tasks are **attached** to orchestrator's TodoWrite + - First attached task marked as `in_progress`, others as `pending` + - Orchestrator **executes** these attached tasks sequentially + +2. **Task Collapse** (after sub-tasks complete): + - Remove detailed sub-tasks from TodoWrite + - **Collapse** to high-level phase summary + - Maintains clean orchestrator-level view + +3. **Continuous Execution**: + - After collapse, automatically proceed to next pending phase + - No user intervention required between phases + - TodoWrite dynamically reflects current execution state + +**Lifecycle Summary**: Initial pending tasks → Phase executed (tasks ATTACHED) → Sub-tasks executed sequentially → Phase completed (tasks COLLAPSED to summary) → Next phase begins (conditional Phase 4 if conflict_risk >= medium) → Repeat until all phases complete. + +### TDD-Specific Features + +- **Phase 3**: Test coverage analysis detects existing patterns and gaps +- **Phase 5**: Generated IMPL tasks contain internal Red-Green-Refactor cycles +- **Conditional Phase 4**: Conflict resolution only if conflict_risk >= medium + +**Note**: See individual Phase descriptions (Phase 3, 4, 5) for detailed TodoWrite Update examples with full JSON structures. 
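+
+The conditional Phase 4 gate noted above can be checked directly against the context package produced in Phase 2. The snippet below is a minimal sketch, assuming `conflict_risk` is exposed as a top-level field of context-package.json (the exact schema location is not specified in this document); the session id is illustrative.
+
+```bash
+# Sketch: gate Phase 4 on conflict_risk from context-package.json (field location assumed)
+SESSION_ID="WFS-test-auth"
+CTX=".workflow/active/${SESSION_ID}/.process/context-package.json"
+
+RISK=$(jq -r '.conflict_risk // "none"' "$CTX")
+
+case "$RISK" in
+  medium|high) echo "conflict_risk=$RISK - Phase 4 conflict resolution applies" ;;
+  *)           echo "conflict_risk=$RISK - skip to Phase 5 (TDD Task Generation)" ;;
+esac
+```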
+ +## Execution Flow Diagram + +``` +TDD Workflow Orchestrator +│ +├─ Phase 1: Session Discovery +│ └─ workflow-plan-execute/phases/01-session-discovery.md --auto +│ └─ Returns: sessionId +│ +├─ Phase 2: Context Gathering +│ └─ workflow-plan-execute/phases/02-context-gathering.md +│ └─ Returns: context-package.json path +│ +├─ Phase 3: Test Coverage Analysis ← ATTACHED (3 tasks) +│ └─ phases/01-test-context-gather.md +│ ├─ Phase 3.1: Detect test framework +│ ├─ Phase 3.2: Analyze existing test coverage +│ └─ Phase 3.3: Identify coverage gaps +│ └─ Returns: test-context-package.json ← COLLAPSED +│ +├─ Phase 4: Conflict Resolution (conditional) +│ IF conflict_risk >= medium: +│ └─ Inline within Phase 2 context-gathering ← ATTACHED (3 tasks) +│ ├─ Phase 4.1: Detect conflicts with CLI +│ ├─ Phase 4.2: Log and analyze detected conflicts +│ └─ Phase 4.3: Apply resolution strategies +│ └─ Returns: conflict-resolution.json ← COLLAPSED +│ ELSE: +│ └─ Skip to Phase 5 +│ +├─ Phase 5: TDD Task Generation ← ATTACHED (3 tasks) +│ └─ phases/02-task-generate-tdd.md +│ ├─ Phase 5.1: Discovery - analyze TDD requirements +│ ├─ Phase 5.2: Planning - design Red-Green-Refactor cycles +│ └─ Phase 5.3: Output - generate IMPL tasks with internal TDD phases +│ └─ Returns: IMPL-*.json, IMPL_PLAN.md ← COLLAPSED +│ (Each IMPL task contains internal Red-Green-Refactor cycle) +│ +└─ Phase 6: TDD Structure Validation + └─ Internal validation + summary returned + └─ Recommend: plan-verify (external) + +Key Points: +• ← ATTACHED: Sub-tasks attached to orchestrator TodoWrite +• ← COLLAPSED: Sub-tasks executed and collapsed to phase summary +• TDD-specific: Each generated IMPL task contains complete Red-Green-Refactor cycle +``` + +## Error Handling + +- **Parsing failure**: Retry once, then report +- **Validation failure**: Report missing/invalid data +- **Command failure**: Keep phase in_progress, report error +- **TDD validation failure**: Report incomplete chains or wrong dependencies +- **Subagent timeout**: Retry wait or send_input to prompt completion, then close_agent + +### TDD Warning Patterns + +| Pattern | Warning Message | Recommended Action | +|---------|----------------|-------------------| +| Task count >10 | High task count detected | Consider splitting into multiple sessions | +| Missing test-fix-cycle | Green phase lacks auto-revert | Add `max_iterations: 3` to task config | +| Red phase missing test path | Test file path not specified | Add explicit test file paths | +| Generic task names | Vague names like "Add feature" | Use specific behavior descriptions | +| No refactor criteria | Refactor phase lacks completion criteria | Define clear refactor scope | + +### Non-Blocking Warning Policy + +**All warnings are advisory** - they do not halt execution: +1. Warnings logged to `.process/tdd-warnings.log` +2. Summary displayed in Phase 6 output +3. 
User decides whether to address before execution + +### Error Handling Quick Reference + +| Error Type | Detection | Recovery Action | +|------------|-----------|-----------------| +| Parsing failure | Empty/malformed output | Retry once, then report | +| Missing context-package | File read error | Re-run context-gather (workflow-plan-execute/phases/02-context-gathering.md) | +| Invalid task JSON | jq parse error | Report malformed file path | +| Task count exceeds 18 | Count validation >=19 | Request re-scope, split into multiple sessions | +| Missing cli_execution_id | All tasks lack ID | Regenerate tasks with phase 0 user config | +| Test-context missing | File not found | Re-run phases/01-test-context-gather.md | +| Phase timeout | No response | Retry phase, check CLI connectivity | +| CLI tool not available | Tool not in cli-tools.json | Fall back to alternative preferred tool | +| Subagent unresponsive | wait timed_out | send_input to prompt, or close_agent and spawn new | + +## Post-Execution: TDD Verification + +After TDD tasks have been executed (via workflow:execute), run TDD compliance verification: + +Read and execute: `phases/03-tdd-verify.md` with `--session [sessionId]` + +This generates a comprehensive TDD_COMPLIANCE_REPORT.md with quality gate recommendation. + +## Related Skills + +**Prerequisite**: +- None - TDD planning is self-contained (can optionally run brainstorm before) + +**Called by This Skill** (6 phases): +- workflow-plan-execute/phases/01-session-discovery.md - Phase 1: Create or discover TDD workflow session +- workflow-plan-execute/phases/02-context-gathering.md - Phase 2: Gather project context and analyze codebase +- phases/01-test-context-gather.md - Phase 3: Analyze existing test patterns and coverage +- Inline conflict resolution within Phase 2 - Phase 4: Detect and resolve conflicts (conditional) +- compact (external skill) - Phase 4.5: Memory optimization (if context approaching limits) +- phases/02-task-generate-tdd.md - Phase 5: Generate TDD tasks + +**Follow-up**: +- plan-verify (external) - Recommended: Verify TDD plan quality and structure before execution +- workflow:status (external) - Review TDD task breakdown +- workflow:execute (external) - Begin TDD implementation +- phases/03-tdd-verify.md - Post-execution: Verify TDD compliance and generate quality report + +## Next Steps Decision Table + +| Situation | Recommended Action | Purpose | +|-----------|-------------------|---------| +| First time planning | Run plan-verify (external) | Validate task structure before execution | +| Warnings in tdd-warnings.log | Review log, refine tasks | Address Red Flags before proceeding | +| High task count warning | Consider new session | Split into focused sub-sessions | +| Ready to implement | Run workflow:execute (external) | Begin TDD Red-Green-Refactor cycles | +| After implementation | Run phases/03-tdd-verify.md | Generate TDD compliance report | +| Need to review tasks | Run workflow:status (external) | Inspect current task breakdown | + +### TDD Workflow State Transitions + +``` +workflow-tdd-plan (this skill) + ↓ +[Planning Complete] ──→ plan-verify (external, recommended) + ↓ +[Verified/Ready] ─────→ workflow:execute (external) + ↓ +[Implementation] ─────→ phases/03-tdd-verify.md (post-execution) + ↓ +[Quality Report] ─────→ Done or iterate +``` diff --git a/.codex/skills/workflow-tdd-plan/phases/01-test-context-gather.md b/.codex/skills/workflow-tdd-plan/phases/01-test-context-gather.md new file mode 100644 index 00000000..a215a79b --- 
/dev/null
+++ b/.codex/skills/workflow-tdd-plan/phases/01-test-context-gather.md
@@ -0,0 +1,240 @@
+# Phase 1: Test Context Gather
+
+## Overview
+
+Orchestrator command that invokes `test-context-search-agent` to gather comprehensive test coverage context for test generation workflows. Generates standardized `test-context-package.json` with coverage analysis, framework detection, and source implementation context.
+
+## Core Philosophy
+
+- **Agent Delegation**: Delegate all test coverage analysis to `test-context-search-agent` for autonomous execution
+- **Detection-First**: Check for existing test-context-package before executing
+- **Coverage-First**: Analyze existing test coverage before planning new tests
+- **Source Context Loading**: Import implementation summaries from source session
+- **Standardized Output**: Generate `.workflow/active/{test_session_id}/.process/test-context-package.json`
+- **Explicit Lifecycle**: Always close_agent after wait completes to free resources
+
+## Execution Process
+
+```
+Input Parsing:
+  ├─ Parse flags: --session
+  └─ Validation: test_session_id REQUIRED
+
+Step 1: Test-Context-Package Detection
+  └─ Decision (existing package):
+      ├─ Valid package exists → Return existing (skip execution)
+      └─ No valid package → Continue to Step 2
+
+Step 2: Invoke Test-Context-Search Agent
+  ├─ Phase 1: Session Validation & Source Context Loading
+  │   ├─ Detection: Check for existing test-context-package
+  │   ├─ Test session validation
+  │   └─ Source context loading (summaries, changed files)
+  ├─ Phase 2: Test Coverage Analysis
+  │   ├─ Track 1: Existing test discovery
+  │   ├─ Track 2: Coverage gap analysis
+  │   └─ Track 3: Coverage statistics
+  └─ Phase 3: Framework Detection & Packaging
+      ├─ Framework identification
+      ├─ Convention analysis
+      └─ Generate test-context-package.json
+
+Step 3: Output Verification
+  └─ Verify test-context-package.json created
+```
+
+## Execution Flow
+
+### Step 1: Test-Context-Package Detection
+
+**Execute First** - Check if valid package already exists:
+
+```javascript
+const testContextPath = `.workflow/active/${test_session_id}/.process/test-context-package.json`;
+
+if (file_exists(testContextPath)) {
+  const existing = Read(testContextPath);
+
+  // Validate package belongs to current test session
+  if (existing?.metadata?.test_session_id === test_session_id) {
+    console.log("Valid test-context-package found for session:", test_session_id);
+    console.log("Coverage Stats:", existing.test_coverage.coverage_stats);
+    console.log("Framework:", existing.test_framework.framework);
+    console.log("Missing Tests:", existing.test_coverage.missing_tests.length);
+    return existing; // Skip execution, return existing
+  } else {
+    console.warn("Invalid test_session_id in existing package, re-generating...");
+  }
+}
+```
+
+### Step 2: Invoke Test-Context-Search Agent
+
+**Only execute if Step 1 finds no valid package**
+
+```javascript
+// Spawn test-context-search-agent
+const agentId = spawn_agent({
+  message: `
+## TASK ASSIGNMENT
+
+### MANDATORY FIRST STEPS (Agent Execute)
+1. **Read role definition**: ~/.codex/agents/test-context-search-agent.md (MUST read first)
+2. Read: .workflow/project-tech.json
+3. Read: .workflow/project-guidelines.json
+
+---
+
+## Execution Mode
+**PLAN MODE** (Comprehensive) - Full Phase 1-3 execution
+
+## Session Information
+- **Test Session ID**: ${test_session_id}
+- **Output Path**: .workflow/active/${test_session_id}/.process/test-context-package.json
+
+## Mission
+Execute complete test-context-search-agent workflow for test generation planning:
+
+### Phase 1: Session Validation & Source Context Loading
+1. **Detection**: Check for existing test-context-package (early exit if valid)
+2. **Test Session Validation**: Load test session metadata, extract source_session reference
+3. **Source Context Loading**: Load source session implementation summaries, changed files, tech stack
+
+### Phase 2: Test Coverage Analysis
+Execute coverage discovery:
+- **Track 1**: Existing test discovery (find *.test.*, *.spec.* files)
+- **Track 2**: Coverage gap analysis (match implementation files to test files)
+- **Track 3**: Coverage statistics (calculate percentages, identify gaps by module)
+
+### Phase 3: Framework Detection & Packaging
+1. Framework identification from package.json/requirements.txt
+2. Convention analysis from existing test patterns
+3. Generate and validate test-context-package.json
+
+## Output Requirements
+Complete test-context-package.json with:
+- **metadata**: test_session_id, source_session_id, task_type, complexity
+- **source_context**: implementation_summaries, tech_stack, project_patterns
+- **test_coverage**: existing_tests[], missing_tests[], coverage_stats
+- **test_framework**: framework, version, test_pattern, conventions
+- **assets**: implementation_summary[], existing_test[], source_code[] with priorities
+- **focus_areas**: Test generation guidance based on coverage gaps
+
+## Quality Validation
+Before completion verify:
+- [ ] Valid JSON format with all required fields
+- [ ] Source session context loaded successfully
+- [ ] Test coverage gaps identified
+- [ ] Test framework detected (or marked as 'unknown')
+- [ ] Coverage percentage calculated correctly
+- [ ] Missing tests catalogued with priority
+- [ ] Execution time < 30 seconds (< 60s for large codebases)
+
+Execute autonomously following agent documentation.
+Report completion with coverage statistics.
+`
+});
+
+// Wait for agent completion
+const result = wait({
+  ids: [agentId],
+  timeout_ms: 300000 // 5 minutes
+});
+
+// Handle timeout
+if (result.timed_out) {
+  console.warn("Test context gathering timed out, sending prompt...");
+  send_input({
+    id: agentId,
+    message: "Please complete test-context-package.json generation and report results."
+  });
+  const retryResult = wait({ ids: [agentId], timeout_ms: 120000 });
+}
+
+// Clean up agent resources
+close_agent({ id: agentId });
+```
+
+### Step 3: Output Verification
+
+After agent completes, verify output:
+
+```javascript
+// Verify file was created
+const outputPath = `.workflow/active/${test_session_id}/.process/test-context-package.json`;
+if (!file_exists(outputPath)) {
+  throw new Error("Agent failed to generate test-context-package.json");
+}
+
+// Load and display summary
+const testContext = Read(outputPath);
+console.log("Test context package generated successfully");
+console.log("Coverage:", testContext.test_coverage.coverage_stats.coverage_percentage + "%");
+console.log("Tests to generate:", testContext.test_coverage.missing_tests.length);
+```
+
+## Parameter Reference
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `--session` | string | Yes | Test workflow session ID (e.g., WFS-test-auth) |
+
+## Output Schema
+
+Refer to `test-context-search-agent.md` Phase 3.2 for complete `test-context-package.json` schema.
+
+**Key Sections**:
+- **metadata**: Test session info, source session reference, complexity
+- **source_context**: Implementation summaries with changed files and tech stack
+- **test_coverage**: Existing tests, missing tests with priorities, coverage statistics
+- **test_framework**: Framework name, version, patterns, conventions
+- **assets**: Categorized files with relevance (implementation_summary, existing_test, source_code)
+- **focus_areas**: Test generation guidance based on analysis
+
+## Success Criteria
+
+- Valid test-context-package.json generated in `.workflow/active/{test_session_id}/.process/`
+- Source session context loaded successfully
+- Test coverage gaps identified (>90% accuracy)
+- Test framework detected and documented
+- Execution completes within 30 seconds (60s for large codebases)
+- All required schema fields present and valid
+- Coverage statistics calculated correctly
+- Agent reports completion with statistics
+
+## Error Handling
+
+| Error | Cause | Resolution |
+|-------|-------|------------|
+| Package validation failed | Invalid test_session_id in existing package | Re-run agent to regenerate |
+| Source session not found | Invalid source_session reference | Verify test session metadata |
+| No implementation summaries | Source session incomplete | Complete source session first |
+| Agent execution timeout | Large codebase or slow analysis | Increase timeout, check file access |
+| Missing required fields | Agent incomplete execution | Check agent logs, verify schema compliance |
+| No test framework detected | Missing test dependencies | Agent marks as 'unknown', manual specification needed |
+
+## Integration
+
+### Called By
+- SKILL.md (Phase 3: Test Coverage Analysis)
+
+### Calls
+- `test-context-search-agent` via spawn_agent - Autonomous test coverage analysis
+
+## Notes
+
+- **Detection-first**: Always check for existing test-context-package before invoking agent
+- **No redundancy**: This command is a thin orchestrator, all logic in agent
+- **Framework agnostic**: Supports Jest, Mocha, pytest, RSpec, Go testing, etc.
+- **Coverage focus**: Primary goal is identifying implementation files without tests +- **Explicit lifecycle**: Always close_agent after wait completes + +--- + +## Post-Phase Update + +After Phase 1 (Test Context Gather) completes: +- **Output Created**: `test-context-package.json` in `.workflow/active/{session}/.process/` +- **Data Available**: Test coverage stats, framework info, missing tests list +- **Next Action**: Continue to Phase 4 (Conflict Resolution, if conflict_risk >= medium) or Phase 5 (TDD Task Generation) +- **TodoWrite**: Collapse Phase 3 sub-tasks to "Phase 3: Test Coverage Analysis: completed" diff --git a/ccw/docs-site/.docusaurus/codeTranslations.json b/ccw/docs-site/.docusaurus/codeTranslations.json index 9e26dfee..5de58a22 100644 --- a/ccw/docs-site/.docusaurus/codeTranslations.json +++ b/ccw/docs-site/.docusaurus/codeTranslations.json @@ -1 +1,84 @@ -{} \ No newline at end of file +{ + "theme.AnnouncementBar.closeButtonAriaLabel": "关闭", + "theme.BackToTopButton.buttonAriaLabel": "回到顶部", + "theme.CodeBlock.copied": "复制成功", + "theme.CodeBlock.copy": "复制", + "theme.CodeBlock.copyButtonAriaLabel": "复制代码到剪贴板", + "theme.CodeBlock.wordWrapToggle": "切换自动换行", + "theme.DocSidebarItem.collapseCategoryAriaLabel": "折叠侧边栏分类 '{label}'", + "theme.DocSidebarItem.expandCategoryAriaLabel": "展开侧边栏分类 '{label}'", + "theme.ErrorPageContent.title": "页面已崩溃。", + "theme.ErrorPageContent.tryAgain": "重试", + "theme.IconExternalLink.ariaLabel": "(opens in new tab)", + "theme.NavBar.navAriaLabel": "主导航", + "theme.NotFound.p1": "我们找不到您要找的页面。", + "theme.NotFound.p2": "请联系原始链接来源网站的所有者,并告知他们链接已损坏。", + "theme.NotFound.title": "找不到页面", + "theme.TOCCollapsible.toggleButtonLabel": "本页总览", + "theme.admonition.caution": "警告", + "theme.admonition.danger": "危险", + "theme.admonition.info": "信息", + "theme.admonition.note": "备注", + "theme.admonition.tip": "提示", + "theme.admonition.warning": "注意", + "theme.blog.archive.description": "历史博文", + "theme.blog.archive.title": "历史博文", + "theme.blog.author.noPosts": "该作者尚未撰写任何文章。", + "theme.blog.author.pageTitle": "{authorName} - {nPosts}", + "theme.blog.authorsList.pageTitle": "作者", + "theme.blog.authorsList.viewAll": "查看所有作者", + "theme.blog.paginator.navAriaLabel": "博文列表分页导航", + "theme.blog.paginator.newerEntries": "较新的博文", + "theme.blog.paginator.olderEntries": "较旧的博文", + "theme.blog.post.paginator.navAriaLabel": "博文分页导航", + "theme.blog.post.paginator.newerPost": "较新一篇", + "theme.blog.post.paginator.olderPost": "较旧一篇", + "theme.blog.post.plurals": "{count} 篇博文", + "theme.blog.post.readMore": "阅读更多", + "theme.blog.post.readMoreLabel": "阅读 {title} 的全文", + "theme.blog.post.readingTime.plurals": "阅读需 {readingTime} 分钟", + "theme.blog.sidebar.navAriaLabel": "最近博文导航", + "theme.blog.tagTitle": "{nPosts} 含有标签「{tagName}」", + "theme.colorToggle.ariaLabel": "切换浅色/暗黑模式(当前为{mode})", + "theme.colorToggle.ariaLabel.mode.dark": "暗黑模式", + "theme.colorToggle.ariaLabel.mode.light": "浅色模式", + "theme.colorToggle.ariaLabel.mode.system": "system mode", + "theme.common.editThisPage": "编辑此页", + "theme.common.headingLinkTitle": "{heading}的直接链接", + "theme.common.skipToMainContent": "跳到主要内容", + "theme.contentVisibility.draftBanner.message": "此页面是草稿,仅在开发环境中可见,不会包含在正式版本中。", + "theme.contentVisibility.draftBanner.title": "草稿页", + "theme.contentVisibility.unlistedBanner.message": "此页面未列出。搜索引擎不会对其索引,只有拥有直接链接的用户才能访问。", + "theme.contentVisibility.unlistedBanner.title": "未列出页", + "theme.docs.DocCard.categoryDescription.plurals": "{count} 个项目", + "theme.docs.breadcrumbs.home": "主页面", + 
"theme.docs.breadcrumbs.navAriaLabel": "页面路径", + "theme.docs.paginator.navAriaLabel": "文件选项卡", + "theme.docs.paginator.next": "下一页", + "theme.docs.paginator.previous": "上一页", + "theme.docs.sidebar.closeSidebarButtonAriaLabel": "关闭导航栏", + "theme.docs.sidebar.collapseButtonAriaLabel": "收起侧边栏", + "theme.docs.sidebar.collapseButtonTitle": "收起侧边栏", + "theme.docs.sidebar.expandButtonAriaLabel": "展开侧边栏", + "theme.docs.sidebar.expandButtonTitle": "展开侧边栏", + "theme.docs.sidebar.navAriaLabel": "文档侧边栏", + "theme.docs.sidebar.toggleSidebarButtonAriaLabel": "切换导航栏", + "theme.docs.tagDocListPageTitle": "{nDocsTagged}「{tagName}」", + "theme.docs.tagDocListPageTitle.nDocsTagged": "{count} 篇文档带有标签", + "theme.docs.versionBadge.label": "版本:{versionLabel}", + "theme.docs.versions.latestVersionLinkLabel": "最新版本", + "theme.docs.versions.latestVersionSuggestionLabel": "最新的文档请参阅 {latestVersionLink} ({versionLabel})。", + "theme.docs.versions.unmaintainedVersionLabel": "此为 {siteTitle} {versionLabel} 版的文档,现已不再积极维护。", + "theme.docs.versions.unreleasedVersionLabel": "此为 {siteTitle} {versionLabel} 版尚未发行的文档。", + "theme.lastUpdated.atDate": "于 {date} ", + "theme.lastUpdated.byUser": "由 {user} ", + "theme.lastUpdated.lastUpdatedAtBy": "最后{byUser}{atDate}更新", + "theme.navbar.mobileDropdown.collapseButton.collapseAriaLabel": "Collapse the dropdown", + "theme.navbar.mobileDropdown.collapseButton.expandAriaLabel": "Expand the dropdown", + "theme.navbar.mobileLanguageDropdown.label": "选择语言", + "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": "← 回到主菜单", + "theme.navbar.mobileVersionsDropdown.label": "选择版本", + "theme.tags.tagsListLabel": "标签:", + "theme.tags.tagsPageLink": "查看所有标签", + "theme.tags.tagsPageTitle": "标签" +} \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/__mdx-loader-dependency.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/__mdx-loader-dependency.json index 1f53d98b..44ca291c 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/__mdx-loader-dependency.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/__mdx-loader-dependency.json @@ -1 +1 @@ 
-{"options":{"path":"docs","routeBasePath":"/","sidebarPath":"D:\\Claude_dms3\\ccw\\docs-site\\sidebars.ts","editUrl":"https://github.com/ccw/docs/tree/main/","editCurrentVersion":false,"editLocalizedFiles":false,"tagsBasePath":"tags","include":["**/*.{md,mdx}"],"exclude":["**/_*.{js,jsx,ts,tsx,md,mdx}","**/_*/**","**/*.test.{js,jsx,ts,tsx}","**/__tests__/**"],"sidebarCollapsible":true,"sidebarCollapsed":true,"docsRootComponent":"@theme/DocsRoot","docVersionRootComponent":"@theme/DocVersionRoot","docRootComponent":"@theme/DocRoot","docItemComponent":"@theme/DocItem","docTagsListComponent":"@theme/DocTagsListPage","docTagDocListComponent":"@theme/DocTagDocListPage","docCategoryGeneratedIndexComponent":"@theme/DocCategoryGeneratedIndexPage","remarkPlugins":[],"rehypePlugins":[],"recmaPlugins":[],"beforeDefaultRemarkPlugins":[],"beforeDefaultRehypePlugins":[],"admonitions":true,"showLastUpdateTime":false,"showLastUpdateAuthor":false,"includeCurrentVersion":true,"disableVersioning":false,"versions":{},"breadcrumbs":true,"onInlineTags":"warn","id":"default"},"versionsMetadata":[{"versionName":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","path":"/docs/","tagsPath":"/docs/tags","editUrl":"https://github.com/ccw/docs/tree/main/docs","isLast":true,"routePriority":-1,"sidebarFilePath":"D:\\Claude_dms3\\ccw\\docs-site\\sidebars.ts","contentPath":"D:\\Claude_dms3\\ccw\\docs-site\\docs"}]} \ No newline at end of file +{"options":{"path":"docs","routeBasePath":"/","sidebarPath":"D:\\Claude_dms3\\ccw\\docs-site\\sidebars.ts","editUrl":"https://github.com/ccw/docs/tree/main/","editCurrentVersion":false,"editLocalizedFiles":false,"tagsBasePath":"tags","include":["**/*.{md,mdx}"],"exclude":["**/_*.{js,jsx,ts,tsx,md,mdx}","**/_*/**","**/*.test.{js,jsx,ts,tsx}","**/__tests__/**"],"sidebarCollapsible":true,"sidebarCollapsed":true,"docsRootComponent":"@theme/DocsRoot","docVersionRootComponent":"@theme/DocVersionRoot","docRootComponent":"@theme/DocRoot","docItemComponent":"@theme/DocItem","docTagsListComponent":"@theme/DocTagsListPage","docTagDocListComponent":"@theme/DocTagDocListPage","docCategoryGeneratedIndexComponent":"@theme/DocCategoryGeneratedIndexPage","remarkPlugins":[],"rehypePlugins":[],"recmaPlugins":[],"beforeDefaultRemarkPlugins":[],"beforeDefaultRehypePlugins":[],"admonitions":true,"showLastUpdateTime":false,"showLastUpdateAuthor":false,"includeCurrentVersion":true,"disableVersioning":false,"versions":{},"breadcrumbs":true,"onInlineTags":"warn","id":"default"},"versionsMetadata":[{"versionName":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","path":"/docs/zh/","tagsPath":"/docs/zh/tags","editUrl":"https://github.com/ccw/docs/tree/main/docs","editUrlLocalized":"https://github.com/ccw/docs/tree/main/i18n/zh/docusaurus-plugin-content-docs/current","isLast":true,"routePriority":-1,"sidebarFilePath":"D:\\Claude_dms3\\ccw\\docs-site\\sidebars.ts","contentPath":"D:\\Claude_dms3\\ccw\\docs-site\\docs","contentPathLocalized":"D:\\Claude_dms3\\ccw\\docs-site\\i18n\\zh\\docusaurus-plugin-content-docs\\current"}]} \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-cli-init-mdx-056.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-cli-init-mdx-056.json index 98324c27..b61807e2 100644 --- 
a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-cli-init-mdx-056.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-cli-init-mdx-056.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/cli/cli-init.mdx", "sourceDirName": "commands/cli", "slug": "/commands/cli/cli-init", - "permalink": "/docs/commands/cli/cli-init", + "permalink": "/docs/zh/commands/cli/cli-init", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/cli/cli-init.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:convert-to-plan", - "permalink": "/docs/commands/issue/issue-convert-to-plan" + "permalink": "/docs/zh/commands/issue/issue-convert-to-plan" }, "next": { "title": "/cli:codex-review", - "permalink": "/docs/commands/cli/codex-review" + "permalink": "/docs/zh/commands/cli/codex-review" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-codex-review-mdx-f1b.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-codex-review-mdx-f1b.json index 0ff172cc..ed61290c 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-codex-review-mdx-f1b.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-cli-codex-review-mdx-f1b.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/cli/codex-review.mdx", "sourceDirName": "commands/cli", "slug": "/commands/cli/codex-review", - "permalink": "/docs/commands/cli/codex-review", + "permalink": "/docs/zh/commands/cli/codex-review", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/cli/codex-review.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/cli:cli-init", - "permalink": "/docs/commands/cli/cli-init" + "permalink": "/docs/zh/commands/cli/cli-init" }, "next": { "title": "/memory:update-full", - "permalink": "/docs/commands/memory/memory-update-full" + "permalink": "/docs/zh/commands/memory/memory-update-full" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-coordinator-mdx-d55.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-coordinator-mdx-d55.json index 43d3add3..c723148a 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-coordinator-mdx-d55.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-coordinator-mdx-d55.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/ccw-coordinator.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/ccw-coordinator", - "permalink": "/docs/commands/general/ccw-coordinator", + "permalink": "/docs/zh/commands/general/ccw-coordinator", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/ccw-coordinator.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/ccw-test", - "permalink": "/docs/commands/general/ccw-test" + "permalink": "/docs/zh/commands/general/ccw-test" }, "next": { "title": "/ccw-debug", - "permalink": "/docs/commands/general/ccw-debug" + "permalink": "/docs/zh/commands/general/ccw-debug" } } \ No newline at end of file diff --git 
a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-debug-mdx-97c.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-debug-mdx-97c.json index 52dc34aa..a6160b72 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-debug-mdx-97c.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-debug-mdx-97c.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/ccw-debug.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/ccw-debug", - "permalink": "/docs/commands/general/ccw-debug", + "permalink": "/docs/zh/commands/general/ccw-debug", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/ccw-debug.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/ccw-coordinator", - "permalink": "/docs/commands/general/ccw-coordinator" + "permalink": "/docs/zh/commands/general/ccw-coordinator" }, "next": { "title": "/flow-create", - "permalink": "/docs/commands/general/flow-create" + "permalink": "/docs/zh/commands/general/flow-create" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-mdx-f48.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-mdx-f48.json index b95e3dd9..b15bcb3c 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-mdx-f48.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-mdx-f48.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/ccw.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/ccw", - "permalink": "/docs/commands/general/ccw", + "permalink": "/docs/zh/commands/general/ccw", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/ccw.mdx", @@ -20,11 +20,11 @@ }, "sidebar": "docs", "previous": { - "title": "Overview", - "permalink": "/docs/overview" + "title": "概览", + "permalink": "/docs/zh/overview" }, "next": { "title": "/ccw-plan", - "permalink": "/docs/commands/general/ccw-plan" + "permalink": "/docs/zh/commands/general/ccw-plan" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-plan-mdx-04d.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-plan-mdx-04d.json index 753a139a..d17bf091 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-plan-mdx-04d.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-plan-mdx-04d.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/ccw-plan.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/ccw-plan", - "permalink": "/docs/commands/general/ccw-plan", + "permalink": "/docs/zh/commands/general/ccw-plan", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/ccw-plan.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/ccw", - "permalink": "/docs/commands/general/ccw" + "permalink": "/docs/zh/commands/general/ccw" }, "next": { "title": "/ccw-test", - "permalink": 
"/docs/commands/general/ccw-test" + "permalink": "/docs/zh/commands/general/ccw-test" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-test-mdx-cce.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-test-mdx-cce.json index 867a837d..4e2720d3 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-test-mdx-cce.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-ccw-test-mdx-cce.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/ccw-test.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/ccw-test", - "permalink": "/docs/commands/general/ccw-test", + "permalink": "/docs/zh/commands/general/ccw-test", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/ccw-test.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/ccw-plan", - "permalink": "/docs/commands/general/ccw-plan" + "permalink": "/docs/zh/commands/general/ccw-plan" }, "next": { "title": "/ccw-coordinator", - "permalink": "/docs/commands/general/ccw-coordinator" + "permalink": "/docs/zh/commands/general/ccw-coordinator" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-codex-coordinator-mdx-f92.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-codex-coordinator-mdx-f92.json index f51432cd..06b81cab 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-codex-coordinator-mdx-f92.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-codex-coordinator-mdx-f92.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/codex-coordinator.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/codex-coordinator", - "permalink": "/docs/commands/general/codex-coordinator", + "permalink": "/docs/zh/commands/general/codex-coordinator", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/general/codex-coordinator.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/flow-create", - "permalink": "/docs/commands/general/flow-create" + "permalink": "/docs/zh/commands/general/flow-create" }, "next": { "title": "issue:new", - "permalink": "/docs/commands/issue/issue-new" + "permalink": "/docs/zh/commands/issue/issue-new" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-flow-create-mdx-fab.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-flow-create-mdx-fab.json index af2ca45f..2432a1cd 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-flow-create-mdx-fab.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-general-flow-create-mdx-fab.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/general/flow-create.mdx", "sourceDirName": "commands/general", "slug": "/commands/general/flow-create", - "permalink": "/docs/commands/general/flow-create", + "permalink": "/docs/zh/commands/general/flow-create", "draft": false, "unlisted": false, "editUrl": 
"https://github.com/ccw/docs/tree/main/docs/commands/general/flow-create.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/ccw-debug", - "permalink": "/docs/commands/general/ccw-debug" + "permalink": "/docs/zh/commands/general/ccw-debug" }, "next": { "title": "/codex-coordinator", - "permalink": "/docs/commands/general/codex-coordinator" + "permalink": "/docs/zh/commands/general/codex-coordinator" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-convert-to-plan-md-5c7.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-convert-to-plan-md-5c7.json index 357625e4..45c9c689 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-convert-to-plan-md-5c7.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-convert-to-plan-md-5c7.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-convert-to-plan.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-convert-to-plan", - "permalink": "/docs/commands/issue/issue-convert-to-plan", + "permalink": "/docs/zh/commands/issue/issue-convert-to-plan", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-convert-to-plan.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:from-brainstorm", - "permalink": "/docs/commands/issue/issue-from-brainstorm" + "permalink": "/docs/zh/commands/issue/issue-from-brainstorm" }, "next": { "title": "/cli:cli-init", - "permalink": "/docs/commands/cli/cli-init" + "permalink": "/docs/zh/commands/cli/cli-init" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-discover-md-1e3.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-discover-md-1e3.json index eb9f0d1a..3d22fdda 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-discover-md-1e3.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-discover-md-1e3.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-discover.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-discover", - "permalink": "/docs/commands/issue/issue-discover", + "permalink": "/docs/zh/commands/issue/issue-discover", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-discover.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:new", - "permalink": "/docs/commands/issue/issue-new" + "permalink": "/docs/zh/commands/issue/issue-new" }, "next": { "title": "issue:plan", - "permalink": "/docs/commands/issue/issue-plan" + "permalink": "/docs/zh/commands/issue/issue-plan" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-execute-md-fe8.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-execute-md-fe8.json index e5c33039..bd853cc3 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-execute-md-fe8.json +++ 
b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-execute-md-fe8.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-execute.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-execute", - "permalink": "/docs/commands/issue/issue-execute", + "permalink": "/docs/zh/commands/issue/issue-execute", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-execute.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:queue", - "permalink": "/docs/commands/issue/issue-queue" + "permalink": "/docs/zh/commands/issue/issue-queue" }, "next": { "title": "issue:from-brainstorm", - "permalink": "/docs/commands/issue/issue-from-brainstorm" + "permalink": "/docs/zh/commands/issue/issue-from-brainstorm" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-from-brainstorm-md-2ec.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-from-brainstorm-md-2ec.json index 1b24145e..fcd79234 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-from-brainstorm-md-2ec.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-from-brainstorm-md-2ec.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-from-brainstorm.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-from-brainstorm", - "permalink": "/docs/commands/issue/issue-from-brainstorm", + "permalink": "/docs/zh/commands/issue/issue-from-brainstorm", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-from-brainstorm.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:execute", - "permalink": "/docs/commands/issue/issue-execute" + "permalink": "/docs/zh/commands/issue/issue-execute" }, "next": { "title": "issue:convert-to-plan", - "permalink": "/docs/commands/issue/issue-convert-to-plan" + "permalink": "/docs/zh/commands/issue/issue-convert-to-plan" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-new-md-4ad.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-new-md-4ad.json index 9e407718..8daa4bce 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-new-md-4ad.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-new-md-4ad.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-new.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-new", - "permalink": "/docs/commands/issue/issue-new", + "permalink": "/docs/zh/commands/issue/issue-new", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-new.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/codex-coordinator", - "permalink": "/docs/commands/general/codex-coordinator" + "permalink": "/docs/zh/commands/general/codex-coordinator" }, "next": { "title": "issue:discover", - "permalink": "/docs/commands/issue/issue-discover" + "permalink": "/docs/zh/commands/issue/issue-discover" } } \ No newline at end of file diff --git 
a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-plan-md-a6c.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-plan-md-a6c.json index e3b19693..2abd3a33 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-plan-md-a6c.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-plan-md-a6c.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-plan.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-plan", - "permalink": "/docs/commands/issue/issue-plan", + "permalink": "/docs/zh/commands/issue/issue-plan", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-plan.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:discover", - "permalink": "/docs/commands/issue/issue-discover" + "permalink": "/docs/zh/commands/issue/issue-discover" }, "next": { "title": "issue:queue", - "permalink": "/docs/commands/issue/issue-queue" + "permalink": "/docs/zh/commands/issue/issue-queue" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-queue-md-1ba.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-queue-md-1ba.json index 5abd0c80..3c33214a 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-queue-md-1ba.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-issue-issue-queue-md-1ba.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/issue/issue-queue.md", "sourceDirName": "commands/issue", "slug": "/commands/issue/issue-queue", - "permalink": "/docs/commands/issue/issue-queue", + "permalink": "/docs/zh/commands/issue/issue-queue", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/issue/issue-queue.md", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "issue:plan", - "permalink": "/docs/commands/issue/issue-plan" + "permalink": "/docs/zh/commands/issue/issue-plan" }, "next": { "title": "issue:execute", - "permalink": "/docs/commands/issue/issue-execute" + "permalink": "/docs/zh/commands/issue/issue-execute" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-compact-mdx-7a1.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-compact-mdx-7a1.json index f4873aa5..a6749967 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-compact-mdx-7a1.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-compact-mdx-7a1.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/memory/memory-compact.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-compact", - "permalink": "/docs/commands/memory/memory-compact", + "permalink": "/docs/zh/commands/memory/memory-compact", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-compact.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/memory:docs-related-cli", - "permalink": "/docs/commands/memory/memory-docs-related-cli" + 
"permalink": "/docs/zh/commands/memory/memory-docs-related-cli" }, "next": { "title": "Introduction", - "permalink": "/docs/workflows/introduction" + "permalink": "/docs/zh/workflows/introduction" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-full-cli-mdx-4cc.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-full-cli-mdx-4cc.json index f6394151..ced106c4 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-full-cli-mdx-4cc.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-full-cli-mdx-4cc.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/memory/memory-docs-full-cli.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-docs-full-cli", - "permalink": "/docs/commands/memory/memory-docs-full-cli", + "permalink": "/docs/zh/commands/memory/memory-docs-full-cli", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-docs-full-cli.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/memory:load", - "permalink": "/docs/commands/memory/memory-load" + "permalink": "/docs/zh/commands/memory/memory-load" }, "next": { "title": "/memory:docs-related-cli", - "permalink": "/docs/commands/memory/memory-docs-related-cli" + "permalink": "/docs/zh/commands/memory/memory-docs-related-cli" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-related-cli-mdx-60e.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-related-cli-mdx-60e.json index 3f4054ed..52a7e910 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-related-cli-mdx-60e.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-docs-related-cli-mdx-60e.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/memory/memory-docs-related-cli.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-docs-related-cli", - "permalink": "/docs/commands/memory/memory-docs-related-cli", + "permalink": "/docs/zh/commands/memory/memory-docs-related-cli", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-docs-related-cli.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/memory:docs-full-cli", - "permalink": "/docs/commands/memory/memory-docs-full-cli" + "permalink": "/docs/zh/commands/memory/memory-docs-full-cli" }, "next": { "title": "/memory:compact", - "permalink": "/docs/commands/memory/memory-compact" + "permalink": "/docs/zh/commands/memory/memory-compact" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-load-mdx-157.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-load-mdx-157.json index ef887547..3438f60c 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-load-mdx-157.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-load-mdx-157.json @@ -5,7 +5,7 @@ 
"source": "@site/docs/commands/memory/memory-load.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-load", - "permalink": "/docs/commands/memory/memory-load", + "permalink": "/docs/zh/commands/memory/memory-load", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-load.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/memory:update-related", - "permalink": "/docs/commands/memory/memory-update-related" + "permalink": "/docs/zh/commands/memory/memory-update-related" }, "next": { "title": "/memory:docs-full-cli", - "permalink": "/docs/commands/memory/memory-docs-full-cli" + "permalink": "/docs/zh/commands/memory/memory-docs-full-cli" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-full-mdx-666.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-full-mdx-666.json index 76cd168b..9c9510d5 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-full-mdx-666.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-full-mdx-666.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/memory/memory-update-full.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-update-full", - "permalink": "/docs/commands/memory/memory-update-full", + "permalink": "/docs/zh/commands/memory/memory-update-full", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-update-full.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/cli:codex-review", - "permalink": "/docs/commands/cli/codex-review" + "permalink": "/docs/zh/commands/cli/codex-review" }, "next": { "title": "/memory:update-related", - "permalink": "/docs/commands/memory/memory-update-related" + "permalink": "/docs/zh/commands/memory/memory-update-related" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-related-mdx-611.json b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-related-mdx-611.json index 97bcd2d8..dbff395e 100644 --- a/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-related-mdx-611.json +++ b/ccw/docs-site/.docusaurus/docusaurus-plugin-content-docs/default/site-docs-commands-memory-memory-update-related-mdx-611.json @@ -5,7 +5,7 @@ "source": "@site/docs/commands/memory/memory-update-related.mdx", "sourceDirName": "commands/memory", "slug": "/commands/memory/memory-update-related", - "permalink": "/docs/commands/memory/memory-update-related", + "permalink": "/docs/zh/commands/memory/memory-update-related", "draft": false, "unlisted": false, "editUrl": "https://github.com/ccw/docs/tree/main/docs/commands/memory/memory-update-related.mdx", @@ -21,10 +21,10 @@ "sidebar": "docs", "previous": { "title": "/memory:update-full", - "permalink": "/docs/commands/memory/memory-update-full" + "permalink": "/docs/zh/commands/memory/memory-update-full" }, "next": { "title": "/memory:load", - "permalink": "/docs/commands/memory/memory-load" + "permalink": "/docs/zh/commands/memory/memory-load" } } \ No newline at end of file diff --git 
a/ccw/docs-site/.docusaurus/docusaurus.config.mjs b/ccw/docs-site/.docusaurus/docusaurus.config.mjs
index 98db47c6..d8173893 100644
--- a/ccw/docs-site/.docusaurus/docusaurus.config.mjs
+++ b/ccw/docs-site/.docusaurus/docusaurus.config.mjs
@@ -8,7 +8,7 @@ export default {
   "tagline": "Professional Workflow Automation Platform",
   "favicon": "img/favicon.svg",
   "url": "http://localhost:3001",
-  "baseUrl": "/docs/",
+  "baseUrl": "/docs/zh/",
   "organizationName": "ccw",
   "projectName": "docs",
   "trailingSlash": false,
@@ -48,9 +48,9 @@ export default {
   ],
   "themeConfig": {
     "navbar": {
-      "title": "CCW Help",
+      "title": "CCW 帮助",
       "logo": {
-        "alt": "CCW Logo",
+        "alt": "CCW 标志",
         "src": "img/logo.svg"
       },
       "items": [
@@ -65,7 +65,7 @@ export default {
     },
     "footer": {
       "style": "dark",
-      "copyright": "Copyright © 2026 CCW. Built with Docusaurus.",
+      "copyright": "版权 © 2026 CCW。使用 Docusaurus 构建。",
       "links": []
     },
     "prism": {
diff --git a/ccw/docs-site/.docusaurus/globalData.json b/ccw/docs-site/.docusaurus/globalData.json
index d9132d1c..db17fcf0 100644
--- a/ccw/docs-site/.docusaurus/globalData.json
+++ b/ccw/docs-site/.docusaurus/globalData.json
@@ -1,172 +1,172 @@
 {
   "docusaurus-plugin-content-docs": {
     "default": {
-      "path": "/docs/",
+      "path": "/docs/zh/",
       "versions": [
         {
           "name": "current",
-          "label": "Next",
+          "label": "当前",
           "isLast": true,
-          "path": "/docs/",
+          "path": "/docs/zh/",
           "mainDocId": "index",
           "docs": [
             {
               "id": "commands/cli/cli-init",
-              "path": "/docs/commands/cli/cli-init",
+              "path": "/docs/zh/commands/cli/cli-init",
               "sidebar": "docs"
             },
             {
               "id": "commands/cli/codex-review",
-              "path": "/docs/commands/cli/codex-review",
+              "path": "/docs/zh/commands/cli/codex-review",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/ccw",
-              "path": "/docs/commands/general/ccw",
+              "path": "/docs/zh/commands/general/ccw",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/ccw-coordinator",
-              "path": "/docs/commands/general/ccw-coordinator",
+              "path": "/docs/zh/commands/general/ccw-coordinator",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/ccw-debug",
-              "path": "/docs/commands/general/ccw-debug",
+              "path": "/docs/zh/commands/general/ccw-debug",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/ccw-plan",
-              "path": "/docs/commands/general/ccw-plan",
+              "path": "/docs/zh/commands/general/ccw-plan",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/ccw-test",
-              "path": "/docs/commands/general/ccw-test",
+              "path": "/docs/zh/commands/general/ccw-test",
              "sidebar": "docs"
             },
             {
               "id": "commands/general/codex-coordinator",
-              "path": "/docs/commands/general/codex-coordinator",
+              "path": "/docs/zh/commands/general/codex-coordinator",
               "sidebar": "docs"
             },
             {
               "id": "commands/general/flow-create",
-              "path": "/docs/commands/general/flow-create",
+              "path": "/docs/zh/commands/general/flow-create",
               "sidebar": "docs"
             },
             {
               "id": "commands/issue/issue-convert-to-plan",
-              "path": "/docs/commands/issue/issue-convert-to-plan",
+              "path": "/docs/zh/commands/issue/issue-convert-to-plan",
               "sidebar": "docs"
             },
             {
               "id": "commands/issue/issue-discover",
-              "path": "/docs/commands/issue/issue-discover",
+              "path": "/docs/zh/commands/issue/issue-discover",
               "sidebar": "docs"
             },
             {
               "id": "commands/issue/issue-execute",
-              "path": "/docs/commands/issue/issue-execute",
+              "path": "/docs/zh/commands/issue/issue-execute",
               "sidebar": "docs"
             },
             {
               "id": "commands/issue/issue-from-brainstorm",
-              "path": "/docs/commands/issue/issue-from-brainstorm",
+              "path": "/docs/zh/commands/issue/issue-from-brainstorm",
               "sidebar": "docs"
             },
             {
               "id": "commands/issue/issue-new",
-              "path": 
"/docs/commands/issue/issue-new", + "path": "/docs/zh/commands/issue/issue-new", "sidebar": "docs" }, { "id": "commands/issue/issue-plan", - "path": "/docs/commands/issue/issue-plan", + "path": "/docs/zh/commands/issue/issue-plan", "sidebar": "docs" }, { "id": "commands/issue/issue-queue", - "path": "/docs/commands/issue/issue-queue", + "path": "/docs/zh/commands/issue/issue-queue", "sidebar": "docs" }, { "id": "commands/memory/memory-compact", - "path": "/docs/commands/memory/memory-compact", + "path": "/docs/zh/commands/memory/memory-compact", "sidebar": "docs" }, { "id": "commands/memory/memory-docs-full-cli", - "path": "/docs/commands/memory/memory-docs-full-cli", + "path": "/docs/zh/commands/memory/memory-docs-full-cli", "sidebar": "docs" }, { "id": "commands/memory/memory-docs-related-cli", - "path": "/docs/commands/memory/memory-docs-related-cli", + "path": "/docs/zh/commands/memory/memory-docs-related-cli", "sidebar": "docs" }, { "id": "commands/memory/memory-load", - "path": "/docs/commands/memory/memory-load", + "path": "/docs/zh/commands/memory/memory-load", "sidebar": "docs" }, { "id": "commands/memory/memory-update-full", - "path": "/docs/commands/memory/memory-update-full", + "path": "/docs/zh/commands/memory/memory-update-full", "sidebar": "docs" }, { "id": "commands/memory/memory-update-related", - "path": "/docs/commands/memory/memory-update-related", + "path": "/docs/zh/commands/memory/memory-update-related", "sidebar": "docs" }, { "id": "faq", - "path": "/docs/faq", + "path": "/docs/zh/faq", "sidebar": "docs" }, { "id": "index", - "path": "/docs/", + "path": "/docs/zh/", "sidebar": "docs" }, { "id": "overview", - "path": "/docs/overview", + "path": "/docs/zh/overview", "sidebar": "docs" }, { "id": "workflows/faq", - "path": "/docs/workflows/faq" + "path": "/docs/zh/workflows/faq" }, { "id": "workflows/introduction", - "path": "/docs/workflows/introduction", + "path": "/docs/zh/workflows/introduction", "sidebar": "docs" }, { "id": "workflows/level-1-ultra-lightweight", - "path": "/docs/workflows/level-1-ultra-lightweight", + "path": "/docs/zh/workflows/level-1-ultra-lightweight", "sidebar": "docs" }, { "id": "workflows/level-2-rapid", - "path": "/docs/workflows/level-2-rapid", + "path": "/docs/zh/workflows/level-2-rapid", "sidebar": "docs" }, { "id": "workflows/level-3-standard", - "path": "/docs/workflows/level-3-standard", + "path": "/docs/zh/workflows/level-3-standard", "sidebar": "docs" }, { "id": "workflows/level-4-brainstorm", - "path": "/docs/workflows/level-4-brainstorm", + "path": "/docs/zh/workflows/level-4-brainstorm", "sidebar": "docs" }, { "id": "workflows/level-5-intelligent", - "path": "/docs/workflows/level-5-intelligent", + "path": "/docs/zh/workflows/level-5-intelligent", "sidebar": "docs" } ], @@ -174,7 +174,7 @@ "sidebars": { "docs": { "link": { - "path": "/docs/", + "path": "/docs/zh/", "label": "Home" } } diff --git a/ccw/docs-site/.docusaurus/i18n.json b/ccw/docs-site/.docusaurus/i18n.json index ea93de74..e9a2547b 100644 --- a/ccw/docs-site/.docusaurus/i18n.json +++ b/ccw/docs-site/.docusaurus/i18n.json @@ -5,7 +5,7 @@ "zh" ], "path": "i18n", - "currentLocale": "en", + "currentLocale": "zh", "localeConfigs": { "en": { "label": "English", diff --git a/ccw/docs-site/.docusaurus/registry.js b/ccw/docs-site/.docusaurus/registry.js index e1d71748..4cac6b1b 100644 --- a/ccw/docs-site/.docusaurus/registry.js +++ b/ccw/docs-site/.docusaurus/registry.js @@ -1,47 +1,39 @@ export default { - "__comp---theme-debug-config-23-a-2ff": [() => import(/* 
webpackChunkName: "__comp---theme-debug-config-23-a-2ff" */ "@theme/DebugConfig"), "@theme/DebugConfig", require.resolveWeak("@theme/DebugConfig")], - "__comp---theme-debug-contentba-8-ce7": [() => import(/* webpackChunkName: "__comp---theme-debug-contentba-8-ce7" */ "@theme/DebugContent"), "@theme/DebugContent", require.resolveWeak("@theme/DebugContent")], - "__comp---theme-debug-global-dataede-0fa": [() => import(/* webpackChunkName: "__comp---theme-debug-global-dataede-0fa" */ "@theme/DebugGlobalData"), "@theme/DebugGlobalData", require.resolveWeak("@theme/DebugGlobalData")], - "__comp---theme-debug-registry-679-501": [() => import(/* webpackChunkName: "__comp---theme-debug-registry-679-501" */ "@theme/DebugRegistry"), "@theme/DebugRegistry", require.resolveWeak("@theme/DebugRegistry")], - "__comp---theme-debug-routes-946-699": [() => import(/* webpackChunkName: "__comp---theme-debug-routes-946-699" */ "@theme/DebugRoutes"), "@theme/DebugRoutes", require.resolveWeak("@theme/DebugRoutes")], - "__comp---theme-debug-site-metadata-68-e-3d4": [() => import(/* webpackChunkName: "__comp---theme-debug-site-metadata-68-e-3d4" */ "@theme/DebugSiteMetadata"), "@theme/DebugSiteMetadata", require.resolveWeak("@theme/DebugSiteMetadata")], - "__comp---theme-doc-item-178-a40": [() => import(/* webpackChunkName: "__comp---theme-doc-item-178-a40" */ "@theme/DocItem"), "@theme/DocItem", require.resolveWeak("@theme/DocItem")], - "__comp---theme-doc-roota-94-67a": [() => import(/* webpackChunkName: "__comp---theme-doc-roota-94-67a" */ "@theme/DocRoot"), "@theme/DocRoot", require.resolveWeak("@theme/DocRoot")], - "__comp---theme-doc-version-roota-7-b-5de": [() => import(/* webpackChunkName: "__comp---theme-doc-version-roota-7-b-5de" */ "@theme/DocVersionRoot"), "@theme/DocVersionRoot", require.resolveWeak("@theme/DocVersionRoot")], - "__comp---theme-docs-root-5-e-9-0b6": [() => import(/* webpackChunkName: "__comp---theme-docs-root-5-e-9-0b6" */ "@theme/DocsRoot"), "@theme/DocsRoot", require.resolveWeak("@theme/DocsRoot")], - "__props---docs-11-b-f70": [() => import(/* webpackChunkName: "__props---docs-11-b-f70" */ "@generated/docusaurus-plugin-content-docs/default/p/docs-7fc.json"), "@generated/docusaurus-plugin-content-docs/default/p/docs-7fc.json", require.resolveWeak("@generated/docusaurus-plugin-content-docs/default/p/docs-7fc.json")], - "__props---docs-docusaurus-debug-content-344-8d5": [() => import(/* webpackChunkName: "__props---docs-docusaurus-debug-content-344-8d5" */ "@generated/docusaurus-plugin-debug/default/p/docs-docusaurus-debug-content-a52.json"), "@generated/docusaurus-plugin-debug/default/p/docs-docusaurus-debug-content-a52.json", require.resolveWeak("@generated/docusaurus-plugin-debug/default/p/docs-docusaurus-debug-content-a52.json")], - "content---docs-4-ed-831": [() => import(/* webpackChunkName: "content---docs-4-ed-831" */ "@site/docs/index.mdx"), "@site/docs/index.mdx", require.resolveWeak("@site/docs/index.mdx")], - "content---docs-commands-cli-cli-init-056-ce1": [() => import(/* webpackChunkName: "content---docs-commands-cli-cli-init-056-ce1" */ "@site/docs/commands/cli/cli-init.mdx"), "@site/docs/commands/cli/cli-init.mdx", require.resolveWeak("@site/docs/commands/cli/cli-init.mdx")], - "content---docs-commands-cli-codex-reviewf-1-b-55f": [() => import(/* webpackChunkName: "content---docs-commands-cli-codex-reviewf-1-b-55f" */ "@site/docs/commands/cli/codex-review.mdx"), "@site/docs/commands/cli/codex-review.mdx", require.resolveWeak("@site/docs/commands/cli/codex-review.mdx")], 
- "content---docs-commands-general-ccw-coordinatord-55-c6b": [() => import(/* webpackChunkName: "content---docs-commands-general-ccw-coordinatord-55-c6b" */ "@site/docs/commands/general/ccw-coordinator.mdx"), "@site/docs/commands/general/ccw-coordinator.mdx", require.resolveWeak("@site/docs/commands/general/ccw-coordinator.mdx")], - "content---docs-commands-general-ccw-debug-97-c-a72": [() => import(/* webpackChunkName: "content---docs-commands-general-ccw-debug-97-c-a72" */ "@site/docs/commands/general/ccw-debug.mdx"), "@site/docs/commands/general/ccw-debug.mdx", require.resolveWeak("@site/docs/commands/general/ccw-debug.mdx")], - "content---docs-commands-general-ccw-plan-04-d-fe0": [() => import(/* webpackChunkName: "content---docs-commands-general-ccw-plan-04-d-fe0" */ "@site/docs/commands/general/ccw-plan.mdx"), "@site/docs/commands/general/ccw-plan.mdx", require.resolveWeak("@site/docs/commands/general/ccw-plan.mdx")], - "content---docs-commands-general-ccw-testcce-912": [() => import(/* webpackChunkName: "content---docs-commands-general-ccw-testcce-912" */ "@site/docs/commands/general/ccw-test.mdx"), "@site/docs/commands/general/ccw-test.mdx", require.resolveWeak("@site/docs/commands/general/ccw-test.mdx")], - "content---docs-commands-general-ccwf-48-8c4": [() => import(/* webpackChunkName: "content---docs-commands-general-ccwf-48-8c4" */ "@site/docs/commands/general/ccw.mdx"), "@site/docs/commands/general/ccw.mdx", require.resolveWeak("@site/docs/commands/general/ccw.mdx")], - "content---docs-commands-general-codex-coordinatorf-92-1dc": [() => import(/* webpackChunkName: "content---docs-commands-general-codex-coordinatorf-92-1dc" */ "@site/docs/commands/general/codex-coordinator.mdx"), "@site/docs/commands/general/codex-coordinator.mdx", require.resolveWeak("@site/docs/commands/general/codex-coordinator.mdx")], - "content---docs-commands-general-flow-createfab-98a": [() => import(/* webpackChunkName: "content---docs-commands-general-flow-createfab-98a" */ "@site/docs/commands/general/flow-create.mdx"), "@site/docs/commands/general/flow-create.mdx", require.resolveWeak("@site/docs/commands/general/flow-create.mdx")], - "content---docs-commands-issue-issue-convert-to-plan-5-c-7-184": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-convert-to-plan-5-c-7-184" */ "@site/docs/commands/issue/issue-convert-to-plan.md"), "@site/docs/commands/issue/issue-convert-to-plan.md", require.resolveWeak("@site/docs/commands/issue/issue-convert-to-plan.md")], - "content---docs-commands-issue-issue-discover-1-e-3-569": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-discover-1-e-3-569" */ "@site/docs/commands/issue/issue-discover.md"), "@site/docs/commands/issue/issue-discover.md", require.resolveWeak("@site/docs/commands/issue/issue-discover.md")], - "content---docs-commands-issue-issue-executefe-8-c03": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-executefe-8-c03" */ "@site/docs/commands/issue/issue-execute.md"), "@site/docs/commands/issue/issue-execute.md", require.resolveWeak("@site/docs/commands/issue/issue-execute.md")], - "content---docs-commands-issue-issue-from-brainstorm-2-ec-eeb": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-from-brainstorm-2-ec-eeb" */ "@site/docs/commands/issue/issue-from-brainstorm.md"), "@site/docs/commands/issue/issue-from-brainstorm.md", require.resolveWeak("@site/docs/commands/issue/issue-from-brainstorm.md")], - "content---docs-commands-issue-issue-new-4-ad-3f0": [() 
=> import(/* webpackChunkName: "content---docs-commands-issue-issue-new-4-ad-3f0" */ "@site/docs/commands/issue/issue-new.md"), "@site/docs/commands/issue/issue-new.md", require.resolveWeak("@site/docs/commands/issue/issue-new.md")], - "content---docs-commands-issue-issue-plana-6-c-fbd": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-plana-6-c-fbd" */ "@site/docs/commands/issue/issue-plan.md"), "@site/docs/commands/issue/issue-plan.md", require.resolveWeak("@site/docs/commands/issue/issue-plan.md")], - "content---docs-commands-issue-issue-queue-1-ba-55f": [() => import(/* webpackChunkName: "content---docs-commands-issue-issue-queue-1-ba-55f" */ "@site/docs/commands/issue/issue-queue.md"), "@site/docs/commands/issue/issue-queue.md", require.resolveWeak("@site/docs/commands/issue/issue-queue.md")], - "content---docs-commands-memory-memory-compact-7-a-1-41c": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-compact-7-a-1-41c" */ "@site/docs/commands/memory/memory-compact.mdx"), "@site/docs/commands/memory/memory-compact.mdx", require.resolveWeak("@site/docs/commands/memory/memory-compact.mdx")], - "content---docs-commands-memory-memory-docs-full-cli-4-cc-96f": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-docs-full-cli-4-cc-96f" */ "@site/docs/commands/memory/memory-docs-full-cli.mdx"), "@site/docs/commands/memory/memory-docs-full-cli.mdx", require.resolveWeak("@site/docs/commands/memory/memory-docs-full-cli.mdx")], - "content---docs-commands-memory-memory-docs-related-cli-60-e-dd0": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-docs-related-cli-60-e-dd0" */ "@site/docs/commands/memory/memory-docs-related-cli.mdx"), "@site/docs/commands/memory/memory-docs-related-cli.mdx", require.resolveWeak("@site/docs/commands/memory/memory-docs-related-cli.mdx")], - "content---docs-commands-memory-memory-load-157-952": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-load-157-952" */ "@site/docs/commands/memory/memory-load.mdx"), "@site/docs/commands/memory/memory-load.mdx", require.resolveWeak("@site/docs/commands/memory/memory-load.mdx")], - "content---docs-commands-memory-memory-update-full-666-002": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-update-full-666-002" */ "@site/docs/commands/memory/memory-update-full.mdx"), "@site/docs/commands/memory/memory-update-full.mdx", require.resolveWeak("@site/docs/commands/memory/memory-update-full.mdx")], - "content---docs-commands-memory-memory-update-related-611-8d3": [() => import(/* webpackChunkName: "content---docs-commands-memory-memory-update-related-611-8d3" */ "@site/docs/commands/memory/memory-update-related.mdx"), "@site/docs/commands/memory/memory-update-related.mdx", require.resolveWeak("@site/docs/commands/memory/memory-update-related.mdx")], - "content---docs-faqea-3-888": [() => import(/* webpackChunkName: "content---docs-faqea-3-888" */ "@site/docs/faq.mdx"), "@site/docs/faq.mdx", require.resolveWeak("@site/docs/faq.mdx")], - "content---docs-overview-188-429": [() => import(/* webpackChunkName: "content---docs-overview-188-429" */ "@site/docs/overview.mdx"), "@site/docs/overview.mdx", require.resolveWeak("@site/docs/overview.mdx")], - "content---docs-workflows-faqbcf-045": [() => import(/* webpackChunkName: "content---docs-workflows-faqbcf-045" */ "@site/docs/workflows/faq.mdx"), "@site/docs/workflows/faq.mdx", require.resolveWeak("@site/docs/workflows/faq.mdx")], - 
"content---docs-workflows-introduction-9-f-4-275": [() => import(/* webpackChunkName: "content---docs-workflows-introduction-9-f-4-275" */ "@site/docs/workflows/introduction.mdx"), "@site/docs/workflows/introduction.mdx", require.resolveWeak("@site/docs/workflows/introduction.mdx")], - "content---docs-workflows-level-1-ultra-lightweightc-5-a-5db": [() => import(/* webpackChunkName: "content---docs-workflows-level-1-ultra-lightweightc-5-a-5db" */ "@site/docs/workflows/level-1-ultra-lightweight.mdx"), "@site/docs/workflows/level-1-ultra-lightweight.mdx", require.resolveWeak("@site/docs/workflows/level-1-ultra-lightweight.mdx")], - "content---docs-workflows-level-2-rapid-19-b-095": [() => import(/* webpackChunkName: "content---docs-workflows-level-2-rapid-19-b-095" */ "@site/docs/workflows/level-2-rapid.mdx"), "@site/docs/workflows/level-2-rapid.mdx", require.resolveWeak("@site/docs/workflows/level-2-rapid.mdx")], - "content---docs-workflows-level-3-standardbdb-61a": [() => import(/* webpackChunkName: "content---docs-workflows-level-3-standardbdb-61a" */ "@site/docs/workflows/level-3-standard.mdx"), "@site/docs/workflows/level-3-standard.mdx", require.resolveWeak("@site/docs/workflows/level-3-standard.mdx")], - "content---docs-workflows-level-4-brainstormd-04-14f": [() => import(/* webpackChunkName: "content---docs-workflows-level-4-brainstormd-04-14f" */ "@site/docs/workflows/level-4-brainstorm.mdx"), "@site/docs/workflows/level-4-brainstorm.mdx", require.resolveWeak("@site/docs/workflows/level-4-brainstorm.mdx")], - "content---docs-workflows-level-5-intelligent-186-b05": [() => import(/* webpackChunkName: "content---docs-workflows-level-5-intelligent-186-b05" */ "@site/docs/workflows/level-5-intelligent.mdx"), "@site/docs/workflows/level-5-intelligent.mdx", require.resolveWeak("@site/docs/workflows/level-5-intelligent.mdx")], - "plugin---docs-aba-4f5": [() => import(/* webpackChunkName: "plugin---docs-aba-4f5" */ "@generated/docusaurus-plugin-content-docs/default/__plugin.json"), "@generated/docusaurus-plugin-content-docs/default/__plugin.json", require.resolveWeak("@generated/docusaurus-plugin-content-docs/default/__plugin.json")], - "plugin---docs-docusaurus-debugb-38-c84": [() => import(/* webpackChunkName: "plugin---docs-docusaurus-debugb-38-c84" */ "@generated/docusaurus-plugin-debug/default/__plugin.json"), "@generated/docusaurus-plugin-debug/default/__plugin.json", require.resolveWeak("@generated/docusaurus-plugin-debug/default/__plugin.json")],}; + "04db0a2e": [() => import(/* webpackChunkName: "04db0a2e" */ "@site/docs/commands/general/ccw-plan.mdx"), "@site/docs/commands/general/ccw-plan.mdx", require.resolveWeak("@site/docs/commands/general/ccw-plan.mdx")], + "05467734": [() => import(/* webpackChunkName: "05467734" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-2-rapid.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-2-rapid.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-2-rapid.mdx")], + "0566a0a8": [() => import(/* webpackChunkName: "0566a0a8" */ "@site/docs/commands/cli/cli-init.mdx"), "@site/docs/commands/cli/cli-init.mdx", require.resolveWeak("@site/docs/commands/cli/cli-init.mdx")], + "157db180": [() => import(/* webpackChunkName: "157db180" */ "@site/docs/commands/memory/memory-load.mdx"), "@site/docs/commands/memory/memory-load.mdx", require.resolveWeak("@site/docs/commands/memory/memory-load.mdx")], + "17896441": [() => import(/* webpackChunkName: "17896441" */ 
"@theme/DocItem"), "@theme/DocItem", require.resolveWeak("@theme/DocItem")], + "1bac9067": [() => import(/* webpackChunkName: "1bac9067" */ "@site/docs/commands/issue/issue-queue.md"), "@site/docs/commands/issue/issue-queue.md", require.resolveWeak("@site/docs/commands/issue/issue-queue.md")], + "1e3006f3": [() => import(/* webpackChunkName: "1e3006f3" */ "@site/docs/commands/issue/issue-discover.md"), "@site/docs/commands/issue/issue-discover.md", require.resolveWeak("@site/docs/commands/issue/issue-discover.md")], + "2a5e3eff": [() => import(/* webpackChunkName: "2a5e3eff" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/faq.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/faq.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/faq.mdx")], + "2ecf8b4a": [() => import(/* webpackChunkName: "2ecf8b4a" */ "@site/docs/commands/issue/issue-from-brainstorm.md"), "@site/docs/commands/issue/issue-from-brainstorm.md", require.resolveWeak("@site/docs/commands/issue/issue-from-brainstorm.md")], + "3f1fe4a1": [() => import(/* webpackChunkName: "3f1fe4a1" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-3-standard.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-3-standard.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-3-standard.mdx")], + "46f40178": [() => import(/* webpackChunkName: "46f40178" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/faq.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/faq.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/faq.mdx")], + "4ad7db0f": [() => import(/* webpackChunkName: "4ad7db0f" */ "@site/docs/commands/issue/issue-new.md"), "@site/docs/commands/issue/issue-new.md", require.resolveWeak("@site/docs/commands/issue/issue-new.md")], + "4cc74730": [() => import(/* webpackChunkName: "4cc74730" */ "@site/docs/commands/memory/memory-docs-full-cli.mdx"), "@site/docs/commands/memory/memory-docs-full-cli.mdx", require.resolveWeak("@site/docs/commands/memory/memory-docs-full-cli.mdx")], + "562bb8cb": [() => import(/* webpackChunkName: "562bb8cb" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-5-intelligent.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-5-intelligent.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-5-intelligent.mdx")], + "5c7b2278": [() => import(/* webpackChunkName: "5c7b2278" */ "@site/docs/commands/issue/issue-convert-to-plan.md"), "@site/docs/commands/issue/issue-convert-to-plan.md", require.resolveWeak("@site/docs/commands/issue/issue-convert-to-plan.md")], + "5e95c892": [() => import(/* webpackChunkName: "5e95c892" */ "@theme/DocsRoot"), "@theme/DocsRoot", require.resolveWeak("@theme/DocsRoot")], + "60eef997": [() => import(/* webpackChunkName: "60eef997" */ "@site/docs/commands/memory/memory-docs-related-cli.mdx"), "@site/docs/commands/memory/memory-docs-related-cli.mdx", require.resolveWeak("@site/docs/commands/memory/memory-docs-related-cli.mdx")], + "611877e1": [() => import(/* webpackChunkName: "611877e1" */ "@site/docs/commands/memory/memory-update-related.mdx"), "@site/docs/commands/memory/memory-update-related.mdx", require.resolveWeak("@site/docs/commands/memory/memory-update-related.mdx")], + "666bb1bf": [() => import(/* webpackChunkName: "666bb1bf" */ "@site/docs/commands/memory/memory-update-full.mdx"), 
"@site/docs/commands/memory/memory-update-full.mdx", require.resolveWeak("@site/docs/commands/memory/memory-update-full.mdx")], + "6ab014e9": [() => import(/* webpackChunkName: "6ab014e9" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/index.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/index.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/index.mdx")], + "775938bf": [() => import(/* webpackChunkName: "775938bf" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-4-brainstorm.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-4-brainstorm.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-4-brainstorm.mdx")], + "7a1ee27c": [() => import(/* webpackChunkName: "7a1ee27c" */ "@site/docs/commands/memory/memory-compact.mdx"), "@site/docs/commands/memory/memory-compact.mdx", require.resolveWeak("@site/docs/commands/memory/memory-compact.mdx")], + "8a7e39ed": [() => import(/* webpackChunkName: "8a7e39ed" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/overview.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/overview.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/overview.mdx")], + "97c6e66a": [() => import(/* webpackChunkName: "97c6e66a" */ "@site/docs/commands/general/ccw-debug.mdx"), "@site/docs/commands/general/ccw-debug.mdx", require.resolveWeak("@site/docs/commands/general/ccw-debug.mdx")], + "9cf7cb6b": [() => import(/* webpackChunkName: "9cf7cb6b" */ "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-1-ultra-lightweight.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-1-ultra-lightweight.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/level-1-ultra-lightweight.mdx")], + "a6c3df16": [() => import(/* webpackChunkName: "a6c3df16" */ "@site/docs/commands/issue/issue-plan.md"), "@site/docs/commands/issue/issue-plan.md", require.resolveWeak("@site/docs/commands/issue/issue-plan.md")], + "a7bd4aaa": [() => import(/* webpackChunkName: "a7bd4aaa" */ "@theme/DocVersionRoot"), "@theme/DocVersionRoot", require.resolveWeak("@theme/DocVersionRoot")], + "a94703ab": [() => import(/* webpackChunkName: "a94703ab" */ "@theme/DocRoot"), "@theme/DocRoot", require.resolveWeak("@theme/DocRoot")], + "aba21aa0": [() => import(/* webpackChunkName: "aba21aa0" */ "@generated/docusaurus-plugin-content-docs/default/__plugin.json"), "@generated/docusaurus-plugin-content-docs/default/__plugin.json", require.resolveWeak("@generated/docusaurus-plugin-content-docs/default/__plugin.json")], + "b17e4002": [() => import(/* webpackChunkName: "b17e4002" */ "@generated/docusaurus-plugin-content-docs/default/p/docs-zh-d2a.json"), "@generated/docusaurus-plugin-content-docs/default/p/docs-zh-d2a.json", require.resolveWeak("@generated/docusaurus-plugin-content-docs/default/p/docs-zh-d2a.json")], + "ccef5d0f": [() => import(/* webpackChunkName: "ccef5d0f" */ "@site/docs/commands/general/ccw-test.mdx"), "@site/docs/commands/general/ccw-test.mdx", require.resolveWeak("@site/docs/commands/general/ccw-test.mdx")], + "d550a629": [() => import(/* webpackChunkName: "d550a629" */ "@site/docs/commands/general/ccw-coordinator.mdx"), "@site/docs/commands/general/ccw-coordinator.mdx", require.resolveWeak("@site/docs/commands/general/ccw-coordinator.mdx")], + "e5f6eee3": [() => import(/* webpackChunkName: "e5f6eee3" */ 
"@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/introduction.mdx"), "@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/introduction.mdx", require.resolveWeak("@site/i18n/zh/docusaurus-plugin-content-docs/current/workflows/introduction.mdx")], + "f1bf82ec": [() => import(/* webpackChunkName: "f1bf82ec" */ "@site/docs/commands/cli/codex-review.mdx"), "@site/docs/commands/cli/codex-review.mdx", require.resolveWeak("@site/docs/commands/cli/codex-review.mdx")], + "f4817052": [() => import(/* webpackChunkName: "f4817052" */ "@site/docs/commands/general/ccw.mdx"), "@site/docs/commands/general/ccw.mdx", require.resolveWeak("@site/docs/commands/general/ccw.mdx")], + "f9222419": [() => import(/* webpackChunkName: "f9222419" */ "@site/docs/commands/general/codex-coordinator.mdx"), "@site/docs/commands/general/codex-coordinator.mdx", require.resolveWeak("@site/docs/commands/general/codex-coordinator.mdx")], + "fabaf1c8": [() => import(/* webpackChunkName: "fabaf1c8" */ "@site/docs/commands/general/flow-create.mdx"), "@site/docs/commands/general/flow-create.mdx", require.resolveWeak("@site/docs/commands/general/flow-create.mdx")], + "fe8e3dcf": [() => import(/* webpackChunkName: "fe8e3dcf" */ "@site/docs/commands/issue/issue-execute.md"), "@site/docs/commands/issue/issue-execute.md", require.resolveWeak("@site/docs/commands/issue/issue-execute.md")],}; diff --git a/ccw/docs-site/.docusaurus/routes.js b/ccw/docs-site/.docusaurus/routes.js index 88ac2bcd..7f74ea8a 100644 --- a/ccw/docs-site/.docusaurus/routes.js +++ b/ccw/docs-site/.docusaurus/routes.js @@ -3,240 +3,205 @@ import ComponentCreator from '@docusaurus/ComponentCreator'; export default [ { - path: '/docs/__docusaurus/debug', - component: ComponentCreator('/docs/__docusaurus/debug', 'e58'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/config', - component: ComponentCreator('/docs/__docusaurus/debug/config', '2ce'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/content', - component: ComponentCreator('/docs/__docusaurus/debug/content', '11b'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/globalData', - component: ComponentCreator('/docs/__docusaurus/debug/globalData', 'f13'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/metadata', - component: ComponentCreator('/docs/__docusaurus/debug/metadata', 'bff'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/registry', - component: ComponentCreator('/docs/__docusaurus/debug/registry', '830'), - exact: true - }, - { - path: '/docs/__docusaurus/debug/routes', - component: ComponentCreator('/docs/__docusaurus/debug/routes', '13e'), - exact: true - }, - { - path: '/docs/', - component: ComponentCreator('/docs/', 'a3f'), + path: '/docs/zh/', + component: ComponentCreator('/docs/zh/', 'b34'), routes: [ { - path: '/docs/', - component: ComponentCreator('/docs/', 'fa7'), + path: '/docs/zh/', + component: ComponentCreator('/docs/zh/', 'a8e'), routes: [ { - path: '/docs/', - component: ComponentCreator('/docs/', '294'), + path: '/docs/zh/', + component: ComponentCreator('/docs/zh/', '632'), routes: [ { - path: '/docs/commands/cli/cli-init', - component: ComponentCreator('/docs/commands/cli/cli-init', '159'), + path: '/docs/zh/commands/cli/cli-init', + component: ComponentCreator('/docs/zh/commands/cli/cli-init', 'fe3'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/cli/codex-review', - component: ComponentCreator('/docs/commands/cli/codex-review', 'c66'), + path: '/docs/zh/commands/cli/codex-review', + component: 
ComponentCreator('/docs/zh/commands/cli/codex-review', 'e65'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/ccw', - component: ComponentCreator('/docs/commands/general/ccw', '3c1'), + path: '/docs/zh/commands/general/ccw', + component: ComponentCreator('/docs/zh/commands/general/ccw', '83a'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/ccw-coordinator', - component: ComponentCreator('/docs/commands/general/ccw-coordinator', '3b4'), + path: '/docs/zh/commands/general/ccw-coordinator', + component: ComponentCreator('/docs/zh/commands/general/ccw-coordinator', 'f35'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/ccw-debug', - component: ComponentCreator('/docs/commands/general/ccw-debug', 'e0c'), + path: '/docs/zh/commands/general/ccw-debug', + component: ComponentCreator('/docs/zh/commands/general/ccw-debug', 'b0a'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/ccw-plan', - component: ComponentCreator('/docs/commands/general/ccw-plan', '9ae'), + path: '/docs/zh/commands/general/ccw-plan', + component: ComponentCreator('/docs/zh/commands/general/ccw-plan', '39d'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/ccw-test', - component: ComponentCreator('/docs/commands/general/ccw-test', 'e6f'), + path: '/docs/zh/commands/general/ccw-test', + component: ComponentCreator('/docs/zh/commands/general/ccw-test', '765'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/codex-coordinator', - component: ComponentCreator('/docs/commands/general/codex-coordinator', 'e7d'), + path: '/docs/zh/commands/general/codex-coordinator', + component: ComponentCreator('/docs/zh/commands/general/codex-coordinator', '486'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/general/flow-create', - component: ComponentCreator('/docs/commands/general/flow-create', '507'), + path: '/docs/zh/commands/general/flow-create', + component: ComponentCreator('/docs/zh/commands/general/flow-create', 'd53'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-convert-to-plan', - component: ComponentCreator('/docs/commands/issue/issue-convert-to-plan', 'a36'), + path: '/docs/zh/commands/issue/issue-convert-to-plan', + component: ComponentCreator('/docs/zh/commands/issue/issue-convert-to-plan', '0df'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-discover', - component: ComponentCreator('/docs/commands/issue/issue-discover', '5ae'), + path: '/docs/zh/commands/issue/issue-discover', + component: ComponentCreator('/docs/zh/commands/issue/issue-discover', '9b4'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-execute', - component: ComponentCreator('/docs/commands/issue/issue-execute', '20b'), + path: '/docs/zh/commands/issue/issue-execute', + component: ComponentCreator('/docs/zh/commands/issue/issue-execute', 'cfd'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-from-brainstorm', - component: ComponentCreator('/docs/commands/issue/issue-from-brainstorm', '10c'), + path: '/docs/zh/commands/issue/issue-from-brainstorm', + component: ComponentCreator('/docs/zh/commands/issue/issue-from-brainstorm', 'd2f'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-new', - component: ComponentCreator('/docs/commands/issue/issue-new', 'abb'), + path: '/docs/zh/commands/issue/issue-new', + component: ComponentCreator('/docs/zh/commands/issue/issue-new', '7f9'), exact: true, sidebar: "docs" }, { - path: 
'/docs/commands/issue/issue-plan', - component: ComponentCreator('/docs/commands/issue/issue-plan', '57f'), + path: '/docs/zh/commands/issue/issue-plan', + component: ComponentCreator('/docs/zh/commands/issue/issue-plan', 'ed4'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/issue/issue-queue', - component: ComponentCreator('/docs/commands/issue/issue-queue', '316'), + path: '/docs/zh/commands/issue/issue-queue', + component: ComponentCreator('/docs/zh/commands/issue/issue-queue', 'a4b'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-compact', - component: ComponentCreator('/docs/commands/memory/memory-compact', 'fbd'), + path: '/docs/zh/commands/memory/memory-compact', + component: ComponentCreator('/docs/zh/commands/memory/memory-compact', '8dc'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-docs-full-cli', - component: ComponentCreator('/docs/commands/memory/memory-docs-full-cli', '8b8'), + path: '/docs/zh/commands/memory/memory-docs-full-cli', + component: ComponentCreator('/docs/zh/commands/memory/memory-docs-full-cli', '1a7'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-docs-related-cli', - component: ComponentCreator('/docs/commands/memory/memory-docs-related-cli', '707'), + path: '/docs/zh/commands/memory/memory-docs-related-cli', + component: ComponentCreator('/docs/zh/commands/memory/memory-docs-related-cli', 'f28'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-load', - component: ComponentCreator('/docs/commands/memory/memory-load', '1db'), + path: '/docs/zh/commands/memory/memory-load', + component: ComponentCreator('/docs/zh/commands/memory/memory-load', 'aee'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-update-full', - component: ComponentCreator('/docs/commands/memory/memory-update-full', '3fa'), + path: '/docs/zh/commands/memory/memory-update-full', + component: ComponentCreator('/docs/zh/commands/memory/memory-update-full', '2a1'), exact: true, sidebar: "docs" }, { - path: '/docs/commands/memory/memory-update-related', - component: ComponentCreator('/docs/commands/memory/memory-update-related', 'c50'), + path: '/docs/zh/commands/memory/memory-update-related', + component: ComponentCreator('/docs/zh/commands/memory/memory-update-related', '991'), exact: true, sidebar: "docs" }, { - path: '/docs/faq', - component: ComponentCreator('/docs/faq', '296'), + path: '/docs/zh/faq', + component: ComponentCreator('/docs/zh/faq', 'd6c'), exact: true, sidebar: "docs" }, { - path: '/docs/overview', - component: ComponentCreator('/docs/overview', 'f90'), + path: '/docs/zh/overview', + component: ComponentCreator('/docs/zh/overview', '2d1'), exact: true, sidebar: "docs" }, { - path: '/docs/workflows/faq', - component: ComponentCreator('/docs/workflows/faq', '58c'), + path: '/docs/zh/workflows/faq', + component: ComponentCreator('/docs/zh/workflows/faq', '319'), exact: true }, { - path: '/docs/workflows/introduction', - component: ComponentCreator('/docs/workflows/introduction', '702'), + path: '/docs/zh/workflows/introduction', + component: ComponentCreator('/docs/zh/workflows/introduction', 'dc8'), exact: true, sidebar: "docs" }, { - path: '/docs/workflows/level-1-ultra-lightweight', - component: ComponentCreator('/docs/workflows/level-1-ultra-lightweight', 'b4b'), + path: '/docs/zh/workflows/level-1-ultra-lightweight', + component: ComponentCreator('/docs/zh/workflows/level-1-ultra-lightweight', '4d3'), exact: true, sidebar: "docs" }, { - 
path: '/docs/workflows/level-2-rapid', - component: ComponentCreator('/docs/workflows/level-2-rapid', 'fe1'), + path: '/docs/zh/workflows/level-2-rapid', + component: ComponentCreator('/docs/zh/workflows/level-2-rapid', 'e2a'), exact: true, sidebar: "docs" }, { - path: '/docs/workflows/level-3-standard', - component: ComponentCreator('/docs/workflows/level-3-standard', '65f'), + path: '/docs/zh/workflows/level-3-standard', + component: ComponentCreator('/docs/zh/workflows/level-3-standard', '936'), exact: true, sidebar: "docs" }, { - path: '/docs/workflows/level-4-brainstorm', - component: ComponentCreator('/docs/workflows/level-4-brainstorm', 'fae'), + path: '/docs/zh/workflows/level-4-brainstorm', + component: ComponentCreator('/docs/zh/workflows/level-4-brainstorm', '87d'), exact: true, sidebar: "docs" }, { - path: '/docs/workflows/level-5-intelligent', - component: ComponentCreator('/docs/workflows/level-5-intelligent', 'fa9'), + path: '/docs/zh/workflows/level-5-intelligent', + component: ComponentCreator('/docs/zh/workflows/level-5-intelligent', 'b09'), exact: true, sidebar: "docs" }, { - path: '/docs/', - component: ComponentCreator('/docs/', '6df'), + path: '/docs/zh/', + component: ComponentCreator('/docs/zh/', '0e3'), exact: true, sidebar: "docs" } diff --git a/ccw/docs-site/.docusaurus/routesChunkNames.json b/ccw/docs-site/.docusaurus/routesChunkNames.json index a3890cf0..17b2e1a0 100644 --- a/ccw/docs-site/.docusaurus/routesChunkNames.json +++ b/ccw/docs-site/.docusaurus/routesChunkNames.json @@ -1,186 +1,143 @@ { - "/docs/__docusaurus/debug-e58": { - "__comp": "__comp---theme-debug-config-23-a-2ff", + "/docs/zh/-b34": { + "__comp": "5e95c892", "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" + "plugin": "aba21aa0" } }, - "/docs/__docusaurus/debug/config-2ce": { - "__comp": "__comp---theme-debug-config-23-a-2ff", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - } + "/docs/zh/-a8e": { + "__comp": "a7bd4aaa", + "__props": "b17e4002" }, - "/docs/__docusaurus/debug/content-11b": { - "__comp": "__comp---theme-debug-contentba-8-ce7", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - }, - "__props": "__props---docs-docusaurus-debug-content-344-8d5" + "/docs/zh/-632": { + "__comp": "a94703ab" }, - "/docs/__docusaurus/debug/globalData-f13": { - "__comp": "__comp---theme-debug-global-dataede-0fa", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - } + "/docs/zh/commands/cli/cli-init-fe3": { + "__comp": "17896441", + "content": "0566a0a8" }, - "/docs/__docusaurus/debug/metadata-bff": { - "__comp": "__comp---theme-debug-site-metadata-68-e-3d4", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - } + "/docs/zh/commands/cli/codex-review-e65": { + "__comp": "17896441", + "content": "f1bf82ec" }, - "/docs/__docusaurus/debug/registry-830": { - "__comp": "__comp---theme-debug-registry-679-501", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - } + "/docs/zh/commands/general/ccw-83a": { + "__comp": "17896441", + "content": "f4817052" }, - "/docs/__docusaurus/debug/routes-13e": { - "__comp": "__comp---theme-debug-routes-946-699", - "__context": { - "plugin": "plugin---docs-docusaurus-debugb-38-c84" - } + "/docs/zh/commands/general/ccw-coordinator-f35": { + "__comp": "17896441", + "content": "d550a629" }, - "/docs/-a3f": { - "__comp": "__comp---theme-docs-root-5-e-9-0b6", - "__context": { - "plugin": "plugin---docs-aba-4f5" - } + "/docs/zh/commands/general/ccw-debug-b0a": { 
+ "__comp": "17896441", + "content": "97c6e66a" }, - "/docs/-fa7": { - "__comp": "__comp---theme-doc-version-roota-7-b-5de", - "__props": "__props---docs-11-b-f70" + "/docs/zh/commands/general/ccw-plan-39d": { + "__comp": "17896441", + "content": "04db0a2e" }, - "/docs/-294": { - "__comp": "__comp---theme-doc-roota-94-67a" + "/docs/zh/commands/general/ccw-test-765": { + "__comp": "17896441", + "content": "ccef5d0f" }, - "/docs/commands/cli/cli-init-159": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-cli-cli-init-056-ce1" + "/docs/zh/commands/general/codex-coordinator-486": { + "__comp": "17896441", + "content": "f9222419" }, - "/docs/commands/cli/codex-review-c66": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-cli-codex-reviewf-1-b-55f" + "/docs/zh/commands/general/flow-create-d53": { + "__comp": "17896441", + "content": "fabaf1c8" }, - "/docs/commands/general/ccw-3c1": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-ccwf-48-8c4" + "/docs/zh/commands/issue/issue-convert-to-plan-0df": { + "__comp": "17896441", + "content": "5c7b2278" }, - "/docs/commands/general/ccw-coordinator-3b4": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-ccw-coordinatord-55-c6b" + "/docs/zh/commands/issue/issue-discover-9b4": { + "__comp": "17896441", + "content": "1e3006f3" }, - "/docs/commands/general/ccw-debug-e0c": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-ccw-debug-97-c-a72" + "/docs/zh/commands/issue/issue-execute-cfd": { + "__comp": "17896441", + "content": "fe8e3dcf" }, - "/docs/commands/general/ccw-plan-9ae": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-ccw-plan-04-d-fe0" + "/docs/zh/commands/issue/issue-from-brainstorm-d2f": { + "__comp": "17896441", + "content": "2ecf8b4a" }, - "/docs/commands/general/ccw-test-e6f": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-ccw-testcce-912" + "/docs/zh/commands/issue/issue-new-7f9": { + "__comp": "17896441", + "content": "4ad7db0f" }, - "/docs/commands/general/codex-coordinator-e7d": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-codex-coordinatorf-92-1dc" + "/docs/zh/commands/issue/issue-plan-ed4": { + "__comp": "17896441", + "content": "a6c3df16" }, - "/docs/commands/general/flow-create-507": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-general-flow-createfab-98a" + "/docs/zh/commands/issue/issue-queue-a4b": { + "__comp": "17896441", + "content": "1bac9067" }, - "/docs/commands/issue/issue-convert-to-plan-a36": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-convert-to-plan-5-c-7-184" + "/docs/zh/commands/memory/memory-compact-8dc": { + "__comp": "17896441", + "content": "7a1ee27c" }, - "/docs/commands/issue/issue-discover-5ae": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-discover-1-e-3-569" + "/docs/zh/commands/memory/memory-docs-full-cli-1a7": { + "__comp": "17896441", + "content": "4cc74730" }, - "/docs/commands/issue/issue-execute-20b": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-executefe-8-c03" + "/docs/zh/commands/memory/memory-docs-related-cli-f28": { + "__comp": "17896441", + "content": 
"60eef997" }, - "/docs/commands/issue/issue-from-brainstorm-10c": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-from-brainstorm-2-ec-eeb" + "/docs/zh/commands/memory/memory-load-aee": { + "__comp": "17896441", + "content": "157db180" }, - "/docs/commands/issue/issue-new-abb": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-new-4-ad-3f0" + "/docs/zh/commands/memory/memory-update-full-2a1": { + "__comp": "17896441", + "content": "666bb1bf" }, - "/docs/commands/issue/issue-plan-57f": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-plana-6-c-fbd" + "/docs/zh/commands/memory/memory-update-related-991": { + "__comp": "17896441", + "content": "611877e1" }, - "/docs/commands/issue/issue-queue-316": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-issue-issue-queue-1-ba-55f" + "/docs/zh/faq-d6c": { + "__comp": "17896441", + "content": "2a5e3eff" }, - "/docs/commands/memory/memory-compact-fbd": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-compact-7-a-1-41c" + "/docs/zh/overview-2d1": { + "__comp": "17896441", + "content": "8a7e39ed" }, - "/docs/commands/memory/memory-docs-full-cli-8b8": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-docs-full-cli-4-cc-96f" + "/docs/zh/workflows/faq-319": { + "__comp": "17896441", + "content": "46f40178" }, - "/docs/commands/memory/memory-docs-related-cli-707": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-docs-related-cli-60-e-dd0" + "/docs/zh/workflows/introduction-dc8": { + "__comp": "17896441", + "content": "e5f6eee3" }, - "/docs/commands/memory/memory-load-1db": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-load-157-952" + "/docs/zh/workflows/level-1-ultra-lightweight-4d3": { + "__comp": "17896441", + "content": "9cf7cb6b" }, - "/docs/commands/memory/memory-update-full-3fa": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-update-full-666-002" + "/docs/zh/workflows/level-2-rapid-e2a": { + "__comp": "17896441", + "content": "05467734" }, - "/docs/commands/memory/memory-update-related-c50": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-commands-memory-memory-update-related-611-8d3" + "/docs/zh/workflows/level-3-standard-936": { + "__comp": "17896441", + "content": "3f1fe4a1" }, - "/docs/faq-296": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-faqea-3-888" + "/docs/zh/workflows/level-4-brainstorm-87d": { + "__comp": "17896441", + "content": "775938bf" }, - "/docs/overview-f90": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-overview-188-429" + "/docs/zh/workflows/level-5-intelligent-b09": { + "__comp": "17896441", + "content": "562bb8cb" }, - "/docs/workflows/faq-58c": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-faqbcf-045" - }, - "/docs/workflows/introduction-702": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-introduction-9-f-4-275" - }, - "/docs/workflows/level-1-ultra-lightweight-b4b": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-level-1-ultra-lightweightc-5-a-5db" - }, - 
"/docs/workflows/level-2-rapid-fe1": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-level-2-rapid-19-b-095" - }, - "/docs/workflows/level-3-standard-65f": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-level-3-standardbdb-61a" - }, - "/docs/workflows/level-4-brainstorm-fae": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-level-4-brainstormd-04-14f" - }, - "/docs/workflows/level-5-intelligent-fa9": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-workflows-level-5-intelligent-186-b05" - }, - "/docs/-6df": { - "__comp": "__comp---theme-doc-item-178-a40", - "content": "content---docs-4-ed-831" + "/docs/zh/-0e3": { + "__comp": "17896441", + "content": "6ab014e9" } } \ No newline at end of file diff --git a/ccw/docs-site/.docusaurus/site-metadata.json b/ccw/docs-site/.docusaurus/site-metadata.json index 085f2863..503133f5 100644 --- a/ccw/docs-site/.docusaurus/site-metadata.json +++ b/ccw/docs-site/.docusaurus/site-metadata.json @@ -12,9 +12,9 @@ "name": "@docusaurus/plugin-content-pages", "version": "3.9.2" }, - "docusaurus-plugin-debug": { + "docusaurus-plugin-sitemap": { "type": "package", - "name": "@docusaurus/plugin-debug", + "name": "@docusaurus/plugin-sitemap", "version": "3.9.2" }, "docusaurus-plugin-svgr": {