From 8ceae6d6fd17531291a8d432a6ce6b5f6bbd01a9 Mon Sep 17 00:00:00 2001 From: catlog22 Date: Sun, 1 Mar 2026 13:08:12 +0800 Subject: [PATCH] Add Chinese documentation for custom skills development and reference guide - Created a new document for custom skills development (`custom.md`) detailing the structure, creation, implementation, and best practices for developing custom CCW skills. - Added an index document (`index.md`) summarizing all built-in skills, their categories, and usage examples. - Introduced a reference guide (`reference.md`) providing a quick reference for all 33 built-in CCW skills, including triggers and purposes. --- .../workflow-lite-plan/phases/01-lite-plan.md | 46 + .../phases/02-lite-execute.md | 4 +- .../phases/01-multi-cli-plan.md | 13 + .../phases/02-lite-execute.md | 4 +- .../phases/04-task-generation.md | 11 +- .claude/skills/workflow-wave-plan/SKILL.md | 896 ------- ccw/frontend/src/lib/api.ts | 181 +- .../src/locales/en/review-session.json | 5 + .../src/locales/zh/review-session.json | 5 + ccw/frontend/src/pages/ReviewSessionPage.tsx | 33 +- ccw/src/core/auth/csrf-manager.ts | 134 +- ccw/src/core/routes/auth-routes.ts | 32 +- ccw/tests/csrf-manager.test.ts | 86 + docs/.vitepress/config.ts | 97 +- docs/.vitepress/demos/ComponentGallery.tsx | 326 +++ docs/.vitepress/demos/ComponentGalleryZh.tsx | 326 +++ docs/.vitepress/demos/DashboardOverview.tsx | 137 ++ docs/.vitepress/demos/FloatingPanelsDemo.tsx | 82 + docs/.vitepress/demos/MiniStatCards.tsx | 54 + docs/.vitepress/demos/ProjectInfoBanner.tsx | 66 + docs/.vitepress/demos/QueueItemStatusDemo.tsx | 60 + docs/.vitepress/demos/QueueManagementDemo.tsx | 164 ++ docs/.vitepress/demos/ResizablePanesDemo.tsx | 86 + docs/.vitepress/demos/SchedulerConfigDemo.tsx | 110 + docs/.vitepress/demos/SessionCarousel.tsx | 108 + .../demos/TerminalDashboardOverview.tsx | 122 + .../demos/TerminalLayoutPresets.tsx | 48 + docs/.vitepress/demos/badge-variants.tsx | 57 + 
docs/.vitepress/demos/button-variants.tsx | 112 +- docs/.vitepress/demos/card-variants.tsx | 71 + docs/.vitepress/demos/checkbox-variants.tsx | 79 + docs/.vitepress/demos/input-variants.tsx | 95 + docs/.vitepress/demos/select-variants.tsx | 101 + .../theme/components/AgentOrchestration.vue | 2 +- .../theme/components/DemoContainer.vue | 118 +- .../theme/components/LanguageSwitcher.vue | 131 +- .../theme/components/ProfessionalHome.vue | 204 +- .../theme/composables/useDynamicIcon.ts | 7 + docs/.vitepress/theme/inlineDemoPlugin.ts | 130 + docs/.vitepress/theme/layouts/Layout.vue | 110 +- docs/.vitepress/theme/markdownTransform.ts | 73 +- docs/.vitepress/theme/styles/custom.css | 23 +- docs/.vitepress/theme/styles/mobile.css | 679 ++++- docs/.vitepress/theme/styles/variables.css | 34 +- docs/components/index.md | 355 +-- docs/features/dashboard.md | 373 +-- docs/features/memory.md | 2 +- docs/features/queue.md | 341 +-- docs/features/terminal.md | 346 +-- docs/public/icon-concepts.html | 2180 +++++++++++++++++ docs/skills/claude-collaboration.md | 9 +- docs/skills/claude-index.md | 7 +- docs/skills/core-skills.md | 65 +- docs/skills/index.md | 23 +- docs/skills/reference.md | 52 +- docs/skills/specs/document-standards.md | 248 ++ docs/skills/specs/issue-classification.md | 200 ++ docs/skills/specs/quality-gates.md | 157 ++ docs/skills/specs/quality-standards.md | 190 ++ docs/skills/specs/reference-docs-spec.md | 348 +++ docs/skills/specs/review-dimensions.md | 182 ++ docs/skills/templates/architecture-doc.md | 245 ++ docs/skills/templates/autonomous-action.md | 260 ++ .../templates/autonomous-orchestrator.md | 311 +++ docs/skills/templates/epics-template.md | 311 +++ docs/skills/templates/issue-template.md | 295 +++ docs/skills/templates/product-brief.md | 160 ++ docs/skills/templates/requirements-prd.md | 262 ++ docs/skills/templates/review-report.md | 382 +++ docs/skills/templates/sequential-phase.md | 218 ++ docs/skills/templates/skill-md.md | 274 +++ 
docs/zh-CN/components/index.md | 354 +-- docs/zh-CN/features/dashboard.md | 373 +-- docs/zh-CN/features/terminal.md | 346 +-- docs/zh/skills/core-skills.md | 1140 +++++++++ docs/zh/skills/custom.md | 270 ++ docs/zh/skills/index.md | 275 +++ docs/zh/skills/reference.md | 174 ++ 78 files changed, 12352 insertions(+), 3638 deletions(-) delete mode 100644 .claude/skills/workflow-wave-plan/SKILL.md create mode 100644 docs/.vitepress/demos/ComponentGallery.tsx create mode 100644 docs/.vitepress/demos/ComponentGalleryZh.tsx create mode 100644 docs/.vitepress/demos/DashboardOverview.tsx create mode 100644 docs/.vitepress/demos/FloatingPanelsDemo.tsx create mode 100644 docs/.vitepress/demos/MiniStatCards.tsx create mode 100644 docs/.vitepress/demos/ProjectInfoBanner.tsx create mode 100644 docs/.vitepress/demos/QueueItemStatusDemo.tsx create mode 100644 docs/.vitepress/demos/QueueManagementDemo.tsx create mode 100644 docs/.vitepress/demos/ResizablePanesDemo.tsx create mode 100644 docs/.vitepress/demos/SchedulerConfigDemo.tsx create mode 100644 docs/.vitepress/demos/SessionCarousel.tsx create mode 100644 docs/.vitepress/demos/TerminalDashboardOverview.tsx create mode 100644 docs/.vitepress/demos/TerminalLayoutPresets.tsx create mode 100644 docs/.vitepress/demos/badge-variants.tsx create mode 100644 docs/.vitepress/demos/card-variants.tsx create mode 100644 docs/.vitepress/demos/checkbox-variants.tsx create mode 100644 docs/.vitepress/demos/input-variants.tsx create mode 100644 docs/.vitepress/demos/select-variants.tsx create mode 100644 docs/.vitepress/theme/inlineDemoPlugin.ts create mode 100644 docs/public/icon-concepts.html create mode 100644 docs/skills/specs/document-standards.md create mode 100644 docs/skills/specs/issue-classification.md create mode 100644 docs/skills/specs/quality-gates.md create mode 100644 docs/skills/specs/quality-standards.md create mode 100644 docs/skills/specs/reference-docs-spec.md create mode 100644 docs/skills/specs/review-dimensions.md 
create mode 100644 docs/skills/templates/architecture-doc.md create mode 100644 docs/skills/templates/autonomous-action.md create mode 100644 docs/skills/templates/autonomous-orchestrator.md create mode 100644 docs/skills/templates/epics-template.md create mode 100644 docs/skills/templates/issue-template.md create mode 100644 docs/skills/templates/product-brief.md create mode 100644 docs/skills/templates/requirements-prd.md create mode 100644 docs/skills/templates/review-report.md create mode 100644 docs/skills/templates/sequential-phase.md create mode 100644 docs/skills/templates/skill-md.md create mode 100644 docs/zh/skills/core-skills.md create mode 100644 docs/zh/skills/custom.md create mode 100644 docs/zh/skills/index.md create mode 100644 docs/zh/skills/reference.md diff --git a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md b/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md index 2b4f2eb9..cad9b125 100644 --- a/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md +++ b/.claude/skills/workflow-lite-plan/phases/01-lite-plan.md @@ -104,6 +104,17 @@ const sessionFolder = `.workflow/.lite-plan/${sessionId}` bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`) ``` +**TodoWrite (Phase 1 start)**: +```javascript +TodoWrite({ todos: [ + { content: "Phase 1: Exploration", status: "in_progress", activeForm: "Exploring codebase" }, + { content: "Phase 2: Clarification", status: "pending", activeForm: "Collecting clarifications" }, + { content: "Phase 3: Planning", status: "pending", activeForm: "Generating plan" }, + { content: "Phase 4: Confirmation", status: "pending", activeForm: "Awaiting confirmation" }, + { content: "Phase 5: Execution", status: "pending", activeForm: "Executing tasks" } +]}) +``` + **Exploration Decision Logic**: ```javascript // Check if task description already contains prior analysis context (from analyze-with-file) @@ -307,6 +318,17 @@ Angles 
explored: ${explorationManifest.explorations.map(e => e.angle).join(', ') `) ``` +**TodoWrite (Phase 1 complete)**: +```javascript +TodoWrite({ todos: [ + { content: "Phase 1: Exploration", status: "completed", activeForm: "Exploring codebase" }, + { content: "Phase 2: Clarification", status: "in_progress", activeForm: "Collecting clarifications" }, + { content: "Phase 3: Planning", status: "pending", activeForm: "Generating plan" }, + { content: "Phase 4: Confirmation", status: "pending", activeForm: "Awaiting confirmation" }, + { content: "Phase 5: Execution", status: "pending", activeForm: "Executing tasks" } +]}) +``` + **Output**: - `${sessionFolder}/exploration-{angle1}.json` - `${sessionFolder}/exploration-{angle2}.json` @@ -560,6 +582,17 @@ Note: Use files[].change (not modification_points), convergence.criteria (not ac **Output**: `${sessionFolder}/plan.json` +**TodoWrite (Phase 3 complete)**: +```javascript +TodoWrite({ todos: [ + { content: "Phase 1: Exploration", status: "completed", activeForm: "Exploring codebase" }, + { content: "Phase 2: Clarification", status: "completed", activeForm: "Collecting clarifications" }, + { content: "Phase 3: Planning", status: "completed", activeForm: "Generating plan" }, + { content: "Phase 4: Confirmation", status: "in_progress", activeForm: "Awaiting confirmation" }, + { content: "Phase 5: Execution", status: "pending", activeForm: "Executing tasks" } +]}) +``` + --- ### Phase 4: Task Confirmation & Execution Selection @@ -649,6 +682,19 @@ if (autoYes) { } ``` +**TodoWrite (Phase 4 confirmed)**: +```javascript +const executionLabel = userSelection.execution_method + +TodoWrite({ todos: [ + { content: "Phase 1: Exploration", status: "completed", activeForm: "Exploring codebase" }, + { content: "Phase 2: Clarification", status: "completed", activeForm: "Collecting clarifications" }, + { content: "Phase 3: Planning", status: "completed", activeForm: "Generating plan" }, + { content: `Phase 4: Confirmed 
[${executionLabel}]`, status: "completed", activeForm: "Confirmed" }, + { content: `Phase 5: Execution [${executionLabel}]`, status: "in_progress", activeForm: `Executing [${executionLabel}]` } +]}) +``` + --- ### Phase 5: Handoff to Execution diff --git a/.claude/skills/workflow-lite-plan/phases/02-lite-execute.md b/.claude/skills/workflow-lite-plan/phases/02-lite-execute.md index cd241b92..38ccd57a 100644 --- a/.claude/skills/workflow-lite-plan/phases/02-lite-execute.md +++ b/.claude/skills/workflow-lite-plan/phases/02-lite-execute.md @@ -350,9 +350,9 @@ executionCalls = createExecutionCalls(getTasks(planObject), executionMethod).map TodoWrite({ todos: executionCalls.map(c => ({ - content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} (${c.tasks.length} tasks)`, + content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} [${c.executor}] (${c.tasks.length} tasks)`, status: "pending", - activeForm: `Executing ${c.id}` + activeForm: `Executing ${c.id} [${c.executor}]` })) }) ``` diff --git a/.claude/skills/workflow-multi-cli-plan/phases/01-multi-cli-plan.md b/.claude/skills/workflow-multi-cli-plan/phases/01-multi-cli-plan.md index 3c4bd4c2..a87da93c 100644 --- a/.claude/skills/workflow-multi-cli-plan/phases/01-multi-cli-plan.md +++ b/.claude/skills/workflow-multi-cli-plan/phases/01-multi-cli-plan.md @@ -258,6 +258,19 @@ AskUserQuestion({ - Need More Analysis → Phase 2 with feedback - Cancel → Save session for resumption +**TodoWrite Update (Phase 4 Decision)**: +```javascript +const executionLabel = userSelection.execution_method // "Agent" / "Codex" / "Auto" + +TodoWrite({ todos: [ + { content: "Phase 1: Context Gathering", status: "completed", activeForm: "Gathering context" }, + { content: "Phase 2: Multi-CLI Discussion", status: "completed", activeForm: "Running discussion" }, + { content: "Phase 3: Present Options", status: "completed", activeForm: "Presenting options" }, + { content: `Phase 4: User Decision [${executionLabel}]`, status: 
"completed", activeForm: "Decision recorded" }, + { content: `Phase 5: Plan Generation [${executionLabel}]`, status: "in_progress", activeForm: `Generating plan [${executionLabel}]` } +]}) +``` + ### Phase 5: Plan Generation & Execution Handoff **Step 1: Build Context-Package** (Orchestrator responsibility): diff --git a/.claude/skills/workflow-multi-cli-plan/phases/02-lite-execute.md b/.claude/skills/workflow-multi-cli-plan/phases/02-lite-execute.md index 8e60f27d..9c2863ae 100644 --- a/.claude/skills/workflow-multi-cli-plan/phases/02-lite-execute.md +++ b/.claude/skills/workflow-multi-cli-plan/phases/02-lite-execute.md @@ -357,9 +357,9 @@ executionCalls = createExecutionCalls(getTasks(planObject), executionMethod).map TodoWrite({ todos: executionCalls.map(c => ({ - content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} (${c.tasks.length} tasks)`, + content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} [${c.executor}] (${c.tasks.length} tasks)`, status: "pending", - activeForm: `Executing ${c.id}` + activeForm: `Executing ${c.id} [${c.executor}]` })) }) ``` diff --git a/.claude/skills/workflow-plan/phases/04-task-generation.md b/.claude/skills/workflow-plan/phases/04-task-generation.md index fbc53076..2357e3e5 100644 --- a/.claude/skills/workflow-plan/phases/04-task-generation.md +++ b/.claude/skills/workflow-plan/phases/04-task-generation.md @@ -338,13 +338,20 @@ Output: ) ``` +**Executor Label** (computed after Step 4.0): +```javascript +const executorLabel = userConfig.executionMethod === 'agent' ? 'Agent' + : userConfig.executionMethod === 'hybrid' ? 
'Hybrid' + : `CLI (${userConfig.preferredCliTool})` +``` + ### TodoWrite Update (Phase 4 in progress) ```json [ {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, - {"content": "Phase 4: Task Generation", "status": "in_progress", "activeForm": "Executing task generation"} + {"content": "Phase 4: Task Generation [${executorLabel}]", "status": "in_progress", "activeForm": "Generating tasks [${executorLabel}]"} ] ``` @@ -354,7 +361,7 @@ Output: [ {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"}, {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"}, - {"content": "Phase 4: Task Generation", "status": "completed", "activeForm": "Executing task generation"} + {"content": "Phase 4: Task Generation [${executorLabel}]", "status": "completed", "activeForm": "Generating tasks [${executorLabel}]"} ] ``` diff --git a/.claude/skills/workflow-wave-plan/SKILL.md b/.claude/skills/workflow-wave-plan/SKILL.md deleted file mode 100644 index d9a5e54c..00000000 --- a/.claude/skills/workflow-wave-plan/SKILL.md +++ /dev/null @@ -1,896 +0,0 @@ ---- -name: workflow-wave-plan -description: CSV Wave planning and execution - explore via wave, resolve conflicts, execute from CSV with linked exploration context. Triggers on "workflow:wave-plan". -argument-hint: " [--yes|-y] [--concurrency|-c N]" -allowed-tools: Task, AskUserQuestion, Read, Write, Edit, Bash, Glob, Grep ---- - -# Workflow Wave Plan - -CSV Wave-based planning and execution. Uses structured CSV state for both exploration and execution, with cross-phase context propagation via `context_from` linking. 
- -## Architecture - -``` -Requirement - ↓ -┌─ Phase 1: Decompose ─────────────────────┐ -│ Analyze requirement → explore.csv │ -│ (1 row per exploration angle) │ -└────────────────────┬──────────────────────┘ - ↓ -┌─ Phase 2: Wave Explore ──────────────────┐ -│ Wave loop: spawn Explore agents │ -│ → findings/key_files → explore.csv │ -└────────────────────┬──────────────────────┘ - ↓ -┌─ Phase 3: Synthesize & Plan ─────────────┐ -│ Read explore findings → cross-reference │ -│ → resolve conflicts → tasks.csv │ -│ (context_from links to E* explore rows) │ -└────────────────────┬──────────────────────┘ - ↓ -┌─ Phase 4: Wave Execute ──────────────────┐ -│ Wave loop: build prev_context from CSV │ -│ → spawn code-developer agents per wave │ -│ → results → tasks.csv │ -└────────────────────┬──────────────────────┘ - ↓ -┌─ Phase 5: Aggregate ─────────────────────┐ -│ results.csv + context.md + summary │ -└───────────────────────────────────────────┘ -``` - -## Context Flow - -``` -explore.csv tasks.csv -┌──────────┐ ┌──────────┐ -│ E1: arch │──────────→│ T1: setup│ context_from: E1;E2 -│ findings │ │ prev_ctx │← E1+E2 findings -├──────────┤ ├──────────┤ -│ E2: deps │──────────→│ T2: impl │ context_from: E1;T1 -│ findings │ │ prev_ctx │← E1+T1 findings -├──────────┤ ├──────────┤ -│ E3: test │──┐ ┌───→│ T3: test │ context_from: E3;T2 -│ findings │ └───┘ │ prev_ctx │← E3+T2 findings -└──────────┘ └──────────┘ - -Two context channels: -1. Directed: context_from → prev_context (from CSV findings) -2. Broadcast: discoveries.ndjson (append-only shared board) -``` - ---- - -## CSV Schemas - -### explore.csv - -| Column | Type | Set By | Description | -|--------|------|--------|-------------| -| `id` | string | Decomposer | E1, E2, ... 
| -| `angle` | string | Decomposer | Exploration angle name | -| `description` | string | Decomposer | What to explore from this angle | -| `focus` | string | Decomposer | Keywords and focus areas | -| `deps` | string | Decomposer | Semicolon-separated dep IDs (usually empty) | -| `wave` | integer | Wave Engine | Wave number (usually 1) | -| `status` | enum | Agent | pending / completed / failed | -| `findings` | string | Agent | Discoveries (max 800 chars) | -| `key_files` | string | Agent | Relevant files (semicolon-separated) | -| `error` | string | Agent | Error message if failed | - -### tasks.csv - -| Column | Type | Set By | Description | -|--------|------|--------|-------------| -| `id` | string | Planner | T1, T2, ... | -| `title` | string | Planner | Task title | -| `description` | string | Planner | Self-contained task description — what to implement | -| `test` | string | Planner | Test cases: what tests to write and how to verify (unit/integration/edge) | -| `acceptance_criteria` | string | Planner | Measurable conditions that define "done" | -| `scope` | string | Planner | Target file/directory glob — constrains agent write area, prevents cross-task file conflicts | -| `hints` | string | Planner | Implementation tips + reference files. Format: `tips text \|\| file1;file2`. 
Either part is optional | -| `execution_directives` | string | Planner | Execution constraints: commands to run for verification, tool restrictions | -| `deps` | string | Planner | Dependency task IDs: T1;T2 | -| `context_from` | string | Planner | Context source IDs: **E1;E2;T1** | -| `wave` | integer | Wave Engine | Wave number (computed from deps) | -| `status` | enum | Agent | pending / completed / failed / skipped | -| `findings` | string | Agent | Execution findings (max 500 chars) | -| `files_modified` | string | Agent | Files modified (semicolon-separated) | -| `tests_passed` | boolean | Agent | Whether all defined test cases passed (true/false) | -| `acceptance_met` | string | Agent | Summary of which acceptance criteria were met/unmet | -| `error` | string | Agent | Error if failed | - -**context_from prefix convention**: `E*` → explore.csv lookup, `T*` → tasks.csv lookup. - ---- - -## Session Structure - -``` -.workflow/.wave-plan/{session-id}/ -├── explore.csv # Exploration state -├── tasks.csv # Execution state -├── discoveries.ndjson # Shared discovery board -├── explore-results/ # Detailed per-angle results -│ ├── E1.json -│ └── E2.json -├── task-results/ # Detailed per-task results -│ ├── T1.json -│ └── T2.json -├── results.csv # Final results export -└── context.md # Full context summary -``` - ---- - -## Session Initialization - -```javascript -const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString() - -// Parse flags -const AUTO_YES = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y') -const concurrencyMatch = $ARGUMENTS.match(/(?:--concurrency|-c)\s+(\d+)/) -const maxConcurrency = concurrencyMatch ? 
parseInt(concurrencyMatch[1]) : 4 - -const requirement = $ARGUMENTS - .replace(/--yes|-y|--concurrency\s+\d+|-c\s+\d+/g, '') - .trim() - -const slug = requirement.toLowerCase() - .replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-') - .substring(0, 40) -const dateStr = getUtc8ISOString().substring(0, 10).replace(/-/g, '') -const sessionId = `wp-${slug}-${dateStr}` -const sessionFolder = `.workflow/.wave-plan/${sessionId}` - -Bash(`mkdir -p ${sessionFolder}/explore-results ${sessionFolder}/task-results`) -``` - ---- - -## Phase 1: Decompose → explore.csv - -### 1.1 Analyze Requirement - -```javascript -const complexity = analyzeComplexity(requirement) -// Low: 1 angle | Medium: 2-3 angles | High: 3-4 angles - -const ANGLE_PRESETS = { - architecture: ['architecture', 'dependencies', 'integration-points', 'modularity'], - security: ['security', 'auth-patterns', 'dataflow', 'validation'], - performance: ['performance', 'bottlenecks', 'caching', 'data-access'], - bugfix: ['error-handling', 'dataflow', 'state-management', 'edge-cases'], - feature: ['patterns', 'integration-points', 'testing', 'dependencies'] -} - -function selectAngles(text, count) { - let preset = 'feature' - if (/refactor|architect|restructure|modular/.test(text)) preset = 'architecture' - else if (/security|auth|permission|access/.test(text)) preset = 'security' - else if (/performance|slow|optimi|cache/.test(text)) preset = 'performance' - else if (/fix|bug|error|broken/.test(text)) preset = 'bugfix' - return ANGLE_PRESETS[preset].slice(0, count) -} - -const angleCount = complexity === 'High' ? 4 : complexity === 'Medium' ? 
3 : 1 -const angles = selectAngles(requirement, angleCount) -``` - -### 1.2 Generate explore.csv - -```javascript -const header = 'id,angle,description,focus,deps,wave,status,findings,key_files,error' -const rows = angles.map((angle, i) => { - const id = `E${i + 1}` - const desc = `Explore codebase from ${angle} perspective for: ${requirement}` - return `"${id}","${angle}","${escCSV(desc)}","${angle}","",1,"pending","","",""` -}) - -Write(`${sessionFolder}/explore.csv`, [header, ...rows].join('\n')) -``` - -All exploration rows default to wave 1 (independent parallel). If angle dependencies exist, compute waves. - ---- - -## Phase 2: Wave Explore - -Execute exploration waves using `Task(Explore)` agents. - -### 2.1 Wave Loop - -```javascript -const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`)) -const maxExploreWave = Math.max(...exploreCSV.map(r => parseInt(r.wave))) - -for (let wave = 1; wave <= maxExploreWave; wave++) { - const waveRows = exploreCSV.filter(r => - parseInt(r.wave) === wave && r.status === 'pending' - ) - if (waveRows.length === 0) continue - - // Skip rows with failed dependencies - const validRows = waveRows.filter(r => { - if (!r.deps) return true - return r.deps.split(';').filter(Boolean).every(depId => { - const dep = exploreCSV.find(d => d.id === depId) - return dep && dep.status === 'completed' - }) - }) - - waveRows.filter(r => !validRows.includes(r)).forEach(r => { - r.status = 'skipped' - r.error = 'Dependency failed/skipped' - }) - - // ★ Spawn ALL explore agents in SINGLE message → parallel execution - const results = validRows.map(row => - Task({ - subagent_type: "Explore", - run_in_background: false, - description: `Explore: ${row.angle}`, - prompt: buildExplorePrompt(row, requirement, sessionFolder) - }) - ) - - // Collect results from JSON files → update explore.csv - validRows.forEach((row, i) => { - const resultPath = `${sessionFolder}/explore-results/${row.id}.json` - if (fileExists(resultPath)) { - const result = 
JSON.parse(Read(resultPath)) - row.status = result.status || 'completed' - row.findings = truncate(result.findings, 800) - row.key_files = Array.isArray(result.key_files) - ? result.key_files.join(';') - : (result.key_files || '') - row.error = result.error || '' - } else { - // Fallback: parse from agent output text - row.status = 'completed' - row.findings = truncate(results[i], 800) - } - }) - - writeCSV(`${sessionFolder}/explore.csv`, exploreCSV) -} -``` - -### 2.2 Explore Agent Prompt - -```javascript -function buildExplorePrompt(row, requirement, sessionFolder) { - return `## Exploration: ${row.angle} - -**Requirement**: ${requirement} -**Focus**: ${row.focus} - -### MANDATORY FIRST STEPS -1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not) -2. Read project context: .workflow/project-tech.json (if exists) - ---- - -## Instructions -Explore the codebase from the **${row.angle}** perspective: -1. Discover relevant files, modules, and patterns -2. Identify integration points and dependencies -3. Note constraints, risks, and conventions -4. Find existing patterns to follow -5. Share discoveries: append findings to ${sessionFolder}/discoveries.ndjson - -## Output -Write findings to: ${sessionFolder}/explore-results/${row.id}.json - -JSON format: -{ - "status": "completed", - "findings": "Concise summary of ${row.angle} discoveries (max 800 chars)", - "key_files": ["relevant/file1.ts", "relevant/file2.ts"], - "details": { - "patterns": ["pattern descriptions"], - "integration_points": [{"file": "path", "description": "..."}], - "constraints": ["constraint descriptions"], - "recommendations": ["recommendation descriptions"] - } -} - -Also provide a 2-3 sentence summary.` -} -``` - ---- - -## Phase 3: Synthesize & Plan → tasks.csv - -Read exploration findings, cross-reference, resolve conflicts, generate execution tasks. 
- -### 3.1 Load Explore Results - -```javascript -const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`)) -const completed = exploreCSV.filter(r => r.status === 'completed') - -// Load detailed result JSONs where available -const detailedResults = {} -completed.forEach(r => { - const path = `${sessionFolder}/explore-results/${r.id}.json` - if (fileExists(path)) detailedResults[r.id] = JSON.parse(Read(path)) -}) -``` - -### 3.2 Conflict Resolution Protocol - -Cross-reference findings across all exploration angles: - -```javascript -// 1. Identify common files referenced by multiple angles -const fileRefs = {} -completed.forEach(r => { - r.key_files.split(';').filter(Boolean).forEach(f => { - if (!fileRefs[f]) fileRefs[f] = [] - fileRefs[f].push({ angle: r.angle, id: r.id }) - }) -}) -const sharedFiles = Object.entries(fileRefs).filter(([_, refs]) => refs.length > 1) - -// 2. Detect conflicting recommendations -// Compare recommendations from different angles for same file/module -// Flag contradictions (angle A says "refactor X" vs angle B says "extend X") - -// 3. Resolution rules: -// a. Safety first — when approaches conflict, choose safer option -// b. Consistency — prefer approaches aligned with existing patterns -// c. Scope — prefer minimal-change approaches -// d. Document — note all resolved conflicts for transparency - -const synthesis = { - sharedFiles, - conflicts: detectConflicts(completed, detailedResults), - resolutions: [], - allKeyFiles: [...new Set(completed.flatMap(r => r.key_files.split(';').filter(Boolean)))] -} -``` - -### 3.3 Generate tasks.csv - -Decompose into execution tasks based on synthesized exploration: - -```javascript -// Task decomposition rules: -// 1. Group by feature/module (not per-file) -// 2. Each description is self-contained (agent sees only its row + prev_context) -// 3. deps only when task B requires task A's output -// 4. context_from links relevant explore rows (E*) and predecessor tasks (T*) -// 5. 
Prefer parallel (minimize deps) -// 6. Use exploration findings: key_files → target files, patterns → references, -// integration_points → dependency relationships, constraints → included in description -// 7. Each task MUST include: test (how to verify), acceptance_criteria (what defines done) -// 8. scope must not overlap between tasks in the same wave -// 9. hints = implementation tips + reference files (format: tips || file1;file2) -// 10. execution_directives = commands to run for verification, tool restrictions - -const tasks = [] -// Claude decomposes requirement using exploration synthesis -// Example: -// tasks.push({ id: 'T1', title: 'Setup types', description: '...', test: 'Verify types compile', acceptance_criteria: 'All interfaces exported', scope: 'src/types/**', hints: 'Follow existing type patterns || src/types/index.ts', execution_directives: 'tsc --noEmit', deps: '', context_from: 'E1;E2' }) -// tasks.push({ id: 'T2', title: 'Implement core', description: '...', test: 'Unit test: core logic', acceptance_criteria: 'All functions pass tests', scope: 'src/core/**', hints: 'Reuse BaseService || src/services/Base.ts', execution_directives: 'npm test -- --grep core', deps: 'T1', context_from: 'E1;E2;T1' }) -// tasks.push({ id: 'T3', title: 'Add tests', description: '...', test: 'Integration test suite', acceptance_criteria: '>80% coverage', scope: 'tests/**', hints: 'Follow existing test patterns || tests/auth.test.ts', execution_directives: 'npm test', deps: 'T2', context_from: 'E3;T2' }) - -// Compute waves -const waves = computeWaves(tasks) -tasks.forEach(t => { t.wave = waves[t.id] }) - -// Write tasks.csv -const header = 'id,title,description,test,acceptance_criteria,scope,hints,execution_directives,deps,context_from,wave,status,findings,files_modified,tests_passed,acceptance_met,error' -const rows = tasks.map(t => - [t.id, escCSV(t.title), escCSV(t.description), escCSV(t.test), escCSV(t.acceptance_criteria), escCSV(t.scope), escCSV(t.hints), 
escCSV(t.execution_directives), t.deps, t.context_from, t.wave, 'pending', '', '', '', '', ''] - .map(v => `"${v}"`).join(',') -) - -Write(`${sessionFolder}/tasks.csv`, [header, ...rows].join('\n')) -``` - -### 3.4 User Confirmation - -```javascript -if (!AUTO_YES) { - const maxWave = Math.max(...tasks.map(t => t.wave)) - - console.log(` -## Execution Plan - -Explore: ${completed.length} angles completed -Conflicts resolved: ${synthesis.conflicts.length} -Tasks: ${tasks.length} across ${maxWave} waves - -${Array.from({length: maxWave}, (_, i) => i + 1).map(w => { - const wt = tasks.filter(t => t.wave === w) - return `### Wave ${w} (${wt.length} tasks, concurrent) -${wt.map(t => ` - [${t.id}] ${t.title} (from: ${t.context_from})`).join('\n')}` -}).join('\n')} - `) - - AskUserQuestion({ - questions: [{ - question: `Proceed with ${tasks.length} tasks across ${maxWave} waves?`, - header: "Confirm", - multiSelect: false, - options: [ - { label: "Execute", description: "Proceed with wave execution" }, - { label: "Modify", description: `Edit ${sessionFolder}/tasks.csv then re-run` }, - { label: "Cancel", description: "Abort" } - ] - }] - }) -} -``` - ---- - -## Phase 4: Wave Execute - -Execute tasks from tasks.csv in wave order, with prev_context built from both explore.csv and tasks.csv. 
- -### 4.1 Wave Loop - -```javascript -const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`)) -const failedIds = new Set() -const skippedIds = new Set() - -let tasksCSV = parseCSV(Read(`${sessionFolder}/tasks.csv`)) -const maxWave = Math.max(...tasksCSV.map(r => parseInt(r.wave))) - -for (let wave = 1; wave <= maxWave; wave++) { - // Re-read master CSV (updated by previous wave) - tasksCSV = parseCSV(Read(`${sessionFolder}/tasks.csv`)) - - const waveRows = tasksCSV.filter(r => - parseInt(r.wave) === wave && r.status === 'pending' - ) - if (waveRows.length === 0) continue - - // Skip on failed dependencies (cascade) - const validRows = [] - for (const row of waveRows) { - const deps = (row.deps || '').split(';').filter(Boolean) - if (deps.some(d => failedIds.has(d) || skippedIds.has(d))) { - skippedIds.add(row.id) - row.status = 'skipped' - row.error = 'Dependency failed/skipped' - continue - } - validRows.push(row) - } - - if (validRows.length === 0) { - writeCSV(`${sessionFolder}/tasks.csv`, tasksCSV) - continue - } - - // Build prev_context for each row from explore.csv + tasks.csv - validRows.forEach(row => { - row._prev_context = buildPrevContext(row.context_from, exploreCSV, tasksCSV) - }) - - // ★ Spawn ALL task agents in SINGLE message → parallel execution - const results = validRows.map(row => - Task({ - subagent_type: "code-developer", - run_in_background: false, - description: row.title, - prompt: buildExecutePrompt(row, requirement, sessionFolder) - }) - ) - - // Collect results → update tasks.csv - validRows.forEach((row, i) => { - const resultPath = `${sessionFolder}/task-results/${row.id}.json` - if (fileExists(resultPath)) { - const result = JSON.parse(Read(resultPath)) - row.status = result.status || 'completed' - row.findings = truncate(result.findings, 500) - row.files_modified = Array.isArray(result.files_modified) - ? result.files_modified.join(';') - : (result.files_modified || '') - row.tests_passed = String(result.tests_passed ?? 
'') - row.acceptance_met = result.acceptance_met || '' - row.error = result.error || '' - } else { - row.status = 'completed' - row.findings = truncate(results[i], 500) - } - - if (row.status === 'failed') failedIds.add(row.id) - delete row._prev_context // runtime-only, don't persist - }) - - writeCSV(`${sessionFolder}/tasks.csv`, tasksCSV) -} -``` - -### 4.2 prev_context Builder - -The key function linking exploration context to execution: - -```javascript -function buildPrevContext(contextFrom, exploreCSV, tasksCSV) { - if (!contextFrom) return 'No previous context available' - - const ids = contextFrom.split(';').filter(Boolean) - const entries = [] - - ids.forEach(id => { - if (id.startsWith('E')) { - // ← Look up in explore.csv (cross-phase link) - const row = exploreCSV.find(r => r.id === id) - if (row && row.status === 'completed' && row.findings) { - entries.push(`[Explore ${row.angle}] ${row.findings}`) - if (row.key_files) entries.push(` Key files: ${row.key_files}`) - } - } else if (id.startsWith('T')) { - // ← Look up in tasks.csv (same-phase link) - const row = tasksCSV.find(r => r.id === id) - if (row && row.status === 'completed' && row.findings) { - entries.push(`[Task ${row.id}: ${row.title}] ${row.findings}`) - if (row.files_modified) entries.push(` Modified: ${row.files_modified}`) - } - } - }) - - return entries.length > 0 ? entries.join('\n') : 'No previous context available' -} -``` - -### 4.3 Execute Agent Prompt - -```javascript -function buildExecutePrompt(row, requirement, sessionFolder) { - return `## Task: ${row.title} - -**ID**: ${row.id} -**Goal**: ${requirement} -**Scope**: ${row.scope || 'Not specified'} - -## Description -${row.description} - -### Implementation Hints & Reference Files -${row.hints || 'None'} - -> Format: \`tips text || file1;file2\`. Read ALL reference files (after ||) before starting. Apply tips (before ||) as guidance. 
- -### Execution Directives -${row.execution_directives || 'None'} - -> Commands to run for verification, tool restrictions, or environment requirements. - -### Test Cases -${row.test || 'None specified'} - -### Acceptance Criteria -${row.acceptance_criteria || 'None specified'} - -## Previous Context (from exploration and predecessor tasks) -${row._prev_context} - -### MANDATORY FIRST STEPS -1. Read shared discoveries: ${sessionFolder}/discoveries.ndjson (if exists, skip if not) -2. Read project context: .workflow/project-tech.json (if exists) - ---- - -## Execution Protocol - -1. **Read references**: Parse hints — read all files listed after \`||\` to understand existing patterns -2. **Read discoveries**: Load ${sessionFolder}/discoveries.ndjson for shared exploration findings -3. **Use context**: Apply previous tasks' findings from prev_context above -4. **Stay in scope**: ONLY create/modify files within ${row.scope || 'project'} — do NOT touch files outside this boundary -5. **Apply hints**: Follow implementation tips from hints (before \`||\`) -6. **Implement**: Execute changes described in the task description -7. **Write tests**: Implement the test cases defined above -8. **Run directives**: Execute commands from execution_directives to verify your work -9. **Verify acceptance**: Ensure all acceptance criteria are met before reporting completion -10. **Share discoveries**: Append exploration findings to shared board: - \\\`\\\`\\\`bash - echo '{"ts":"","worker":"${row.id}","type":"","data":{...}}' >> ${sessionFolder}/discoveries.ndjson - \\\`\\\`\\\` -11. 
**Report result**: Write JSON to output file - -## Output -Write results to: ${sessionFolder}/task-results/${row.id}.json - -{ - "status": "completed" | "failed", - "findings": "What was done (max 500 chars)", - "files_modified": ["file1.ts", "file2.ts"], - "tests_passed": true | false, - "acceptance_met": "Summary of which acceptance criteria were met/unmet", - "error": "" -} - -**IMPORTANT**: Set status to "completed" ONLY if: -- All test cases pass -- All acceptance criteria are met -Otherwise set status to "failed" with details in error field.` -} -``` - ---- - -## Phase 5: Aggregate - -### 5.1 Generate Results - -```javascript -const finalTasks = parseCSV(Read(`${sessionFolder}/tasks.csv`)) -const exploreCSV = parseCSV(Read(`${sessionFolder}/explore.csv`)) - -Bash(`cp "${sessionFolder}/tasks.csv" "${sessionFolder}/results.csv"`) - -const completed = finalTasks.filter(r => r.status === 'completed') -const failed = finalTasks.filter(r => r.status === 'failed') -const skipped = finalTasks.filter(r => r.status === 'skipped') -const maxWave = Math.max(...finalTasks.map(r => parseInt(r.wave))) -``` - -### 5.2 Generate context.md - -```javascript -const contextMd = `# Wave Plan Results - -**Requirement**: ${requirement} -**Session**: ${sessionId} -**Timestamp**: ${getUtc8ISOString()} - -## Summary - -| Metric | Count | -|--------|-------| -| Explore Angles | ${exploreCSV.length} | -| Total Tasks | ${finalTasks.length} | -| Completed | ${completed.length} | -| Failed | ${failed.length} | -| Skipped | ${skipped.length} | -| Waves | ${maxWave} | - -## Exploration Results - -${exploreCSV.map(e => `### ${e.id}: ${e.angle} (${e.status}) -${e.findings || 'N/A'} -Key files: ${e.key_files || 'none'}`).join('\n\n')} - -## Task Results - -${finalTasks.map(t => `### ${t.id}: ${t.title} (${t.status}) - -| Field | Value | -|-------|-------| -| Wave | ${t.wave} | -| Scope | ${t.scope || 'none'} | -| Dependencies | ${t.deps || 'none'} | -| Context From | ${t.context_from || 'none'} 
| -| Tests Passed | ${t.tests_passed || 'N/A'} | -| Acceptance Met | ${t.acceptance_met || 'N/A'} | -| Error | ${t.error || 'none'} | - -**Description**: ${t.description} - -**Test Cases**: ${t.test || 'N/A'} - -**Acceptance Criteria**: ${t.acceptance_criteria || 'N/A'} - -**Hints**: ${t.hints || 'N/A'} - -**Execution Directives**: ${t.execution_directives || 'N/A'} - -**Findings**: ${t.findings || 'N/A'} - -**Files Modified**: ${t.files_modified || 'none'}`).join('\n\n---\n\n')} - -## All Modified Files - -${[...new Set(finalTasks.flatMap(t => - (t.files_modified || '').split(';')).filter(Boolean) -)].map(f => '- ' + f).join('\n') || 'None'} -` - -Write(`${sessionFolder}/context.md`, contextMd) -``` - -### 5.3 Summary & Next Steps - -```javascript -console.log(` -## Wave Plan Complete - -Session: ${sessionFolder} -Explore: ${exploreCSV.filter(r => r.status === 'completed').length}/${exploreCSV.length} angles -Tasks: ${completed.length}/${finalTasks.length} completed, ${failed.length} failed, ${skipped.length} skipped -Waves: ${maxWave} - -Files: -- explore.csv — exploration state -- tasks.csv — execution state -- results.csv — final results -- context.md — full report -- discoveries.ndjson — shared discoveries -`) - -if (!AUTO_YES && failed.length > 0) { - AskUserQuestion({ - questions: [{ - question: `${failed.length} tasks failed. 
Next action?`, - header: "Next Step", - multiSelect: false, - options: [ - { label: "Retry Failed", description: "Reset failed + skipped, re-execute Phase 4" }, - { label: "View Report", description: "Display context.md" }, - { label: "Done", description: "Complete session" } - ] - }] - }) - // If Retry: reset failed/skipped status to pending, re-run Phase 4 -} -``` - ---- - -## Utilities - -### Wave Computation (Kahn's BFS) - -```javascript -function computeWaves(tasks) { - const inDegree = {}, adj = {}, depth = {} - tasks.forEach(t => { inDegree[t.id] = 0; adj[t.id] = []; depth[t.id] = 1 }) - - tasks.forEach(t => { - const deps = (t.deps || '').split(';').filter(Boolean) - deps.forEach(dep => { - if (adj[dep]) { adj[dep].push(t.id); inDegree[t.id]++ } - }) - }) - - const queue = Object.keys(inDegree).filter(id => inDegree[id] === 0) - queue.forEach(id => { depth[id] = 1 }) - - while (queue.length > 0) { - const current = queue.shift() - adj[current].forEach(next => { - depth[next] = Math.max(depth[next], depth[current] + 1) - inDegree[next]-- - if (inDegree[next] === 0) queue.push(next) - }) - } - - if (Object.values(inDegree).some(d => d > 0)) { - throw new Error('Circular dependency detected') - } - - return depth // { taskId: waveNumber } -} -``` - -### CSV Helpers - -```javascript -function escCSV(s) { return String(s || '').replace(/"/g, '""') } - -function parseCSV(content) { - const lines = content.trim().split('\n') - const header = lines[0].split(',').map(h => h.replace(/"/g, '').trim()) - return lines.slice(1).filter(l => l.trim()).map(line => { - const values = parseCSVLine(line) - const row = {} - header.forEach((col, i) => { row[col] = (values[i] || '').replace(/^"|"$/g, '') }) - return row - }) -} - -function writeCSV(path, rows) { - if (rows.length === 0) return - // Exclude runtime-only columns (prefixed with _) - const cols = Object.keys(rows[0]).filter(k => !k.startsWith('_')) - const header = cols.join(',') - const lines = rows.map(r => - 
cols.map(c => `"${escCSV(r[c])}"`).join(',') - ) - Write(path, [header, ...lines].join('\n')) -} - -function truncate(s, max) { - s = String(s || '') - return s.length > max ? s.substring(0, max - 3) + '...' : s -} -``` - ---- - -## Discovery Board Protocol - -Shared `discoveries.ndjson` — append-only NDJSON accessible to all agents across all phases. - -**Lifecycle**: -- Created by the first agent to write a discovery -- Carries over across all phases and waves — never cleared -- Agents append via `echo '...' >> discoveries.ndjson` - -**Format**: NDJSON, each line is a self-contained JSON: - -```jsonl -{"ts":"...","worker":"E1","type":"code_pattern","data":{"name":"repo-pattern","file":"src/repos/Base.ts"}} -{"ts":"...","worker":"T2","type":"integration_point","data":{"file":"src/auth/index.ts","exports":["auth"]}} -``` - -**Discovery Types**: - -| type | Dedup Key | Description | -|------|-----------|-------------| -| `code_pattern` | `data.name` | Reusable code pattern found | -| `integration_point` | `data.file` | Module connection point | -| `convention` | singleton | Code style conventions | -| `blocker` | `data.issue` | Blocking issue encountered | -| `tech_stack` | singleton | Project technology stack | -| `test_command` | singleton | Test commands discovered | - -**Protocol Rules**: -1. Read board before own exploration → skip covered areas -2. Write discoveries immediately via `echo >>` → don't batch -3. Deduplicate — check existing entries; skip if same type + dedup key exists -4. 
Append-only — never modify or delete existing lines - ---- - -## Error Handling - -| Error | Resolution | -|-------|------------| -| Explore agent failure | Mark as failed in explore.csv, exclude from planning | -| All explores failed | Fallback: plan directly from requirement without exploration | -| Execute agent failure | Mark as failed, skip dependents (cascade) | -| Agent timeout | Mark as failed in results, continue with wave | -| Circular dependency | Abort wave computation, report cycle | -| CSV parse error | Validate CSV format before execution, show line number | -| discoveries.ndjson corrupt | Ignore malformed lines, continue with valid entries | - ---- - -## Core Rules - -1. **Wave Order is Sacred**: Never execute wave N before wave N-1 completes -2. **CSV is Source of Truth**: Read master CSV before each wave, write after -3. **Context via CSV**: prev_context built from CSV findings, not from memory -4. **E* ↔ T* Linking**: tasks.csv `context_from` references explore.csv rows for cross-phase context -5. **Skip on Failure**: Failed dep → skip dependent (cascade) -6. **Discovery Board Append-Only**: Never clear or modify discoveries.ndjson -7. **Explore Before Execute**: Phase 2 completes before Phase 4 starts -8. **DO NOT STOP**: Continuous execution until all waves complete or remaining skipped - ---- - -## Best Practices - -1. **Exploration Angles**: 1 for simple, 3-4 for complex; avoid redundant angles -2. **Context Linking**: Link every task to at least one explore row (E*) — exploration was done for a reason -3. **Task Granularity**: 3-10 tasks optimal; too many = overhead, too few = no parallelism -4. **Minimize Cross-Wave Deps**: More tasks in wave 1 = more parallelism -5. **Specific Descriptions**: Agent sees only its CSV row + prev_context — make description self-contained -6. **Non-Overlapping Scopes**: Same-wave tasks must not write to the same files -7. 
**Context From ≠ Deps**: `deps` = execution order constraint; `context_from` = information flow - ---- - -## Usage Recommendations - -| Scenario | Recommended Approach | -|----------|---------------------| -| Complex feature (unclear architecture) | `workflow:wave-plan` — explore first, then plan | -| Simple known-pattern task | `$csv-wave-pipeline` — skip exploration, direct execution | -| Independent parallel tasks | `$csv-wave-pipeline -c 8` — single wave, max parallelism | -| Diamond dependency (A→B,C→D) | `workflow:wave-plan` — 3 waves with context propagation | -| Unknown codebase | `workflow:wave-plan` — exploration phase is essential | diff --git a/ccw/frontend/src/lib/api.ts b/ccw/frontend/src/lib/api.ts index 39f40d8b..86c81ee8 100644 --- a/ccw/frontend/src/lib/api.ts +++ b/ccw/frontend/src/lib/api.ts @@ -104,41 +104,131 @@ export interface ApiError { // ========== CSRF Token Handling ========== /** - * In-memory CSRF token storage - * The token is obtained from X-CSRF-Token response header and stored here - * because the XSRF-TOKEN cookie is HttpOnly and cannot be read by JavaScript + * CSRF token pool for concurrent request support + * The pool maintains multiple tokens to support parallel mutating requests */ -let csrfToken: string | null = null; +const MAX_CSRF_TOKEN_POOL_SIZE = 5; + +// Token pool queue - FIFO for fair distribution +let csrfTokenQueue: string[] = []; /** - * Get CSRF token from memory + * Get a CSRF token from the pool + * @returns Token string or undefined if pool is empty */ -function getCsrfToken(): string | null { - return csrfToken; +function getCsrfTokenFromPool(): string | undefined { + return csrfTokenQueue.shift(); } /** - * Set CSRF token from response header + * Add a CSRF token to the pool with deduplication + * @param token - Token to add + */ +function addCsrfTokenToPool(token: string): void { + if (!token) return; + // Deduplication: don't add if already in pool + if (csrfTokenQueue.includes(token)) return; + // Limit 
pool size + if (csrfTokenQueue.length >= MAX_CSRF_TOKEN_POOL_SIZE) return; + csrfTokenQueue.push(token); +} + +/** + * Get current pool size (for debugging) + */ +export function getCsrfPoolSize(): number { + return csrfTokenQueue.length; +} + +/** + * Lock for deduplicating concurrent token fetch requests + * Prevents multiple simultaneous calls to fetchTokenSynchronously + */ +let tokenFetchPromise: Promise<string> | null = null; + +/** + * Synchronously fetch a single token when pool is depleted + * This blocks the request until a token is available + * Uses a lock mechanism to deduplicate concurrent fetch requests + */ +async function fetchTokenSynchronously(): Promise<string> { + // If a fetch is already in progress, wait for it + if (tokenFetchPromise) { + return tokenFetchPromise; + } + + // Create new fetch promise and store as lock + tokenFetchPromise = (async () => { + try { + const response = await fetch('/api/csrf-token', { + credentials: 'same-origin', + }); + if (!response.ok) { + throw new Error('Failed to fetch CSRF token'); + } + const data = await response.json(); + const token = data.csrfToken; + if (!token) { + throw new Error('No CSRF token in response'); + } + return token; + } finally { + // Release lock after completion (success or failure) + tokenFetchPromise = null; + } + })(); + + return tokenFetchPromise; +} + +/** + * Set CSRF token from response header (adds to pool) */ function updateCsrfToken(response: Response): void { const token = response.headers.get('X-CSRF-Token'); if (token) { - csrfToken = token; + addCsrfTokenToPool(token); } } /** - * Initialize CSRF token by fetching from server + * Initialize CSRF token pool by fetching multiple tokens from server * Should be called once on app initialization */ export async function initializeCsrfToken(): Promise<void> { try { - const response = await fetch('/api/csrf-token', { + // Prefetch 5 tokens for pool + const response = await fetch(`/api/csrf-token?count=${MAX_CSRF_TOKEN_POOL_SIZE}`, { credentials:
'same-origin', }); - updateCsrfToken(response); + + if (!response.ok) { + throw new Error('Failed to initialize CSRF token pool'); + } + + const data = await response.json(); + + // Handle both single token and batch response formats + if (data.tokens && Array.isArray(data.tokens)) { + // Batch response - add all tokens to pool + for (const token of data.tokens) { + addCsrfTokenToPool(token); + } + } else if (data.csrfToken) { + // Single token response - add to pool + addCsrfTokenToPool(data.csrfToken); + } + + console.log(`[CSRF] Token pool initialized with ${csrfTokenQueue.length} tokens`); } catch (error) { - console.error('[CSRF] Failed to initialize CSRF token:', error); + console.error('[CSRF] Failed to initialize CSRF token pool:', error); + // Fallback: try to get at least one token + try { + const token = await fetchTokenSynchronously(); + addCsrfTokenToPool(token); + } catch (fallbackError) { + console.error('[CSRF] Fallback token fetch failed:', fallbackError); + } } } @@ -155,7 +245,18 @@ async function fetchApi( // Add CSRF token for mutating requests if (options.method && ['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method)) { - const token = getCsrfToken(); + let token = getCsrfTokenFromPool(); + + // If pool is depleted, synchronously fetch a new token + if (!token) { + console.warn('[CSRF] Token pool depleted, fetching synchronously'); + try { + token = await fetchTokenSynchronously(); + } catch (error) { + throw new Error('Failed to acquire CSRF token for request'); + } + } + if (token) { headers.set('X-CSRF-Token', token); } @@ -172,7 +273,7 @@ async function fetchApi( credentials: 'same-origin', }); - // Update CSRF token from response header + // Update CSRF token from response header (adds to pool) updateCsrfToken(response); if (!response.ok) { @@ -2963,6 +3064,18 @@ export interface ReviewDimension { findings: ReviewFinding[]; } +export interface ReviewSummary { + phase?: string; + status?: string; + severityDistribution?: { + 
critical: number; + high: number; + medium: number; + low: number; + }; + criticalFiles?: string[]; +} + export interface ReviewSession { session_id: string; title?: string; @@ -2970,6 +3083,7 @@ export interface ReviewSession { type: 'review'; phase?: string; reviewDimensions?: ReviewDimension[]; + reviewSummary?: ReviewSummary; _isActive?: boolean; created_at?: string; updated_at?: string; @@ -2986,6 +3100,17 @@ export interface ReviewSessionsResponse { progress?: unknown; }>; }; + // New: Support activeSessions with review type + activeSessions?: Array<{ + session_id: string; + project?: string; + type?: string; + status?: string; + created_at?: string; + hasReview?: boolean; + reviewSummary?: ReviewSummary; + reviewDimensions?: ReviewDimension[]; + }>; } /** @@ -2994,12 +3119,34 @@ export interface ReviewSessionsResponse { export async function fetchReviewSessions(): Promise { const data = await fetchApi('/api/data'); - // If reviewSessions field exists (legacy format), use it + // Priority 1: Use activeSessions with type='review' or hasReview=true + if (data.activeSessions) { + const reviewSessions = data.activeSessions.filter( + session => session.type === 'review' || session.hasReview + ); + if (reviewSessions.length > 0) { + return reviewSessions.map(session => ({ + session_id: session.session_id, + title: session.project || session.session_id, + description: '', + type: 'review' as const, + phase: session.reviewSummary?.phase, + reviewDimensions: session.reviewDimensions || [], + reviewSummary: session.reviewSummary, + _isActive: true, + created_at: session.created_at, + updated_at: undefined, + status: session.status + })); + } + } + + // Priority 2: Legacy reviewSessions field if (data.reviewSessions && data.reviewSessions.length > 0) { return data.reviewSessions; } - // Otherwise, transform reviewData.sessions into ReviewSession format + // Priority 3: Legacy reviewData.sessions format if (data.reviewData?.sessions) { return 
data.reviewData.sessions.map(session => ({ session_id: session.session_id, diff --git a/ccw/frontend/src/locales/en/review-session.json b/ccw/frontend/src/locales/en/review-session.json index f1a08afe..3df761df 100644 --- a/ccw/frontend/src/locales/en/review-session.json +++ b/ccw/frontend/src/locales/en/review-session.json @@ -96,6 +96,11 @@ "message": "Try adjusting your filters or search query.", "noFixProgress": "No fix progress data available" }, + "notExecuted": { + "title": "Review Not Yet Executed", + "message": "This review session has been created but the review process has not been started yet. No findings have been generated.", + "hint": "💡 Tip: Execute the review workflow to start analyzing code and generate findings." + }, "notFound": { "title": "Review Session Not Found", "message": "The requested review session could not be found." diff --git a/ccw/frontend/src/locales/zh/review-session.json b/ccw/frontend/src/locales/zh/review-session.json index 10d13650..00c0f8a6 100644 --- a/ccw/frontend/src/locales/zh/review-session.json +++ b/ccw/frontend/src/locales/zh/review-session.json @@ -96,6 +96,11 @@ "message": "尝试调整筛选条件或搜索查询。", "noFixProgress": "无修复进度数据" }, + "notExecuted": { + "title": "审查尚未执行", + "message": "此审查会话已创建,但审查流程尚未启动。尚未生成任何发现结果。", + "hint": "💡 提示:请执行审查工作流以开始分析代码并生成发现结果。" + }, "notFound": { "title": "未找到审查会话", "message": "无法找到请求的审查会话。" diff --git a/ccw/frontend/src/pages/ReviewSessionPage.tsx b/ccw/frontend/src/pages/ReviewSessionPage.tsx index 85e87dde..fa40bc61 100644 --- a/ccw/frontend/src/pages/ReviewSessionPage.tsx +++ b/ccw/frontend/src/pages/ReviewSessionPage.tsx @@ -765,13 +765,32 @@ export function ReviewSessionPage() { {filteredFindings.length === 0 ? ( - -

- {formatMessage({ id: 'reviewSession.empty.title' })} -

-

- {formatMessage({ id: 'reviewSession.empty.message' })} -

+ {/* Check if review hasn't been executed yet */} + {reviewSession?.reviewSummary?.status === 'in_progress' && + (!reviewSession?.reviewDimensions || reviewSession.reviewDimensions.length === 0) ? ( + <> + +

+ {formatMessage({ id: 'reviewSession.notExecuted.title' })} +

+

+ {formatMessage({ id: 'reviewSession.notExecuted.message' })} +

+
+ {formatMessage({ id: 'reviewSession.notExecuted.hint' })} +
+ + ) : ( + <> + +

+ {formatMessage({ id: 'reviewSession.empty.title' })} +

+

+ {formatMessage({ id: 'reviewSession.empty.message' })} +

+ + )}
) : ( diff --git a/ccw/src/core/auth/csrf-manager.ts b/ccw/src/core/auth/csrf-manager.ts index 0703b583..7acb7a8a 100644 --- a/ccw/src/core/auth/csrf-manager.ts +++ b/ccw/src/core/auth/csrf-manager.ts @@ -3,6 +3,7 @@ import { randomBytes } from 'crypto'; export interface CsrfTokenManagerOptions { tokenTtlMs?: number; cleanupIntervalMs?: number; + maxTokensPerSession?: number; } type CsrfTokenRecord = { @@ -13,14 +14,20 @@ const DEFAULT_TOKEN_TTL_MS = 15 * 60 * 1000; // 15 minutes const DEFAULT_CLEANUP_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes +const DEFAULT_MAX_TOKENS_PER_SESSION = 5; export class CsrfTokenManager { private readonly tokenTtlMs: number; - private readonly records = new Map<string, CsrfTokenRecord>(); + private readonly maxTokensPerSession: number; + // sessionId -> (token -> record) - supports multiple tokens per session + private readonly sessionTokens = new Map<string, Map<string, CsrfTokenRecord>>(); + // Quick lookup: token -> sessionId for validation + private readonly tokenToSession = new Map<string, string>(); private readonly cleanupTimer: NodeJS.Timeout | null; constructor(options: CsrfTokenManagerOptions = {}) { this.tokenTtlMs = options.tokenTtlMs ?? DEFAULT_TOKEN_TTL_MS; + this.maxTokensPerSession = options.maxTokensPerSession ?? DEFAULT_MAX_TOKENS_PER_SESSION; const cleanupIntervalMs = options.cleanupIntervalMs ??
DEFAULT_CLEANUP_INTERVAL_MS; if (cleanupIntervalMs > 0) { @@ -40,50 +47,137 @@ export class CsrfTokenManager { if (this.cleanupTimer) { clearInterval(this.cleanupTimer); } - this.records.clear(); + this.sessionTokens.clear(); + this.tokenToSession.clear(); } + /** + * Generate a single CSRF token for a session + */ generateToken(sessionId: string): string { - const token = randomBytes(32).toString('hex'); - this.records.set(token, { - sessionId, - expiresAtMs: Date.now() + this.tokenTtlMs, - used: false, - }); - return token; + const tokens = this.generateTokens(sessionId, 1); + return tokens[0]; } + /** + * Generate multiple CSRF tokens for a session (pool pattern) + * @param sessionId - Session identifier + * @param count - Number of tokens to generate (max: maxTokensPerSession) + * @returns Array of generated tokens + */ + generateTokens(sessionId: string, count: number): string[] { + // Get or create session token map + let sessionMap = this.sessionTokens.get(sessionId); + if (!sessionMap) { + sessionMap = new Map(); + this.sessionTokens.set(sessionId, sessionMap); + } + + // Limit count to max tokens per session + const currentCount = sessionMap.size; + const availableSlots = Math.max(0, this.maxTokensPerSession - currentCount); + const tokensToGenerate = Math.min(count, availableSlots); + + const tokens: string[] = []; + const expiresAtMs = Date.now() + this.tokenTtlMs; + + for (let i = 0; i < tokensToGenerate; i++) { + const token = randomBytes(32).toString('hex'); + const record: CsrfTokenRecord = { + sessionId, + expiresAtMs, + used: false, + }; + sessionMap.set(token, record); + this.tokenToSession.set(token, sessionId); + tokens.push(token); + } + + return tokens; + } + + /** + * Validate a CSRF token against a session + * Marks token as used (single-use) on successful validation + */ validateToken(token: string, sessionId: string): boolean { - const record = this.records.get(token); + // Quick lookup: get session from token + const tokenSessionId = 
this.tokenToSession.get(token); + if (!tokenSessionId) return false; + if (tokenSessionId !== sessionId) return false; + + // Get session's token map + const sessionMap = this.sessionTokens.get(sessionId); + if (!sessionMap) return false; + + // Get token record + const record = sessionMap.get(token); if (!record) return false; if (record.used) return false; - if (record.sessionId !== sessionId) return false; + // Check expiration if (Date.now() > record.expiresAtMs) { - this.records.delete(token); + this.removeToken(token, sessionId); return false; } + // Mark as used (single-use enforcement) record.used = true; return true; } + /** + * Remove a token from the pool + */ + private removeToken(token: string, sessionId: string): void { + const sessionMap = this.sessionTokens.get(sessionId); + if (sessionMap) { + sessionMap.delete(token); + // Clean up empty session maps + if (sessionMap.size === 0) { + this.sessionTokens.delete(sessionId); + } + } + this.tokenToSession.delete(token); + } + + /** + * Get the number of active tokens for a session + */ + getTokenCount(sessionId: string): number { + const sessionMap = this.sessionTokens.get(sessionId); + return sessionMap ? 
sessionMap.size : 0; + } + + /** + * Get total number of active tokens across all sessions + */ + getActiveTokenCount(): number { + return this.tokenToSession.size; + } + + /** + * Clean up expired and used tokens + */ cleanupExpiredTokens(nowMs: number = Date.now()): number { let removed = 0; - for (const [token, record] of this.records.entries()) { - if (record.used || nowMs > record.expiresAtMs) { - this.records.delete(token); - removed += 1; + for (const [sessionId, sessionMap] of this.sessionTokens.entries()) { + for (const [token, record] of sessionMap.entries()) { + if (record.used || nowMs > record.expiresAtMs) { + sessionMap.delete(token); + this.tokenToSession.delete(token); + removed += 1; + } + } + // Clean up empty session maps + if (sessionMap.size === 0) { + this.sessionTokens.delete(sessionId); + } } } return removed; } - - getActiveTokenCount(): number { - return this.records.size; - } } let csrfManagerInstance: CsrfTokenManager | null = null; diff --git a/ccw/src/core/routes/auth-routes.ts b/ccw/src/core/routes/auth-routes.ts index 274bec4b..409fd445 100644 --- a/ccw/src/core/routes/auth-routes.ts +++ b/ccw/src/core/routes/auth-routes.ts @@ -79,17 +79,37 @@ function setCsrfCookie(res: ServerResponse, token: string, maxAgeSeconds: number } export async function handleAuthRoutes(ctx: RouteContext): Promise<boolean> { - const { pathname, req, res } = ctx; + const { pathname, req, res, url } = ctx; if (pathname === '/api/csrf-token' && req.method === 'GET') { const sessionId = getOrCreateSessionId(req, res); const tokenManager = getCsrfTokenManager(); - const csrfToken = tokenManager.generateToken(sessionId); - res.setHeader('X-CSRF-Token', csrfToken); - setCsrfCookie(res, csrfToken, 15 * 60); - res.writeHead(200, { 'Content-Type': 'application/json; charset=utf-8' }); - res.end(JSON.stringify({ csrfToken })); + // Check for count parameter (pool pattern) + const countParam = url.searchParams.get('count'); + const count = countParam ?
Math.min(Math.max(1, parseInt(countParam, 10) || 1), 10) : 1; + + if (count === 1) { + // Single token response (existing behavior) + const csrfToken = tokenManager.generateToken(sessionId); + res.setHeader('X-CSRF-Token', csrfToken); + setCsrfCookie(res, csrfToken, 15 * 60); + res.writeHead(200, { 'Content-Type': 'application/json; charset=utf-8' }); + res.end(JSON.stringify({ csrfToken })); + } else { + // Batch token response (pool pattern) + const tokens = tokenManager.generateTokens(sessionId, count); + const firstToken = tokens[0]; + + // Set header and cookie with first token for compatibility + res.setHeader('X-CSRF-Token', firstToken); + setCsrfCookie(res, firstToken, 15 * 60); + res.writeHead(200, { 'Content-Type': 'application/json; charset=utf-8' }); + res.end(JSON.stringify({ + tokens, + expiresIn: 15 * 60, // seconds + })); + } return true; } diff --git a/ccw/tests/csrf-manager.test.ts b/ccw/tests/csrf-manager.test.ts index 49a753e2..cc0b1a79 100644 --- a/ccw/tests/csrf-manager.test.ts +++ b/ccw/tests/csrf-manager.test.ts @@ -60,5 +60,91 @@ describe('CsrfTokenManager', async () => { assert.equal(manager.validateToken(token, 'session-1'), true); manager.dispose(); }); + + // ========== Pool Pattern Tests ========== + + it('generateTokens produces N unique tokens', () => { + const manager = new mod.CsrfTokenManager({ cleanupIntervalMs: 0, maxTokensPerSession: 5 }); + const tokens = manager.generateTokens('session-1', 3); + + assert.equal(tokens.length, 3); + // All tokens should be unique + assert.equal(new Set(tokens).size, 3); + // All tokens should be valid hex + for (const token of tokens) { + assert.match(token, /^[a-f0-9]{64}$/); + } + manager.dispose(); + }); + + it('generateTokens respects maxTokensPerSession limit', () => { + const manager = new mod.CsrfTokenManager({ cleanupIntervalMs: 0, maxTokensPerSession: 5 }); + // First batch of 5 + const tokens1 = manager.generateTokens('session-1', 5); + assert.equal(tokens1.length, 5); + + // Second 
batch should be empty (pool full) + const tokens2 = manager.generateTokens('session-1', 3); + assert.equal(tokens2.length, 0); + + manager.dispose(); + }); + + it('getTokenCount returns correct count for session', () => { + const manager = new mod.CsrfTokenManager({ cleanupIntervalMs: 0 }); + manager.generateTokens('session-1', 3); + manager.generateTokens('session-2', 2); + + assert.equal(manager.getTokenCount('session-1'), 3); + assert.equal(manager.getTokenCount('session-2'), 2); + assert.equal(manager.getTokenCount('session-3'), 0); + manager.dispose(); + }); + + it('validateToken works with pool pattern (multiple tokens per session)', () => { + const manager = new mod.CsrfTokenManager({ cleanupIntervalMs: 0 }); + const tokens = manager.generateTokens('session-1', 3); + + // All tokens should be valid once + assert.equal(manager.validateToken(tokens[0], 'session-1'), true); + assert.equal(manager.validateToken(tokens[1], 'session-1'), true); + assert.equal(manager.validateToken(tokens[2], 'session-1'), true); + + // All tokens should now be invalid (used) + assert.equal(manager.validateToken(tokens[0], 'session-1'), false); + assert.equal(manager.validateToken(tokens[1], 'session-1'), false); + assert.equal(manager.validateToken(tokens[2], 'session-1'), false); + + manager.dispose(); + }); + + it('cleanupExpiredTokens handles multiple sessions', () => { + const manager = new mod.CsrfTokenManager({ tokenTtlMs: 10, cleanupIntervalMs: 0 }); + manager.generateTokens('session-1', 3); + manager.generateTokens('session-2', 2); + + const removed = manager.cleanupExpiredTokens(Date.now() + 100); + assert.equal(removed, 5); + assert.equal(manager.getActiveTokenCount(), 0); + assert.equal(manager.getTokenCount('session-1'), 0); + assert.equal(manager.getTokenCount('session-2'), 0); + manager.dispose(); + }); + + it('concurrent requests can use different tokens from pool', () => { + const manager = new mod.CsrfTokenManager({ cleanupIntervalMs: 0 }); + const tokens = 
manager.generateTokens('session-1', 5); + + // Simulate 5 concurrent requests using different tokens + const results = tokens.map(token => manager.validateToken(token, 'session-1')); + + // All should succeed + assert.deepEqual(results, [true, true, true, true, true]); + + // Token count should still be 5 (but all marked as used) + assert.equal(manager.getTokenCount('session-1'), 5); + + manager.dispose(); + }); }); diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index b96d6526..c6d7c7bd 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -80,7 +80,8 @@ export default withMermaid(defineConfig({ { text: 'Guide', link: '/guide/ch01-what-is-claude-dms3' }, { text: 'Commands', link: '/commands/claude/' }, { text: 'Skills', link: '/skills/' }, - { text: 'Features', link: '/features/spec' } + { text: 'Features', link: '/features/spec' }, + { text: 'Components', link: '/components/' } ], // Sidebar - 优化导航结构,增加二级标题和归类 @@ -134,6 +135,20 @@ export default withMermaid(defineConfig({ } ], '/skills/': [ + { + text: 'Overview', + collapsible: false, + items: [ + { text: 'Skills Guide', link: '/skills/' } + ] + }, + { + text: '📚 Conventions', + collapsible: true, + items: [ + { text: 'Naming Conventions', link: '/skills/naming-conventions' } + ] + }, { text: '⚡ Claude Skills', collapsible: true, @@ -192,9 +207,13 @@ export default withMermaid(defineConfig({ text: 'UI Components', collapsible: true, items: [ + { text: 'Overview', link: '/components/index' }, { text: 'Button', link: '/components/ui/button' }, { text: 'Card', link: '/components/ui/card' }, - { text: 'Input', link: '/components/ui/input' } + { text: 'Input', link: '/components/ui/input' }, + { text: 'Select', link: '/components/ui/select' }, + { text: 'Checkbox', link: '/components/ui/checkbox' }, + { text: 'Badge', link: '/components/ui/badge' } ] } ], @@ -292,11 +311,10 @@ export default withMermaid(defineConfig({ 'mermaid' ], config: (md) => { - // Add markdown-it plugins 
if needed - // Custom demo block transform is handled by markdownTransform.ts md.core.ruler.before('block', 'demo-blocks', (state) => { const src = state.src - const transformed = transformDemoBlocks(src, { path: '' }) + const filePath = (state as any).path || '' + const transformed = transformDemoBlocks(src, { path: filePath }) if (transformed !== src) { state.src = transformed } @@ -376,6 +394,20 @@ export default withMermaid(defineConfig({ } ], '/zh/skills/': [ + { + text: '概述', + collapsible: false, + items: [ + { text: '技能指南', link: '/zh/skills/' } + ] + }, + { + text: '📚 规范', + collapsible: true, + items: [ + { text: '命名规范', link: '/zh/skills/naming-conventions' } + ] + }, { text: '⚡ Claude Skills', collapsible: true, @@ -485,6 +517,53 @@ export default withMermaid(defineConfig({ ] } ], + '/zh-CN/skills/': [ + { + text: '概述', + collapsible: false, + items: [ + { text: '技能指南', link: '/zh-CN/skills/' } + ] + }, + { + text: '📚 规范', + collapsible: true, + items: [ + { text: '命名规范', link: '/zh-CN/skills/naming-conventions' } + ] + }, + { + text: '⚡ Claude Skills', + collapsible: true, + items: [ + { text: '概述', link: '/zh-CN/skills/claude-index' }, + { text: '协作', link: '/zh-CN/skills/claude-collaboration' }, + { text: '工作流', link: '/zh-CN/skills/claude-workflow' }, + { text: '记忆', link: '/zh-CN/skills/claude-memory' }, + { text: '审查', link: '/zh-CN/skills/claude-review' }, + { text: '元技能', link: '/zh-CN/skills/claude-meta' } + ] + }, + { + text: '🔧 Codex Skills', + collapsible: true, + items: [ + { text: '概述', link: '/zh-CN/skills/codex-index' }, + { text: '生命周期', link: '/zh-CN/skills/codex-lifecycle' }, + { text: '工作流', link: '/zh-CN/skills/codex-workflow' }, + { text: '专项', link: '/zh-CN/skills/codex-specialized' } + ] + }, + { + text: '🎨 自定义技能', + collapsible: true, + items: [ + { text: '概述', link: '/zh-CN/skills/custom' }, + { text: '核心技能', link: '/zh-CN/skills/core-skills' }, + { text: '参考', link: '/zh-CN/skills/reference' } + ] + } + ], '/zh-CN/features/': 
[ { text: '⚙️ 核心功能', @@ -494,6 +573,8 @@ export default withMermaid(defineConfig({ { text: 'Memory 记忆系统', link: '/zh-CN/features/memory' }, { text: 'CLI 调用', link: '/zh-CN/features/cli' }, { text: 'Dashboard 面板', link: '/zh-CN/features/dashboard' }, + { text: 'Terminal 终端监控', link: '/zh-CN/features/terminal' }, + { text: 'Queue 队列管理', link: '/zh-CN/features/queue' }, { text: 'CodexLens', link: '/zh-CN/features/codexlens' } ] } @@ -503,9 +584,13 @@ export default withMermaid(defineConfig({ text: 'UI 组件', collapsible: true, items: [ + { text: '概述', link: '/zh-CN/components/index' }, { text: 'Button 按钮', link: '/zh-CN/components/ui/button' }, { text: 'Card 卡片', link: '/zh-CN/components/ui/card' }, - { text: 'Input 输入框', link: '/zh-CN/components/ui/input' } + { text: 'Input 输入框', link: '/zh-CN/components/ui/input' }, + { text: 'Select 选择器', link: '/zh-CN/components/ui/select' }, + { text: 'Checkbox 复选框', link: '/zh-CN/components/ui/checkbox' }, + { text: 'Badge 徽标', link: '/zh-CN/components/ui/badge' } ] } ] diff --git a/docs/.vitepress/demos/ComponentGallery.tsx b/docs/.vitepress/demos/ComponentGallery.tsx new file mode 100644 index 00000000..1b4e03c1 --- /dev/null +++ b/docs/.vitepress/demos/ComponentGallery.tsx @@ -0,0 +1,326 @@ +/** + * Component Gallery Demo + * Interactive showcase of all UI components + */ +import React, { useState } from 'react' + +export default function ComponentGallery() { + const [selectedCategory, setSelectedCategory] = useState('all') + const [buttonVariant, setButtonVariant] = useState('default') + const [switchState, setSwitchState] = useState(false) + const [checkboxState, setCheckboxState] = useState(false) + const [selectedTab, setSelectedTab] = useState('variants') + + const categories = [ + { id: 'all', label: 'All Components' }, + { id: 'buttons', label: 'Buttons' }, + { id: 'forms', label: 'Forms' }, + { id: 'feedback', label: 'Feedback' }, + { id: 'navigation', label: 'Navigation' }, + { id: 'overlays', label: 'Overlays' }, + ] 
+ + const buttonVariants = ['default', 'destructive', 'outline', 'secondary', 'ghost', 'link'] + + return ( +
+ {/* Header */} +
+

UI Component Library

+

Interactive showcase of all available UI components

+
+ + {/* Category Filter */} +
+ {categories.map((cat) => ( + + ))} +
+ + {/* Buttons Section */} + {(selectedCategory === 'all' || selectedCategory === 'buttons') && ( +
+

Buttons

+
+ {/* Variant Selector */} +
+ +
+ {buttonVariants.map((variant) => ( + + ))} +
+
+ + {/* Button Sizes */} +
+ +
+ + + + +
+
+ + {/* All Button Variants */} +
+ +
+ + + + + + + +
+
+
+
+ )} + + {/* Forms Section */} + {(selectedCategory === 'all' || selectedCategory === 'forms') && ( +
+

Form Components

+
+ {/* Input */} +
+ + + +
+ + {/* Textarea */} +
+ +